diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index a18002288..60735ef18 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -120,45 +120,48 @@ Flowise has 3 different modules in a single mono repository. Flowise support different environment variables to configure your instance. You can specify the following variables in the `.env` file inside `packages/server` folder. Read [more](https://docs.flowiseai.com/environment-variables) -| Variable | Description | Type | Default | -| ---------------------------- | -------------------------------------------------------------------------------- | ------------------------------------------------ | ----------------------------------- | -| PORT | The HTTP port Flowise runs on | Number | 3000 | -| CORS_ORIGINS | The allowed origins for all cross-origin HTTP calls | String | | -| IFRAME_ORIGINS | The allowed origins for iframe src embedding | String | | -| FLOWISE_USERNAME | Username to login | String | | -| FLOWISE_PASSWORD | Password to login | String | | -| FLOWISE_FILE_SIZE_LIMIT | Upload File Size Limit | String | 50mb | -| DEBUG | Print logs from components | Boolean | | -| LOG_PATH | Location where log files are stored | String | `your-path/Flowise/logs` | -| LOG_LEVEL | Different levels of logs | Enum String: `error`, `info`, `verbose`, `debug` | `info` | -| LOG_JSON_SPACES | Spaces to beautify JSON logs | | 2 | -| APIKEY_STORAGE_TYPE | To store api keys on a JSON file or database. 
Default is `json` | Enum String: `json`, `db` | `json` | -| APIKEY_PATH | Location where api keys are saved when `APIKEY_STORAGE_TYPE` is `json` | String | `your-path/Flowise/packages/server` | -| TOOL_FUNCTION_BUILTIN_DEP | NodeJS built-in modules to be used for Tool Function | String | | -| TOOL_FUNCTION_EXTERNAL_DEP | External modules to be used for Tool Function | String | | -| DATABASE_TYPE | Type of database to store the flowise data | Enum String: `sqlite`, `mysql`, `postgres` | `sqlite` | -| DATABASE_PATH | Location where database is saved (When DATABASE_TYPE is sqlite) | String | `your-home-dir/.flowise` | -| DATABASE_HOST | Host URL or IP address (When DATABASE_TYPE is not sqlite) | String | | -| DATABASE_PORT | Database port (When DATABASE_TYPE is not sqlite) | String | | -| DATABASE_USER | Database username (When DATABASE_TYPE is not sqlite) | String | | -| DATABASE_PASSWORD | Database password (When DATABASE_TYPE is not sqlite) | String | | -| DATABASE_NAME | Database name (When DATABASE_TYPE is not sqlite) | String | | -| DATABASE_SSL_KEY_BASE64 | Database SSL client cert in base64 (takes priority over DATABASE_SSL) | Boolean | false | -| DATABASE_SSL | Database connection overssl (When DATABASE_TYPE is postgre) | Boolean | false | -| SECRETKEY_PATH | Location where encryption key (used to encrypt/decrypt credentials) is saved | String | `your-path/Flowise/packages/server` | -| FLOWISE_SECRETKEY_OVERWRITE | Encryption key to be used instead of the key stored in SECRETKEY_PATH | String | | -| DISABLE_FLOWISE_TELEMETRY | Turn off telemetry | Boolean | | -| MODEL_LIST_CONFIG_JSON | File path to load list of models from your local config file | String | `/your_model_list_config_file_path` | -| STORAGE_TYPE | Type of storage for uploaded files. 
default is `local` | Enum String: `s3`, `local` | `local` | -| BLOB_STORAGE_PATH | Local folder path where uploaded files are stored when `STORAGE_TYPE` is `local` | String | `your-home-dir/.flowise/storage` | -| S3_STORAGE_BUCKET_NAME | Bucket name to hold the uploaded files when `STORAGE_TYPE` is `s3` | String | | -| S3_STORAGE_ACCESS_KEY_ID | AWS Access Key | String | | -| S3_STORAGE_SECRET_ACCESS_KEY | AWS Secret Key | String | | -| S3_STORAGE_REGION | Region for S3 bucket | String | | -| S3_ENDPOINT_URL | Custom Endpoint for S3 | String | | -| S3_FORCE_PATH_STYLE | Set this to true to force the request to use path-style addressing | Boolean | false | -| SHOW_COMMUNITY_NODES | Show nodes created by community | Boolean | | -| DISABLED_NODES | Hide nodes from UI (comma separated list of node names) | String | | +| Variable | Description | Type | Default | +| ---------------------------------- | -------------------------------------------------------------------------------- | ------------------------------------------------ | ----------------------------------- | +| PORT | The HTTP port Flowise runs on | Number | 3000 | +| CORS_ORIGINS | The allowed origins for all cross-origin HTTP calls | String | | +| IFRAME_ORIGINS | The allowed origins for iframe src embedding | String | | +| FLOWISE_USERNAME | Username to login | String | | +| FLOWISE_PASSWORD | Password to login | String | | +| FLOWISE_FILE_SIZE_LIMIT | Upload File Size Limit | String | 50mb | +| DEBUG | Print logs from components | Boolean | | +| LOG_PATH | Location where log files are stored | String | `your-path/Flowise/logs` | +| LOG_LEVEL | Different levels of logs | Enum String: `error`, `info`, `verbose`, `debug` | `info` | +| LOG_JSON_SPACES | Spaces to beautify JSON logs | | 2 | +| APIKEY_STORAGE_TYPE | To store api keys on a JSON file or database. 
Default is `json` | Enum String: `json`, `db` | `json` | +| APIKEY_PATH | Location where api keys are saved when `APIKEY_STORAGE_TYPE` is `json` | String | `your-path/Flowise/packages/server` | +| TOOL_FUNCTION_BUILTIN_DEP | NodeJS built-in modules to be used for Tool Function | String | | +| TOOL_FUNCTION_EXTERNAL_DEP | External modules to be used for Tool Function | String | | +| DATABASE_TYPE | Type of database to store the flowise data | Enum String: `sqlite`, `mysql`, `postgres` | `sqlite` | +| DATABASE_PATH | Location where database is saved (When DATABASE_TYPE is sqlite) | String | `your-home-dir/.flowise` | +| DATABASE_HOST | Host URL or IP address (When DATABASE_TYPE is not sqlite) | String | | +| DATABASE_PORT | Database port (When DATABASE_TYPE is not sqlite) | String | | +| DATABASE_USER | Database username (When DATABASE_TYPE is not sqlite) | String | | +| DATABASE_PASSWORD | Database password (When DATABASE_TYPE is not sqlite) | String | | +| DATABASE_NAME | Database name (When DATABASE_TYPE is not sqlite) | String | | +| DATABASE_SSL_KEY_BASE64 | Database SSL client cert in base64 (takes priority over DATABASE_SSL) | Boolean | false | +| DATABASE_SSL | Database connection overssl (When DATABASE_TYPE is postgre) | Boolean | false | +| SECRETKEY_PATH | Location where encryption key (used to encrypt/decrypt credentials) is saved | String | `your-path/Flowise/packages/server` | +| FLOWISE_SECRETKEY_OVERWRITE | Encryption key to be used instead of the key stored in SECRETKEY_PATH | String | | +| MODEL_LIST_CONFIG_JSON | File path to load list of models from your local config file | String | `/your_model_list_config_file_path` | +| STORAGE_TYPE | Type of storage for uploaded files. 
default is `local` | Enum String: `s3`, `local`, `gcs` | `local` | +| BLOB_STORAGE_PATH | Local folder path where uploaded files are stored when `STORAGE_TYPE` is `local` | String | `your-home-dir/.flowise/storage` | +| S3_STORAGE_BUCKET_NAME | Bucket name to hold the uploaded files when `STORAGE_TYPE` is `s3` | String | | +| S3_STORAGE_ACCESS_KEY_ID | AWS Access Key | String | | +| S3_STORAGE_SECRET_ACCESS_KEY | AWS Secret Key | String | | +| S3_STORAGE_REGION | Region for S3 bucket | String | | +| S3_ENDPOINT_URL | Custom Endpoint for S3 | String | | +| S3_FORCE_PATH_STYLE | Set this to true to force the request to use path-style addressing | Boolean | false | +| GOOGLE_CLOUD_STORAGE_PROJ_ID | The GCP project id for cloud storage & logging when `STORAGE_TYPE` is `gcs` | String | | +| GOOGLE_CLOUD_STORAGE_CREDENTIAL | The credential key file path when `STORAGE_TYPE` is `gcs` | String | | +| GOOGLE_CLOUD_STORAGE_BUCKET_NAME | Bucket name to hold the uploaded files when `STORAGE_TYPE` is `gcs` | String | | +| GOOGLE_CLOUD_UNIFORM_BUCKET_ACCESS | Enable uniform bucket level access when `STORAGE_TYPE` is `gcs` | Boolean | true | +| SHOW_COMMUNITY_NODES | Show nodes created by community | Boolean | | +| DISABLED_NODES | Hide nodes from UI (comma separated list of node names) | String | | You can also specify the env variables when using `npx`. For example: diff --git a/Dockerfile b/Dockerfile index dfbf58d1b..a824b7f80 100644 --- a/Dockerfile +++ b/Dockerfile @@ -12,6 +12,10 @@ RUN apk add --no-cache build-base cairo-dev pango-dev # Install Chromium RUN apk add --no-cache chromium +# Install curl for container-level health checks +# Fixes: https://github.com/FlowiseAI/Flowise/issues/4126 +RUN apk add --no-cache curl + #install PNPM globaly RUN npm install -g pnpm diff --git a/README.md b/README.md index 543054da2..d1c9b2da4 100644 --- a/README.md +++ b/README.md @@ -1,8 +1,9 @@ - - -# Flowise - Build LLM Apps Easily +

+ + +

[![Release Notes](https://img.shields.io/github/release/FlowiseAI/Flowise)](https://github.com/FlowiseAI/Flowise/releases) [![Discord](https://img.shields.io/discord/1087698854775881778?label=Discord&logo=discord)](https://discord.gg/jbaHfsRVBW) @@ -10,11 +11,11 @@ [![GitHub star chart](https://img.shields.io/github/stars/FlowiseAI/Flowise?style=social)](https://star-history.com/#FlowiseAI/Flowise) [![GitHub fork](https://img.shields.io/github/forks/FlowiseAI/Flowise?style=social)](https://github.com/FlowiseAI/Flowise/fork) -English | [中文](./i18n/README-ZH.md) | [日本語](./i18n/README-JA.md) | [한국어](./i18n/README-KR.md) +English | [繁體中文](./i18n/README-TW.md) | [简体中文](./i18n/README-ZH.md) | [日本語](./i18n/README-JA.md) | [한국어](./i18n/README-KR.md) -

Drag & drop UI to build your customized LLM flow

+

Build AI Agents, Visually

- + ## ⚡Quick Start @@ -182,9 +183,9 @@ Deploy Flowise self-hosted in your existing infrastructure, we support various [ [![Deploy on Elestio](https://elest.io/images/logos/deploy-to-elestio-btn.png)](https://elest.io/open-source/flowiseai) - - [Sealos](https://cloud.sealos.io/?openapp=system-template%3FtemplateName%3Dflowise) + - [Sealos](https://template.sealos.io/deploy?templateName=flowise) - [![](https://raw.githubusercontent.com/labring-actions/templates/main/Deploy-on-Sealos.svg)](https://cloud.sealos.io/?openapp=system-template%3FtemplateName%3Dflowise) + [![Deploy on Sealos](https://sealos.io/Deploy-on-Sealos.svg)](https://template.sealos.io/deploy?templateName=flowise) - [RepoCloud](https://repocloud.io/details/?app_id=29) diff --git a/docker/.env.example b/docker/.env.example index bff5ef8f9..56ac56a80 100644 --- a/docker/.env.example +++ b/docker/.env.example @@ -42,13 +42,11 @@ BLOB_STORAGE_PATH=/root/.flowise/storage # LANGCHAIN_API_KEY=your_api_key # LANGCHAIN_PROJECT=your_project -# DISABLE_FLOWISE_TELEMETRY=true - # Uncomment the following line to enable model list config, load the list of models from your local config file # see https://raw.githubusercontent.com/FlowiseAI/Flowise/main/packages/components/models.json for the format # MODEL_LIST_CONFIG_JSON=/your_model_list_config_file_path -# STORAGE_TYPE=local (local | s3) +# STORAGE_TYPE=local (local | s3 | gcs) # BLOB_STORAGE_PATH=/your_storage_path/.flowise/storage # S3_STORAGE_BUCKET_NAME=flowise # S3_STORAGE_ACCESS_KEY_ID= @@ -56,6 +54,10 @@ BLOB_STORAGE_PATH=/root/.flowise/storage # S3_STORAGE_REGION=us-west-2 # S3_ENDPOINT_URL= # S3_FORCE_PATH_STYLE=false +# GOOGLE_CLOUD_STORAGE_CREDENTIAL=/the/keyfilename/path +# GOOGLE_CLOUD_STORAGE_PROJ_ID= +# GOOGLE_CLOUD_STORAGE_BUCKET_NAME= +# GOOGLE_CLOUD_UNIFORM_BUCKET_ACCESS=true # SHOW_COMMUNITY_NODES=true # DISABLED_NODES=bufferMemory,chatOpenAI (comma separated list of node names to disable) @@ -86,6 +88,8 @@ 
BLOB_STORAGE_PATH=/root/.flowise/storage # QUEUE_NAME=flowise-queue # QUEUE_REDIS_EVENT_STREAM_MAX_LEN=100000 # WORKER_CONCURRENCY=100000 +# REMOVE_ON_AGE=86400 +# REMOVE_ON_COUNT=10000 # REDIS_URL= # REDIS_HOST=localhost # REDIS_PORT=6379 @@ -94,4 +98,6 @@ BLOB_STORAGE_PATH=/root/.flowise/storage # REDIS_TLS= # REDIS_CERT= # REDIS_KEY= -# REDIS_CA= \ No newline at end of file +# REDIS_CA= +# REDIS_KEEP_ALIVE= +# ENABLE_BULLMQ_DASHBOARD= \ No newline at end of file diff --git a/docker/Dockerfile b/docker/Dockerfile index 762e3d296..82a55d6a2 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -13,7 +13,7 @@ RUN npm install -g flowise FROM node:20-alpine # Install runtime dependencies -RUN apk add --no-cache chromium git python3 py3-pip make g++ build-base cairo-dev pango-dev +RUN apk add --no-cache chromium git python3 py3-pip make g++ build-base cairo-dev pango-dev curl # Set the environment variable for Puppeteer to find Chromium ENV PUPPETEER_EXECUTABLE_PATH=/usr/bin/chromium-browser diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml index 42b81bab2..3e5584863 100644 --- a/docker/docker-compose.yml +++ b/docker/docker-compose.yml @@ -28,7 +28,6 @@ services: - LOG_LEVEL=${LOG_LEVEL} - LOG_PATH=${LOG_PATH} - BLOB_STORAGE_PATH=${BLOB_STORAGE_PATH} - - DISABLE_FLOWISE_TELEMETRY=${DISABLE_FLOWISE_TELEMETRY} - MODEL_LIST_CONFIG_JSON=${MODEL_LIST_CONFIG_JSON} - GLOBAL_AGENT_HTTP_PROXY=${GLOBAL_AGENT_HTTP_PROXY} - GLOBAL_AGENT_HTTPS_PROXY=${GLOBAL_AGENT_HTTPS_PROXY} @@ -38,6 +37,8 @@ services: - WORKER_CONCURRENCY=${WORKER_CONCURRENCY} - QUEUE_NAME=${QUEUE_NAME} - QUEUE_REDIS_EVENT_STREAM_MAX_LEN=${QUEUE_REDIS_EVENT_STREAM_MAX_LEN} + - REMOVE_ON_AGE=${REMOVE_ON_AGE} + - REMOVE_ON_COUNT=${REMOVE_ON_COUNT} - REDIS_URL=${REDIS_URL} - REDIS_HOST=${REDIS_HOST} - REDIS_PORT=${REDIS_PORT} @@ -47,8 +48,16 @@ services: - REDIS_CERT=${REDIS_CERT} - REDIS_KEY=${REDIS_KEY} - REDIS_CA=${REDIS_CA} + - REDIS_KEEP_ALIVE=${REDIS_KEEP_ALIVE} + - 
ENABLE_BULLMQ_DASHBOARD=${ENABLE_BULLMQ_DASHBOARD} ports: - '${PORT}:${PORT}' + healthcheck: + test: ['CMD', 'curl', '-f', 'http://localhost:${PORT}/api/v1/ping'] + interval: 10s + timeout: 5s + retries: 5 + start_period: 30s volumes: - ~/.flowise:/root/.flowise entrypoint: /bin/sh -c "sleep 3; flowise start" diff --git a/docker/worker/docker-compose.yml b/docker/worker/docker-compose.yml index 88a8631d0..193d9cd0d 100644 --- a/docker/worker/docker-compose.yml +++ b/docker/worker/docker-compose.yml @@ -28,7 +28,6 @@ services: - LOG_LEVEL=${LOG_LEVEL} - LOG_PATH=${LOG_PATH} - BLOB_STORAGE_PATH=${BLOB_STORAGE_PATH} - - DISABLE_FLOWISE_TELEMETRY=${DISABLE_FLOWISE_TELEMETRY} - MODEL_LIST_CONFIG_JSON=${MODEL_LIST_CONFIG_JSON} - GLOBAL_AGENT_HTTP_PROXY=${GLOBAL_AGENT_HTTP_PROXY} - GLOBAL_AGENT_HTTPS_PROXY=${GLOBAL_AGENT_HTTPS_PROXY} @@ -38,6 +37,8 @@ services: - WORKER_CONCURRENCY=${WORKER_CONCURRENCY} - QUEUE_NAME=${QUEUE_NAME} - QUEUE_REDIS_EVENT_STREAM_MAX_LEN=${QUEUE_REDIS_EVENT_STREAM_MAX_LEN} + - REMOVE_ON_AGE=${REMOVE_ON_AGE} + - REMOVE_ON_COUNT=${REMOVE_ON_COUNT} - REDIS_URL=${REDIS_URL} - REDIS_HOST=${REDIS_HOST} - REDIS_PORT=${REDIS_PORT} @@ -47,6 +48,8 @@ services: - REDIS_CERT=${REDIS_CERT} - REDIS_KEY=${REDIS_KEY} - REDIS_CA=${REDIS_CA} + - REDIS_KEEP_ALIVE=${REDIS_KEEP_ALIVE} + - ENABLE_BULLMQ_DASHBOARD=${ENABLE_BULLMQ_DASHBOARD} ports: - '${PORT}:${PORT}' volumes: diff --git a/i18n/CONTRIBUTING-ZH.md b/i18n/CONTRIBUTING-ZH.md index 0ab1eb961..45626785e 100644 --- a/i18n/CONTRIBUTING-ZH.md +++ b/i18n/CONTRIBUTING-ZH.md @@ -140,7 +140,6 @@ Flowise 支持不同的环境变量来配置您的实例。您可以在 `package | DATABASE_NAME | 数据库名称(当 DATABASE_TYPE 不是 sqlite 时) | 字符串 | | | SECRETKEY_PATH | 保存加密密钥(用于加密/解密凭据)的位置 | 字符串 | `your-path/Flowise/packages/server` | | FLOWISE_SECRETKEY_OVERWRITE | 加密密钥用于替代存储在 SECRETKEY_PATH 中的密钥 | 字符串 | -| DISABLE_FLOWISE_TELEMETRY | 关闭遥测 | 字符串 | | MODEL_LIST_CONFIG_JSON | 加载模型的位置 | 字符 | `/your_model_list_config_file_path` | | STORAGE_TYPE | 上传文件的存储类型 | 枚举字符串: 
`local`, `s3` | `local` | | BLOB_STORAGE_PATH | 上传文件存储的本地文件夹路径, 当`STORAGE_TYPE`是`local` | 字符串 | `your-home-dir/.flowise/storage` | diff --git a/i18n/README-JA.md b/i18n/README-JA.md index dab0dd843..a329059ed 100644 --- a/i18n/README-JA.md +++ b/i18n/README-JA.md @@ -1,8 +1,9 @@ - - -# Flowise - LLM アプリを簡単に構築 +

+ + +

[![Release Notes](https://img.shields.io/github/release/FlowiseAI/Flowise)](https://github.com/FlowiseAI/Flowise/releases) [![Discord](https://img.shields.io/discord/1087698854775881778?label=Discord&logo=discord)](https://discord.gg/jbaHfsRVBW) @@ -10,11 +11,11 @@ [![GitHub star chart](https://img.shields.io/github/stars/FlowiseAI/Flowise?style=social)](https://star-history.com/#FlowiseAI/Flowise) [![GitHub fork](https://img.shields.io/github/forks/FlowiseAI/Flowise?style=social)](https://github.com/FlowiseAI/Flowise/fork) -[English](../README.md) | [中文](./README-ZH.md) | 日本語 | [한국어](./README-KR.md) +[English](../README.md) | [繁體中文](./README-TW.md) | [简体中文](./README-ZH.md) | 日本語 | [한국어](./README-KR.md) -

ドラッグ&ドロップでカスタマイズした LLM フローを構築できる UI

+

AIエージェントをビジュアルに構築

- + ## ⚡ クイックスタート diff --git a/i18n/README-KR.md b/i18n/README-KR.md index b34e22628..c02b0b066 100644 --- a/i18n/README-KR.md +++ b/i18n/README-KR.md @@ -1,8 +1,9 @@ - - -# Flowise - 간편한 LLM 애플리케이션 제작 +

+ + +

[![Release Notes](https://img.shields.io/github/release/FlowiseAI/Flowise)](https://github.com/FlowiseAI/Flowise/releases) [![Discord](https://img.shields.io/discord/1087698854775881778?label=Discord&logo=discord)](https://discord.gg/jbaHfsRVBW) @@ -10,11 +11,11 @@ [![GitHub star chart](https://img.shields.io/github/stars/FlowiseAI/Flowise?style=social)](https://star-history.com/#FlowiseAI/Flowise) [![GitHub fork](https://img.shields.io/github/forks/FlowiseAI/Flowise?style=social)](https://github.com/FlowiseAI/Flowise/fork) -[English](../README.md) | [中文](./README-ZH.md) | [日本語](./README-JA.md) | 한국어 +[English](../README.md) | [繁體中文](./README-TW.md) | [简体中文](./README-ZH.md) | [日本語](./README-JA.md) | 한국어 -

드래그 앤 드롭 UI로 맞춤형 LLM 플로우 구축하기

+

AI 에이전트를 시각적으로 구축하세요

- + ## ⚡빠른 시작 가이드 diff --git a/i18n/README-TW.md b/i18n/README-TW.md new file mode 100644 index 000000000..f051e844e --- /dev/null +++ b/i18n/README-TW.md @@ -0,0 +1,217 @@ + + +

+ + +

+ +[![Release Notes](https://img.shields.io/github/release/FlowiseAI/Flowise)](https://github.com/FlowiseAI/Flowise/releases) +[![Discord](https://img.shields.io/discord/1087698854775881778?label=Discord&logo=discord)](https://discord.gg/jbaHfsRVBW) +[![Twitter Follow](https://img.shields.io/twitter/follow/FlowiseAI?style=social)](https://twitter.com/FlowiseAI) +[![GitHub star chart](https://img.shields.io/github/stars/FlowiseAI/Flowise?style=social)](https://star-history.com/#FlowiseAI/Flowise) +[![GitHub fork](https://img.shields.io/github/forks/FlowiseAI/Flowise?style=social)](https://github.com/FlowiseAI/Flowise/fork) + +[English](../README.md) | 繁體中文 | [简体中文](./README-ZH.md) | [日本語](./README-JA.md) | [한국어](./README-KR.md) + +

可視化建構 AI/LLM 流程

+ + + +## ⚡ 快速開始 + +下載並安裝 [NodeJS](https://nodejs.org/en/download) >= 18.15.0 + +1. 安裝 Flowise + ```bash + npm install -g flowise + ``` +2. 啟動 Flowise + + ```bash + npx flowise start + ``` + + 使用用戶名和密碼 + + ```bash + npx flowise start --FLOWISE_USERNAME=user --FLOWISE_PASSWORD=1234 + ``` + +3. 打開 [http://localhost:3000](http://localhost:3000) + +## 🐳 Docker + +### Docker Compose + +1. 克隆 Flowise 項目 +2. 進入項目根目錄的 `docker` 文件夾 +3. 複製 `.env.example` 文件,粘貼到相同位置,並重命名為 `.env` 文件 +4. `docker compose up -d` +5. 打開 [http://localhost:3000](http://localhost:3000) +6. 您可以通過 `docker compose stop` 停止容器 + +### Docker 映像 + +1. 本地構建映像: + ```bash + docker build --no-cache -t flowise . + ``` +2. 運行映像: + + ```bash + docker run -d --name flowise -p 3000:3000 flowise + ``` + +3. 停止映像: + ```bash + docker stop flowise + ``` + +## 👨‍💻 開發者 + +Flowise 在單個 mono 存儲庫中有 3 個不同的模塊。 + +- `server`: 提供 API 邏輯的 Node 後端 +- `ui`: React 前端 +- `components`: 第三方節點集成 +- `api-documentation`: 從 express 自動生成的 swagger-ui API 文檔 + +### 先決條件 + +- 安裝 [PNPM](https://pnpm.io/installation) + ```bash + npm i -g pnpm + ``` + +### 設置 + +1. 克隆存儲庫 + + ```bash + git clone https://github.com/FlowiseAI/Flowise.git + ``` + +2. 進入存儲庫文件夾 + + ```bash + cd Flowise + ``` + +3. 安裝所有模塊的所有依賴項: + + ```bash + pnpm install + ``` + +4. 構建所有代碼: + + ```bash + pnpm build + ``` + +
+ 退出代碼 134(JavaScript 堆內存不足) + 如果在運行上述 `build` 腳本時遇到此錯誤,請嘗試增加 Node.js 堆大小並重新運行腳本: + + export NODE_OPTIONS="--max-old-space-size=4096" + pnpm build + +
+ +5. 啟動應用: + + ```bash + pnpm start + ``` + + 您現在可以訪問 [http://localhost:3000](http://localhost:3000) + +6. 對於開發構建: + + - 在 `packages/ui` 中創建 `.env` 文件並指定 `VITE_PORT`(參考 `.env.example`) + - 在 `packages/server` 中創建 `.env` 文件並指定 `PORT`(參考 `.env.example`) + - 運行 + + ```bash + pnpm dev + ``` + + 任何代碼更改都會自動重新加載應用程序 [http://localhost:8080](http://localhost:8080) + +## 🔒 認證 + +要啟用應用級別的身份驗證,請在 `packages/server` 中的 `.env` 文件中添加 `FLOWISE_USERNAME` 和 `FLOWISE_PASSWORD`: + +``` +FLOWISE_USERNAME=user +FLOWISE_PASSWORD=1234 +``` + +## 🌱 環境變量 + +Flowise 支持不同的環境變量來配置您的實例。您可以在 `packages/server` 文件夾中的 `.env` 文件中指定以下變量。閱讀 [更多](https://github.com/FlowiseAI/Flowise/blob/main/CONTRIBUTING.md#-env-variables) + +## 📖 文檔 + +[Flowise 文檔](https://docs.flowiseai.com/) + +## 🌐 自我托管 + +在您現有的基礎設施中部署 Flowise 自我托管,我們支持各種 [部署](https://docs.flowiseai.com/configuration/deployment) + +- [AWS](https://docs.flowiseai.com/configuration/deployment/aws) +- [Azure](https://docs.flowiseai.com/configuration/deployment/azure) +- [Digital Ocean](https://docs.flowiseai.com/configuration/deployment/digital-ocean) +- [GCP](https://docs.flowiseai.com/configuration/deployment/gcp) +- [阿里雲](https://computenest.console.aliyun.com/service/instance/create/default?type=user&ServiceName=Flowise社区版) +-
+ 其他 + + - [Railway](https://docs.flowiseai.com/configuration/deployment/railway) + + [![Deploy on Railway](https://railway.app/button.svg)](https://railway.app/template/pn4G8S?referralCode=WVNPD9) + + - [Render](https://docs.flowiseai.com/configuration/deployment/render) + + [![Deploy to Render](https://render.com/images/deploy-to-render-button.svg)](https://docs.flowiseai.com/configuration/deployment/render) + + - [HuggingFace Spaces](https://docs.flowiseai.com/deployment/hugging-face) + + HuggingFace Spaces + + - [Elestio](https://elest.io/open-source/flowiseai) + + [![Deploy on Elestio](https://elest.io/images/logos/deploy-to-elestio-btn.png)](https://elest.io/open-source/flowiseai) + + - [Sealos](https://cloud.sealos.io/?openapp=system-template%3FtemplateName%3Dflowise) + + [![](https://raw.githubusercontent.com/labring-actions/templates/main/Deploy-on-Sealos.svg)](https://cloud.sealos.io/?openapp=system-template%3FtemplateName%3Dflowise) + + - [RepoCloud](https://repocloud.io/details/?app_id=29) + + [![Deploy on RepoCloud](https://d16t0pc4846x52.cloudfront.net/deploy.png)](https://repocloud.io/details/?app_id=29) + +
+ +## ☁️ Flowise 雲 + +[開始使用 Flowise 雲](https://flowiseai.com/) + +## 🙋 支持 + +隨時在 [討論](https://github.com/FlowiseAI/Flowise/discussions) 中提出任何問題、提出問題和請求新功能 + +## 🙌 貢獻 + +感謝這些出色的貢獻者 + + + + + +請參閱 [貢獻指南](CONTRIBUTING.md)。如果您有任何問題或問題,請通過 [Discord](https://discord.gg/jbaHfsRVBW) 與我們聯繫。 +[![Star History Chart](https://api.star-history.com/svg?repos=FlowiseAI/Flowise&type=Timeline)](https://star-history.com/#FlowiseAI/Flowise&Date) + +## 📄 許可證 + +此存儲庫中的源代碼根據 [Apache 許可證版本 2.0](LICENSE.md) 提供。 diff --git a/i18n/README-ZH.md b/i18n/README-ZH.md index dcbe172b5..5f313fb32 100644 --- a/i18n/README-ZH.md +++ b/i18n/README-ZH.md @@ -1,8 +1,9 @@ - - -# Flowise - 轻松构建 LLM 应用程序 +

+ + +

[![发布说明](https://img.shields.io/github/release/FlowiseAI/Flowise)](https://github.com/FlowiseAI/Flowise/releases) [![Discord](https://img.shields.io/discord/1087698854775881778?label=Discord&logo=discord)](https://discord.gg/jbaHfsRVBW) @@ -10,11 +11,11 @@ [![GitHub星图](https://img.shields.io/github/stars/FlowiseAI/Flowise?style=social)](https://star-history.com/#FlowiseAI/Flowise) [![GitHub分支](https://img.shields.io/github/forks/FlowiseAI/Flowise?style=social)](https://github.com/FlowiseAI/Flowise/fork) -[English](../README.md) | 中文 | [日本語](./README-JA.md) | [한국어](./README-KR.md) +[English](../README.md) | [繁體中文](./README-TW.md) | 简体中文 | [日本語](./README-JA.md) | [한국어](./README-KR.md) -

拖放界面构建定制化的LLM流程

+

可视化构建 AI/LLM 流程

- + ## ⚡ 快速入门 @@ -170,9 +171,9 @@ Flowise 支持不同的环境变量来配置您的实例。您可以在 `package [![Deploy](https://pub-da36157c854648669813f3f76c526c2b.r2.dev/deploy-on-elestio-black.png)](https://elest.io/open-source/flowiseai) - - [Sealos](https://cloud.sealos.io/?openapp=system-template%3FtemplateName%3Dflowise) + - [Sealos](https://template.sealos.io/deploy?templateName=flowise) - [![部署到 Sealos](https://raw.githubusercontent.com/labring-actions/templates/main/Deploy-on-Sealos.svg)](https://cloud.sealos.io/?openapp=system-template%3FtemplateName%3Dflowise) + [![部署到 Sealos](https://sealos.io/Deploy-on-Sealos.svg)](https://template.sealos.io/deploy?templateName=flowise) - [RepoCloud](https://repocloud.io/details/?app_id=29) diff --git a/images/flowise_agentflow.gif b/images/flowise_agentflow.gif new file mode 100644 index 000000000..0e51d24f4 Binary files /dev/null and b/images/flowise_agentflow.gif differ diff --git a/images/flowise_dark.svg b/images/flowise_dark.svg new file mode 100644 index 000000000..f5c0725fa --- /dev/null +++ b/images/flowise_dark.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/images/flowise_white.svg b/images/flowise_white.svg new file mode 100644 index 000000000..2a93a7449 --- /dev/null +++ b/images/flowise_white.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/package.json b/package.json index dbc40a39b..f7855fef5 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "flowise", - "version": "2.2.5", + "version": "3.0.0", "private": true, "homepage": "https://flowiseai.com", "workspaces": [ @@ -13,7 +13,7 @@ "scripts": { "build": "turbo run build", "build-force": "pnpm clean && turbo run build --force", - "dev": "turbo run dev --parallel", + "dev": "turbo run dev --parallel --no-cache", "start": "run-script-os", "start:windows": "cd packages/server/bin && run start", "start:default": "cd packages/server/bin && ./run start", @@ -62,7 +62,20 @@ "sqlite3" ], "overrides": { - "set-value": "^3.0.3" + "axios": "1.7.9", + 
"body-parser": "2.0.2", + "braces": "3.0.3", + "cross-spawn": "7.0.6", + "glob-parent": "6.0.2", + "http-proxy-middleware": "3.0.3", + "json5": "2.2.3", + "nth-check": "2.1.1", + "path-to-regexp": "0.1.12", + "prismjs": "1.29.0", + "semver": "7.7.1", + "set-value": "4.1.0", + "unset-value": "2.0.1", + "webpack-dev-middleware": "7.4.2" } }, "engines": { @@ -70,11 +83,11 @@ "pnpm": ">=9" }, "resolutions": { - "@google/generative-ai": "^0.15.0", + "@google/generative-ai": "^0.24.0", "@grpc/grpc-js": "^1.10.10", "@langchain/core": "0.3.37", "@qdrant/openapi-typescript-fetch": "1.2.6", - "openai": "4.82.0", + "openai": "4.96.0", "protobufjs": "7.4.0" }, "eslintIgnore": [ diff --git a/packages/api-documentation/package.json b/packages/api-documentation/package.json index 891cda8e0..780920f7c 100644 --- a/packages/api-documentation/package.json +++ b/packages/api-documentation/package.json @@ -5,7 +5,6 @@ "scripts": { "build": "tsc", "start": "node dist/index.js", - "dev": "concurrently \"tsc-watch --noClear -p ./tsconfig.json\" \"nodemon\"", "lint": "eslint . --ext ts,tsx --report-unused-disable-directives --max-warnings 0" }, "license": "SEE LICENSE IN LICENSE.md", diff --git a/packages/components/README-ZH.md b/packages/components/README-ZH.md index 52d43eb25..e672b6e7f 100644 --- a/packages/components/README-ZH.md +++ b/packages/components/README-ZH.md @@ -6,7 +6,7 @@ Flowise 的应用集成。包含节点和凭据。 -![Flowise](https://github.com/FlowiseAI/Flowise/blob/main/images/flowise.gif?raw=true) +![Flowise](https://github.com/FlowiseAI/Flowise/blob/main/images/flowise_agentflow.gif?raw=true) 安装: diff --git a/packages/components/README.md b/packages/components/README.md index f8e083746..867ad1ca1 100644 --- a/packages/components/README.md +++ b/packages/components/README.md @@ -6,7 +6,7 @@ English | [中文](./README-ZH.md) Apps integration for Flowise. Contain Nodes and Credentials. 
-![Flowise](https://github.com/FlowiseAI/Flowise/blob/main/images/flowise.gif?raw=true) +![Flowise](https://github.com/FlowiseAI/Flowise/blob/main/images/flowise_agentflow.gif?raw=true) Install: diff --git a/packages/components/credentials/HTTPApiKey.credential.ts b/packages/components/credentials/HTTPApiKey.credential.ts new file mode 100644 index 000000000..92aaa6056 --- /dev/null +++ b/packages/components/credentials/HTTPApiKey.credential.ts @@ -0,0 +1,28 @@ +import { INodeParams, INodeCredential } from '../src/Interface' + +class HTTPApiKeyCredential implements INodeCredential { + label: string + name: string + version: number + inputs: INodeParams[] + + constructor() { + this.label = 'HTTP Api Key' + this.name = 'httpApiKey' + this.version = 1.0 + this.inputs = [ + { + label: 'Key', + name: 'key', + type: 'string' + }, + { + label: 'Value', + name: 'value', + type: 'password' + } + ] + } +} + +module.exports = { credClass: HTTPApiKeyCredential } diff --git a/packages/components/credentials/HTTPBasicAuth.credential.ts b/packages/components/credentials/HTTPBasicAuth.credential.ts new file mode 100644 index 000000000..43b712e6f --- /dev/null +++ b/packages/components/credentials/HTTPBasicAuth.credential.ts @@ -0,0 +1,28 @@ +import { INodeParams, INodeCredential } from '../src/Interface' + +class HttpBasicAuthCredential implements INodeCredential { + label: string + name: string + version: number + inputs: INodeParams[] + + constructor() { + this.label = 'HTTP Basic Auth' + this.name = 'httpBasicAuth' + this.version = 1.0 + this.inputs = [ + { + label: 'Basic Auth Username', + name: 'basicAuthUsername', + type: 'string' + }, + { + label: 'Basic Auth Password', + name: 'basicAuthPassword', + type: 'password' + } + ] + } +} + +module.exports = { credClass: HttpBasicAuthCredential } diff --git a/packages/components/credentials/HTTPBearerToken.credential.ts b/packages/components/credentials/HTTPBearerToken.credential.ts new file mode 100644 index 000000000..f258aeb66 
--- /dev/null +++ b/packages/components/credentials/HTTPBearerToken.credential.ts @@ -0,0 +1,23 @@ +import { INodeParams, INodeCredential } from '../src/Interface' + +class HTTPBearerTokenCredential implements INodeCredential { + label: string + name: string + version: number + inputs: INodeParams[] + + constructor() { + this.label = 'HTTP Bearer Token' + this.name = 'httpBearerToken' + this.version = 1.0 + this.inputs = [ + { + label: 'Token', + name: 'token', + type: 'password' + } + ] + } +} + +module.exports = { credClass: HTTPBearerTokenCredential } diff --git a/packages/components/credentials/JiraApi.credential.ts b/packages/components/credentials/JiraApi.credential.ts new file mode 100644 index 000000000..6638f2e0b --- /dev/null +++ b/packages/components/credentials/JiraApi.credential.ts @@ -0,0 +1,33 @@ +import { INodeParams, INodeCredential } from '../src/Interface' + +class JiraApi implements INodeCredential { + label: string + name: string + version: number + description: string + inputs: INodeParams[] + + constructor() { + this.label = 'Jira API' + this.name = 'jiraApi' + this.version = 1.0 + this.description = + 'Refer to official guide on how to get accessToken on Github' + this.inputs = [ + { + label: 'User Name', + name: 'username', + type: 'string', + placeholder: 'username@example.com' + }, + { + label: 'Access Token', + name: 'accessToken', + type: 'password', + placeholder: '' + } + ] + } +} + +module.exports = { credClass: JiraApi } diff --git a/packages/components/credentials/LitellmApi.credential.ts b/packages/components/credentials/LitellmApi.credential.ts new file mode 100644 index 000000000..6bf866f5c --- /dev/null +++ b/packages/components/credentials/LitellmApi.credential.ts @@ -0,0 +1,23 @@ +import { INodeParams, INodeCredential } from '../src/Interface' + +class LitellmApi implements INodeCredential { + label: string + name: string + version: number + inputs: INodeParams[] + + constructor() { + this.label = 'Litellm API' + this.name = 
'litellmApi' + this.version = 1.0 + this.inputs = [ + { + label: 'API Key', + name: 'litellmApiKey', + type: 'password' + } + ] + } +} + +module.exports = { credClass: LitellmApi } diff --git a/packages/components/credentials/Mem0MemoryApi.credential.ts b/packages/components/credentials/Mem0MemoryApi.credential.ts new file mode 100644 index 000000000..dcb3010d5 --- /dev/null +++ b/packages/components/credentials/Mem0MemoryApi.credential.ts @@ -0,0 +1,27 @@ +import { INodeParams, INodeCredential } from '../src/Interface' + +class Mem0MemoryApi implements INodeCredential { + label: string + name: string + version: number + description: string + inputs: INodeParams[] + + constructor() { + this.label = 'Mem0 Memory API' + this.name = 'mem0MemoryApi' + this.version = 1.0 + this.description = + 'Visit Mem0 Platform to get your API credentials' + this.inputs = [ + { + label: 'API Key', + name: 'apiKey', + type: 'password', + description: 'API Key from Mem0 dashboard' + } + ] + } +} + +module.exports = { credClass: Mem0MemoryApi } diff --git a/packages/components/credentials/NvdiaNIMApi.credential.ts b/packages/components/credentials/NvdiaNIMApi.credential.ts index 2a56a381a..4910032df 100644 --- a/packages/components/credentials/NvdiaNIMApi.credential.ts +++ b/packages/components/credentials/NvdiaNIMApi.credential.ts @@ -1,6 +1,6 @@ -import { INodeParams, INodeCredential } from '../src/Interface' +import { INodeCredential, INodeParams } from '../src/Interface' -class NvdiaNIMApi implements INodeCredential { +class NvidiaNIMApi implements INodeCredential { label: string name: string version: number @@ -8,17 +8,17 @@ class NvdiaNIMApi implements INodeCredential { inputs: INodeParams[] constructor() { - this.label = 'Nvdia NIM API Key' - this.name = 'nvdiaNIMApi' + this.label = 'NVIDIA NGC API Key' + this.name = 'nvidiaNIMApi' this.version = 1.0 this.inputs = [ { - label: 'Nvdia NIM API Key', - name: 'nvdiaNIMApiKey', + label: 'NVIDIA NGC API Key', + name: 'nvidiaNIMApiKey', 
type: 'password' } ] } } -module.exports = { credClass: NvdiaNIMApi } +module.exports = { credClass: NvidiaNIMApi } diff --git a/packages/components/credentials/OpikApi.credential.ts b/packages/components/credentials/OpikApi.credential.ts new file mode 100644 index 000000000..db5d66077 --- /dev/null +++ b/packages/components/credentials/OpikApi.credential.ts @@ -0,0 +1,39 @@ +import { INodeParams, INodeCredential } from '../src/Interface' + +class OpikApi implements INodeCredential { + label: string + name: string + version: number + description: string + inputs: INodeParams[] + + constructor() { + this.label = 'Opik API' + this.name = 'opikApi' + this.version = 1.0 + this.description = + 'Refer to Opik documentation on how to configure Opik credentials' + this.inputs = [ + { + label: 'API Key', + name: 'opikApiKey', + type: 'password', + placeholder: '' + }, + { + label: 'URL', + name: 'opikUrl', + type: 'string', + placeholder: 'https://www.comet.com/opik/api' + }, + { + label: 'Workspace', + name: 'opikWorkspace', + type: 'string', + placeholder: 'default' + } + ] + } +} + +module.exports = { credClass: OpikApi } diff --git a/packages/components/credentials/PerplexityApi.credential.ts b/packages/components/credentials/PerplexityApi.credential.ts new file mode 100644 index 000000000..faf5cfc47 --- /dev/null +++ b/packages/components/credentials/PerplexityApi.credential.ts @@ -0,0 +1,27 @@ +import { INodeParams, INodeCredential } from '../src/Interface' + +class PerplexityApi implements INodeCredential { + label: string + name: string + version: number + description: string + inputs: INodeParams[] + + constructor() { + this.label = 'Perplexity API' + this.name = 'perplexityApi' + this.version = 1.0 + this.description = + 'Refer to official guide on how to get API key' + this.inputs = [ + { + label: 'Perplexity API Key', + name: 'perplexityApiKey', + type: 'password', + placeholder: '' + } + ] + } +} + +module.exports = { credClass: PerplexityApi } diff --git 
a/packages/components/credentials/PostgresUrl.credential.ts b/packages/components/credentials/PostgresUrl.credential.ts new file mode 100644 index 000000000..fe3bf5045 --- /dev/null +++ b/packages/components/credentials/PostgresUrl.credential.ts @@ -0,0 +1,25 @@ +import { INodeParams, INodeCredential } from '../src/Interface' + +class PostgresUrl implements INodeCredential { + label: string + name: string + version: number + description: string + inputs: INodeParams[] + + constructor() { + this.label = 'Postgres URL' + this.name = 'PostgresUrl' + this.version = 1.0 + this.inputs = [ + { + label: 'Postgres URL', + name: 'postgresUrl', + type: 'string', + placeholder: 'postgresql://localhost/mydb' + } + ] + } +} + +module.exports = { credClass: PostgresUrl } diff --git a/packages/components/credentials/SlackApi.credential.ts b/packages/components/credentials/SlackApi.credential.ts new file mode 100644 index 000000000..d5f5f31a8 --- /dev/null +++ b/packages/components/credentials/SlackApi.credential.ts @@ -0,0 +1,32 @@ +import { INodeParams, INodeCredential } from '../src/Interface' + +class SlackApi implements INodeCredential { + label: string + name: string + version: number + description: string + inputs: INodeParams[] + + constructor() { + this.label = 'Slack API' + this.name = 'slackApi' + this.version = 1.0 + this.description = + 'Refer to official guide on how to get botToken and teamId on Slack' + this.inputs = [ + { + label: 'Bot Token', + name: 'botToken', + type: 'password' + }, + { + label: 'Team ID', + name: 'teamId', + type: 'string', + placeholder: '' + } + ] + } +} + +module.exports = { credClass: SlackApi } diff --git a/packages/components/credentials/TavilyApi.credential.ts b/packages/components/credentials/TavilyApi.credential.ts index 161ff4df0..32e1380bf 100644 --- a/packages/components/credentials/TavilyApi.credential.ts +++ b/packages/components/credentials/TavilyApi.credential.ts @@ -10,8 +10,8 @@ class TavilyApi implements INodeCredential { 
constructor() { this.label = 'Tavily API' this.name = 'tavilyApi' - this.version = 1.0 - this.description = 'Tavily API is a real-time API to access Google search results' + this.version = 1.1 + this.description = 'Tavily API is a search engine designed for LLMs and AI agents' this.inputs = [ { label: 'Tavily Api Key', diff --git a/packages/components/models.json b/packages/components/models.json index 02ef6f2f1..aee73adb2 100644 --- a/packages/components/models.json +++ b/packages/components/models.json @@ -3,90 +3,135 @@ { "name": "awsChatBedrock", "models": [ + { + "label": "anthropic.claude-sonnet-4-20250514-v1:0", + "name": "anthropic.claude-sonnet-4-20250514-v1:0", + "description": "Claude 4 Sonnet", + "input_cost": 0.000003, + "output_cost": 0.000015 + }, + { + "label": "anthropic.claude-opus-4-20250514-v1:0", + "name": "anthropic.claude-opus-4-20250514-v1:0", + "description": "Claude 4 Opus", + "input_cost": 0.000015, + "output_cost": 0.000075 + }, { "label": "anthropic.claude-3-7-sonnet-20250219-v1:0", "name": "anthropic.claude-3-7-sonnet-20250219-v1:0", - "description": "(20250219-v1:0) specific version of Claude Sonnet 3.7 - hybrid reasoning model" + "description": "(20250219-v1:0) specific version of Claude Sonnet 3.7", + "input_cost": 3e-6, + "output_cost": 0.000015 }, { "label": "anthropic.claude-3-5-haiku-20241022-v1:0", "name": "anthropic.claude-3-5-haiku-20241022-v1:0", - "description": "(20241022-v1:0) specific version of Claude Haiku 3.5 - fastest model" + "description": "(20241022-v1:0) specific version of Claude Haiku 3.5", + "input_cost": 8e-7, + "output_cost": 4e-6 }, { "label": "anthropic.claude-3.5-sonnet-20241022-v2:0", "name": "anthropic.claude-3-5-sonnet-20241022-v2:0", - "description": "(20241022-v2:0) specific version of Claude Sonnet 3.5 - most intelligent model" + "description": "(20241022-v2:0) specific version of Claude Sonnet 3.5", + "input_cost": 3e-6, + "output_cost": 0.000015 }, { "label": 
"anthropic.claude-3.5-sonnet-20240620-v1:0", - "name": "anthropic.claude-3-5-sonnet-20240620-v1:0", - "description": "(20240620-v1:0) specific version of Claude Sonnet 3.5 - most intelligent model" + "name": "anthropic.claude-3.5-sonnet-20240620-v1:0", + "description": "(20240620-v1:0) specific version of Claude Sonnet 3.5", + "input_cost": 3e-6, + "output_cost": 0.000015 }, { "label": "anthropic.claude-3-opus", "name": "anthropic.claude-3-opus-20240229-v1:0", - "description": "Powerful model for highly complex tasks, reasoning and analysis" + "input_cost": 0.000015, + "output_cost": 0.000075 }, { "label": "anthropic.claude-3-sonnet", "name": "anthropic.claude-3-sonnet-20240229-v1:0", - "description": "Balance of intelligence and speed" + "input_cost": 0.000003, + "output_cost": 0.000015 }, { "label": "anthropic.claude-3-haiku", "name": "anthropic.claude-3-haiku-20240307-v1:0", - "description": "Fastest and most compact model for near-instant responsiveness" + "input_cost": 2.5e-7, + "output_cost": 1.25e-6 }, { "label": "anthropic.claude-instant-v1", "name": "anthropic.claude-instant-v1", - "description": "Text generation, conversation" + "description": "Text generation, conversation", + "input_cost": 0.000008, + "output_cost": 0.000024 }, { "label": "anthropic.claude-v2:1", "name": "anthropic.claude-v2:1", - "description": "Text generation, conversation, complex reasoning and analysis" + "description": "Text generation, conversation, complex reasoning and analysis", + "input_cost": 0.000008, + "output_cost": 0.000024 }, { "label": "anthropic.claude-v2", "name": "anthropic.claude-v2", - "description": "Text generation, conversation, complex reasoning and analysis" + "description": "Text generation, conversation, complex reasoning and analysis", + "input_cost": 0.000008, + "output_cost": 0.000024 }, { "label": "meta.llama2-13b-chat-v1", "name": "meta.llama2-13b-chat-v1", - "description": "Text generation, conversation" + "description": "Text generation, 
conversation", + "input_cost": 0.0003, + "output_cost": 0.0006 }, { "label": "meta.llama2-70b-chat-v1", "name": "meta.llama2-70b-chat-v1", - "description": "Text generation, conversation" + "description": "Text generation, conversation", + "input_cost": 0.0003, + "output_cost": 0.0006 }, { "label": "meta.llama3-8b-instruct-v1:0", "name": "meta.llama3-8b-instruct-v1:0", - "description": "Text summarization, text classification, sentiment analysis" + "description": "Text summarization, text classification, sentiment analysis", + "input_cost": 0.0003, + "output_cost": 0.0006 }, { "label": "meta.llama3-70b-instruct-v1:0", "name": "meta.llama3-70b-instruct-v1:0", - "description": "Language modeling, dialog systems, code generation, text summarization, text classification, sentiment analysis" + "description": "Language modeling, dialog systems, code generation, text summarization, text classification, sentiment analysis", + "input_cost": 0.00195, + "output_cost": 0.00256 }, { "label": "mistral.mistral-7b-instruct-v0:2", "name": "mistral.mistral-7b-instruct-v0:2", - "description": "Classification, text generation, code generation" + "description": "Classification, text generation, code generation", + "input_cost": 0.002, + "output_cost": 0.006 }, { "label": "mistral.mixtral-8x7b-instruct-v0:1", "name": "mistral.mixtral-8x7b-instruct-v0:1", - "description": "Complex reasoning and analysis, text generation, code generation" + "description": "Complex reasoning and analysis, text generation, code generation", + "input_cost": 0.002, + "output_cost": 0.006 }, { "label": "mistral.mistral-large-2402-v1:0", "name": "mistral.mistral-large-2402-v1:0", - "description": "Complex reasoning and analysis, text generation, code generation, RAG, agents" + "description": "Complex reasoning and analysis, text generation, code generation, RAG, agents", + "input_cost": 0.002, + "output_cost": 0.006 } ], "regions": [ @@ -235,49 +280,83 @@ { "name": "azureChatOpenAI", "models": [ + { + "label": 
"gpt-4.1", + "name": "gpt-4.1", + "input_cost": 2e-6, + "output_cost": 8e-6 + }, { "label": "o3-mini", - "name": "o3-mini" + "name": "o3-mini", + "input_cost": 1.1e-6, + "output_cost": 4.4e-6 }, { "label": "o1", - "name": "o1" + "name": "o1", + "input_cost": 0.000015, + "output_cost": 0.00006 }, { "label": "o1-preview", - "name": "o1-preview" + "name": "o1-preview", + "input_cost": 0.000015, + "output_cost": 0.00006 }, { "label": "o1-mini", - "name": "o1-mini" + "name": "o1-mini", + "input_cost": 3e-6, + "output_cost": 0.000012 }, { "label": "gpt-4o-mini", - "name": "gpt-4o-mini" + "name": "gpt-4o-mini", + "input_cost": 1.5e-7, + "output_cost": 6e-7 }, { "label": "gpt-4o", - "name": "gpt-4o" + "name": "gpt-4o", + "input_cost": 2.5e-6, + "output_cost": 0.00001 }, { "label": "gpt-4", - "name": "gpt-4" + "name": "gpt-4", + "input_cost": 0.00003, + "output_cost": 0.00006 }, { "label": "gpt-4-32k", - "name": "gpt-4-32k" + "name": "gpt-4-32k", + "input_cost": 0.00006, + "output_cost": 0.00012 }, { "label": "gpt-35-turbo", - "name": "gpt-35-turbo" + "name": "gpt-35-turbo", + "input_cost": 1.5e-6, + "output_cost": 2e-6 }, { "label": "gpt-35-turbo-16k", - "name": "gpt-35-turbo-16k" + "name": "gpt-35-turbo-16k", + "input_cost": 3e-6, + "output_cost": 4e-6 }, { "label": "gpt-4-vision-preview", - "name": "gpt-4-vision-preview" + "name": "gpt-4-vision-preview", + "input_cost": 0.00001, + "output_cost": 0.00003 + }, + { + "label": "gpt-4.5-preview", + "name": "gpt-4.5-preview", + "input_cost": 0.000075, + "output_cost": 0.00015 } ] }, @@ -286,99 +365,118 @@ "models": [ { "label": "gpt-4o-mini", - "name": "gpt-4o-mini" + "name": "gpt-4o-mini", + "input_cost": 1.5e-7, + "output_cost": 6e-7 }, { "label": "gpt-4o", - "name": "gpt-4o" + "name": "gpt-4o", + "input_cost": 2.5e-6, + "output_cost": 0.00001 }, { "label": "gpt-4", - "name": "gpt-4" + "name": "gpt-4", + "input_cost": 0.00003, + "output_cost": 0.00006 }, { "label": "gpt-4-turbo", - "name": "gpt-4-turbo" + "name": 
"gpt-4-turbo", + "input_cost": 0.00001, + "output_cost": 0.00003 }, { "label": "gpt-4-32k", - "name": "gpt-4-32k" + "name": "gpt-4-32k", + "input_cost": 0.00006, + "output_cost": 0.00012 }, { - "label": "gpt-3.5-turbo", - "name": "gpt-3.5-turbo" + "label": "gpt-35-turbo", + "name": "gpt-35-turbo", + "input_cost": 1.5e-6, + "output_cost": 2e-6 }, { - "label": "gpt-3.5-turbo-16k", - "name": "gpt-3.5-turbo-16k" + "label": "gpt-35-turbo-16k", + "name": "gpt-35-turbo-16k", + "input_cost": 5e-7, + "output_cost": 0.0000015 }, { "label": "gpt-4-vision-preview", - "name": "gpt-4-vision-preview" + "name": "gpt-4-vision-preview", + "input_cost": 0.00001, + "output_cost": 0.00003 }, { "label": "gpt-4-1106-preview", - "name": "gpt-4-1106-preview" + "name": "gpt-4-1106-preview", + "input_cost": 0.00001, + "output_cost": 0.00003 } ] }, { "name": "chatAnthropic", "models": [ + { + "label": "claude-sonnet-4-0", + "name": "claude-sonnet-4-0", + "description": "Claude 4 Sonnet", + "input_cost": 0.000003, + "output_cost": 0.000015 + }, + { + "label": "claude-opus-4-0", + "name": "claude-opus-4-0", + "description": "Claude 4 Opus", + "input_cost": 0.000015, + "output_cost": 0.000075 + }, { "label": "claude-3-7-sonnet-latest", "name": "claude-3-7-sonnet-latest", - "description": "Most recent snapshot version of Claude Sonnet 3.7 model - hybrid reasoning model" + "description": "Most recent snapshot version of Claude Sonnet 3.7", + "input_cost": 3e-6, + "output_cost": 0.000015 }, { "label": "claude-3-5-haiku-latest", "name": "claude-3-5-haiku-latest", - "description": "Most recent snapshot version of Claude Haiku 3.5 - fastest model" + "description": "Most recent snapshot version of Claude Haiku 3.5", + "input_cost": 8e-7, + "output_cost": 4e-6 }, { "label": "claude-3.5-sonnet-latest", "name": "claude-3-5-sonnet-latest", - "description": "Most recent snapshot version of Claude Sonnet 3.5 model - most intelligent model" - }, - { - "label": "claude-3.5-sonnet-20241022", - "name": 
"claude-3-5-sonnet-20241022", - "description": "(20241022) specific version of Claude Sonnet 3.5 - most intelligent model" - }, - { - "label": "claude-3.5-sonnet-20240620", - "name": "claude-3-5-sonnet-20240620", - "description": "(20240620) specific version of Claude Sonnet 3.5 - most intelligent model" + "description": "Most recent snapshot version of Claude Sonnet 3.5 model", + "input_cost": 3e-6, + "output_cost": 0.000015 }, { "label": "claude-3-opus", "name": "claude-3-opus-20240229", - "description": "Powerful model for highly complex tasks, reasoning and analysis" + "description": "Powerful model for highly complex tasks, reasoning and analysis", + "input_cost": 0.000015, + "output_cost": 0.000075 }, { "label": "claude-3-sonnet", "name": "claude-3-sonnet-20240229", - "description": "Ideal balance of intelligence and speed for enterprise workloads" + "description": "Ideal balance of intelligence and speed for enterprise workloads", + "input_cost": 0.000003, + "output_cost": 0.000015 }, { "label": "claude-3-haiku", "name": "claude-3-haiku-20240307", - "description": "Fastest and most compact model, designed for near-instant responsiveness" - }, - { - "label": "claude-2.0 (legacy)", - "name": "claude-2.0", - "description": "Claude 2 latest major version, automatically get updates to the model as they are released" - }, - { - "label": "claude-2.1 (legacy)", - "name": "claude-2.1", - "description": "Claude 2 latest full version" - }, - { - "label": "claude-instant-1.2 (legacy)", - "name": "claude-instant-1.2", - "description": "Claude Instant latest major version, automatically get updates to the model as they are released" + "description": "Fastest and most compact model, designed for near-instant responsiveness", + "input_cost": 2.5e-7, + "output_cost": 1.25e-6 } ] }, @@ -388,27 +486,37 @@ { "label": "claude-3-haiku", "name": "claude-3-haiku", - "description": "Fastest and most compact model, designed for near-instant responsiveness" + "description": "Fastest 
and most compact model, designed for near-instant responsiveness", + "input_cost": 2.5e-7, + "output_cost": 1.25e-6 }, { "label": "claude-3-opus", "name": "claude-3-opus", - "description": "Most powerful model for highly complex tasks" + "description": "Most powerful model for highly complex tasks", + "input_cost": 0.000015, + "output_cost": 0.000075 }, { "label": "claude-3-sonnet", "name": "claude-3-sonnet", - "description": "Ideal balance of intelligence and speed for enterprise workloads" + "description": "Ideal balance of intelligence and speed for enterprise workloads", + "input_cost": 0.000003, + "output_cost": 0.000015 }, { "label": "claude-2.1 (legacy)", "name": "claude-2.1", - "description": "Claude 2 latest major version, automatically get updates to the model as they are released" + "description": "Claude 2 latest major version, automatically get updates to the model as they are released", + "input_cost": 0.000008, + "output_cost": 0.000024 }, { "label": "claude-instant-1.2 (legacy)", "name": "claude-instant-1.2", - "description": "Claude Instant latest major version, automatically get updates to the model as they are released" + "description": "Claude Instant latest major version, automatically get updates to the model as they are released", + "input_cost": 0.000008, + "output_cost": 0.000024 } ] }, @@ -416,24 +524,40 @@ "name": "chatGoogleGenerativeAI", "models": [ { - "label": "gemini-2.0-flash-001", - "name": "gemini-2.0-flash-001" + "label": "gemini-2.5-pro-preview-03-25", + "name": "gemini-2.5-pro-preview-03-25", + "input_cost": 1.25e-6, + "output_cost": 0.00001 }, { - "label": "gemini-2.0-flash-lite-001", - "name": "gemini-2.0-flash-lite-001" + "label": "gemini-2.0-flash", + "name": "gemini-2.0-flash", + "input_cost": 1e-7, + "output_cost": 4e-7 + }, + { + "label": "gemini-2.0-flash-lite", + "name": "gemini-2.0-flash-lite", + "input_cost": 7.5e-8, + "output_cost": 3e-7 }, { "label": "gemini-1.5-flash", - "name": "gemini-1.5-flash" + "name": 
"gemini-1.5-flash", + "input_cost": 7.5e-8, + "output_cost": 3e-7 }, { "label": "gemini-1.5-flash-8b", - "name": "gemini-1.5-flash-8b" + "name": "gemini-1.5-flash-8b", + "input_cost": 3.75e-8, + "output_cost": 1.5e-7 }, { "label": "gemini-1.5-pro", - "name": "gemini-1.5-pro" + "name": "gemini-1.5-pro", + "input_cost": 1.25e-6, + "output_cost": 5e-6 } ] }, @@ -442,7 +566,9 @@ "models": [ { "label": "qwen-plus", - "name": "qwen-plus" + "name": "qwen-plus", + "input_cost": 0.0016, + "output_cost": 0.0064 } ] }, @@ -451,63 +577,117 @@ "models": [ { "label": "gemini-1.5-flash-002", - "name": "gemini-1.5-flash-002" + "name": "gemini-1.5-flash-002", + "input_cost": 7.5e-8, + "output_cost": 3e-7 }, { "label": "gemini-1.5-flash-001", - "name": "gemini-1.5-flash-001" + "name": "gemini-1.5-flash-001", + "input_cost": 7.5e-8, + "output_cost": 3e-7 }, { "label": "gemini-1.5-pro-002", - "name": "gemini-1.5-pro-002" + "name": "gemini-1.5-pro-002", + "input_cost": 1.25e-6, + "output_cost": 5e-6 }, { "label": "gemini-1.5-pro-001", - "name": "gemini-1.5-pro-001" + "name": "gemini-1.5-pro-001", + "input_cost": 1.25e-6, + "output_cost": 5e-6 }, { "label": "gemini-1.0-pro", - "name": "gemini-1.0-pro" + "name": "gemini-1.0-pro", + "input_cost": 1.25e-7, + "output_cost": 3.75e-7 }, { "label": "gemini-1.0-pro-vision", - "name": "gemini-1.0-pro-vision" + "name": "gemini-1.0-pro-vision", + "input_cost": 1.25e-7, + "output_cost": 3.75e-7 + }, + { + "label": "claude-sonnet-4@20250514", + "name": "claude-sonnet-4@20250514", + "description": "Claude 4 Sonnet", + "input_cost": 0.000003, + "output_cost": 0.000015 + }, + { + "label": "claude-opus-4@20250514", + "name": "claude-opus-4@20250514", + "description": "Claude 4 Opus", + "input_cost": 0.000015, + "output_cost": 0.000075 }, { "label": "claude-3-7-sonnet@20250219", "name": "claude-3-7-sonnet@20250219", - "description": "(20250219-v1:0) specific version of Claude Sonnet 3.7 - hybrid reasoning model" + "description": "(20250219-v1:0) specific 
version of Claude Sonnet 3.7", + "input_cost": 3e-6, + "output_cost": 0.000015 }, { "label": "claude-3-5-haiku@20241022", "name": "claude-3-5-haiku@20241022", - "description": "(20241022-v1:0) specific version of Claude Haiku 3.5 - fastest model" + "description": "(20241022-v1:0) specific version of Claude Haiku 3.5", + "input_cost": 8e-7, + "output_cost": 4e-6 }, { "label": "claude-3-5-sonnet-v2@20241022", "name": "claude-3-5-sonnet-v2@20241022", - "description": "(20241022-v2:0) specific version of Claude Sonnet 3.5 - most intelligent model" + "description": "(20241022-v2:0) specific version of Claude Sonnet 3.5", + "input_cost": 3e-6, + "output_cost": 0.000015 }, { "label": "claude-3-opus@20240229", "name": "claude-3-opus@20240229", - "description": "Powerful model for highly complex tasks, reasoning and analysis" + "description": "Powerful model for highly complex tasks, reasoning and analysis", + "input_cost": 0.000015, + "output_cost": 0.000075 }, { "label": "claude-3-sonnet@20240229", "name": "claude-3-sonnet@20240229", - "description": "Balance of intelligence and speed" + "description": "Balance of intelligence and speed", + "input_cost": 0.000003, + "output_cost": 0.000015 }, { "label": "claude-3-haiku@20240307", "name": "claude-3-haiku@20240307", - "description": "Fastest and most compact model for near-instant responsiveness" + "description": "Fastest and most compact model for near-instant responsiveness", + "input_cost": 2.5e-7, + "output_cost": 1.25e-6 } ] }, { "name": "groqChat", "models": [ + { + "label": "meta-llama/llama-4-maverick-17b-128e-instruct", + "name": "meta-llama/llama-4-maverick-17b-128e-instruct" + }, + { + "label": "meta-llama/llama-4-scout-17b-16e-instruct", + "name": "meta-llama/llama-4-scout-17b-16e-instruct" + }, + { + "label": "coumpound-beta", + "name": "compound-beta" + }, + { + "label": "compound-beta-mini", + "name": "compound-beta-mini" + }, { "label": "deepseek-r1-distill-llama-70b", "name": "deepseek-r1-distill-llama-70b" 
@@ -536,29 +716,13 @@ "label": "llama-3.2-90b-text-preview", "name": "llama-3.2-90b-text-preview" }, - { - "label": "llama-3.1-405b-reasoning", - "name": "llama-3.1-405b-reasoning" - }, - { - "label": "llama-3.1-70b-versatile", - "name": "llama-3.1-70b-versatile" - }, { "label": "llama-3.1-8b-instant", "name": "llama-3.1-8b-instant" }, { - "label": "llama3-groq-70b-8192-tool-use-preview", - "name": "llama3-groq-70b-8192-tool-use-preview" - }, - { - "label": "llama3-groq-8b-8192-tool-use-preview", - "name": "llama3-groq-8b-8192-tool-use-preview" - }, - { - "label": "gemma-7b-it", - "name": "gemma-7b-it" + "label": "gemma-2-9b-it", + "name": "gemma-2-9b-it" }, { "label": "llama3-70b-8192", @@ -569,8 +733,16 @@ "name": "llama3-8b-8192" }, { - "label": "mixtral-8x7b-32768", - "name": "mixtral-8x7b-32768" + "label": "mixtral-saba-24b", + "name": "mixtral-saba-24b" + }, + { + "label": "qwen-qwq-32b", + "name": "qwen-qwq-32b" + }, + { + "label": "allam-2-7b", + "name": "allam-2-7b" } ] }, @@ -578,12 +750,16 @@ "name": "chatCohere", "models": [ { - "label": "command-r", - "name": "command-r" + "label": "command-a", + "name": "command-a", + "input_cost": 0.0025, + "output_cost": 0.01 }, { "label": "command-r-plus", - "name": "command-r-plus" + "name": "command-r-plus", + "input_cost": 0.0025, + "output_cost": 0.01 } ] }, @@ -592,128 +768,218 @@ "models": [ { "label": "deepseek-chat", - "name": "deepseek-chat" + "name": "deepseek-chat", + "input_cost": 0.00027, + "output_cost": 0.0011 }, { "label": "deepseek-reasoner", - "name": "deepseek-reasoner" + "name": "deepseek-reasoner", + "input_cost": 0.00055, + "output_cost": 0.00219 } ] }, { "name": "chatOpenAI", "models": [ + { + "label": "gpt-4.1", + "name": "gpt-4.1", + "input_cost": 2e-6, + "output_cost": 8e-6 + }, + { + "label": "gpt-4.1-mini", + "name": "gpt-4.1-mini", + "input_cost": 4e-7, + "output_cost": 1.6e-6 + }, + { + "label": "gpt-4.1-nano", + "name": "gpt-4.1-nano", + "input_cost": 1e-7, + "output_cost": 4e-7 + }, 
+ { + "label": "gpt-4.5-preview", + "name": "gpt-4.5-preview", + "input_cost": 0.000075, + "output_cost": 0.00015 + }, { "label": "gpt-4o-mini (latest)", - "name": "gpt-4o-mini" + "name": "gpt-4o-mini", + "input_cost": 1.5e-7, + "output_cost": 6e-7 }, { "label": "gpt-4o-mini-2024-07-18", - "name": "gpt-4o-mini-2024-07-18" + "name": "gpt-4o-mini-2024-07-18", + "input_cost": 1.5e-7, + "output_cost": 6e-7 }, { "label": "gpt-4o (latest)", - "name": "gpt-4o" + "name": "gpt-4o", + "input_cost": 2.5e-6, + "output_cost": 0.00001 }, { "label": "gpt-4o-2024-11-20", - "name": "gpt-4o-2024-11-20" + "name": "gpt-4o-2024-11-20", + "input_cost": 2.5e-6, + "output_cost": 0.00001 }, { "label": "gpt-4o-2024-08-06", - "name": "gpt-4o-2024-08-06" + "name": "gpt-4o-2024-08-06", + "input_cost": 2.5e-6, + "output_cost": 0.00001 }, { "label": "gpt-4o-2024-05-13", - "name": "gpt-4o-2024-05-13" + "name": "gpt-4o-2024-05-13", + "input_cost": 2.5e-6, + "output_cost": 0.00001 }, { "label": "o3-mini (latest)", - "name": "o3-mini" + "name": "o3-mini", + "input_cost": 1.1e-6, + "output_cost": 4.4e-6 }, { "label": "o3-mini-2025-01-31", - "name": "o3-mini-2025-01-31" + "name": "o3-mini-2025-01-31", + "input_cost": 1.1e-6, + "output_cost": 4.4e-6 }, { "label": "o1-preview (latest)", - "name": "o1-preview" + "name": "o1-preview", + "input_cost": 0.000015, + "output_cost": 0.00006 }, { "label": "o1-preview-2024-09-12", - "name": "o1-preview-2024-09-12" + "name": "o1-preview-2024-09-12", + "input_cost": 0.000015, + "output_cost": 0.00006 }, { "label": "o1-mini (latest)", - "name": "o1-mini" + "name": "o1-mini", + "input_cost": 3e-6, + "output_cost": 0.000012 }, { "label": "o1-mini-2024-09-12", - "name": "o1-mini-2024-09-12" + "name": "o1-mini-2024-09-12", + "input_cost": 3e-6, + "output_cost": 0.000012 }, { "label": "gpt-4 (latest)", - "name": "gpt-4" + "name": "gpt-4", + "input_cost": 0.00003, + "output_cost": 0.00006 }, { "label": "gpt-4-turbo (latest)", - "name": "gpt-4-turbo" + "name": 
"gpt-4-turbo", + "input_cost": 0.00001, + "output_cost": 0.00003 }, { "label": "gpt-4-turbo-preview", - "name": "gpt-4-turbo-preview" + "name": "gpt-4-turbo-preview", + "input_cost": 0.00001, + "output_cost": 0.00003 }, { "label": "gpt-4-0125-preview", - "name": "gpt-4-0125-preview" + "name": "gpt-4-0125-preview", + "input_cost": 0.00001, + "output_cost": 0.00003 }, { "label": "gpt-4-1106-preview", - "name": "gpt-4-1106-preview" + "name": "gpt-4-1106-preview", + "input_cost": 0.00001, + "output_cost": 0.00003 }, { "label": "gpt-4-1106-vision-preview", - "name": "gpt-4-1106-vision-preview" + "name": "gpt-4-1106-vision-preview", + "input_cost": 0.00001, + "output_cost": 0.00003 }, { "label": "gpt-4-vision-preview", - "name": "gpt-4-vision-preview" + "name": "gpt-4-vision-preview", + "input_cost": 0.00001, + "output_cost": 0.00003 }, { "label": "gpt-4-0613", - "name": "gpt-4-0613" + "name": "gpt-4-0613", + "input_cost": 0.00003, + "output_cost": 0.00006 }, { "label": "gpt-4-32k", - "name": "gpt-4-32k" + "name": "gpt-4-32k", + "input_cost": 0.00006, + "output_cost": 0.00012 }, { "label": "gpt-4-32k-0613", - "name": "gpt-4-32k-0613" + "name": "gpt-4-32k-0613", + "input_cost": 0.00006, + "output_cost": 0.00012 }, { "label": "gpt-3.5-turbo", - "name": "gpt-3.5-turbo" + "name": "gpt-3.5-turbo", + "input_cost": 1.5e-6, + "output_cost": 2e-6 }, { "label": "gpt-3.5-turbo-0125", - "name": "gpt-3.5-turbo-0125" + "name": "gpt-3.5-turbo-0125", + "input_cost": 5e-7, + "output_cost": 0.0000015 }, { "label": "gpt-3.5-turbo-1106", - "name": "gpt-3.5-turbo-1106" + "name": "gpt-3.5-turbo-1106", + "input_cost": 0.000001, + "output_cost": 0.000002 }, { "label": "gpt-3.5-turbo-0613", - "name": "gpt-3.5-turbo-0613" + "name": "gpt-3.5-turbo-0613", + "input_cost": 0.0000015, + "output_cost": 0.000002 }, { "label": "gpt-3.5-turbo-16k", - "name": "gpt-3.5-turbo-16k" + "name": "gpt-3.5-turbo-16k", + "input_cost": 5e-7, + "output_cost": 0.0000015 }, { "label": "gpt-3.5-turbo-16k-0613", - "name": 
"gpt-3.5-turbo-16k-0613" + "name": "gpt-3.5-turbo-16k-0613", + "input_cost": 0.000003, + "output_cost": 0.000004 + }, + { + "label": "o4-mini", + "name": "o4-mini", + "input_cost": 1.5e-7, + "output_cost": 6e-7 } ] }, @@ -722,63 +988,134 @@ "models": [ { "label": "gpt-4o", - "name": "gpt-4o" + "name": "gpt-4o", + "input_cost": 2.5e-6, + "output_cost": 0.00001 }, { "label": "gpt-4", - "name": "gpt-4" + "name": "gpt-4", + "input_cost": 0.00003, + "output_cost": 0.00006 }, { "label": "gpt-4-turbo", - "name": "gpt-4-turbo" + "name": "gpt-4-turbo", + "input_cost": 0.00001, + "output_cost": 0.00003 }, { "label": "gpt-4-turbo-preview", - "name": "gpt-4-turbo-preview" + "name": "gpt-4-turbo-preview", + "input_cost": 0.00001, + "output_cost": 0.00003 }, { "label": "gpt-4-0125-preview", - "name": "gpt-4-0125-preview" + "name": "gpt-4-0125-preview", + "input_cost": 0.00001, + "output_cost": 0.00003 }, { "label": "gpt-4-1106-preview", - "name": "gpt-4-1106-preview" + "name": "gpt-4-1106-preview", + "input_cost": 0.00001, + "output_cost": 0.00003 }, { "label": "gpt-4-vision-preview", - "name": "gpt-4-vision-preview" + "name": "gpt-4-vision-preview", + "input_cost": 0.00001, + "output_cost": 0.00003 }, { "label": "gpt-4-0613", - "name": "gpt-4-0613" + "name": "gpt-4-0613", + "input_cost": 0.00003, + "output_cost": 0.00006 }, { "label": "gpt-4-32k", - "name": "gpt-4-32k" + "name": "gpt-4-32k", + "input_cost": 0.00006, + "output_cost": 0.00012 }, { "label": "gpt-4-32k-0613", - "name": "gpt-4-32k-0613" + "name": "gpt-4-32k-0613", + "input_cost": 0.00006, + "output_cost": 0.00012 }, { "label": "gpt-3.5-turbo", - "name": "gpt-3.5-turbo" + "name": "gpt-3.5-turbo", + "input_cost": 1.5e-6, + "output_cost": 2e-6 }, { "label": "gpt-3.5-turbo-1106", - "name": "gpt-3.5-turbo-1106" + "name": "gpt-3.5-turbo-1106", + "input_cost": 0.000001, + "output_cost": 0.000002 }, { "label": "gpt-3.5-turbo-0613", - "name": "gpt-3.5-turbo-0613" + "name": "gpt-3.5-turbo-0613", + "input_cost": 0.0000015, + 
"output_cost": 0.000002 }, { "label": "gpt-3.5-turbo-16k", - "name": "gpt-3.5-turbo-16k" + "name": "gpt-3.5-turbo-16k", + "input_cost": 5e-7, + "output_cost": 0.0000015 }, { "label": "gpt-3.5-turbo-16k-0613", - "name": "gpt-3.5-turbo-16k-0613" + "name": "gpt-3.5-turbo-16k-0613", + "input_cost": 0.000003, + "output_cost": 0.000004 + } + ] + }, + { + "name": "chatPerplexity", + "models": [ + { + "label": "sonar", + "name": "sonar", + "input_cost": 1e-6, + "output_cost": 1e-6 + }, + { + "label": "sonar-pro", + "name": "sonar-pro", + "input_cost": 3e-6, + "output_cost": 1.5e-5 + }, + { + "label": "sonar-reasoning", + "name": "sonar-reasoning", + "input_cost": 1e-6, + "output_cost": 5e-6 + }, + { + "label": "sonar-reasoning-pro", + "name": "sonar-reasoning-pro", + "input_cost": 2e-6, + "output_cost": 8e-6 + }, + { + "label": "sonar-deep-research", + "name": "sonar-deep-research", + "input_cost": 2e-6, + "output_cost": 8e-6 + }, + { + "label": "r1-1776", + "name": "r1-1776", + "input_cost": 2e-6, + "output_cost": 8e-6 } ] }, @@ -787,63 +1124,93 @@ "models": [ { "label": "open-mistral-nemo", - "name": "open-mistral-nemo" + "name": "open-mistral-nemo", + "input_cost": 0.00015, + "output_cost": 0.00015 }, { "label": "open-mistral-7b", - "name": "open-mistral-7b" + "name": "open-mistral-7b", + "input_cost": 0.00025, + "output_cost": 0.00025 }, { "label": "mistral-tiny-2312", - "name": "mistral-tiny-2312" + "name": "mistral-tiny-2312", + "input_cost": 0.0007, + "output_cost": 0.0007 }, { "label": "mistral-tiny", - "name": "mistral-tiny" + "name": "mistral-tiny", + "input_cost": 0.0007, + "output_cost": 0.0007 }, { "label": "open-mixtral-8x7b", - "name": "open-mixtral-8x7b" + "name": "open-mixtral-8x7b", + "input_cost": 0.0007, + "output_cost": 0.0007 }, { "label": "open-mixtral-8x22b", - "name": "open-mixtral-8x22b" + "name": "open-mixtral-8x22b", + "input_cost": 0.002, + "output_cost": 0.006 }, { "label": "mistral-small-2312", - "name": "mistral-small-2312" + "name": "mistral-small-2312", 
+ "input_cost": 0.0001, + "output_cost": 0.0003 }, { "label": "mistral-small", - "name": "mistral-small" + "name": "mistral-small", + "input_cost": 0.0001, + "output_cost": 0.0003 }, { "label": "mistral-small-2402", - "name": "mistral-small-2402" + "name": "mistral-small-2402", + "input_cost": 0.0001, + "output_cost": 0.0003 }, { "label": "mistral-small-latest", - "name": "mistral-small-latest" + "name": "mistral-small-latest", + "input_cost": 0.0001, + "output_cost": 0.0003 }, { "label": "mistral-medium-latest", - "name": "mistral-medium-latest" + "name": "mistral-medium-latest", + "input_cost": 0.001, + "output_cost": 0.003 }, { "label": "mistral-medium-2312", - "name": "mistral-medium-2312" + "name": "mistral-medium-2312", + "input_cost": 0.001, + "output_cost": 0.003 }, { "label": "mistral-medium", - "name": "mistral-medium" + "name": "mistral-medium", + "input_cost": 0.001, + "output_cost": 0.003 }, { "label": "mistral-large-latest", - "name": "mistral-large-latest" + "name": "mistral-large-latest", + "input_cost": 0.002, + "output_cost": 0.006 }, { "label": "mistral-large-2402", - "name": "mistral-large-2402" + "name": "mistral-large-2402", + "input_cost": 0.002, + "output_cost": 0.006 } ] }, @@ -852,15 +1219,21 @@ "models": [ { "label": "mistral-tiny", - "name": "mistral-tiny" + "name": "mistral-tiny", + "input_cost": 0.0007, + "output_cost": 0.0007 }, { "label": "mistral-small", - "name": "mistral-small" + "name": "mistral-small", + "input_cost": 0.0001, + "output_cost": 0.0003 }, { "label": "mistral-medium", - "name": "mistral-medium" + "name": "mistral-medium", + "input_cost": 0.001, + "output_cost": 0.003 } ] } @@ -879,27 +1252,39 @@ }, { "label": "cohere.command-text-v14", - "name": "cohere.command-text-v14" + "name": "cohere.command-text-v14", + "input_cost": 0.0015, + "output_cost": 0.002 }, { "label": "cohere.command-light-text-v14", - "name": "cohere.command-light-text-v14" + "name": "cohere.command-light-text-v14", + "input_cost": 0.0003, + 
"output_cost": 0.0006 }, { "label": "ai21.j2-grande-instruct", - "name": "ai21.j2-grande-instruct" + "name": "ai21.j2-grande-instruct", + "input_cost": 0.0005, + "output_cost": 0.0007 }, { "label": "ai21.j2-jumbo-instruct", - "name": "ai21.j2-jumbo-instruct" + "name": "ai21.j2-jumbo-instruct", + "input_cost": 0.0005, + "output_cost": 0.0007 }, { "label": "ai21.j2-mid", - "name": "ai21.j2-mid" + "name": "ai21.j2-mid", + "input_cost": 0.0125, + "output_cost": 0.0125 }, { "label": "ai21.j2-ultra", - "name": "ai21.j2-ultra" + "name": "ai21.j2-ultra", + "input_cost": 0.0188, + "output_cost": 0.0188 } ], "regions": [ @@ -1050,59 +1435,76 @@ "models": [ { "label": "text-davinci-003", - "name": "text-davinci-003" + "name": "text-davinci-003", + "total_cost": 0.00002 }, { "label": "ada", - "name": "ada" + "name": "ada", + "total_cost": 0.00004 }, { "label": "text-ada-001", - "name": "text-ada-001" + "name": "text-ada-001", + "total_cost": 0.00004 }, { "label": "babbage", - "name": "babbage" + "name": "babbage", + "total_cost": 0.00005 }, { "label": "text-babbage-001", - "name": "text-babbage-001" + "name": "text-babbage-001", + "total_cost": 0.00005 }, { "label": "curie", - "name": "curie" + "name": "curie", + "total_cost": 0.00002 }, { "label": "text-curie-001", - "name": "text-curie-001" + "name": "text-curie-001", + "total_cost": 0.00002 }, { "label": "davinci", - "name": "davinci" + "name": "davinci", + "total_cost": 0.00002 }, { "label": "text-davinci-001", - "name": "text-davinci-001" + "name": "text-davinci-001", + "total_cost": 0.00002 }, { "label": "text-davinci-002", - "name": "text-davinci-002" + "name": "text-davinci-002", + "total_cost": 0.00002 }, { "label": "text-davinci-fine-tune-002", - "name": "text-davinci-fine-tune-002" + "name": "text-davinci-fine-tune-002", + "total_cost": 0.00002 }, { "label": "gpt-35-turbo", - "name": "gpt-35-turbo" + "name": "gpt-35-turbo", + "input_cost": 1.5e-6, + "output_cost": 2e-6 }, { "label": "gpt-4", - "name": "gpt-4" + 
"name": "gpt-4", + "input_cost": 0.00003, + "output_cost": 0.00006 }, { "label": "gpt-4-32k", - "name": "gpt-4-32k" + "name": "gpt-4-32k", + "input_cost": 0.00006, + "output_cost": 0.00012 } ] }, @@ -1140,27 +1542,39 @@ "models": [ { "label": "text-bison", - "name": "text-bison" + "name": "text-bison", + "input_cost": 2.5e-7, + "output_cost": 5e-7 }, { "label": "code-bison", - "name": "code-bison" + "name": "code-bison", + "input_cost": 2.5e-7, + "output_cost": 5e-7 }, { "label": "code-gecko", - "name": "code-gecko" + "name": "code-gecko", + "input_cost": 2.5e-7, + "output_cost": 5e-7 }, { "label": "text-bison-32k", - "name": "text-bison-32k" + "name": "text-bison-32k", + "input_cost": 2.5e-7, + "output_cost": 5e-7 }, { "label": "code-bison-32k", - "name": "code-bison-32k" + "name": "code-bison-32k", + "input_cost": 2.5e-7, + "output_cost": 5e-7 }, { "label": "code-gecko-32k", - "name": "code-gecko-32k" + "name": "code-gecko-32k", + "input_cost": 2.5e-7, + "output_cost": 5e-7 } ] }, @@ -1169,15 +1583,21 @@ "models": [ { "label": "gpt-3.5-turbo-instruct", - "name": "gpt-3.5-turbo-instruct" + "name": "gpt-3.5-turbo-instruct", + "input_cost": 0.0000015, + "output_cost": 0.000002 }, { "label": "babbage-002", - "name": "babbage-002" + "name": "babbage-002", + "input_cost": 4e-7, + "output_cost": 0.0000016 }, { "label": "davinci-002", - "name": "davinci-002" + "name": "davinci-002", + "input_cost": 0.000006, + "output_cost": 0.000012 } ] } @@ -1320,6 +1740,10 @@ { "label": "text-embedding-004", "name": "text-embedding-004" + }, + { + "label": "gemini-embedding-exp-03-07", + "name": "gemini-embedding-exp-03-07" } ] }, diff --git a/packages/components/nodes/agentflow/Agent/Agent.ts b/packages/components/nodes/agentflow/Agent/Agent.ts new file mode 100644 index 000000000..849a2e3e5 --- /dev/null +++ b/packages/components/nodes/agentflow/Agent/Agent.ts @@ -0,0 +1,1780 @@ +import { BaseChatModel } from '@langchain/core/language_models/chat_models' +import { + ICommonObject, + 
IDatabaseEntity, + IHumanInput, + INode, + INodeData, + INodeOptionsValue, + INodeParams, + IServerSideEventStreamer, + IUsedTool +} from '../../../src/Interface' +import { AIMessageChunk, BaseMessageLike, MessageContentText } from '@langchain/core/messages' +import { AnalyticHandler } from '../../../src/handler' +import { DEFAULT_SUMMARIZER_TEMPLATE } from '../prompt' +import { ILLMMessage } from '../Interface.Agentflow' +import { Tool } from '@langchain/core/tools' +import { ARTIFACTS_PREFIX, SOURCE_DOCUMENTS_PREFIX } from '../../../src/agents' +import { flatten } from 'lodash' +import zodToJsonSchema from 'zod-to-json-schema' +import { getErrorMessage } from '../../../src/error' +import { DataSource } from 'typeorm' +import { + getPastChatHistoryImageMessages, + getUniqueImageMessages, + processMessagesWithImages, + replaceBase64ImagesWithFileReferences, + updateFlowState +} from '../utils' + +interface ITool { + agentSelectedTool: string + agentSelectedToolConfig: ICommonObject + agentSelectedToolRequiresHumanInput: boolean +} + +interface IKnowledgeBase { + documentStore: string + docStoreDescription: string + returnSourceDocuments: boolean +} + +interface IKnowledgeBaseVSEmbeddings { + vectorStore: string + vectorStoreConfig: ICommonObject + embeddingModel: string + embeddingModelConfig: ICommonObject + knowledgeName: string + knowledgeDescription: string + returnSourceDocuments: boolean +} + +interface ISimpliefiedTool { + name: string + description: string + schema: any + toolNode: { + label: string + name: string + } +} + +class Agent_Agentflow implements INode { + label: string + name: string + version: number + description: string + type: string + icon: string + category: string + color: string + baseClasses: string[] + documentation?: string + credential: INodeParams + inputs: INodeParams[] + + constructor() { + this.label = 'Agent' + this.name = 'agentAgentflow' + this.version = 1.0 + this.type = 'Agent' + this.category = 'Agent Flows' + 
this.description = 'Dynamically choose and utilize tools during runtime, enabling multi-step reasoning' + this.color = '#4DD0E1' + this.baseClasses = [this.type] + this.inputs = [ + { + label: 'Model', + name: 'agentModel', + type: 'asyncOptions', + loadMethod: 'listModels', + loadConfig: true + }, + { + label: 'Messages', + name: 'agentMessages', + type: 'array', + optional: true, + acceptVariable: true, + array: [ + { + label: 'Role', + name: 'role', + type: 'options', + options: [ + { + label: 'System', + name: 'system' + }, + { + label: 'Assistant', + name: 'assistant' + }, + { + label: 'Developer', + name: 'developer' + }, + { + label: 'User', + name: 'user' + } + ] + }, + { + label: 'Content', + name: 'content', + type: 'string', + acceptVariable: true, + generateInstruction: true, + rows: 4 + } + ] + }, + { + label: 'Tools', + name: 'agentTools', + type: 'array', + optional: true, + array: [ + { + label: 'Tool', + name: 'agentSelectedTool', + type: 'asyncOptions', + loadMethod: 'listTools', + loadConfig: true + }, + { + label: 'Require Human Input', + name: 'agentSelectedToolRequiresHumanInput', + type: 'boolean', + optional: true + } + ] + }, + { + label: 'Knowledge (Document Stores)', + name: 'agentKnowledgeDocumentStores', + type: 'array', + description: 'Give your agent context about different document sources. 
Document stores must be upserted in advance.', + array: [ + { + label: 'Document Store', + name: 'documentStore', + type: 'asyncOptions', + loadMethod: 'listStores' + }, + { + label: 'Describe Knowledge', + name: 'docStoreDescription', + type: 'string', + generateDocStoreDescription: true, + placeholder: + 'Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information', + rows: 4 + }, + { + label: 'Return Source Documents', + name: 'returnSourceDocuments', + type: 'boolean', + optional: true + } + ], + optional: true + }, + { + label: 'Knowledge (Vector Embeddings)', + name: 'agentKnowledgeVSEmbeddings', + type: 'array', + description: 'Give your agent context about different document sources from existing vector stores and embeddings', + array: [ + { + label: 'Vector Store', + name: 'vectorStore', + type: 'asyncOptions', + loadMethod: 'listVectorStores', + loadConfig: true + }, + { + label: 'Embedding Model', + name: 'embeddingModel', + type: 'asyncOptions', + loadMethod: 'listEmbeddings', + loadConfig: true + }, + { + label: 'Knowledge Name', + name: 'knowledgeName', + type: 'string', + placeholder: + 'A short name for the knowledge base, this is useful for the AI to know when and how to search for correct information' + }, + { + label: 'Describe Knowledge', + name: 'knowledgeDescription', + type: 'string', + placeholder: + 'Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information', + rows: 4 + }, + { + label: 'Return Source Documents', + name: 'returnSourceDocuments', + type: 'boolean', + optional: true + } + ], + optional: true + }, + { + label: 'Enable Memory', + name: 'agentEnableMemory', + type: 'boolean', + description: 'Enable memory for the conversation thread', + default: true, + optional: true + }, + { + label: 'Memory Type', + name: 'agentMemoryType', + type: 'options', + options: [ + { + label: 'All Messages', + name: 
'allMessages', + description: 'Retrieve all messages from the conversation' + }, + { + label: 'Window Size', + name: 'windowSize', + description: 'Uses a fixed window size to surface the last N messages' + }, + { + label: 'Conversation Summary', + name: 'conversationSummary', + description: 'Summarizes the whole conversation' + }, + { + label: 'Conversation Summary Buffer', + name: 'conversationSummaryBuffer', + description: 'Summarize conversations once token limit is reached. Default to 2000' + } + ], + optional: true, + default: 'allMessages', + show: { + agentEnableMemory: true + } + }, + { + label: 'Window Size', + name: 'agentMemoryWindowSize', + type: 'number', + default: '20', + description: 'Uses a fixed window size to surface the last N messages', + show: { + agentMemoryType: 'windowSize' + } + }, + { + label: 'Max Token Limit', + name: 'agentMemoryMaxTokenLimit', + type: 'number', + default: '2000', + description: 'Summarize conversations once token limit is reached. Default to 2000', + show: { + agentMemoryType: 'conversationSummaryBuffer' + } + }, + { + label: 'Input Message', + name: 'agentUserMessage', + type: 'string', + description: 'Add an input message as user message at the end of the conversation', + rows: 4, + optional: true, + acceptVariable: true, + show: { + agentEnableMemory: true + } + }, + { + label: 'Return Response As', + name: 'agentReturnResponseAs', + type: 'options', + options: [ + { + label: 'User Message', + name: 'userMessage' + }, + { + label: 'Assistant Message', + name: 'assistantMessage' + } + ], + default: 'userMessage' + }, + { + label: 'Update Flow State', + name: 'agentUpdateState', + description: 'Update runtime state during the execution of the workflow', + type: 'array', + optional: true, + acceptVariable: true, + array: [ + { + label: 'Key', + name: 'key', + type: 'asyncOptions', + loadMethod: 'listRuntimeStateKeys', + freeSolo: true + }, + { + label: 'Value', + name: 'value', + type: 'string', + acceptVariable: 
true, + acceptNodeOutputAsVariable: true + } + ] + } + ] + } + + //@ts-ignore + loadMethods = { + async listModels(_: INodeData, options: ICommonObject): Promise { + const componentNodes = options.componentNodes as { + [key: string]: INode + } + + const returnOptions: INodeOptionsValue[] = [] + for (const nodeName in componentNodes) { + const componentNode = componentNodes[nodeName] + if (componentNode.category === 'Chat Models') { + if (componentNode.tags?.includes('LlamaIndex')) { + continue + } + returnOptions.push({ + label: componentNode.label, + name: nodeName, + imageSrc: componentNode.icon + }) + } + } + return returnOptions + }, + async listEmbeddings(_: INodeData, options: ICommonObject): Promise { + const componentNodes = options.componentNodes as { + [key: string]: INode + } + + const returnOptions: INodeOptionsValue[] = [] + for (const nodeName in componentNodes) { + const componentNode = componentNodes[nodeName] + if (componentNode.category === 'Embeddings') { + if (componentNode.tags?.includes('LlamaIndex')) { + continue + } + returnOptions.push({ + label: componentNode.label, + name: nodeName, + imageSrc: componentNode.icon + }) + } + } + return returnOptions + }, + async listTools(_: INodeData, options: ICommonObject): Promise { + const componentNodes = options.componentNodes as { + [key: string]: INode + } + + const removeTools = ['chainTool', 'retrieverTool', 'webBrowser'] + + const returnOptions: INodeOptionsValue[] = [] + for (const nodeName in componentNodes) { + const componentNode = componentNodes[nodeName] + if (componentNode.category === 'Tools' || componentNode.category === 'Tools (MCP)') { + if (componentNode.tags?.includes('LlamaIndex')) { + continue + } + if (removeTools.includes(nodeName)) { + continue + } + returnOptions.push({ + label: componentNode.label, + name: nodeName, + imageSrc: componentNode.icon + }) + } + } + return returnOptions + }, + async listRuntimeStateKeys(_: INodeData, options: ICommonObject): Promise { + const 
previousNodes = options.previousNodes as ICommonObject[] + const startAgentflowNode = previousNodes.find((node) => node.name === 'startAgentflow') + const state = startAgentflowNode?.inputs?.startState as ICommonObject[] + return state.map((item) => ({ label: item.key, name: item.key })) + }, + async listStores(_: INodeData, options: ICommonObject): Promise { + const returnData: INodeOptionsValue[] = [] + + const appDataSource = options.appDataSource as DataSource + const databaseEntities = options.databaseEntities as IDatabaseEntity + + if (appDataSource === undefined || !appDataSource) { + return returnData + } + + const stores = await appDataSource.getRepository(databaseEntities['DocumentStore']).find() + for (const store of stores) { + if (store.status === 'UPSERTED') { + const obj = { + name: `${store.id}:${store.name}`, + label: store.name, + description: store.description + } + returnData.push(obj) + } + } + return returnData + }, + async listVectorStores(_: INodeData, options: ICommonObject): Promise { + const componentNodes = options.componentNodes as { + [key: string]: INode + } + + const returnOptions: INodeOptionsValue[] = [] + for (const nodeName in componentNodes) { + const componentNode = componentNodes[nodeName] + if (componentNode.category === 'Vector Stores') { + if (componentNode.tags?.includes('LlamaIndex')) { + continue + } + returnOptions.push({ + label: componentNode.label, + name: nodeName, + imageSrc: componentNode.icon + }) + } + } + return returnOptions + } + } + + async run(nodeData: INodeData, input: string | Record, options: ICommonObject): Promise { + let llmIds: ICommonObject | undefined + let analyticHandlers = options.analyticHandlers as AnalyticHandler + + try { + const abortController = options.abortController as AbortController + + // Extract input parameters + const model = nodeData.inputs?.agentModel as string + const modelConfig = nodeData.inputs?.agentModelConfig as ICommonObject + if (!model) { + throw new Error('Model is 
required') + } + + // Extract tools + const tools = nodeData.inputs?.agentTools as ITool[] + + const toolsInstance: Tool[] = [] + for (const tool of tools) { + const toolConfig = tool.agentSelectedToolConfig + const nodeInstanceFilePath = options.componentNodes[tool.agentSelectedTool].filePath as string + const nodeModule = await import(nodeInstanceFilePath) + const newToolNodeInstance = new nodeModule.nodeClass() + const newNodeData = { + ...nodeData, + credential: toolConfig['FLOWISE_CREDENTIAL_ID'], + inputs: { + ...nodeData.inputs, + ...toolConfig + } + } + const toolInstance = await newToolNodeInstance.init(newNodeData, '', options) + if (tool.agentSelectedToolRequiresHumanInput) { + toolInstance.requiresHumanInput = true + } + + // toolInstance might returns a list of tools like MCP tools + if (Array.isArray(toolInstance)) { + for (const subTool of toolInstance) { + const subToolInstance = subTool as Tool + ;(subToolInstance as any).agentSelectedTool = tool.agentSelectedTool + toolsInstance.push(subToolInstance) + } + } else { + toolsInstance.push(toolInstance as Tool) + } + } + + const availableTools: ISimpliefiedTool[] = toolsInstance.map((tool, index) => { + const originalTool = tools[index] + let agentSelectedTool = (tool as any)?.agentSelectedTool + if (!agentSelectedTool) { + agentSelectedTool = originalTool?.agentSelectedTool + } + const componentNode = options.componentNodes[agentSelectedTool] + + const jsonSchema = zodToJsonSchema(tool.schema) + if (jsonSchema.$schema) { + delete jsonSchema.$schema + } + + return { + name: tool.name, + description: tool.description, + schema: jsonSchema, + toolNode: { + label: componentNode?.label || tool.name, + name: componentNode?.name || tool.name + } + } + }) + + // Extract knowledge + const knowledgeBases = nodeData.inputs?.agentKnowledgeDocumentStores as IKnowledgeBase[] + if (knowledgeBases && knowledgeBases.length > 0) { + for (const knowledgeBase of knowledgeBases) { + const nodeInstanceFilePath = 
options.componentNodes['retrieverTool'].filePath as string + const nodeModule = await import(nodeInstanceFilePath) + const newRetrieverToolNodeInstance = new nodeModule.nodeClass() + const [storeId, storeName] = knowledgeBase.documentStore.split(':') + + const docStoreVectorInstanceFilePath = options.componentNodes['documentStoreVS'].filePath as string + const docStoreVectorModule = await import(docStoreVectorInstanceFilePath) + const newDocStoreVectorInstance = new docStoreVectorModule.nodeClass() + const docStoreVectorInstance = await newDocStoreVectorInstance.init( + { + ...nodeData, + inputs: { + ...nodeData.inputs, + selectedStore: storeId + }, + outputs: { + output: 'retriever' + } + }, + '', + options + ) + + const newRetrieverToolNodeData = { + ...nodeData, + inputs: { + ...nodeData.inputs, + name: storeName + .toLowerCase() + .replace(/ /g, '_') + .replace(/[^a-z0-9_-]/g, ''), + description: knowledgeBase.docStoreDescription, + retriever: docStoreVectorInstance, + returnSourceDocuments: knowledgeBase.returnSourceDocuments + } + } + const retrieverToolInstance = await newRetrieverToolNodeInstance.init(newRetrieverToolNodeData, '', options) + + toolsInstance.push(retrieverToolInstance as Tool) + + const jsonSchema = zodToJsonSchema(retrieverToolInstance.schema) + if (jsonSchema.$schema) { + delete jsonSchema.$schema + } + const componentNode = options.componentNodes['retrieverTool'] + + availableTools.push({ + name: storeName + .toLowerCase() + .replace(/ /g, '_') + .replace(/[^a-z0-9_-]/g, ''), + description: knowledgeBase.docStoreDescription, + schema: jsonSchema, + toolNode: { + label: componentNode?.label || retrieverToolInstance.name, + name: componentNode?.name || retrieverToolInstance.name + } + }) + } + } + + const knowledgeBasesForVSEmbeddings = nodeData.inputs?.agentKnowledgeVSEmbeddings as IKnowledgeBaseVSEmbeddings[] + if (knowledgeBasesForVSEmbeddings && knowledgeBasesForVSEmbeddings.length > 0) { + for (const knowledgeBase of 
knowledgeBasesForVSEmbeddings) { + const nodeInstanceFilePath = options.componentNodes['retrieverTool'].filePath as string + const nodeModule = await import(nodeInstanceFilePath) + const newRetrieverToolNodeInstance = new nodeModule.nodeClass() + + const selectedEmbeddingModel = knowledgeBase.embeddingModel + const selectedEmbeddingModelConfig = knowledgeBase.embeddingModelConfig + const embeddingInstanceFilePath = options.componentNodes[selectedEmbeddingModel].filePath as string + const embeddingModule = await import(embeddingInstanceFilePath) + const newEmbeddingInstance = new embeddingModule.nodeClass() + const newEmbeddingNodeData = { + ...nodeData, + credential: selectedEmbeddingModelConfig['FLOWISE_CREDENTIAL_ID'], + inputs: { + ...nodeData.inputs, + ...selectedEmbeddingModelConfig + } + } + const embeddingInstance = await newEmbeddingInstance.init(newEmbeddingNodeData, '', options) + + const selectedVectorStore = knowledgeBase.vectorStore + const selectedVectorStoreConfig = knowledgeBase.vectorStoreConfig + const vectorStoreInstanceFilePath = options.componentNodes[selectedVectorStore].filePath as string + const vectorStoreModule = await import(vectorStoreInstanceFilePath) + const newVectorStoreInstance = new vectorStoreModule.nodeClass() + const newVSNodeData = { + ...nodeData, + credential: selectedVectorStoreConfig['FLOWISE_CREDENTIAL_ID'], + inputs: { + ...nodeData.inputs, + ...selectedVectorStoreConfig, + embeddings: embeddingInstance + }, + outputs: { + output: 'retriever' + } + } + const vectorStoreInstance = await newVectorStoreInstance.init(newVSNodeData, '', options) + + const knowledgeName = knowledgeBase.knowledgeName || '' + + const newRetrieverToolNodeData = { + ...nodeData, + inputs: { + ...nodeData.inputs, + name: knowledgeName + .toLowerCase() + .replace(/ /g, '_') + .replace(/[^a-z0-9_-]/g, ''), + description: knowledgeBase.knowledgeDescription, + retriever: vectorStoreInstance, + returnSourceDocuments: knowledgeBase.returnSourceDocuments + 
} + } + const retrieverToolInstance = await newRetrieverToolNodeInstance.init(newRetrieverToolNodeData, '', options) + + toolsInstance.push(retrieverToolInstance as Tool) + + const jsonSchema = zodToJsonSchema(retrieverToolInstance.schema) + if (jsonSchema.$schema) { + delete jsonSchema.$schema + } + const componentNode = options.componentNodes['retrieverTool'] + + availableTools.push({ + name: knowledgeName + .toLowerCase() + .replace(/ /g, '_') + .replace(/[^a-z0-9_-]/g, ''), + description: knowledgeBase.knowledgeDescription, + schema: jsonSchema, + toolNode: { + label: componentNode?.label || retrieverToolInstance.name, + name: componentNode?.name || retrieverToolInstance.name + } + }) + } + } + + // Extract memory and configuration options + const enableMemory = nodeData.inputs?.agentEnableMemory as boolean + const memoryType = nodeData.inputs?.agentMemoryType as string + const userMessage = nodeData.inputs?.agentUserMessage as string + const _agentUpdateState = nodeData.inputs?.agentUpdateState + const agentMessages = (nodeData.inputs?.agentMessages as unknown as ILLMMessage[]) ?? [] + + // Extract runtime state and history + const state = options.agentflowRuntime?.state as ICommonObject + const pastChatHistory = (options.pastChatHistory as BaseMessageLike[]) ?? [] + const runtimeChatHistory = (options.agentflowRuntime?.chatHistory as BaseMessageLike[]) ?? 
[] + const chatId = options.chatId as string + + // Initialize the LLM model instance + const nodeInstanceFilePath = options.componentNodes[model].filePath as string + const nodeModule = await import(nodeInstanceFilePath) + const newLLMNodeInstance = new nodeModule.nodeClass() + const newNodeData = { + ...nodeData, + credential: modelConfig['FLOWISE_CREDENTIAL_ID'], + inputs: { + ...nodeData.inputs, + ...modelConfig + } + } + + const llmWithoutToolsBind = (await newLLMNodeInstance.init(newNodeData, '', options)) as BaseChatModel + let llmNodeInstance = llmWithoutToolsBind + + if (llmNodeInstance && toolsInstance.length > 0) { + if (llmNodeInstance.bindTools === undefined) { + throw new Error(`Agent needs to have a function calling capable models.`) + } + + // @ts-ignore + llmNodeInstance = llmNodeInstance.bindTools(toolsInstance) + } + + // Prepare messages array + const messages: BaseMessageLike[] = [] + // Use to store messages with image file references as we do not want to store the base64 data into database + let runtimeImageMessagesWithFileRef: BaseMessageLike[] = [] + // Use to keep track of past messages with image file references + let pastImageMessagesWithFileRef: BaseMessageLike[] = [] + + for (const msg of agentMessages) { + const role = msg.role + const content = msg.content + if (role && content) { + messages.push({ role, content }) + } + } + + // Handle memory management if enabled + if (enableMemory) { + await this.handleMemory({ + messages, + memoryType, + pastChatHistory, + runtimeChatHistory, + llmNodeInstance, + nodeData, + userMessage, + input, + abortController, + options, + modelConfig, + runtimeImageMessagesWithFileRef, + pastImageMessagesWithFileRef + }) + } else if (!runtimeChatHistory.length) { + /* + * If this is the first node: + * - Add images to messages if exist + * - Add user message + */ + if (options.uploads) { + const imageContents = await getUniqueImageMessages(options, messages, modelConfig) + if (imageContents) { + const { 
imageMessageWithBase64, imageMessageWithFileRef } = imageContents + messages.push(imageMessageWithBase64) + runtimeImageMessagesWithFileRef.push(imageMessageWithFileRef) + } + } + + if (input && typeof input === 'string') { + messages.push({ + role: 'user', + content: input + }) + } + } + delete nodeData.inputs?.agentMessages + + // Initialize response and determine if streaming is possible + let response: AIMessageChunk = new AIMessageChunk('') + const isLastNode = options.isLastNode as boolean + const isStreamable = isLastNode && options.sseStreamer !== undefined && modelConfig?.streaming !== false + + // Start analytics + if (analyticHandlers && options.parentTraceIds) { + const llmLabel = options?.componentNodes?.[model]?.label || model + llmIds = await analyticHandlers.onLLMStart(llmLabel, messages, options.parentTraceIds) + } + + // Track execution time + const startTime = Date.now() + + // Get initial response from LLM + const sseStreamer: IServerSideEventStreamer | undefined = options.sseStreamer + + // Handle tool calls with support for recursion + let usedTools: IUsedTool[] = [] + let sourceDocuments: Array = [] + let artifacts: any[] = [] + let additionalTokens = 0 + let isWaitingForHumanInput = false + + // Store the current messages length to track which messages are added during tool calls + const messagesBeforeToolCalls = [...messages] + let _toolCallMessages: BaseMessageLike[] = [] + + // Check if this is hummanInput for tool calls + const _humanInput = nodeData.inputs?.humanInput + const humanInput: IHumanInput = typeof _humanInput === 'string' ? JSON.parse(_humanInput) : _humanInput + const humanInputAction = options.humanInputAction + const iterationContext = options.iterationContext + + if (humanInput) { + if (humanInput.type !== 'proceed' && humanInput.type !== 'reject') { + throw new Error(`Invalid human input type. 
Expected 'proceed' or 'reject', but got '${humanInput.type}'`) + } + const result = await this.handleResumedToolCalls({ + humanInput, + humanInputAction, + messages, + toolsInstance, + sseStreamer, + chatId, + input, + options, + abortController, + llmWithoutToolsBind, + isStreamable, + isLastNode, + iterationContext + }) + + response = result.response + usedTools = result.usedTools + sourceDocuments = result.sourceDocuments + artifacts = result.artifacts + additionalTokens = result.totalTokens + isWaitingForHumanInput = result.isWaitingForHumanInput || false + + // Calculate which messages were added during tool calls + _toolCallMessages = messages.slice(messagesBeforeToolCalls.length) + + // Stream additional data if this is the last node + if (isLastNode && sseStreamer) { + if (usedTools.length > 0) { + sseStreamer.streamUsedToolsEvent(chatId, flatten(usedTools)) + } + + if (sourceDocuments.length > 0) { + sseStreamer.streamSourceDocumentsEvent(chatId, flatten(sourceDocuments)) + } + + if (artifacts.length > 0) { + sseStreamer.streamArtifactsEvent(chatId, flatten(artifacts)) + } + } + } else { + if (isStreamable) { + response = await this.handleStreamingResponse(sseStreamer, llmNodeInstance, messages, chatId, abortController) + } else { + response = await llmNodeInstance.invoke(messages, { signal: abortController?.signal }) + } + } + + if (!humanInput && response.tool_calls && response.tool_calls.length > 0) { + const result = await this.handleToolCalls({ + response, + messages, + toolsInstance, + sseStreamer, + chatId, + input, + options, + abortController, + llmNodeInstance, + isStreamable, + isLastNode, + iterationContext + }) + + response = result.response + usedTools = result.usedTools + sourceDocuments = result.sourceDocuments + artifacts = result.artifacts + additionalTokens = result.totalTokens + isWaitingForHumanInput = result.isWaitingForHumanInput || false + + // Calculate which messages were added during tool calls + _toolCallMessages = 
messages.slice(messagesBeforeToolCalls.length) + + // Stream additional data if this is the last node + if (isLastNode && sseStreamer) { + if (usedTools.length > 0) { + sseStreamer.streamUsedToolsEvent(chatId, flatten(usedTools)) + } + + if (sourceDocuments.length > 0) { + sseStreamer.streamSourceDocumentsEvent(chatId, flatten(sourceDocuments)) + } + + if (artifacts.length > 0) { + sseStreamer.streamArtifactsEvent(chatId, flatten(artifacts)) + } + } + } else if (!humanInput && !isStreamable && isLastNode && sseStreamer) { + // Stream whole response back to UI if not streaming and no tool calls + let responseContent = JSON.stringify(response, null, 2) + if (typeof response.content === 'string') { + responseContent = response.content + } + sseStreamer.streamTokenEvent(chatId, responseContent) + } + + // Calculate execution time + const endTime = Date.now() + const timeDelta = endTime - startTime + + // Update flow state if needed + let newState = { ...state } + if (_agentUpdateState && Array.isArray(_agentUpdateState) && _agentUpdateState.length > 0) { + newState = updateFlowState(state, _agentUpdateState) + } + + // Clean up empty inputs + for (const key in nodeData.inputs) { + if (nodeData.inputs[key] === '') { + delete nodeData.inputs[key] + } + } + + // Prepare final response and output object + const finalResponse = (response.content as string) ?? 
JSON.stringify(response, null, 2) + const output = this.prepareOutputObject( + response, + availableTools, + finalResponse, + startTime, + endTime, + timeDelta, + usedTools, + sourceDocuments, + artifacts, + additionalTokens, + isWaitingForHumanInput + ) + + // End analytics tracking + if (analyticHandlers && llmIds) { + await analyticHandlers.onLLMEnd(llmIds, finalResponse) + } + + // Send additional streaming events if needed + if (isStreamable) { + this.sendStreamingEvents(options, chatId, response) + } + + // Process template variables in state + if (newState && Object.keys(newState).length > 0) { + for (const key in newState) { + if (newState[key].toString().includes('{{ output }}')) { + newState[key] = finalResponse + } + } + } + + // Replace the actual messages array with one that includes the file references for images instead of base64 data + const messagesWithFileReferences = replaceBase64ImagesWithFileReferences( + messages, + runtimeImageMessagesWithFileRef, + pastImageMessagesWithFileRef + ) + + // Only add to runtime chat history if this is the first node + const inputMessages = [] + if (!runtimeChatHistory.length) { + if (runtimeImageMessagesWithFileRef.length) { + inputMessages.push(...runtimeImageMessagesWithFileRef) + } + if (input && typeof input === 'string') { + inputMessages.push({ role: 'user', content: input }) + } + } + + const returnResponseAs = nodeData.inputs?.agentReturnResponseAs as string + let returnRole = 'user' + if (returnResponseAs === 'assistantMessage') { + returnRole = 'assistant' + } + + // Prepare and return the final output + return { + id: nodeData.id, + name: this.name, + input: { + messages: messagesWithFileReferences, + ...nodeData.inputs + }, + output, + state: newState, + chatHistory: [ + ...inputMessages, + + // Add the messages that were specifically added during tool calls, this enable other nodes to see the full tool call history, temporaraily disabled + // ...toolCallMessages, + + // End with the final assistant 
response + { + role: returnRole, + content: finalResponse, + name: nodeData?.label ? nodeData?.label.toLowerCase().replace(/\s/g, '_').trim() : nodeData?.id + } + ] + } + } catch (error) { + if (options.analyticHandlers && llmIds) { + await options.analyticHandlers.onLLMError(llmIds, error instanceof Error ? error.message : String(error)) + } + + if (error instanceof Error && error.message === 'Aborted') { + throw error + } + throw new Error(`Error in Agent node: ${error instanceof Error ? error.message : String(error)}`) + } + } + + /** + * Handles memory management based on the specified memory type + */ + private async handleMemory({ + messages, + memoryType, + pastChatHistory, + runtimeChatHistory, + llmNodeInstance, + nodeData, + userMessage, + input, + abortController, + options, + modelConfig, + runtimeImageMessagesWithFileRef, + pastImageMessagesWithFileRef + }: { + messages: BaseMessageLike[] + memoryType: string + pastChatHistory: BaseMessageLike[] + runtimeChatHistory: BaseMessageLike[] + llmNodeInstance: BaseChatModel + nodeData: INodeData + userMessage: string + input: string | Record + abortController: AbortController + options: ICommonObject + modelConfig: ICommonObject + runtimeImageMessagesWithFileRef: BaseMessageLike[] + pastImageMessagesWithFileRef: BaseMessageLike[] + }): Promise { + const { updatedPastMessages, transformedPastMessages } = await getPastChatHistoryImageMessages(pastChatHistory, options) + pastChatHistory = updatedPastMessages + pastImageMessagesWithFileRef.push(...transformedPastMessages) + + let pastMessages = [...pastChatHistory, ...runtimeChatHistory] + if (!runtimeChatHistory.length && input && typeof input === 'string') { + /* + * If this is the first node: + * - Add images to messages if exist + * - Add user message + */ + if (options.uploads) { + const imageContents = await getUniqueImageMessages(options, messages, modelConfig) + if (imageContents) { + const { imageMessageWithBase64, imageMessageWithFileRef } = 
imageContents + pastMessages.push(imageMessageWithBase64) + runtimeImageMessagesWithFileRef.push(imageMessageWithFileRef) + } + } + pastMessages.push({ + role: 'user', + content: input + }) + } + const { updatedMessages, transformedMessages } = await processMessagesWithImages(pastMessages, options) + pastMessages = updatedMessages + pastImageMessagesWithFileRef.push(...transformedMessages) + + if (pastMessages.length > 0) { + if (memoryType === 'windowSize') { + // Window memory: Keep the last N messages + const windowSize = nodeData.inputs?.agentMemoryWindowSize as number + const windowedMessages = pastMessages.slice(-windowSize * 2) + messages.push(...windowedMessages) + } else if (memoryType === 'conversationSummary') { + // Summary memory: Summarize all past messages + const summary = await llmNodeInstance.invoke( + [ + { + role: 'user', + content: DEFAULT_SUMMARIZER_TEMPLATE.replace( + '{conversation}', + pastMessages.map((msg: any) => `${msg.role}: ${msg.content}`).join('\n') + ) + } + ], + { signal: abortController?.signal } + ) + messages.push({ role: 'assistant', content: summary.content as string }) + } else if (memoryType === 'conversationSummaryBuffer') { + // Summary buffer: Summarize messages that exceed token limit + await this.handleSummaryBuffer(messages, pastMessages, llmNodeInstance, nodeData, abortController) + } else { + // Default: Use all messages + messages.push(...pastMessages) + } + } + + // Add user message + if (userMessage) { + messages.push({ + role: 'user', + content: userMessage + }) + } + } + + /** + * Handles conversation summary buffer memory type + */ + private async handleSummaryBuffer( + messages: BaseMessageLike[], + pastMessages: BaseMessageLike[], + llmNodeInstance: BaseChatModel, + nodeData: INodeData, + abortController: AbortController + ): Promise { + const maxTokenLimit = (nodeData.inputs?.agentMemoryMaxTokenLimit as number) || 2000 + + // Convert past messages to a format suitable for token counting + const 
messagesString = pastMessages.map((msg: any) => `${msg.role}: ${msg.content}`).join('\n') + const tokenCount = await llmNodeInstance.getNumTokens(messagesString) + + if (tokenCount > maxTokenLimit) { + // Calculate how many messages to summarize (messages that exceed the token limit) + let currBufferLength = tokenCount + const messagesToSummarize = [] + const remainingMessages = [...pastMessages] + + // Remove messages from the beginning until we're under the token limit + while (currBufferLength > maxTokenLimit && remainingMessages.length > 0) { + const poppedMessage = remainingMessages.shift() + if (poppedMessage) { + messagesToSummarize.push(poppedMessage) + // Recalculate token count for remaining messages + const remainingMessagesString = remainingMessages.map((msg: any) => `${msg.role}: ${msg.content}`).join('\n') + currBufferLength = await llmNodeInstance.getNumTokens(remainingMessagesString) + } + } + + // Summarize the messages that were removed + const messagesToSummarizeString = messagesToSummarize.map((msg: any) => `${msg.role}: ${msg.content}`).join('\n') + + const summary = await llmNodeInstance.invoke( + [ + { + role: 'user', + content: DEFAULT_SUMMARIZER_TEMPLATE.replace('{conversation}', messagesToSummarizeString) + } + ], + { signal: abortController?.signal } + ) + + // Add summary as a system message at the beginning, then add remaining messages + messages.push({ role: 'system', content: `Previous conversation summary: ${summary.content}` }) + messages.push(...remainingMessages) + } else { + // If under token limit, use all messages + messages.push(...pastMessages) + } + } + + /** + * Handles streaming response from the LLM + */ + private async handleStreamingResponse( + sseStreamer: IServerSideEventStreamer | undefined, + llmNodeInstance: BaseChatModel, + messages: BaseMessageLike[], + chatId: string, + abortController: AbortController + ): Promise { + let response = new AIMessageChunk('') + + try { + for await (const chunk of await 
llmNodeInstance.stream(messages, { signal: abortController?.signal })) { + if (sseStreamer) { + let content = '' + if (Array.isArray(chunk.content) && chunk.content.length > 0) { + const contents = chunk.content as MessageContentText[] + content = contents.map((item) => item.text).join('') + } else { + content = chunk.content.toString() + } + sseStreamer.streamTokenEvent(chatId, content) + } + + response = response.concat(chunk) + } + } catch (error) { + console.error('Error during streaming:', error) + throw error + } + if (Array.isArray(response.content) && response.content.length > 0) { + const responseContents = response.content as MessageContentText[] + response.content = responseContents.map((item) => item.text).join('') + } + return response + } + + /** + * Prepares the output object with response and metadata + */ + private prepareOutputObject( + response: AIMessageChunk, + availableTools: ISimpliefiedTool[], + finalResponse: string, + startTime: number, + endTime: number, + timeDelta: number, + usedTools: IUsedTool[], + sourceDocuments: Array, + artifacts: any[], + additionalTokens: number = 0, + isWaitingForHumanInput: boolean = false + ): any { + const output: any = { + content: finalResponse, + timeMetadata: { + start: startTime, + end: endTime, + delta: timeDelta + } + } + + if (response.tool_calls) { + output.calledTools = response.tool_calls + } + + // Include token usage metadata with accumulated tokens from tool calls + if (response.usage_metadata) { + const originalTokens = response.usage_metadata.total_tokens || 0 + output.usageMetadata = { + ...response.usage_metadata, + total_tokens: originalTokens + additionalTokens, + tool_call_tokens: additionalTokens + } + } else if (additionalTokens > 0) { + // If no original usage metadata but we have tool tokens + output.usageMetadata = { + total_tokens: additionalTokens, + tool_call_tokens: additionalTokens + } + } + + // Add used tools, source documents and artifacts to output + if (usedTools && 
usedTools.length > 0) { + output.usedTools = flatten(usedTools) + } + + if (sourceDocuments && sourceDocuments.length > 0) { + output.sourceDocuments = flatten(sourceDocuments) + } + + if (artifacts && artifacts.length > 0) { + output.artifacts = flatten(artifacts) + } + + if (availableTools && availableTools.length > 0) { + output.availableTools = availableTools + } + + if (isWaitingForHumanInput) { + output.isWaitingForHumanInput = isWaitingForHumanInput + } + + return output + } + + /** + * Sends additional streaming events for tool calls and metadata + */ + private sendStreamingEvents(options: ICommonObject, chatId: string, response: AIMessageChunk): void { + const sseStreamer: IServerSideEventStreamer = options.sseStreamer as IServerSideEventStreamer + + if (response.tool_calls) { + sseStreamer.streamCalledToolsEvent(chatId, response.tool_calls) + } + + if (response.usage_metadata) { + sseStreamer.streamUsageMetadataEvent(chatId, response.usage_metadata) + } + + sseStreamer.streamEndEvent(chatId) + } + + /** + * Handles tool calls and their responses, with support for recursive tool calling + */ + private async handleToolCalls({ + response, + messages, + toolsInstance, + sseStreamer, + chatId, + input, + options, + abortController, + llmNodeInstance, + isStreamable, + isLastNode, + iterationContext + }: { + response: AIMessageChunk + messages: BaseMessageLike[] + toolsInstance: Tool[] + sseStreamer: IServerSideEventStreamer | undefined + chatId: string + input: string | Record + options: ICommonObject + abortController: AbortController + llmNodeInstance: BaseChatModel + isStreamable: boolean + isLastNode: boolean + iterationContext: ICommonObject + }): Promise<{ + response: AIMessageChunk + usedTools: IUsedTool[] + sourceDocuments: Array + artifacts: any[] + totalTokens: number + isWaitingForHumanInput?: boolean + }> { + // Track total tokens used throughout this process + let totalTokens = response.usage_metadata?.total_tokens || 0 + + if 
(!response.tool_calls || response.tool_calls.length === 0) { + return { response, usedTools: [], sourceDocuments: [], artifacts: [], totalTokens } + } + + // Stream tool calls if available + if (sseStreamer) { + sseStreamer.streamCalledToolsEvent(chatId, JSON.stringify(response.tool_calls)) + } + + // Add LLM response with tool calls to messages + messages.push({ + id: response.id, + role: 'assistant', + content: response.content, + tool_calls: response.tool_calls, + usage_metadata: response.usage_metadata + }) + + const usedTools: IUsedTool[] = [] + let sourceDocuments: Array = [] + let artifacts: any[] = [] + + // Process each tool call + for (let i = 0; i < response.tool_calls.length; i++) { + const toolCall = response.tool_calls[i] + + const selectedTool = toolsInstance.find((tool) => tool.name === toolCall.name) + if (selectedTool) { + let parsedDocs + let parsedArtifacts + let isToolRequireHumanInput = + (selectedTool as any).requiresHumanInput && (!iterationContext || Object.keys(iterationContext).length === 0) + + const flowConfig = { + sessionId: options.sessionId, + chatId: options.chatId, + input: input, + state: options.agentflowRuntime?.state + } + + if (isToolRequireHumanInput) { + const toolCallDetails = '```json\n' + JSON.stringify(toolCall, null, 2) + '\n```' + const responseContent = response.content + `\nAttempting to use tool:\n${toolCallDetails}` + response.content = responseContent + sseStreamer?.streamTokenEvent(chatId, responseContent) + return { response, usedTools, sourceDocuments, artifacts, totalTokens, isWaitingForHumanInput: true } + } + + try { + //@ts-ignore + let toolOutput = await selectedTool.call(toolCall.args, { signal: abortController?.signal }, undefined, flowConfig) + + // Extract source documents if present + if (typeof toolOutput === 'string' && toolOutput.includes(SOURCE_DOCUMENTS_PREFIX)) { + const [output, docs] = toolOutput.split(SOURCE_DOCUMENTS_PREFIX) + toolOutput = output + try { + parsedDocs = JSON.parse(docs) + 
sourceDocuments.push(parsedDocs) + } catch (e) { + console.error('Error parsing source documents from tool:', e) + } + } + + // Extract artifacts if present + if (typeof toolOutput === 'string' && toolOutput.includes(ARTIFACTS_PREFIX)) { + const [output, artifact] = toolOutput.split(ARTIFACTS_PREFIX) + toolOutput = output + try { + parsedArtifacts = JSON.parse(artifact) + artifacts.push(parsedArtifacts) + } catch (e) { + console.error('Error parsing artifacts from tool:', e) + } + } + + // Add tool message to conversation + messages.push({ + role: 'tool', + content: toolOutput, + tool_call_id: toolCall.id, + name: toolCall.name, + additional_kwargs: { + artifacts: parsedArtifacts, + sourceDocuments: parsedDocs + } + }) + + // Track used tools + usedTools.push({ + tool: toolCall.name, + toolInput: toolCall.args, + toolOutput + }) + } catch (e) { + console.error('Error invoking tool:', e) + usedTools.push({ + tool: selectedTool.name, + toolInput: toolCall.args, + toolOutput: '', + error: getErrorMessage(e) + }) + sseStreamer?.streamUsedToolsEvent(chatId, flatten(usedTools)) + throw new Error(getErrorMessage(e)) + } + } + } + + // Return direct tool output if there's exactly one tool with returnDirect + if (response.tool_calls.length === 1) { + const selectedTool = toolsInstance.find((tool) => tool.name === response.tool_calls?.[0]?.name) + if (selectedTool && selectedTool.returnDirect) { + const lastToolOutput = usedTools[0]?.toolOutput || '' + const lastToolOutputString = typeof lastToolOutput === 'string' ? 
lastToolOutput : JSON.stringify(lastToolOutput, null, 2) + + if (sseStreamer) { + sseStreamer.streamTokenEvent(chatId, lastToolOutputString) + } + + return { + response: new AIMessageChunk(lastToolOutputString), + usedTools, + sourceDocuments, + artifacts, + totalTokens + } + } + } + + // Get LLM response after tool calls + let newResponse: AIMessageChunk + + if (isStreamable) { + newResponse = await this.handleStreamingResponse(sseStreamer, llmNodeInstance, messages, chatId, abortController) + } else { + newResponse = await llmNodeInstance.invoke(messages, { signal: abortController?.signal }) + + // Stream non-streaming response if this is the last node + if (isLastNode && sseStreamer) { + let responseContent = JSON.stringify(newResponse, null, 2) + if (typeof newResponse.content === 'string') { + responseContent = newResponse.content + } + sseStreamer.streamTokenEvent(chatId, responseContent) + } + } + + // Add tokens from this response + if (newResponse.usage_metadata?.total_tokens) { + totalTokens += newResponse.usage_metadata.total_tokens + } + + // Check for recursive tool calls and handle them + if (newResponse.tool_calls && newResponse.tool_calls.length > 0) { + const { + response: recursiveResponse, + usedTools: recursiveUsedTools, + sourceDocuments: recursiveSourceDocuments, + artifacts: recursiveArtifacts, + totalTokens: recursiveTokens + } = await this.handleToolCalls({ + response: newResponse, + messages, + toolsInstance, + sseStreamer, + chatId, + input, + options, + abortController, + llmNodeInstance, + isStreamable, + isLastNode, + iterationContext + }) + + // Merge results from recursive tool calls + newResponse = recursiveResponse + usedTools.push(...recursiveUsedTools) + sourceDocuments = [...sourceDocuments, ...recursiveSourceDocuments] + artifacts = [...artifacts, ...recursiveArtifacts] + totalTokens += recursiveTokens + } + + return { response: newResponse, usedTools, sourceDocuments, artifacts, totalTokens } + } + + /** + * Handles tool calls 
and their responses, with support for recursive tool calling + */ + private async handleResumedToolCalls({ + humanInput, + humanInputAction, + messages, + toolsInstance, + sseStreamer, + chatId, + input, + options, + abortController, + llmWithoutToolsBind, + isStreamable, + isLastNode, + iterationContext + }: { + humanInput: IHumanInput + humanInputAction: Record | undefined + messages: BaseMessageLike[] + toolsInstance: Tool[] + sseStreamer: IServerSideEventStreamer | undefined + chatId: string + input: string | Record + options: ICommonObject + abortController: AbortController + llmWithoutToolsBind: BaseChatModel + isStreamable: boolean + isLastNode: boolean + iterationContext: ICommonObject + }): Promise<{ + response: AIMessageChunk + usedTools: IUsedTool[] + sourceDocuments: Array + artifacts: any[] + totalTokens: number + isWaitingForHumanInput?: boolean + }> { + let llmNodeInstance = llmWithoutToolsBind + + const lastCheckpointMessages = humanInputAction?.data?.input?.messages ?? [] + if (!lastCheckpointMessages.length) { + return { response: new AIMessageChunk(''), usedTools: [], sourceDocuments: [], artifacts: [], totalTokens: 0 } + } + + // Use the last message as the response + const response = lastCheckpointMessages[lastCheckpointMessages.length - 1] as AIMessageChunk + + // Replace messages array + messages.length = 0 + messages.push(...lastCheckpointMessages.slice(0, lastCheckpointMessages.length - 1)) + + // Track total tokens used throughout this process + let totalTokens = response.usage_metadata?.total_tokens || 0 + + if (!response.tool_calls || response.tool_calls.length === 0) { + return { response, usedTools: [], sourceDocuments: [], artifacts: [], totalTokens } + } + + // Stream tool calls if available + if (sseStreamer) { + sseStreamer.streamCalledToolsEvent(chatId, JSON.stringify(response.tool_calls)) + } + + // Add LLM response with tool calls to messages + messages.push({ + id: response.id, + role: 'assistant', + content: response.content, 
+ tool_calls: response.tool_calls, + usage_metadata: response.usage_metadata + }) + + const usedTools: IUsedTool[] = [] + let sourceDocuments: Array = [] + let artifacts: any[] = [] + let isWaitingForHumanInput: boolean | undefined + + // Process each tool call + for (let i = 0; i < response.tool_calls.length; i++) { + const toolCall = response.tool_calls[i] + + const selectedTool = toolsInstance.find((tool) => tool.name === toolCall.name) + if (selectedTool) { + let parsedDocs + let parsedArtifacts + + const flowConfig = { + sessionId: options.sessionId, + chatId: options.chatId, + input: input, + state: options.agentflowRuntime?.state + } + + if (humanInput.type === 'reject') { + messages.pop() + toolsInstance = toolsInstance.filter((tool) => tool.name !== toolCall.name) + } + if (humanInput.type === 'proceed') { + try { + //@ts-ignore + let toolOutput = await selectedTool.call(toolCall.args, { signal: abortController?.signal }, undefined, flowConfig) + + // Extract source documents if present + if (typeof toolOutput === 'string' && toolOutput.includes(SOURCE_DOCUMENTS_PREFIX)) { + const [output, docs] = toolOutput.split(SOURCE_DOCUMENTS_PREFIX) + toolOutput = output + try { + parsedDocs = JSON.parse(docs) + sourceDocuments.push(parsedDocs) + } catch (e) { + console.error('Error parsing source documents from tool:', e) + } + } + + // Extract artifacts if present + if (typeof toolOutput === 'string' && toolOutput.includes(ARTIFACTS_PREFIX)) { + const [output, artifact] = toolOutput.split(ARTIFACTS_PREFIX) + toolOutput = output + try { + parsedArtifacts = JSON.parse(artifact) + artifacts.push(parsedArtifacts) + } catch (e) { + console.error('Error parsing artifacts from tool:', e) + } + } + + // Add tool message to conversation + messages.push({ + role: 'tool', + content: toolOutput, + tool_call_id: toolCall.id, + name: toolCall.name, + additional_kwargs: { + artifacts: parsedArtifacts, + sourceDocuments: parsedDocs + } + }) + + // Track used tools + 
usedTools.push({ + tool: toolCall.name, + toolInput: toolCall.args, + toolOutput + }) + } catch (e) { + console.error('Error invoking tool:', e) + usedTools.push({ + tool: selectedTool.name, + toolInput: toolCall.args, + toolOutput: '', + error: getErrorMessage(e) + }) + sseStreamer?.streamUsedToolsEvent(chatId, flatten(usedTools)) + throw new Error(getErrorMessage(e)) + } + } + } + } + + // Return direct tool output if there's exactly one tool with returnDirect + if (response.tool_calls.length === 1) { + const selectedTool = toolsInstance.find((tool) => tool.name === response.tool_calls?.[0]?.name) + if (selectedTool && selectedTool.returnDirect) { + const lastToolOutput = usedTools[0]?.toolOutput || '' + const lastToolOutputString = typeof lastToolOutput === 'string' ? lastToolOutput : JSON.stringify(lastToolOutput, null, 2) + + if (sseStreamer) { + sseStreamer.streamTokenEvent(chatId, lastToolOutputString) + } + + return { + response: new AIMessageChunk(lastToolOutputString), + usedTools, + sourceDocuments, + artifacts, + totalTokens + } + } + } + + // Get LLM response after tool calls + let newResponse: AIMessageChunk + + if (llmNodeInstance && toolsInstance.length > 0) { + if (llmNodeInstance.bindTools === undefined) { + throw new Error(`Agent needs to have a function calling capable models.`) + } + + // @ts-ignore + llmNodeInstance = llmNodeInstance.bindTools(toolsInstance) + } + + if (isStreamable) { + newResponse = await this.handleStreamingResponse(sseStreamer, llmNodeInstance, messages, chatId, abortController) + } else { + newResponse = await llmNodeInstance.invoke(messages, { signal: abortController?.signal }) + + // Stream non-streaming response if this is the last node + if (isLastNode && sseStreamer) { + let responseContent = JSON.stringify(newResponse, null, 2) + if (typeof newResponse.content === 'string') { + responseContent = newResponse.content + } + sseStreamer.streamTokenEvent(chatId, responseContent) + } + } + + // Add tokens from this 
response + if (newResponse.usage_metadata?.total_tokens) { + totalTokens += newResponse.usage_metadata.total_tokens + } + + // Check for recursive tool calls and handle them + if (newResponse.tool_calls && newResponse.tool_calls.length > 0) { + const { + response: recursiveResponse, + usedTools: recursiveUsedTools, + sourceDocuments: recursiveSourceDocuments, + artifacts: recursiveArtifacts, + totalTokens: recursiveTokens, + isWaitingForHumanInput: recursiveIsWaitingForHumanInput + } = await this.handleToolCalls({ + response: newResponse, + messages, + toolsInstance, + sseStreamer, + chatId, + input, + options, + abortController, + llmNodeInstance, + isStreamable, + isLastNode, + iterationContext + }) + + // Merge results from recursive tool calls + newResponse = recursiveResponse + usedTools.push(...recursiveUsedTools) + sourceDocuments = [...sourceDocuments, ...recursiveSourceDocuments] + artifacts = [...artifacts, ...recursiveArtifacts] + totalTokens += recursiveTokens + isWaitingForHumanInput = recursiveIsWaitingForHumanInput + } + + return { response: newResponse, usedTools, sourceDocuments, artifacts, totalTokens, isWaitingForHumanInput } + } +} + +module.exports = { nodeClass: Agent_Agentflow } diff --git a/packages/components/nodes/agentflow/Condition/Condition.ts b/packages/components/nodes/agentflow/Condition/Condition.ts new file mode 100644 index 000000000..af2fa0411 --- /dev/null +++ b/packages/components/nodes/agentflow/Condition/Condition.ts @@ -0,0 +1,350 @@ +import { CommonType, ICommonObject, ICondition, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface' + +class Condition_Agentflow implements INode { + label: string + name: string + version: number + description: string + type: string + icon: string + category: string + color: string + tags: string[] + baseClasses: string[] + inputs: INodeParams[] + outputs: INodeOutputsValue[] + + constructor() { + this.label = 'Condition' + this.name = 'conditionAgentflow' + 
this.version = 1.0 + this.type = 'Condition' + this.category = 'Agent Flows' + this.description = `Split flows based on If Else conditions` + this.baseClasses = [this.type] + this.color = '#FFB938' + this.inputs = [ + { + label: 'Conditions', + name: 'conditions', + type: 'array', + description: 'Values to compare', + acceptVariable: true, + default: [ + { + type: 'string', + value1: '', + operation: 'equal', + value2: '' + } + ], + array: [ + { + label: 'Type', + name: 'type', + type: 'options', + options: [ + { + label: 'String', + name: 'string' + }, + { + label: 'Number', + name: 'number' + }, + { + label: 'Boolean', + name: 'boolean' + } + ], + default: 'string' + }, + /////////////////////////////////////// STRING //////////////////////////////////////// + { + label: 'Value 1', + name: 'value1', + type: 'string', + default: '', + description: 'First value to be compared with', + acceptVariable: true, + show: { + 'conditions[$index].type': 'string' + } + }, + { + label: 'Operation', + name: 'operation', + type: 'options', + options: [ + { + label: 'Contains', + name: 'contains' + }, + { + label: 'Ends With', + name: 'endsWith' + }, + { + label: 'Equal', + name: 'equal' + }, + { + label: 'Not Contains', + name: 'notContains' + }, + { + label: 'Not Equal', + name: 'notEqual' + }, + { + label: 'Regex', + name: 'regex' + }, + { + label: 'Starts With', + name: 'startsWith' + }, + { + label: 'Is Empty', + name: 'isEmpty' + }, + { + label: 'Not Empty', + name: 'notEmpty' + } + ], + default: 'equal', + description: 'Type of operation', + show: { + 'conditions[$index].type': 'string' + } + }, + { + label: 'Value 2', + name: 'value2', + type: 'string', + default: '', + description: 'Second value to be compared with', + acceptVariable: true, + show: { + 'conditions[$index].type': 'string' + }, + hide: { + 'conditions[$index].operation': ['isEmpty', 'notEmpty'] + } + }, + /////////////////////////////////////// NUMBER //////////////////////////////////////// + { + label: 
'Value 1', + name: 'value1', + type: 'number', + default: '', + description: 'First value to be compared with', + acceptVariable: true, + show: { + 'conditions[$index].type': 'number' + } + }, + { + label: 'Operation', + name: 'operation', + type: 'options', + options: [ + { + label: 'Smaller', + name: 'smaller' + }, + { + label: 'Smaller Equal', + name: 'smallerEqual' + }, + { + label: 'Equal', + name: 'equal' + }, + { + label: 'Not Equal', + name: 'notEqual' + }, + { + label: 'Larger', + name: 'larger' + }, + { + label: 'Larger Equal', + name: 'largerEqual' + }, + { + label: 'Is Empty', + name: 'isEmpty' + }, + { + label: 'Not Empty', + name: 'notEmpty' + } + ], + default: 'equal', + description: 'Type of operation', + show: { + 'conditions[$index].type': 'number' + } + }, + { + label: 'Value 2', + name: 'value2', + type: 'number', + default: 0, + description: 'Second value to be compared with', + acceptVariable: true, + show: { + 'conditions[$index].type': 'number' + } + }, + /////////////////////////////////////// BOOLEAN //////////////////////////////////////// + { + label: 'Value 1', + name: 'value1', + type: 'boolean', + default: false, + description: 'First value to be compared with', + show: { + 'conditions[$index].type': 'boolean' + } + }, + { + label: 'Operation', + name: 'operation', + type: 'options', + options: [ + { + label: 'Equal', + name: 'equal' + }, + { + label: 'Not Equal', + name: 'notEqual' + } + ], + default: 'equal', + description: 'Type of operation', + show: { + 'conditions[$index].type': 'boolean' + } + }, + { + label: 'Value 2', + name: 'value2', + type: 'boolean', + default: false, + description: 'Second value to be compared with', + show: { + 'conditions[$index].type': 'boolean' + } + } + ] + } + ] + this.outputs = [ + { + label: '0', + name: '0', + description: 'Condition 0' + }, + { + label: '1', + name: '1', + description: 'Else' + } + ] + } + + async run(nodeData: INodeData, _: string, options: ICommonObject): Promise { + const 
state = options.agentflowRuntime?.state as ICommonObject + + const compareOperationFunctions: { + [key: string]: (value1: CommonType, value2: CommonType) => boolean + } = { + contains: (value1: CommonType, value2: CommonType) => (value1 || '').toString().includes((value2 || '').toString()), + notContains: (value1: CommonType, value2: CommonType) => !(value1 || '').toString().includes((value2 || '').toString()), + endsWith: (value1: CommonType, value2: CommonType) => (value1 as string).endsWith(value2 as string), + equal: (value1: CommonType, value2: CommonType) => value1 === value2, + notEqual: (value1: CommonType, value2: CommonType) => value1 !== value2, + larger: (value1: CommonType, value2: CommonType) => (Number(value1) || 0) > (Number(value2) || 0), + largerEqual: (value1: CommonType, value2: CommonType) => (Number(value1) || 0) >= (Number(value2) || 0), + smaller: (value1: CommonType, value2: CommonType) => (Number(value1) || 0) < (Number(value2) || 0), + smallerEqual: (value1: CommonType, value2: CommonType) => (Number(value1) || 0) <= (Number(value2) || 0), + startsWith: (value1: CommonType, value2: CommonType) => (value1 as string).startsWith(value2 as string), + isEmpty: (value1: CommonType) => [undefined, null, ''].includes(value1 as string), + notEmpty: (value1: CommonType) => ![undefined, null, ''].includes(value1 as string) + } + + const _conditions = nodeData.inputs?.conditions + const conditions: ICondition[] = typeof _conditions === 'string' ? 
JSON.parse(_conditions) : _conditions + const initialConditions = { ...conditions } + + for (const condition of conditions) { + const _value1 = condition.value1 + const _value2 = condition.value2 + const operation = condition.operation + + let value1: CommonType + let value2: CommonType + + switch (condition.type) { + case 'boolean': + value1 = _value1 + value2 = _value2 + break + case 'number': + value1 = parseFloat(_value1 as string) || 0 + value2 = parseFloat(_value2 as string) || 0 + break + default: // string + value1 = _value1 as string + value2 = _value2 as string + } + + const compareOperationResult = compareOperationFunctions[operation](value1, value2) + if (compareOperationResult) { + // find the matching condition + const conditionIndex = conditions.findIndex((c) => JSON.stringify(c) === JSON.stringify(condition)) + // add isFulfilled to the condition + if (conditionIndex > -1) { + conditions[conditionIndex] = { ...condition, isFulfilled: true } + } + break + } + } + + // If no condition is fullfilled, add isFulfilled to the ELSE condition + const dummyElseConditionData = { + type: 'string', + value1: '', + operation: 'equal', + value2: '' + } + if (!conditions.some((c) => c.isFulfilled)) { + conditions.push({ + ...dummyElseConditionData, + isFulfilled: true + }) + } else { + conditions.push({ + ...dummyElseConditionData, + isFulfilled: false + }) + } + + const returnOutput = { + id: nodeData.id, + name: this.name, + input: { conditions: initialConditions }, + output: { conditions }, + state + } + + return returnOutput + } +} + +module.exports = { nodeClass: Condition_Agentflow } diff --git a/packages/components/nodes/agentflow/ConditionAgent/ConditionAgent.ts b/packages/components/nodes/agentflow/ConditionAgent/ConditionAgent.ts new file mode 100644 index 000000000..6ec809f96 --- /dev/null +++ b/packages/components/nodes/agentflow/ConditionAgent/ConditionAgent.ts @@ -0,0 +1,600 @@ +import { AnalyticHandler } from '../../../src/handler' +import { 
ICommonObject, INode, INodeData, INodeOptionsValue, INodeOutputsValue, INodeParams } from '../../../src/Interface' +import { AIMessageChunk, BaseMessageLike } from '@langchain/core/messages' +import { + getPastChatHistoryImageMessages, + getUniqueImageMessages, + processMessagesWithImages, + replaceBase64ImagesWithFileReferences +} from '../utils' +import { CONDITION_AGENT_SYSTEM_PROMPT, DEFAULT_SUMMARIZER_TEMPLATE } from '../prompt' +import { BaseChatModel } from '@langchain/core/language_models/chat_models' + +class ConditionAgent_Agentflow implements INode { + label: string + name: string + version: number + description: string + type: string + icon: string + category: string + color: string + tags: string[] + baseClasses: string[] + inputs: INodeParams[] + outputs: INodeOutputsValue[] + + constructor() { + this.label = 'Condition Agent' + this.name = 'conditionAgentAgentflow' + this.version = 1.0 + this.type = 'ConditionAgent' + this.category = 'Agent Flows' + this.description = `Utilize an agent to split flows based on dynamic conditions` + this.baseClasses = [this.type] + this.color = '#ff8fab' + this.inputs = [ + { + label: 'Model', + name: 'conditionAgentModel', + type: 'asyncOptions', + loadMethod: 'listModels', + loadConfig: true + }, + { + label: 'Instructions', + name: 'conditionAgentInstructions', + type: 'string', + description: 'A general instructions of what the condition agent should do', + rows: 4, + acceptVariable: true, + placeholder: 'Determine if the user is interested in learning about AI' + }, + { + label: 'Input', + name: 'conditionAgentInput', + type: 'string', + description: 'Input to be used for the condition agent', + rows: 4, + acceptVariable: true, + default: '

{{ question }}

' + }, + { + label: 'Scenarios', + name: 'conditionAgentScenarios', + description: 'Define the scenarios that will be used as the conditions to split the flow', + type: 'array', + array: [ + { + label: 'Scenario', + name: 'scenario', + type: 'string', + placeholder: 'User is asking for a pizza' + } + ], + default: [ + { + scenario: '' + }, + { + scenario: '' + } + ] + } + /*{ + label: 'Enable Memory', + name: 'conditionAgentEnableMemory', + type: 'boolean', + description: 'Enable memory for the conversation thread', + default: true, + optional: true + }, + { + label: 'Memory Type', + name: 'conditionAgentMemoryType', + type: 'options', + options: [ + { + label: 'All Messages', + name: 'allMessages', + description: 'Retrieve all messages from the conversation' + }, + { + label: 'Window Size', + name: 'windowSize', + description: 'Uses a fixed window size to surface the last N messages' + }, + { + label: 'Conversation Summary', + name: 'conversationSummary', + description: 'Summarizes the whole conversation' + }, + { + label: 'Conversation Summary Buffer', + name: 'conversationSummaryBuffer', + description: 'Summarize conversations once token limit is reached. Default to 2000' + } + ], + optional: true, + default: 'allMessages', + show: { + conditionAgentEnableMemory: true + } + }, + { + label: 'Window Size', + name: 'conditionAgentMemoryWindowSize', + type: 'number', + default: '20', + description: 'Uses a fixed window size to surface the last N messages', + show: { + conditionAgentMemoryType: 'windowSize' + } + }, + { + label: 'Max Token Limit', + name: 'conditionAgentMemoryMaxTokenLimit', + type: 'number', + default: '2000', + description: 'Summarize conversations once token limit is reached. 
Default to 2000', + show: { + conditionAgentMemoryType: 'conversationSummaryBuffer' + } + }*/ + ] + this.outputs = [ + { + label: '0', + name: '0', + description: 'Condition 0' + }, + { + label: '1', + name: '1', + description: 'Else' + } + ] + } + + //@ts-ignore + loadMethods = { + async listModels(_: INodeData, options: ICommonObject): Promise { + const componentNodes = options.componentNodes as { + [key: string]: INode + } + + const returnOptions: INodeOptionsValue[] = [] + for (const nodeName in componentNodes) { + const componentNode = componentNodes[nodeName] + if (componentNode.category === 'Chat Models') { + if (componentNode.tags?.includes('LlamaIndex')) { + continue + } + returnOptions.push({ + label: componentNode.label, + name: nodeName, + imageSrc: componentNode.icon + }) + } + } + return returnOptions + } + } + + private parseJsonMarkdown(jsonString: string): any { + // Strip whitespace + jsonString = jsonString.trim() + const starts = ['```json', '```', '``', '`', '{'] + const ends = ['```', '``', '`', '}'] + + let startIndex = -1 + let endIndex = -1 + + // Find start of JSON + for (const s of starts) { + startIndex = jsonString.indexOf(s) + if (startIndex !== -1) { + if (jsonString[startIndex] !== '{') { + startIndex += s.length + } + break + } + } + + // Find end of JSON + if (startIndex !== -1) { + for (const e of ends) { + endIndex = jsonString.lastIndexOf(e, jsonString.length) + if (endIndex !== -1) { + if (jsonString[endIndex] === '}') { + endIndex += 1 + } + break + } + } + } + + if (startIndex !== -1 && endIndex !== -1 && startIndex < endIndex) { + const extractedContent = jsonString.slice(startIndex, endIndex).trim() + try { + return JSON.parse(extractedContent) + } catch (error) { + throw new Error(`Invalid JSON object. 
Error: ${error}`) + } + } + + throw new Error('Could not find JSON block in the output.') + } + + async run(nodeData: INodeData, question: string, options: ICommonObject): Promise { + let llmIds: ICommonObject | undefined + let analyticHandlers = options.analyticHandlers as AnalyticHandler + + try { + const abortController = options.abortController as AbortController + + // Extract input parameters + const model = nodeData.inputs?.conditionAgentModel as string + const modelConfig = nodeData.inputs?.conditionAgentModelConfig as ICommonObject + if (!model) { + throw new Error('Model is required') + } + const conditionAgentInput = nodeData.inputs?.conditionAgentInput as string + let input = conditionAgentInput || question + const conditionAgentInstructions = nodeData.inputs?.conditionAgentInstructions as string + + // Extract memory and configuration options + const enableMemory = nodeData.inputs?.conditionAgentEnableMemory as boolean + const memoryType = nodeData.inputs?.conditionAgentMemoryType as string + const _conditionAgentScenarios = nodeData.inputs?.conditionAgentScenarios as { scenario: string }[] + + // Extract runtime state and history + const state = options.agentflowRuntime?.state as ICommonObject + const pastChatHistory = (options.pastChatHistory as BaseMessageLike[]) ?? [] + const runtimeChatHistory = (options.agentflowRuntime?.chatHistory as BaseMessageLike[]) ?? 
[] + + // Initialize the LLM model instance + const nodeInstanceFilePath = options.componentNodes[model].filePath as string + const nodeModule = await import(nodeInstanceFilePath) + const newLLMNodeInstance = new nodeModule.nodeClass() + const newNodeData = { + ...nodeData, + credential: modelConfig['FLOWISE_CREDENTIAL_ID'], + inputs: { + ...nodeData.inputs, + ...modelConfig + } + } + let llmNodeInstance = (await newLLMNodeInstance.init(newNodeData, '', options)) as BaseChatModel + + const isStructuredOutput = + _conditionAgentScenarios && Array.isArray(_conditionAgentScenarios) && _conditionAgentScenarios.length > 0 + if (!isStructuredOutput) { + throw new Error('Scenarios are required') + } + + // Prepare messages array + const messages: BaseMessageLike[] = [ + { + role: 'system', + content: CONDITION_AGENT_SYSTEM_PROMPT + }, + { + role: 'user', + content: `{"input": "Hello", "scenarios": ["user is asking about AI", "default"], "instruction": "Your task is to check and see if user is asking topic about AI"}` + }, + { + role: 'assistant', + content: `\`\`\`json\n{"output": "default"}\n\`\`\`` + }, + { + role: 'user', + content: `{"input": "What is AIGC?", "scenarios": ["user is asking about AI", "default"], "instruction": "Your task is to check and see if user is asking topic about AI"}` + }, + { + role: 'assistant', + content: `\`\`\`json\n{"output": "user is asking about AI"}\n\`\`\`` + }, + { + role: 'user', + content: `{"input": "Can you explain deep learning?", "scenarios": ["user is interested in AI topics", "default"], "instruction": "Determine if the user is interested in learning about AI"}` + }, + { + role: 'assistant', + content: `\`\`\`json\n{"output": "user is interested in AI topics"}\n\`\`\`` + } + ] + // Use to store messages with image file references as we do not want to store the base64 data into database + let runtimeImageMessagesWithFileRef: BaseMessageLike[] = [] + // Use to keep track of past messages with image file references + let 
pastImageMessagesWithFileRef: BaseMessageLike[] = [] + + input = `{"input": ${input}, "scenarios": ${JSON.stringify( + _conditionAgentScenarios.map((scenario) => scenario.scenario) + )}, "instruction": ${conditionAgentInstructions}}` + + // Handle memory management if enabled + if (enableMemory) { + await this.handleMemory({ + messages, + memoryType, + pastChatHistory, + runtimeChatHistory, + llmNodeInstance, + nodeData, + input, + abortController, + options, + modelConfig, + runtimeImageMessagesWithFileRef, + pastImageMessagesWithFileRef + }) + } else { + /* + * If this is the first node: + * - Add images to messages if exist + */ + if (!runtimeChatHistory.length && options.uploads) { + const imageContents = await getUniqueImageMessages(options, messages, modelConfig) + if (imageContents) { + const { imageMessageWithBase64, imageMessageWithFileRef } = imageContents + messages.push(imageMessageWithBase64) + runtimeImageMessagesWithFileRef.push(imageMessageWithFileRef) + } + } + messages.push({ + role: 'user', + content: input + }) + } + + // Initialize response and determine if streaming is possible + let response: AIMessageChunk = new AIMessageChunk('') + + // Start analytics + if (analyticHandlers && options.parentTraceIds) { + const llmLabel = options?.componentNodes?.[model]?.label || model + llmIds = await analyticHandlers.onLLMStart(llmLabel, messages, options.parentTraceIds) + } + + // Track execution time + const startTime = Date.now() + + response = await llmNodeInstance.invoke(messages, { signal: abortController?.signal }) + + // Calculate execution time + const endTime = Date.now() + const timeDelta = endTime - startTime + + // End analytics tracking + if (analyticHandlers && llmIds) { + await analyticHandlers.onLLMEnd( + llmIds, + typeof response.content === 'string' ? 
response.content : JSON.stringify(response.content) + ) + } + + let calledOutputName = 'default' + try { + const parsedResponse = this.parseJsonMarkdown(response.content as string) + if (!parsedResponse.output) { + throw new Error('Missing "output" key in response') + } + calledOutputName = parsedResponse.output + } catch (error) { + console.warn(`Failed to parse LLM response: ${error}. Using default output.`) + } + + // Clean up empty inputs + for (const key in nodeData.inputs) { + if (nodeData.inputs[key] === '') { + delete nodeData.inputs[key] + } + } + + // Find the first exact match + const matchedScenarioIndex = _conditionAgentScenarios.findIndex( + (scenario) => calledOutputName.toLowerCase() === scenario.scenario.toLowerCase() + ) + + const conditions = _conditionAgentScenarios.map((scenario, index) => { + return { + output: scenario.scenario, + isFulfilled: index === matchedScenarioIndex + } + }) + + // Replace the actual messages array with one that includes the file references for images instead of base64 data + const messagesWithFileReferences = replaceBase64ImagesWithFileReferences( + messages, + runtimeImageMessagesWithFileRef, + pastImageMessagesWithFileRef + ) + + // Only add to runtime chat history if this is the first node + const inputMessages = [] + if (!runtimeChatHistory.length) { + if (runtimeImageMessagesWithFileRef.length) { + inputMessages.push(...runtimeImageMessagesWithFileRef) + } + if (input && typeof input === 'string') { + inputMessages.push({ role: 'user', content: question }) + } + } + + const returnOutput = { + id: nodeData.id, + name: this.name, + input: { messages: messagesWithFileReferences }, + output: { + conditions, + content: typeof response.content === 'string' ? 
response.content : JSON.stringify(response.content), + timeMetadata: { + start: startTime, + end: endTime, + delta: timeDelta + } + }, + state, + chatHistory: [...inputMessages] + } + + return returnOutput + } catch (error) { + if (options.analyticHandlers && llmIds) { + await options.analyticHandlers.onLLMError(llmIds, error instanceof Error ? error.message : String(error)) + } + + if (error instanceof Error && error.message === 'Aborted') { + throw error + } + throw new Error(`Error in Condition Agent node: ${error instanceof Error ? error.message : String(error)}`) + } + } + + /** + * Handles memory management based on the specified memory type + */ + private async handleMemory({ + messages, + memoryType, + pastChatHistory, + runtimeChatHistory, + llmNodeInstance, + nodeData, + input, + abortController, + options, + modelConfig, + runtimeImageMessagesWithFileRef, + pastImageMessagesWithFileRef + }: { + messages: BaseMessageLike[] + memoryType: string + pastChatHistory: BaseMessageLike[] + runtimeChatHistory: BaseMessageLike[] + llmNodeInstance: BaseChatModel + nodeData: INodeData + input: string + abortController: AbortController + options: ICommonObject + modelConfig: ICommonObject + runtimeImageMessagesWithFileRef: BaseMessageLike[] + pastImageMessagesWithFileRef: BaseMessageLike[] + }): Promise { + const { updatedPastMessages, transformedPastMessages } = await getPastChatHistoryImageMessages(pastChatHistory, options) + pastChatHistory = updatedPastMessages + pastImageMessagesWithFileRef.push(...transformedPastMessages) + + let pastMessages = [...pastChatHistory, ...runtimeChatHistory] + if (!runtimeChatHistory.length) { + /* + * If this is the first node: + * - Add images to messages if exist + */ + if (options.uploads) { + const imageContents = await getUniqueImageMessages(options, messages, modelConfig) + if (imageContents) { + const { imageMessageWithBase64, imageMessageWithFileRef } = imageContents + pastMessages.push(imageMessageWithBase64) + 
runtimeImageMessagesWithFileRef.push(imageMessageWithFileRef) + } + } + } + const { updatedMessages, transformedMessages } = await processMessagesWithImages(pastMessages, options) + pastMessages = updatedMessages + pastImageMessagesWithFileRef.push(...transformedMessages) + + if (pastMessages.length > 0) { + if (memoryType === 'windowSize') { + // Window memory: Keep the last N messages + const windowSize = nodeData.inputs?.conditionAgentMemoryWindowSize as number + const windowedMessages = pastMessages.slice(-windowSize * 2) + messages.push(...windowedMessages) + } else if (memoryType === 'conversationSummary') { + // Summary memory: Summarize all past messages + const summary = await llmNodeInstance.invoke( + [ + { + role: 'user', + content: DEFAULT_SUMMARIZER_TEMPLATE.replace( + '{conversation}', + pastMessages.map((msg: any) => `${msg.role}: ${msg.content}`).join('\n') + ) + } + ], + { signal: abortController?.signal } + ) + messages.push({ role: 'assistant', content: summary.content as string }) + } else if (memoryType === 'conversationSummaryBuffer') { + // Summary buffer: Summarize messages that exceed token limit + await this.handleSummaryBuffer(messages, pastMessages, llmNodeInstance, nodeData, abortController) + } else { + // Default: Use all messages + messages.push(...pastMessages) + } + } + + messages.push({ + role: 'user', + content: input + }) + } + + /** + * Handles conversation summary buffer memory type + */ + private async handleSummaryBuffer( + messages: BaseMessageLike[], + pastMessages: BaseMessageLike[], + llmNodeInstance: BaseChatModel, + nodeData: INodeData, + abortController: AbortController + ): Promise { + const maxTokenLimit = (nodeData.inputs?.conditionAgentMemoryMaxTokenLimit as number) || 2000 + + // Convert past messages to a format suitable for token counting + const messagesString = pastMessages.map((msg: any) => `${msg.role}: ${msg.content}`).join('\n') + const tokenCount = await llmNodeInstance.getNumTokens(messagesString) + + 
if (tokenCount > maxTokenLimit) { + // Calculate how many messages to summarize (messages that exceed the token limit) + let currBufferLength = tokenCount + const messagesToSummarize = [] + const remainingMessages = [...pastMessages] + + // Remove messages from the beginning until we're under the token limit + while (currBufferLength > maxTokenLimit && remainingMessages.length > 0) { + const poppedMessage = remainingMessages.shift() + if (poppedMessage) { + messagesToSummarize.push(poppedMessage) + // Recalculate token count for remaining messages + const remainingMessagesString = remainingMessages.map((msg: any) => `${msg.role}: ${msg.content}`).join('\n') + currBufferLength = await llmNodeInstance.getNumTokens(remainingMessagesString) + } + } + + // Summarize the messages that were removed + const messagesToSummarizeString = messagesToSummarize.map((msg: any) => `${msg.role}: ${msg.content}`).join('\n') + + const summary = await llmNodeInstance.invoke( + [ + { + role: 'user', + content: DEFAULT_SUMMARIZER_TEMPLATE.replace('{conversation}', messagesToSummarizeString) + } + ], + { signal: abortController?.signal } + ) + + // Add summary as a system message at the beginning, then add remaining messages + messages.push({ role: 'system', content: `Previous conversation summary: ${summary.content}` }) + messages.push(...remainingMessages) + } else { + // If under token limit, use all messages + messages.push(...pastMessages) + } + } +} + +module.exports = { nodeClass: ConditionAgent_Agentflow } diff --git a/packages/components/nodes/agentflow/CustomFunction/CustomFunction.ts b/packages/components/nodes/agentflow/CustomFunction/CustomFunction.ts new file mode 100644 index 000000000..6922c651b --- /dev/null +++ b/packages/components/nodes/agentflow/CustomFunction/CustomFunction.ts @@ -0,0 +1,241 @@ +import { DataSource } from 'typeorm' +import { + ICommonObject, + IDatabaseEntity, + INode, + INodeData, + INodeOptionsValue, + INodeParams, + IServerSideEventStreamer +} 
from '../../../src/Interface' +import { availableDependencies, defaultAllowBuiltInDep, getVars, prepareSandboxVars } from '../../../src/utils' +import { NodeVM } from '@flowiseai/nodevm' +import { updateFlowState } from '../utils' + +interface ICustomFunctionInputVariables { + variableName: string + variableValue: string +} + +const exampleFunc = `/* +* You can use any libraries imported in Flowise +* You can use properties specified in Input Schema as variables. Ex: Property = userid, Variable = $userid +* You can get default flow config: $flow.sessionId, $flow.chatId, $flow.chatflowId, $flow.input, $flow.state +* You can get custom variables: $vars. +* Must return a string value at the end of function +*/ + +const fetch = require('node-fetch'); +const url = 'https://api.open-meteo.com/v1/forecast?latitude=52.52&longitude=13.41¤t_weather=true'; +const options = { + method: 'GET', + headers: { + 'Content-Type': 'application/json' + } +}; +try { + const response = await fetch(url, options); + const text = await response.text(); + return text; +} catch (error) { + console.error(error); + return ''; +}` + +class CustomFunction_Agentflow implements INode { + label: string + name: string + version: number + description: string + type: string + icon: string + category: string + color: string + hideOutput: boolean + hint: string + baseClasses: string[] + documentation?: string + credential: INodeParams + inputs: INodeParams[] + + constructor() { + this.label = 'Custom Function' + this.name = 'customFunctionAgentflow' + this.version = 1.0 + this.type = 'CustomFunction' + this.category = 'Agent Flows' + this.description = 'Execute custom function' + this.baseClasses = [this.type] + this.color = '#E4B7FF' + this.inputs = [ + { + label: 'Input Variables', + name: 'customFunctionInputVariables', + description: 'Input variables can be used in the function with prefix $. 
For example: $foo', + type: 'array', + optional: true, + acceptVariable: true, + array: [ + { + label: 'Variable Name', + name: 'variableName', + type: 'string' + }, + { + label: 'Variable Value', + name: 'variableValue', + type: 'string', + acceptVariable: true + } + ] + }, + { + label: 'Javascript Function', + name: 'customFunctionJavascriptFunction', + type: 'code', + codeExample: exampleFunc, + description: 'The function to execute. Must return a string or an object that can be converted to a string.' + }, + { + label: 'Update Flow State', + name: 'customFunctionUpdateState', + description: 'Update runtime state during the execution of the workflow', + type: 'array', + optional: true, + acceptVariable: true, + array: [ + { + label: 'Key', + name: 'key', + type: 'asyncOptions', + loadMethod: 'listRuntimeStateKeys', + freeSolo: true + }, + { + label: 'Value', + name: 'value', + type: 'string', + acceptVariable: true, + acceptNodeOutputAsVariable: true + } + ] + } + ] + } + + //@ts-ignore + loadMethods = { + async listRuntimeStateKeys(_: INodeData, options: ICommonObject): Promise { + const previousNodes = options.previousNodes as ICommonObject[] + const startAgentflowNode = previousNodes.find((node) => node.name === 'startAgentflow') + const state = startAgentflowNode?.inputs?.startState as ICommonObject[] + return state.map((item) => ({ label: item.key, name: item.key })) + } + } + + async run(nodeData: INodeData, input: string, options: ICommonObject): Promise { + const javascriptFunction = nodeData.inputs?.customFunctionJavascriptFunction as string + const functionInputVariables = nodeData.inputs?.customFunctionInputVariables as ICustomFunctionInputVariables[] + const _customFunctionUpdateState = nodeData.inputs?.customFunctionUpdateState + + const state = options.agentflowRuntime?.state as ICommonObject + const chatId = options.chatId as string + const isLastNode = options.isLastNode as boolean + const isStreamable = isLastNode && options.sseStreamer !== 
undefined + + const appDataSource = options.appDataSource as DataSource + const databaseEntities = options.databaseEntities as IDatabaseEntity + + // Update flow state if needed + let newState = { ...state } + if (_customFunctionUpdateState && Array.isArray(_customFunctionUpdateState) && _customFunctionUpdateState.length > 0) { + newState = updateFlowState(state, _customFunctionUpdateState) + } + + const variables = await getVars(appDataSource, databaseEntities, nodeData) + const flow = { + chatflowId: options.chatflowid, + sessionId: options.sessionId, + chatId: options.chatId, + input + } + + let sandbox: any = { + $input: input, + util: undefined, + Symbol: undefined, + child_process: undefined, + fs: undefined, + process: undefined + } + sandbox['$vars'] = prepareSandboxVars(variables) + sandbox['$flow'] = flow + + for (const item of functionInputVariables) { + const variableName = item.variableName + const variableValue = item.variableValue + sandbox[`$${variableName}`] = variableValue + } + + const builtinDeps = process.env.TOOL_FUNCTION_BUILTIN_DEP + ? defaultAllowBuiltInDep.concat(process.env.TOOL_FUNCTION_BUILTIN_DEP.split(',')) + : defaultAllowBuiltInDep + const externalDeps = process.env.TOOL_FUNCTION_EXTERNAL_DEP ? 
process.env.TOOL_FUNCTION_EXTERNAL_DEP.split(',') : [] + const deps = availableDependencies.concat(externalDeps) + + const nodeVMOptions = { + console: 'inherit', + sandbox, + require: { + external: { modules: deps }, + builtin: builtinDeps + }, + eval: false, + wasm: false, + timeout: 10000 + } as any + + const vm = new NodeVM(nodeVMOptions) + try { + const response = await vm.run(`module.exports = async function() {${javascriptFunction}}()`, __dirname) + + let finalOutput = response + if (typeof response === 'object') { + finalOutput = JSON.stringify(response, null, 2) + } + + if (isStreamable) { + const sseStreamer: IServerSideEventStreamer = options.sseStreamer + sseStreamer.streamTokenEvent(chatId, finalOutput) + } + + // Process template variables in state + if (newState && Object.keys(newState).length > 0) { + for (const key in newState) { + if (newState[key].toString().includes('{{ output }}')) { + newState[key] = finalOutput + } + } + } + + const returnOutput = { + id: nodeData.id, + name: this.name, + input: { + inputVariables: functionInputVariables, + code: javascriptFunction + }, + output: { + content: finalOutput + }, + state: newState + } + + return returnOutput + } catch (e) { + throw new Error(e) + } + } +} + +module.exports = { nodeClass: CustomFunction_Agentflow } diff --git a/packages/components/nodes/agentflow/DirectReply/DirectReply.ts b/packages/components/nodes/agentflow/DirectReply/DirectReply.ts new file mode 100644 index 000000000..b30a8635d --- /dev/null +++ b/packages/components/nodes/agentflow/DirectReply/DirectReply.ts @@ -0,0 +1,67 @@ +import { ICommonObject, INode, INodeData, INodeParams, IServerSideEventStreamer } from '../../../src/Interface' + +class DirectReply_Agentflow implements INode { + label: string + name: string + version: number + description: string + type: string + icon: string + category: string + color: string + hideOutput: boolean + hint: string + baseClasses: string[] + documentation?: string + credential: 
INodeParams + inputs: INodeParams[] + + constructor() { + this.label = 'Direct Reply' + this.name = 'directReplyAgentflow' + this.version = 1.0 + this.type = 'DirectReply' + this.category = 'Agent Flows' + this.description = 'Directly reply to the user with a message' + this.baseClasses = [this.type] + this.color = '#4DDBBB' + this.hideOutput = true + this.inputs = [ + { + label: 'Message', + name: 'directReplyMessage', + type: 'string', + rows: 4, + acceptVariable: true + } + ] + } + + async run(nodeData: INodeData, _: string, options: ICommonObject): Promise { + const directReplyMessage = nodeData.inputs?.directReplyMessage as string + + const state = options.agentflowRuntime?.state as ICommonObject + const chatId = options.chatId as string + const isLastNode = options.isLastNode as boolean + const isStreamable = isLastNode && options.sseStreamer !== undefined + + if (isStreamable) { + const sseStreamer: IServerSideEventStreamer = options.sseStreamer + sseStreamer.streamTokenEvent(chatId, directReplyMessage) + } + + const returnOutput = { + id: nodeData.id, + name: this.name, + input: {}, + output: { + content: directReplyMessage + }, + state + } + + return returnOutput + } +} + +module.exports = { nodeClass: DirectReply_Agentflow } diff --git a/packages/components/nodes/agentflow/ExecuteFlow/ExecuteFlow.ts b/packages/components/nodes/agentflow/ExecuteFlow/ExecuteFlow.ts new file mode 100644 index 000000000..26e5df7b6 --- /dev/null +++ b/packages/components/nodes/agentflow/ExecuteFlow/ExecuteFlow.ts @@ -0,0 +1,297 @@ +import { + ICommonObject, + IDatabaseEntity, + INode, + INodeData, + INodeOptionsValue, + INodeParams, + IServerSideEventStreamer +} from '../../../src/Interface' +import axios, { AxiosRequestConfig } from 'axios' +import { getCredentialData, getCredentialParam } from '../../../src/utils' +import { DataSource } from 'typeorm' +import { BaseMessageLike } from '@langchain/core/messages' +import { updateFlowState } from '../utils' + +class 
ExecuteFlow_Agentflow implements INode { + label: string + name: string + version: number + description: string + type: string + icon: string + category: string + color: string + baseClasses: string[] + documentation?: string + credential: INodeParams + inputs: INodeParams[] + + constructor() { + this.label = 'Execute Flow' + this.name = 'executeFlowAgentflow' + this.version = 1.0 + this.type = 'ExecuteFlow' + this.category = 'Agent Flows' + this.description = 'Execute another flow' + this.baseClasses = [this.type] + this.color = '#a3b18a' + this.credential = { + label: 'Connect Credential', + name: 'credential', + type: 'credential', + credentialNames: ['chatflowApi'], + optional: true + } + this.inputs = [ + { + label: 'Select Flow', + name: 'executeFlowSelectedFlow', + type: 'asyncOptions', + loadMethod: 'listFlows' + }, + { + label: 'Input', + name: 'executeFlowInput', + type: 'string', + rows: 4, + acceptVariable: true + }, + { + label: 'Override Config', + name: 'executeFlowOverrideConfig', + description: 'Override the config passed to the flow', + type: 'json', + optional: true + }, + { + label: 'Base URL', + name: 'executeFlowBaseURL', + type: 'string', + description: + 'Base URL to Flowise. By default, it is the URL of the incoming request. 
Useful when you need to execute flow through an alternative route.', + placeholder: 'http://localhost:3000', + optional: true + }, + { + label: 'Return Response As', + name: 'executeFlowReturnResponseAs', + type: 'options', + options: [ + { + label: 'User Message', + name: 'userMessage' + }, + { + label: 'Assistant Message', + name: 'assistantMessage' + } + ], + default: 'userMessage' + }, + { + label: 'Update Flow State', + name: 'executeFlowUpdateState', + description: 'Update runtime state during the execution of the workflow', + type: 'array', + optional: true, + acceptVariable: true, + array: [ + { + label: 'Key', + name: 'key', + type: 'asyncOptions', + loadMethod: 'listRuntimeStateKeys', + freeSolo: true + }, + { + label: 'Value', + name: 'value', + type: 'string', + acceptVariable: true, + acceptNodeOutputAsVariable: true + } + ] + } + ] + } + + //@ts-ignore + loadMethods = { + async listFlows(_: INodeData, options: ICommonObject): Promise { + const returnData: INodeOptionsValue[] = [] + + const appDataSource = options.appDataSource as DataSource + const databaseEntities = options.databaseEntities as IDatabaseEntity + if (appDataSource === undefined || !appDataSource) { + return returnData + } + + const chatflows = await appDataSource.getRepository(databaseEntities['ChatFlow']).find() + + for (let i = 0; i < chatflows.length; i += 1) { + let cfType = 'Chatflow' + if (chatflows[i].type === 'AGENTFLOW') { + cfType = 'Agentflow V2' + } else if (chatflows[i].type === 'MULTIAGENT') { + cfType = 'Agentflow V1' + } + const data = { + label: chatflows[i].name, + name: chatflows[i].id, + description: cfType + } as INodeOptionsValue + returnData.push(data) + } + + // order by label + return returnData.sort((a, b) => a.label.localeCompare(b.label)) + }, + async listRuntimeStateKeys(_: INodeData, options: ICommonObject): Promise { + const previousNodes = options.previousNodes as ICommonObject[] + const startAgentflowNode = previousNodes.find((node) => node.name === 
'startAgentflow') + const state = startAgentflowNode?.inputs?.startState as ICommonObject[] + return state.map((item) => ({ label: item.key, name: item.key })) + } + } + + async run(nodeData: INodeData, _: string, options: ICommonObject): Promise { + const baseURL = (nodeData.inputs?.executeFlowBaseURL as string) || (options.baseURL as string) + const selectedFlowId = nodeData.inputs?.executeFlowSelectedFlow as string + const flowInput = nodeData.inputs?.executeFlowInput as string + const returnResponseAs = nodeData.inputs?.executeFlowReturnResponseAs as string + const _executeFlowUpdateState = nodeData.inputs?.executeFlowUpdateState + const overrideConfig = + typeof nodeData.inputs?.executeFlowOverrideConfig === 'string' && + nodeData.inputs.executeFlowOverrideConfig.startsWith('{') && + nodeData.inputs.executeFlowOverrideConfig.endsWith('}') + ? JSON.parse(nodeData.inputs.executeFlowOverrideConfig) + : nodeData.inputs?.executeFlowOverrideConfig + + const state = options.agentflowRuntime?.state as ICommonObject + const runtimeChatHistory = (options.agentflowRuntime?.chatHistory as BaseMessageLike[]) ?? [] + const isLastNode = options.isLastNode as boolean + const sseStreamer: IServerSideEventStreamer | undefined = options.sseStreamer + + try { + const credentialData = await getCredentialData(nodeData.credential ?? 
'', options) + const chatflowApiKey = getCredentialParam('chatflowApiKey', credentialData, nodeData) + + if (selectedFlowId === options.chatflowid) throw new Error('Cannot call the same agentflow!') + + let headers: Record = { + 'Content-Type': 'application/json' + } + if (chatflowApiKey) headers = { ...headers, Authorization: `Bearer ${chatflowApiKey}` } + + const finalUrl = `${baseURL}/api/v1/prediction/${selectedFlowId}` + const requestConfig: AxiosRequestConfig = { + method: 'POST', + url: finalUrl, + headers, + data: { + question: flowInput, + chatId: options.chatId, + overrideConfig + } + } + + const response = await axios(requestConfig) + + let resultText = '' + if (response.data.text) resultText = response.data.text + else if (response.data.json) resultText = '```json\n' + JSON.stringify(response.data.json, null, 2) + else resultText = JSON.stringify(response.data, null, 2) + + if (isLastNode && sseStreamer) { + sseStreamer.streamTokenEvent(options.chatId, resultText) + } + + // Update flow state if needed + let newState = { ...state } + if (_executeFlowUpdateState && Array.isArray(_executeFlowUpdateState) && _executeFlowUpdateState.length > 0) { + newState = updateFlowState(state, _executeFlowUpdateState) + } + + // Process template variables in state + if (newState && Object.keys(newState).length > 0) { + for (const key in newState) { + if (newState[key].toString().includes('{{ output }}')) { + newState[key] = resultText + } + } + } + + // Only add to runtime chat history if this is the first node + const inputMessages = [] + if (!runtimeChatHistory.length) { + inputMessages.push({ role: 'user', content: flowInput }) + } + + let returnRole = 'user' + if (returnResponseAs === 'assistantMessage') { + returnRole = 'assistant' + } + + const returnOutput = { + id: nodeData.id, + name: this.name, + input: { + messages: [ + { + role: 'user', + content: flowInput + } + ] + }, + output: { + content: resultText + }, + state: newState, + chatHistory: [ + 
...inputMessages, + { + role: returnRole, + content: resultText, + name: nodeData?.label ? nodeData?.label.toLowerCase().replace(/\s/g, '_').trim() : nodeData?.id + } + ] + } + + return returnOutput + } catch (error) { + console.error('ExecuteFlow Error:', error) + + // Format error response + const errorResponse: any = { + id: nodeData.id, + name: this.name, + input: { + messages: [ + { + role: 'user', + content: flowInput + } + ] + }, + error: { + name: error.name || 'Error', + message: error.message || 'An error occurred during the execution of the flow' + }, + state + } + + // Add more error details if available + if (error.response) { + errorResponse.error.status = error.response.status + errorResponse.error.statusText = error.response.statusText + errorResponse.error.data = error.response.data + errorResponse.error.headers = error.response.headers + } + + throw new Error(error) + } + } +} + +module.exports = { nodeClass: ExecuteFlow_Agentflow } diff --git a/packages/components/nodes/agentflow/HTTP/HTTP.ts b/packages/components/nodes/agentflow/HTTP/HTTP.ts new file mode 100644 index 000000000..752d6dd0b --- /dev/null +++ b/packages/components/nodes/agentflow/HTTP/HTTP.ts @@ -0,0 +1,368 @@ +import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface' +import axios, { AxiosRequestConfig, Method, ResponseType } from 'axios' +import FormData from 'form-data' +import * as querystring from 'querystring' +import { getCredentialData, getCredentialParam } from '../../../src/utils' + +class HTTP_Agentflow implements INode { + label: string + name: string + version: number + description: string + type: string + icon: string + category: string + color: string + baseClasses: string[] + documentation?: string + credential: INodeParams + inputs: INodeParams[] + + constructor() { + this.label = 'HTTP' + this.name = 'httpAgentflow' + this.version = 1.0 + this.type = 'HTTP' + this.category = 'Agent Flows' + this.description = 'Send a HTTP request' + 
this.baseClasses = [this.type] + this.color = '#FF7F7F' + this.credential = { + label: 'HTTP Credential', + name: 'credential', + type: 'credential', + credentialNames: ['httpBasicAuth', 'httpBearerToken', 'httpApiKey'], + optional: true + } + this.inputs = [ + { + label: 'Method', + name: 'method', + type: 'options', + options: [ + { + label: 'GET', + name: 'GET' + }, + { + label: 'POST', + name: 'POST' + }, + { + label: 'PUT', + name: 'PUT' + }, + { + label: 'DELETE', + name: 'DELETE' + }, + { + label: 'PATCH', + name: 'PATCH' + } + ], + default: 'GET' + }, + { + label: 'URL', + name: 'url', + type: 'string' + }, + { + label: 'Headers', + name: 'headers', + type: 'array', + array: [ + { + label: 'Key', + name: 'key', + type: 'string', + default: '' + }, + { + label: 'Value', + name: 'value', + type: 'string', + default: '' + } + ], + optional: true + }, + { + label: 'Query Params', + name: 'queryParams', + type: 'array', + array: [ + { + label: 'Key', + name: 'key', + type: 'string', + default: '' + }, + { + label: 'Value', + name: 'value', + type: 'string', + default: '' + } + ], + optional: true + }, + { + label: 'Body Type', + name: 'bodyType', + type: 'options', + options: [ + { + label: 'JSON', + name: 'json' + }, + { + label: 'Raw', + name: 'raw' + }, + { + label: 'Form Data', + name: 'formData' + }, + { + label: 'x-www-form-urlencoded', + name: 'xWwwFormUrlencoded' + } + ], + optional: true + }, + { + label: 'Body', + name: 'body', + type: 'string', + acceptVariable: true, + rows: 4, + show: { + bodyType: ['raw', 'json'] + }, + optional: true + }, + { + label: 'Body', + name: 'body', + type: 'array', + show: { + bodyType: ['xWwwFormUrlencoded', 'formData'] + }, + array: [ + { + label: 'Key', + name: 'key', + type: 'string', + default: '' + }, + { + label: 'Value', + name: 'value', + type: 'string', + default: '' + } + ], + optional: true + }, + { + label: 'Response Type', + name: 'responseType', + type: 'options', + options: [ + { + label: 'JSON', + name: 
'json' + }, + { + label: 'Text', + name: 'text' + }, + { + label: 'Array Buffer', + name: 'arraybuffer' + }, + { + label: 'Raw (Base64)', + name: 'base64' + } + ], + optional: true + } + ] + } + + async run(nodeData: INodeData, _: string, options: ICommonObject): Promise { + const method = nodeData.inputs?.method as 'GET' | 'POST' | 'PUT' | 'DELETE' | 'PATCH' + const url = nodeData.inputs?.url as string + const headers = nodeData.inputs?.headers as ICommonObject + const queryParams = nodeData.inputs?.queryParams as ICommonObject + const bodyType = nodeData.inputs?.bodyType as 'json' | 'raw' | 'formData' | 'xWwwFormUrlencoded' + const body = nodeData.inputs?.body as ICommonObject | string | ICommonObject[] + const responseType = nodeData.inputs?.responseType as 'json' | 'text' | 'arraybuffer' | 'base64' + + const state = options.agentflowRuntime?.state as ICommonObject + + try { + // Prepare headers + const requestHeaders: Record = {} + + // Add headers from inputs + if (headers && Array.isArray(headers)) { + for (const header of headers) { + if (header.key && header.value) { + requestHeaders[header.key] = header.value + } + } + } + + // Add credentials if provided + const credentialData = await getCredentialData(nodeData.credential ?? 
'', options) + if (credentialData && Object.keys(credentialData).length !== 0) { + const basicAuthUsername = getCredentialParam('username', credentialData, nodeData) + const basicAuthPassword = getCredentialParam('password', credentialData, nodeData) + const bearerToken = getCredentialParam('token', credentialData, nodeData) + const apiKeyName = getCredentialParam('key', credentialData, nodeData) + const apiKeyValue = getCredentialParam('value', credentialData, nodeData) + + // Determine which type of auth to use based on available credentials + if (basicAuthUsername && basicAuthPassword) { + // Basic Auth + const auth = Buffer.from(`${basicAuthUsername}:${basicAuthPassword}`).toString('base64') + requestHeaders['Authorization'] = `Basic ${auth}` + } else if (bearerToken) { + // Bearer Token + requestHeaders['Authorization'] = `Bearer ${bearerToken}` + } else if (apiKeyName && apiKeyValue) { + // API Key in header + requestHeaders[apiKeyName] = apiKeyValue + } + } + + // Prepare query parameters + let queryString = '' + if (queryParams && Array.isArray(queryParams)) { + const params = new URLSearchParams() + for (const param of queryParams) { + if (param.key && param.value) { + params.append(param.key, param.value) + } + } + queryString = params.toString() + } + + // Build final URL with query parameters + const finalUrl = queryString ? `${url}${url.includes('?') ? '&' : '?'}${queryString}` : url + + // Prepare request config + const requestConfig: AxiosRequestConfig = { + method: method as Method, + url: finalUrl, + headers: requestHeaders, + responseType: (responseType || 'json') as ResponseType + } + + // Handle request body based on body type + if (method !== 'GET' && body) { + switch (bodyType) { + case 'json': + requestConfig.data = typeof body === 'string' ? 
JSON.parse(body) : body + requestHeaders['Content-Type'] = 'application/json' + break + case 'raw': + requestConfig.data = body + break + case 'formData': { + const formData = new FormData() + if (Array.isArray(body) && body.length > 0) { + for (const item of body) { + formData.append(item.key, item.value) + } + } + requestConfig.data = formData + break + } + case 'xWwwFormUrlencoded': + requestConfig.data = querystring.stringify(typeof body === 'string' ? JSON.parse(body) : body) + requestHeaders['Content-Type'] = 'application/x-www-form-urlencoded' + break + } + } + + // Make the HTTP request + const response = await axios(requestConfig) + + // Process response based on response type + let responseData + if (responseType === 'base64' && response.data) { + responseData = Buffer.from(response.data, 'binary').toString('base64') + } else { + responseData = response.data + } + + const returnOutput = { + id: nodeData.id, + name: this.name, + input: { + http: { + method, + url, + headers, + queryParams, + bodyType, + body, + responseType + } + }, + output: { + http: { + data: responseData, + status: response.status, + statusText: response.statusText, + headers: response.headers + } + }, + state + } + + return returnOutput + } catch (error) { + console.error('HTTP Request Error:', error) + + // Format error response + const errorResponse: any = { + id: nodeData.id, + name: this.name, + input: { + http: { + method, + url, + headers, + queryParams, + bodyType, + body, + responseType + } + }, + error: { + name: error.name || 'Error', + message: error.message || 'An error occurred during the HTTP request' + }, + state + } + + // Add more error details if available + if (error.response) { + errorResponse.error.status = error.response.status + errorResponse.error.statusText = error.response.statusText + errorResponse.error.data = error.response.data + errorResponse.error.headers = error.response.headers + } + + throw new Error(error) + } + } +} + +module.exports = { nodeClass: 
HTTP_Agentflow } diff --git a/packages/components/nodes/agentflow/HumanInput/HumanInput.ts b/packages/components/nodes/agentflow/HumanInput/HumanInput.ts new file mode 100644 index 000000000..6fa388e26 --- /dev/null +++ b/packages/components/nodes/agentflow/HumanInput/HumanInput.ts @@ -0,0 +1,271 @@ +import { BaseChatModel } from '@langchain/core/language_models/chat_models' +import { + ICommonObject, + ICondition, + IHumanInput, + INode, + INodeData, + INodeOptionsValue, + INodeOutputsValue, + INodeParams, + IServerSideEventStreamer +} from '../../../src/Interface' +import { AIMessageChunk, BaseMessageLike } from '@langchain/core/messages' +import { DEFAULT_HUMAN_INPUT_DESCRIPTION, DEFAULT_HUMAN_INPUT_DESCRIPTION_HTML } from '../prompt' + +class HumanInput_Agentflow implements INode { + label: string + name: string + version: number + description: string + type: string + icon: string + category: string + color: string + baseClasses: string[] + documentation?: string + credential: INodeParams + inputs: INodeParams[] + outputs: INodeOutputsValue[] + + constructor() { + this.label = 'Human Input' + this.name = 'humanInputAgentflow' + this.version = 1.0 + this.type = 'HumanInput' + this.category = 'Agent Flows' + this.description = 'Request human input, approval or rejection during execution' + this.color = '#6E6EFD' + this.baseClasses = [this.type] + this.inputs = [ + { + label: 'Description Type', + name: 'humanInputDescriptionType', + type: 'options', + options: [ + { + label: 'Fixed', + name: 'fixed', + description: 'Specify a fixed description' + }, + { + label: 'Dynamic', + name: 'dynamic', + description: 'Use LLM to generate a description' + } + ] + }, + { + label: 'Description', + name: 'humanInputDescription', + type: 'string', + placeholder: 'Are you sure you want to proceed?', + acceptVariable: true, + rows: 4, + show: { + humanInputDescriptionType: 'fixed' + } + }, + { + label: 'Model', + name: 'humanInputModel', + type: 'asyncOptions', + loadMethod: 
'listModels', + loadConfig: true, + show: { + humanInputDescriptionType: 'dynamic' + } + }, + { + label: 'Prompt', + name: 'humanInputModelPrompt', + type: 'string', + default: DEFAULT_HUMAN_INPUT_DESCRIPTION_HTML, + acceptVariable: true, + generateInstruction: true, + rows: 4, + show: { + humanInputDescriptionType: 'dynamic' + } + }, + { + label: 'Enable Feedback', + name: 'humanInputEnableFeedback', + type: 'boolean', + default: true + } + ] + this.outputs = [ + { + label: 'Proceed', + name: 'proceed' + }, + { + label: 'Reject', + name: 'reject' + } + ] + } + + //@ts-ignore + loadMethods = { + async listModels(_: INodeData, options: ICommonObject): Promise { + const componentNodes = options.componentNodes as { + [key: string]: INode + } + + const returnOptions: INodeOptionsValue[] = [] + for (const nodeName in componentNodes) { + const componentNode = componentNodes[nodeName] + if (componentNode.category === 'Chat Models') { + if (componentNode.tags?.includes('LlamaIndex')) { + continue + } + returnOptions.push({ + label: componentNode.label, + name: nodeName, + imageSrc: componentNode.icon + }) + } + } + return returnOptions + } + } + + async run(nodeData: INodeData, _: string, options: ICommonObject): Promise { + const _humanInput = nodeData.inputs?.humanInput + const humanInput: IHumanInput = typeof _humanInput === 'string' ? JSON.parse(_humanInput) : _humanInput + + const humanInputEnableFeedback = nodeData.inputs?.humanInputEnableFeedback as boolean + let humanInputDescriptionType = nodeData.inputs?.humanInputDescriptionType as string + const model = nodeData.inputs?.humanInputModel as string + const modelConfig = nodeData.inputs?.humanInputModelConfig as ICommonObject + const _humanInputModelPrompt = nodeData.inputs?.humanInputModelPrompt as string + const humanInputModelPrompt = _humanInputModelPrompt ? 
_humanInputModelPrompt : DEFAULT_HUMAN_INPUT_DESCRIPTION + + // Extract runtime state and history + const state = options.agentflowRuntime?.state as ICommonObject + const pastChatHistory = (options.pastChatHistory as BaseMessageLike[]) ?? [] + const runtimeChatHistory = (options.agentflowRuntime?.chatHistory as BaseMessageLike[]) ?? [] + + const chatId = options.chatId as string + const isStreamable = options.sseStreamer !== undefined + + if (humanInput) { + const outcomes: Partial[] & Partial[] = [ + { + type: 'proceed', + startNodeId: humanInput?.startNodeId, + feedback: humanInputEnableFeedback && humanInput?.feedback ? humanInput.feedback : undefined, + isFulfilled: false + }, + { + type: 'reject', + startNodeId: humanInput?.startNodeId, + feedback: humanInputEnableFeedback && humanInput?.feedback ? humanInput.feedback : undefined, + isFulfilled: false + } + ] + + // Only one outcome can be fulfilled at a time + switch (humanInput?.type) { + case 'proceed': + outcomes[0].isFulfilled = true + break + case 'reject': + outcomes[1].isFulfilled = true + break + } + + const messages = [ + ...pastChatHistory, + ...runtimeChatHistory, + { + role: 'user', + content: humanInput.feedback || humanInput.type + } + ] + const input = { ...humanInput, messages } + const output = { conditions: outcomes } + + const nodeOutput = { + id: nodeData.id, + name: this.name, + input, + output, + state + } + + if (humanInput.feedback) { + ;(nodeOutput as any).chatHistory = [{ role: 'user', content: humanInput.feedback }] + } + + return nodeOutput + } else { + let humanInputDescription = '' + + if (humanInputDescriptionType === 'fixed') { + humanInputDescription = (nodeData.inputs?.humanInputDescription as string) || 'Do you want to proceed?' 
+ const messages = [...pastChatHistory, ...runtimeChatHistory] + // Find the last message in the messages array + const lastMessage = (messages[messages.length - 1] as any).content || '' + humanInputDescription = `${lastMessage}\n\n${humanInputDescription}` + if (isStreamable) { + const sseStreamer: IServerSideEventStreamer = options.sseStreamer as IServerSideEventStreamer + sseStreamer.streamTokenEvent(chatId, humanInputDescription) + } + } else { + if (model && modelConfig) { + const nodeInstanceFilePath = options.componentNodes[model].filePath as string + const nodeModule = await import(nodeInstanceFilePath) + const newNodeInstance = new nodeModule.nodeClass() + const newNodeData = { + ...nodeData, + credential: modelConfig['FLOWISE_CREDENTIAL_ID'], + inputs: { + ...nodeData.inputs, + ...modelConfig + } + } + const llmNodeInstance = (await newNodeInstance.init(newNodeData, '', options)) as BaseChatModel + const messages = [ + ...pastChatHistory, + ...runtimeChatHistory, + { + role: 'user', + content: humanInputModelPrompt || DEFAULT_HUMAN_INPUT_DESCRIPTION + } + ] + + let response: AIMessageChunk = new AIMessageChunk('') + if (isStreamable) { + const sseStreamer: IServerSideEventStreamer = options.sseStreamer as IServerSideEventStreamer + for await (const chunk of await llmNodeInstance.stream(messages)) { + sseStreamer.streamTokenEvent(chatId, chunk.content.toString()) + response = response.concat(chunk) + } + humanInputDescription = response.content as string + } else { + const response = await llmNodeInstance.invoke(messages) + humanInputDescription = response.content as string + } + } + } + + const input = { messages: [...pastChatHistory, ...runtimeChatHistory], humanInputEnableFeedback } + const output = { content: humanInputDescription } + const nodeOutput = { + id: nodeData.id, + name: this.name, + input, + output, + state, + chatHistory: [{ role: 'assistant', content: humanInputDescription }] + } + + return nodeOutput + } + } +} + +module.exports = { 
nodeClass: HumanInput_Agentflow } diff --git a/packages/components/nodes/agentflow/Interface.Agentflow.ts b/packages/components/nodes/agentflow/Interface.Agentflow.ts new file mode 100644 index 000000000..33984784a --- /dev/null +++ b/packages/components/nodes/agentflow/Interface.Agentflow.ts @@ -0,0 +1,17 @@ +export interface ILLMMessage { + role: 'system' | 'assistant' | 'user' | 'tool' | 'developer' + content: string +} + +export interface IStructuredOutput { + key: string + type: 'string' | 'stringArray' | 'number' | 'boolean' | 'enum' | 'jsonArray' + enumValues?: string + description?: string + jsonSchema?: string +} + +export interface IFlowState { + key: string + value: string +} diff --git a/packages/components/nodes/agentflow/Iteration/Iteration.ts b/packages/components/nodes/agentflow/Iteration/Iteration.ts new file mode 100644 index 000000000..048035fb2 --- /dev/null +++ b/packages/components/nodes/agentflow/Iteration/Iteration.ts @@ -0,0 +1,69 @@ +import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface' + +class Iteration_Agentflow implements INode { + label: string + name: string + version: number + description: string + type: string + icon: string + category: string + color: string + baseClasses: string[] + documentation?: string + credential: INodeParams + inputs: INodeParams[] + + constructor() { + this.label = 'Iteration' + this.name = 'iterationAgentflow' + this.version = 1.0 + this.type = 'Iteration' + this.category = 'Agent Flows' + this.description = 'Execute the nodes within the iteration block through N iterations' + this.baseClasses = [this.type] + this.color = '#9C89B8' + this.inputs = [ + { + label: 'Array Input', + name: 'iterationInput', + type: 'string', + description: 'The input array to iterate over', + acceptVariable: true, + rows: 4 + } + ] + } + + async run(nodeData: INodeData, _: string, options: ICommonObject): Promise { + const iterationInput = nodeData.inputs?.iterationInput + + // Helper function 
to clean JSON strings with redundant backslashes + const cleanJsonString = (str: string): string => { + return str.replace(/\\(["'[\]{}])/g, '$1') + } + + const iterationInputArray = + typeof iterationInput === 'string' && iterationInput !== '' ? JSON.parse(cleanJsonString(iterationInput)) : iterationInput + + if (!iterationInputArray || !Array.isArray(iterationInputArray)) { + throw new Error('Invalid input array') + } + + const state = options.agentflowRuntime?.state as ICommonObject + + const returnOutput = { + id: nodeData.id, + name: this.name, + input: { + iterationInput: iterationInputArray + }, + output: {}, + state + } + + return returnOutput + } +} + +module.exports = { nodeClass: Iteration_Agentflow } diff --git a/packages/components/nodes/agentflow/LLM/LLM.ts b/packages/components/nodes/agentflow/LLM/LLM.ts new file mode 100644 index 000000000..18f8d187d --- /dev/null +++ b/packages/components/nodes/agentflow/LLM/LLM.ts @@ -0,0 +1,985 @@ +import { BaseChatModel } from '@langchain/core/language_models/chat_models' +import { ICommonObject, INode, INodeData, INodeOptionsValue, INodeParams, IServerSideEventStreamer } from '../../../src/Interface' +import { AIMessageChunk, BaseMessageLike, MessageContentText } from '@langchain/core/messages' +import { DEFAULT_SUMMARIZER_TEMPLATE } from '../prompt' +import { z } from 'zod' +import { AnalyticHandler } from '../../../src/handler' +import { ILLMMessage, IStructuredOutput } from '../Interface.Agentflow' +import { + getPastChatHistoryImageMessages, + getUniqueImageMessages, + processMessagesWithImages, + replaceBase64ImagesWithFileReferences, + updateFlowState +} from '../utils' +import { get } from 'lodash' + +class LLM_Agentflow implements INode { + label: string + name: string + version: number + description: string + type: string + icon: string + category: string + color: string + baseClasses: string[] + documentation?: string + credential: INodeParams + inputs: INodeParams[] + + constructor() { + this.label = 
'LLM' + this.name = 'llmAgentflow' + this.version = 1.0 + this.type = 'LLM' + this.category = 'Agent Flows' + this.description = 'Large language models to analyze user-provided inputs and generate responses' + this.color = '#64B5F6' + this.baseClasses = [this.type] + this.inputs = [ + { + label: 'Model', + name: 'llmModel', + type: 'asyncOptions', + loadMethod: 'listModels', + loadConfig: true + }, + { + label: 'Messages', + name: 'llmMessages', + type: 'array', + optional: true, + acceptVariable: true, + array: [ + { + label: 'Role', + name: 'role', + type: 'options', + options: [ + { + label: 'System', + name: 'system' + }, + { + label: 'Assistant', + name: 'assistant' + }, + { + label: 'Developer', + name: 'developer' + }, + { + label: 'User', + name: 'user' + } + ] + }, + { + label: 'Content', + name: 'content', + type: 'string', + acceptVariable: true, + generateInstruction: true, + rows: 4 + } + ] + }, + { + label: 'Enable Memory', + name: 'llmEnableMemory', + type: 'boolean', + description: 'Enable memory for the conversation thread', + default: true, + optional: true + }, + { + label: 'Memory Type', + name: 'llmMemoryType', + type: 'options', + options: [ + { + label: 'All Messages', + name: 'allMessages', + description: 'Retrieve all messages from the conversation' + }, + { + label: 'Window Size', + name: 'windowSize', + description: 'Uses a fixed window size to surface the last N messages' + }, + { + label: 'Conversation Summary', + name: 'conversationSummary', + description: 'Summarizes the whole conversation' + }, + { + label: 'Conversation Summary Buffer', + name: 'conversationSummaryBuffer', + description: 'Summarize conversations once token limit is reached. 
Default to 2000' + } + ], + optional: true, + default: 'allMessages', + show: { + llmEnableMemory: true + } + }, + { + label: 'Window Size', + name: 'llmMemoryWindowSize', + type: 'number', + default: '20', + description: 'Uses a fixed window size to surface the last N messages', + show: { + llmMemoryType: 'windowSize' + } + }, + { + label: 'Max Token Limit', + name: 'llmMemoryMaxTokenLimit', + type: 'number', + default: '2000', + description: 'Summarize conversations once token limit is reached. Default to 2000', + show: { + llmMemoryType: 'conversationSummaryBuffer' + } + }, + { + label: 'Input Message', + name: 'llmUserMessage', + type: 'string', + description: 'Add an input message as user message at the end of the conversation', + rows: 4, + optional: true, + acceptVariable: true, + show: { + llmEnableMemory: true + } + }, + { + label: 'Return Response As', + name: 'llmReturnResponseAs', + type: 'options', + options: [ + { + label: 'User Message', + name: 'userMessage' + }, + { + label: 'Assistant Message', + name: 'assistantMessage' + } + ], + default: 'userMessage' + }, + { + label: 'JSON Structured Output', + name: 'llmStructuredOutput', + description: 'Instruct the LLM to give output in a JSON structured schema', + type: 'array', + optional: true, + acceptVariable: true, + array: [ + { + label: 'Key', + name: 'key', + type: 'string' + }, + { + label: 'Type', + name: 'type', + type: 'options', + options: [ + { + label: 'String', + name: 'string' + }, + { + label: 'String Array', + name: 'stringArray' + }, + { + label: 'Number', + name: 'number' + }, + { + label: 'Boolean', + name: 'boolean' + }, + { + label: 'Enum', + name: 'enum' + }, + { + label: 'JSON Array', + name: 'jsonArray' + } + ] + }, + { + label: 'Enum Values', + name: 'enumValues', + type: 'string', + placeholder: 'value1, value2, value3', + description: 'Enum values. 
Separated by comma', + optional: true, + show: { + 'llmStructuredOutput[$index].type': 'enum' + } + }, + { + label: 'JSON Schema', + name: 'jsonSchema', + type: 'code', + placeholder: `{ + "answer": { + "type": "string", + "description": "Value of the answer" + }, + "reason": { + "type": "string", + "description": "Reason for the answer" + }, + "optional": { + "type": "boolean" + }, + "count": { + "type": "number" + }, + "children": { + "type": "array", + "items": { + "type": "object", + "properties": { + "value": { + "type": "string", + "description": "Value of the children's answer" + } + } + } + } +}`, + description: 'JSON schema for the structured output', + optional: true, + show: { + 'llmStructuredOutput[$index].type': 'jsonArray' + } + }, + { + label: 'Description', + name: 'description', + type: 'string', + placeholder: 'Description of the key' + } + ] + }, + { + label: 'Update Flow State', + name: 'llmUpdateState', + description: 'Update runtime state during the execution of the workflow', + type: 'array', + optional: true, + acceptVariable: true, + array: [ + { + label: 'Key', + name: 'key', + type: 'asyncOptions', + loadMethod: 'listRuntimeStateKeys', + freeSolo: true + }, + { + label: 'Value', + name: 'value', + type: 'string', + acceptVariable: true, + acceptNodeOutputAsVariable: true + } + ] + } + ] + } + + //@ts-ignore + loadMethods = { + async listModels(_: INodeData, options: ICommonObject): Promise { + const componentNodes = options.componentNodes as { + [key: string]: INode + } + + const returnOptions: INodeOptionsValue[] = [] + for (const nodeName in componentNodes) { + const componentNode = componentNodes[nodeName] + if (componentNode.category === 'Chat Models') { + if (componentNode.tags?.includes('LlamaIndex')) { + continue + } + returnOptions.push({ + label: componentNode.label, + name: nodeName, + imageSrc: componentNode.icon + }) + } + } + return returnOptions + }, + async listRuntimeStateKeys(_: INodeData, options: ICommonObject): Promise 
{ + const previousNodes = options.previousNodes as ICommonObject[] + const startAgentflowNode = previousNodes.find((node) => node.name === 'startAgentflow') + const state = startAgentflowNode?.inputs?.startState as ICommonObject[] + return state.map((item) => ({ label: item.key, name: item.key })) + } + } + + async run(nodeData: INodeData, input: string | Record, options: ICommonObject): Promise { + let llmIds: ICommonObject | undefined + let analyticHandlers = options.analyticHandlers as AnalyticHandler + + try { + const abortController = options.abortController as AbortController + + // Extract input parameters + const model = nodeData.inputs?.llmModel as string + const modelConfig = nodeData.inputs?.llmModelConfig as ICommonObject + if (!model) { + throw new Error('Model is required') + } + + // Extract memory and configuration options + const enableMemory = nodeData.inputs?.llmEnableMemory as boolean + const memoryType = nodeData.inputs?.llmMemoryType as string + const userMessage = nodeData.inputs?.llmUserMessage as string + const _llmUpdateState = nodeData.inputs?.llmUpdateState + const _llmStructuredOutput = nodeData.inputs?.llmStructuredOutput + const llmMessages = (nodeData.inputs?.llmMessages as unknown as ILLMMessage[]) ?? [] + + // Extract runtime state and history + const state = options.agentflowRuntime?.state as ICommonObject + const pastChatHistory = (options.pastChatHistory as BaseMessageLike[]) ?? [] + const runtimeChatHistory = (options.agentflowRuntime?.chatHistory as BaseMessageLike[]) ?? 
[] + const chatId = options.chatId as string + + // Initialize the LLM model instance + const nodeInstanceFilePath = options.componentNodes[model].filePath as string + const nodeModule = await import(nodeInstanceFilePath) + const newLLMNodeInstance = new nodeModule.nodeClass() + const newNodeData = { + ...nodeData, + credential: modelConfig['FLOWISE_CREDENTIAL_ID'], + inputs: { + ...nodeData.inputs, + ...modelConfig + } + } + let llmNodeInstance = (await newLLMNodeInstance.init(newNodeData, '', options)) as BaseChatModel + + // Prepare messages array + const messages: BaseMessageLike[] = [] + // Use to store messages with image file references as we do not want to store the base64 data into database + let runtimeImageMessagesWithFileRef: BaseMessageLike[] = [] + // Use to keep track of past messages with image file references + let pastImageMessagesWithFileRef: BaseMessageLike[] = [] + + for (const msg of llmMessages) { + const role = msg.role + const content = msg.content + if (role && content) { + messages.push({ role, content }) + } + } + + // Handle memory management if enabled + if (enableMemory) { + await this.handleMemory({ + messages, + memoryType, + pastChatHistory, + runtimeChatHistory, + llmNodeInstance, + nodeData, + userMessage, + input, + abortController, + options, + modelConfig, + runtimeImageMessagesWithFileRef, + pastImageMessagesWithFileRef + }) + } else if (!runtimeChatHistory.length) { + /* + * If this is the first node: + * - Add images to messages if exist + * - Add user message + */ + if (options.uploads) { + const imageContents = await getUniqueImageMessages(options, messages, modelConfig) + if (imageContents) { + const { imageMessageWithBase64, imageMessageWithFileRef } = imageContents + messages.push(imageMessageWithBase64) + runtimeImageMessagesWithFileRef.push(imageMessageWithFileRef) + } + } + + if (input && typeof input === 'string') { + messages.push({ + role: 'user', + content: input + }) + } + } + delete 
nodeData.inputs?.llmMessages + + // Configure structured output if specified + const isStructuredOutput = _llmStructuredOutput && Array.isArray(_llmStructuredOutput) && _llmStructuredOutput.length > 0 + if (isStructuredOutput) { + llmNodeInstance = this.configureStructuredOutput(llmNodeInstance, _llmStructuredOutput) + } + + // Initialize response and determine if streaming is possible + let response: AIMessageChunk = new AIMessageChunk('') + const isLastNode = options.isLastNode as boolean + const isStreamable = isLastNode && options.sseStreamer !== undefined && modelConfig?.streaming !== false && !isStructuredOutput + + // Start analytics + if (analyticHandlers && options.parentTraceIds) { + const llmLabel = options?.componentNodes?.[model]?.label || model + llmIds = await analyticHandlers.onLLMStart(llmLabel, messages, options.parentTraceIds) + } + + // Track execution time + const startTime = Date.now() + + const sseStreamer: IServerSideEventStreamer | undefined = options.sseStreamer + + if (isStreamable) { + response = await this.handleStreamingResponse(sseStreamer, llmNodeInstance, messages, chatId, abortController) + } else { + response = await llmNodeInstance.invoke(messages, { signal: abortController?.signal }) + + // Stream whole response back to UI if this is the last node + if (isLastNode && options.sseStreamer) { + const sseStreamer: IServerSideEventStreamer = options.sseStreamer as IServerSideEventStreamer + let responseContent = JSON.stringify(response, null, 2) + if (typeof response.content === 'string') { + responseContent = response.content + } + sseStreamer.streamTokenEvent(chatId, responseContent) + } + } + + // Calculate execution time + const endTime = Date.now() + const timeDelta = endTime - startTime + + // Update flow state if needed + let newState = { ...state } + if (_llmUpdateState && Array.isArray(_llmUpdateState) && _llmUpdateState.length > 0) { + newState = updateFlowState(state, _llmUpdateState) + } + + // Clean up empty inputs + for 
(const key in nodeData.inputs) { + if (nodeData.inputs[key] === '') { + delete nodeData.inputs[key] + } + } + + // Prepare final response and output object + const finalResponse = (response.content as string) ?? JSON.stringify(response, null, 2) + const output = this.prepareOutputObject(response, finalResponse, startTime, endTime, timeDelta) + + // End analytics tracking + if (analyticHandlers && llmIds) { + await analyticHandlers.onLLMEnd(llmIds, finalResponse) + } + + // Send additional streaming events if needed + if (isStreamable) { + this.sendStreamingEvents(options, chatId, response) + } + + // Process template variables in state + if (newState && Object.keys(newState).length > 0) { + for (const key in newState) { + const stateValue = newState[key].toString() + if (stateValue.includes('{{ output')) { + // Handle simple output replacement + if (stateValue === '{{ output }}') { + newState[key] = finalResponse + continue + } + + // Handle JSON path expressions like {{ output.item1 }} + // eslint-disable-next-line + const match = stateValue.match(/{{[\s]*output\.([\w\.]+)[\s]*}}/) + if (match) { + try { + // Parse the response if it's JSON + const jsonResponse = typeof finalResponse === 'string' ? JSON.parse(finalResponse) : finalResponse + // Get the value using lodash get + const path = match[1] + const value = get(jsonResponse, path) + newState[key] = value ?? 
stateValue // Fall back to original if path not found + } catch (e) { + // If JSON parsing fails, keep original template + console.warn(`Failed to parse JSON or find path in output: ${e}`) + newState[key] = stateValue + } + } + } + } + } + + // Replace the actual messages array with one that includes the file references for images instead of base64 data + const messagesWithFileReferences = replaceBase64ImagesWithFileReferences( + messages, + runtimeImageMessagesWithFileRef, + pastImageMessagesWithFileRef + ) + + // Only add to runtime chat history if this is the first node + const inputMessages = [] + if (!runtimeChatHistory.length) { + if (runtimeImageMessagesWithFileRef.length) { + inputMessages.push(...runtimeImageMessagesWithFileRef) + } + if (input && typeof input === 'string') { + inputMessages.push({ role: 'user', content: input }) + } + } + + const returnResponseAs = nodeData.inputs?.llmReturnResponseAs as string + let returnRole = 'user' + if (returnResponseAs === 'assistantMessage') { + returnRole = 'assistant' + } + + // Prepare and return the final output + return { + id: nodeData.id, + name: this.name, + input: { + messages: messagesWithFileReferences, + ...nodeData.inputs + }, + output, + state: newState, + chatHistory: [ + ...inputMessages, + + // LLM response + { + role: returnRole, + content: finalResponse, + name: nodeData?.label ? nodeData?.label.toLowerCase().replace(/\s/g, '_').trim() : nodeData?.id + } + ] + } + } catch (error) { + if (options.analyticHandlers && llmIds) { + await options.analyticHandlers.onLLMError(llmIds, error instanceof Error ? error.message : String(error)) + } + + if (error instanceof Error && error.message === 'Aborted') { + throw error + } + throw new Error(`Error in LLM node: ${error instanceof Error ? 
error.message : String(error)}`) + } + } + + /** + * Handles memory management based on the specified memory type + */ + private async handleMemory({ + messages, + memoryType, + pastChatHistory, + runtimeChatHistory, + llmNodeInstance, + nodeData, + userMessage, + input, + abortController, + options, + modelConfig, + runtimeImageMessagesWithFileRef, + pastImageMessagesWithFileRef + }: { + messages: BaseMessageLike[] + memoryType: string + pastChatHistory: BaseMessageLike[] + runtimeChatHistory: BaseMessageLike[] + llmNodeInstance: BaseChatModel + nodeData: INodeData + userMessage: string + input: string | Record + abortController: AbortController + options: ICommonObject + modelConfig: ICommonObject + runtimeImageMessagesWithFileRef: BaseMessageLike[] + pastImageMessagesWithFileRef: BaseMessageLike[] + }): Promise { + const { updatedPastMessages, transformedPastMessages } = await getPastChatHistoryImageMessages(pastChatHistory, options) + pastChatHistory = updatedPastMessages + pastImageMessagesWithFileRef.push(...transformedPastMessages) + + let pastMessages = [...pastChatHistory, ...runtimeChatHistory] + if (!runtimeChatHistory.length && input && typeof input === 'string') { + /* + * If this is the first node: + * - Add images to messages if exist + * - Add user message + */ + if (options.uploads) { + const imageContents = await getUniqueImageMessages(options, messages, modelConfig) + if (imageContents) { + const { imageMessageWithBase64, imageMessageWithFileRef } = imageContents + pastMessages.push(imageMessageWithBase64) + runtimeImageMessagesWithFileRef.push(imageMessageWithFileRef) + } + } + pastMessages.push({ + role: 'user', + content: input + }) + } + const { updatedMessages, transformedMessages } = await processMessagesWithImages(pastMessages, options) + pastMessages = updatedMessages + pastImageMessagesWithFileRef.push(...transformedMessages) + + if (pastMessages.length > 0) { + if (memoryType === 'windowSize') { + // Window memory: Keep the last N 
messages + const windowSize = nodeData.inputs?.llmMemoryWindowSize as number + const windowedMessages = pastMessages.slice(-windowSize * 2) + messages.push(...windowedMessages) + } else if (memoryType === 'conversationSummary') { + // Summary memory: Summarize all past messages + const summary = await llmNodeInstance.invoke( + [ + { + role: 'user', + content: DEFAULT_SUMMARIZER_TEMPLATE.replace( + '{conversation}', + pastMessages.map((msg: any) => `${msg.role}: ${msg.content}`).join('\n') + ) + } + ], + { signal: abortController?.signal } + ) + messages.push({ role: 'assistant', content: summary.content as string }) + } else if (memoryType === 'conversationSummaryBuffer') { + // Summary buffer: Summarize messages that exceed token limit + await this.handleSummaryBuffer(messages, pastMessages, llmNodeInstance, nodeData, abortController) + } else { + // Default: Use all messages + messages.push(...pastMessages) + } + } + + // Add user message + if (userMessage) { + messages.push({ + role: 'user', + content: userMessage + }) + } + } + + /** + * Handles conversation summary buffer memory type + */ + private async handleSummaryBuffer( + messages: BaseMessageLike[], + pastMessages: BaseMessageLike[], + llmNodeInstance: BaseChatModel, + nodeData: INodeData, + abortController: AbortController + ): Promise { + const maxTokenLimit = (nodeData.inputs?.llmMemoryMaxTokenLimit as number) || 2000 + + // Convert past messages to a format suitable for token counting + const messagesString = pastMessages.map((msg: any) => `${msg.role}: ${msg.content}`).join('\n') + const tokenCount = await llmNodeInstance.getNumTokens(messagesString) + + if (tokenCount > maxTokenLimit) { + // Calculate how many messages to summarize (messages that exceed the token limit) + let currBufferLength = tokenCount + const messagesToSummarize = [] + const remainingMessages = [...pastMessages] + + // Remove messages from the beginning until we're under the token limit + while (currBufferLength > maxTokenLimit 
&& remainingMessages.length > 0) { + const poppedMessage = remainingMessages.shift() + if (poppedMessage) { + messagesToSummarize.push(poppedMessage) + // Recalculate token count for remaining messages + const remainingMessagesString = remainingMessages.map((msg: any) => `${msg.role}: ${msg.content}`).join('\n') + currBufferLength = await llmNodeInstance.getNumTokens(remainingMessagesString) + } + } + + // Summarize the messages that were removed + const messagesToSummarizeString = messagesToSummarize.map((msg: any) => `${msg.role}: ${msg.content}`).join('\n') + + const summary = await llmNodeInstance.invoke( + [ + { + role: 'user', + content: DEFAULT_SUMMARIZER_TEMPLATE.replace('{conversation}', messagesToSummarizeString) + } + ], + { signal: abortController?.signal } + ) + + // Add summary as a system message at the beginning, then add remaining messages + messages.push({ role: 'system', content: `Previous conversation summary: ${summary.content}` }) + messages.push(...remainingMessages) + } else { + // If under token limit, use all messages + messages.push(...pastMessages) + } + } + + /** + * Configures structured output for the LLM + */ + private configureStructuredOutput(llmNodeInstance: BaseChatModel, llmStructuredOutput: IStructuredOutput[]): BaseChatModel { + try { + const zodObj: ICommonObject = {} + for (const sch of llmStructuredOutput) { + if (sch.type === 'string') { + zodObj[sch.key] = z.string().describe(sch.description || '') + } else if (sch.type === 'stringArray') { + zodObj[sch.key] = z.array(z.string()).describe(sch.description || '') + } else if (sch.type === 'number') { + zodObj[sch.key] = z.number().describe(sch.description || '') + } else if (sch.type === 'boolean') { + zodObj[sch.key] = z.boolean().describe(sch.description || '') + } else if (sch.type === 'enum') { + const enumValues = sch.enumValues?.split(',').map((item: string) => item.trim()) || [] + zodObj[sch.key] = z + .enum(enumValues.length ? 
(enumValues as [string, ...string[]]) : ['default']) + .describe(sch.description || '') + } else if (sch.type === 'jsonArray') { + const jsonSchema = sch.jsonSchema + if (jsonSchema) { + try { + // Parse the JSON schema + const schemaObj = JSON.parse(jsonSchema) + + // Create a Zod schema from the JSON schema + const itemSchema = this.createZodSchemaFromJSON(schemaObj) + + // Create an array schema of the item schema + zodObj[sch.key] = z.array(itemSchema).describe(sch.description || '') + } catch (err) { + console.error(`Error parsing JSON schema for ${sch.key}:`, err) + // Fallback to generic array of records + zodObj[sch.key] = z.array(z.record(z.any())).describe(sch.description || '') + } + } else { + // If no schema provided, use generic array of records + zodObj[sch.key] = z.array(z.record(z.any())).describe(sch.description || '') + } + } + } + const structuredOutput = z.object(zodObj) + + // @ts-ignore + return llmNodeInstance.withStructuredOutput(structuredOutput) + } catch (exception) { + console.error(exception) + return llmNodeInstance + } + } + + /** + * Handles streaming response from the LLM + */ + private async handleStreamingResponse( + sseStreamer: IServerSideEventStreamer | undefined, + llmNodeInstance: BaseChatModel, + messages: BaseMessageLike[], + chatId: string, + abortController: AbortController + ): Promise { + let response = new AIMessageChunk('') + + try { + for await (const chunk of await llmNodeInstance.stream(messages, { signal: abortController?.signal })) { + if (sseStreamer) { + let content = '' + if (Array.isArray(chunk.content) && chunk.content.length > 0) { + const contents = chunk.content as MessageContentText[] + content = contents.map((item) => item.text).join('') + } else { + content = chunk.content.toString() + } + sseStreamer.streamTokenEvent(chatId, content) + } + + response = response.concat(chunk) + } + } catch (error) { + console.error('Error during streaming:', error) + throw error + } + if 
(Array.isArray(response.content) && response.content.length > 0) { + const responseContents = response.content as MessageContentText[] + response.content = responseContents.map((item) => item.text).join('') + } + return response + } + + /** + * Prepares the output object with response and metadata + */ + private prepareOutputObject( + response: AIMessageChunk, + finalResponse: string, + startTime: number, + endTime: number, + timeDelta: number + ): any { + const output: any = { + content: finalResponse, + timeMetadata: { + start: startTime, + end: endTime, + delta: timeDelta + } + } + + if (response.tool_calls) { + output.calledTools = response.tool_calls + } + + if (response.usage_metadata) { + output.usageMetadata = response.usage_metadata + } + + return output + } + + /** + * Sends additional streaming events for tool calls and metadata + */ + private sendStreamingEvents(options: ICommonObject, chatId: string, response: AIMessageChunk): void { + const sseStreamer: IServerSideEventStreamer = options.sseStreamer as IServerSideEventStreamer + + if (response.tool_calls) { + sseStreamer.streamCalledToolsEvent(chatId, response.tool_calls) + } + + if (response.usage_metadata) { + sseStreamer.streamUsageMetadataEvent(chatId, response.usage_metadata) + } + + sseStreamer.streamEndEvent(chatId) + } + + /** + * Creates a Zod schema from a JSON schema object + * @param jsonSchema The JSON schema object + * @returns A Zod schema + */ + private createZodSchemaFromJSON(jsonSchema: any): z.ZodTypeAny { + // If the schema is an object with properties, create an object schema + if (typeof jsonSchema === 'object' && jsonSchema !== null) { + const schemaObj: Record = {} + + // Process each property in the schema + for (const [key, value] of Object.entries(jsonSchema)) { + if (value === null) { + // Handle null values + schemaObj[key] = z.null() + } else if (typeof value === 'object' && !Array.isArray(value)) { + // Check if the property has a type definition + if ('type' in value) { 
+ const type = value.type as string + const description = ('description' in value ? (value.description as string) : '') || '' + + // Create the appropriate Zod type based on the type property + if (type === 'string') { + schemaObj[key] = z.string().describe(description) + } else if (type === 'number') { + schemaObj[key] = z.number().describe(description) + } else if (type === 'boolean') { + schemaObj[key] = z.boolean().describe(description) + } else if (type === 'array') { + // If it's an array type, check if items is defined + if ('items' in value && value.items) { + const itemSchema = this.createZodSchemaFromJSON(value.items) + schemaObj[key] = z.array(itemSchema).describe(description) + } else { + // Default to array of any if items not specified + schemaObj[key] = z.array(z.any()).describe(description) + } + } else if (type === 'object') { + // If it's an object type, check if properties is defined + if ('properties' in value && value.properties) { + const nestedSchema = this.createZodSchemaFromJSON(value.properties) + schemaObj[key] = nestedSchema.describe(description) + } else { + // Default to record of any if properties not specified + schemaObj[key] = z.record(z.any()).describe(description) + } + } else { + // Default to any for unknown types + schemaObj[key] = z.any().describe(description) + } + + // Check if the property is optional + if ('optional' in value && value.optional === true) { + schemaObj[key] = schemaObj[key].optional() + } + } else if (Array.isArray(value)) { + // Array values without a type property + if (value.length > 0) { + // If the array has items, recursively create a schema for the first item + const itemSchema = this.createZodSchemaFromJSON(value[0]) + schemaObj[key] = z.array(itemSchema) + } else { + // Empty array, allow any array + schemaObj[key] = z.array(z.any()) + } + } else { + // It's a nested object without a type property, recursively create schema + schemaObj[key] = this.createZodSchemaFromJSON(value) + } + } else if 
(Array.isArray(value)) { + // Array values + if (value.length > 0) { + // If the array has items, recursively create a schema for the first item + const itemSchema = this.createZodSchemaFromJSON(value[0]) + schemaObj[key] = z.array(itemSchema) + } else { + // Empty array, allow any array + schemaObj[key] = z.array(z.any()) + } + } else { + // For primitive values (which shouldn't be in the schema directly) + // Use the corresponding Zod type + if (typeof value === 'string') { + schemaObj[key] = z.string() + } else if (typeof value === 'number') { + schemaObj[key] = z.number() + } else if (typeof value === 'boolean') { + schemaObj[key] = z.boolean() + } else { + schemaObj[key] = z.any() + } + } + } + + return z.object(schemaObj) + } + + // Fallback to any for unknown types + return z.any() + } +} + +module.exports = { nodeClass: LLM_Agentflow } diff --git a/packages/components/nodes/agentflow/Loop/Loop.ts b/packages/components/nodes/agentflow/Loop/Loop.ts new file mode 100644 index 000000000..bc9d7b08d --- /dev/null +++ b/packages/components/nodes/agentflow/Loop/Loop.ts @@ -0,0 +1,94 @@ +import { ICommonObject, INode, INodeData, INodeOptionsValue, INodeParams } from '../../../src/Interface' + +class Loop_Agentflow implements INode { + label: string + name: string + version: number + description: string + type: string + icon: string + category: string + color: string + hideOutput: boolean + hint: string + baseClasses: string[] + documentation?: string + credential: INodeParams + inputs: INodeParams[] + + constructor() { + this.label = 'Loop' + this.name = 'loopAgentflow' + this.version = 1.0 + this.type = 'Loop' + this.category = 'Agent Flows' + this.description = 'Loop back to a previous node' + this.baseClasses = [this.type] + this.color = '#FFA07A' + this.hint = 'Make sure to have memory enabled in the LLM/Agent node to retain the chat history' + this.hideOutput = true + this.inputs = [ + { + label: 'Loop Back To', + name: 'loopBackToNode', + type: 'asyncOptions', 
+ loadMethod: 'listPreviousNodes', + freeSolo: true + }, + { + label: 'Max Loop Count', + name: 'maxLoopCount', + type: 'number', + default: 5 + } + ] + } + + //@ts-ignore + loadMethods = { + async listPreviousNodes(_: INodeData, options: ICommonObject): Promise { + const previousNodes = options.previousNodes as ICommonObject[] + + const returnOptions: INodeOptionsValue[] = [] + for (const node of previousNodes) { + returnOptions.push({ + label: node.label, + name: `${node.id}-${node.label}`, + description: node.id + }) + } + return returnOptions + } + } + + async run(nodeData: INodeData, _: string, options: ICommonObject): Promise { + const loopBackToNode = nodeData.inputs?.loopBackToNode as string + const _maxLoopCount = nodeData.inputs?.maxLoopCount as string + + const state = options.agentflowRuntime?.state as ICommonObject + + const loopBackToNodeId = loopBackToNode.split('-')[0] + const loopBackToNodeLabel = loopBackToNode.split('-')[1] + + const data = { + nodeID: loopBackToNodeId, + maxLoopCount: _maxLoopCount ? parseInt(_maxLoopCount) : 5 + } + + const returnOutput = { + id: nodeData.id, + name: this.name, + input: data, + output: { + content: 'Loop back to ' + `${loopBackToNodeLabel} (${loopBackToNodeId})`, + nodeID: loopBackToNodeId, + maxLoopCount: _maxLoopCount ? 
parseInt(_maxLoopCount) : 5 + }, + state + } + + return returnOutput + } +} + +module.exports = { nodeClass: Loop_Agentflow } diff --git a/packages/components/nodes/agentflow/Retriever/Retriever.ts b/packages/components/nodes/agentflow/Retriever/Retriever.ts new file mode 100644 index 000000000..68420484e --- /dev/null +++ b/packages/components/nodes/agentflow/Retriever/Retriever.ts @@ -0,0 +1,227 @@ +import { + ICommonObject, + IDatabaseEntity, + INode, + INodeData, + INodeOptionsValue, + INodeParams, + IServerSideEventStreamer +} from '../../../src/Interface' +import { updateFlowState } from '../utils' +import { DataSource } from 'typeorm' +import { BaseRetriever } from '@langchain/core/retrievers' +import { Document } from '@langchain/core/documents' + +interface IKnowledgeBase { + documentStore: string +} + +class Retriever_Agentflow implements INode { + label: string + name: string + version: number + description: string + type: string + icon: string + category: string + color: string + hideOutput: boolean + hint: string + baseClasses: string[] + documentation?: string + credential: INodeParams + inputs: INodeParams[] + + constructor() { + this.label = 'Retriever' + this.name = 'retrieverAgentflow' + this.version = 1.0 + this.type = 'Retriever' + this.category = 'Agent Flows' + this.description = 'Retrieve information from vector database' + this.baseClasses = [this.type] + this.color = '#b8bedd' + this.inputs = [ + { + label: 'Knowledge (Document Stores)', + name: 'retrieverKnowledgeDocumentStores', + type: 'array', + description: 'Document stores to retrieve information from. 
Document stores must be upserted in advance.', + array: [ + { + label: 'Document Store', + name: 'documentStore', + type: 'asyncOptions', + loadMethod: 'listStores' + } + ] + }, + { + label: 'Retriever Query', + name: 'retrieverQuery', + type: 'string', + placeholder: 'Enter your query here', + rows: 4, + acceptVariable: true + }, + { + label: 'Output Format', + name: 'outputFormat', + type: 'options', + options: [ + { label: 'Text', name: 'text' }, + { label: 'Text with Metadata', name: 'textWithMetadata' } + ], + default: 'text' + }, + { + label: 'Update Flow State', + name: 'retrieverUpdateState', + description: 'Update runtime state during the execution of the workflow', + type: 'array', + optional: true, + acceptVariable: true, + array: [ + { + label: 'Key', + name: 'key', + type: 'asyncOptions', + loadMethod: 'listRuntimeStateKeys', + freeSolo: true + }, + { + label: 'Value', + name: 'value', + type: 'string', + acceptVariable: true, + acceptNodeOutputAsVariable: true + } + ] + } + ] + } + + //@ts-ignore + loadMethods = { + async listRuntimeStateKeys(_: INodeData, options: ICommonObject): Promise { + const previousNodes = options.previousNodes as ICommonObject[] + const startAgentflowNode = previousNodes.find((node) => node.name === 'startAgentflow') + const state = startAgentflowNode?.inputs?.startState as ICommonObject[] + return state.map((item) => ({ label: item.key, name: item.key })) + }, + async listStores(_: INodeData, options: ICommonObject): Promise { + const returnData: INodeOptionsValue[] = [] + + const appDataSource = options.appDataSource as DataSource + const databaseEntities = options.databaseEntities as IDatabaseEntity + + if (appDataSource === undefined || !appDataSource) { + return returnData + } + + const stores = await appDataSource.getRepository(databaseEntities['DocumentStore']).find() + for (const store of stores) { + if (store.status === 'UPSERTED') { + const obj = { + name: `${store.id}:${store.name}`, + label: store.name, + 
description: store.description + } + returnData.push(obj) + } + } + return returnData + } + } + + async run(nodeData: INodeData, input: string, options: ICommonObject): Promise { + const retrieverQuery = nodeData.inputs?.retrieverQuery as string + const outputFormat = nodeData.inputs?.outputFormat as string + const _retrieverUpdateState = nodeData.inputs?.retrieverUpdateState + + const state = options.agentflowRuntime?.state as ICommonObject + const chatId = options.chatId as string + const isLastNode = options.isLastNode as boolean + const isStreamable = isLastNode && options.sseStreamer !== undefined + + const abortController = options.abortController as AbortController + + // Extract knowledge + let docs: Document[] = [] + const knowledgeBases = nodeData.inputs?.retrieverKnowledgeDocumentStores as IKnowledgeBase[] + if (knowledgeBases && knowledgeBases.length > 0) { + for (const knowledgeBase of knowledgeBases) { + const [storeId, _] = knowledgeBase.documentStore.split(':') + + const docStoreVectorInstanceFilePath = options.componentNodes['documentStoreVS'].filePath as string + const docStoreVectorModule = await import(docStoreVectorInstanceFilePath) + const newDocStoreVectorInstance = new docStoreVectorModule.nodeClass() + const docStoreVectorInstance = (await newDocStoreVectorInstance.init( + { + ...nodeData, + inputs: { + ...nodeData.inputs, + selectedStore: storeId + }, + outputs: { + output: 'retriever' + } + }, + '', + options + )) as BaseRetriever + + docs = await docStoreVectorInstance.invoke(retrieverQuery || input, { signal: abortController?.signal }) + } + } + + const docsText = docs.map((doc) => doc.pageContent).join('\n') + + // Update flow state if needed + let newState = { ...state } + if (_retrieverUpdateState && Array.isArray(_retrieverUpdateState) && _retrieverUpdateState.length > 0) { + newState = updateFlowState(state, _retrieverUpdateState) + } + + try { + let finalOutput = '' + if (outputFormat === 'text') { + finalOutput = docsText + } 
else if (outputFormat === 'textWithMetadata') { + finalOutput = JSON.stringify(docs, null, 2) + } + + if (isStreamable) { + const sseStreamer: IServerSideEventStreamer = options.sseStreamer + sseStreamer.streamTokenEvent(chatId, finalOutput) + } + + // Process template variables in state + if (newState && Object.keys(newState).length > 0) { + for (const key in newState) { + if (newState[key].toString().includes('{{ output }}')) { + newState[key] = finalOutput + } + } + } + + const returnOutput = { + id: nodeData.id, + name: this.name, + input: { + question: retrieverQuery || input + }, + output: { + content: finalOutput + }, + state: newState + } + + return returnOutput + } catch (e) { + throw new Error(e) + } + } +} + +module.exports = { nodeClass: Retriever_Agentflow } diff --git a/packages/components/nodes/agentflow/Start/Start.ts b/packages/components/nodes/agentflow/Start/Start.ts new file mode 100644 index 000000000..5f6bf8449 --- /dev/null +++ b/packages/components/nodes/agentflow/Start/Start.ts @@ -0,0 +1,217 @@ +import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface' + +class Start_Agentflow implements INode { + label: string + name: string + version: number + description: string + type: string + icon: string + category: string + color: string + hideInput: boolean + baseClasses: string[] + documentation?: string + credential: INodeParams + inputs: INodeParams[] + + constructor() { + this.label = 'Start' + this.name = 'startAgentflow' + this.version = 1.0 + this.type = 'Start' + this.category = 'Agent Flows' + this.description = 'Starting point of the agentflow' + this.baseClasses = [this.type] + this.color = '#7EE787' + this.hideInput = true + this.inputs = [ + { + label: 'Input Type', + name: 'startInputType', + type: 'options', + options: [ + { + label: 'Chat Input', + name: 'chatInput', + description: 'Start the conversation with chat input' + }, + { + label: 'Form Input', + name: 'formInput', + description: 'Start the 
workflow with form inputs' + } + ], + default: 'chatInput' + }, + { + label: 'Form Title', + name: 'formTitle', + type: 'string', + placeholder: 'Please Fill Out The Form', + show: { + startInputType: 'formInput' + } + }, + { + label: 'Form Description', + name: 'formDescription', + type: 'string', + placeholder: 'Complete all fields below to continue', + show: { + startInputType: 'formInput' + } + }, + { + label: 'Form Input Types', + name: 'formInputTypes', + description: 'Specify the type of form input', + type: 'array', + show: { + startInputType: 'formInput' + }, + array: [ + { + label: 'Type', + name: 'type', + type: 'options', + options: [ + { + label: 'String', + name: 'string' + }, + { + label: 'Number', + name: 'number' + }, + { + label: 'Boolean', + name: 'boolean' + }, + { + label: 'Options', + name: 'options' + } + ], + default: 'string' + }, + { + label: 'Label', + name: 'label', + type: 'string', + placeholder: 'Label for the input' + }, + { + label: 'Variable Name', + name: 'name', + type: 'string', + placeholder: 'Variable name for the input (must be camel case)', + description: 'Variable name must be camel case. For example: firstName, lastName, etc.' 
+ }, + { + label: 'Add Options', + name: 'addOptions', + type: 'array', + show: { + 'formInputTypes[$index].type': 'options' + }, + array: [ + { + label: 'Option', + name: 'option', + type: 'string' + } + ] + } + ] + }, + { + label: 'Ephemeral Memory', + name: 'startEphemeralMemory', + type: 'boolean', + description: 'Start fresh for every execution without past chat history', + optional: true + }, + { + label: 'Flow State', + name: 'startState', + description: 'Runtime state during the execution of the workflow', + type: 'array', + optional: true, + array: [ + { + label: 'Key', + name: 'key', + type: 'string', + placeholder: 'Foo' + }, + { + label: 'Value', + name: 'value', + type: 'string', + placeholder: 'Bar', + optional: true + } + ] + } + ] + } + + async run(nodeData: INodeData, input: string | Record, options: ICommonObject): Promise { + const _flowState = nodeData.inputs?.startState as string + const startInputType = nodeData.inputs?.startInputType as string + const startEphemeralMemory = nodeData.inputs?.startEphemeralMemory as boolean + + let flowStateArray = [] + if (_flowState) { + try { + flowStateArray = typeof _flowState === 'string' ? 
JSON.parse(_flowState) : _flowState + } catch (error) { + throw new Error('Invalid Flow State') + } + } + + let flowState: Record = {} + for (const state of flowStateArray) { + flowState[state.key] = state.value + } + + const inputData: ICommonObject = {} + const outputData: ICommonObject = {} + + if (startInputType === 'chatInput') { + inputData.question = input + outputData.question = input + } + + if (startInputType === 'formInput') { + inputData.form = { + title: nodeData.inputs?.formTitle, + description: nodeData.inputs?.formDescription, + inputs: nodeData.inputs?.formInputTypes + } + + let form = input + if (options.agentflowRuntime?.form && Object.keys(options.agentflowRuntime.form).length) { + form = options.agentflowRuntime.form + } + outputData.form = form + } + + if (startEphemeralMemory) { + outputData.ephemeralMemory = true + } + + const returnOutput = { + id: nodeData.id, + name: this.name, + input: inputData, + output: outputData, + state: flowState + } + + return returnOutput + } +} + +module.exports = { nodeClass: Start_Agentflow } diff --git a/packages/components/nodes/agentflow/StickyNote/StickyNote.ts b/packages/components/nodes/agentflow/StickyNote/StickyNote.ts new file mode 100644 index 000000000..7bf0d7a24 --- /dev/null +++ b/packages/components/nodes/agentflow/StickyNote/StickyNote.ts @@ -0,0 +1,42 @@ +import { INode, INodeParams } from '../../../src/Interface' + +class StickyNote_Agentflow implements INode { + label: string + name: string + version: number + description: string + type: string + icon: string + category: string + color: string + tags: string[] + baseClasses: string[] + inputs: INodeParams[] + + constructor() { + this.label = 'Sticky Note' + this.name = 'stickyNoteAgentflow' + this.version = 1.0 + this.type = 'StickyNote' + this.color = '#fee440' + this.category = 'Agent Flows' + this.description = 'Add notes to the agent flow' + this.inputs = [ + { + label: '', + name: 'note', + type: 'string', + rows: 1, + placeholder: 
'Type something here', + optional: true + } + ] + this.baseClasses = [this.type] + } + + async run(): Promise { + return undefined + } +} + +module.exports = { nodeClass: StickyNote_Agentflow } diff --git a/packages/components/nodes/agentflow/Tool/Tool.ts b/packages/components/nodes/agentflow/Tool/Tool.ts new file mode 100644 index 000000000..c3945ff3e --- /dev/null +++ b/packages/components/nodes/agentflow/Tool/Tool.ts @@ -0,0 +1,304 @@ +import { ICommonObject, INode, INodeData, INodeOptionsValue, INodeParams, IServerSideEventStreamer } from '../../../src/Interface' +import { updateFlowState } from '../utils' +import { Tool } from '@langchain/core/tools' +import { ARTIFACTS_PREFIX } from '../../../src/agents' +import zodToJsonSchema from 'zod-to-json-schema' + +interface IToolInputArgs { + inputArgName: string + inputArgValue: string +} + +class Tool_Agentflow implements INode { + label: string + name: string + version: number + description: string + type: string + icon: string + category: string + color: string + hideOutput: boolean + hint: string + baseClasses: string[] + documentation?: string + credential: INodeParams + inputs: INodeParams[] + + constructor() { + this.label = 'Tool' + this.name = 'toolAgentflow' + this.version = 1.0 + this.type = 'Tool' + this.category = 'Agent Flows' + this.description = 'Tools allow LLM to interact with external systems' + this.baseClasses = [this.type] + this.color = '#d4a373' + this.inputs = [ + { + label: 'Tool', + name: 'selectedTool', + type: 'asyncOptions', + loadMethod: 'listTools', + loadConfig: true + }, + { + label: 'Tool Input Arguments', + name: 'toolInputArgs', + type: 'array', + acceptVariable: true, + refresh: true, + array: [ + { + label: 'Input Argument Name', + name: 'inputArgName', + type: 'asyncOptions', + loadMethod: 'listToolInputArgs', + refresh: true + }, + { + label: 'Input Argument Value', + name: 'inputArgValue', + type: 'string', + acceptVariable: true + } + ], + show: { + selectedTool: '.+' + } + 
}, + { + label: 'Update Flow State', + name: 'toolUpdateState', + description: 'Update runtime state during the execution of the workflow', + type: 'array', + optional: true, + acceptVariable: true, + array: [ + { + label: 'Key', + name: 'key', + type: 'asyncOptions', + loadMethod: 'listRuntimeStateKeys', + freeSolo: true + }, + { + label: 'Value', + name: 'value', + type: 'string', + acceptVariable: true, + acceptNodeOutputAsVariable: true + } + ] + } + ] + } + + //@ts-ignore + loadMethods = { + async listTools(_: INodeData, options: ICommonObject): Promise { + const componentNodes = options.componentNodes as { + [key: string]: INode + } + + const removeTools = ['chainTool', 'retrieverTool', 'webBrowser'] + + const returnOptions: INodeOptionsValue[] = [] + for (const nodeName in componentNodes) { + const componentNode = componentNodes[nodeName] + if (componentNode.category === 'Tools' || componentNode.category === 'Tools (MCP)') { + if (componentNode.tags?.includes('LlamaIndex')) { + continue + } + if (removeTools.includes(nodeName)) { + continue + } + returnOptions.push({ + label: componentNode.label, + name: nodeName, + imageSrc: componentNode.icon + }) + } + } + return returnOptions + }, + async listToolInputArgs(nodeData: INodeData, options: ICommonObject): Promise { + const currentNode = options.currentNode as ICommonObject + const selectedTool = currentNode?.inputs?.selectedTool as string + const selectedToolConfig = currentNode?.inputs?.selectedToolConfig as ICommonObject + + const nodeInstanceFilePath = options.componentNodes[selectedTool].filePath as string + + const nodeModule = await import(nodeInstanceFilePath) + const newToolNodeInstance = new nodeModule.nodeClass() + + const newNodeData = { + ...nodeData, + credential: selectedToolConfig['FLOWISE_CREDENTIAL_ID'], + inputs: { + ...nodeData.inputs, + ...selectedToolConfig + } + } + + try { + const toolInstance = (await newToolNodeInstance.init(newNodeData, '', options)) as Tool + + let toolInputArgs: 
ICommonObject = {} + + if (Array.isArray(toolInstance)) { + // Combine schemas from all tools in the array + const allProperties = toolInstance.reduce((acc, tool) => { + if (tool?.schema) { + const schema: Record = zodToJsonSchema(tool.schema) + return { ...acc, ...(schema.properties || {}) } + } + return acc + }, {}) + toolInputArgs = { properties: allProperties } + } else { + // Handle single tool instance + toolInputArgs = toolInstance.schema ? zodToJsonSchema(toolInstance.schema) : {} + } + + if (toolInputArgs && Object.keys(toolInputArgs).length > 0) { + delete toolInputArgs.$schema + } + + return Object.keys(toolInputArgs.properties || {}).map((item) => ({ + label: item, + name: item, + description: toolInputArgs.properties[item].description + })) + } catch (e) { + return [] + } + }, + async listRuntimeStateKeys(_: INodeData, options: ICommonObject): Promise { + const previousNodes = options.previousNodes as ICommonObject[] + const startAgentflowNode = previousNodes.find((node) => node.name === 'startAgentflow') + const state = startAgentflowNode?.inputs?.startState as ICommonObject[] + return state.map((item) => ({ label: item.key, name: item.key })) + } + } + + async run(nodeData: INodeData, input: string, options: ICommonObject): Promise { + const selectedTool = nodeData.inputs?.selectedTool as string + const selectedToolConfig = nodeData.inputs?.selectedToolConfig as ICommonObject + + const toolInputArgs = nodeData.inputs?.toolInputArgs as IToolInputArgs[] + const _toolUpdateState = nodeData.inputs?.toolUpdateState + + const state = options.agentflowRuntime?.state as ICommonObject + const chatId = options.chatId as string + const isLastNode = options.isLastNode as boolean + const isStreamable = isLastNode && options.sseStreamer !== undefined + + const abortController = options.abortController as AbortController + + // Update flow state if needed + let newState = { ...state } + if (_toolUpdateState && Array.isArray(_toolUpdateState) && 
_toolUpdateState.length > 0) { + newState = updateFlowState(state, _toolUpdateState) + } + + if (!selectedTool) { + throw new Error('Tool not selected') + } + + const nodeInstanceFilePath = options.componentNodes[selectedTool].filePath as string + const nodeModule = await import(nodeInstanceFilePath) + const newToolNodeInstance = new nodeModule.nodeClass() + const newNodeData = { + ...nodeData, + credential: selectedToolConfig['FLOWISE_CREDENTIAL_ID'], + inputs: { + ...nodeData.inputs, + ...selectedToolConfig + } + } + const toolInstance = (await newToolNodeInstance.init(newNodeData, '', options)) as Tool | Tool[] + + let toolCallArgs: Record = {} + for (const item of toolInputArgs) { + const variableName = item.inputArgName + const variableValue = item.inputArgValue + toolCallArgs[variableName] = variableValue + } + + const flowConfig = { + sessionId: options.sessionId, + chatId: options.chatId, + input: input, + state: options.agentflowRuntime?.state + } + + try { + let toolOutput: string + if (Array.isArray(toolInstance)) { + // Execute all tools and combine their outputs + const outputs = await Promise.all( + toolInstance.map((tool) => + //@ts-ignore + tool.call(toolCallArgs, { signal: abortController?.signal }, undefined, flowConfig) + ) + ) + toolOutput = outputs.join('\n') + } else { + //@ts-ignore + toolOutput = await toolInstance.call(toolCallArgs, { signal: abortController?.signal }, undefined, flowConfig) + } + + let parsedArtifacts + + // Extract artifacts if present + if (typeof toolOutput === 'string' && toolOutput.includes(ARTIFACTS_PREFIX)) { + const [output, artifact] = toolOutput.split(ARTIFACTS_PREFIX) + toolOutput = output + try { + parsedArtifacts = JSON.parse(artifact) + } catch (e) { + console.error('Error parsing artifacts from tool:', e) + } + } + + if (typeof toolOutput === 'object') { + toolOutput = JSON.stringify(toolOutput, null, 2) + } + + if (isStreamable) { + const sseStreamer: IServerSideEventStreamer = options.sseStreamer + 
sseStreamer.streamTokenEvent(chatId, toolOutput) + } + + // Process template variables in state + if (newState && Object.keys(newState).length > 0) { + for (const key in newState) { + if (newState[key].toString().includes('{{ output }}')) { + newState[key] = toolOutput + } + } + } + + const returnOutput = { + id: nodeData.id, + name: this.name, + input: { + toolInputArgs: toolInputArgs, + selectedTool: selectedTool + }, + output: { + content: toolOutput, + artifacts: parsedArtifacts + }, + state: newState + } + + return returnOutput + } catch (e) { + throw new Error(e) + } + } +} + +module.exports = { nodeClass: Tool_Agentflow } diff --git a/packages/components/nodes/agentflow/prompt.ts b/packages/components/nodes/agentflow/prompt.ts new file mode 100644 index 000000000..a5d9cd893 --- /dev/null +++ b/packages/components/nodes/agentflow/prompt.ts @@ -0,0 +1,75 @@ +export const DEFAULT_SUMMARIZER_TEMPLATE = `Progressively summarize the conversation provided and return a new summary. + +EXAMPLE: +Human: Why do you think artificial intelligence is a force for good? +AI: Because artificial intelligence will help humans reach their full potential. + +New summary: +The human asks what the AI thinks of artificial intelligence. The AI thinks artificial intelligence is a force for good because it will help humans reach their full potential. +END OF EXAMPLE + +Conversation: +{conversation} + +New summary:` + +export const DEFAULT_HUMAN_INPUT_DESCRIPTION = `Summarize the conversation between the user and the assistant, reiterate the last message from the assistant, and ask if user would like to proceed or if they have any feedback. +- Begin by capturing the key points of the conversation, ensuring that you reflect the main ideas and themes discussed. +- Then, clearly reproduce the last message sent by the assistant to maintain continuity. Make sure the whole message is reproduced. 
+- Finally, ask the user if they would like to proceed, or provide any feedback on the last assistant message + +## Output Format The output should be structured in three parts in text: + +- A summary of the conversation (1-3 sentences). +- The last assistant message (exactly as it appeared). +- Ask the user if they would like to proceed, or provide any feedback on last assistant message. No other explanation and elaboration is needed. +` + +export const DEFAULT_HUMAN_INPUT_DESCRIPTION_HTML = `

Summarize the conversation between the user and the assistant, reiterate the last message from the assistant, and ask if user would like to proceed or if they have any feedback.

+
    +
  • Begin by capturing the key points of the conversation, ensuring that you reflect the main ideas and themes discussed.
  • +
  • Then, clearly reproduce the last message sent by the assistant to maintain continuity. Make sure the whole message is reproduced.
  • +
  • Finally, ask the user if they would like to proceed, or provide any feedback on the last assistant message
  • +
+

Output Format The output should be structured in three parts in text:

+
    +
  • A summary of the conversation (1-3 sentences).
  • +
  • The last assistant message (exactly as it appeared).
  • +
  • Ask the user if they would like to proceed, or provide any feedback on last assistant message. No other explanation and elaboration is needed.
  • +
+` + +export const CONDITION_AGENT_SYSTEM_PROMPT = `You are part of a multi-agent system designed to make agent coordination and execution easy. Your task is to analyze the given input and select one matching scenario from a provided set of scenarios. If none of the scenarios match the input, you should return "default." + +- **Input**: A string representing the user's query or message. +- **Scenarios**: A list of predefined scenarios that relate to the input. +- **Instruction**: Determine if the input fits any of the scenarios. + +## Steps + +1. **Read the input string** and the list of scenarios. +2. **Analyze the content of the input** to identify its main topic or intention. +3. **Compare the input with each scenario**: + - If a scenario matches the main topic of the input, select that scenario. + - If no scenarios match, prepare to output "\`\`\`json\n{"output": "default"}\`\`\`" +4. **Output the result**: If a match is found, return the corresponding scenario in JSON; otherwise, return "\`\`\`json\n{"output": "default"}\`\`\`" + +## Output Format + +Output should be a JSON object that either names the matching scenario or returns "\`\`\`json\n{"output": "default"}\`\`\`" if no scenarios match. No explanation is needed. + +## Examples + +1. **Input**: {"input": "Hello", "scenarios": ["user is asking about AI", "default"], "instruction": "Your task is to check and see if user is asking topic about AI"} + **Output**: "\`\`\`json\n{"output": "default"}\`\`\`" + +2. **Input**: {"input": "What is AIGC?", "scenarios": ["user is asking about AI", "default"], "instruction": "Your task is to check and see if user is asking topic about AI"} + **Output**: "\`\`\`json\n{"output": "user is asking about AI"}\`\`\`" + +3. 
**Input**: {"input": "Can you explain deep learning?", "scenarios": ["user is interested in AI topics", "default"], "instruction": "Determine if the user is interested in learning about AI"} + **Output**: "\`\`\`json\n{"output": "user is interested in AI topics"}\`\`\`" + +## Note +- Ensure that the input scenarios align well with potential user queries for accurate matching +- DO NOT include anything other than the JSON in your response. +` diff --git a/packages/components/nodes/agentflow/utils.ts b/packages/components/nodes/agentflow/utils.ts new file mode 100644 index 000000000..8891e74eb --- /dev/null +++ b/packages/components/nodes/agentflow/utils.ts @@ -0,0 +1,407 @@ +import { BaseMessage, MessageContentImageUrl } from '@langchain/core/messages' +import { getImageUploads } from '../../src/multiModalUtils' +import { getFileFromStorage } from '../../src/storageUtils' +import { ICommonObject, IFileUpload } from '../../src/Interface' +import { BaseMessageLike } from '@langchain/core/messages' +import { IFlowState } from './Interface.Agentflow' +import { mapMimeTypeToInputField } from '../../src/utils' + +export const addImagesToMessages = async ( + options: ICommonObject, + allowImageUploads: boolean, + imageResolution?: 'auto' | 'low' | 'high' +): Promise => { + const imageContent: MessageContentImageUrl[] = [] + + if (allowImageUploads && options?.uploads && options?.uploads.length > 0) { + const imageUploads = getImageUploads(options.uploads) + for (const upload of imageUploads) { + let bf = upload.data + if (upload.type == 'stored-file') { + const contents = await getFileFromStorage(upload.name, options.chatflowid, options.chatId) + // as the image is stored in the server, read the file and convert it to base64 + bf = 'data:' + upload.mime + ';base64,' + contents.toString('base64') + + imageContent.push({ + type: 'image_url', + image_url: { + url: bf, + detail: imageResolution ?? 
'low' + } + }) + } else if (upload.type == 'url' && bf) { + imageContent.push({ + type: 'image_url', + image_url: { + url: bf, + detail: imageResolution ?? 'low' + } + }) + } + } + } + + return imageContent +} + +/** + * Process message array to replace stored file references with base64 image data + * @param messages Array of messages that may contain image references + * @param options Common options object containing chatflowid and chatId + * @returns Object containing updated messages array and transformed original messages + */ +export const processMessagesWithImages = async ( + messages: BaseMessageLike[], + options: ICommonObject +): Promise<{ + updatedMessages: BaseMessageLike[] + transformedMessages: BaseMessageLike[] +}> => { + if (!messages || !options.chatflowid || !options.chatId) { + return { + updatedMessages: messages, + transformedMessages: [] + } + } + + // Create a deep copy of the messages to avoid mutating the original + const updatedMessages = JSON.parse(JSON.stringify(messages)) + // Track which messages were transformed + const transformedMessages: BaseMessageLike[] = [] + + // Scan through all messages looking for stored-file references + for (let i = 0; i < updatedMessages.length; i++) { + const message = updatedMessages[i] + + // Skip non-user messages or messages without content + if (message.role !== 'user' || !message.content) { + continue + } + + // Handle array content (typically containing file references) + if (Array.isArray(message.content)) { + const imageContents: MessageContentImageUrl[] = [] + let hasImageReferences = false + + // Process each content item + for (const item of message.content) { + // Look for stored-file type items + if (item.type === 'stored-file' && item.name && item.mime.startsWith('image/')) { + hasImageReferences = true + try { + // Get file contents from storage + const contents = await getFileFromStorage(item.name, options.chatflowid, options.chatId) + + // Create base64 data URL + const base64Data = 
'data:' + item.mime + ';base64,' + contents.toString('base64') + + // Add to image content array + imageContents.push({ + type: 'image_url', + image_url: { + url: base64Data, + detail: item.imageResolution ?? 'low' + } + }) + } catch (error) { + console.error(`Failed to load image ${item.name}:`, error) + } + } + } + + // Replace the content with the image content array + if (imageContents.length > 0) { + // Store the original message before modifying + if (hasImageReferences) { + transformedMessages.push(JSON.parse(JSON.stringify(messages[i]))) + } + updatedMessages[i].content = imageContents + } + } + } + + return { + updatedMessages, + transformedMessages + } +} + +/** + * Replace base64 image data in messages with file references + * @param messages Array of messages that may contain base64 image data + * @param uniqueImageMessages Array of messages with file references for new images + * @param pastImageMessages Array of messages with file references for previous images + * @returns Updated messages array with file references instead of base64 data + */ +export const replaceBase64ImagesWithFileReferences = ( + messages: BaseMessageLike[], + uniqueImageMessages: BaseMessageLike[] = [], + pastImageMessages: BaseMessageLike[] = [] +): BaseMessageLike[] => { + // Create a deep copy to avoid mutating the original + const updatedMessages = JSON.parse(JSON.stringify(messages)) + + // Track positions in replacement arrays + let pastMessageIndex = 0 + let pastContentIndex = 0 + let uniqueMessageIndex = 0 + let uniqueContentIndex = 0 + + for (let i = 0; i < updatedMessages.length; i++) { + const message = updatedMessages[i] + if (message.content && Array.isArray(message.content)) { + for (let j = 0; j < message.content.length; j++) { + const item = message.content[j] + if (item.type === 'image_url') { + // Try past images first + let replacement = null + + if (pastMessageIndex < pastImageMessages.length) { + const pastMessage = pastImageMessages[pastMessageIndex] as 
BaseMessage | undefined + if (pastMessage && Array.isArray(pastMessage.content)) { + if (pastContentIndex < pastMessage.content.length) { + replacement = pastMessage.content[pastContentIndex] + pastContentIndex++ + + // Move to next message if we've used all content in current one + if (pastContentIndex >= pastMessage.content.length) { + pastMessageIndex++ + pastContentIndex = 0 + } + } else { + // Current message has no more content, move to next + pastMessageIndex++ + pastContentIndex = 0 + + // Try again with the next message + if (pastMessageIndex < pastImageMessages.length) { + const nextPastMessage = pastImageMessages[pastMessageIndex] as BaseMessage | undefined + if (nextPastMessage && Array.isArray(nextPastMessage.content) && nextPastMessage.content.length > 0) { + replacement = nextPastMessage.content[0] + pastContentIndex = 1 + } + } + } + } + } + + // Try unique images if no past image replacement found + if (!replacement && uniqueMessageIndex < uniqueImageMessages.length) { + const uniqueMessage = uniqueImageMessages[uniqueMessageIndex] as BaseMessage | undefined + if (uniqueMessage && Array.isArray(uniqueMessage.content)) { + if (uniqueContentIndex < uniqueMessage.content.length) { + replacement = uniqueMessage.content[uniqueContentIndex] + uniqueContentIndex++ + + // Move to next message if we've used all content in current one + if (uniqueContentIndex >= uniqueMessage.content.length) { + uniqueMessageIndex++ + uniqueContentIndex = 0 + } + } else { + // Current message has no more content, move to next + uniqueMessageIndex++ + uniqueContentIndex = 0 + + // Try again with the next message + if (uniqueMessageIndex < uniqueImageMessages.length) { + const nextUniqueMessage = uniqueImageMessages[uniqueMessageIndex] as BaseMessage | undefined + if ( + nextUniqueMessage && + Array.isArray(nextUniqueMessage.content) && + nextUniqueMessage.content.length > 0 + ) { + replacement = nextUniqueMessage.content[0] + uniqueContentIndex = 1 + } + } + } + } + } + + // 
Apply replacement if found + if (replacement) { + message.content[j] = { + ...replacement + } + } + } + } + } + } + + return updatedMessages +} + +/** + * Get unique image messages from uploads + * @param options Common options object containing uploads + * @param messages Array of messages to check for existing images + * @param modelConfig Model configuration object containing allowImageUploads and imageResolution + * @returns Object containing imageMessageWithFileRef and imageMessageWithBase64 + */ +export const getUniqueImageMessages = async ( + options: ICommonObject, + messages: BaseMessageLike[], + modelConfig?: ICommonObject +): Promise<{ imageMessageWithFileRef: BaseMessageLike; imageMessageWithBase64: BaseMessageLike } | undefined> => { + if (!options.uploads) return undefined + + // Get images from uploads + const images = await addImagesToMessages(options, modelConfig?.allowImageUploads, modelConfig?.imageResolution) + + // Filter out images that are already in previous messages + const uniqueImages = images.filter((image) => { + // Check if this image is already in any existing message + return !messages.some((msg: any) => { + // For multimodal content (arrays with image objects) + if (Array.isArray(msg.content)) { + return msg.content.some( + (item: any) => + // Compare by image URL/content for image objects + item.type === 'image_url' && image.type === 'image_url' && JSON.stringify(item) === JSON.stringify(image) + ) + } + // For direct comparison of simple content + return JSON.stringify(msg.content) === JSON.stringify(image) + }) + }) + + if (uniqueImages.length === 0) { + return undefined + } + + // Create messages with the original file references for storage/display + const imageMessageWithFileRef = { + role: 'user', + content: options.uploads.map((upload: IFileUpload) => ({ + type: upload.type, + name: upload.name, + mime: upload.mime, + imageResolution: modelConfig?.imageResolution + })) + } + + // Create messages with base64 data for the LLM 
+ const imageMessageWithBase64 = { + role: 'user', + content: uniqueImages + } + + return { + imageMessageWithFileRef, + imageMessageWithBase64 + } +} + +/** + * Get past chat history image messages + * @param pastChatHistory Array of past chat history messages + * @param options Common options object + * @returns Object containing updatedPastMessages and transformedPastMessages + */ +export const getPastChatHistoryImageMessages = async ( + pastChatHistory: BaseMessageLike[], + options: ICommonObject +): Promise<{ updatedPastMessages: BaseMessageLike[]; transformedPastMessages: BaseMessageLike[] }> => { + const chatHistory = [] + const transformedPastMessages = [] + + for (let i = 0; i < pastChatHistory.length; i++) { + const message = pastChatHistory[i] as BaseMessage & { role: string } + const messageRole = message.role || 'user' + if (message.additional_kwargs && message.additional_kwargs.fileUploads) { + // example: [{"type":"stored-file","name":"0_DiXc4ZklSTo3M8J4.jpg","mime":"image/jpeg"}] + const fileUploads = message.additional_kwargs.fileUploads + try { + let messageWithFileUploads = '' + const uploads: IFileUpload[] = typeof fileUploads === 'string' ? 
JSON.parse(fileUploads) : fileUploads + const imageContents: MessageContentImageUrl[] = [] + for (const upload of uploads) { + if (upload.type === 'stored-file' && upload.mime.startsWith('image/')) { + const fileData = await getFileFromStorage(upload.name, options.chatflowid, options.chatId) + // as the image is stored in the server, read the file and convert it to base64 + const bf = 'data:' + upload.mime + ';base64,' + fileData.toString('base64') + + imageContents.push({ + type: 'image_url', + image_url: { + url: bf + } + }) + } else if (upload.type === 'url' && upload.mime.startsWith('image') && upload.data) { + imageContents.push({ + type: 'image_url', + image_url: { + url: upload.data + } + }) + } else if (upload.type === 'stored-file:full') { + const fileLoaderNodeModule = await import('../../nodes/documentloaders/File/File') + // @ts-ignore + const fileLoaderNodeInstance = new fileLoaderNodeModule.nodeClass() + const nodeOptions = { + retrieveAttachmentChatId: true, + chatflowid: options.chatflowid, + chatId: options.chatId + } + let fileInputFieldFromMimeType = 'txtFile' + fileInputFieldFromMimeType = mapMimeTypeToInputField(upload.mime) + const nodeData = { + inputs: { + [fileInputFieldFromMimeType]: `FILE-STORAGE::${JSON.stringify([upload.name])}` + } + } + const documents: string = await fileLoaderNodeInstance.init(nodeData, '', nodeOptions) + messageWithFileUploads += `${documents}\n\n` + } + } + const messageContent = messageWithFileUploads ? 
`${messageWithFileUploads}\n\n${message.content}` : message.content + if (imageContents.length > 0) { + chatHistory.push({ + role: messageRole, + content: imageContents + }) + transformedPastMessages.push({ + role: messageRole, + content: [...JSON.parse((pastChatHistory[i] as any).additional_kwargs.fileUploads)] + }) + } + chatHistory.push({ + role: messageRole, + content: messageContent + }) + } catch (e) { + // failed to parse fileUploads, continue with text only + chatHistory.push({ + role: messageRole, + content: message.content + }) + } + } else { + chatHistory.push({ + role: messageRole, + content: message.content + }) + } + } + return { + updatedPastMessages: chatHistory, + transformedPastMessages + } +} + +/** + * Updates the flow state with new values + */ +export const updateFlowState = (state: ICommonObject, llmUpdateState: IFlowState[]): ICommonObject => { + let newFlowState: Record = {} + for (const state of llmUpdateState) { + newFlowState[state.key] = state.value + } + + return { + ...state, + ...newFlowState + } +} diff --git a/packages/components/nodes/agents/OpenAIAssistant/OpenAIAssistant.ts b/packages/components/nodes/agents/OpenAIAssistant/OpenAIAssistant.ts index f7c496c35..f8886983d 100644 --- a/packages/components/nodes/agents/OpenAIAssistant/OpenAIAssistant.ts +++ b/packages/components/nodes/agents/OpenAIAssistant/OpenAIAssistant.ts @@ -18,6 +18,7 @@ import { AnalyticHandler } from '../../../src/handler' import { Moderation, checkInputs, streamResponse } from '../../moderation/Moderation' import { formatResponse } from '../../outputparsers/OutputParserHelpers' import { addSingleFileToStorage } from '../../../src/storageUtils' +import { DynamicStructuredTool } from '../../tools/OpenAPIToolkit/core' const lenticularBracketRegex = /【[^】]*】/g const imageRegex = /]*\/>/g @@ -223,7 +224,7 @@ class OpenAIAssistant_Agents implements INode { const openai = new OpenAI({ apiKey: openAIApiKey }) // Start analytics - const analyticHandlers = new 
AnalyticHandler(nodeData, options) + const analyticHandlers = AnalyticHandler.getInstance(nodeData, options) await analyticHandlers.init() const parentIds = await analyticHandlers.onChainStart('OpenAIAssistant', input) @@ -504,7 +505,6 @@ class OpenAIAssistant_Agents implements INode { toolCallId: item.id }) }) - const submitToolOutputs = [] for (let i = 0; i < actions.length; i += 1) { const tool = tools.find((tool: any) => tool.name === actions[i].tool) @@ -539,30 +539,23 @@ class OpenAIAssistant_Agents implements INode { } try { - const stream = openai.beta.threads.runs.submitToolOutputsStream(threadId, runThreadId, { - tool_outputs: submitToolOutputs + await handleToolSubmission({ + openai, + threadId, + runThreadId, + submitToolOutputs, + tools, + analyticHandlers, + parentIds, + llmIds, + sseStreamer, + chatId, + options, + input, + usedTools, + text, + isStreamingStarted }) - - for await (const event of stream) { - if (event.event === 'thread.message.delta') { - const chunk = event.data.delta.content?.[0] - if (chunk && 'text' in chunk && chunk.text?.value) { - text += chunk.text.value - if (!isStreamingStarted) { - isStreamingStarted = true - if (sseStreamer) { - sseStreamer.streamStartEvent(chatId, chunk.text.value) - } - } - if (sseStreamer) { - sseStreamer.streamTokenEvent(chatId, chunk.text.value) - } - } - } - } - if (sseStreamer) { - sseStreamer.streamUsedToolsEvent(chatId, usedTools) - } } catch (error) { console.error('Error submitting tool outputs:', error) await openai.beta.threads.runs.cancel(threadId, runThreadId) @@ -634,7 +627,6 @@ class OpenAIAssistant_Agents implements INode { toolCallId: item.id }) }) - const submitToolOutputs = [] for (let i = 0; i < actions.length; i += 1) { const tool = tools.find((tool: any) => tool.name === actions[i].tool) @@ -751,7 +743,7 @@ class OpenAIAssistant_Agents implements INode { state = await promise(threadId, newRunThread.id) } else { const errMsg = `Error processing thread: ${state}, Thread ID: 
${threadId}` - await analyticHandlers.onChainError(parentIds, errMsg) + await analyticHandlers.onChainError(parentIds, errMsg, true) throw new Error(errMsg) } } @@ -895,15 +887,212 @@ const downloadFile = async (openAIApiKey: string, fileObj: any, fileName: string } } +interface ToolSubmissionParams { + openai: OpenAI + threadId: string + runThreadId: string + submitToolOutputs: any[] + tools: any[] + analyticHandlers: AnalyticHandler + parentIds: ICommonObject + llmIds: ICommonObject + sseStreamer: IServerSideEventStreamer + chatId: string + options: ICommonObject + input: string + usedTools: IUsedTool[] + text: string + isStreamingStarted: boolean +} + +interface ToolSubmissionResult { + text: string + isStreamingStarted: boolean +} + +async function handleToolSubmission(params: ToolSubmissionParams): Promise { + const { + openai, + threadId, + runThreadId, + submitToolOutputs, + tools, + analyticHandlers, + parentIds, + llmIds, + sseStreamer, + chatId, + options, + input, + usedTools + } = params + + let updatedText = params.text + let updatedIsStreamingStarted = params.isStreamingStarted + + const stream = openai.beta.threads.runs.submitToolOutputsStream(threadId, runThreadId, { + tool_outputs: submitToolOutputs + }) + + try { + for await (const event of stream) { + if (event.event === 'thread.message.delta') { + const chunk = event.data.delta.content?.[0] + if (chunk && 'text' in chunk && chunk.text?.value) { + updatedText += chunk.text.value + if (!updatedIsStreamingStarted) { + updatedIsStreamingStarted = true + if (sseStreamer) { + sseStreamer.streamStartEvent(chatId, chunk.text.value) + } + } + if (sseStreamer) { + sseStreamer.streamTokenEvent(chatId, chunk.text.value) + } + } + } else if (event.event === 'thread.run.requires_action') { + if (event.data.required_action?.submit_tool_outputs.tool_calls) { + const actions: ICommonObject[] = [] + + event.data.required_action.submit_tool_outputs.tool_calls.forEach((item) => { + const functionCall = 
item.function + let args = {} + try { + args = JSON.parse(functionCall.arguments) + } catch (e) { + console.error('Error parsing arguments, default to empty object') + } + actions.push({ + tool: functionCall.name, + toolInput: args, + toolCallId: item.id + }) + }) + + const nestedToolOutputs = [] + for (let i = 0; i < actions.length; i += 1) { + const tool = tools.find((tool: any) => tool.name === actions[i].tool) + if (!tool) continue + + const toolIds = await analyticHandlers.onToolStart(tool.name, actions[i].toolInput, parentIds) + + try { + const toolOutput = await tool.call(actions[i].toolInput, undefined, undefined, { + sessionId: threadId, + chatId: options.chatId, + input + }) + await analyticHandlers.onToolEnd(toolIds, toolOutput) + nestedToolOutputs.push({ + tool_call_id: actions[i].toolCallId, + output: toolOutput + }) + usedTools.push({ + tool: tool.name, + toolInput: actions[i].toolInput, + toolOutput + }) + } catch (e) { + await analyticHandlers.onToolEnd(toolIds, e) + console.error('Error executing tool', e) + throw new Error(`Error executing tool. Tool: ${tool.name}. Thread ID: ${threadId}. Run ID: ${runThreadId}`) + } + } + + // Recursively handle nested tool submissions + const result = await handleToolSubmission({ + openai, + threadId, + runThreadId, + submitToolOutputs: nestedToolOutputs, + tools, + analyticHandlers, + parentIds, + llmIds, + sseStreamer, + chatId, + options, + input, + usedTools, + text: updatedText, + isStreamingStarted: updatedIsStreamingStarted + }) + updatedText = result.text + updatedIsStreamingStarted = result.isStreamingStarted + } + } + } + + if (sseStreamer) { + sseStreamer.streamUsedToolsEvent(chatId, usedTools) + } + + return { + text: updatedText, + isStreamingStarted: updatedIsStreamingStarted + } + } catch (error) { + console.error('Error submitting tool outputs:', error) + await openai.beta.threads.runs.cancel(threadId, runThreadId) + + const errMsg = `Error submitting tool outputs. Thread ID: ${threadId}. 
Run ID: ${runThreadId}` + + await analyticHandlers.onLLMError(llmIds, errMsg) + await analyticHandlers.onChainError(parentIds, errMsg, true) + + throw new Error(errMsg) + } +} + +interface JSONSchema { + type?: string + properties?: Record + additionalProperties?: boolean + required?: string[] + [key: string]: any +} + const formatToOpenAIAssistantTool = (tool: any): OpenAI.Beta.FunctionTool => { - return { + const parameters = zodToJsonSchema(tool.schema) as JSONSchema + + // For strict tools, we need to: + // 1. Set additionalProperties to false + // 2. Make all parameters required + // 3. Set the strict flag + if (tool instanceof DynamicStructuredTool && tool.isStrict()) { + // Get all property names from the schema + const properties = parameters.properties || {} + const allPropertyNames = Object.keys(properties) + + parameters.additionalProperties = false + parameters.required = allPropertyNames + + // Handle nested objects + for (const [_, prop] of Object.entries(properties)) { + if (prop.type === 'object') { + prop.additionalProperties = false + if (prop.properties) { + prop.required = Object.keys(prop.properties) + } + } + } + } + + const functionTool: OpenAI.Beta.FunctionTool = { type: 'function', function: { name: tool.name, description: tool.description, - parameters: zodToJsonSchema(tool.schema) + parameters } } + + // Add strict property if the tool is marked as strict + if (tool instanceof DynamicStructuredTool && tool.isStrict()) { + ;(functionTool.function as any).strict = true + } + + return functionTool } module.exports = { nodeClass: OpenAIAssistant_Agents } diff --git a/packages/components/nodes/agents/ToolAgent/ToolAgent.ts b/packages/components/nodes/agents/ToolAgent/ToolAgent.ts index c2cf38958..4dce1a460 100644 --- a/packages/components/nodes/agents/ToolAgent/ToolAgent.ts +++ b/packages/components/nodes/agents/ToolAgent/ToolAgent.ts @@ -24,7 +24,7 @@ import { IUsedTool, IVisionChatModal } from '../../../src/Interface' -import { 
ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler' +import { ConsoleCallbackHandler, CustomChainHandler, CustomStreamingHandler, additionalCallbacks } from '../../../src/handler' import { AgentExecutor, ToolCallingAgentOutputParser } from '../../../src/agents' import { Moderation, checkInputs, streamResponse } from '../../moderation/Moderation' import { formatResponse } from '../../outputparsers/OutputParserHelpers' @@ -101,6 +101,15 @@ class ToolAgent_Agents implements INode { type: 'number', optional: true, additionalParams: true + }, + { + label: 'Enable Detailed Streaming', + name: 'enableDetailedStreaming', + type: 'boolean', + default: false, + description: 'Stream detailed intermediate steps during agent execution', + optional: true, + additionalParams: true } ] this.sessionId = fields?.sessionId @@ -113,6 +122,7 @@ class ToolAgent_Agents implements INode { async run(nodeData: INodeData, input: string, options: ICommonObject): Promise { const memory = nodeData.inputs?.memory as FlowiseMemory const moderations = nodeData.inputs?.inputModeration as Moderation[] + const enableDetailedStreaming = nodeData.inputs?.enableDetailedStreaming as boolean const shouldStreamResponse = options.shouldStreamResponse const sseStreamer: IServerSideEventStreamer = options.sseStreamer as IServerSideEventStreamer @@ -136,6 +146,13 @@ class ToolAgent_Agents implements INode { const loggerHandler = new ConsoleCallbackHandler(options.logger) const callbacks = await additionalCallbacks(nodeData, options) + // Add custom streaming handler if detailed streaming is enabled + let customStreamingHandler = null + + if (enableDetailedStreaming && shouldStreamResponse) { + customStreamingHandler = new CustomStreamingHandler(sseStreamer, chatId) + } + let res: ChainValues = {} let sourceDocuments: ICommonObject[] = [] let usedTools: IUsedTool[] = [] @@ -143,7 +160,14 @@ class ToolAgent_Agents implements INode { if (shouldStreamResponse) { const 
handler = new CustomChainHandler(sseStreamer, chatId) - res = await executor.invoke({ input }, { callbacks: [loggerHandler, handler, ...callbacks] }) + const allCallbacks = [loggerHandler, handler, ...callbacks] + + // Add detailed streaming handler if enabled + if (enableDetailedStreaming && customStreamingHandler) { + allCallbacks.push(customStreamingHandler) + } + + res = await executor.invoke({ input }, { callbacks: allCallbacks }) if (res.sourceDocuments) { if (sseStreamer) { sseStreamer.streamSourceDocumentsEvent(chatId, flatten(res.sourceDocuments)) @@ -174,7 +198,14 @@ class ToolAgent_Agents implements INode { } } } else { - res = await executor.invoke({ input }, { callbacks: [loggerHandler, ...callbacks] }) + const allCallbacks = [loggerHandler, ...callbacks] + + // Add detailed streaming handler if enabled + if (enableDetailedStreaming && customStreamingHandler) { + allCallbacks.push(customStreamingHandler) + } + + res = await executor.invoke({ input }, { callbacks: allCallbacks }) if (res.sourceDocuments) { sourceDocuments = res.sourceDocuments } diff --git a/packages/components/nodes/analytic/Opik/Opik.ts b/packages/components/nodes/analytic/Opik/Opik.ts new file mode 100644 index 000000000..c620bdcc1 --- /dev/null +++ b/packages/components/nodes/analytic/Opik/Opik.ts @@ -0,0 +1,33 @@ +import { INode, INodeParams } from '../../../src/Interface' + +class Opik_Analytic implements INode { + label: string + name: string + version: number + description: string + type: string + icon: string + category: string + baseClasses: string[] + inputs?: INodeParams[] + credential: INodeParams + + constructor() { + this.label = 'Opik' + this.name = 'opik' + this.version = 1.0 + this.type = 'Opik' + this.icon = 'opik.png' + this.category = 'Analytic' + this.baseClasses = [this.type] + this.inputs = [] + this.credential = { + label: 'Connect Credential', + name: 'credential', + type: 'credential', + credentialNames: ['opikApi'] + } + } +} + +module.exports = { nodeClass: 
Opik_Analytic } diff --git a/packages/components/nodes/analytic/Opik/opik.png b/packages/components/nodes/analytic/Opik/opik.png new file mode 100644 index 000000000..20de0c39d Binary files /dev/null and b/packages/components/nodes/analytic/Opik/opik.png differ diff --git a/packages/components/nodes/cache/GoogleGenerativeAIContextCache/FlowiseGoogleAICacheManager.ts b/packages/components/nodes/cache/GoogleGenerativeAIContextCache/FlowiseGoogleAICacheManager.ts new file mode 100644 index 000000000..dee516064 --- /dev/null +++ b/packages/components/nodes/cache/GoogleGenerativeAIContextCache/FlowiseGoogleAICacheManager.ts @@ -0,0 +1,44 @@ +import type { CachedContentBase, CachedContent, Content } from '@google/generative-ai' +import { GoogleAICacheManager as GoogleAICacheManagerBase } from '@google/generative-ai/server' +import hash from 'object-hash' + +type CacheContentOptions = Omit & { contents?: Content[] } + +export class GoogleAICacheManager extends GoogleAICacheManagerBase { + private ttlSeconds: number + private cachedContents: Map = new Map() + + setTtlSeconds(ttlSeconds: number) { + this.ttlSeconds = ttlSeconds + } + + async lookup(options: CacheContentOptions): Promise { + const { model, tools, contents } = options + if (!contents?.length) { + return undefined + } + const hashKey = hash({ + model, + tools, + contents + }) + if (this.cachedContents.has(hashKey)) { + return this.cachedContents.get(hashKey) + } + const { cachedContents } = await this.list() + const cachedContent = (cachedContents ?? 
[]).find((cache) => cache.displayName === hashKey) + if (cachedContent) { + this.cachedContents.set(hashKey, cachedContent) + return cachedContent + } + const res = await this.create({ + ...(options as CachedContentBase), + displayName: hashKey, + ttlSeconds: this.ttlSeconds + }) + this.cachedContents.set(hashKey, res) + return res + } +} + +export default GoogleAICacheManager diff --git a/packages/components/nodes/cache/GoogleGenerativeAIContextCache/GoogleGemini.svg b/packages/components/nodes/cache/GoogleGenerativeAIContextCache/GoogleGemini.svg new file mode 100644 index 000000000..53b497fa1 --- /dev/null +++ b/packages/components/nodes/cache/GoogleGenerativeAIContextCache/GoogleGemini.svg @@ -0,0 +1,34 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/packages/components/nodes/cache/GoogleGenerativeAIContextCache/GoogleGenerativeAIContextCache.ts b/packages/components/nodes/cache/GoogleGenerativeAIContextCache/GoogleGenerativeAIContextCache.ts new file mode 100644 index 000000000..9e6d28317 --- /dev/null +++ b/packages/components/nodes/cache/GoogleGenerativeAIContextCache/GoogleGenerativeAIContextCache.ts @@ -0,0 +1,53 @@ +import { getBaseClasses, getCredentialData, getCredentialParam, ICommonObject, INode, INodeData, INodeParams } from '../../../src' +import FlowiseGoogleAICacheManager from './FlowiseGoogleAICacheManager' + +class GoogleGenerativeAIContextCache implements INode { + label: string + name: string + version: number + description: string + type: string + icon: string + category: string + baseClasses: string[] + inputs: INodeParams[] + credential: INodeParams + + constructor() { + this.label = 'Google GenAI Context Cache' + this.name = 'googleGenerativeAIContextCache' + this.version = 1.0 + this.type = 'GoogleAICacheManager' + this.description = 'Large context cache for Google Gemini large language models' + this.icon = 'GoogleGemini.svg' + this.category = 'Cache' + this.baseClasses = [this.type, 
...getBaseClasses(FlowiseGoogleAICacheManager)] + this.inputs = [ + { + label: 'TTL', + name: 'ttl', + type: 'number', + default: 60 * 60 * 24 * 30 + } + ] + this.credential = { + label: 'Connect Credential', + name: 'credential', + type: 'credential', + credentialNames: ['googleGenerativeAI'], + optional: false, + description: 'Google Generative AI credential.' + } + } + + async init(nodeData: INodeData, _: string, options: ICommonObject): Promise { + const ttl = nodeData.inputs?.ttl as number + const credentialData = await getCredentialData(nodeData.credential ?? '', options) + const apiKey = getCredentialParam('googleGenerativeAPIKey', credentialData, nodeData) + const manager = new FlowiseGoogleAICacheManager(apiKey) + manager.setTtlSeconds(ttl) + return manager + } +} + +module.exports = { nodeClass: GoogleGenerativeAIContextCache } diff --git a/packages/components/nodes/cache/RedisCache/RedisCache.ts b/packages/components/nodes/cache/RedisCache/RedisCache.ts index 6646575f8..3821822a6 100644 --- a/packages/components/nodes/cache/RedisCache/RedisCache.ts +++ b/packages/components/nodes/cache/RedisCache/RedisCache.ts @@ -126,10 +126,19 @@ const getRedisClient = async (nodeData: INodeData, options: ICommonObject) => { host, username, password, + keepAlive: + process.env.REDIS_KEEP_ALIVE && !isNaN(parseInt(process.env.REDIS_KEEP_ALIVE, 10)) + ? parseInt(process.env.REDIS_KEEP_ALIVE, 10) + : undefined, ...tlsOptions }) } else { - client = new Redis(redisUrl) + client = new Redis(redisUrl, { + keepAlive: + process.env.REDIS_KEEP_ALIVE && !isNaN(parseInt(process.env.REDIS_KEEP_ALIVE, 10)) + ? 
parseInt(process.env.REDIS_KEEP_ALIVE, 10) + : undefined + }) } return client diff --git a/packages/components/nodes/cache/RedisCache/RedisEmbeddingsCache.ts b/packages/components/nodes/cache/RedisCache/RedisEmbeddingsCache.ts index 1e4ed86c8..c46a9921b 100644 --- a/packages/components/nodes/cache/RedisCache/RedisEmbeddingsCache.ts +++ b/packages/components/nodes/cache/RedisCache/RedisEmbeddingsCache.ts @@ -83,10 +83,19 @@ class RedisEmbeddingsCache implements INode { host, username, password, + keepAlive: + process.env.REDIS_KEEP_ALIVE && !isNaN(parseInt(process.env.REDIS_KEEP_ALIVE, 10)) + ? parseInt(process.env.REDIS_KEEP_ALIVE, 10) + : undefined, ...tlsOptions }) } else { - client = new Redis(redisUrl) + client = new Redis(redisUrl, { + keepAlive: + process.env.REDIS_KEEP_ALIVE && !isNaN(parseInt(process.env.REDIS_KEEP_ALIVE, 10)) + ? parseInt(process.env.REDIS_KEEP_ALIVE, 10) + : undefined + }) } ttl ??= '3600' diff --git a/packages/components/nodes/chains/ApiChain/postCore.ts b/packages/components/nodes/chains/ApiChain/postCore.ts index d7ac7cb14..0a40fb090 100644 --- a/packages/components/nodes/chains/ApiChain/postCore.ts +++ b/packages/components/nodes/chains/ApiChain/postCore.ts @@ -92,6 +92,21 @@ export class APIChain extends BaseChain implements APIChainInput { const { url, data } = JSON.parse(api_url_body) + // Validate request is not to internal/private networks + const urlObj = new URL(url) + const hostname = urlObj.hostname + + if ( + hostname === 'localhost' || + hostname === '127.0.0.1' || + hostname.startsWith('192.168.') || + hostname.startsWith('10.') || + hostname.startsWith('172.16.') || + hostname.includes('internal') + ) { + throw new Error('Access to internal networks is not allowed') + } + const res = await fetch(url, { method: 'POST', headers: this.headers, diff --git a/packages/components/nodes/chatmodels/AWSBedrock/FlowiseAWSChatBedrock.ts b/packages/components/nodes/chatmodels/AWSBedrock/FlowiseAWSChatBedrock.ts index 
4dec41fd1..8a2d2cda3 100644 --- a/packages/components/nodes/chatmodels/AWSBedrock/FlowiseAWSChatBedrock.ts +++ b/packages/components/nodes/chatmodels/AWSBedrock/FlowiseAWSChatBedrock.ts @@ -27,7 +27,7 @@ export class BedrockChat extends LCBedrockChat implements IVisionChatModal { } setVisionModel(): void { - if (!this.model.startsWith('claude-3')) { + if (!this.model.includes('claude-3')) { this.model = DEFAULT_IMAGE_MODEL this.maxTokens = this.configuredMaxToken ? this.configuredMaxToken : DEFAULT_IMAGE_MAX_TOKEN } diff --git a/packages/components/nodes/chatmodels/AzureChatOpenAI/AzureChatOpenAI.ts b/packages/components/nodes/chatmodels/AzureChatOpenAI/AzureChatOpenAI.ts index d96028c15..02834a105 100644 --- a/packages/components/nodes/chatmodels/AzureChatOpenAI/AzureChatOpenAI.ts +++ b/packages/components/nodes/chatmodels/AzureChatOpenAI/AzureChatOpenAI.ts @@ -237,7 +237,7 @@ class AzureChatOpenAI_ChatModels implements INode { console.error('Error parsing base options', exception) } } - if (modelName === 'o3-mini') { + if (modelName === 'o3-mini' || modelName.includes('o1')) { delete obj.temperature } if ((modelName.includes('o1') || modelName.includes('o3')) && reasoningEffort) { diff --git a/packages/components/nodes/chatmodels/ChatAnthropic/ChatAnthropic.ts b/packages/components/nodes/chatmodels/ChatAnthropic/ChatAnthropic.ts index ab741e3fd..7204801f9 100644 --- a/packages/components/nodes/chatmodels/ChatAnthropic/ChatAnthropic.ts +++ b/packages/components/nodes/chatmodels/ChatAnthropic/ChatAnthropic.ts @@ -21,7 +21,7 @@ class ChatAnthropic_ChatModels implements INode { constructor() { this.label = 'ChatAnthropic' this.name = 'chatAnthropic' - this.version = 7.0 + this.version = 8.0 this.type = 'ChatAnthropic' this.icon = 'Anthropic.svg' this.category = 'Chat Models' @@ -87,6 +87,24 @@ class ChatAnthropic_ChatModels implements INode { optional: true, additionalParams: true }, + { + label: 'Extended Thinking', + name: 'extendedThinking', + type: 'boolean', + 
description: 'Enable extended thinking for reasoning model such as Claude Sonnet 3.7', + optional: true, + additionalParams: true + }, + { + label: 'Budget Tokens', + name: 'budgetTokens', + type: 'number', + step: 1, + default: 1024, + description: 'Maximum number of tokens Claude is allowed use for its internal reasoning process', + optional: true, + additionalParams: true + }, { label: 'Allow Image Uploads', name: 'allowImageUploads', @@ -114,6 +132,8 @@ class ChatAnthropic_ChatModels implements INode { const topK = nodeData.inputs?.topK as string const streaming = nodeData.inputs?.streaming as boolean const cache = nodeData.inputs?.cache as BaseCache + const extendedThinking = nodeData.inputs?.extendedThinking as boolean + const budgetTokens = nodeData.inputs?.budgetTokens as string const credentialData = await getCredentialData(nodeData.credential ?? '', options) const anthropicApiKey = getCredentialParam('anthropicApiKey', credentialData, nodeData) @@ -131,6 +151,13 @@ class ChatAnthropic_ChatModels implements INode { if (topP) obj.topP = parseFloat(topP) if (topK) obj.topK = parseFloat(topK) if (cache) obj.cache = cache + if (extendedThinking) { + obj.thinking = { + type: 'enabled', + budget_tokens: parseInt(budgetTokens, 10) + } + delete obj.temperature + } const multiModalOption: IMultiModalOption = { image: { diff --git a/packages/components/nodes/chatmodels/ChatGoogleGenerativeAI/ChatGoogleGenerativeAI.ts b/packages/components/nodes/chatmodels/ChatGoogleGenerativeAI/ChatGoogleGenerativeAI.ts index 3b13ab271..9d15abba6 100644 --- a/packages/components/nodes/chatmodels/ChatGoogleGenerativeAI/ChatGoogleGenerativeAI.ts +++ b/packages/components/nodes/chatmodels/ChatGoogleGenerativeAI/ChatGoogleGenerativeAI.ts @@ -5,6 +5,7 @@ import { ICommonObject, IMultiModalOption, INode, INodeData, INodeOptionsValue, import { convertMultiOptionsToStringArray, getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils' import { getModels, MODEL_TYPE } 
from '../../../src/modelLoader' import { ChatGoogleGenerativeAI, GoogleGenerativeAIChatInput } from './FlowiseChatGoogleGenerativeAI' +import type FlowiseGoogleAICacheManager from '../../cache/GoogleGenerativeAIContextCache/FlowiseGoogleAICacheManager' class GoogleGenerativeAI_ChatModels implements INode { label: string @@ -42,6 +43,12 @@ class GoogleGenerativeAI_ChatModels implements INode { type: 'BaseCache', optional: true }, + { + label: 'Context Cache', + name: 'contextCache', + type: 'GoogleAICacheManager', + optional: true + }, { label: 'Model Name', name: 'modelName', @@ -55,7 +62,8 @@ class GoogleGenerativeAI_ChatModels implements INode { type: 'string', placeholder: 'gemini-1.5-pro-exp-0801', description: 'Custom model name to use. If provided, it will override the model selected', - additionalParams: true + additionalParams: true, + optional: true }, { label: 'Temperature', @@ -156,6 +164,14 @@ class GoogleGenerativeAI_ChatModels implements INode { optional: true, additionalParams: true }, + { + label: 'Base URL', + name: 'baseUrl', + type: 'string', + description: 'Base URL for the API. 
Leave empty to use the default.', + optional: true, + additionalParams: true + }, { label: 'Allow Image Uploads', name: 'allowImageUploads', @@ -188,7 +204,9 @@ class GoogleGenerativeAI_ChatModels implements INode { const harmCategory = nodeData.inputs?.harmCategory as string const harmBlockThreshold = nodeData.inputs?.harmBlockThreshold as string const cache = nodeData.inputs?.cache as BaseCache + const contextCache = nodeData.inputs?.contextCache as FlowiseGoogleAICacheManager const streaming = nodeData.inputs?.streaming as boolean + const baseUrl = nodeData.inputs?.baseUrl as string | undefined const allowImageUploads = nodeData.inputs?.allowImageUploads as boolean @@ -203,6 +221,7 @@ class GoogleGenerativeAI_ChatModels implements INode { if (topK) obj.topK = parseFloat(topK) if (cache) obj.cache = cache if (temperature) obj.temperature = parseFloat(temperature) + if (baseUrl) obj.baseUrl = baseUrl // Safety Settings let harmCategories: string[] = convertMultiOptionsToStringArray(harmCategory) @@ -225,6 +244,7 @@ class GoogleGenerativeAI_ChatModels implements INode { const model = new ChatGoogleGenerativeAI(nodeData.id, obj) model.setMultiModalOption(multiModalOption) + if (contextCache) model.setContextCache(contextCache) return model } diff --git a/packages/components/nodes/chatmodels/ChatGoogleGenerativeAI/FlowiseChatGoogleGenerativeAI.ts b/packages/components/nodes/chatmodels/ChatGoogleGenerativeAI/FlowiseChatGoogleGenerativeAI.ts index c26bf5a27..4824810eb 100644 --- a/packages/components/nodes/chatmodels/ChatGoogleGenerativeAI/FlowiseChatGoogleGenerativeAI.ts +++ b/packages/components/nodes/chatmodels/ChatGoogleGenerativeAI/FlowiseChatGoogleGenerativeAI.ts @@ -25,6 +25,7 @@ import { StructuredToolInterface } from '@langchain/core/tools' import { isStructuredTool } from '@langchain/core/utils/function_calling' import { zodToJsonSchema } from 'zod-to-json-schema' import { BaseLanguageModelCallOptions } from '@langchain/core/language_models/base' +import type 
FlowiseGoogleAICacheManager from '../../cache/GoogleGenerativeAIContextCache/FlowiseGoogleAICacheManager' const DEFAULT_IMAGE_MAX_TOKEN = 8192 const DEFAULT_IMAGE_MODEL = 'gemini-1.5-flash-latest' @@ -80,12 +81,16 @@ class LangchainChatGoogleGenerativeAI apiKey?: string + baseUrl?: string + streaming = false streamUsage = true private client: GenerativeModel + private contextCache?: FlowiseGoogleAICacheManager + get _isMultimodalModel() { return this.modelName.includes('vision') || this.modelName.startsWith('gemini-1.5') } @@ -147,20 +152,33 @@ class LangchainChatGoogleGenerativeAI this.getClient() } - getClient(tools?: Tool[]) { - this.client = new GenerativeAI(this.apiKey ?? '').getGenerativeModel({ - model: this.modelName, - tools, - safetySettings: this.safetySettings as SafetySetting[], - generationConfig: { - candidateCount: 1, - stopSequences: this.stopSequences, - maxOutputTokens: this.maxOutputTokens, - temperature: this.temperature, - topP: this.topP, - topK: this.topK + async getClient(prompt?: Content[], tools?: Tool[]) { + this.client = new GenerativeAI(this.apiKey ?? '').getGenerativeModel( + { + model: this.modelName, + tools, + safetySettings: this.safetySettings as SafetySetting[], + generationConfig: { + candidateCount: 1, + stopSequences: this.stopSequences, + maxOutputTokens: this.maxOutputTokens, + temperature: this.temperature, + topP: this.topP, + topK: this.topK + } + }, + { + baseUrl: this.baseUrl } - }) + ) + if (this.contextCache) { + const cachedContent = await this.contextCache.lookup({ + contents: prompt ? 
[{ ...prompt[0], parts: prompt[0].parts.slice(0, 1) }] : [], + model: this.modelName, + tools + }) + this.client.cachedContent = cachedContent as any + } } _combineLLMOutput() { @@ -209,6 +227,16 @@ class LangchainChatGoogleGenerativeAI } } + setContextCache(contextCache: FlowiseGoogleAICacheManager): void { + this.contextCache = contextCache + } + + async getNumTokens(prompt: BaseMessage[]) { + const contents = convertBaseMessagesToContent(prompt, this._isMultimodalModel) + const { totalTokens } = await this.client.countTokens({ contents }) + return totalTokens + } + async _generateNonStreaming( prompt: Content[], options: this['ParsedCallOptions'], @@ -220,9 +248,9 @@ class LangchainChatGoogleGenerativeAI this.convertFunctionResponse(prompt) if (tools.length > 0) { - this.getClient(tools as Tool[]) + await this.getClient(prompt, tools as Tool[]) } else { - this.getClient() + await this.getClient(prompt) } const res = await this.caller.callWithOptions({ signal: options?.signal }, async () => { let output @@ -290,9 +318,9 @@ class LangchainChatGoogleGenerativeAI const tools = options.tools ?? [] if (tools.length > 0) { - this.getClient(tools as Tool[]) + await this.getClient(prompt, tools as Tool[]) } else { - this.getClient() + await this.getClient(prompt) } const stream = await this.caller.callWithOptions({ signal: options?.signal }, async () => { @@ -394,24 +422,18 @@ function getMessageAuthor(message: BaseMessage) { } function convertAuthorToRole(author: string) { - switch (author) { - /** - * Note: Gemini currently is not supporting system messages - * we will convert them to human messages and merge with following - * */ + switch (author.toLowerCase()) { case 'ai': - case 'model': // getMessageAuthor returns message.name. code ex.: return message.name ?? 
type; + case 'assistant': + case 'model': return 'model' - case 'system': - case 'human': - return 'user' case 'function': case 'tool': return 'function' + case 'system': + case 'human': default: - // Instead of throwing, we return model (Needed for Multi Agent) - // throw new Error(`Unknown / unsupported author: ${author}`) - return 'model' + return 'user' } } @@ -499,17 +521,29 @@ function convertMessageContentToParts(message: BaseMessage, isMultimodalModel: b function checkIfEmptyContentAndSameRole(contents: Content[]) { let prevRole = '' - const removedContents: Content[] = [] + const validContents: Content[] = [] + for (const content of contents) { - const role = content.role - if (content.parts.length && content.parts[0].text === '' && role === prevRole) { - removedContents.push(content) + // Skip only if completely empty + if (!content.parts || !content.parts.length) { + continue } - prevRole = role + // Ensure role is always either 'user' or 'model' + content.role = content.role === 'model' ? 'model' : 'user' + + // Handle consecutive messages + if (content.role === prevRole && validContents.length > 0) { + // Merge with previous content if same role + validContents[validContents.length - 1].parts.push(...content.parts) + continue + } + + validContents.push(content) + prevRole = content.role } - return contents.filter((content) => !removedContents.includes(content)) + return validContents } function convertBaseMessagesToContent(messages: BaseMessage[], isMultimodalModel: boolean) { @@ -547,7 +581,7 @@ function convertBaseMessagesToContent(messages: BaseMessage[], isMultimodalModel } } let actualRole = role - if (actualRole === 'function') { + if (actualRole === 'function' || actualRole === 'tool') { // GenerativeAI API will throw an error if the role is not "user" or "model." 
actualRole = 'user' } @@ -649,13 +683,39 @@ function zodToGeminiParameters(zodObj: any) { const jsonSchema: any = zodToJsonSchema(zodObj) // eslint-disable-next-line unused-imports/no-unused-vars const { $schema, additionalProperties, ...rest } = jsonSchema + + // Ensure all properties have type specified if (rest.properties) { Object.keys(rest.properties).forEach((key) => { - if (rest.properties[key].enum?.length) { - rest.properties[key] = { type: 'string', format: 'enum', enum: rest.properties[key].enum } + const prop = rest.properties[key] + + // Handle enum types + if (prop.enum?.length) { + rest.properties[key] = { + type: 'string', + format: 'enum', + enum: prop.enum + } + } + // Handle missing type + else if (!prop.type && !prop.oneOf && !prop.anyOf && !prop.allOf) { + // Infer type from other properties + if (prop.minimum !== undefined || prop.maximum !== undefined) { + prop.type = 'number' + } else if (prop.format === 'date-time') { + prop.type = 'string' + } else if (prop.items) { + prop.type = 'array' + } else if (prop.properties) { + prop.type = 'object' + } else { + // Default to string if type can't be inferred + prop.type = 'string' + } } }) } + return rest } diff --git a/packages/components/nodes/chatmodels/ChatGoogleVertexAI/ChatGoogleVertexAI.ts b/packages/components/nodes/chatmodels/ChatGoogleVertexAI/ChatGoogleVertexAI.ts index 5b813984b..44fed0b6a 100644 --- a/packages/components/nodes/chatmodels/ChatGoogleVertexAI/ChatGoogleVertexAI.ts +++ b/packages/components/nodes/chatmodels/ChatGoogleVertexAI/ChatGoogleVertexAI.ts @@ -99,7 +99,8 @@ class GoogleVertexAI_ChatModels implements INode { type: 'string', placeholder: 'gemini-1.5-pro-exp-0801', description: 'Custom model name to use. 
If provided, it will override the model selected', - additionalParams: true + additionalParams: true, + optional: true }, { label: 'Temperature', diff --git a/packages/components/nodes/chatmodels/ChatLitellm/ChatLitellm.ts b/packages/components/nodes/chatmodels/ChatLitellm/ChatLitellm.ts new file mode 100644 index 000000000..352f883c6 --- /dev/null +++ b/packages/components/nodes/chatmodels/ChatLitellm/ChatLitellm.ts @@ -0,0 +1,135 @@ +import { OpenAIChatInput, ChatOpenAI } from '@langchain/openai' +import { BaseCache } from '@langchain/core/caches' +import { BaseLLMParams } from '@langchain/core/language_models/llms' +import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface' +import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils' + +class ChatLitellm_ChatModels implements INode { + label: string + name: string + version: number + type: string + icon: string + category: string + description: string + baseClasses: string[] + credential: INodeParams + inputs: INodeParams[] + + constructor() { + this.label = 'ChatLitellm' + this.name = 'chatLitellm' + this.version = 1.0 + this.type = 'ChatLitellm' + this.icon = 'litellm.jpg' + this.category = 'Chat Models' + this.description = 'Connect to a Litellm server using OpenAI-compatible API' + this.baseClasses = [this.type, 'BaseChatModel', ...getBaseClasses(ChatOpenAI)] + this.credential = { + label: 'Connect Credential', + name: 'credential', + type: 'credential', + credentialNames: ['litellmApi'], + optional: true + } + this.inputs = [ + { + label: 'Cache', + name: 'cache', + type: 'BaseCache', + optional: true + }, + { + label: 'Base URL', + name: 'basePath', + type: 'string', + placeholder: 'http://localhost:8000' + }, + { + label: 'Model Name', + name: 'modelName', + type: 'string', + placeholder: 'model_name' + }, + { + label: 'Temperature', + name: 'temperature', + type: 'number', + step: 0.1, + default: 0.9, + optional: true + }, + { + label: 
'Streaming', + name: 'streaming', + type: 'boolean', + default: true, + optional: true, + additionalParams: true + }, + { + label: 'Max Tokens', + name: 'maxTokens', + type: 'number', + step: 1, + optional: true, + additionalParams: true + }, + { + label: 'Top P', + name: 'topP', + type: 'number', + step: 0.1, + optional: true, + additionalParams: true + }, + { + label: 'Timeout', + name: 'timeout', + type: 'number', + step: 1, + optional: true, + additionalParams: true + } + ] + } + + async init(nodeData: INodeData, _: string, options: ICommonObject): Promise { + const cache = nodeData.inputs?.cache as BaseCache + const basePath = nodeData.inputs?.basePath as string + const modelName = nodeData.inputs?.modelName as string + const temperature = nodeData.inputs?.temperature as string + const streaming = nodeData.inputs?.streaming as boolean + const maxTokens = nodeData.inputs?.maxTokens as string + const topP = nodeData.inputs?.topP as string + const timeout = nodeData.inputs?.timeout as string + + const credentialData = await getCredentialData(nodeData.credential ?? '', options) + const apiKey = getCredentialParam('litellmApiKey', credentialData, nodeData) + + const obj: Partial & + BaseLLMParams & { openAIApiKey?: string } & { configuration?: { baseURL?: string; defaultHeaders?: ICommonObject } } = { + temperature: parseFloat(temperature), + modelName, + streaming: streaming ?? 
true + } + + if (basePath) { + obj.configuration = { + baseURL: basePath + } + } + + if (maxTokens) obj.maxTokens = parseInt(maxTokens, 10) + if (topP) obj.topP = parseFloat(topP) + if (timeout) obj.timeout = parseInt(timeout, 10) + if (cache) obj.cache = cache + if (apiKey) obj.openAIApiKey = apiKey + + const model = new ChatOpenAI(obj) + + return model + } +} + +module.exports = { nodeClass: ChatLitellm_ChatModels } diff --git a/packages/components/nodes/chatmodels/ChatLitellm/litellm.jpg b/packages/components/nodes/chatmodels/ChatLitellm/litellm.jpg new file mode 100644 index 000000000..d6a77b2d1 Binary files /dev/null and b/packages/components/nodes/chatmodels/ChatLitellm/litellm.jpg differ diff --git a/packages/components/nodes/chatmodels/ChatNvdiaNIM/ChatNvdiaNIM.ts b/packages/components/nodes/chatmodels/ChatNvdiaNIM/ChatNvdiaNIM.ts index 8dfc4ec5a..b4636ad3d 100644 --- a/packages/components/nodes/chatmodels/ChatNvdiaNIM/ChatNvdiaNIM.ts +++ b/packages/components/nodes/chatmodels/ChatNvdiaNIM/ChatNvdiaNIM.ts @@ -1,5 +1,5 @@ -import { ChatOpenAI, ChatOpenAIFields } from '@langchain/openai' import { BaseCache } from '@langchain/core/caches' +import { ChatOpenAI, ChatOpenAIFields } from '@langchain/openai' import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface' import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils' @@ -16,19 +16,19 @@ class ChatNvdiaNIM_ChatModels implements INode { inputs: INodeParams[] constructor() { - this.label = 'Chat Nvdia NIM' - this.name = 'chatNvdiaNIM' - this.version = 1.0 - this.type = 'ChatNvdiaNIM' + this.label = 'Chat NVIDIA NIM' + this.name = 'Chat NVIDIA NIM' + this.version = 1.1 + this.type = 'Chat NVIDIA NIM' this.icon = 'nvdia.svg' this.category = 'Chat Models' - this.description = 'Wrapper around Nvdia NIM Inference API' + this.description = 'Wrapper around NVIDIA NIM Inference API' this.baseClasses = [this.type, ...getBaseClasses(ChatOpenAI)] this.credential = 
{ label: 'Connect Credential', name: 'credential', type: 'credential', - credentialNames: ['nvdiaNIMApi'], + credentialNames: ['nvidiaNIMApi'], optional: true } this.inputs = [ @@ -44,6 +44,13 @@ class ChatNvdiaNIM_ChatModels implements INode { type: 'string', placeholder: 'microsoft/phi-3-mini-4k-instruct' }, + { + label: 'Base Path', + name: 'basePath', + type: 'string', + description: 'Specify the URL of the deployed NIM Inference API', + placeholder: 'https://integrate.api.nvidia.com/v1' + }, { label: 'Temperature', name: 'temperature', @@ -52,13 +59,6 @@ class ChatNvdiaNIM_ChatModels implements INode { default: 0.9, optional: true }, - { - label: 'Base Path', - name: 'basePath', - type: 'string', - description: 'Specify the URL of the deployed NIM Inference API', - placeholder: 'https://integrate.api.nvidia.com/v1' - }, { label: 'Streaming', name: 'streaming', @@ -131,12 +131,12 @@ class ChatNvdiaNIM_ChatModels implements INode { const cache = nodeData.inputs?.cache as BaseCache const credentialData = await getCredentialData(nodeData.credential ?? '', options) - const nvdiaNIMApiKey = getCredentialParam('nvdiaNIMApiKey', credentialData, nodeData) + const nvidiaNIMApiKey = getCredentialParam('nvidiaNIMApiKey', credentialData, nodeData) const obj: ChatOpenAIFields & { nvdiaNIMApiKey?: string } = { temperature: parseFloat(temperature), modelName, - openAIApiKey: nvdiaNIMApiKey, + openAIApiKey: nvidiaNIMApiKey ?? 'sk-', streaming: streaming ?? true } @@ -153,7 +153,7 @@ class ChatNvdiaNIM_ChatModels implements INode { try { parsedBaseOptions = typeof baseOptions === 'object' ? 
baseOptions : JSON.parse(baseOptions) } catch (exception) { - throw new Error("Invalid JSON in the ChatNvidiaNIM's baseOptions: " + exception) + throw new Error("Invalid JSON in the Chat NVIDIA NIM's baseOptions: " + exception) } } diff --git a/packages/components/nodes/chatmodels/ChatOpenAI/ChatOpenAI.ts b/packages/components/nodes/chatmodels/ChatOpenAI/ChatOpenAI.ts index c893d7278..62c06d900 100644 --- a/packages/components/nodes/chatmodels/ChatOpenAI/ChatOpenAI.ts +++ b/packages/components/nodes/chatmodels/ChatOpenAI/ChatOpenAI.ts @@ -21,7 +21,7 @@ class ChatOpenAI_ChatModels implements INode { constructor() { this.label = 'ChatOpenAI' this.name = 'chatOpenAI' - this.version = 8.1 + this.version = 8.2 this.type = 'ChatOpenAI' this.icon = 'openai.svg' this.category = 'Chat Models' @@ -172,7 +172,9 @@ class ChatOpenAI_ChatModels implements INode { ], default: 'low', optional: false, - additionalParams: true + show: { + allowImageUploads: true + } }, { label: 'Reasoning Effort', @@ -241,7 +243,7 @@ class ChatOpenAI_ChatModels implements INode { streaming: streaming ?? 
true } - if (modelName.includes('o3')) { + if (modelName.includes('o3') || modelName.includes('o1')) { delete obj.temperature } if ((modelName.includes('o1') || modelName.includes('o3')) && reasoningEffort) { diff --git a/packages/components/nodes/chatmodels/ChatPerplexity/ChatPerplexity.ts b/packages/components/nodes/chatmodels/ChatPerplexity/ChatPerplexity.ts new file mode 100644 index 000000000..79817c3df --- /dev/null +++ b/packages/components/nodes/chatmodels/ChatPerplexity/ChatPerplexity.ts @@ -0,0 +1,237 @@ +import { ChatPerplexity as LangchainChatPerplexity, PerplexityChatInput } from '@langchain/community/chat_models/perplexity' +import { BaseCache } from '@langchain/core/caches' +import { ICommonObject, INode, INodeData, INodeOptionsValue, INodeParams } from '../../../src/Interface' +import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils' +import { ChatPerplexity } from './FlowiseChatPerplexity' +import { getModels, MODEL_TYPE } from '../../../src/modelLoader' + +class ChatPerplexity_ChatModels implements INode { + label: string + name: string + version: number + type: string + icon: string + category: string + description: string + baseClasses: string[] + credential: INodeParams + inputs: INodeParams[] + + constructor() { + this.label = 'ChatPerplexity' + this.name = 'chatPerplexity' + this.version = 0.1 + this.type = 'ChatPerplexity' + this.icon = 'perplexity.svg' + this.category = 'Chat Models' + this.description = 'Wrapper around Perplexity large language models that use the Chat endpoint' + this.baseClasses = [this.type, ...getBaseClasses(LangchainChatPerplexity)] + this.credential = { + label: 'Connect Credential', + name: 'credential', + type: 'credential', + credentialNames: ['perplexityApi'] + } + this.inputs = [ + { + label: 'Cache', + name: 'cache', + type: 'BaseCache', + optional: true + }, + { + label: 'Model Name', + name: 'model', + type: 'asyncOptions', + loadMethod: 'listModels', + default: 'sonar' + }, 
+ { + label: 'Temperature', + name: 'temperature', + type: 'number', + step: 0.1, + default: 1, + optional: true + }, + { + label: 'Max Tokens', + name: 'maxTokens', + type: 'number', + step: 1, + optional: true, + additionalParams: true + }, + { + label: 'Top P', + name: 'topP', + type: 'number', + step: 0.1, + optional: true, + additionalParams: true + }, + { + label: 'Top K', + name: 'topK', + type: 'number', + step: 1, + optional: true, + additionalParams: true + }, + { + label: 'Presence Penalty', + name: 'presencePenalty', + type: 'number', + step: 0.1, + optional: true, + additionalParams: true + }, + { + label: 'Frequency Penalty', + name: 'frequencyPenalty', + type: 'number', + step: 0.1, + optional: true, + additionalParams: true + }, + { + label: 'Streaming', + name: 'streaming', + type: 'boolean', + default: true, + optional: true, + additionalParams: true + }, + { + label: 'Timeout', + name: 'timeout', + type: 'number', + step: 1, + optional: true, + additionalParams: true + }, + // { + // label: 'Search Domain Filter', + // name: 'searchDomainFilter', + // type: 'json', + // optional: true, + // additionalParams: true, + // description: 'Limit citations to URLs from specified domains (e.g., ["example.com", "anotherexample.org"])' + // }, + // Currently disabled as output is stored as additional_kwargs + // { + // label: 'Return Images', + // name: 'returnImages', + // type: 'boolean', + // optional: true, + // additionalParams: true, + // description: 'Whether the model should return images (if supported by the model)' + // }, + // Currently disabled as output is stored as additional_kwargs + // { + // label: 'Return Related Questions', + // name: 'returnRelatedQuestions', + // type: 'boolean', + // optional: true, + // additionalParams: true, + // description: 'Whether the online model should return related questions' + // }, + // { + // label: 'Search Recency Filter', + // name: 'searchRecencyFilter', + // type: 'options', + // options: [ + // { 
label: 'Not Set', name: '' }, + // { label: 'Month', name: 'month' }, + // { label: 'Week', name: 'week' }, + // { label: 'Day', name: 'day' }, + // { label: 'Hour', name: 'hour' } + // ], + // default: '', + // optional: true, + // additionalParams: true, + // description: 'Filter search results by time interval (does not apply to images)' + // }, + { + label: 'Proxy Url', + name: 'proxyUrl', + type: 'string', + optional: true, + additionalParams: true + } + // LangchainJS currently does not has a web_search_options, search_after_date_filter or search_before_date_filter parameter. + // To add web_search_options (user_location, search_context_size) and search_after_date_filter, search_before_date_filter as a modelKwargs parameter. + ] + } + + //@ts-ignore + loadMethods = { + async listModels(): Promise { + return await getModels(MODEL_TYPE.CHAT, 'chatPerplexity') + } + } + + async init(nodeData: INodeData, _: string, options: ICommonObject): Promise { + const model = nodeData.inputs?.model as string + const temperature = nodeData.inputs?.temperature as string + const maxTokens = nodeData.inputs?.maxTokens as string + const topP = nodeData.inputs?.topP as string + const topK = nodeData.inputs?.topK as string + const presencePenalty = nodeData.inputs?.presencePenalty as string + const frequencyPenalty = nodeData.inputs?.frequencyPenalty as string + const streaming = nodeData.inputs?.streaming as boolean + const timeout = nodeData.inputs?.timeout as string + const searchDomainFilterRaw = nodeData.inputs?.searchDomainFilter + const returnImages = nodeData.inputs?.returnImages as boolean + const returnRelatedQuestions = nodeData.inputs?.returnRelatedQuestions as boolean + const searchRecencyFilter = nodeData.inputs?.searchRecencyFilter as string + const proxyUrl = nodeData.inputs?.proxyUrl as string + const cache = nodeData.inputs?.cache as BaseCache + + if (nodeData.inputs?.credentialId) { + nodeData.credential = nodeData.inputs?.credentialId + } + const credentialData 
= await getCredentialData(nodeData.credential ?? '', options) + const apiKey = getCredentialParam('perplexityApiKey', credentialData, nodeData) + + if (!apiKey) { + throw new Error('Perplexity API Key missing from credential') + } + + const obj: PerplexityChatInput = { + model, + apiKey, + streaming: streaming ?? true + } + + if (temperature) obj.temperature = parseFloat(temperature) + if (maxTokens) obj.maxTokens = parseInt(maxTokens, 10) + if (topP) obj.topP = parseFloat(topP) + if (topK) obj.topK = parseInt(topK, 10) + if (presencePenalty) obj.presencePenalty = parseFloat(presencePenalty) + if (frequencyPenalty) obj.frequencyPenalty = parseFloat(frequencyPenalty) + if (timeout) obj.timeout = parseInt(timeout, 10) + if (returnImages) obj.returnImages = returnImages + if (returnRelatedQuestions) obj.returnRelatedQuestions = returnRelatedQuestions + if (searchRecencyFilter && searchRecencyFilter !== '') obj.searchRecencyFilter = searchRecencyFilter + if (cache) obj.cache = cache + + if (searchDomainFilterRaw) { + try { + obj.searchDomainFilter = + typeof searchDomainFilterRaw === 'object' ? 
searchDomainFilterRaw : JSON.parse(searchDomainFilterRaw) + } catch (exception) { + throw new Error('Invalid JSON in Search Domain Filter: ' + exception) + } + } + + if (proxyUrl) { + console.warn('Proxy configuration for ChatPerplexity might require adjustments to FlowiseChatPerplexity wrapper.') + } + + const perplexityModel = new ChatPerplexity(nodeData.id, obj) + return perplexityModel + } +} + +module.exports = { nodeClass: ChatPerplexity_ChatModels } diff --git a/packages/components/nodes/chatmodels/ChatPerplexity/FlowiseChatPerplexity.ts b/packages/components/nodes/chatmodels/ChatPerplexity/FlowiseChatPerplexity.ts new file mode 100644 index 000000000..51ecc2835 --- /dev/null +++ b/packages/components/nodes/chatmodels/ChatPerplexity/FlowiseChatPerplexity.ts @@ -0,0 +1,32 @@ +import { ChatPerplexity as LangchainChatPerplexity, type PerplexityChatInput } from '@langchain/community/chat_models/perplexity' +import { IMultiModalOption, IVisionChatModal } from '../../../src' + +// Extend the Langchain ChatPerplexity class to include Flowise-specific properties and methods +export class ChatPerplexity extends LangchainChatPerplexity implements IVisionChatModal { + configuredModel: string + configuredMaxToken?: number + multiModalOption: IMultiModalOption + id: string + + constructor(id: string, fields: PerplexityChatInput) { + super(fields) + this.id = id + this.configuredModel = fields?.model ?? 
'' // Use model from fields + this.configuredMaxToken = fields?.maxTokens + } + + // Method to revert to the original model configuration + revertToOriginalModel(): void { + this.model = this.configuredModel + this.maxTokens = this.configuredMaxToken + } + + // Method to set multimodal options + setMultiModalOption(multiModalOption: IMultiModalOption): void { + this.multiModalOption = multiModalOption + } + + setVisionModel(): void { + // pass + } +} diff --git a/packages/components/nodes/chatmodels/ChatPerplexity/perplexity.svg b/packages/components/nodes/chatmodels/ChatPerplexity/perplexity.svg new file mode 100644 index 000000000..2aa09bef5 --- /dev/null +++ b/packages/components/nodes/chatmodels/ChatPerplexity/perplexity.svg @@ -0,0 +1,8 @@ + + + \ No newline at end of file diff --git a/packages/components/nodes/chatmodels/Groq/ChatGroq_LlamaIndex.ts b/packages/components/nodes/chatmodels/Groq/ChatGroq_LlamaIndex.ts index 31e58d84f..f1566379b 100644 --- a/packages/components/nodes/chatmodels/Groq/ChatGroq_LlamaIndex.ts +++ b/packages/components/nodes/chatmodels/Groq/ChatGroq_LlamaIndex.ts @@ -48,6 +48,14 @@ class ChatGroq_LlamaIndex_ChatModels implements INode { step: 0.1, default: 0.9, optional: true + }, + { + label: 'Max Tokens', + name: 'maxTokens', + type: 'number', + step: 1, + optional: true, + additionalParams: true } ] } @@ -62,7 +70,7 @@ class ChatGroq_LlamaIndex_ChatModels implements INode { async init(nodeData: INodeData, _: string, options: ICommonObject): Promise { const temperature = nodeData.inputs?.temperature as string const modelName = nodeData.inputs?.modelName as string - + const maxTokens = nodeData.inputs?.maxTokens as string const credentialData = await getCredentialData(nodeData.credential ?? 
'', options) const groqApiKey = getCredentialParam('groqApiKey', credentialData, nodeData) @@ -71,7 +79,7 @@ class ChatGroq_LlamaIndex_ChatModels implements INode { model: modelName, apiKey: groqApiKey } - + if (maxTokens) obj.maxTokens = parseInt(maxTokens, 10) const model = new Groq(obj) return model } diff --git a/packages/components/nodes/chatmodels/Groq/Groq.ts b/packages/components/nodes/chatmodels/Groq/Groq.ts index 03a7dd1bf..506c7277f 100644 --- a/packages/components/nodes/chatmodels/Groq/Groq.ts +++ b/packages/components/nodes/chatmodels/Groq/Groq.ts @@ -54,6 +54,14 @@ class Groq_ChatModels implements INode { default: 0.9, optional: true }, + { + label: 'Max Tokens', + name: 'maxTokens', + type: 'number', + step: 1, + optional: true, + additionalParams: true + }, { label: 'Streaming', name: 'streaming', @@ -73,6 +81,7 @@ class Groq_ChatModels implements INode { async init(nodeData: INodeData, _: string, options: ICommonObject): Promise { const modelName = nodeData.inputs?.modelName as string + const maxTokens = nodeData.inputs?.maxTokens as string const cache = nodeData.inputs?.cache as BaseCache const temperature = nodeData.inputs?.temperature as string const streaming = nodeData.inputs?.streaming as boolean @@ -86,6 +95,7 @@ class Groq_ChatModels implements INode { apiKey: groqApiKey, streaming: streaming ?? 
true } + if (maxTokens) obj.maxTokens = parseInt(maxTokens, 10) if (cache) obj.cache = cache const model = new ChatGroq(obj) diff --git a/packages/components/nodes/documentloaders/File/File.ts b/packages/components/nodes/documentloaders/File/File.ts index 2a8bea1a3..68ec54df0 100644 --- a/packages/components/nodes/documentloaders/File/File.ts +++ b/packages/components/nodes/documentloaders/File/File.ts @@ -47,7 +47,7 @@ class File_DocumentLoaders implements INode { }, { label: 'Pdf Usage', - name: 'pdfUsage', + name: 'usage', type: 'options', description: 'Only when loading PDF files', options: [ @@ -64,6 +64,14 @@ class File_DocumentLoaders implements INode { optional: true, additionalParams: true }, + { + label: 'Use Legacy Build', + name: 'legacyBuild', + type: 'boolean', + description: 'Use legacy build for PDF compatibility issues', + optional: true, + additionalParams: true + }, { label: 'JSONL Pointer Extraction', name: 'pointerName', @@ -113,7 +121,8 @@ class File_DocumentLoaders implements INode { const textSplitter = nodeData.inputs?.textSplitter as TextSplitter const fileBase64 = nodeData.inputs?.file as string const metadata = nodeData.inputs?.metadata - const pdfUsage = nodeData.inputs?.pdfUsage + const pdfUsage = nodeData.inputs?.pdfUsage || nodeData.inputs?.usage + const legacyBuild = nodeData.inputs?.legacyBuild as boolean const pointerName = nodeData.inputs?.pointerName as string const _omitMetadataKeys = nodeData.inputs?.omitMetadataKeys as string const output = nodeData.outputs?.output as string @@ -173,10 +182,21 @@ class File_DocumentLoaders implements INode { const match = file.match(/^data:([A-Za-z-+\/]+);base64,/) if (!match) { - fileBlobs.push({ - blob, - ext: extension - }) + // Fallback: check if there's a filename pattern at the end + const filenameMatch = file.match(/,filename:(.+\.\w+)$/) + if (filenameMatch && filenameMatch[1]) { + const filename = filenameMatch[1] + const fileExt = filename.split('.').pop() || '' + fileBlobs.push({ + 
blob, + ext: fileExt + }) + } else { + fileBlobs.push({ + blob, + ext: extension + }) + } } else { const mimeType = match[1] fileBlobs.push({ @@ -199,9 +219,18 @@ class File_DocumentLoaders implements INode { pdf: (blob) => pdfUsage === 'perFile' ? // @ts-ignore - new PDFLoader(blob, { splitPages: false, pdfjs: () => import('pdf-parse/lib/pdf.js/v1.10.100/build/pdf.js') }) + new PDFLoader(blob, { + splitPages: false, + pdfjs: () => + // @ts-ignore + legacyBuild ? import('pdfjs-dist/legacy/build/pdf.js') : import('pdf-parse/lib/pdf.js/v1.10.100/build/pdf.js') + }) : // @ts-ignore - new PDFLoader(blob, { pdfjs: () => import('pdf-parse/lib/pdf.js/v1.10.100/build/pdf.js') }), + new PDFLoader(blob, { + pdfjs: () => + // @ts-ignore + legacyBuild ? import('pdfjs-dist/legacy/build/pdf.js') : import('pdf-parse/lib/pdf.js/v1.10.100/build/pdf.js') + }), '': (blob) => new TextLoader(blob) }) let docs = [] diff --git a/packages/components/nodes/documentloaders/Github/Github.ts b/packages/components/nodes/documentloaders/Github/Github.ts index 116b7f6fd..3edef63f3 100644 --- a/packages/components/nodes/documentloaders/Github/Github.ts +++ b/packages/components/nodes/documentloaders/Github/Github.ts @@ -61,6 +61,24 @@ class Github_DocumentLoaders implements INode { optional: true, additionalParams: true }, + { + label: 'Github Base URL', + name: 'githubBaseUrl', + type: 'string', + placeholder: `https://git.example.com`, + description: 'Custom Github Base Url (e.g. Enterprise)', + optional: true, + additionalParams: true + }, + { + label: 'Github Instance API', + name: 'githubInstanceApi', + type: 'string', + placeholder: `https://api.github.com`, + description: 'Custom Github API Url (e.g. 
Enterprise)', + optional: true, + additionalParams: true + }, { label: 'Ignore Paths', name: 'ignorePath', @@ -134,6 +152,8 @@ class Github_DocumentLoaders implements INode { const ignorePath = nodeData.inputs?.ignorePath as string const _omitMetadataKeys = nodeData.inputs?.omitMetadataKeys as string const output = nodeData.outputs?.output as string + const githubInstanceApi = nodeData.inputs?.githubInstanceApi as string + const githubBaseUrl = nodeData.inputs?.githubBaseUrl as string let omitMetadataKeys: string[] = [] if (_omitMetadataKeys) { @@ -153,6 +173,12 @@ class Github_DocumentLoaders implements INode { if (maxConcurrency) githubOptions.maxConcurrency = parseInt(maxConcurrency, 10) if (maxRetries) githubOptions.maxRetries = parseInt(maxRetries, 10) if (ignorePath) githubOptions.ignorePaths = JSON.parse(ignorePath) + if (githubInstanceApi) { + githubOptions.apiUrl = githubInstanceApi.endsWith('/') ? githubInstanceApi.slice(0, -1) : githubInstanceApi + } + if (githubBaseUrl) { + githubOptions.baseUrl = githubBaseUrl.endsWith('/') ? 
githubBaseUrl.slice(0, -1) : githubBaseUrl + } const loader = new GithubRepoLoader(repoLink, githubOptions) diff --git a/packages/components/nodes/documentloaders/Jira/Jira.ts b/packages/components/nodes/documentloaders/Jira/Jira.ts new file mode 100644 index 000000000..9c03282ef --- /dev/null +++ b/packages/components/nodes/documentloaders/Jira/Jira.ts @@ -0,0 +1,194 @@ +import { omit } from 'lodash' +import { ICommonObject, IDocument, INode, INodeData, INodeParams } from '../../../src/Interface' +import { TextSplitter } from 'langchain/text_splitter' +import { JiraProjectLoaderParams, JiraProjectLoader } from '@langchain/community/document_loaders/web/jira' +import { getCredentialData, getCredentialParam, handleEscapeCharacters, INodeOutputsValue } from '../../../src' + +class Jira_DocumentLoaders implements INode { + label: string + name: string + version: number + description: string + type: string + icon: string + category: string + baseClasses: string[] + credential: INodeParams + inputs: INodeParams[] + outputs: INodeOutputsValue[] + + constructor() { + this.label = 'Jira' + this.name = 'jira' + this.version = 1.0 + this.type = 'Document' + this.icon = 'jira.svg' + this.category = 'Document Loaders' + this.description = `Load issues from Jira` + this.baseClasses = [this.type] + this.credential = { + label: 'Connect Credential', + name: 'credential', + type: 'credential', + description: 'Jira API Credential', + credentialNames: ['jiraApi'] + } + this.inputs = [ + { + label: 'Host', + name: 'host', + type: 'string', + placeholder: 'https://jira.example.com' + }, + { + label: 'Project Key', + name: 'projectKey', + type: 'string', + default: 'main' + }, + { + label: 'Limit per request', + name: 'limitPerRequest', + type: 'number', + step: 1, + optional: true, + placeholder: '100' + }, + { + label: 'Created after', + name: 'createdAfter', + type: 'string', + optional: true, + placeholder: '2024-01-01' + }, + { + label: 'Text Splitter', + name: 'textSplitter', + 
type: 'TextSplitter', + optional: true + }, + { + label: 'Additional Metadata', + name: 'metadata', + type: 'json', + description: 'Additional metadata to be added to the extracted documents', + optional: true, + additionalParams: true + }, + { + label: 'Omit Metadata Keys', + name: 'omitMetadataKeys', + type: 'string', + rows: 4, + description: + 'Each document loader comes with a default set of metadata keys that are extracted from the document. You can use this field to omit some of the default metadata keys. The value should be a list of keys, seperated by comma. Use * to omit all metadata keys execept the ones you specify in the Additional Metadata field', + placeholder: 'key1, key2, key3.nestedKey1', + optional: true, + additionalParams: true + } + ] + this.outputs = [ + { + label: 'Document', + name: 'document', + description: 'Array of document objects containing metadata and pageContent', + baseClasses: [...this.baseClasses, 'json'] + }, + { + label: 'Text', + name: 'text', + description: 'Concatenated string from pageContent of documents', + baseClasses: ['string', 'json'] + } + ] + } + + async init(nodeData: INodeData, _: string, options: ICommonObject): Promise { + const host = nodeData.inputs?.host as string + const projectKey = nodeData.inputs?.projectKey as string + const limitPerRequest = nodeData.inputs?.limitPerRequest as string + const createdAfter = nodeData.inputs?.createdAfter as string + const textSplitter = nodeData.inputs?.textSplitter as TextSplitter + const metadata = nodeData.inputs?.metadata + const _omitMetadataKeys = nodeData.inputs?.omitMetadataKeys as string + const output = nodeData.outputs?.output as string + + let omitMetadataKeys: string[] = [] + if (_omitMetadataKeys) { + omitMetadataKeys = _omitMetadataKeys.split(',').map((key) => key.trim()) + } + + const credentialData = await getCredentialData(nodeData.credential ?? 
'', options) + const username = getCredentialParam('username', credentialData, nodeData) + const accessToken = getCredentialParam('accessToken', credentialData, nodeData) + + const jiraOptions: JiraProjectLoaderParams = { + projectKey, + host, + username, + accessToken + } + + if (limitPerRequest) { + jiraOptions.limitPerRequest = parseInt(limitPerRequest) + } + + if (createdAfter) { + jiraOptions.createdAfter = new Date(createdAfter) + } + + const loader = new JiraProjectLoader(jiraOptions) + let docs: IDocument[] = [] + + if (textSplitter) { + docs = await loader.load() + docs = await textSplitter.splitDocuments(docs) + } else { + docs = await loader.load() + } + + if (metadata) { + const parsedMetadata = typeof metadata === 'object' ? metadata : JSON.parse(metadata) + docs = docs.map((doc) => ({ + ...doc, + metadata: + _omitMetadataKeys === '*' + ? { + ...parsedMetadata + } + : omit( + { + ...doc.metadata, + ...parsedMetadata + }, + omitMetadataKeys + ) + })) + } else { + docs = docs.map((doc) => ({ + ...doc, + metadata: + _omitMetadataKeys === '*' + ? 
{} + : omit( + { + ...doc.metadata + }, + omitMetadataKeys + ) + })) + } + + if (output === 'document') { + return docs + } else { + let finaltext = '' + for (const doc of docs) { + finaltext += `${doc.pageContent}\n` + } + return handleEscapeCharacters(finaltext, false) + } + } +} + +module.exports = { nodeClass: Jira_DocumentLoaders } diff --git a/packages/components/nodes/documentloaders/Jira/jira.svg b/packages/components/nodes/documentloaders/Jira/jira.svg new file mode 100644 index 000000000..807c5a311 --- /dev/null +++ b/packages/components/nodes/documentloaders/Jira/jira.svg @@ -0,0 +1,2 @@ + + \ No newline at end of file diff --git a/packages/components/nodes/memory/Mem0/Mem0.ts b/packages/components/nodes/memory/Mem0/Mem0.ts new file mode 100644 index 000000000..ba7960163 --- /dev/null +++ b/packages/components/nodes/memory/Mem0/Mem0.ts @@ -0,0 +1,375 @@ +import { Mem0Memory as BaseMem0Memory, Mem0MemoryInput, ClientOptions } from '@mem0/community' +import { MemoryOptions, SearchOptions } from 'mem0ai' +import { BaseMessage } from '@langchain/core/messages' +import { InputValues, MemoryVariables, OutputValues } from '@langchain/core/memory' +import { ICommonObject, IDatabaseEntity } from '../../../src' +import { IMessage, INode, INodeData, INodeParams, MemoryMethods, MessageType } from '../../../src/Interface' +import { getBaseClasses, getCredentialData, getCredentialParam, mapChatMessageToBaseMessage } from '../../../src/utils' +import { DataSource } from 'typeorm' +import { v4 as uuidv4 } from 'uuid' + +interface BufferMemoryExtendedInput { + sessionId: string + appDataSource: DataSource + databaseEntities: IDatabaseEntity + chatflowid: string +} + +class Mem0_Memory implements INode { + label: string + name: string + version: number + description: string + type: string + icon: string + category: string + baseClasses: string[] + credential: INodeParams + inputs: INodeParams[] + + constructor() { + this.label = 'Mem0' + this.name = 'mem0' + this.version 
= 1.1 + this.type = 'Mem0' + this.icon = 'mem0.svg' + this.category = 'Memory' + this.description = 'Stores and manages chat memory using Mem0 service' + this.baseClasses = [this.type, ...getBaseClasses(BaseMem0Memory)] + this.credential = { + label: 'Connect Credential', + name: 'credential', + type: 'credential', + optional: false, + description: 'Configure API Key for Mem0 service', + credentialNames: ['mem0MemoryApi'] + } + this.inputs = [ + { + label: 'User ID', + name: 'user_id', + type: 'string', + description: 'Unique identifier for the user. Required only if "Use Flowise Chat ID" is OFF.', + default: 'flowise-default-user', + optional: true + }, + // Added toggle to use Flowise chat ID + { + label: 'Use Flowise Chat ID', + name: 'useFlowiseChatId', + type: 'boolean', + description: 'Use the Flowise internal Chat ID as the Mem0 User ID, overriding the "User ID" field above.', + default: false, + optional: true + }, + { + label: 'Search Only', + name: 'searchOnly', + type: 'boolean', + description: 'Search only mode', + default: false, + optional: true, + additionalParams: true + }, + { + label: 'Run ID', + name: 'run_id', + type: 'string', + description: 'Unique identifier for the run session', + default: '', + optional: true, + additionalParams: true + }, + { + label: 'Agent ID', + name: 'agent_id', + type: 'string', + description: 'Identifier for the agent', + default: '', + optional: true, + additionalParams: true + }, + { + label: 'App ID', + name: 'app_id', + type: 'string', + description: 'Identifier for the application', + default: '', + optional: true, + additionalParams: true + }, + { + label: 'Project ID', + name: 'project_id', + type: 'string', + description: 'Identifier for the project', + default: '', + optional: true, + additionalParams: true + }, + { + label: 'Organization ID', + name: 'org_id', + type: 'string', + description: 'Identifier for the organization', + default: '', + optional: true, + additionalParams: true + }, + { + label: 
'Memory Key', + name: 'memoryKey', + type: 'string', + default: 'history', + optional: true, + additionalParams: true + }, + { + label: 'Input Key', + name: 'inputKey', + type: 'string', + default: 'input', + optional: true, + additionalParams: true + }, + { + label: 'Output Key', + name: 'outputKey', + type: 'string', + default: 'text', + optional: true, + additionalParams: true + } + ] + } + + async init(nodeData: INodeData, _: string, options: ICommonObject): Promise { + return await initializeMem0(nodeData, options) + } +} + +const initializeMem0 = async (nodeData: INodeData, options: ICommonObject): Promise => { + const initialUserId = nodeData.inputs?.user_id as string + const useFlowiseChatId = nodeData.inputs?.useFlowiseChatId as boolean + + if (!useFlowiseChatId && !initialUserId) { + throw new Error('User ID field cannot be empty when "Use Flowise Chat ID" is OFF.') + } + + const credentialData = await getCredentialData(nodeData.credential ?? '', options) + const apiKey = getCredentialParam('apiKey', credentialData, nodeData) + + const mem0Options: ClientOptions = { + apiKey: apiKey, + host: nodeData.inputs?.host as string, + organizationId: nodeData.inputs?.org_id as string, + projectId: nodeData.inputs?.project_id as string + } + + const memOptionsUserId = initialUserId + + const constructorSessionId = initialUserId || (useFlowiseChatId ? 
'flowise-chat-id-placeholder' : '') + + const memoryOptions: MemoryOptions & SearchOptions = { + user_id: memOptionsUserId, + run_id: (nodeData.inputs?.run_id as string) || undefined, + agent_id: (nodeData.inputs?.agent_id as string) || undefined, + app_id: (nodeData.inputs?.app_id as string) || undefined, + project_id: (nodeData.inputs?.project_id as string) || undefined, + org_id: (nodeData.inputs?.org_id as string) || undefined, + api_version: (nodeData.inputs?.api_version as string) || undefined, + enable_graph: (nodeData.inputs?.enable_graph as boolean) || false, + metadata: (nodeData.inputs?.metadata as Record) || {}, + filters: (nodeData.inputs?.filters as Record) || {} + } + + const obj: Mem0MemoryInput & Mem0MemoryExtendedInput & BufferMemoryExtendedInput & { searchOnly: boolean; useFlowiseChatId: boolean } = + { + apiKey: apiKey, + humanPrefix: nodeData.inputs?.humanPrefix as string, + aiPrefix: nodeData.inputs?.aiPrefix as string, + inputKey: nodeData.inputs?.inputKey as string, + sessionId: constructorSessionId, + mem0Options: mem0Options, + memoryOptions: memoryOptions, + separateMessages: false, + returnMessages: false, + appDataSource: options.appDataSource as DataSource, + databaseEntities: options.databaseEntities as IDatabaseEntity, + chatflowid: options.chatflowid as string, + searchOnly: (nodeData.inputs?.searchOnly as boolean) || false, + useFlowiseChatId: useFlowiseChatId + } + + return new Mem0MemoryExtended(obj) +} + +interface Mem0MemoryExtendedInput extends Mem0MemoryInput { + memoryOptions?: MemoryOptions | SearchOptions + useFlowiseChatId: boolean +} + +class Mem0MemoryExtended extends BaseMem0Memory implements MemoryMethods { + initialUserId: string + userId: string + memoryKey: string + inputKey: string + appDataSource: DataSource + databaseEntities: IDatabaseEntity + chatflowid: string + searchOnly: boolean + useFlowiseChatId: boolean + + constructor( + fields: Mem0MemoryInput & Mem0MemoryExtendedInput & BufferMemoryExtendedInput & { 
searchOnly: boolean; useFlowiseChatId: boolean } + ) { + super(fields) + this.initialUserId = fields.memoryOptions?.user_id ?? '' + this.userId = this.initialUserId + this.memoryKey = 'history' + this.inputKey = fields.inputKey ?? 'input' + this.appDataSource = fields.appDataSource + this.databaseEntities = fields.databaseEntities + this.chatflowid = fields.chatflowid + this.searchOnly = fields.searchOnly + this.useFlowiseChatId = fields.useFlowiseChatId + } + + // Selects Mem0 user_id based on toggle state (Flowise chat ID or input field) + private getEffectiveUserId(overrideUserId?: string): string { + let effectiveUserId: string | undefined + + if (this.useFlowiseChatId) { + if (overrideUserId) { + effectiveUserId = overrideUserId + } else { + throw new Error('Mem0: "Use Flowise Chat ID" is ON, but no runtime chat ID (overrideUserId) was provided.') + } + } else { + // If toggle is OFF, ALWAYS use the ID from the input field. + effectiveUserId = this.initialUserId + } + + // This check is now primarily for the case where the toggle is OFF and the initialUserId was somehow empty (should be caught by init validation). + if (!effectiveUserId) { + throw new Error('Mem0: Could not determine a valid User ID for the operation. 
Check User ID input field.') + } + return effectiveUserId + } + + async loadMemoryVariables(values: InputValues, overrideUserId = ''): Promise { + const effectiveUserId = this.getEffectiveUserId(overrideUserId) + this.userId = effectiveUserId + if (this.memoryOptions) { + this.memoryOptions.user_id = effectiveUserId + } + return super.loadMemoryVariables(values) + } + + async saveContext(inputValues: InputValues, outputValues: OutputValues, overrideUserId = ''): Promise { + if (this.searchOnly) { + return + } + const effectiveUserId = this.getEffectiveUserId(overrideUserId) + this.userId = effectiveUserId + if (this.memoryOptions) { + this.memoryOptions.user_id = effectiveUserId + } + return super.saveContext(inputValues, outputValues) + } + + async clear(overrideUserId = ''): Promise { + const effectiveUserId = this.getEffectiveUserId(overrideUserId) + this.userId = effectiveUserId + if (this.memoryOptions) { + this.memoryOptions.user_id = effectiveUserId + } + return super.clear() + } + + async getChatMessages( + overrideUserId = '', + returnBaseMessages = false, + prependMessages?: IMessage[] + ): Promise { + const flowiseSessionId = overrideUserId + if (!flowiseSessionId) { + console.warn('Mem0: getChatMessages called without overrideUserId (Flowise Session ID). 
Cannot fetch DB messages.') + return [] + } + + let chatMessage = await this.appDataSource.getRepository(this.databaseEntities['ChatMessage']).find({ + where: { + sessionId: flowiseSessionId, + chatflowid: this.chatflowid + }, + order: { + createdDate: 'DESC' + }, + take: 10 + }) + chatMessage = chatMessage.reverse() + + let returnIMessages: IMessage[] = chatMessage.map((m) => ({ + message: m.content as string, + type: m.role as MessageType + })) + + if (prependMessages?.length) { + returnIMessages.unshift(...prependMessages) + // Reverted to original simpler unshift + chatMessage.unshift(...(prependMessages as any)) // Cast as any + } + + if (returnBaseMessages) { + const memoryVariables = await this.loadMemoryVariables({}, overrideUserId) + const mem0History = memoryVariables[this.memoryKey] + + if (mem0History && typeof mem0History === 'string') { + const systemMessage = { + role: 'apiMessage' as MessageType, + content: mem0History, + id: uuidv4() + } + // Ensure Mem0 history message also conforms structurally if mapChatMessageToBaseMessage is strict + chatMessage.unshift(systemMessage as any) // Cast needed if mixing structures + } else if (mem0History) { + console.warn('Mem0 history is not a string, cannot prepend directly.') + } + + return await mapChatMessageToBaseMessage(chatMessage) + } + + return returnIMessages + } + + async addChatMessages(msgArray: { text: string; type: MessageType }[], overrideUserId = ''): Promise { + const effectiveUserId = this.getEffectiveUserId(overrideUserId) + const input = msgArray.find((msg) => msg.type === 'userMessage') + const output = msgArray.find((msg) => msg.type === 'apiMessage') + + if (input && output) { + const inputValues = { [this.inputKey ?? 
'input']: input.text } + const outputValues = { output: output.text } + await this.saveContext(inputValues, outputValues, effectiveUserId) + } else { + console.warn('Mem0: Could not find both input and output messages to save context.') + } + } + + async clearChatMessages(overrideUserId = ''): Promise { + const effectiveUserId = this.getEffectiveUserId(overrideUserId) + await this.clear(effectiveUserId) + + const flowiseSessionId = overrideUserId + if (flowiseSessionId) { + await this.appDataSource + .getRepository(this.databaseEntities['ChatMessage']) + .delete({ sessionId: flowiseSessionId, chatflowid: this.chatflowid }) + } else { + console.warn('Mem0: clearChatMessages called without overrideUserId (Flowise Session ID). Cannot clear DB messages.') + } + } +} + +module.exports = { nodeClass: Mem0_Memory } diff --git a/packages/components/nodes/memory/Mem0/mem0.svg b/packages/components/nodes/memory/Mem0/mem0.svg new file mode 100644 index 000000000..42a7d6d90 --- /dev/null +++ b/packages/components/nodes/memory/Mem0/mem0.svg @@ -0,0 +1,3 @@ + + + diff --git a/packages/components/nodes/memory/MongoDBMemory/MongoDBMemory.ts b/packages/components/nodes/memory/MongoDBMemory/MongoDBMemory.ts index 44832466e..df70c4949 100644 --- a/packages/components/nodes/memory/MongoDBMemory/MongoDBMemory.ts +++ b/packages/components/nodes/memory/MongoDBMemory/MongoDBMemory.ts @@ -155,7 +155,10 @@ class BufferMemoryExtended extends FlowiseMemory implements MemoryMethods { if (input) { const newInputMessage = new HumanMessage(input.text) - const messageToAdd = [newInputMessage].map((msg) => msg.toDict()) + const messageToAdd = [newInputMessage].map((msg) => ({ + ...msg.toDict(), + timestamp: new Date() // Add timestamp to the message + })) await collection.updateOne( { sessionId: id }, { @@ -167,7 +170,10 @@ class BufferMemoryExtended extends FlowiseMemory implements MemoryMethods { if (output) { const newOutputMessage = new AIMessage(output.text) - const messageToAdd = 
[newOutputMessage].map((msg) => msg.toDict()) + const messageToAdd = [newOutputMessage].map((msg) => ({ + ...msg.toDict(), + timestamp: new Date() // Add timestamp to the message + })) await collection.updateOne( { sessionId: id }, { diff --git a/packages/components/nodes/memory/RedisBackedChatMemory/RedisBackedChatMemory.ts b/packages/components/nodes/memory/RedisBackedChatMemory/RedisBackedChatMemory.ts index 2f4f3ca07..e1813fae7 100644 --- a/packages/components/nodes/memory/RedisBackedChatMemory/RedisBackedChatMemory.ts +++ b/packages/components/nodes/memory/RedisBackedChatMemory/RedisBackedChatMemory.ts @@ -132,7 +132,21 @@ class BufferMemoryExtended extends FlowiseMemory implements MemoryMethods { } private async withRedisClient(fn: (client: Redis) => Promise): Promise { - const client = typeof this.redisOptions === 'string' ? new Redis(this.redisOptions) : new Redis(this.redisOptions) + const client = + typeof this.redisOptions === 'string' + ? new Redis(this.redisOptions, { + keepAlive: + process.env.REDIS_KEEP_ALIVE && !isNaN(parseInt(process.env.REDIS_KEEP_ALIVE, 10)) + ? parseInt(process.env.REDIS_KEEP_ALIVE, 10) + : undefined + }) + : new Redis({ + ...this.redisOptions, + keepAlive: + process.env.REDIS_KEEP_ALIVE && !isNaN(parseInt(process.env.REDIS_KEEP_ALIVE, 10)) + ? 
parseInt(process.env.REDIS_KEEP_ALIVE, 10) + : undefined + }) try { return await fn(client) } finally { diff --git a/packages/components/nodes/recordmanager/PostgresRecordManager/PostgresRecordManager.ts b/packages/components/nodes/recordmanager/PostgresRecordManager/PostgresRecordManager.ts index 63beee927..ebb429af0 100644 --- a/packages/components/nodes/recordmanager/PostgresRecordManager/PostgresRecordManager.ts +++ b/packages/components/nodes/recordmanager/PostgresRecordManager/PostgresRecordManager.ts @@ -2,7 +2,7 @@ import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Inter import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils' import { ListKeyOptions, RecordManagerInterface, UpdateOptions } from '@langchain/community/indexes/base' import { DataSource } from 'typeorm' -import { getHost } from '../../vectorstores/Postgres/utils' +import { getHost, getSSL } from '../../vectorstores/Postgres/utils' import { getDatabase, getPort, getTableName } from './utils' const serverCredentialsExists = !!process.env.POSTGRES_RECORDMANAGER_USER && !!process.env.POSTGRES_RECORDMANAGER_PASSWORD @@ -51,6 +51,14 @@ class PostgresRecordManager_RecordManager implements INode { placeholder: getPort(), optional: true }, + { + label: 'SSL', + name: 'ssl', + description: 'Use SSL to connect to Postgres', + type: 'boolean', + additionalParams: true, + optional: true + }, { label: 'Additional Connection Configuration', name: 'additionalConfig', @@ -149,6 +157,7 @@ class PostgresRecordManager_RecordManager implements INode { type: 'postgres', host: getHost(nodeData), port: getPort(nodeData), + ssl: getSSL(nodeData), username: user, password: password, database: getDatabase(nodeData) @@ -218,6 +227,8 @@ class PostgresRecordManager implements RecordManagerInterface { const queryRunner = dataSource.createQueryRunner() const tableName = this.sanitizeTableName(this.tableName) + await queryRunner.query('CREATE EXTENSION IF NOT EXISTS 
pgcrypto;') + await queryRunner.manager.query(` CREATE TABLE IF NOT EXISTS "${tableName}" ( uuid UUID PRIMARY KEY DEFAULT gen_random_uuid(), @@ -249,9 +260,9 @@ class PostgresRecordManager implements RecordManagerInterface { const dataSource = await this.getDataSource() try { const queryRunner = dataSource.createQueryRunner() - const res = await queryRunner.manager.query('SELECT EXTRACT(EPOCH FROM CURRENT_TIMESTAMP)') + const res = await queryRunner.manager.query('SELECT EXTRACT(EPOCH FROM CURRENT_TIMESTAMP) AS now') await queryRunner.release() - return Number.parseFloat(res[0].extract) + return Number.parseFloat(res[0].now) } catch (error) { console.error('Error getting time in PostgresRecordManager:') throw error diff --git a/packages/components/nodes/recordmanager/PostgresRecordManager/README.md b/packages/components/nodes/recordmanager/PostgresRecordManager/README.md index 4c2c592f7..b4684361e 100644 --- a/packages/components/nodes/recordmanager/PostgresRecordManager/README.md +++ b/packages/components/nodes/recordmanager/PostgresRecordManager/README.md @@ -4,14 +4,15 @@ Postgres Record Manager integration for Flowise ## 🌱 Env Variables -| Variable | Description | Type | Default | -| --------------------------------- | ----------------------------------------------- | ------ | ----------------- | -| POSTGRES_RECORDMANAGER_HOST | Default `host` for Postgres Record Manager | String | | -| POSTGRES_RECORDMANAGER_PORT | Default `port` for Postgres Record Manager | Number | 5432 | -| POSTGRES_RECORDMANAGER_USER | Default `user` for Postgres Record Manager | String | | -| POSTGRES_RECORDMANAGER_PASSWORD | Default `password` for Postgres Record Manager | String | | -| POSTGRES_RECORDMANAGER_DATABASE | Default `database` for Postgres Record Manager | String | | -| POSTGRES_RECORDMANAGER_TABLE_NAME | Default `tableName` for Postgres Record Manager | String | upsertion_records | +| Variable | Description | Type | Default | +| --------------------------------- | 
----------------------------------------------- | ------- | ----------------- | +| POSTGRES_RECORDMANAGER_HOST | Default `host` for Postgres Record Manager | String | | +| POSTGRES_RECORDMANAGER_PORT | Default `port` for Postgres Record Manager | Number | 5432 | +| POSTGRES_RECORDMANAGER_USER | Default `user` for Postgres Record Manager | String | | +| POSTGRES_RECORDMANAGER_PASSWORD | Default `password` for Postgres Record Manager | String | | +| POSTGRES_RECORDMANAGER_DATABASE | Default `database` for Postgres Record Manager | String | | +| POSTGRES_RECORDMANAGER_TABLE_NAME | Default `tableName` for Postgres Record Manager | String | upsertion_records | +| POSTGRES_RECORDMANAGER_SSL | Default `ssl` for Postgres Vector Store | Boolean | false | ## License diff --git a/packages/components/nodes/recordmanager/PostgresRecordManager/utils.ts b/packages/components/nodes/recordmanager/PostgresRecordManager/utils.ts index f9a8d9ae0..e3547fc89 100644 --- a/packages/components/nodes/recordmanager/PostgresRecordManager/utils.ts +++ b/packages/components/nodes/recordmanager/PostgresRecordManager/utils.ts @@ -12,6 +12,10 @@ export function getPort(nodeData?: INodeData) { return defaultChain(nodeData?.inputs?.port, process.env.POSTGRES_RECORDMANAGER_PORT, '5432') } +export function getSSL(nodeData?: INodeData) { + return defaultChain(nodeData?.inputs?.ssl, process.env.POSTGRES_RECORDMANAGER_SSL, false) +} + export function getTableName(nodeData?: INodeData) { return defaultChain(nodeData?.inputs?.tableName, process.env.POSTGRES_RECORDMANAGER_TABLE_NAME, 'upsertion_records') } diff --git a/packages/components/nodes/retrievers/ExtractMetadataRetriever/ExtractMetadataRetriever.ts b/packages/components/nodes/retrievers/ExtractMetadataRetriever/ExtractMetadataRetriever.ts index fa37b99e4..481684454 100644 --- a/packages/components/nodes/retrievers/ExtractMetadataRetriever/ExtractMetadataRetriever.ts +++ 
b/packages/components/nodes/retrievers/ExtractMetadataRetriever/ExtractMetadataRetriever.ts @@ -150,6 +150,7 @@ class ExtractMetadataRetriever_Retrievers implements INode { prompt: dynamicMetadataFilterRetrieverPrompt, topK: topK ? parseInt(topK, 10) : (vectorStore as any)?.k ?? 4 }) + retriever.filter = vectorStore?.lc_kwargs?.filter ?? (vectorStore as any).filter if (output === 'retriever') return retriever else if (output === 'document') return await retriever.getRelevantDocuments(finalInputQuery) diff --git a/packages/components/nodes/retrievers/HydeRetriever/HydeRetriever.ts b/packages/components/nodes/retrievers/HydeRetriever/HydeRetriever.ts index 0a4d69881..580d4ba4c 100644 --- a/packages/components/nodes/retrievers/HydeRetriever/HydeRetriever.ts +++ b/packages/components/nodes/retrievers/HydeRetriever/HydeRetriever.ts @@ -172,6 +172,7 @@ Passage:` else if (promptKey) obj.promptTemplate = promptKey const retriever = new HydeRetriever(obj) + retriever.filter = vectorStore?.lc_kwargs?.filter ?? (vectorStore as any).filter if (output === 'retriever') return retriever else if (output === 'document') return await retriever.getRelevantDocuments(query ? query : input) diff --git a/packages/components/nodes/retrievers/MultiQueryRetriever/MultiQueryRetriever.ts b/packages/components/nodes/retrievers/MultiQueryRetriever/MultiQueryRetriever.ts index 3802f0da8..fd845a35c 100644 --- a/packages/components/nodes/retrievers/MultiQueryRetriever/MultiQueryRetriever.ts +++ b/packages/components/nodes/retrievers/MultiQueryRetriever/MultiQueryRetriever.ts @@ -71,7 +71,7 @@ class MultiQueryRetriever_Retrievers implements INode { const retriever = MultiQueryRetriever.fromLLM({ llm: model, - retriever: vectorStore.asRetriever(), + retriever: vectorStore.asRetriever({ filter: vectorStore?.lc_kwargs?.filter ?? 
vectorStore?.filter }), verbose: process.env.DEBUG === 'true', // @ts-ignore prompt: PromptTemplate.fromTemplate(prompt) diff --git a/packages/components/nodes/retrievers/RRFRetriever/ReciprocalRankFusion.ts b/packages/components/nodes/retrievers/RRFRetriever/ReciprocalRankFusion.ts index 36141c5ca..47ae2d000 100644 --- a/packages/components/nodes/retrievers/RRFRetriever/ReciprocalRankFusion.ts +++ b/packages/components/nodes/retrievers/RRFRetriever/ReciprocalRankFusion.ts @@ -50,7 +50,7 @@ export class ReciprocalRankFusion extends BaseDocumentCompressor { }) const docList: Document>[][] = [] for (let i = 0; i < queries.length; i++) { - const resultOne = await this.baseRetriever.vectorStore.similaritySearch(queries[i], 5) + const resultOne = await this.baseRetriever.vectorStore.similaritySearch(queries[i], 5, this.baseRetriever.filter) const docs: any[] = [] resultOne.forEach((doc) => { docs.push(doc) diff --git a/packages/components/nodes/retrievers/SimilarityThresholdRetriever/SimilarityThresholdRetriever.ts b/packages/components/nodes/retrievers/SimilarityThresholdRetriever/SimilarityThresholdRetriever.ts index bdfb10d97..94c2d0217 100644 --- a/packages/components/nodes/retrievers/SimilarityThresholdRetriever/SimilarityThresholdRetriever.ts +++ b/packages/components/nodes/retrievers/SimilarityThresholdRetriever/SimilarityThresholdRetriever.ts @@ -100,6 +100,7 @@ class SimilarityThresholdRetriever_Retrievers implements INode { maxK: maxK ? parseInt(maxK, 10) : 100, kIncrement: kIncrement ? parseInt(kIncrement, 10) : 2 }) + retriever.filter = vectorStore?.lc_kwargs?.filter ?? (vectorStore as any).filter if (output === 'retriever') return retriever else if (output === 'document') return await retriever.getRelevantDocuments(query ? 
query : input) diff --git a/packages/components/nodes/sequentialagents/LLMNode/LLMNode.ts b/packages/components/nodes/sequentialagents/LLMNode/LLMNode.ts index 182f1a41b..371a8986f 100644 --- a/packages/components/nodes/sequentialagents/LLMNode/LLMNode.ts +++ b/packages/components/nodes/sequentialagents/LLMNode/LLMNode.ts @@ -1,4 +1,4 @@ -import { flatten, uniq } from 'lodash' +import { difference, flatten, uniq } from 'lodash' import { DataSource } from 'typeorm' import { z } from 'zod' import { RunnableSequence, RunnablePassthrough, RunnableConfig } from '@langchain/core/runnables' @@ -430,8 +430,15 @@ class LLMNode_SeqAgents implements INode { const abortControllerSignal = options.signal as AbortController const llmNodeInputVariables = uniq([...getInputVariables(systemPrompt), ...getInputVariables(humanPrompt)]) - if (!llmNodeInputVariables.every((element) => Object.keys(llmNodeInputVariablesValues).includes(element))) { - throw new Error('LLM Node input variables values are not provided!') + const missingInputVars = difference(llmNodeInputVariables, Object.keys(llmNodeInputVariablesValues)).join(' ') + const allVariablesSatisfied = missingInputVars.length === 0 + if (!allVariablesSatisfied) { + const nodeInputVars = llmNodeInputVariables.join(' ') + const providedInputVars = Object.keys(llmNodeInputVariablesValues).join(' ') + + throw new Error( + `LLM Node input variables values are not provided! Required: ${nodeInputVars}, Provided: ${providedInputVars}. 
Missing: ${missingInputVars}` + ) } const workerNode = async (state: ISeqAgentsState, config: RunnableConfig) => { diff --git a/packages/components/nodes/tools/ChatflowTool/ChatflowTool.ts b/packages/components/nodes/tools/ChatflowTool/ChatflowTool.ts index f0f718110..c22e0f35c 100644 --- a/packages/components/nodes/tools/ChatflowTool/ChatflowTool.ts +++ b/packages/components/nodes/tools/ChatflowTool/ChatflowTool.ts @@ -313,6 +313,7 @@ class ChatflowTool extends StructuredTool { method: 'POST', headers: { 'Content-Type': 'application/json', + 'flowise-tool': 'true', ...this.headers }, body: JSON.stringify(body) diff --git a/packages/components/nodes/tools/Composio/Composio.ts b/packages/components/nodes/tools/Composio/Composio.ts index 8b666fd13..e08ad9ac1 100644 --- a/packages/components/nodes/tools/Composio/Composio.ts +++ b/packages/components/nodes/tools/Composio/Composio.ts @@ -41,7 +41,7 @@ class Composio_Tools implements INode { constructor() { this.label = 'Composio' this.name = 'composio' - this.version = 1.0 + this.version = 2.0 this.type = 'Composio' this.icon = 'composio.svg' this.category = 'Tools' @@ -73,7 +73,7 @@ class Composio_Tools implements INode { { label: 'Actions to Use', name: 'actions', - type: 'asyncOptions', + type: 'asyncMultiOptions', loadMethod: 'listActions', description: 'Select the actions you want to use', refresh: true @@ -216,8 +216,18 @@ class Composio_Tools implements INode { throw new Error('API Key Required') } + const _actions = nodeData.inputs?.actions + let actions = [] + if (_actions) { + try { + actions = typeof _actions === 'string' ? 
JSON.parse(_actions) : _actions + } catch (error) { + console.error('Error parsing actions:', error) + } + } + const toolset = new LangchainToolSet({ apiKey: composioApiKey }) - const tools = await toolset.getTools({ actions: [nodeData.inputs?.actions as string] }) + const tools = await toolset.getTools({ actions }) return tools } } diff --git a/packages/components/nodes/tools/CurrentDateTime/CurrentDateTime.ts b/packages/components/nodes/tools/CurrentDateTime/CurrentDateTime.ts new file mode 100644 index 000000000..f65747c4d --- /dev/null +++ b/packages/components/nodes/tools/CurrentDateTime/CurrentDateTime.ts @@ -0,0 +1,74 @@ +import { z } from 'zod' +import { INode } from '../../../src/Interface' +import { DynamicStructuredTool } from '../CustomTool/core' + +const code = ` +const now = new Date(); + +// Format date as YYYY-MM-DD +const date = now.toISOString().split('T')[0]; + +// Get time in HH:MM:SS format +const time = now.toTimeString().split(' ')[0]; + +// Get day of week +const days = ['Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday']; +const day = days[now.getDay()]; + +// Get timezone information +const timezone = Intl.DateTimeFormat().resolvedOptions().timeZone; +const timezoneOffset = now.getTimezoneOffset(); +const timezoneOffsetHours = Math.abs(Math.floor(timezoneOffset / 60)); +const timezoneOffsetMinutes = Math.abs(timezoneOffset % 60); +const timezoneOffsetFormatted = + (timezoneOffset <= 0 ? 
'+' : '-') + + timezoneOffsetHours.toString().padStart(2, '0') + ':' + + timezoneOffsetMinutes.toString().padStart(2, '0'); + +return { + date, + time, + day, + timezone, + timezoneOffset: timezoneOffsetFormatted, + iso8601: now.toISOString(), + unix_timestamp: Math.floor(now.getTime() / 1000) +}; +` + +class CurrentDateTime_Tools implements INode { + label: string + name: string + version: number + description: string + type: string + icon: string + category: string + baseClasses: string[] + + constructor() { + this.label = 'CurrentDateTime' + this.name = 'currentDateTime' + this.version = 1.0 + this.type = 'CurrentDateTime' + this.icon = 'currentDateTime.svg' + this.category = 'Tools' + this.description = 'Get todays day, date and time.' + this.baseClasses = [this.type, 'Tool'] + } + + async init(): Promise { + const obj = { + name: 'current_date_time', + description: 'Useful to get current day, date and time.', + schema: z.object({}), + code: code + } + + let dynamicStructuredTool = new DynamicStructuredTool(obj) + + return dynamicStructuredTool + } +} + +module.exports = { nodeClass: CurrentDateTime_Tools } diff --git a/packages/components/nodes/tools/CurrentDateTime/currentDateTime.svg b/packages/components/nodes/tools/CurrentDateTime/currentDateTime.svg new file mode 100644 index 000000000..929244cf4 --- /dev/null +++ b/packages/components/nodes/tools/CurrentDateTime/currentDateTime.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/packages/components/nodes/tools/MCP/BraveSearch/BraveSearchMCP.ts b/packages/components/nodes/tools/MCP/BraveSearch/BraveSearchMCP.ts new file mode 100644 index 000000000..9d5ccd39f --- /dev/null +++ b/packages/components/nodes/tools/MCP/BraveSearch/BraveSearchMCP.ts @@ -0,0 +1,108 @@ +import { Tool } from '@langchain/core/tools' +import { ICommonObject, INode, INodeData, INodeOptionsValue, INodeParams } from '../../../../src/Interface' +import { getCredentialData, getCredentialParam, getNodeModulesPackagePath } from 
'../../../../src/utils' +import { MCPToolkit } from '../core' + +class BraveSearch_MCP implements INode { + label: string + name: string + version: number + description: string + type: string + icon: string + category: string + baseClasses: string[] + documentation: string + credential: INodeParams + inputs: INodeParams[] + + constructor() { + this.label = 'Brave Search MCP' + this.name = 'braveSearchMCP' + this.version = 1.0 + this.type = 'BraveSearch MCP Tool' + this.icon = 'brave.svg' + this.category = 'Tools (MCP)' + this.description = 'MCP server that integrates the Brave Search API - a real-time API to access web search capabilities' + this.documentation = 'https://github.com/modelcontextprotocol/servers/tree/main/src/brave-search' + this.credential = { + label: 'Connect Credential', + name: 'credential', + type: 'credential', + credentialNames: ['braveSearchApi'] + } + this.inputs = [ + { + label: 'Available Actions', + name: 'mcpActions', + type: 'asyncMultiOptions', + loadMethod: 'listActions', + refresh: true + } + ] + this.baseClasses = ['Tool'] + } + + //@ts-ignore + loadMethods = { + listActions: async (nodeData: INodeData, options: ICommonObject): Promise => { + try { + const toolset = await this.getTools(nodeData, options) + toolset.sort((a: any, b: any) => a.name.localeCompare(b.name)) + + return toolset.map(({ name, ...rest }) => ({ + label: name.toUpperCase(), + name: name, + description: rest.description || name + })) + } catch (error) { + return [ + { + label: 'No Available Actions', + name: 'error', + description: 'No available actions, please check your API key and refresh' + } + ] + } + } + } + + async init(nodeData: INodeData, _: string, options: ICommonObject): Promise { + const tools = await this.getTools(nodeData, options) + + const _mcpActions = nodeData.inputs?.mcpActions + let mcpActions = [] + if (_mcpActions) { + try { + mcpActions = typeof _mcpActions === 'string' ? 
JSON.parse(_mcpActions) : _mcpActions + } catch (error) { + console.error('Error parsing mcp actions:', error) + } + } + + return tools.filter((tool: any) => mcpActions.includes(tool.name)) + } + + async getTools(nodeData: INodeData, options: ICommonObject): Promise { + const credentialData = await getCredentialData(nodeData.credential ?? '', options) + const braveApiKey = getCredentialParam('braveApiKey', credentialData, nodeData) + const packagePath = getNodeModulesPackagePath('@modelcontextprotocol/server-brave-search/dist/index.js') + + const serverParams = { + command: 'node', + args: [packagePath], + env: { + BRAVE_API_KEY: braveApiKey + } + } + + const toolkit = new MCPToolkit(serverParams, 'stdio') + await toolkit.initialize() + + const tools = toolkit.tools ?? [] + + return tools as Tool[] + } +} + +module.exports = { nodeClass: BraveSearch_MCP } diff --git a/packages/components/nodes/tools/MCP/BraveSearch/brave.svg b/packages/components/nodes/tools/MCP/BraveSearch/brave.svg new file mode 100644 index 000000000..b1e233577 --- /dev/null +++ b/packages/components/nodes/tools/MCP/BraveSearch/brave.svg @@ -0,0 +1,8 @@ + + + + + + + + diff --git a/packages/components/nodes/tools/MCP/CustomMCP/CustomMCP.ts b/packages/components/nodes/tools/MCP/CustomMCP/CustomMCP.ts new file mode 100644 index 000000000..b24144ea5 --- /dev/null +++ b/packages/components/nodes/tools/MCP/CustomMCP/CustomMCP.ts @@ -0,0 +1,136 @@ +import { Tool } from '@langchain/core/tools' +import { INode, INodeData, INodeOptionsValue, INodeParams } from '../../../../src/Interface' +import { MCPToolkit } from '../core' + +const mcpServerConfig = `{ + "command": "npx", + "args": ["-y", "@modelcontextprotocol/server-filesystem", "/path/to/allowed/files"] +}` + +class Custom_MCP implements INode { + label: string + name: string + version: number + description: string + type: string + icon: string + category: string + baseClasses: string[] + documentation: string + credential: INodeParams + inputs: 
INodeParams[] + + constructor() { + this.label = 'Custom MCP' + this.name = 'customMCP' + this.version = 1.0 + this.type = 'Custom MCP Tool' + this.icon = 'customMCP.png' + this.category = 'Tools (MCP)' + this.description = 'Custom MCP Config' + this.documentation = 'https://github.com/modelcontextprotocol/servers/tree/main/src/brave-search' + this.inputs = [ + { + label: 'MCP Server Config', + name: 'mcpServerConfig', + type: 'code', + hideCodeExecute: true, + placeholder: mcpServerConfig + }, + { + label: 'Available Actions', + name: 'mcpActions', + type: 'asyncMultiOptions', + loadMethod: 'listActions', + refresh: true + } + ] + this.baseClasses = ['Tool'] + } + + //@ts-ignore + loadMethods = { + listActions: async (nodeData: INodeData): Promise => { + try { + const toolset = await this.getTools(nodeData) + toolset.sort((a: any, b: any) => a.name.localeCompare(b.name)) + + return toolset.map(({ name, ...rest }) => ({ + label: name.toUpperCase(), + name: name, + description: rest.description || name + })) + } catch (error) { + return [ + { + label: 'No Available Actions', + name: 'error', + description: 'No available actions, please check your API key and refresh' + } + ] + } + } + } + + async init(nodeData: INodeData): Promise { + const tools = await this.getTools(nodeData) + + const _mcpActions = nodeData.inputs?.mcpActions + let mcpActions = [] + if (_mcpActions) { + try { + mcpActions = typeof _mcpActions === 'string' ? 
JSON.parse(_mcpActions) : _mcpActions + } catch (error) { + console.error('Error parsing mcp actions:', error) + } + } + + return tools.filter((tool: any) => mcpActions.includes(tool.name)) + } + + async getTools(nodeData: INodeData): Promise { + const mcpServerConfig = nodeData.inputs?.mcpServerConfig as string + + if (!mcpServerConfig) { + throw new Error('MCP Server Config is required') + } + + try { + let serverParams + if (typeof mcpServerConfig === 'object') { + serverParams = mcpServerConfig + } else if (typeof mcpServerConfig === 'string') { + const serverParamsString = convertToValidJSONString(mcpServerConfig) + serverParams = JSON.parse(serverParamsString) + } + + // Compatible with stdio and SSE + let toolkit: MCPToolkit + if (serverParams?.command === undefined) { + toolkit = new MCPToolkit(serverParams, 'sse') + } else { + toolkit = new MCPToolkit(serverParams, 'stdio') + } + + await toolkit.initialize() + + const tools = toolkit.tools ?? [] + + return tools as Tool[] + } catch (error) { + throw new Error(`Invalid MCP Server Config: ${error}`) + } + } +} + +function convertToValidJSONString(inputString: string) { + try { + const jsObject = Function('return ' + inputString)() + return JSON.stringify(jsObject, null, 2) + } catch (error) { + console.error('Error converting to JSON:', error) + return '' + } +} + +module.exports = { nodeClass: Custom_MCP } diff --git a/packages/components/nodes/tools/MCP/CustomMCP/customMCP.png b/packages/components/nodes/tools/MCP/CustomMCP/customMCP.png new file mode 100644 index 000000000..695023461 Binary files /dev/null and b/packages/components/nodes/tools/MCP/CustomMCP/customMCP.png differ diff --git a/packages/components/nodes/tools/MCP/Github/GithubMCP.ts b/packages/components/nodes/tools/MCP/Github/GithubMCP.ts new file mode 100644 index 000000000..f0beafe95 --- /dev/null +++ b/packages/components/nodes/tools/MCP/Github/GithubMCP.ts @@ -0,0 +1,114 @@ +import { Tool } from '@langchain/core/tools' +import { 
ICommonObject, INode, INodeData, INodeOptionsValue, INodeParams } from '../../../../src/Interface' +import { getCredentialData, getCredentialParam, getNodeModulesPackagePath } from '../../../../src/utils' +import { MCPToolkit } from '../core' + +class Github_MCP implements INode { + label: string + name: string + version: number + description: string + type: string + icon: string + category: string + baseClasses: string[] + documentation: string + credential: INodeParams + inputs: INodeParams[] + + constructor() { + this.label = 'Github MCP' + this.name = 'githubMCP' + this.version = 1.0 + this.type = 'Github MCP Tool' + this.icon = 'github.svg' + this.category = 'Tools (MCP)' + this.description = 'MCP Server for the GitHub API' + this.documentation = 'https://github.com/modelcontextprotocol/servers/tree/main/src/github' + this.credential = { + label: 'Connect Credential', + name: 'credential', + type: 'credential', + credentialNames: ['githubApi'] + } + this.inputs = [ + { + label: 'Available Actions', + name: 'mcpActions', + type: 'asyncMultiOptions', + loadMethod: 'listActions', + refresh: true + } + ] + this.baseClasses = ['Tool'] + } + + //@ts-ignore + loadMethods = { + listActions: async (nodeData: INodeData, options: ICommonObject): Promise => { + try { + const toolset = await this.getTools(nodeData, options) + toolset.sort((a: any, b: any) => a.name.localeCompare(b.name)) + + return toolset.map(({ name, ...rest }) => ({ + label: name.toUpperCase(), + name: name, + description: rest.description || name + })) + } catch (error) { + console.error('Error listing actions:', error) + return [ + { + label: 'No Available Actions', + name: 'error', + description: 'No available actions, please check your Github Access Token and refresh' + } + ] + } + } + } + + async init(nodeData: INodeData, _: string, options: ICommonObject): Promise { + const tools = await this.getTools(nodeData, options) + + const _mcpActions = nodeData.inputs?.mcpActions + let mcpActions = [] + if 
(_mcpActions) { + try { + mcpActions = typeof _mcpActions === 'string' ? JSON.parse(_mcpActions) : _mcpActions + } catch (error) { + console.error('Error parsing mcp actions:', error) + } + } + + return tools.filter((tool: any) => mcpActions.includes(tool.name)) + } + + async getTools(nodeData: INodeData, options: ICommonObject): Promise { + const credentialData = await getCredentialData(nodeData.credential ?? '', options) + const accessToken = getCredentialParam('accessToken', credentialData, nodeData) + + if (!accessToken) { + throw new Error('Missing Github Access Token') + } + + const packagePath = getNodeModulesPackagePath('@modelcontextprotocol/server-github/dist/index.js') + + const serverParams = { + command: 'node', + args: [packagePath], + env: { + GITHUB_PERSONAL_ACCESS_TOKEN: accessToken + } + } + + const toolkit = new MCPToolkit(serverParams, 'stdio') + await toolkit.initialize() + + const tools = toolkit.tools ?? [] + + return tools as Tool[] + } +} + +module.exports = { nodeClass: Github_MCP } diff --git a/packages/components/nodes/tools/MCP/Github/github.svg b/packages/components/nodes/tools/MCP/Github/github.svg new file mode 100644 index 000000000..01f228d10 --- /dev/null +++ b/packages/components/nodes/tools/MCP/Github/github.svg @@ -0,0 +1,13 @@ + + + + + + + + + + + + + diff --git a/packages/components/nodes/tools/MCP/PostgreSQL/PostgreSQLMCP.ts b/packages/components/nodes/tools/MCP/PostgreSQL/PostgreSQLMCP.ts new file mode 100644 index 000000000..d9fc0418d --- /dev/null +++ b/packages/components/nodes/tools/MCP/PostgreSQL/PostgreSQLMCP.ts @@ -0,0 +1,111 @@ +import { Tool } from '@langchain/core/tools' +import { ICommonObject, INode, INodeData, INodeOptionsValue, INodeParams } from '../../../../src/Interface' +import { getCredentialData, getCredentialParam, getNodeModulesPackagePath } from '../../../../src/utils' +import { MCPToolkit } from '../core' + +class PostgreSQL_MCP implements INode { + label: string + name: string + version: number + 
description: string + type: string + icon: string + category: string + baseClasses: string[] + credential: INodeParams + documentation: string + inputs: INodeParams[] + + constructor() { + this.label = 'PostgreSQL MCP' + this.name = 'postgreSQLMCP' + this.version = 1.0 + this.type = 'PostgreSQL MCP Tool' + this.icon = 'postgres.svg' + this.category = 'Tools (MCP)' + this.description = 'MCP server that provides read-only access to PostgreSQL databases' + this.documentation = 'https://github.com/modelcontextprotocol/servers/tree/main/src/postgres' + this.credential = { + label: 'Connect Credential', + name: 'credential', + type: 'credential', + credentialNames: ['PostgresUrl'] + } + this.inputs = [ + { + label: 'Available Actions', + name: 'mcpActions', + type: 'asyncMultiOptions', + loadMethod: 'listActions', + refresh: true + } + ] + this.baseClasses = ['Tool'] + } + + //@ts-ignore + loadMethods = { + listActions: async (nodeData: INodeData, options: ICommonObject): Promise => { + try { + const toolset = await this.getTools(nodeData, options) + toolset.sort((a: any, b: any) => a.name.localeCompare(b.name)) + + return toolset.map(({ name, ...rest }) => ({ + label: name.toUpperCase(), + name: name, + description: rest.description || name + })) + } catch (error) { + console.error('Error listing actions:', error) + return [ + { + label: 'No Available Actions', + name: 'error', + description: 'No available actions, please check your postgres url and refresh' + } + ] + } + } + } + + async init(nodeData: INodeData, _: string, options: ICommonObject): Promise { + const tools = await this.getTools(nodeData, options) + + const _mcpActions = nodeData.inputs?.mcpActions + let mcpActions = [] + if (_mcpActions) { + try { + mcpActions = typeof _mcpActions === 'string' ? 
JSON.parse(_mcpActions) : _mcpActions + } catch (error) { + console.error('Error parsing mcp actions:', error) + } + } + + return tools.filter((tool: any) => mcpActions.includes(tool.name)) + } + + async getTools(nodeData: INodeData, options: ICommonObject): Promise { + const credentialData = await getCredentialData(nodeData.credential ?? '', options) + const postgresUrl = getCredentialParam('postgresUrl', credentialData, nodeData) + + if (!postgresUrl) { + throw new Error('No postgres url provided') + } + + const packagePath = getNodeModulesPackagePath('@modelcontextprotocol/server-postgres/dist/index.js') + + const serverParams = { + command: 'node', + args: [packagePath, postgresUrl] + } + + const toolkit = new MCPToolkit(serverParams, 'stdio') + await toolkit.initialize() + + const tools = toolkit.tools ?? [] + + return tools as Tool[] + } +} + +module.exports = { nodeClass: PostgreSQL_MCP } diff --git a/packages/components/nodes/tools/MCP/PostgreSQL/postgres.svg b/packages/components/nodes/tools/MCP/PostgreSQL/postgres.svg new file mode 100644 index 000000000..f631e7a84 --- /dev/null +++ b/packages/components/nodes/tools/MCP/PostgreSQL/postgres.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/packages/components/nodes/tools/MCP/SequentialThinking/SequentialThinkingMCP.ts b/packages/components/nodes/tools/MCP/SequentialThinking/SequentialThinkingMCP.ts new file mode 100644 index 000000000..27d73ecda --- /dev/null +++ b/packages/components/nodes/tools/MCP/SequentialThinking/SequentialThinkingMCP.ts @@ -0,0 +1,98 @@ +import { Tool } from '@langchain/core/tools' +import { ICommonObject, INode, INodeData, INodeOptionsValue, INodeParams } from '../../../../src/Interface' +import { getNodeModulesPackagePath } from '../../../../src/utils' +import { MCPToolkit } from '../core' + +class SequentialThinking_MCP implements INode { + label: string + name: string + version: number + description: string + type: string + icon: string + category: string + 
baseClasses: string[] + documentation: string + inputs: INodeParams[] + + constructor() { + this.label = 'Sequential Thinking MCP' + this.name = 'sequentialThinkingMCP' + this.version = 1.0 + this.type = 'Sequential Thinking MCP Tool' + this.icon = 'sequentialthinking.svg' + this.category = 'Tools (MCP)' + this.description = + 'MCP server that provides a tool for dynamic and reflective problem-solving through a structured thinking process' + this.documentation = 'https://github.com/modelcontextprotocol/servers/tree/main/src/sequentialthinking' + this.inputs = [ + { + label: 'Available Actions', + name: 'mcpActions', + type: 'asyncMultiOptions', + loadMethod: 'listActions', + refresh: true + } + ] + this.baseClasses = ['Tool'] + } + + //@ts-ignore + loadMethods = { + listActions: async (nodeData: INodeData, options: ICommonObject): Promise => { + try { + const toolset = await this.getTools(nodeData, options) + toolset.sort((a: any, b: any) => a.name.localeCompare(b.name)) + + return toolset.map(({ name, ...rest }) => ({ + label: name.toUpperCase(), + name: name, + description: rest.description || name + })) + } catch (error) { + console.error('Error listing actions:', error) + return [ + { + label: 'No Available Actions', + name: 'error', + description: 'No available actions, please refresh' + } + ] + } + } + } + + async init(nodeData: INodeData, _: string, options: ICommonObject): Promise { + const tools = await this.getTools(nodeData, options) + + const _mcpActions = nodeData.inputs?.mcpActions + let mcpActions = [] + if (_mcpActions) { + try { + mcpActions = typeof _mcpActions === 'string' ? 
JSON.parse(_mcpActions) : _mcpActions + } catch (error) { + console.error('Error parsing mcp actions:', error) + } + } + + return tools.filter((tool: any) => mcpActions.includes(tool.name)) + } + + async getTools(_nodeData: INodeData, _options: ICommonObject): Promise { + const packagePath = getNodeModulesPackagePath('@modelcontextprotocol/server-sequential-thinking/dist/index.js') + + const serverParams = { + command: 'node', + args: [packagePath] + } + + const toolkit = new MCPToolkit(serverParams, 'stdio') + await toolkit.initialize() + + const tools = toolkit.tools ?? [] + + return tools as Tool[] + } +} + +module.exports = { nodeClass: SequentialThinking_MCP } diff --git a/packages/components/nodes/tools/MCP/SequentialThinking/sequentialthinking.svg b/packages/components/nodes/tools/MCP/SequentialThinking/sequentialthinking.svg new file mode 100644 index 000000000..3e5a7de2e --- /dev/null +++ b/packages/components/nodes/tools/MCP/SequentialThinking/sequentialthinking.svg @@ -0,0 +1,36 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/packages/components/nodes/tools/MCP/Slack/SlackMCP.ts b/packages/components/nodes/tools/MCP/Slack/SlackMCP.ts new file mode 100644 index 000000000..af3068207 --- /dev/null +++ b/packages/components/nodes/tools/MCP/Slack/SlackMCP.ts @@ -0,0 +1,116 @@ +import { Tool } from '@langchain/core/tools' +import { ICommonObject, INode, INodeData, INodeOptionsValue, INodeParams } from '../../../../src/Interface' +import { getCredentialData, getCredentialParam, getNodeModulesPackagePath } from '../../../../src/utils' +import { MCPToolkit } from '../core' + +class Slack_MCP implements INode { + label: string + name: string + version: number + description: string + type: string + icon: string + category: string + baseClasses: string[] + documentation: string + credential: INodeParams + inputs: INodeParams[] + + constructor() { + this.label = 'Slack MCP' + this.name = 'slackMCP' + 
this.version = 1.0 + this.type = 'Slack MCP Tool' + this.icon = 'slack.svg' + this.category = 'Tools (MCP)' + this.description = 'MCP Server for the Slack API' + this.documentation = 'https://github.com/modelcontextprotocol/servers/tree/main/src/slack' + this.credential = { + label: 'Connect Credential', + name: 'credential', + type: 'credential', + credentialNames: ['slackApi'] + } + this.inputs = [ + { + label: 'Available Actions', + name: 'mcpActions', + type: 'asyncMultiOptions', + loadMethod: 'listActions', + refresh: true + } + ] + this.baseClasses = ['Tool'] + } + + //@ts-ignore + loadMethods = { + listActions: async (nodeData: INodeData, options: ICommonObject): Promise => { + try { + const toolset = await this.getTools(nodeData, options) + toolset.sort((a: any, b: any) => a.name.localeCompare(b.name)) + + return toolset.map(({ name, ...rest }) => ({ + label: name.toUpperCase(), + name: name, + description: rest.description || name + })) + } catch (error) { + console.error('Error listing actions:', error) + return [ + { + label: 'No Available Actions', + name: 'error', + description: 'No available actions, please check your Slack Bot Token and refresh' + } + ] + } + } + } + + async init(nodeData: INodeData, _: string, options: ICommonObject): Promise { + const tools = await this.getTools(nodeData, options) + + const _mcpActions = nodeData.inputs?.mcpActions + let mcpActions = [] + if (_mcpActions) { + try { + mcpActions = typeof _mcpActions === 'string' ? JSON.parse(_mcpActions) : _mcpActions + } catch (error) { + console.error('Error parsing mcp actions:', error) + } + } + + return tools.filter((tool: any) => mcpActions.includes(tool.name)) + } + + async getTools(nodeData: INodeData, options: ICommonObject): Promise { + const credentialData = await getCredentialData(nodeData.credential ?? 
'', options) + const botToken = getCredentialParam('botToken', credentialData, nodeData) + const teamId = getCredentialParam('teamId', credentialData, nodeData) + + if (!botToken || !teamId) { + throw new Error('Missing Credentials') + } + + const packagePath = getNodeModulesPackagePath('@modelcontextprotocol/server-slack/dist/index.js') + + const serverParams = { + command: 'node', + args: [packagePath], + env: { + SLACK_BOT_TOKEN: botToken, + SLACK_TEAM_ID: teamId + } + } + + const toolkit = new MCPToolkit(serverParams, 'stdio') + await toolkit.initialize() + + const tools = toolkit.tools ?? [] + + return tools as Tool[] + } +} + +module.exports = { nodeClass: Slack_MCP } diff --git a/packages/components/nodes/tools/MCP/Slack/slack.svg b/packages/components/nodes/tools/MCP/Slack/slack.svg new file mode 100644 index 000000000..69a4eb6a2 --- /dev/null +++ b/packages/components/nodes/tools/MCP/Slack/slack.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/packages/components/nodes/tools/MCP/core.ts b/packages/components/nodes/tools/MCP/core.ts new file mode 100644 index 000000000..7c894fcc4 --- /dev/null +++ b/packages/components/nodes/tools/MCP/core.ts @@ -0,0 +1,150 @@ +import { CallToolRequest, CallToolResultSchema, ListToolsResult, ListToolsResultSchema } from '@modelcontextprotocol/sdk/types.js' +import { Client } from '@modelcontextprotocol/sdk/client/index.js' +import { StdioClientTransport, StdioServerParameters } from '@modelcontextprotocol/sdk/client/stdio.js' +import { BaseToolkit, tool, Tool } from '@langchain/core/tools' +import { z } from 'zod' +import { StreamableHTTPClientTransport } from '@modelcontextprotocol/sdk/client/streamableHttp.js' +import { SSEClientTransport } from '@modelcontextprotocol/sdk/client/sse.js' + +export class MCPToolkit extends BaseToolkit { + tools: Tool[] = [] + _tools: ListToolsResult | null = null + model_config: any + transport: StdioClientTransport | SSEClientTransport | StreamableHTTPClientTransport | null = 
null + client: Client | null = null + serverParams: StdioServerParameters | any + transportType: 'stdio' | 'sse' + constructor(serverParams: StdioServerParameters | any, transportType: 'stdio' | 'sse') { + super() + this.serverParams = serverParams + this.transportType = transportType + } + + // Method to create a new client with transport + async createClient(): Promise { + const client = new Client( + { + name: 'flowise-client', + version: '1.0.0' + }, + { + capabilities: {} + } + ) + + let transport: StdioClientTransport | SSEClientTransport | StreamableHTTPClientTransport + + if (this.transportType === 'stdio') { + // Compatible with overridden PATH configuration + const params = { + ...this.serverParams, + env: { + ...(this.serverParams.env || {}), + PATH: process.env.PATH + } + } + + transport = new StdioClientTransport(params as StdioServerParameters) + await client.connect(transport) + } else { + if (this.serverParams.url === undefined) { + throw new Error('URL is required for SSE transport') + } + + const baseUrl = new URL(this.serverParams.url) + try { + transport = new StreamableHTTPClientTransport(baseUrl) + await client.connect(transport) + } catch (error) { + transport = new SSEClientTransport(baseUrl) + await client.connect(transport) + } + } + + return client + } + + async initialize() { + if (this._tools === null) { + this.client = await this.createClient() + + this._tools = await this.client.request({ method: 'tools/list' }, ListToolsResultSchema) + + this.tools = await this.get_tools() + + // Close the initial client after initialization + await this.client.close() + } + } + + async get_tools(): Promise { + if (this._tools === null || this.client === null) { + throw new Error('Must initialize the toolkit first') + } + const toolsPromises = this._tools.tools.map(async (tool: any) => { + if (this.client === null) { + throw new Error('Client is not initialized') + } + return await MCPTool({ + toolkit: this, + name: tool.name, + description: 
tool.description || '', + argsSchema: createSchemaModel(tool.inputSchema) + }) + }) + return Promise.all(toolsPromises) + } +} + +export async function MCPTool({ + toolkit, + name, + description, + argsSchema +}: { + toolkit: MCPToolkit + name: string + description: string + argsSchema: any +}): Promise { + return tool( + async (input): Promise => { + // Create a new client for this request + const client = await toolkit.createClient() + + try { + const req: CallToolRequest = { method: 'tools/call', params: { name: name, arguments: input } } + const res = await client.request(req, CallToolResultSchema) + const content = res.content + const contentString = JSON.stringify(content) + return contentString + } finally { + // Always close the client after the request completes + await client.close() + } + }, + { + name: name, + description: description, + schema: argsSchema + } + ) +} + +function createSchemaModel( + inputSchema: { + type: 'object' + properties?: import('zod').objectOutputType<{}, import('zod').ZodTypeAny, 'passthrough'> | undefined + } & { [k: string]: unknown } +): any { + if (inputSchema.type !== 'object' || !inputSchema.properties) { + throw new Error('Invalid schema type or missing properties') + } + + const schemaProperties = Object.entries(inputSchema.properties).reduce((acc, [key, _]) => { + acc[key] = z.any() + return acc + }, {} as Record) + + return z.object(schemaProperties) +} diff --git a/packages/components/nodes/tools/OpenAPIToolkit/OpenAPIToolkit.ts b/packages/components/nodes/tools/OpenAPIToolkit/OpenAPIToolkit.ts index f34759ae2..d44f5f103 100644 --- a/packages/components/nodes/tools/OpenAPIToolkit/OpenAPIToolkit.ts +++ b/packages/components/nodes/tools/OpenAPIToolkit/OpenAPIToolkit.ts @@ -48,6 +48,13 @@ class OpenAPIToolkit_Tools implements INode { additionalParams: true, optional: true }, + { + label: 'Remove null parameters', + name: 'removeNulls', + type: 'boolean', + optional: true, + description: 'Remove all keys with null values 
from the parsed arguments' + }, { label: 'Custom Code', name: 'customCode', @@ -71,6 +78,7 @@ class OpenAPIToolkit_Tools implements INode { const yamlFileBase64 = nodeData.inputs?.yamlFile as string const customCode = nodeData.inputs?.customCode as string const _headers = nodeData.inputs?.headers as string + const removeNulls = nodeData.inputs?.removeNulls as boolean const headers = typeof _headers === 'object' ? _headers : _headers ? JSON.parse(_headers) : {} @@ -106,7 +114,7 @@ class OpenAPIToolkit_Tools implements INode { const flow = { chatflowId: options.chatflowid } - const tools = getTools(_data.paths, baseUrl, headers, variables, flow, toolReturnDirect, customCode) + const tools = getTools(_data.paths, baseUrl, headers, variables, flow, toolReturnDirect, customCode, removeNulls) return tools } } @@ -119,17 +127,18 @@ const jsonSchemaToZodSchema = (schema: any, requiredList: string[], keyName: str zodShape[key] = jsonSchemaToZodSchema(schema.properties[key], requiredList, key) } return z.object(zodShape) - } else if (schema.oneOf) { - // Handle oneOf by mapping each option to a Zod schema - const zodSchemas = schema.oneOf.map((subSchema: any) => jsonSchemaToZodSchema(subSchema, requiredList, keyName)) - return z.union(zodSchemas) + } else if (schema.oneOf || schema.anyOf) { + // Handle oneOf/anyOf by mapping each option to a Zod schema + const schemas = schema.oneOf || schema.anyOf + const zodSchemas = schemas.map((subSchema: any) => jsonSchemaToZodSchema(subSchema, requiredList, keyName)) + return z.union(zodSchemas).describe(schema?.description ?? schema?.title ?? keyName) } else if (schema.enum) { - // Handle enum types + // Handle enum types with their title and description return requiredList.includes(keyName) - ? z.enum(schema.enum).describe(schema?.description ?? keyName) + ? z.enum(schema.enum).describe(schema?.description ?? schema?.title ?? keyName) : z .enum(schema.enum) - .describe(schema?.description ?? 
keyName) + .describe(schema?.description ?? schema?.title ?? keyName) .optional() } else if (schema.type === 'string') { return requiredList.includes(keyName) @@ -141,21 +150,32 @@ const jsonSchemaToZodSchema = (schema: any, requiredList: string[], keyName: str } else if (schema.type === 'array') { return z.array(jsonSchemaToZodSchema(schema.items, requiredList, keyName)) } else if (schema.type === 'boolean') { - return requiredList.includes(keyName) - ? z.number({ required_error: `${keyName} required` }).describe(schema?.description ?? keyName) - : z - .number() - .describe(schema?.description ?? keyName) - .optional() - } else if (schema.type === 'number') { return requiredList.includes(keyName) ? z.boolean({ required_error: `${keyName} required` }).describe(schema?.description ?? keyName) : z .boolean() .describe(schema?.description ?? keyName) .optional() + } else if (schema.type === 'number') { + let numberSchema = z.number() + if (typeof schema.minimum === 'number') { + numberSchema = numberSchema.min(schema.minimum) + } + if (typeof schema.maximum === 'number') { + numberSchema = numberSchema.max(schema.maximum) + } + return requiredList.includes(keyName) + ? numberSchema.describe(schema?.description ?? keyName) + : numberSchema.describe(schema?.description ?? keyName).optional() + } else if (schema.type === 'integer') { + let numberSchema = z.number().int() + return requiredList.includes(keyName) + ? numberSchema.describe(schema?.description ?? keyName) + : numberSchema.describe(schema?.description ?? keyName).optional() + } else if (schema.type === 'null') { + return z.null() } - + console.error(`jsonSchemaToZodSchema returns UNKNOWN! 
${keyName}`, schema) // Fallback to unknown type if unrecognized return z.unknown() } @@ -163,9 +183,23 @@ const jsonSchemaToZodSchema = (schema: any, requiredList: string[], keyName: str const extractParameters = (param: ICommonObject, paramZodObj: ICommonObject) => { const paramSchema = param.schema const paramName = param.name - const paramDesc = param.description || param.name + const paramDesc = paramSchema.description || paramSchema.title || param.description || param.name - if (paramSchema.type === 'string') { + if (paramSchema.enum) { + const enumValues = paramSchema.enum as string[] + // Combine title and description from schema + const enumDesc = [paramSchema.title, paramSchema.description, `Valid values: ${enumValues.join(', ')}`].filter(Boolean).join('. ') + + if (param.required) { + paramZodObj[paramName] = z.enum(enumValues as [string, ...string[]]).describe(enumDesc) + } else { + paramZodObj[paramName] = z + .enum(enumValues as [string, ...string[]]) + .describe(enumDesc) + .optional() + } + return paramZodObj + } else if (paramSchema.type === 'string') { if (param.required) { paramZodObj[paramName] = z.string({ required_error: `${paramName} required` }).describe(paramDesc) } else { @@ -183,6 +217,10 @@ const extractParameters = (param: ICommonObject, paramZodObj: ICommonObject) => } else { paramZodObj[paramName] = z.boolean().describe(paramDesc).optional() } + } else if (paramSchema.anyOf || paramSchema.type === 'anyOf') { + // Handle anyOf by using jsonSchemaToZodSchema + const requiredList = param.required ? 
[paramName] : [] + paramZodObj[paramName] = jsonSchemaToZodSchema(paramSchema, requiredList, paramName) } return paramZodObj @@ -195,7 +233,8 @@ const getTools = ( variables: IVariable[], flow: ICommonObject, returnDirect: boolean, - customCode?: string + customCode?: string, + removeNulls?: boolean ) => { const tools = [] for (const path in paths) { @@ -269,7 +308,9 @@ const getTools = ( baseUrl: `${baseUrl}${path}`, method: method, headers, - customCode + customCode, + strict: spec['x-strict'] === true, + removeNulls } const dynamicStructuredTool = new DynamicStructuredTool(toolObj) diff --git a/packages/components/nodes/tools/OpenAPIToolkit/core.ts b/packages/components/nodes/tools/OpenAPIToolkit/core.ts index 8341adc54..f7701770e 100644 --- a/packages/components/nodes/tools/OpenAPIToolkit/core.ts +++ b/packages/components/nodes/tools/OpenAPIToolkit/core.ts @@ -7,6 +7,20 @@ import { CallbackManagerForToolRun, Callbacks, CallbackManager, parseCallbackCon import { availableDependencies, defaultAllowBuiltInDep, prepareSandboxVars } from '../../../src/utils' import { ICommonObject } from '../../../src/Interface' +const removeNulls = (obj: Record) => { + Object.keys(obj).forEach((key) => { + if (obj[key] === null) { + delete obj[key] + } else if (typeof obj[key] === 'object' && obj[key] !== null) { + removeNulls(obj[key]) + if (Object.keys(obj[key]).length === 0) { + delete obj[key] + } + } + }) + return obj +} + interface HttpRequestObject { PathParameters?: Record QueryParameters?: Record @@ -104,6 +118,8 @@ export interface DynamicStructuredToolInput< method: string headers: ICommonObject customCode?: string + strict?: boolean + removeNulls?: boolean } export class DynamicStructuredTool< @@ -122,12 +138,15 @@ export class DynamicStructuredTool< customCode?: string + strict?: boolean + func: DynamicStructuredToolInput['func'] // @ts-ignore schema: T private variables: any[] private flowObj: any + private removeNulls: boolean constructor(fields: 
DynamicStructuredToolInput) { super(fields) @@ -140,6 +159,8 @@ export class DynamicStructuredTool< this.method = fields.method this.headers = fields.headers this.customCode = fields.customCode + this.strict = fields.strict + this.removeNulls = fields.removeNulls ?? false } async call( @@ -156,7 +177,7 @@ export class DynamicStructuredTool< try { parsed = await this.schema.parseAsync(arg) } catch (e) { - throw new ToolInputParsingException(`Received tool input did not match expected schema`, JSON.stringify(arg)) + throw new ToolInputParsingException(`Received tool input did not match expected schema ${e}`, JSON.stringify(arg)) } const callbackManager_ = await CallbackManager.configure( config.callbacks, @@ -203,9 +224,15 @@ export class DynamicStructuredTool< fs: undefined, process: undefined } - if (typeof arg === 'object' && Object.keys(arg).length) { - for (const item in arg) { - sandbox[`$${item}`] = arg[item] + let processedArg = { ...arg } + + if (this.removeNulls && typeof processedArg === 'object' && processedArg !== null) { + processedArg = removeNulls(processedArg) + } + + if (typeof processedArg === 'object' && Object.keys(processedArg).length) { + for (const item in processedArg) { + sandbox[`$${item}`] = processedArg[item] } } @@ -262,4 +289,8 @@ export class DynamicStructuredTool< setFlowObject(flow: any) { this.flowObj = flow } + + isStrict(): boolean { + return this.strict === true + } } diff --git a/packages/components/nodes/tools/TavilyAPI/TavilyAPI.ts b/packages/components/nodes/tools/TavilyAPI/TavilyAPI.ts index 68e44c546..a44415c15 100644 --- a/packages/components/nodes/tools/TavilyAPI/TavilyAPI.ts +++ b/packages/components/nodes/tools/TavilyAPI/TavilyAPI.ts @@ -13,16 +13,137 @@ class TavilyAPI_Tools implements INode { baseClasses: string[] credential: INodeParams inputs: INodeParams[] + additionalParams: boolean constructor() { this.label = 'Tavily API' this.name = 'tavilyAPI' - this.version = 1.0 + this.version = 1.2 this.type = 'TavilyAPI' 
this.icon = 'tavily.svg' this.category = 'Tools' - this.description = 'Wrapper around TavilyAPI - a real-time API to access Google search results' - this.inputs = [] + this.description = 'Wrapper around TavilyAPI - A specialized search engine designed for LLMs and AI agents' + this.inputs = [ + { + label: 'Topic', + name: 'topic', + type: 'options', + options: [ + { label: 'General', name: 'general' }, + { label: 'News', name: 'news' } + ], + default: 'general', + description: 'The category of the search. News for real-time updates, general for broader searches', + additionalParams: true, + optional: true + }, + { + label: 'Search Depth', + name: 'searchDepth', + type: 'options', + options: [ + { label: 'Basic', name: 'basic' }, + { label: 'Advanced', name: 'advanced' } + ], + default: 'basic', + description: 'The depth of the search. Advanced costs 2 API Credits, basic costs 1', + additionalParams: true, + optional: true + }, + { + label: 'Chunks Per Source', + name: 'chunksPerSource', + type: 'number', + default: 3, + description: 'Number of content chunks per source (1-3). 
Only for advanced search', + additionalParams: true, + optional: true + }, + { + label: 'Max Results', + name: 'maxResults', + type: 'number', + default: 5, + additionalParams: true, + description: 'Maximum number of search results (0-20)', + optional: true + }, + { + label: 'Time Range', + name: 'timeRange', + type: 'options', + options: [ + { label: 'Day', name: 'day' }, + { label: 'Week', name: 'week' }, + { label: 'Month', name: 'month' }, + { label: 'Year', name: 'year' } + ], + optional: true, + additionalParams: true, + description: 'Time range to filter results' + }, + { + label: 'Days', + name: 'days', + type: 'number', + default: 7, + additionalParams: true, + description: 'Number of days back from current date (only for news topic)', + optional: true + }, + { + label: 'Include Answer', + name: 'includeAnswer', + type: 'boolean', + default: false, + description: 'Include an LLM-generated answer to the query', + additionalParams: true, + optional: true + }, + { + label: 'Include Raw Content', + name: 'includeRawContent', + type: 'boolean', + default: false, + description: 'Include cleaned and parsed HTML content of each result', + additionalParams: true, + optional: true + }, + { + label: 'Include Images', + name: 'includeImages', + type: 'boolean', + default: false, + description: 'Include image search results', + additionalParams: true, + optional: true + }, + { + label: 'Include Image Descriptions', + name: 'includeImageDescriptions', + type: 'boolean', + default: false, + description: 'Include descriptive text for each image', + additionalParams: true, + optional: true + }, + { + label: 'Include Domains', + name: 'includeDomains', + type: 'string', + optional: true, + description: 'Comma-separated list of domains to include in results', + additionalParams: true + }, + { + label: 'Exclude Domains', + name: 'excludeDomains', + type: 'string', + optional: true, + description: 'Comma-separated list of domains to exclude from results', + additionalParams: 
true + } + ] this.credential = { label: 'Connect Credential', name: 'credential', @@ -35,7 +156,38 @@ class TavilyAPI_Tools implements INode { async init(nodeData: INodeData, _: string, options: ICommonObject): Promise { const credentialData = await getCredentialData(nodeData.credential ?? '', options) const tavilyApiKey = getCredentialParam('tavilyApiKey', credentialData, nodeData) - return new TavilySearchResults({ apiKey: tavilyApiKey }) + + const topic = nodeData.inputs?.topic as string + const searchDepth = nodeData.inputs?.searchDepth as string + const chunksPerSource = nodeData.inputs?.chunksPerSource as number + const maxResults = nodeData.inputs?.maxResults as number + const timeRange = nodeData.inputs?.timeRange as string + const days = nodeData.inputs?.days as number + const includeAnswer = nodeData.inputs?.includeAnswer as boolean + const includeRawContent = nodeData.inputs?.includeRawContent as boolean + const includeImages = nodeData.inputs?.includeImages as boolean + const includeImageDescriptions = nodeData.inputs?.includeImageDescriptions as boolean + const includeDomains = nodeData.inputs?.includeDomains as string + const excludeDomains = nodeData.inputs?.excludeDomains as string + + const config: any = { + apiKey: tavilyApiKey, + topic, + searchDepth, + maxResults, + includeAnswer: includeAnswer || undefined, + includeRawContent: includeRawContent || undefined, + includeImages: includeImages || undefined, + includeImageDescriptions: includeImageDescriptions || undefined + } + + if (chunksPerSource) config.chunksPerSource = chunksPerSource + if (timeRange) config.timeRange = timeRange + if (days) config.days = days + if (includeDomains) config.includeDomains = includeDomains.split(',').map((d) => d.trim()) + if (excludeDomains) config.excludeDomains = excludeDomains.split(',').map((d) => d.trim()) + + return new TavilySearchResults(config) } } diff --git a/packages/components/nodes/tools/WebScraperTool/WebScraperTool.ts 
b/packages/components/nodes/tools/WebScraperTool/WebScraperTool.ts new file mode 100644 index 000000000..de655d170 --- /dev/null +++ b/packages/components/nodes/tools/WebScraperTool/WebScraperTool.ts @@ -0,0 +1,434 @@ +import { INode, INodeParams, INodeData, ICommonObject } from '../../../src/Interface' +import { getBaseClasses } from '../../../src/utils' +import { Tool } from '@langchain/core/tools' +import fetch from 'node-fetch' +import * as cheerio from 'cheerio' +import { URL } from 'url' +import { xmlScrape } from '../../../src/utils' + +interface ScrapedPageData { + url: string + title: string + description: string + body_text: string + error?: string +} + +class WebScraperRecursiveTool extends Tool { + name = 'web_scraper_tool' + description = `Scrapes web pages recursively or via default sitemap. Extracts title, description, and paragraph text. Input should be a single URL string. Returns a JSON string array of scraped page data objects.` + + private maxDepth: number + private maxPages: number | null + private timeoutMs: number + private useSitemap: boolean + private visitedUrls: Set + private scrapedPagesCount: number + + constructor(maxDepth: number = 1, maxPages: number | null = 10, timeoutMs: number = 60000, useSitemap: boolean = false) { + super() + + this.maxDepth = Math.max(1, maxDepth) + this.maxPages = maxPages !== null && maxPages > 0 ? maxPages : null + this.timeoutMs = timeoutMs > 0 ? timeoutMs : 60000 + this.useSitemap = useSitemap + this.visitedUrls = new Set() + this.scrapedPagesCount = 0 + + let desc = '' + if (this.useSitemap) { + desc = `Scrapes URLs listed in the detected default sitemap (/sitemap.xml)` + if (this.maxPages !== null) { + desc += ` up to ${this.maxPages} pages` + } + desc += `, with a ${ + this.timeoutMs / 1000 + }-second timeout per page. 
Falls back to Recursive Link Following if sitemap is not found or empty.` + } else { + desc = `Recursively scrapes web pages starting from a given URL` + if (this.maxDepth > 0) { + desc += ` up to ${this.maxDepth} level(s) deep` + } + if (this.maxPages !== null) { + desc += ` or until ${this.maxPages} pages are scraped` + } + desc += `, with a ${this.timeoutMs / 1000}-second timeout per page, whichever comes first.` + } + desc += ` Extracts title, description, and paragraph text. Input should be a single URL string. Returns a JSON string array of scraped page data.` + this.description = desc + } + + private async scrapeSingleUrl(url: string): Promise & { foundLinks: string[] }> { + try { + const response = await fetch(url, { timeout: this.timeoutMs, redirect: 'follow', follow: 5 }) + if (!response.ok) { + const errorText = await response.text() + return { + title: '', + description: '', + body_text: '', + foundLinks: [], + error: `HTTP Error: ${response.status} ${response.statusText}. ${errorText}` + } + } + const contentType = response.headers.get('content-type') + + if (contentType === null) { + return { + title: '', + description: '', + body_text: '', + foundLinks: [], + error: `Skipped content due to missing Content-Type header` + } + } + + if (!contentType.includes('text/html') && url !== this.visitedUrls.values().next().value) { + if (!contentType.includes('text/xml') && !contentType.includes('application/xml')) { + return { + title: '', + description: '', + body_text: '', + foundLinks: [], + error: `Skipped non-HTML/XML content (Content-Type: ${contentType})` + } + } + + if (!contentType.includes('text/html')) { + return { + title: '', + description: '', + body_text: '', + foundLinks: [], + error: `Skipped non-HTML content (Content-Type: ${contentType})` + } + } + } + + const html = await response.text() + const $ = cheerio.load(html) + const title = $('title').first().text() || 'No title found' + let description = + 
$('meta[name="description"]').attr('content') || + $('meta[property="og:description"]').attr('content') || + $('meta[name="twitter:description"]').attr('content') || + 'No description found' + const paragraphs: string[] = [] + $('p').each((_i, elem) => { + const paragraphText = $(elem).text() + if (paragraphText) { + paragraphs.push(paragraphText.trim()) + } + }) + const body_text = paragraphs.join(' ').replace(/\s\s+/g, ' ').trim() + const foundLinks: string[] = [] + + $('a').each((_i, elem) => { + const href = $(elem).attr('href') + if (href) { + try { + const absoluteUrl = new URL(href, url).toString() + if (absoluteUrl.startsWith('http') && !absoluteUrl.includes('#')) { + foundLinks.push(absoluteUrl) + } + } catch (e) { + // Ignore invalid URLs + } + } + }) + + return { + title: title.trim(), + description: description.trim(), + body_text: body_text, + foundLinks: [...new Set(foundLinks)] + } + } catch (error: any) { + if (error.type === 'request-timeout') { + return { + title: '', + description: '', + body_text: '', + foundLinks: [], + error: `Scraping Error: Request Timeout after ${this.timeoutMs}ms` + } + } + return { + title: '', + description: '', + body_text: '', + foundLinks: [], + error: `Scraping Error: ${error?.message || 'Unknown error'}` + } + } + } + + private async scrapeRecursive(url: string, currentDepth: number): Promise { + if (this.maxPages !== null && this.scrapedPagesCount >= this.maxPages) { + return [] + } + if (currentDepth > this.maxDepth) { + return [] + } + if (this.visitedUrls.has(url)) { + return [] + } + try { + new URL(url) + if (!url.startsWith('http')) throw new Error('Invalid protocol') + } catch (e) { + if (this.maxPages !== null) { + this.scrapedPagesCount++ + } + return [{ url, title: '', description: '', body_text: '', error: `Invalid URL format or protocol` }] + } + this.visitedUrls.add(url) + if (this.maxPages !== null) { + this.scrapedPagesCount++ + } + + const { foundLinks, ...scrapedContent } = await 
this.scrapeSingleUrl(url) + const currentPageData: ScrapedPageData = { url, ...scrapedContent } + let results: ScrapedPageData[] = [currentPageData] + + if (!currentPageData.error && currentDepth < this.maxDepth && (this.maxPages === null || this.scrapedPagesCount < this.maxPages)) { + const recursivePromises: Promise[] = [] + for (const link of foundLinks) { + if (this.maxPages !== null && this.scrapedPagesCount >= this.maxPages) { + break + } + if (!this.visitedUrls.has(link)) { + recursivePromises.push(this.scrapeRecursive(link, currentDepth + 1)) + } + } + if (recursivePromises.length > 0) { + const nestedResults = await Promise.all(recursivePromises) + results = results.concat(...nestedResults) + } + } else if (currentPageData.error) { + // Do nothing if there was an error scraping the current page + } + return results + } + + private async scrapeUrlsFromList(urlList: string[]): Promise { + const results: ScrapedPageData[] = [] + const scrapePromises: Promise[] = [] + + for (const url of urlList) { + if (this.maxPages !== null && this.scrapedPagesCount >= this.maxPages) { + break + } + if (this.visitedUrls.has(url)) { + continue + } + + this.visitedUrls.add(url) + this.scrapedPagesCount++ + + const promise = (async () => { + const { foundLinks: _ignoreLinks, ...scrapedContent } = await this.scrapeSingleUrl(url) + results.push({ url, ...scrapedContent }) + })() + scrapePromises.push(promise) + } + + await Promise.all(scrapePromises) + + return results.slice(0, this.maxPages ?? results.length) + } + + async _call(initialInput: string): Promise { + this.visitedUrls = new Set() + this.scrapedPagesCount = 0 + let performedFallback = false + let sitemapAttempted = false + + if (!initialInput || typeof initialInput !== 'string') { + return JSON.stringify({ error: 'Input must be a single URL string.' 
}) + } + + try { + let allScrapedData: ScrapedPageData[] = [] + let urlsFromSitemap: string[] = [] + + if (this.useSitemap) { + sitemapAttempted = true + let sitemapUrlToFetch: string | undefined = undefined + + try { + const baseUrl = new URL(initialInput) + sitemapUrlToFetch = new URL('/sitemap.xml', baseUrl.origin).toString() + } catch (e) { + return JSON.stringify({ error: 'Invalid initial URL provided for sitemap detection.' }) + } + + if (!sitemapUrlToFetch) { + return JSON.stringify({ error: 'Could not determine sitemap URL.' }) + } + + try { + const limitParam = this.maxPages === null ? Infinity : this.maxPages + urlsFromSitemap = await xmlScrape(sitemapUrlToFetch, limitParam) + } catch (sitemapError) { + urlsFromSitemap = [] + } + + if (urlsFromSitemap.length > 0) { + allScrapedData = await this.scrapeUrlsFromList(urlsFromSitemap) + } else { + performedFallback = true + } + } + + if (!sitemapAttempted || performedFallback) { + allScrapedData = await this.scrapeRecursive(initialInput, 1) + } + + if (this.maxPages !== null && this.scrapedPagesCount >= this.maxPages) { + // Log or indicate that the max page limit was reached during scraping + } + + if (performedFallback) { + const warningResult = { + warning: 'Sitemap not found or empty; fell back to recursive scraping.', + scrapedData: allScrapedData + } + return JSON.stringify(warningResult) + } else { + return JSON.stringify(allScrapedData) + } + } catch (error: any) { + return JSON.stringify({ error: `Failed scrape operation: ${error?.message || 'Unknown error'}` }) + } + } +} + +class WebScraperRecursive_Tools implements INode { + label: string + name: string + version: number + description: string + type: string + icon: string + category: string + baseClasses: string[] + inputs: INodeParams[] + + constructor() { + this.label = 'Web Scraper Tool' + this.name = 'webScraperTool' + this.version = 1.1 + this.type = 'Tool' + this.icon = 'webScraperTool.svg' + this.category = 'Tools' + this.description = 
'Scrapes web pages recursively by following links OR by fetching URLs from the default sitemap.' + this.baseClasses = [this.type, ...getBaseClasses(WebScraperRecursiveTool)] + this.inputs = [ + { + label: 'Scraping Mode', + name: 'scrapeMode', + type: 'options', + options: [ + { label: 'Recursive Link Following', name: 'recursive' }, + { label: 'Sitemap', name: 'sitemap' } + ], + default: 'recursive', + description: + "Select discovery method: 'Recursive' follows links found on pages (uses Max Depth). 'Sitemap' tries sitemap.xml first, but falls back to 'Recursive' if the sitemap is not found or empty.", + additionalParams: true + }, + { + label: 'Max Depth', + name: 'maxDepth', + type: 'number', + description: + 'Maximum levels of links to follow (e.g., 1 = only the initial URL, 2 = initial URL + links found on it). Default 1.', + placeholder: '1', + default: 1, + optional: true, + additionalParams: true + }, + { + label: 'Max Pages', + name: 'maxPages', + type: 'number', + description: + 'Maximum total number of pages to scrape, regardless of mode or depth. Stops when this limit is reached. Leave empty for no page limit. Default: 10.', + placeholder: '10', + default: 10, + optional: true, + additionalParams: true + }, + { + label: 'Timeout (s)', + name: 'timeoutS', + type: 'number', + description: 'Maximum time in seconds to wait for each page request to complete. Accepts decimals (e.g., 0.5). Default 60.', + placeholder: '60', + default: 60, + optional: true, + additionalParams: true + }, + { + label: 'Tool Description', + name: 'description', + type: 'string', + description: + 'Custom description of what the tool does. This is for LLM to determine when to use this tool. Overrides the default description.', + rows: 4, + additionalParams: true, + optional: true, + placeholder: `Scrapes web pages recursively or via default sitemap. Extracts title, description, and paragraph text. Input should be a single URL string. 
Returns a JSON string array of scraped page data objects.` + } + ] + } + + async init(nodeData: INodeData, _: string, _options: ICommonObject): Promise { + const scrapeMode = (nodeData.inputs?.scrapeMode as string) ?? 'recursive' + const useSitemap = scrapeMode === 'sitemap' + + const maxDepthInput = nodeData.inputs?.maxDepth as string | number | undefined + let maxDepth = 1 + if (maxDepthInput !== undefined && maxDepthInput !== '') { + const parsedDepth = parseInt(String(maxDepthInput), 10) + if (!isNaN(parsedDepth) && parsedDepth > 0) { + maxDepth = parsedDepth + } + } + + const maxPagesInput = nodeData.inputs?.maxPages as string | number | undefined + let maxPages: number | null = 10 + if (maxPagesInput === undefined || maxPagesInput === '') { + maxPages = null + } else { + const parsedPages = parseInt(String(maxPagesInput), 10) + if (!isNaN(parsedPages) && parsedPages > 0) { + maxPages = parsedPages + } else if (parsedPages <= 0) { + maxPages = null + } + } + + const timeoutInputS = nodeData.inputs?.timeoutS as string | number | undefined + let timeoutMs = 60000 + if (timeoutInputS !== undefined && timeoutInputS !== '') { + const parsedTimeoutS = parseFloat(String(timeoutInputS)) + if (!isNaN(parsedTimeoutS) && parsedTimeoutS > 0) { + timeoutMs = Math.round(parsedTimeoutS * 1000) + } + } + + const customDescription = nodeData.inputs?.description as string + + const tool = new WebScraperRecursiveTool(maxDepth, maxPages, timeoutMs, useSitemap) + + if (customDescription) { + tool.description = customDescription + } + + return tool + } +} + +module.exports = { nodeClass: WebScraperRecursive_Tools } diff --git a/packages/components/nodes/tools/WebScraperTool/webScraperTool.svg b/packages/components/nodes/tools/WebScraperTool/webScraperTool.svg new file mode 100644 index 000000000..c753ab175 --- /dev/null +++ b/packages/components/nodes/tools/WebScraperTool/webScraperTool.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git 
a/packages/components/nodes/utilities/CustomFunction/CustomFunction.ts b/packages/components/nodes/utilities/CustomFunction/CustomFunction.ts index 1bfddefca..079f186ba 100644 --- a/packages/components/nodes/utilities/CustomFunction/CustomFunction.ts +++ b/packages/components/nodes/utilities/CustomFunction/CustomFunction.ts @@ -88,6 +88,7 @@ class CustomFunction_Utilities implements INode { chatflowId: options.chatflowid, sessionId: options.sessionId, chatId: options.chatId, + rawOutput: options.rawOutput || '', input } diff --git a/packages/components/nodes/vectorstores/Postgres/Postgres.ts b/packages/components/nodes/vectorstores/Postgres/Postgres.ts index 1e7621c3f..ad0f82bb0 100644 --- a/packages/components/nodes/vectorstores/Postgres/Postgres.ts +++ b/packages/components/nodes/vectorstores/Postgres/Postgres.ts @@ -12,6 +12,26 @@ import { getContentColumnName, getDatabase, getHost, getPort, getTableName } fro const serverCredentialsExists = !!process.env.POSTGRES_VECTORSTORE_USER && !!process.env.POSTGRES_VECTORSTORE_PASSWORD +// added temporarily to fix the base class return for VectorStore when postgres node is using TypeORM +function getVectorStoreBaseClasses() { + // Try getting base classes through the utility function + const baseClasses = getBaseClasses(VectorStore) + + // If we got results, return them + if (baseClasses && baseClasses.length > 0) { + return baseClasses + } + + // If VectorStore is recognized as a class but getBaseClasses returned nothing, + // return the known inheritance chain + if (VectorStore instanceof Function) { + return ['VectorStore'] + } + + // Fallback to minimum required class + return ['VectorStore'] +} + class Postgres_VectorStores implements INode { label: string name: string @@ -83,6 +103,14 @@ class Postgres_VectorStores implements INode { placeholder: getPort(), optional: true }, + { + label: 'SSL', + name: 'ssl', + description: 'Use SSL to connect to Postgres', + type: 'boolean', + additionalParams: true, + optional: 
true + }, { label: 'Table Name', name: 'tableName', @@ -187,7 +215,11 @@ class Postgres_VectorStores implements INode { { label: 'Postgres Vector Store', name: 'vectorStore', - baseClasses: [this.type, ...getBaseClasses(VectorStore)] + baseClasses: [ + this.type, + // ...getBaseClasses(VectorStore), // disabled temporarily for using TypeORM + ...getVectorStoreBaseClasses() // added temporarily for using TypeORM + ] } ] } diff --git a/packages/components/nodes/vectorstores/Postgres/README.md b/packages/components/nodes/vectorstores/Postgres/README.md index 2f85ada23..6bee360f4 100644 --- a/packages/components/nodes/vectorstores/Postgres/README.md +++ b/packages/components/nodes/vectorstores/Postgres/README.md @@ -4,15 +4,16 @@ Postgres Vector Store integration for Flowise ## 🌱 Env Variables -| Variable | Description | Type | Default | -| ---------------------------------------- | ----------------------------------------------------- | ------ | ----------- | -| POSTGRES_VECTORSTORE_HOST | Default `host` for Postgres Vector Store | String | | -| POSTGRES_VECTORSTORE_PORT | Default `port` for Postgres Vector Store | Number | 5432 | -| POSTGRES_VECTORSTORE_USER | Default `user` for Postgres Vector Store | String | | -| POSTGRES_VECTORSTORE_PASSWORD | Default `password` for Postgres Vector Store | String | | -| POSTGRES_VECTORSTORE_DATABASE | Default `database` for Postgres Vector Store | String | | -| POSTGRES_VECTORSTORE_TABLE_NAME | Default `tableName` for Postgres Vector Store | String | documents | -| POSTGRES_VECTORSTORE_CONTENT_COLUMN_NAME | Default `contentColumnName` for Postgres Vector Store | String | pageContent | +| Variable | Description | Type | Default | +| ---------------------------------------- | ----------------------------------------------------- | ------- | ----------- | +| POSTGRES_VECTORSTORE_HOST | Default `host` for Postgres Vector Store | String | | +| POSTGRES_VECTORSTORE_PORT | Default `port` for Postgres Vector Store | Number | 5432 | +| 
POSTGRES_VECTORSTORE_USER | Default `user` for Postgres Vector Store | String | | +| POSTGRES_VECTORSTORE_PASSWORD | Default `password` for Postgres Vector Store | String | | +| POSTGRES_VECTORSTORE_DATABASE | Default `database` for Postgres Vector Store | String | | +| POSTGRES_VECTORSTORE_TABLE_NAME | Default `tableName` for Postgres Vector Store | String | documents | +| POSTGRES_VECTORSTORE_CONTENT_COLUMN_NAME | Default `contentColumnName` for Postgres Vector Store | String | pageContent | +| POSTGRES_VECTORSTORE_SSL | Default `ssl` for Postgres Vector Store | Boolean | false | ## License diff --git a/packages/components/nodes/vectorstores/Postgres/driver/Base.ts b/packages/components/nodes/vectorstores/Postgres/driver/Base.ts index fc1379f26..f117a065c 100644 --- a/packages/components/nodes/vectorstores/Postgres/driver/Base.ts +++ b/packages/components/nodes/vectorstores/Postgres/driver/Base.ts @@ -2,7 +2,7 @@ import { VectorStore } from '@langchain/core/vectorstores' import { getCredentialData, getCredentialParam, ICommonObject, INodeData } from '../../../../src' import { Document } from '@langchain/core/documents' import { Embeddings } from '@langchain/core/embeddings' -import { getDatabase, getHost, getPort, getTableName } from '../utils' +import { getDatabase, getHost, getPort, getSSL, getTableName } from '../utils' export abstract class VectorStoreDriver { constructor(protected nodeData: INodeData, protected options: ICommonObject) {} @@ -23,6 +23,10 @@ export abstract class VectorStoreDriver { return getPort(this.nodeData) as number } + getSSL() { + return getSSL(this.nodeData) as boolean + } + getDatabase() { return getDatabase(this.nodeData) as string } diff --git a/packages/components/nodes/vectorstores/Postgres/driver/TypeORM.ts b/packages/components/nodes/vectorstores/Postgres/driver/TypeORM.ts index 655934994..3a0c5ab00 100644 --- a/packages/components/nodes/vectorstores/Postgres/driver/TypeORM.ts +++ 
b/packages/components/nodes/vectorstores/Postgres/driver/TypeORM.ts @@ -29,6 +29,7 @@ export class TypeORMDriver extends VectorStoreDriver { type: 'postgres', host: this.getHost(), port: this.getPort(), + ssl: this.getSSL(), username: user, // Required by TypeORMVectorStore user: user, // Required by Pool in similaritySearchVectorWithScore password: password, diff --git a/packages/components/nodes/vectorstores/Postgres/utils.ts b/packages/components/nodes/vectorstores/Postgres/utils.ts index e2b18b570..96d59f562 100644 --- a/packages/components/nodes/vectorstores/Postgres/utils.ts +++ b/packages/components/nodes/vectorstores/Postgres/utils.ts @@ -12,6 +12,10 @@ export function getPort(nodeData?: INodeData) { return defaultChain(nodeData?.inputs?.port, process.env.POSTGRES_VECTORSTORE_PORT, '5432') } +export function getSSL(nodeData?: INodeData) { + return defaultChain(nodeData?.inputs?.ssl, process.env.POSTGRES_VECTORSTORE_SSL, false) +} + export function getTableName(nodeData?: INodeData) { return defaultChain(nodeData?.inputs?.tableName, process.env.POSTGRES_VECTORSTORE_TABLE_NAME, 'documents') } diff --git a/packages/components/nodes/vectorstores/Redis/Redis.ts b/packages/components/nodes/vectorstores/Redis/Redis.ts index 3c9fd773c..d4fbcf49e 100644 --- a/packages/components/nodes/vectorstores/Redis/Redis.ts +++ b/packages/components/nodes/vectorstores/Redis/Redis.ts @@ -147,7 +147,15 @@ class Redis_VectorStores implements INode { } try { - const redisClient = createClient({ url: redisUrl }) + const redisClient = createClient({ + url: redisUrl, + socket: { + keepAlive: + process.env.REDIS_KEEP_ALIVE && !isNaN(parseInt(process.env.REDIS_KEEP_ALIVE, 10)) + ? 
parseInt(process.env.REDIS_KEEP_ALIVE, 10) + : undefined // milliseconds + } + }) await redisClient.connect() const storeConfig: RedisVectorStoreConfig = { @@ -212,7 +220,15 @@ class Redis_VectorStores implements INode { redisUrl = 'redis://' + username + ':' + password + '@' + host + ':' + portStr } - const redisClient = createClient({ url: redisUrl }) + const redisClient = createClient({ + url: redisUrl, + socket: { + keepAlive: + process.env.REDIS_KEEP_ALIVE && !isNaN(parseInt(process.env.REDIS_KEEP_ALIVE, 10)) + ? parseInt(process.env.REDIS_KEEP_ALIVE, 10) + : undefined // milliseconds + } + }) const storeConfig: RedisVectorStoreConfig = { redisClient: redisClient, diff --git a/packages/components/nodes/vectorstores/VectorStoreUtils.ts b/packages/components/nodes/vectorstores/VectorStoreUtils.ts index c5bc941bf..5c646749a 100644 --- a/packages/components/nodes/vectorstores/VectorStoreUtils.ts +++ b/packages/components/nodes/vectorstores/VectorStoreUtils.ts @@ -10,11 +10,16 @@ export const resolveVectorStoreOrRetriever = ( const searchType = nodeData.outputs?.searchType as string const topK = nodeData.inputs?.topK as string const k = topK ? parseFloat(topK) : 4 + const alpha = nodeData.inputs?.alpha // If it is already pre-defined in lc_kwargs, then don't pass it again const filter = vectorStore?.lc_kwargs?.filter ? undefined : metadataFilter if (output === 'retriever') { + const searchKwargs: Record = {} + if (alpha !== undefined) { + searchKwargs.alpha = parseFloat(alpha) + } if ('mmr' === searchType) { const fetchK = nodeData.inputs?.fetchK as string const lambda = nodeData.inputs?.lambda as string @@ -25,13 +30,18 @@ export const resolveVectorStoreOrRetriever = ( k: k, filter, searchKwargs: { + //...searchKwargs, fetchK: f, lambda: l } }) } else { // "searchType" is "similarity" - return vectorStore.asRetriever(k, filter) + return vectorStore.asRetriever({ + k: k, + filter: filter, + searchKwargs: Object.keys(searchKwargs).length > 0 ? 
searchKwargs : undefined + }) } } else if (output === 'vectorStore') { ;(vectorStore as any).k = k diff --git a/packages/components/nodes/vectorstores/Weaviate/Weaviate.ts b/packages/components/nodes/vectorstores/Weaviate/Weaviate.ts index 85e66fb46..ae2c0164f 100644 --- a/packages/components/nodes/vectorstores/Weaviate/Weaviate.ts +++ b/packages/components/nodes/vectorstores/Weaviate/Weaviate.ts @@ -26,7 +26,7 @@ class Weaviate_VectorStores implements INode { constructor() { this.label = 'Weaviate' this.name = 'weaviate' - this.version = 3.0 + this.version = 4.0 this.type = 'Weaviate' this.icon = 'weaviate.png' this.category = 'Vector Stores' @@ -124,6 +124,16 @@ class Weaviate_VectorStores implements INode { } ] addMMRInputParams(this.inputs) + this.inputs.push({ + label: 'Alpha (for Hybrid Search)', + name: 'alpha', + description: + 'Number between 0 and 1 that determines the weighting of keyword (BM25) portion of the hybrid search. A value of 1 is a pure vector search, while 0 is a pure keyword search.', + placeholder: '1', + type: 'number', + additionalParams: true, + optional: true + }) this.outputs = [ { label: 'Weaviate Retriever', diff --git a/packages/components/package.json b/packages/components/package.json index 5d83568c6..44246c650 100644 --- a/packages/components/package.json +++ b/packages/components/package.json @@ -1,13 +1,11 @@ { "name": "flowise-components", - "version": "2.2.5", + "version": "3.0.0", "description": "Flowiseai Components", "main": "dist/src/index", "types": "dist/src/index.d.ts", "scripts": { "build": "tsc && gulp", - "dev:gulp": "gulp", - "dev": "tsc-watch --noClear -p ./tsconfig.json --onSuccess \"pnpm dev:gulp\"", "lint": "eslint . 
--ext ts,tsx --report-unused-disable-directives --max-warnings 0", "clean": "rimraf dist", "nuke": "rimraf dist node_modules .turbo" @@ -27,7 +25,7 @@ "@aws-sdk/client-s3": "^3.427.0", "@aws-sdk/client-secrets-manager": "^3.699.0", "@datastax/astra-db-ts": "1.5.0", - "@dqbd/tiktoken": "^1.0.7", + "@dqbd/tiktoken": "^1.0.21", "@e2b/code-interpreter": "^0.0.5", "@elastic/elasticsearch": "^8.9.0", "@flowiseai/nodevm": "^3.9.25", @@ -36,29 +34,37 @@ "@gomomento/sdk": "^1.51.1", "@gomomento/sdk-core": "^1.51.1", "@google-ai/generativelanguage": "^2.5.0", - "@google/generative-ai": "^0.15.0", + "@google-cloud/storage": "^7.15.2", + "@google/generative-ai": "^0.24.0", "@huggingface/inference": "^2.6.1", - "@langchain/anthropic": "0.3.7", - "@langchain/aws": "0.1.2", + "@langchain/anthropic": "0.3.14", + "@langchain/aws": "0.1.4", "@langchain/baidu-qianfan": "^0.1.0", "@langchain/cohere": "^0.0.7", - "@langchain/community": "^0.3.24", + "@langchain/community": "^0.3.29", "@langchain/core": "0.3.37", "@langchain/exa": "^0.0.5", - "@langchain/google-genai": "0.1.3", + "@langchain/google-genai": "0.2.3", "@langchain/google-vertexai": "^0.2.0", "@langchain/groq": "0.1.2", "@langchain/langgraph": "^0.0.22", "@langchain/mistralai": "^0.2.0", "@langchain/mongodb": "^0.0.1", - "@langchain/ollama": "0.1.2", - "@langchain/openai": "0.4.2", + "@langchain/ollama": "0.2.0", + "@langchain/openai": "0.5.6", "@langchain/pinecone": "^0.1.3", "@langchain/qdrant": "^0.0.5", "@langchain/weaviate": "^0.0.1", "@langchain/xai": "^0.0.1", + "@mem0/community": "^0.0.1", "@mendable/firecrawl-js": "^0.0.28", "@mistralai/mistralai": "0.1.3", + "@modelcontextprotocol/sdk": "^1.10.1", + "@modelcontextprotocol/server-brave-search": "^0.6.2", + "@modelcontextprotocol/server-github": "^2025.1.23", + "@modelcontextprotocol/server-postgres": "^0.6.2", + "@modelcontextprotocol/server-sequential-thinking": "^0.6.2", + "@modelcontextprotocol/server-slack": "^2025.1.17", "@notionhq/client": "^2.2.8", 
"@opensearch-project/opensearch": "^1.2.0", "@pinecone-database/pinecone": "4.0.0", @@ -72,7 +78,7 @@ "@zilliz/milvus2-sdk-node": "^2.2.24", "apify-client": "^2.7.1", "assemblyai": "^4.2.2", - "axios": "1.6.2", + "axios": "1.7.9", "cheerio": "^1.0.0-rc.12", "chromadb": "^1.10.0", "cohere-ai": "^7.7.5", @@ -115,7 +121,7 @@ "notion-to-md": "^3.1.1", "object-hash": "^3.0.0", "ollama": "^0.5.11", - "openai": "^4.82.0", + "openai": "^4.96.0", "papaparse": "^5.4.1", "pdf-parse": "^1.1.1", "pdfjs-dist": "^3.7.107", @@ -130,7 +136,7 @@ "typeorm": "^0.3.6", "weaviate-ts-client": "^1.1.0", "winston": "^3.9.0", - "ws": "^8.9.0", + "ws": "^8.18.0", "zod": "3.22.4", "zod-to-json-schema": "^3.21.4" }, diff --git a/packages/components/src/Interface.ts b/packages/components/src/Interface.ts index 32e39ed71..6fd3d884d 100644 --- a/packages/components/src/Interface.ts +++ b/packages/components/src/Interface.ts @@ -8,6 +8,7 @@ import { Moderation } from '../nodes/moderation/Moderation' export type NodeParamsType = | 'asyncOptions' + | 'asyncMultiOptions' | 'options' | 'multiOptions' | 'datagrid' @@ -57,12 +58,13 @@ export interface INodeOptionsValue { label: string name: string description?: string + imageSrc?: string } export interface INodeOutputsValue { label: string name: string - baseClasses: string[] + baseClasses?: string[] description?: string hidden?: boolean isAnchor?: boolean @@ -83,10 +85,12 @@ export interface INodeParams { rows?: number list?: boolean acceptVariable?: boolean + acceptNodeOutputAsVariable?: boolean placeholder?: string fileType?: string additionalParams?: boolean loadMethod?: string + loadConfig?: boolean hidden?: boolean hideCodeExecute?: boolean codeExample?: string @@ -96,6 +100,11 @@ export interface INodeParams { refresh?: boolean freeSolo?: boolean loadPreviousNodes?: boolean + array?: Array + show?: INodeDisplay + hide?: INodeDisplay + generateDocStoreDescription?: boolean + generateInstruction?: boolean } export interface INodeExecutionData { @@ 
-103,7 +112,7 @@ export interface INodeExecutionData { } export interface INodeDisplay { - [key: string]: string[] | string + [key: string]: string[] | string | boolean | number | ICommonObject } export interface INodeProperties { @@ -120,11 +129,15 @@ export interface INodeProperties { badge?: string deprecateMessage?: string hideOutput?: boolean + hideInput?: boolean author?: string documentation?: string + color?: string + hint?: string } export interface INode extends INodeProperties { + credential?: INodeParams inputs?: INodeParams[] output?: INodeOutputsValue[] loadMethods?: { @@ -167,6 +180,7 @@ export interface IUsedTool { toolInput: object toolOutput: string | object sourceDocuments?: ICommonObject[] + error?: string } export interface IMultiAgentNode { @@ -411,14 +425,19 @@ export interface IServerSideEventStreamer { streamCustomEvent(chatId: string, eventType: string, data: any): void streamSourceDocumentsEvent(chatId: string, data: any): void streamUsedToolsEvent(chatId: string, data: any): void + streamCalledToolsEvent(chatId: string, data: any): void streamFileAnnotationsEvent(chatId: string, data: any): void streamToolEvent(chatId: string, data: any): void streamAgentReasoningEvent(chatId: string, data: any): void + streamAgentFlowExecutedDataEvent(chatId: string, data: any): void + streamAgentFlowEvent(chatId: string, data: any): void streamNextAgentEvent(chatId: string, data: any): void + streamNextAgentFlowEvent(chatId: string, data: any): void streamActionEvent(chatId: string, data: any): void streamArtifactsEvent(chatId: string, data: any): void streamAbortEvent(chatId: string): void streamEndEvent(chatId: string): void + streamUsageMetadataEvent(chatId: string, data: any): void } export enum FollowUpPromptProvider { @@ -435,6 +454,7 @@ export type FollowUpPromptProviderConfig = { [key in FollowUpPromptProvider]: { credentialId: string modelName: string + baseUrl: string prompt: string temperature: string } @@ -444,3 +464,17 @@ export type 
FollowUpPromptConfig = { status: boolean selectedProvider: FollowUpPromptProvider } & FollowUpPromptProviderConfig + +export interface ICondition { + type: string + value1: CommonType + operation: string + value2: CommonType + isFulfilled?: boolean +} + +export interface IHumanInput { + type: 'proceed' | 'reject' + startNodeId: string + feedback?: string +} diff --git a/packages/components/src/agentflowv2Generator.ts b/packages/components/src/agentflowv2Generator.ts new file mode 100644 index 000000000..c5765f521 --- /dev/null +++ b/packages/components/src/agentflowv2Generator.ts @@ -0,0 +1,655 @@ +import { ICommonObject } from './Interface' +import { z } from 'zod' +import { StructuredOutputParser } from '@langchain/core/output_parsers' +import { isEqual, get, cloneDeep } from 'lodash' +import { BaseChatModel } from '@langchain/core/language_models/chat_models' + +const ToolType = z.array(z.string()).describe('List of tools') + +// Define a more specific NodePosition schema +const NodePositionType = z.object({ + x: z.number().describe('X coordinate of the node position'), + y: z.number().describe('Y coordinate of the node position') +}) + +// Define a more specific EdgeData schema +const EdgeDataType = z.object({ + edgeLabel: z.string().optional().describe('Label for the edge') +}) + +// Define a basic NodeData schema to avoid using .passthrough() which might cause issues +const NodeDataType = z + .object({ + label: z.string().optional().describe('Label for the node'), + name: z.string().optional().describe('Name of the node') + }) + .optional() + +const NodeType = z.object({ + id: z.string().describe('Unique identifier for the node'), + type: z.enum(['agentFlow']).describe('Type of the node'), + position: NodePositionType.describe('Position of the node in the UI'), + width: z.number().describe('Width of the node'), + height: z.number().describe('Height of the node'), + selected: z.boolean().optional().describe('Whether the node is selected'), + positionAbsolute: 
NodePositionType.optional().describe('Absolute position of the node'), + data: NodeDataType +}) + +const EdgeType = z.object({ + id: z.string().describe('Unique identifier for the edge'), + type: z.enum(['agentFlow']).describe('Type of the node'), + source: z.string().describe('ID of the source node'), + sourceHandle: z.string().describe('ID of the source handle'), + target: z.string().describe('ID of the target node'), + targetHandle: z.string().describe('ID of the target handle'), + data: EdgeDataType.optional().describe('Data associated with the edge') +}) + +const NodesEdgesType = z + .object({ + description: z.string().optional().describe('Description of the workflow'), + usecases: z.array(z.string()).optional().describe('Use cases for this workflow'), + nodes: z.array(NodeType).describe('Array of nodes in the workflow'), + edges: z.array(EdgeType).describe('Array of edges connecting the nodes') + }) + .describe('Generate Agentflowv2 nodes and edges') + +interface NodePosition { + x: number + y: number +} + +interface EdgeData { + edgeLabel?: string + sourceColor?: string + targetColor?: string + isHumanInput?: boolean +} + +interface AgentToolConfig { + agentSelectedTool: string + agentSelectedToolConfig: { + agentSelectedTool: string + } +} + +interface NodeInputs { + agentTools?: AgentToolConfig[] + selectedTool?: string + toolInputArgs?: Record[] + selectedToolConfig?: { + selectedTool: string + } + [key: string]: any +} + +interface NodeData { + label?: string + name?: string + id?: string + inputs?: NodeInputs + inputAnchors?: InputAnchor[] + inputParams?: InputParam[] + outputs?: Record + outputAnchors?: OutputAnchor[] + credential?: string + color?: string + [key: string]: any +} + +interface Node { + id: string + type: 'agentFlow' | 'iteration' + position: NodePosition + width: number + height: number + selected?: boolean + positionAbsolute?: NodePosition + data: NodeData + parentNode?: string + extent?: string +} + +interface Edge { + id: string + 
type: 'agentFlow' + source: string + sourceHandle: string + target: string + targetHandle: string + data?: EdgeData + label?: string +} + +interface InputAnchor { + id: string + label: string + name: string + type?: string + [key: string]: any +} + +interface InputParam { + id: string + name: string + label?: string + type?: string + display?: boolean + show?: Record + hide?: Record + [key: string]: any +} + +interface OutputAnchor { + id: string + label: string + name: string +} + +export const generateAgentflowv2 = async (config: Record, question: string, options: ICommonObject) => { + try { + const result = await generateNodesEdges(config, question, options) + + const { nodes, edges } = generateNodesData(result, config) + + const updatedNodes = await generateSelectedTools(nodes, config, question, options) + + const updatedEdges = updateEdges(edges, nodes) + + return { nodes: updatedNodes, edges: updatedEdges } + } catch (error) { + console.error('Error generating AgentflowV2:', error) + return { error: error.message || 'Unknown error occurred' } + } +} + +const updateEdges = (edges: Edge[], nodes: Node[]): Edge[] => { + const isMultiOutput = (source: string) => { + return source.includes('conditionAgentflow') || source.includes('conditionAgentAgentflow') || source.includes('humanInputAgentflow') + } + const findNodeColor = (nodeId: string) => { + const node = nodes.find((node) => node.id === nodeId) + return node?.data?.color + } + + // filter out edges that do not exist in nodes + edges = edges.filter((edge) => { + return nodes.some((node) => node.id === edge.source || node.id === edge.target) + }) + + // filter out the edge that has hideInput/hideOutput on the source/target node + const indexToDelete = [] + for (let i = 0; i < edges.length; i += 1) { + const edge = edges[i] + const sourceNode = nodes.find((node) => node.id === edge.source) + if (sourceNode?.data?.hideOutput) { + indexToDelete.push(i) + } + + const targetNode = nodes.find((node) => node.id === 
edge.target) + if (targetNode?.data?.hideInput) { + indexToDelete.push(i) + } + } + + // delete the edges at the index in indexToDelete + for (let i = indexToDelete.length - 1; i >= 0; i -= 1) { + edges.splice(indexToDelete[i], 1) + } + + const updatedEdges = edges.map((edge) => { + return { + ...edge, + data: { + ...edge.data, + sourceColor: findNodeColor(edge.source), + targetColor: findNodeColor(edge.target), + edgeLabel: isMultiOutput(edge.source) && edge.label && edge.label.trim() !== '' ? edge.label.trim() : undefined, + isHumanInput: edge.source.includes('humanInputAgentflow') ? true : false + }, + type: 'agentFlow', + id: `${edge.source}-${edge.sourceHandle}-${edge.target}-${edge.targetHandle}` + } + }) as Edge[] + + if (updatedEdges.length > 0) { + updatedEdges.forEach((edge) => { + if (isMultiOutput(edge.source)) { + if (edge.sourceHandle.includes('true')) { + edge.sourceHandle = edge.sourceHandle.replace('true', '0') + } else if (edge.sourceHandle.includes('false')) { + edge.sourceHandle = edge.sourceHandle.replace('false', '1') + } + } + }) + } + + return updatedEdges +} + +const generateSelectedTools = async (nodes: Node[], config: Record, question: string, options: ICommonObject) => { + const selectedTools: string[] = [] + + for (let i = 0; i < nodes.length; i += 1) { + const node = nodes[i] + if (!node.data.inputs) { + node.data.inputs = {} + } + + if (node.data.name === 'agentAgentflow') { + const sysPrompt = `You are a workflow orchestrator that is designed to make agent coordination and execution easy. Your goal is to select the tools that are needed to achieve the given task. + +Here are the tools to choose from: +${config.toolNodes} + +Here's the selected tools: +${JSON.stringify(selectedTools, null, 2)} + +Output Format should be a list of tool names: +For example:["googleCustomSearch", "slackMCP"] + +Now, select the tools that are needed to achieve the given task. You must only select tools that are in the list of tools above. 
You must NOT select the tools that are already in the list of selected tools. +` + const tools = await _generateSelectedTools({ ...config, prompt: sysPrompt }, question, options) + if (Array.isArray(tools) && tools.length > 0) { + selectedTools.push(...tools) + + const existingTools = node.data.inputs.agentTools || [] + node.data.inputs.agentTools = [ + ...existingTools, + ...tools.map((tool) => ({ + agentSelectedTool: tool, + agentSelectedToolConfig: { + agentSelectedTool: tool + } + })) + ] + } + } else if (node.data.name === 'toolAgentflow') { + const sysPrompt = `You are a workflow orchestrator that is designed to make agent coordination and execution easy. Your goal is to select ONE tool that is needed to achieve the given task. + +Here are the tools to choose from: +${config.toolNodes} + +Here's the selected tools: +${JSON.stringify(selectedTools, null, 2)} + +Output Format should ONLY one tool name inside of a list: +For example:["googleCustomSearch"] + +Now, select the ONLY tool that is needed to achieve the given task. You must only select tool that is in the list of tools above. You must NOT select the tool that is already in the list of selected tools. 
+` + const tools = await _generateSelectedTools({ ...config, prompt: sysPrompt }, question, options) + if (Array.isArray(tools) && tools.length > 0) { + selectedTools.push(...tools) + + node.data.inputs.selectedTool = tools[0] + node.data.inputs.toolInputArgs = [] + node.data.inputs.selectedToolConfig = { + selectedTool: tools[0] + } + } + } + } + + return nodes +} + +const _generateSelectedTools = async (config: Record, question: string, options: ICommonObject) => { + try { + const chatModelComponent = config.componentNodes[config.selectedChatModel?.name] + if (!chatModelComponent) { + throw new Error('Chat model component not found') + } + const nodeInstanceFilePath = chatModelComponent.filePath as string + const nodeModule = await import(nodeInstanceFilePath) + const newToolNodeInstance = new nodeModule.nodeClass() + const model = (await newToolNodeInstance.init(config.selectedChatModel, '', options)) as BaseChatModel + + // Create a parser to validate the output + const parser = StructuredOutputParser.fromZodSchema(ToolType) + + // Generate JSON schema from our Zod schema + const formatInstructions = parser.getFormatInstructions() + + // Full conversation with system prompt and instructions + const messages = [ + { + role: 'system', + content: `${config.prompt}\n\n${formatInstructions}\n\nMake sure to follow the exact JSON schema structure.` + }, + { + role: 'user', + content: question + } + ] + + // Standard completion without structured output + const response = await model.invoke(messages) + + // Try to extract JSON from the response + const responseContent = response.content.toString() + const jsonMatch = responseContent.match(/```json\n([\s\S]*?)\n```/) || responseContent.match(/{[\s\S]*?}/) + + if (jsonMatch) { + const jsonStr = jsonMatch[1] || jsonMatch[0] + try { + const parsedJSON = JSON.parse(jsonStr) + // Validate with our schema + return ToolType.parse(parsedJSON) + } catch (parseError) { + console.error('Error parsing JSON from response:', 
parseError) + return { error: 'Failed to parse JSON from response', content: responseContent } + } + } else { + console.error('No JSON found in response:', responseContent) + return { error: 'No JSON found in response', content: responseContent } + } + } catch (error) { + console.error('Error generating AgentflowV2:', error) + return { error: error.message || 'Unknown error occurred' } + } +} + +const generateNodesEdges = async (config: Record, question: string, options?: ICommonObject) => { + try { + const chatModelComponent = config.componentNodes[config.selectedChatModel?.name] + if (!chatModelComponent) { + throw new Error('Chat model component not found') + } + const nodeInstanceFilePath = chatModelComponent.filePath as string + const nodeModule = await import(nodeInstanceFilePath) + const newToolNodeInstance = new nodeModule.nodeClass() + const model = (await newToolNodeInstance.init(config.selectedChatModel, '', options)) as BaseChatModel + + // Create a parser to validate the output + const parser = StructuredOutputParser.fromZodSchema(NodesEdgesType) + + // Generate JSON schema from our Zod schema + const formatInstructions = parser.getFormatInstructions() + + // Full conversation with system prompt and instructions + const messages = [ + { + role: 'system', + content: `${config.prompt}\n\n${formatInstructions}\n\nMake sure to follow the exact JSON schema structure.` + }, + { + role: 'user', + content: question + } + ] + + // Standard completion without structured output + const response = await model.invoke(messages) + + // Try to extract JSON from the response + const responseContent = response.content.toString() + const jsonMatch = responseContent.match(/```json\n([\s\S]*?)\n```/) || responseContent.match(/{[\s\S]*?}/) + + if (jsonMatch) { + const jsonStr = jsonMatch[1] || jsonMatch[0] + try { + const parsedJSON = JSON.parse(jsonStr) + // Validate with our schema + return NodesEdgesType.parse(parsedJSON) + } catch (parseError) { + console.error('Error 
parsing JSON from response:', parseError) + return { error: 'Failed to parse JSON from response', content: responseContent } + } + } else { + console.error('No JSON found in response:', responseContent) + return { error: 'No JSON found in response', content: responseContent } + } + } catch (error) { + console.error('Error generating AgentflowV2:', error) + return { error: error.message || 'Unknown error occurred' } + } +} + +const generateNodesData = (result: Record, config: Record) => { + try { + if (result.error) { + return result + } + + let nodes = result.nodes + + for (let i = 0; i < nodes.length; i += 1) { + const node = nodes[i] + let nodeName = node.data.name + + // If nodeName is not found in data.name, try extracting from node.id + if (!nodeName || !config.componentNodes[nodeName]) { + nodeName = node.id.split('_')[0] + } + + const componentNode = config.componentNodes[nodeName] + if (!componentNode) { + continue + } + + const initializedNodeData = initNode(cloneDeep(componentNode), node.id) + nodes[i].data = { + ...initializedNodeData, + label: node.data?.label + } + + if (nodes[i].data.name === 'iterationAgentflow') { + nodes[i].type = 'iteration' + } + + if (nodes[i].parentNode) { + nodes[i].extent = 'parent' + } + } + + return { nodes, edges: result.edges } + } catch (error) { + console.error('Error generating AgentflowV2:', error) + return { error: error.message || 'Unknown error occurred' } + } +} + +const initNode = (nodeData: Record, newNodeId: string): NodeData => { + const inputParams = [] + const incoming = nodeData.inputs ? 
nodeData.inputs.length : 0 + + // Inputs + for (let i = 0; i < incoming; i += 1) { + const newInput = { + ...nodeData.inputs[i], + id: `${newNodeId}-input-${nodeData.inputs[i].name}-${nodeData.inputs[i].type}` + } + inputParams.push(newInput) + } + + // Credential + if (nodeData.credential) { + const newInput = { + ...nodeData.credential, + id: `${newNodeId}-input-${nodeData.credential.name}-${nodeData.credential.type}` + } + inputParams.unshift(newInput) + } + + // Outputs + let outputAnchors = initializeOutputAnchors(nodeData, newNodeId) + + /* Initial + inputs = [ + { + label: 'field_label_1', + name: 'string' + }, + { + label: 'field_label_2', + name: 'CustomType' + } + ] + + => Convert to inputs, inputParams, inputAnchors + + => inputs = { 'field': 'defaultvalue' } // Turn into inputs object with default values + + => // For inputs that are part of whitelistTypes + inputParams = [ + { + label: 'field_label_1', + name: 'string' + } + ] + + => // For inputs that are not part of whitelistTypes + inputAnchors = [ + { + label: 'field_label_2', + name: 'CustomType' + } + ] + */ + + // Inputs + if (nodeData.inputs) { + const defaultInputs = initializeDefaultNodeData(nodeData.inputs) + nodeData.inputAnchors = showHideInputAnchors({ ...nodeData, inputAnchors: [], inputs: defaultInputs }) + nodeData.inputParams = showHideInputParams({ ...nodeData, inputParams, inputs: defaultInputs }) + nodeData.inputs = defaultInputs + } else { + nodeData.inputAnchors = [] + nodeData.inputParams = [] + nodeData.inputs = {} + } + + // Outputs + if (nodeData.outputs) { + nodeData.outputs = initializeDefaultNodeData(outputAnchors) + } else { + nodeData.outputs = {} + } + nodeData.outputAnchors = outputAnchors + + // Credential + if (nodeData.credential) nodeData.credential = '' + + nodeData.id = newNodeId + + return nodeData +} + +const initializeDefaultNodeData = (nodeParams: Record[]) => { + const initialValues: Record = {} + + for (let i = 0; i < nodeParams.length; i += 1) { + const 
input = nodeParams[i] + initialValues[input.name] = input.default || '' + } + + return initialValues +} + +const createAgentFlowOutputs = (nodeData: Record, newNodeId: string) => { + if (nodeData.hideOutput) return [] + + if (nodeData.outputs?.length) { + return nodeData.outputs.map((_: any, index: number) => ({ + id: `${newNodeId}-output-${index}`, + label: nodeData.label, + name: nodeData.name + })) + } + + return [ + { + id: `${newNodeId}-output-${nodeData.name}`, + label: nodeData.label, + name: nodeData.name + } + ] +} + +const initializeOutputAnchors = (nodeData: Record, newNodeId: string): OutputAnchor[] => { + return createAgentFlowOutputs(nodeData, newNodeId) +} + +const _showHideOperation = (nodeData: Record, inputParam: Record, displayType: string, index?: number) => { + const displayOptions = inputParam[displayType] + /* For example: + show: { + enableMemory: true + } + */ + Object.keys(displayOptions).forEach((path) => { + const comparisonValue = displayOptions[path] + if (path.includes('$index') && index) { + path = path.replace('$index', index.toString()) + } + const groundValue = get(nodeData.inputs, path, '') + + if (Array.isArray(comparisonValue)) { + if (displayType === 'show' && !comparisonValue.includes(groundValue)) { + inputParam.display = false + } + if (displayType === 'hide' && comparisonValue.includes(groundValue)) { + inputParam.display = false + } + } else if (typeof comparisonValue === 'string') { + if (displayType === 'show' && !(comparisonValue === groundValue || new RegExp(comparisonValue).test(groundValue))) { + inputParam.display = false + } + if (displayType === 'hide' && (comparisonValue === groundValue || new RegExp(comparisonValue).test(groundValue))) { + inputParam.display = false + } + } else if (typeof comparisonValue === 'boolean') { + if (displayType === 'show' && comparisonValue !== groundValue) { + inputParam.display = false + } + if (displayType === 'hide' && comparisonValue === groundValue) { + inputParam.display = 
false + } + } else if (typeof comparisonValue === 'object') { + if (displayType === 'show' && !isEqual(comparisonValue, groundValue)) { + inputParam.display = false + } + if (displayType === 'hide' && isEqual(comparisonValue, groundValue)) { + inputParam.display = false + } + } else if (typeof comparisonValue === 'number') { + if (displayType === 'show' && comparisonValue !== groundValue) { + inputParam.display = false + } + if (displayType === 'hide' && comparisonValue === groundValue) { + inputParam.display = false + } + } + }) +} + +const showHideInputs = (nodeData: Record, inputType: string, overrideParams?: Record, arrayIndex?: number) => { + const params = overrideParams ?? nodeData[inputType] ?? [] + + for (let i = 0; i < params.length; i += 1) { + const inputParam = params[i] + + // Reset display flag to false for each inputParam + inputParam.display = true + + if (inputParam.show) { + _showHideOperation(nodeData, inputParam, 'show', arrayIndex) + } + if (inputParam.hide) { + _showHideOperation(nodeData, inputParam, 'hide', arrayIndex) + } + } + + return params +} + +const showHideInputParams = (nodeData: Record): InputParam[] => { + return showHideInputs(nodeData, 'inputParams') +} + +const showHideInputAnchors = (nodeData: Record): InputAnchor[] => { + return showHideInputs(nodeData, 'inputAnchors') +} diff --git a/packages/components/src/agents.ts b/packages/components/src/agents.ts index 889e17593..0bda4021c 100644 --- a/packages/components/src/agents.ts +++ b/packages/components/src/agents.ts @@ -24,6 +24,7 @@ import { } from 'langchain/agents' import { formatLogToString } from 'langchain/agents/format_scratchpad/log' import { IUsedTool } from './Interface' +import { getErrorMessage } from './error' export const SOURCE_DOCUMENTS_PREFIX = '\n\n----FLOWISE_SOURCE_DOCUMENTS----\n\n' export const ARTIFACTS_PREFIX = '\n\n----FLOWISE_ARTIFACTS----\n\n' @@ -463,7 +464,21 @@ export class AgentExecutor extends BaseChain { throw e } observation = await new 
ExceptionTool().call(observation, runManager?.getChild()) + usedTools.push({ + tool: tool.name, + toolInput: action.toolInput as any, + toolOutput: '', + error: getErrorMessage(e) + }) return { action, observation: observation ?? '' } + } else { + usedTools.push({ + tool: tool.name, + toolInput: action.toolInput as any, + toolOutput: '', + error: getErrorMessage(e) + }) + return { action, observation: getErrorMessage(e) } } } if (typeof observation === 'string' && observation.includes(SOURCE_DOCUMENTS_PREFIX)) { diff --git a/packages/components/src/error.ts b/packages/components/src/error.ts new file mode 100644 index 000000000..12ba0a670 --- /dev/null +++ b/packages/components/src/error.ts @@ -0,0 +1,25 @@ +type ErrorWithMessage = { + message: string +} + +const isErrorWithMessage = (error: unknown): error is ErrorWithMessage => { + return ( + typeof error === 'object' && error !== null && 'message' in error && typeof (error as Record).message === 'string' + ) +} + +const toErrorWithMessage = (maybeError: unknown): ErrorWithMessage => { + if (isErrorWithMessage(maybeError)) return maybeError + + try { + return new Error(JSON.stringify(maybeError)) + } catch { + // fallback in case there's an error stringifying the maybeError + // like with circular references for example. 
+ return new Error(String(maybeError)) + } +} + +export const getErrorMessage = (error: unknown) => { + return toErrorWithMessage(error).message +} diff --git a/packages/components/src/followUpPrompts.ts b/packages/components/src/followUpPrompts.ts index b888a87d9..ecfcfe825 100644 --- a/packages/components/src/followUpPrompts.ts +++ b/packages/components/src/followUpPrompts.ts @@ -8,7 +8,7 @@ import { z } from 'zod' import { PromptTemplate } from '@langchain/core/prompts' import { StructuredOutputParser } from '@langchain/core/output_parsers' import { ChatGroq } from '@langchain/groq' -import ollama from 'ollama' +import { Ollama } from 'ollama' const FollowUpPromptType = z .object({ @@ -122,7 +122,11 @@ export const generateFollowUpPrompts = async ( return structuredResponse } case FollowUpPromptProvider.OLLAMA: { - const response = await ollama.chat({ + const ollamaClient = new Ollama({ + host: providerConfig.baseUrl || 'http://127.0.0.1:11434' + }) + + const response = await ollamaClient.chat({ model: providerConfig.modelName, messages: [ { diff --git a/packages/components/src/handler.ts b/packages/components/src/handler.ts index 4018f2185..0da42f479 100644 --- a/packages/components/src/handler.ts +++ b/packages/components/src/handler.ts @@ -29,7 +29,8 @@ import { ICommonObject, IDatabaseEntity, INodeData, IServerSideEventStreamer } f import { LangWatch, LangWatchSpan, LangWatchTrace, autoconvertTypedValues } from 'langwatch' import { DataSource } from 'typeorm' import { ChatGenerationChunk } from '@langchain/core/outputs' -import { AIMessageChunk } from '@langchain/core/messages' +import { AIMessageChunk, BaseMessageLike } from '@langchain/core/messages' +import { Serialized } from '@langchain/core/load/serializable' interface AgentRun extends Run { actions: AgentAction[] @@ -120,6 +121,50 @@ function getPhoenixTracer(options: PhoenixTracerOptions): Tracer | undefined { } } +interface OpikTracerOptions { + apiKey: string + baseUrl: string + projectName: string 
+ workspace: string + sdkIntegration?: string + sessionId?: string + enableCallback?: boolean +} + +function getOpikTracer(options: OpikTracerOptions): Tracer | undefined { + const SEMRESATTRS_PROJECT_NAME = 'openinference.project.name' + try { + const traceExporter = new ProtoOTLPTraceExporter({ + url: `${options.baseUrl}/v1/private/otel/v1/traces`, + headers: { + Authorization: options.apiKey, + projectName: options.projectName, + 'Comet-Workspace': options.workspace + } + }) + const tracerProvider = new NodeTracerProvider({ + resource: new Resource({ + [ATTR_SERVICE_NAME]: options.projectName, + [ATTR_SERVICE_VERSION]: '1.0.0', + [SEMRESATTRS_PROJECT_NAME]: options.projectName + }) + }) + tracerProvider.addSpanProcessor(new SimpleSpanProcessor(traceExporter)) + if (options.enableCallback) { + registerInstrumentations({ + instrumentations: [] + }) + const lcInstrumentation = new LangChainInstrumentation() + lcInstrumentation.manuallyInstrument(CallbackManagerModule) + tracerProvider.register() + } + return tracerProvider.getTracer(`opik-tracer-${uuidv4().toString()}`) + } catch (err) { + if (process.env.DEBUG === 'true') console.error(`Error setting up Opik tracer: ${err.message}`) + return undefined + } +} + function tryGetJsonSpaces() { try { return parseInt(getEnvironmentVariable('LOG_JSON_SPACES') ?? '2') @@ -558,6 +603,28 @@ export const additionalCallbacks = async (nodeData: INodeData, options: ICommonO const tracer: Tracer | undefined = getPhoenixTracer(phoenixOptions) callbacks.push(tracer) + } else if (provider === 'opik') { + const opikApiKey = getCredentialParam('opikApiKey', credentialData, nodeData) + const opikEndpoint = getCredentialParam('opikUrl', credentialData, nodeData) + const opikWorkspace = getCredentialParam('opikWorkspace', credentialData, nodeData) + const opikProject = analytic[provider].opikProjectName as string + + let opikOptions: OpikTracerOptions = { + apiKey: opikApiKey, + baseUrl: opikEndpoint ?? 
'https://www.comet.com/opik/api', + projectName: opikProject ?? 'default', + workspace: opikWorkspace ?? 'default', + sdkIntegration: 'Flowise', + enableCallback: true + } + + if (options.chatId) opikOptions.sessionId = options.chatId + if (nodeData?.inputs?.analytics?.opik) { + opikOptions = { ...opikOptions, ...nodeData?.inputs?.analytics?.opik } + } + + const tracer: Tracer | undefined = getOpikTracer(opikOptions) + callbacks.push(tracer) } } } @@ -568,118 +635,184 @@ export const additionalCallbacks = async (nodeData: INodeData, options: ICommonO } export class AnalyticHandler { - nodeData: INodeData - options: ICommonObject = {} - handlers: ICommonObject = {} + private static instances: Map = new Map() + private nodeData: INodeData + private options: ICommonObject + private handlers: ICommonObject = {} + private initialized: boolean = false + private analyticsConfig: string | undefined + private chatId: string + private createdAt: number - constructor(nodeData: INodeData, options: ICommonObject) { - this.options = options + private constructor(nodeData: INodeData, options: ICommonObject) { this.nodeData = nodeData - this.init() + this.options = options + this.analyticsConfig = options.analytic + this.chatId = options.chatId + this.createdAt = Date.now() + } + + static getInstance(nodeData: INodeData, options: ICommonObject): AnalyticHandler { + const chatId = options.chatId + if (!chatId) throw new Error('ChatId is required for analytics') + + // Reset instance if analytics config changed for this chat + const instance = AnalyticHandler.instances.get(chatId) + if (instance?.analyticsConfig !== options.analytic) { + AnalyticHandler.resetInstance(chatId) + } + + if (!AnalyticHandler.instances.get(chatId)) { + AnalyticHandler.instances.set(chatId, new AnalyticHandler(nodeData, options)) + } + return AnalyticHandler.instances.get(chatId)! 
+ } + + static resetInstance(chatId: string): void { + AnalyticHandler.instances.delete(chatId) + } + + // Keep this as backup for orphaned instances + static cleanup(maxAge: number = 3600000): void { + const now = Date.now() + for (const [chatId, instance] of AnalyticHandler.instances) { + if (now - instance.createdAt > maxAge) { + AnalyticHandler.resetInstance(chatId) + } + } } async init() { + if (this.initialized) return + try { if (!this.options.analytic) return const analytic = JSON.parse(this.options.analytic) - for (const provider in analytic) { const providerStatus = analytic[provider].status as boolean - if (providerStatus) { const credentialId = analytic[provider].credentialId as string const credentialData = await getCredentialData(credentialId ?? '', this.options) - if (provider === 'langSmith') { - const langSmithProject = analytic[provider].projectName as string - const langSmithApiKey = getCredentialParam('langSmithApiKey', credentialData, this.nodeData) - const langSmithEndpoint = getCredentialParam('langSmithEndpoint', credentialData, this.nodeData) - - const client = new LangsmithClient({ - apiUrl: langSmithEndpoint ?? 'https://api.smith.langchain.com', - apiKey: langSmithApiKey - }) - - this.handlers['langSmith'] = { client, langSmithProject } - } else if (provider === 'langFuse') { - const release = analytic[provider].release as string - const langFuseSecretKey = getCredentialParam('langFuseSecretKey', credentialData, this.nodeData) - const langFusePublicKey = getCredentialParam('langFusePublicKey', credentialData, this.nodeData) - const langFuseEndpoint = getCredentialParam('langFuseEndpoint', credentialData, this.nodeData) - - const langfuse = new Langfuse({ - secretKey: langFuseSecretKey, - publicKey: langFusePublicKey, - baseUrl: langFuseEndpoint ?? 
'https://cloud.langfuse.com', - sdkIntegration: 'Flowise', - release - }) - this.handlers['langFuse'] = { client: langfuse } - } else if (provider === 'lunary') { - const lunaryPublicKey = getCredentialParam('lunaryAppId', credentialData, this.nodeData) - const lunaryEndpoint = getCredentialParam('lunaryEndpoint', credentialData, this.nodeData) - - lunary.init({ - publicKey: lunaryPublicKey, - apiUrl: lunaryEndpoint, - runtime: 'flowise' - }) - - this.handlers['lunary'] = { client: lunary } - } else if (provider === 'langWatch') { - const langWatchApiKey = getCredentialParam('langWatchApiKey', credentialData, this.nodeData) - const langWatchEndpoint = getCredentialParam('langWatchEndpoint', credentialData, this.nodeData) - - const langwatch = new LangWatch({ - apiKey: langWatchApiKey, - endpoint: langWatchEndpoint - }) - - this.handlers['langWatch'] = { client: langwatch } - } else if (provider === 'arize') { - const arizeApiKey = getCredentialParam('arizeApiKey', credentialData, this.nodeData) - const arizeSpaceId = getCredentialParam('arizeSpaceId', credentialData, this.nodeData) - const arizeEndpoint = getCredentialParam('arizeEndpoint', credentialData, this.nodeData) - const arizeProject = analytic[provider].projectName as string - - let arizeOptions: ArizeTracerOptions = { - apiKey: arizeApiKey, - spaceId: arizeSpaceId, - baseUrl: arizeEndpoint ?? 'https://otlp.arize.com', - projectName: arizeProject ?? 
'default', - sdkIntegration: 'Flowise', - enableCallback: false - } - - const arize: Tracer | undefined = getArizeTracer(arizeOptions) - const rootSpan: Span | undefined = undefined - - this.handlers['arize'] = { client: arize, arizeProject, rootSpan } - } else if (provider === 'phoenix') { - const phoenixApiKey = getCredentialParam('phoenixApiKey', credentialData, this.nodeData) - const phoenixEndpoint = getCredentialParam('phoenixEndpoint', credentialData, this.nodeData) - const phoenixProject = analytic[provider].projectName as string - - let phoenixOptions: PhoenixTracerOptions = { - apiKey: phoenixApiKey, - baseUrl: phoenixEndpoint ?? 'https://app.phoenix.arize.com', - projectName: phoenixProject ?? 'default', - sdkIntegration: 'Flowise', - enableCallback: false - } - - const phoenix: Tracer | undefined = getPhoenixTracer(phoenixOptions) - const rootSpan: Span | undefined = undefined - - this.handlers['phoenix'] = { client: phoenix, phoenixProject, rootSpan } - } + await this.initializeProvider(provider, analytic[provider], credentialData) } } + this.initialized = true } catch (e) { throw new Error(e) } } + // Add getter for handlers (useful for debugging) + getHandlers(): ICommonObject { + return this.handlers + } + + async initializeProvider(provider: string, providerConfig: any, credentialData: any) { + if (provider === 'langSmith') { + const langSmithProject = providerConfig.projectName as string + const langSmithApiKey = getCredentialParam('langSmithApiKey', credentialData, this.nodeData) + const langSmithEndpoint = getCredentialParam('langSmithEndpoint', credentialData, this.nodeData) + + const client = new LangsmithClient({ + apiUrl: langSmithEndpoint ?? 
'https://api.smith.langchain.com', + apiKey: langSmithApiKey + }) + + this.handlers['langSmith'] = { client, langSmithProject } + } else if (provider === 'langFuse') { + const release = providerConfig.release as string + const langFuseSecretKey = getCredentialParam('langFuseSecretKey', credentialData, this.nodeData) + const langFusePublicKey = getCredentialParam('langFusePublicKey', credentialData, this.nodeData) + const langFuseEndpoint = getCredentialParam('langFuseEndpoint', credentialData, this.nodeData) + + const langfuse = new Langfuse({ + secretKey: langFuseSecretKey, + publicKey: langFusePublicKey, + baseUrl: langFuseEndpoint ?? 'https://cloud.langfuse.com', + sdkIntegration: 'Flowise', + release + }) + this.handlers['langFuse'] = { client: langfuse } + } else if (provider === 'lunary') { + const lunaryPublicKey = getCredentialParam('lunaryAppId', credentialData, this.nodeData) + const lunaryEndpoint = getCredentialParam('lunaryEndpoint', credentialData, this.nodeData) + + lunary.init({ + publicKey: lunaryPublicKey, + apiUrl: lunaryEndpoint, + runtime: 'flowise' + }) + + this.handlers['lunary'] = { client: lunary } + } else if (provider === 'langWatch') { + const langWatchApiKey = getCredentialParam('langWatchApiKey', credentialData, this.nodeData) + const langWatchEndpoint = getCredentialParam('langWatchEndpoint', credentialData, this.nodeData) + + const langwatch = new LangWatch({ + apiKey: langWatchApiKey, + endpoint: langWatchEndpoint + }) + + this.handlers['langWatch'] = { client: langwatch } + } else if (provider === 'arize') { + const arizeApiKey = getCredentialParam('arizeApiKey', credentialData, this.nodeData) + const arizeSpaceId = getCredentialParam('arizeSpaceId', credentialData, this.nodeData) + const arizeEndpoint = getCredentialParam('arizeEndpoint', credentialData, this.nodeData) + const arizeProject = providerConfig.projectName as string + + let arizeOptions: ArizeTracerOptions = { + apiKey: arizeApiKey, + spaceId: arizeSpaceId, + baseUrl: 
arizeEndpoint ?? 'https://otlp.arize.com', + projectName: arizeProject ?? 'default', + sdkIntegration: 'Flowise', + enableCallback: false + } + + const arize: Tracer | undefined = getArizeTracer(arizeOptions) + const rootSpan: Span | undefined = undefined + + this.handlers['arize'] = { client: arize, arizeProject, rootSpan } + } else if (provider === 'phoenix') { + const phoenixApiKey = getCredentialParam('phoenixApiKey', credentialData, this.nodeData) + const phoenixEndpoint = getCredentialParam('phoenixEndpoint', credentialData, this.nodeData) + const phoenixProject = providerConfig.projectName as string + + let phoenixOptions: PhoenixTracerOptions = { + apiKey: phoenixApiKey, + baseUrl: phoenixEndpoint ?? 'https://app.phoenix.arize.com', + projectName: phoenixProject ?? 'default', + sdkIntegration: 'Flowise', + enableCallback: false + } + + const phoenix: Tracer | undefined = getPhoenixTracer(phoenixOptions) + const rootSpan: Span | undefined = undefined + + this.handlers['phoenix'] = { client: phoenix, phoenixProject, rootSpan } + } else if (provider === 'opik') { + const opikApiKey = getCredentialParam('opikApiKey', credentialData, this.nodeData) + const opikEndpoint = getCredentialParam('opikUrl', credentialData, this.nodeData) + const opikWorkspace = getCredentialParam('opikWorkspace', credentialData, this.nodeData) + const opikProject = providerConfig.opikProjectName as string + + let opikOptions: OpikTracerOptions = { + apiKey: opikApiKey, + baseUrl: opikEndpoint ?? 'https://www.comet.com/opik/api', + projectName: opikProject ?? 'default', + workspace: opikWorkspace ?? 
'default', + sdkIntegration: 'Flowise', + enableCallback: false + } + + const opik: Tracer | undefined = getOpikTracer(opikOptions) + const rootSpan: Span | undefined = undefined + + this.handlers['opik'] = { client: opik, opikProject, rootSpan } + } + } + async onChainStart(name: string, input: string, parentIds?: ICommonObject) { const returnIds: ICommonObject = { langSmith: {}, @@ -687,7 +820,8 @@ export class AnalyticHandler { lunary: {}, langWatch: {}, arize: {}, - phoenix: {} + phoenix: {}, + opik: {} } if (Object.prototype.hasOwnProperty.call(this.handlers, 'langSmith')) { @@ -869,6 +1003,40 @@ export class AnalyticHandler { returnIds['phoenix'].chainSpan = chainSpanId } + if (Object.prototype.hasOwnProperty.call(this.handlers, 'opik')) { + const tracer: Tracer | undefined = this.handlers['opik'].client + let rootSpan: Span | undefined = this.handlers['opik'].rootSpan + + if (!parentIds || !Object.keys(parentIds).length) { + rootSpan = tracer ? tracer.startSpan('Flowise') : undefined + if (rootSpan) { + rootSpan.setAttribute('session.id', this.options.chatId) + rootSpan.setAttribute('openinference.span.kind', 'CHAIN') + rootSpan.setAttribute('input.value', input) + rootSpan.setAttribute('input.mime_type', 'text/plain') + rootSpan.setAttribute('output.value', '[Object]') + rootSpan.setAttribute('output.mime_type', 'text/plain') + rootSpan.setStatus({ code: SpanStatusCode.OK }) + rootSpan.end() + } + this.handlers['opik'].rootSpan = rootSpan + } + + const rootSpanContext = rootSpan + ? 
opentelemetry.trace.setSpan(opentelemetry.context.active(), rootSpan as Span) + : opentelemetry.context.active() + const chainSpan = tracer?.startSpan(name, undefined, rootSpanContext) + if (chainSpan) { + chainSpan.setAttribute('openinference.span.kind', 'CHAIN') + chainSpan.setAttribute('input.value', JSON.stringify(input)) + chainSpan.setAttribute('input.mime_type', 'application/json') + } + const chainSpanId: any = chainSpan?.spanContext().spanId + + this.handlers['opik'].chainSpan = { [chainSpanId]: chainSpan } + returnIds['opik'].chainSpan = chainSpanId + } + return returnIds } @@ -946,6 +1114,21 @@ export class AnalyticHandler { chainSpan.end() } } + + if (Object.prototype.hasOwnProperty.call(this.handlers, 'opik')) { + const chainSpan: Span | undefined = this.handlers['opik'].chainSpan[returnIds['opik'].chainSpan] + if (chainSpan) { + chainSpan.setAttribute('output.value', JSON.stringify(output)) + chainSpan.setAttribute('output.mime_type', 'application/json') + chainSpan.setStatus({ code: SpanStatusCode.OK }) + chainSpan.end() + } + } + + if (shutdown) { + // Cleanup this instance when chain ends + AnalyticHandler.resetInstance(this.chatId) + } } async onChainError(returnIds: ICommonObject, error: string | object, shutdown = false) { @@ -1024,9 +1207,14 @@ export class AnalyticHandler { chainSpan.end() } } + + if (shutdown) { + // Cleanup this instance when chain ends + AnalyticHandler.resetInstance(this.chatId) + } } - async onLLMStart(name: string, input: string, parentIds: ICommonObject) { + async onLLMStart(name: string, input: string | BaseMessageLike[], parentIds: ICommonObject) { const returnIds: ICommonObject = { langSmith: {}, langFuse: {}, @@ -1038,13 +1226,18 @@ export class AnalyticHandler { if (Object.prototype.hasOwnProperty.call(this.handlers, 'langSmith')) { const parentRun: RunTree | undefined = this.handlers['langSmith'].chainRun[parentIds['langSmith'].chainRun] + if (parentRun) { + const inputs: any = {} + if (Array.isArray(input)) { + 
inputs.messages = input + } else { + inputs.prompts = [input] + } const childLLMRun = await parentRun.createChild({ name, run_type: 'llm', - inputs: { - prompts: [input] - } + inputs }) await childLLMRun.postRun() this.handlers['langSmith'].llmRun = { [childLLMRun.id]: childLLMRun } @@ -1131,6 +1324,25 @@ export class AnalyticHandler { returnIds['phoenix'].llmSpan = llmSpanId } + if (Object.prototype.hasOwnProperty.call(this.handlers, 'opik')) { + const tracer: Tracer | undefined = this.handlers['opik'].client + const rootSpan: Span | undefined = this.handlers['opik'].rootSpan + + const rootSpanContext = rootSpan + ? opentelemetry.trace.setSpan(opentelemetry.context.active(), rootSpan as Span) + : opentelemetry.context.active() + const llmSpan = tracer?.startSpan(name, undefined, rootSpanContext) + if (llmSpan) { + llmSpan.setAttribute('openinference.span.kind', 'LLM') + llmSpan.setAttribute('input.value', JSON.stringify(input)) + llmSpan.setAttribute('input.mime_type', 'application/json') + } + const llmSpanId: any = llmSpan?.spanContext().spanId + + this.handlers['opik'].llmSpan = { [llmSpanId]: llmSpan } + returnIds['opik'].llmSpan = llmSpanId + } + return returnIds } @@ -1196,6 +1408,16 @@ export class AnalyticHandler { llmSpan.end() } } + + if (Object.prototype.hasOwnProperty.call(this.handlers, 'opik')) { + const llmSpan: Span | undefined = this.handlers['opik'].llmSpan[returnIds['opik'].llmSpan] + if (llmSpan) { + llmSpan.setAttribute('output.value', JSON.stringify(output)) + llmSpan.setAttribute('output.mime_type', 'application/json') + llmSpan.setStatus({ code: SpanStatusCode.OK }) + llmSpan.end() + } + } } async onLLMError(returnIds: ICommonObject, error: string | object) { @@ -1260,6 +1482,16 @@ export class AnalyticHandler { llmSpan.end() } } + + if (Object.prototype.hasOwnProperty.call(this.handlers, 'opik')) { + const llmSpan: Span | undefined = this.handlers['opik'].llmSpan[returnIds['opik'].llmSpan] + if (llmSpan) { + 
llmSpan.setAttribute('error.value', JSON.stringify(error)) + llmSpan.setAttribute('error.mime_type', 'application/json') + llmSpan.setStatus({ code: SpanStatusCode.ERROR, message: error.toString() }) + llmSpan.end() + } + } } async onToolStart(name: string, input: string | object, parentIds: ICommonObject) { @@ -1269,7 +1501,8 @@ export class AnalyticHandler { lunary: {}, langWatch: {}, arize: {}, - phoenix: {} + phoenix: {}, + opik: {} } if (Object.prototype.hasOwnProperty.call(this.handlers, 'langSmith')) { @@ -1368,6 +1601,25 @@ export class AnalyticHandler { returnIds['phoenix'].toolSpan = toolSpanId } + if (Object.prototype.hasOwnProperty.call(this.handlers, 'opik')) { + const tracer: Tracer | undefined = this.handlers['opik'].client + const rootSpan: Span | undefined = this.handlers['opik'].rootSpan + + const rootSpanContext = rootSpan + ? opentelemetry.trace.setSpan(opentelemetry.context.active(), rootSpan as Span) + : opentelemetry.context.active() + const toolSpan = tracer?.startSpan(name, undefined, rootSpanContext) + if (toolSpan) { + toolSpan.setAttribute('openinference.span.kind', 'TOOL') + toolSpan.setAttribute('input.value', JSON.stringify(input)) + toolSpan.setAttribute('input.mime_type', 'application/json') + } + const toolSpanId: any = toolSpan?.spanContext().spanId + + this.handlers['opik'].toolSpan = { [toolSpanId]: toolSpan } + returnIds['opik'].toolSpan = toolSpanId + } + return returnIds } @@ -1433,6 +1685,16 @@ export class AnalyticHandler { toolSpan.end() } } + + if (Object.prototype.hasOwnProperty.call(this.handlers, 'opik')) { + const toolSpan: Span | undefined = this.handlers['opik'].toolSpan[returnIds['opik'].toolSpan] + if (toolSpan) { + toolSpan.setAttribute('output.value', JSON.stringify(output)) + toolSpan.setAttribute('output.mime_type', 'application/json') + toolSpan.setStatus({ code: SpanStatusCode.OK }) + toolSpan.end() + } + } } async onToolError(returnIds: ICommonObject, error: string | object) { @@ -1497,5 +1759,98 @@ export 
class AnalyticHandler { toolSpan.end() } } + + if (Object.prototype.hasOwnProperty.call(this.handlers, 'opik')) { + const toolSpan: Span | undefined = this.handlers['opik'].toolSpan[returnIds['opik'].toolSpan] + if (toolSpan) { + toolSpan.setAttribute('error.value', JSON.stringify(error)) + toolSpan.setAttribute('error.mime_type', 'application/json') + toolSpan.setStatus({ code: SpanStatusCode.ERROR, message: error.toString() }) + toolSpan.end() + } + } + } +} + +/** + * Custom callback handler for streaming detailed intermediate information + * during agent execution, specifically tool invocation inputs and outputs. + */ +export class CustomStreamingHandler extends BaseCallbackHandler { + name = 'custom_streaming_handler' + + private sseStreamer: IServerSideEventStreamer + private chatId: string + + constructor(sseStreamer: IServerSideEventStreamer, chatId: string) { + super() + this.sseStreamer = sseStreamer + this.chatId = chatId + } + + /** + * Handle the start of a tool invocation + */ + async handleToolStart(tool: Serialized, input: string, runId: string, parentRunId?: string): Promise { + if (!this.sseStreamer) return + + const toolName = typeof tool === 'object' && tool.name ? tool.name : 'unknown-tool' + const toolInput = typeof input === 'string' ? input : JSON.stringify(input, null, 2) + + // Stream the tool invocation details using the agent_trace event type for consistency + this.sseStreamer.streamCustomEvent(this.chatId, 'agent_trace', { + step: 'tool_start', + name: toolName, + input: toolInput, + runId, + parentRunId: parentRunId || null + }) + } + + /** + * Handle the end of a tool invocation + */ + async handleToolEnd(output: string | object, runId: string, parentRunId?: string): Promise { + if (!this.sseStreamer) return + + const toolOutput = typeof output === 'string' ? 
output : JSON.stringify(output, null, 2) + + // Stream the tool output details using the agent_trace event type for consistency + this.sseStreamer.streamCustomEvent(this.chatId, 'agent_trace', { + step: 'tool_end', + output: toolOutput, + runId, + parentRunId: parentRunId || null + }) + } + + /** + * Handle tool errors + */ + async handleToolError(error: Error, runId: string, parentRunId?: string): Promise { + if (!this.sseStreamer) return + + // Stream the tool error details using the agent_trace event type for consistency + this.sseStreamer.streamCustomEvent(this.chatId, 'agent_trace', { + step: 'tool_error', + error: error.message, + runId, + parentRunId: parentRunId || null + }) + } + + /** + * Handle agent actions + */ + async handleAgentAction(action: AgentAction, runId: string, parentRunId?: string): Promise { + if (!this.sseStreamer) return + + // Stream the agent action details using the agent_trace event type for consistency + this.sseStreamer.streamCustomEvent(this.chatId, 'agent_trace', { + step: 'agent_action', + action: JSON.stringify(action), + runId, + parentRunId: parentRunId || null + }) } } diff --git a/packages/components/src/index.ts b/packages/components/src/index.ts index ec713bec3..2944bc320 100644 --- a/packages/components/src/index.ts +++ b/packages/components/src/index.ts @@ -10,3 +10,5 @@ export * from './speechToText' export * from './storageUtils' export * from './handler' export * from './followUpPrompts' +export * from './validator' +export * from './agentflowv2Generator' diff --git a/packages/components/src/storageUtils.ts b/packages/components/src/storageUtils.ts index 3639553e1..a918c4f00 100644 --- a/packages/components/src/storageUtils.ts +++ b/packages/components/src/storageUtils.ts @@ -8,6 +8,7 @@ import { S3Client, S3ClientConfig } from '@aws-sdk/client-s3' +import { Storage } from '@google-cloud/storage' import { Readable } from 'node:stream' import { getUserHome } from './utils' import sanitize from 'sanitize-filename' @@ 
-34,6 +35,25 @@ export const addBase64FilesToStorage = async (fileBase64: string, chatflowid: st }) await s3Client.send(putObjCmd) + fileNames.push(sanitizedFilename) + return 'FILE-STORAGE::' + JSON.stringify(fileNames) + } else if (storageType === 'gcs') { + const { bucket } = getGcsClient() + const splitDataURI = fileBase64.split(',') + const filename = splitDataURI.pop()?.split(':')[1] ?? '' + const bf = Buffer.from(splitDataURI.pop() || '', 'base64') + const mime = splitDataURI[0].split(':')[1].split(';')[0] + const sanitizedFilename = _sanitizeFilename(filename) + const normalizedChatflowid = chatflowid.replace(/\\/g, '/') + const normalizedFilename = sanitizedFilename.replace(/\\/g, '/') + const filePath = `${normalizedChatflowid}/${normalizedFilename}` + const file = bucket.file(filePath) + await new Promise((resolve, reject) => { + file.createWriteStream({ contentType: mime, metadata: { contentEncoding: 'base64' } }) + .on('error', (err) => reject(err)) + .on('finish', () => resolve()) + .end(bf) + }) fileNames.push(sanitizedFilename) return 'FILE-STORAGE::' + JSON.stringify(fileNames) } else { @@ -76,8 +96,22 @@ export const addArrayFilesToStorage = async (mime: string, bf: Buffer, fileName: await s3Client.send(putObjCmd) fileNames.push(sanitizedFilename) return 'FILE-STORAGE::' + JSON.stringify(fileNames) + } else if (storageType === 'gcs') { + const { bucket } = getGcsClient() + const normalizedPaths = paths.map((p) => p.replace(/\\/g, '/')) + const normalizedFilename = sanitizedFilename.replace(/\\/g, '/') + const filePath = [...normalizedPaths, normalizedFilename].join('/') + const file = bucket.file(filePath) + await new Promise((resolve, reject) => { + file.createWriteStream() + .on('error', (err) => reject(err)) + .on('finish', () => resolve()) + .end(bf) + }) + fileNames.push(sanitizedFilename) + return 'FILE-STORAGE::' + JSON.stringify(fileNames) } else { - const dir = path.join(getStoragePath(), ...paths) + const dir = 
path.join(getStoragePath(), ...paths.map(_sanitizeFilename)) if (!fs.existsSync(dir)) { fs.mkdirSync(dir, { recursive: true }) } @@ -109,8 +143,21 @@ export const addSingleFileToStorage = async (mime: string, bf: Buffer, fileName: }) await s3Client.send(putObjCmd) return 'FILE-STORAGE::' + sanitizedFilename + } else if (storageType === 'gcs') { + const { bucket } = getGcsClient() + const normalizedPaths = paths.map((p) => p.replace(/\\/g, '/')) + const normalizedFilename = sanitizedFilename.replace(/\\/g, '/') + const filePath = [...normalizedPaths, normalizedFilename].join('/') + const file = bucket.file(filePath) + await new Promise((resolve, reject) => { + file.createWriteStream({ contentType: mime, metadata: { contentEncoding: 'base64' } }) + .on('error', (err) => reject(err)) + .on('finish', () => resolve()) + .end(bf) + }) + return 'FILE-STORAGE::' + sanitizedFilename } else { - const dir = path.join(getStoragePath(), ...paths) + const dir = path.join(getStoragePath(), ...paths.map(_sanitizeFilename)) if (!fs.existsSync(dir)) { fs.mkdirSync(dir, { recursive: true }) } @@ -146,6 +193,11 @@ export const getFileFromUpload = async (filePath: string): Promise => { // @ts-ignore const buffer = Buffer.concat(response.Body.toArray()) return buffer + } else if (storageType === 'gcs') { + const { bucket } = getGcsClient() + const file = bucket.file(filePath) + const [buffer] = await file.download() + return buffer } else { return fs.readFileSync(filePath) } @@ -179,8 +231,16 @@ export const getFileFromStorage = async (file: string, ...paths: string[]): Prom // @ts-ignore const buffer = Buffer.concat(response.Body.toArray()) return buffer + } else if (storageType === 'gcs') { + const { bucket } = getGcsClient() + const normalizedPaths = paths.map((p) => p.replace(/\\/g, '/')) + const normalizedFilename = sanitizedFilename.replace(/\\/g, '/') + const filePath = [...normalizedPaths, normalizedFilename].join('/') + const file = bucket.file(filePath) + const [buffer] = 
await file.download() + return buffer } else { - const fileInStorage = path.join(getStoragePath(), ...paths, sanitizedFilename) + const fileInStorage = path.join(getStoragePath(), ...paths.map(_sanitizeFilename), sanitizedFilename) return fs.readFileSync(fileInStorage) } } @@ -208,8 +268,12 @@ export const removeFilesFromStorage = async (...paths: string[]) => { Key = Key.substring(1) } await _deleteS3Folder(Key) + } else if (storageType === 'gcs') { + const { bucket } = getGcsClient() + const normalizedPath = paths.map((p) => p.replace(/\\/g, '/')).join('/') + await bucket.deleteFiles({ prefix: `${normalizedPath}/` }) } else { - const directory = path.join(getStoragePath(), ...paths) + const directory = path.join(getStoragePath(), ...paths.map(_sanitizeFilename)) _deleteLocalFolderRecursive(directory) } } @@ -223,6 +287,9 @@ export const removeSpecificFileFromUpload = async (filePath: string) => { Key = Key.substring(1) } await _deleteS3Folder(Key) + } else if (storageType === 'gcs') { + const { bucket } = getGcsClient() + await bucket.file(filePath).delete() } else { fs.unlinkSync(filePath) } @@ -237,13 +304,22 @@ export const removeSpecificFileFromStorage = async (...paths: string[]) => { Key = Key.substring(1) } await _deleteS3Folder(Key) + } else if (storageType === 'gcs') { + const { bucket } = getGcsClient() + const fileName = paths.pop() + if (fileName) { + const sanitizedFilename = _sanitizeFilename(fileName) + paths.push(sanitizedFilename) + } + const normalizedPath = paths.map((p) => p.replace(/\\/g, '/')).join('/') + await bucket.file(normalizedPath).delete() } else { const fileName = paths.pop() if (fileName) { const sanitizedFilename = _sanitizeFilename(fileName) paths.push(sanitizedFilename) } - const file = path.join(getStoragePath(), ...paths) + const file = path.join(getStoragePath(), ...paths.map(_sanitizeFilename)) fs.unlinkSync(file) } } @@ -257,8 +333,12 @@ export const removeFolderFromStorage = async (...paths: string[]) => { Key = 
Key.substring(1) } await _deleteS3Folder(Key) + } else if (storageType === 'gcs') { + const { bucket } = getGcsClient() + const normalizedPath = paths.map((p) => p.replace(/\\/g, '/')).join('/') + await bucket.deleteFiles({ prefix: `${normalizedPath}/` }) } else { - const directory = path.join(getStoragePath(), ...paths) + const directory = path.join(getStoragePath(), ...paths.map(_sanitizeFilename)) _deleteLocalFolderRecursive(directory, true) } } @@ -355,6 +435,14 @@ export const streamStorageFile = async ( const blob = await body.transformToByteArray() return Buffer.from(blob) } + } else if (storageType === 'gcs') { + const { bucket } = getGcsClient() + const normalizedChatflowId = chatflowId.replace(/\\/g, '/') + const normalizedChatId = chatId.replace(/\\/g, '/') + const normalizedFilename = sanitizedFilename.replace(/\\/g, '/') + const filePath = `${normalizedChatflowId}/${normalizedChatId}/${normalizedFilename}` + const [buffer] = await bucket.file(filePath).download() + return buffer } else { const filePath = path.join(getStoragePath(), chatflowId, chatId, sanitizedFilename) //raise error if file path is not absolute @@ -372,6 +460,28 @@ export const streamStorageFile = async ( } } +export const getGcsClient = () => { + const pathToGcsCredential = process.env.GOOGLE_CLOUD_STORAGE_CREDENTIAL + const projectId = process.env.GOOGLE_CLOUD_STORAGE_PROJ_ID + const bucketName = process.env.GOOGLE_CLOUD_STORAGE_BUCKET_NAME + + if (!pathToGcsCredential) { + throw new Error('GOOGLE_CLOUD_STORAGE_CREDENTIAL env variable is required') + } + if (!bucketName) { + throw new Error('GOOGLE_CLOUD_STORAGE_BUCKET_NAME env variable is required') + } + + const storageConfig = { + keyFilename: pathToGcsCredential, + ...(projectId ? 
{ projectId } : {}) + } + + const storage = new Storage(storageConfig) + const bucket = storage.bucket(bucketName) + return { storage, bucket } +} + export const getS3Config = () => { const accessKeyId = process.env.S3_STORAGE_ACCESS_KEY_ID const secretAccessKey = process.env.S3_STORAGE_SECRET_ACCESS_KEY @@ -384,20 +494,21 @@ export const getS3Config = () => { throw new Error('S3 storage configuration is missing') } - let credentials: S3ClientConfig['credentials'] | undefined + const s3Config: S3ClientConfig = { + region: region, + endpoint: customURL, + forcePathStyle: forcePathStyle + } + if (accessKeyId && secretAccessKey) { - credentials = { - accessKeyId, - secretAccessKey + s3Config.credentials = { + accessKeyId: accessKeyId, + secretAccessKey: secretAccessKey } } - const s3Client = new S3Client({ - credentials, - region, - endpoint: customURL, - forcePathStyle: forcePathStyle - }) + const s3Client = new S3Client(s3Config) + return { s3Client, Bucket } } diff --git a/packages/components/src/utils.ts b/packages/components/src/utils.ts index 7fc8426b1..957fd4992 100644 --- a/packages/components/src/utils.ts +++ b/packages/components/src/utils.ts @@ -5,7 +5,7 @@ import * as path from 'path' import { JSDOM } from 'jsdom' import { z } from 'zod' import { DataSource } from 'typeorm' -import { ICommonObject, IDatabaseEntity, IDocument, IMessage, INodeData, IVariable, MessageContentImageUrl } from './Interface' +import { ICommonObject, IDatabaseEntity, IFileUpload, IMessage, INodeData, IVariable, MessageContentImageUrl } from './Interface' import { AES, enc } from 'crypto-js' import { omit } from 'lodash' import { AIMessage, HumanMessage, BaseMessage } from '@langchain/core/messages' @@ -27,14 +27,18 @@ if (USE_AWS_SECRETS_MANAGER) { const accessKeyId = process.env.SECRETKEY_AWS_ACCESS_KEY const secretAccessKey = process.env.SECRETKEY_AWS_SECRET_KEY - let credentials: SecretsManagerClientConfig['credentials'] | undefined + const secretManagerConfig: 
SecretsManagerClientConfig = { + region: region + } + if (accessKeyId && secretAccessKey) { - credentials = { + secretManagerConfig.credentials = { accessKeyId, secretAccessKey } } - secretsManagerClient = new SecretsManagerClient({ credentials, region }) + + secretsManagerClient = new SecretsManagerClient(secretManagerConfig) } /* @@ -280,14 +284,16 @@ export const getInputVariables = (paramValue: string): string[] => { } /** - * Transform curly braces into double curly braces if the content includes a colon. + * Transform single curly braces into double curly braces if the content includes a colon. * @param input - The original string that may contain { ... } segments. * @returns The transformed string, where { ... } containing a colon has been replaced with {{ ... }}. */ export const transformBracesWithColon = (input: string): string => { - // This regex will match anything of the form `{ ... }` (no nested braces). - // `[^{}]*` means: match any characters that are not `{` or `}` zero or more times. - const regex = /\{([^{}]*?)\}/g + // This regex uses negative lookbehind (? { // groupContent is the text inside the braces `{ ... }`. 
@@ -537,6 +543,15 @@ const getEncryptionKey = async (): Promise => { return process.env.FLOWISE_SECRETKEY_OVERWRITE } try { + if (USE_AWS_SECRETS_MANAGER && secretsManagerClient) { + const secretId = process.env.SECRETKEY_AWS_NAME || 'FlowiseEncryptionKey' + const command = new GetSecretValueCommand({ SecretId: secretId }) + const response = await secretsManagerClient.send(command) + + if (response.SecretString) { + return response.SecretString + } + } return await fs.promises.readFile(getEncryptionKeyPath(), 'utf8') } catch (error) { throw new Error(error) @@ -555,18 +570,24 @@ const decryptCredentialData = async (encryptedData: string): Promise doc.pageContent).join('\n') - messageWithFileUploads += `${pageContents}\n\n` + const documents: string = await fileLoaderNodeInstance.init(nodeData, '', options) + messageWithFileUploads += `${documents}\n\n` } } const messageContent = messageWithFileUploads ? `${messageWithFileUploads}\n\n${message.content}` : message.content @@ -766,17 +788,23 @@ export const mapChatMessageToBaseMessage = async (chatmessages: any[] = []): Pro * @param {IMessage[]} chatHistory * @returns {string} */ -export const convertChatHistoryToText = (chatHistory: IMessage[] = []): string => { +export const convertChatHistoryToText = (chatHistory: IMessage[] | { content: string; role: string }[] = []): string => { return chatHistory .map((chatMessage) => { - if (chatMessage.type === 'apiMessage') { - return `Assistant: ${chatMessage.message}` - } else if (chatMessage.type === 'userMessage') { - return `Human: ${chatMessage.message}` + if (!chatMessage) return '' + const messageContent = 'message' in chatMessage ? chatMessage.message : chatMessage.content + if (!messageContent || messageContent.trim() === '') return '' + + const messageType = 'type' in chatMessage ? 
chatMessage.type : chatMessage.role + if (messageType === 'apiMessage' || messageType === 'assistant') { + return `Assistant: ${messageContent}` + } else if (messageType === 'userMessage' || messageType === 'user') { + return `Human: ${messageContent}` } else { - return `${chatMessage.message}` + return `${messageContent}` } }) + .filter((message) => message !== '') // Remove empty messages .join('\n') } @@ -820,6 +848,12 @@ export const convertSchemaToZod = (schema: string | object): ICommonObject => { } else { zodObj[sch.property] = z.boolean().describe(sch.description).optional() } + } else if (sch.type === 'date') { + if (sch.required) { + zodObj[sch.property] = z.date({ required_error: `${sch.property} required` }).describe(sch.description) + } else { + zodObj[sch.property] = z.date().describe(sch.description).optional() + } } } return zodObj diff --git a/packages/components/src/validator.ts b/packages/components/src/validator.ts new file mode 100644 index 000000000..4948165eb --- /dev/null +++ b/packages/components/src/validator.ts @@ -0,0 +1,29 @@ +/** + * Validates if a string is a valid UUID v4 + * @param {string} uuid The string to validate + * @returns {boolean} True if valid UUID, false otherwise + */ +export const isValidUUID = (uuid: string): boolean => { + // UUID v4 regex pattern + const uuidV4Pattern = /^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$/i + return uuidV4Pattern.test(uuid) +} + +/** + * Validates if a string contains path traversal attempts + * @param {string} path The string to validate + * @returns {boolean} True if path traversal detected, false otherwise + */ +export const isPathTraversal = (path: string): boolean => { + // Check for common path traversal patterns + const dangerousPatterns = [ + '..', // Directory traversal + '/', // Root directory + '\\', // Windows root directory + '%2e', // URL encoded . 
+ '%2f', // URL encoded / + '%5c' // URL encoded \ + ] + + return dangerousPatterns.some((pattern) => path.toLowerCase().includes(pattern)) +} diff --git a/packages/server/.env.example b/packages/server/.env.example index dabae5a02..54db59268 100644 --- a/packages/server/.env.example +++ b/packages/server/.env.example @@ -9,6 +9,7 @@ PORT=3000 # SECRETKEY_AWS_ACCESS_KEY= # SECRETKEY_AWS_SECRET_KEY= # SECRETKEY_AWS_REGION=us-west-2 +# SECRETKEY_AWS_NAME=FlowiseEncryptionKey # NUMBER_OF_PROXIES= 1 # CORS_ORIGINS=* @@ -39,8 +40,6 @@ PORT=3000 # LANGCHAIN_API_KEY=your_api_key # LANGCHAIN_PROJECT=your_project -# DISABLE_FLOWISE_TELEMETRY=true - # Uncomment the following line to enable model list config, load the list of models from your local config file # see https://raw.githubusercontent.com/FlowiseAI/Flowise/main/packages/components/models.json for the format # MODEL_LIST_CONFIG_JSON=/your_model_list_config_file_path @@ -53,6 +52,10 @@ PORT=3000 # S3_STORAGE_REGION=us-west-2 # S3_ENDPOINT_URL= # S3_FORCE_PATH_STYLE=false +# GOOGLE_CLOUD_STORAGE_CREDENTIAL=/the/keyfilename/path +# GOOGLE_CLOUD_STORAGE_PROJ_ID= +# GOOGLE_CLOUD_STORAGE_BUCKET_NAME= +# GOOGLE_CLOUD_UNIFORM_BUCKET_ACCESS=true # SHOW_COMMUNITY_NODES=true # DISABLED_NODES=bufferMemory,chatOpenAI (comma separated list of node names to disable) @@ -83,6 +86,8 @@ PORT=3000 # QUEUE_NAME=flowise-queue # QUEUE_REDIS_EVENT_STREAM_MAX_LEN=100000 # WORKER_CONCURRENCY=100000 +# REMOVE_ON_AGE=86400 +# REMOVE_ON_COUNT=10000 # REDIS_URL= # REDIS_HOST=localhost # REDIS_PORT=6379 @@ -92,3 +97,5 @@ PORT=3000 # REDIS_CERT= # REDIS_KEY= # REDIS_CA= +# REDIS_KEEP_ALIVE= +# ENABLE_BULLMQ_DASHBOARD= \ No newline at end of file diff --git a/packages/server/README-ZH.md b/packages/server/README-ZH.md index a8e381b80..f0d7992e0 100644 --- a/packages/server/README-ZH.md +++ b/packages/server/README-ZH.md @@ -1,12 +1,12 @@ -# Flowise - 低代码 LLM 应用程序构建器 +# Flowise [English](./README.md) | 中文 
-![Flowise](https://github.com/FlowiseAI/Flowise/blob/main/images/flowise.gif?raw=true) +

以可视化方式构建 AI Agents

-拖放界面来构建自定义的 LLM 流程 +![Flowise](https://github.com/FlowiseAI/Flowise/blob/main/images/flowise_agentflow.gif?raw=true) ## ⚡ 快速入门 diff --git a/packages/server/README.md b/packages/server/README.md index 5f02c13d1..cc52dc522 100644 --- a/packages/server/README.md +++ b/packages/server/README.md @@ -1,12 +1,12 @@ -# Flowise - Low-Code LLM apps builder +# Flowise English | [中文](./README-ZH.md) -![Flowise](https://github.com/FlowiseAI/Flowise/blob/main/images/flowise.gif?raw=true) +

Build AI Agents, Visually

-Drag & drop UI to build your customized LLM flow +![Flowise](https://github.com/FlowiseAI/Flowise/blob/main/images/flowise_agentflow.gif?raw=true) ## ⚡Quick Start diff --git a/packages/server/marketplaces/agentflows/Customer Support Team Agents.json b/packages/server/marketplaces/agentflows/Customer Support Team Agents.json index 0dd232132..4db241a11 100644 --- a/packages/server/marketplaces/agentflows/Customer Support Team Agents.json +++ b/packages/server/marketplaces/agentflows/Customer Support Team Agents.json @@ -1,7 +1,7 @@ { "description": "Customer support team consisting of Support Representative and Quality Assurance Specialist to handle support tickets", "framework": ["Langchain"], - "usecases": ["Customer Support"], + "usecases": ["Customer Support", "Hierarchical Agent Teams"], "nodes": [ { "id": "supervisor_0", diff --git a/packages/server/marketplaces/agentflows/Lead Outreach.json b/packages/server/marketplaces/agentflows/Lead Outreach.json index d673388fb..a9173c0d2 100644 --- a/packages/server/marketplaces/agentflows/Lead Outreach.json +++ b/packages/server/marketplaces/agentflows/Lead Outreach.json @@ -1,7 +1,7 @@ { "description": "Research leads and create personalized email drafts for sales team", "framework": ["Langchain"], - "usecases": ["Leads"], + "usecases": ["Leads", "Hierarchical Agent Teams"], "nodes": [ { "id": "supervisor_0", diff --git a/packages/server/marketplaces/agentflows/Portfolio Management Team.json b/packages/server/marketplaces/agentflows/Portfolio Management Team.json index 2da53e411..cf4998f7b 100644 --- a/packages/server/marketplaces/agentflows/Portfolio Management Team.json +++ b/packages/server/marketplaces/agentflows/Portfolio Management Team.json @@ -1,7 +1,7 @@ { "description": "A team of portfolio manager, financial analyst, and risk manager working together to optimize an investment portfolio.", "framework": ["Langchain"], - "usecases": ["Finance & Accounting"], + "usecases": ["Finance & Accounting", "Hierarchical 
Agent Teams"], "nodes": [ { "id": "supervisor_0", diff --git a/packages/server/marketplaces/agentflows/Prompt Engineering Team.json b/packages/server/marketplaces/agentflows/Prompt Engineering Team.json index a5caa4163..659b6eff8 100644 --- a/packages/server/marketplaces/agentflows/Prompt Engineering Team.json +++ b/packages/server/marketplaces/agentflows/Prompt Engineering Team.json @@ -1,7 +1,7 @@ { "description": "Prompt engineering team working together to craft Worker Prompts for your AgentFlow.", "framework": ["Langchain"], - "usecases": ["Engineering"], + "usecases": ["Engineering", "Hierarchical Agent Teams"], "nodes": [ { "id": "supervisor_0", @@ -167,7 +167,7 @@ ], "inputs": { "workerName": " Prompt Creator", - "workerPrompt": "You are a Prompt Engineer. Your job is to craft system prompts for AI Agents based on user requests.\n\nHere is an example:\n\n1. User asks you to craft two AI Agent prompt messages for \"researching leads and creating personalized email drafts for the sales team\".\n\n2. You generate the following:\n\nAGENT 1\n\nName: \nLead Research\n\nSytyem Prompt: \nAs a member of the sales team at company, your mission is to explore the digital landscape for potential leads. Equipped with advanced tools and a strategic approach, you analyze data, trends, and interactions to discover opportunities that others might miss. Your efforts are vital in creating pathways for meaningful engagements and driving the company's growth.\nYour goal is to identify high-value leads that align with our ideal customer profile.\nPerform a thorough analysis of lead_company, a company that has recently shown interest in our solutions. Use all available data sources to create a detailed profile, concentrating on key decision-makers, recent business developments, and potential needs that match our offerings. 
This task is essential for effectively customizing our engagement strategy.\nAvoid making assumptions and only use information you are certain about.\nYou should produce a comprehensive report on lead_person, including company background, key personnel, recent milestones, and identified needs. Emphasize potential areas where our solutions can add value and suggest tailored engagement strategies. Pass the info to Lead Sales Representative.\n\nAGENT 2\n\nName: \nLead Sales Representative\n\nSystem Prompt: \nYou play a crucial role within company as the link between potential clients and the solutions they need. By crafting engaging, personalized messages, you not only inform leads about our company offerings but also make them feel valued and understood. Your role is essential in transforming interest into action, guiding leads from initial curiosity to committed engagement.\nYour goal is to nurture leads with tailored, compelling communications.\nLeveraging the insights from the lead profiling report on lead_company, create a personalized outreach campaign targeting lead_person, the position of lead_company. he campaign should highlight their recent lead_activity and demonstrate how our solutions can support their objectives. Your communication should align with lead_company's company culture and values, showcasing a thorough understanding of their business and needs. Avoid making assumptions and use only verified information.\nThe output should be a series of personalized email drafts customized for lead_company, specifically addressing lead_person. Each draft should present a compelling narrative that connects our solutions to their recent accomplishments and future goals. Ensure the tone is engaging, professional, and consistent with lead_company's corporate identity. Keep in natural, don't use strange and fancy words.\n\n3. 
IMPORTANT: Notice how the prompts in this example work together and are connected by \"Pass the info to Lead Sales Representative.\" The first prompt focuses on researching leads, while the second leverages that information to create personalized email drafts. This creates a cohesive workflow for the AI Agents.\n\n4. If the AI agent needs to use a tool to perform its task, it will indicate this on the system prompt, but you will not write any code for them (they already have the code for the tools they use).", + "workerPrompt": "You are a Prompt Engineer. Your job is to craft system prompts for AI Agents based on user requests.\n\nHere is an example:\n\n1. User asks you to craft two AI Agent prompt messages for \"researching leads and creating personalized email drafts for the sales team\".\n\n2. You generate the following:\n\nAGENT 1\n\nName: \nLead Research\n\nSystem Prompt: \nAs a member of the sales team at company, your mission is to explore the digital landscape for potential leads. Equipped with advanced tools and a strategic approach, you analyze data, trends, and interactions to discover opportunities that others might miss. Your efforts are vital in creating pathways for meaningful engagements and driving the company's growth.\nYour goal is to identify high-value leads that align with our ideal customer profile.\nPerform a thorough analysis of lead_company, a company that has recently shown interest in our solutions. Use all available data sources to create a detailed profile, concentrating on key decision-makers, recent business developments, and potential needs that match our offerings. This task is essential for effectively customizing our engagement strategy.\nAvoid making assumptions and only use information you are certain about.\nYou should produce a comprehensive report on lead_person, including company background, key personnel, recent milestones, and identified needs. 
Emphasize potential areas where our solutions can add value and suggest tailored engagement strategies. Pass the info to Lead Sales Representative.\n\nAGENT 2\n\nName: \nLead Sales Representative\n\nSystem Prompt: \nYou play a crucial role within company as the link between potential clients and the solutions they need. By crafting engaging, personalized messages, you not only inform leads about our company offerings but also make them feel valued and understood. Your role is essential in transforming interest into action, guiding leads from initial curiosity to committed engagement.\nYour goal is to nurture leads with tailored, compelling communications.\nLeveraging the insights from the lead profiling report on lead_company, create a personalized outreach campaign targeting lead_person, the position of lead_company. The campaign should highlight their recent lead_activity and demonstrate how our solutions can support their objectives. Your communication should align with lead_company's company culture and values, showcasing a thorough understanding of their business and needs. Avoid making assumptions and use only verified information.\nThe output should be a series of personalized email drafts customized for lead_company, specifically addressing lead_person. Each draft should present a compelling narrative that connects our solutions to their recent accomplishments and future goals. Ensure the tone is engaging, professional, and consistent with lead_company's corporate identity. Keep it natural, don't use strange and fancy words.\n\n3. IMPORTANT: Notice how the prompts in this example work together and are connected by \"Pass the info to Lead Sales Representative.\" The first prompt focuses on researching leads, while the second leverages that information to create personalized email drafts. This creates a cohesive workflow for the AI Agents.\n\n4. 
If the AI agent needs to use a tool to perform its task, it will indicate this on the system prompt, but you will not write any code for them (they already have the code for the tools they use).", "tools": "", "supervisor": "{{supervisor_0.data.instance}}", "model": "", diff --git a/packages/server/marketplaces/agentflows/Software Team.json b/packages/server/marketplaces/agentflows/Software Team.json index 518361c20..b61a63e44 100644 --- a/packages/server/marketplaces/agentflows/Software Team.json +++ b/packages/server/marketplaces/agentflows/Software Team.json @@ -1,7 +1,7 @@ { "description": "Software engineering team working together to build a feature, solve a problem, or complete a task.", "framework": ["Langchain"], - "usecases": ["Engineering"], + "usecases": ["Engineering", "Hierarchical Agent Teams"], "nodes": [ { "id": "supervisor_0", diff --git a/packages/server/marketplaces/agentflows/Text to SQL.json b/packages/server/marketplaces/agentflows/Text to SQL.json index 2c6ccb1a9..427e99814 100644 --- a/packages/server/marketplaces/agentflows/Text to SQL.json +++ b/packages/server/marketplaces/agentflows/Text to SQL.json @@ -1,7 +1,7 @@ { "description": "Text to SQL query process using team of 3 agents: SQL Expert, SQL Reviewer, and SQL Executor", "framework": ["Langchain"], - "usecases": ["SQL"], + "usecases": ["SQL", "Hierarchical Agent Teams"], "nodes": [ { "id": "supervisor_0", diff --git a/packages/server/marketplaces/agentflowsv2/Agentic RAG V2.json b/packages/server/marketplaces/agentflowsv2/Agentic RAG V2.json new file mode 100644 index 000000000..343307f12 --- /dev/null +++ b/packages/server/marketplaces/agentflowsv2/Agentic RAG V2.json @@ -0,0 +1,2159 @@ +{ + "description": "An agent based approach using AgentflowV2 to perform self-correcting question answering over documents", + "usecases": ["Reflective Agent"], + "nodes": [ + { + "id": "startAgentflow_0", + "type": "agentFlow", + "position": { + "x": -261.54516755177303, + "y": 62.39402454297252 
+ }, + "data": { + "id": "startAgentflow_0", + "label": "Start", + "version": 1, + "name": "startAgentflow", + "type": "Start", + "color": "#7EE787", + "hideInput": true, + "baseClasses": ["Start"], + "category": "Agent Flows", + "description": "Starting point of the agentflow", + "inputParams": [ + { + "label": "Input Type", + "name": "startInputType", + "type": "options", + "options": [ + { + "label": "Chat Input", + "name": "chatInput", + "description": "Start the conversation with chat input" + }, + { + "label": "Form Input", + "name": "formInput", + "description": "Start the workflow with form inputs" + } + ], + "default": "chatInput", + "id": "startAgentflow_0-input-startInputType-options", + "display": true + }, + { + "label": "Form Title", + "name": "formTitle", + "type": "string", + "placeholder": "Please Fill Out The Form", + "show": { + "startInputType": "formInput" + }, + "id": "startAgentflow_0-input-formTitle-string", + "display": false + }, + { + "label": "Form Description", + "name": "formDescription", + "type": "string", + "placeholder": "Complete all fields below to continue", + "show": { + "startInputType": "formInput" + }, + "id": "startAgentflow_0-input-formDescription-string", + "display": false + }, + { + "label": "Form Input Types", + "name": "formInputTypes", + "description": "Specify the type of form input", + "type": "array", + "show": { + "startInputType": "formInput" + }, + "array": [ + { + "label": "Type", + "name": "type", + "type": "options", + "options": [ + { + "label": "String", + "name": "string" + }, + { + "label": "Number", + "name": "number" + }, + { + "label": "Boolean", + "name": "boolean" + }, + { + "label": "Options", + "name": "options" + } + ], + "default": "string" + }, + { + "label": "Label", + "name": "label", + "type": "string", + "placeholder": "Label for the input" + }, + { + "label": "Variable Name", + "name": "name", + "type": "string", + "placeholder": "Variable name for the input (must be camel case)", + 
"description": "Variable name must be camel case. For example: firstName, lastName, etc." + }, + { + "label": "Add Options", + "name": "addOptions", + "type": "array", + "show": { + "formInputTypes[$index].type": "options" + }, + "array": [ + { + "label": "Option", + "name": "option", + "type": "string" + } + ] + } + ], + "id": "startAgentflow_0-input-formInputTypes-array", + "display": false + }, + { + "label": "Ephemeral Memory", + "name": "startEphemeralMemory", + "type": "boolean", + "description": "Start fresh for every execution without past chat history", + "optional": true + }, + { + "label": "Flow State", + "name": "startState", + "description": "Runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "string", + "placeholder": "Foo" + }, + { + "label": "Value", + "name": "value", + "type": "string", + "placeholder": "Bar" + } + ], + "id": "startAgentflow_0-input-startState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "startInputType": "chatInput", + "formTitle": "", + "formDescription": "", + "formInputTypes": "", + "startState": [ + { + "key": "query", + "value": "" + } + ] + }, + "outputAnchors": [ + { + "id": "startAgentflow_0-output-startAgentflow", + "label": "Start", + "name": "startAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "width": 101, + "height": 65, + "selected": false, + "positionAbsolute": { + "x": -261.54516755177303, + "y": 62.39402454297252 + }, + "dragging": false + }, + { + "id": "conditionAgentAgentflow_0", + "position": { + "x": -114.84790789259606, + "y": 53.22583468442305 + }, + "data": { + "id": "conditionAgentAgentflow_0", + "label": "Check if query valid", + "version": 1, + "name": "conditionAgentAgentflow", + "type": "ConditionAgent", + "color": "#ff8fab", + "baseClasses": ["ConditionAgent"], + "category": "Agent Flows", + "description": "Utilize an agent to split flows based on 
dynamic conditions", + "inputParams": [ + { + "label": "Model", + "name": "conditionAgentModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "conditionAgentAgentflow_0-input-conditionAgentModel-asyncOptions", + "display": true + }, + { + "label": "Instructions", + "name": "conditionAgentInstructions", + "type": "string", + "description": "A general instructions of what the condition agent should do", + "rows": 4, + "acceptVariable": true, + "placeholder": "Determine if the user is interested in learning about AI", + "id": "conditionAgentAgentflow_0-input-conditionAgentInstructions-string", + "display": true + }, + { + "label": "Input", + "name": "conditionAgentInput", + "type": "string", + "description": "Input to be used for the condition agent", + "rows": 4, + "acceptVariable": true, + "default": "

{{ question }}

", + "id": "conditionAgentAgentflow_0-input-conditionAgentInput-string", + "display": true + }, + { + "label": "Scenarios", + "name": "conditionAgentScenarios", + "description": "Define the scenarios that will be used as the conditions to split the flow", + "type": "array", + "array": [ + { + "label": "Scenario", + "name": "scenario", + "type": "string", + "placeholder": "User is asking for a pizza" + } + ], + "default": [ + { + "scenario": "AI Related" + }, + { + "scenario": "General" + } + ], + "id": "conditionAgentAgentflow_0-input-conditionAgentScenarios-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "conditionAgentModel": "chatOpenAI", + "conditionAgentInstructions": "

Check if user is asking about AI related topic, or just general query

", + "conditionAgentInput": "

{{ question }}

", + "conditionAgentScenarios": [ + { + "scenario": "AI Related" + }, + { + "scenario": "General" + } + ], + "conditionAgentModelConfig": { + "credential": "", + "modelName": "gpt-4o-mini", + "temperature": 0.9, + "streaming": true, + "maxTokens": "", + "topP": "", + "frequencyPenalty": "", + "presencePenalty": "", + "timeout": "", + "strictToolCalling": "", + "stopSequence": "", + "basepath": "", + "proxyUrl": "", + "baseOptions": "", + "allowImageUploads": "", + "imageResolution": "low", + "reasoningEffort": "medium", + "conditionAgentModel": "chatOpenAI" + } + }, + "outputAnchors": [ + { + "id": "conditionAgentAgentflow_0-output-0", + "label": "Condition Agent", + "name": "conditionAgentAgentflow" + }, + { + "id": "conditionAgentAgentflow_0-output-1", + "label": "Condition Agent", + "name": "conditionAgentAgentflow" + } + ], + "outputs": { + "conditionAgentAgentflow": "" + }, + "selected": false + }, + "type": "agentFlow", + "width": 190, + "height": 80, + "selected": false, + "positionAbsolute": { + "x": -114.84790789259606, + "y": 53.22583468442305 + }, + "dragging": false + }, + { + "id": "llmAgentflow_0", + "position": { + "x": 158.29022963739308, + "y": -20.666608318859062 + }, + "data": { + "id": "llmAgentflow_0", + "label": "Generate Query", + "version": 1, + "name": "llmAgentflow", + "type": "LLM", + "color": "#64B5F6", + "baseClasses": ["LLM"], + "category": "Agent Flows", + "description": "Large language models to analyze user-provided inputs and generate responses", + "inputParams": [ + { + "label": "Model", + "name": "llmModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "llmAgentflow_0-input-llmModel-asyncOptions", + "display": true + }, + { + "label": "Messages", + "name": "llmMessages", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Role", + "name": "role", + "type": "options", + "options": [ + { + "label": "System", + "name": "system" + }, + { + "label": 
"Assistant", + "name": "assistant" + }, + { + "label": "Developer", + "name": "developer" + }, + { + "label": "User", + "name": "user" + } + ] + }, + { + "label": "Content", + "name": "content", + "type": "string", + "acceptVariable": true, + "generateInstruction": true, + "rows": 4 + } + ], + "id": "llmAgentflow_0-input-llmMessages-array", + "display": true + }, + { + "label": "Enable Memory", + "name": "llmEnableMemory", + "type": "boolean", + "description": "Enable memory for the conversation thread", + "default": true, + "optional": true, + "id": "llmAgentflow_0-input-llmEnableMemory-boolean", + "display": true + }, + { + "label": "Memory Type", + "name": "llmMemoryType", + "type": "options", + "options": [ + { + "label": "All Messages", + "name": "allMessages", + "description": "Retrieve all messages from the conversation" + }, + { + "label": "Window Size", + "name": "windowSize", + "description": "Uses a fixed window size to surface the last N messages" + }, + { + "label": "Conversation Summary", + "name": "conversationSummary", + "description": "Summarizes the whole conversation" + }, + { + "label": "Conversation Summary Buffer", + "name": "conversationSummaryBuffer", + "description": "Summarize conversations once token limit is reached. Default to 2000" + } + ], + "optional": true, + "default": "allMessages", + "show": { + "llmEnableMemory": true + }, + "id": "llmAgentflow_0-input-llmMemoryType-options", + "display": true + }, + { + "label": "Window Size", + "name": "llmMemoryWindowSize", + "type": "number", + "default": "20", + "description": "Uses a fixed window size to surface the last N messages", + "show": { + "llmMemoryType": "windowSize" + }, + "id": "llmAgentflow_0-input-llmMemoryWindowSize-number", + "display": false + }, + { + "label": "Max Token Limit", + "name": "llmMemoryMaxTokenLimit", + "type": "number", + "default": "2000", + "description": "Summarize conversations once token limit is reached. 
Default to 2000", + "show": { + "llmMemoryType": "conversationSummaryBuffer" + }, + "id": "llmAgentflow_0-input-llmMemoryMaxTokenLimit-number", + "display": false + }, + { + "label": "Input Message", + "name": "llmUserMessage", + "type": "string", + "description": "Add an input message as user message at the end of the conversation", + "rows": 4, + "optional": true, + "acceptVariable": true, + "show": { + "llmEnableMemory": true + }, + "id": "llmAgentflow_0-input-llmUserMessage-string", + "display": true + }, + { + "label": "Return Response As", + "name": "llmReturnResponseAs", + "type": "options", + "options": [ + { + "label": "User Message", + "name": "userMessage" + }, + { + "label": "Assistant Message", + "name": "assistantMessage" + } + ], + "default": "userMessage", + "id": "llmAgentflow_0-input-llmReturnResponseAs-options", + "display": true + }, + { + "label": "JSON Structured Output", + "name": "llmStructuredOutput", + "description": "Instruct the LLM to give output in a JSON structured schema", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "string" + }, + { + "label": "Type", + "name": "type", + "type": "options", + "options": [ + { + "label": "String", + "name": "string" + }, + { + "label": "String Array", + "name": "stringArray" + }, + { + "label": "Number", + "name": "number" + }, + { + "label": "Boolean", + "name": "boolean" + }, + { + "label": "Enum", + "name": "enum" + }, + { + "label": "JSON Array", + "name": "jsonArray" + } + ] + }, + { + "label": "Enum Values", + "name": "enumValues", + "type": "string", + "placeholder": "value1, value2, value3", + "description": "Enum values. 
Separated by comma", + "optional": true, + "show": { + "llmStructuredOutput[$index].type": "enum" + } + }, + { + "label": "JSON Schema", + "name": "jsonSchema", + "type": "code", + "placeholder": "{\n \"answer\": {\n \"type\": \"string\",\n \"description\": \"Value of the answer\"\n },\n \"reason\": {\n \"type\": \"string\",\n \"description\": \"Reason for the answer\"\n },\n \"optional\": {\n \"type\": \"boolean\"\n },\n \"count\": {\n \"type\": \"number\"\n },\n \"children\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"object\",\n \"properties\": {\n \"value\": {\n \"type\": \"string\",\n \"description\": \"Value of the children's answer\"\n }\n }\n }\n }\n}", + "description": "JSON schema for the structured output", + "optional": true, + "show": { + "llmStructuredOutput[$index].type": "jsonArray" + } + }, + { + "label": "Description", + "name": "description", + "type": "string", + "placeholder": "Description of the key" + } + ], + "id": "llmAgentflow_0-input-llmStructuredOutput-array", + "display": true + }, + { + "label": "Update Flow State", + "name": "llmUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": "string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "llmAgentflow_0-input-llmUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "llmModel": "chatOpenAI", + "llmMessages": [ + { + "role": "system", + "content": "

Given the user question and history, construct a short string that can be used for searching vector database. Only generate the query, no meta comments, no explanation

Example:

Question: what are the events happening today?

Query: today's event

Example:

Question: how about the address?

Query: business address of the shop

Question: {{ question }}

Query:

" + } + ], + "llmEnableMemory": true, + "llmMemoryType": "allMessages", + "llmUserMessage": "", + "llmReturnResponseAs": "userMessage", + "llmStructuredOutput": "", + "llmUpdateState": [ + { + "key": "query", + "value": "

{{ output }}

" + } + ], + "llmModelConfig": { + "cache": "", + "modelName": "gpt-4o-mini", + "temperature": 0.9, + "streaming": true, + "maxTokens": "", + "topP": "", + "frequencyPenalty": "", + "presencePenalty": "", + "timeout": "", + "strictToolCalling": "", + "stopSequence": "", + "basepath": "", + "proxyUrl": "", + "baseOptions": "", + "allowImageUploads": "", + "imageResolution": "low", + "reasoningEffort": "medium", + "llmModel": "chatOpenAI" + } + }, + "outputAnchors": [ + { + "id": "llmAgentflow_0-output-llmAgentflow", + "label": "LLM", + "name": "llmAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 168, + "height": 71, + "selected": false, + "positionAbsolute": { + "x": 158.29022963739308, + "y": -20.666608318859062 + }, + "dragging": false + }, + { + "id": "llmAgentflow_1", + "position": { + "x": 165.82871786911647, + "y": 92.15131805222342 + }, + "data": { + "id": "llmAgentflow_1", + "label": "General Answer", + "version": 1, + "name": "llmAgentflow", + "type": "LLM", + "color": "#64B5F6", + "baseClasses": ["LLM"], + "category": "Agent Flows", + "description": "Large language models to analyze user-provided inputs and generate responses", + "inputParams": [ + { + "label": "Model", + "name": "llmModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "llmAgentflow_1-input-llmModel-asyncOptions", + "display": true + }, + { + "label": "Messages", + "name": "llmMessages", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Role", + "name": "role", + "type": "options", + "options": [ + { + "label": "System", + "name": "system" + }, + { + "label": "Assistant", + "name": "assistant" + }, + { + "label": "Developer", + "name": "developer" + }, + { + "label": "User", + "name": "user" + } + ] + }, + { + "label": "Content", + "name": "content", + "type": "string", + "acceptVariable": true, + "generateInstruction": true, + "rows": 4 + } + ], + "id": 
"llmAgentflow_1-input-llmMessages-array", + "display": true + }, + { + "label": "Enable Memory", + "name": "llmEnableMemory", + "type": "boolean", + "description": "Enable memory for the conversation thread", + "default": true, + "optional": true, + "id": "llmAgentflow_1-input-llmEnableMemory-boolean", + "display": true + }, + { + "label": "Memory Type", + "name": "llmMemoryType", + "type": "options", + "options": [ + { + "label": "All Messages", + "name": "allMessages", + "description": "Retrieve all messages from the conversation" + }, + { + "label": "Window Size", + "name": "windowSize", + "description": "Uses a fixed window size to surface the last N messages" + }, + { + "label": "Conversation Summary", + "name": "conversationSummary", + "description": "Summarizes the whole conversation" + }, + { + "label": "Conversation Summary Buffer", + "name": "conversationSummaryBuffer", + "description": "Summarize conversations once token limit is reached. Default to 2000" + } + ], + "optional": true, + "default": "allMessages", + "show": { + "llmEnableMemory": true + }, + "id": "llmAgentflow_1-input-llmMemoryType-options", + "display": true + }, + { + "label": "Window Size", + "name": "llmMemoryWindowSize", + "type": "number", + "default": "20", + "description": "Uses a fixed window size to surface the last N messages", + "show": { + "llmMemoryType": "windowSize" + }, + "id": "llmAgentflow_1-input-llmMemoryWindowSize-number", + "display": false + }, + { + "label": "Max Token Limit", + "name": "llmMemoryMaxTokenLimit", + "type": "number", + "default": "2000", + "description": "Summarize conversations once token limit is reached. 
Default to 2000", + "show": { + "llmMemoryType": "conversationSummaryBuffer" + }, + "id": "llmAgentflow_1-input-llmMemoryMaxTokenLimit-number", + "display": false + }, + { + "label": "Input Message", + "name": "llmUserMessage", + "type": "string", + "description": "Add an input message as user message at the end of the conversation", + "rows": 4, + "optional": true, + "acceptVariable": true, + "show": { + "llmEnableMemory": true + }, + "id": "llmAgentflow_1-input-llmUserMessage-string", + "display": true + }, + { + "label": "Return Response As", + "name": "llmReturnResponseAs", + "type": "options", + "options": [ + { + "label": "User Message", + "name": "userMessage" + }, + { + "label": "Assistant Message", + "name": "assistantMessage" + } + ], + "default": "userMessage", + "id": "llmAgentflow_1-input-llmReturnResponseAs-options", + "display": true + }, + { + "label": "JSON Structured Output", + "name": "llmStructuredOutput", + "description": "Instruct the LLM to give output in a JSON structured schema", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "string" + }, + { + "label": "Type", + "name": "type", + "type": "options", + "options": [ + { + "label": "String", + "name": "string" + }, + { + "label": "String Array", + "name": "stringArray" + }, + { + "label": "Number", + "name": "number" + }, + { + "label": "Boolean", + "name": "boolean" + }, + { + "label": "Enum", + "name": "enum" + }, + { + "label": "JSON Array", + "name": "jsonArray" + } + ] + }, + { + "label": "Enum Values", + "name": "enumValues", + "type": "string", + "placeholder": "value1, value2, value3", + "description": "Enum values. 
Separated by comma", + "optional": true, + "show": { + "llmStructuredOutput[$index].type": "enum" + } + }, + { + "label": "JSON Schema", + "name": "jsonSchema", + "type": "code", + "placeholder": "{\n \"answer\": {\n \"type\": \"string\",\n \"description\": \"Value of the answer\"\n },\n \"reason\": {\n \"type\": \"string\",\n \"description\": \"Reason for the answer\"\n },\n \"optional\": {\n \"type\": \"boolean\"\n },\n \"count\": {\n \"type\": \"number\"\n },\n \"children\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"object\",\n \"properties\": {\n \"value\": {\n \"type\": \"string\",\n \"description\": \"Value of the children's answer\"\n }\n }\n }\n }\n}", + "description": "JSON schema for the structured output", + "optional": true, + "show": { + "llmStructuredOutput[$index].type": "jsonArray" + } + }, + { + "label": "Description", + "name": "description", + "type": "string", + "placeholder": "Description of the key" + } + ], + "id": "llmAgentflow_1-input-llmStructuredOutput-array", + "display": true + }, + { + "label": "Update Flow State", + "name": "llmUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": "string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "llmAgentflow_1-input-llmUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "llmModel": "chatOpenAI", + "llmMessages": [], + "llmEnableMemory": true, + "llmMemoryType": "allMessages", + "llmUserMessage": "", + "llmReturnResponseAs": "userMessage", + "llmStructuredOutput": "", + "llmUpdateState": "", + "llmModelConfig": { + "credential": "", + "modelName": "gpt-4o-mini", + "temperature": 0.9, + "streaming": true, + "maxTokens": "", + 
"topP": "", + "frequencyPenalty": "", + "presencePenalty": "", + "timeout": "", + "strictToolCalling": "", + "stopSequence": "", + "basepath": "", + "proxyUrl": "", + "baseOptions": "", + "allowImageUploads": "", + "imageResolution": "low", + "reasoningEffort": "medium", + "llmModel": "chatOpenAI" + } + }, + "outputAnchors": [ + { + "id": "llmAgentflow_1-output-llmAgentflow", + "label": "LLM", + "name": "llmAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 168, + "height": 71, + "selected": false, + "positionAbsolute": { + "x": 165.82871786911647, + "y": 92.15131805222342 + }, + "dragging": false + }, + { + "id": "retrieverAgentflow_0", + "position": { + "x": 396.87575963946966, + "y": -17.41189617164227 + }, + "data": { + "id": "retrieverAgentflow_0", + "label": "Retriever Vector DB", + "version": 1, + "name": "retrieverAgentflow", + "type": "Retriever", + "color": "#b8bedd", + "baseClasses": ["Retriever"], + "category": "Agent Flows", + "description": "Retrieve information from vector database", + "inputParams": [ + { + "label": "Knowledge (Document Stores)", + "name": "retrieverKnowledgeDocumentStores", + "type": "array", + "description": "Document stores to retrieve information from. 
Document stores must be upserted in advance.", + "array": [ + { + "label": "Document Store", + "name": "documentStore", + "type": "asyncOptions", + "loadMethod": "listStores" + } + ], + "id": "retrieverAgentflow_0-input-retrieverKnowledgeDocumentStores-array", + "display": true + }, + { + "label": "Retriever Query", + "name": "retrieverQuery", + "type": "string", + "placeholder": "Enter your query here", + "rows": 4, + "acceptVariable": true, + "id": "retrieverAgentflow_0-input-retrieverQuery-string", + "display": true + }, + { + "label": "Output Format", + "name": "outputFormat", + "type": "options", + "options": [ + { + "label": "Text", + "name": "text" + }, + { + "label": "Text with Metadata", + "name": "textWithMetadata" + } + ], + "default": "text", + "id": "retrieverAgentflow_0-input-outputFormat-options", + "display": true + }, + { + "label": "Update Flow State", + "name": "retrieverUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": "string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "retrieverAgentflow_0-input-retrieverUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "retrieverKnowledgeDocumentStores": [ + { + "documentStore": "570df92b-087b-4d3b-9462-7a11283454a5:ai paper" + } + ], + "retrieverQuery": "

{{ $flow.state.query }}

", + "outputFormat": "text", + "retrieverUpdateState": "" + }, + "outputAnchors": [ + { + "id": "retrieverAgentflow_0-output-retrieverAgentflow", + "label": "Retriever", + "name": "retrieverAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 190, + "height": 65, + "selected": false, + "positionAbsolute": { + "x": 396.87575963946966, + "y": -17.41189617164227 + }, + "dragging": false + }, + { + "id": "conditionAgentAgentflow_1", + "position": { + "x": 647.9586712853835, + "y": -24.93225611691784 + }, + "data": { + "id": "conditionAgentAgentflow_1", + "label": "Check if docs relevant", + "version": 1, + "name": "conditionAgentAgentflow", + "type": "ConditionAgent", + "color": "#ff8fab", + "baseClasses": ["ConditionAgent"], + "category": "Agent Flows", + "description": "Utilize an agent to split flows based on dynamic conditions", + "inputParams": [ + { + "label": "Model", + "name": "conditionAgentModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "conditionAgentAgentflow_1-input-conditionAgentModel-asyncOptions", + "display": true + }, + { + "label": "Instructions", + "name": "conditionAgentInstructions", + "type": "string", + "description": "A general instructions of what the condition agent should do", + "rows": 4, + "acceptVariable": true, + "placeholder": "Determine if the user is interested in learning about AI", + "id": "conditionAgentAgentflow_1-input-conditionAgentInstructions-string", + "display": true + }, + { + "label": "Input", + "name": "conditionAgentInput", + "type": "string", + "description": "Input to be used for the condition agent", + "rows": 4, + "acceptVariable": true, + "default": "

{{ question }}

", + "id": "conditionAgentAgentflow_1-input-conditionAgentInput-string", + "display": true + }, + { + "label": "Scenarios", + "name": "conditionAgentScenarios", + "description": "Define the scenarios that will be used as the conditions to split the flow", + "type": "array", + "array": [ + { + "label": "Scenario", + "name": "scenario", + "type": "string", + "placeholder": "User is asking for a pizza" + } + ], + "default": [ + { + "scenario": "Relevant" + }, + { + "scenario": "Irrelevant" + } + ], + "id": "conditionAgentAgentflow_1-input-conditionAgentScenarios-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "conditionAgentModel": "chatOpenAI", + "conditionAgentInstructions": "

Determine if the document is relevant to user question. User question is {{ question }}

", + "conditionAgentInput": "

{{ retrieverAgentflow_0 }}

", + "conditionAgentScenarios": [ + { + "scenario": "Relevant" + }, + { + "scenario": "Irrelevant" + } + ], + "conditionAgentModelConfig": { + "credential": "", + "modelName": "gpt-4o-mini", + "temperature": 0.9, + "streaming": true, + "maxTokens": "", + "topP": "", + "frequencyPenalty": "", + "presencePenalty": "", + "timeout": "", + "strictToolCalling": "", + "stopSequence": "", + "basepath": "", + "proxyUrl": "", + "baseOptions": "", + "allowImageUploads": "", + "imageResolution": "low", + "reasoningEffort": "medium", + "conditionAgentModel": "chatOpenAI" + } + }, + "outputAnchors": [ + { + "id": "conditionAgentAgentflow_1-output-0", + "label": "Condition Agent", + "name": "conditionAgentAgentflow" + }, + { + "id": "conditionAgentAgentflow_1-output-1", + "label": "Condition Agent", + "name": "conditionAgentAgentflow" + } + ], + "outputs": { + "conditionAgentAgentflow": "" + }, + "selected": false + }, + "type": "agentFlow", + "width": 206, + "height": 80, + "selected": false, + "positionAbsolute": { + "x": 647.9586712853835, + "y": -24.93225611691784 + }, + "dragging": false + }, + { + "id": "llmAgentflow_2", + "position": { + "x": 920.5416793343077, + "y": -75.82606372993476 + }, + "data": { + "id": "llmAgentflow_2", + "label": "Generate Response", + "version": 1, + "name": "llmAgentflow", + "type": "LLM", + "color": "#64B5F6", + "baseClasses": ["LLM"], + "category": "Agent Flows", + "description": "Large language models to analyze user-provided inputs and generate responses", + "inputParams": [ + { + "label": "Model", + "name": "llmModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "llmAgentflow_2-input-llmModel-asyncOptions", + "display": true + }, + { + "label": "Messages", + "name": "llmMessages", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Role", + "name": "role", + "type": "options", + "options": [ + { + "label": "System", + "name": "system" + }, + { + "label": 
"Assistant", + "name": "assistant" + }, + { + "label": "Developer", + "name": "developer" + }, + { + "label": "User", + "name": "user" + } + ] + }, + { + "label": "Content", + "name": "content", + "type": "string", + "acceptVariable": true, + "generateInstruction": true, + "rows": 4 + } + ], + "id": "llmAgentflow_2-input-llmMessages-array", + "display": true + }, + { + "label": "Enable Memory", + "name": "llmEnableMemory", + "type": "boolean", + "description": "Enable memory for the conversation thread", + "default": true, + "optional": true, + "id": "llmAgentflow_2-input-llmEnableMemory-boolean", + "display": true + }, + { + "label": "Memory Type", + "name": "llmMemoryType", + "type": "options", + "options": [ + { + "label": "All Messages", + "name": "allMessages", + "description": "Retrieve all messages from the conversation" + }, + { + "label": "Window Size", + "name": "windowSize", + "description": "Uses a fixed window size to surface the last N messages" + }, + { + "label": "Conversation Summary", + "name": "conversationSummary", + "description": "Summarizes the whole conversation" + }, + { + "label": "Conversation Summary Buffer", + "name": "conversationSummaryBuffer", + "description": "Summarize conversations once token limit is reached. Default to 2000" + } + ], + "optional": true, + "default": "allMessages", + "show": { + "llmEnableMemory": true + }, + "id": "llmAgentflow_2-input-llmMemoryType-options", + "display": true + }, + { + "label": "Window Size", + "name": "llmMemoryWindowSize", + "type": "number", + "default": "20", + "description": "Uses a fixed window size to surface the last N messages", + "show": { + "llmMemoryType": "windowSize" + }, + "id": "llmAgentflow_2-input-llmMemoryWindowSize-number", + "display": false + }, + { + "label": "Max Token Limit", + "name": "llmMemoryMaxTokenLimit", + "type": "number", + "default": "2000", + "description": "Summarize conversations once token limit is reached. 
Default to 2000", + "show": { + "llmMemoryType": "conversationSummaryBuffer" + }, + "id": "llmAgentflow_2-input-llmMemoryMaxTokenLimit-number", + "display": false + }, + { + "label": "Input Message", + "name": "llmUserMessage", + "type": "string", + "description": "Add an input message as user message at the end of the conversation", + "rows": 4, + "optional": true, + "acceptVariable": true, + "show": { + "llmEnableMemory": true + }, + "id": "llmAgentflow_2-input-llmUserMessage-string", + "display": true + }, + { + "label": "Return Response As", + "name": "llmReturnResponseAs", + "type": "options", + "options": [ + { + "label": "User Message", + "name": "userMessage" + }, + { + "label": "Assistant Message", + "name": "assistantMessage" + } + ], + "default": "userMessage", + "id": "llmAgentflow_2-input-llmReturnResponseAs-options", + "display": true + }, + { + "label": "JSON Structured Output", + "name": "llmStructuredOutput", + "description": "Instruct the LLM to give output in a JSON structured schema", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "string" + }, + { + "label": "Type", + "name": "type", + "type": "options", + "options": [ + { + "label": "String", + "name": "string" + }, + { + "label": "String Array", + "name": "stringArray" + }, + { + "label": "Number", + "name": "number" + }, + { + "label": "Boolean", + "name": "boolean" + }, + { + "label": "Enum", + "name": "enum" + }, + { + "label": "JSON Array", + "name": "jsonArray" + } + ] + }, + { + "label": "Enum Values", + "name": "enumValues", + "type": "string", + "placeholder": "value1, value2, value3", + "description": "Enum values. 
Separated by comma", + "optional": true, + "show": { + "llmStructuredOutput[$index].type": "enum" + } + }, + { + "label": "JSON Schema", + "name": "jsonSchema", + "type": "code", + "placeholder": "{\n \"answer\": {\n \"type\": \"string\",\n \"description\": \"Value of the answer\"\n },\n \"reason\": {\n \"type\": \"string\",\n \"description\": \"Reason for the answer\"\n },\n \"optional\": {\n \"type\": \"boolean\"\n },\n \"count\": {\n \"type\": \"number\"\n },\n \"children\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"object\",\n \"properties\": {\n \"value\": {\n \"type\": \"string\",\n \"description\": \"Value of the children's answer\"\n }\n }\n }\n }\n}", + "description": "JSON schema for the structured output", + "optional": true, + "show": { + "llmStructuredOutput[$index].type": "jsonArray" + } + }, + { + "label": "Description", + "name": "description", + "type": "string", + "placeholder": "Description of the key" + } + ], + "id": "llmAgentflow_2-input-llmStructuredOutput-array", + "display": true + }, + { + "label": "Update Flow State", + "name": "llmUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": "string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "llmAgentflow_2-input-llmUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "llmModel": "chatOpenAI", + "llmMessages": "", + "llmEnableMemory": true, + "llmMemoryType": "allMessages", + "llmUserMessage": "

Given the question: {{ question }}

And the findings: {{ retrieverAgentflow_0 }}

Output the final response

", + "llmReturnResponseAs": "userMessage", + "llmStructuredOutput": "", + "llmUpdateState": "", + "llmModelConfig": { + "cache": "", + "modelName": "gpt-4o-mini", + "temperature": 0.9, + "streaming": true, + "maxTokens": "", + "topP": "", + "frequencyPenalty": "", + "presencePenalty": "", + "timeout": "", + "strictToolCalling": "", + "stopSequence": "", + "basepath": "", + "proxyUrl": "", + "baseOptions": "", + "allowImageUploads": "", + "imageResolution": "low", + "reasoningEffort": "medium", + "llmModel": "chatOpenAI" + } + }, + "outputAnchors": [ + { + "id": "llmAgentflow_2-output-llmAgentflow", + "label": "LLM", + "name": "llmAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 190, + "height": 71, + "selected": false, + "positionAbsolute": { + "x": 920.5416793343077, + "y": -75.82606372993476 + }, + "dragging": false + }, + { + "id": "llmAgentflow_3", + "position": { + "x": 921.1014768144131, + "y": 26.898902739007895 + }, + "data": { + "id": "llmAgentflow_3", + "label": "Regenerate Question", + "version": 1, + "name": "llmAgentflow", + "type": "LLM", + "color": "#64B5F6", + "baseClasses": ["LLM"], + "category": "Agent Flows", + "description": "Large language models to analyze user-provided inputs and generate responses", + "inputParams": [ + { + "label": "Model", + "name": "llmModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "llmAgentflow_3-input-llmModel-asyncOptions", + "display": true + }, + { + "label": "Messages", + "name": "llmMessages", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Role", + "name": "role", + "type": "options", + "options": [ + { + "label": "System", + "name": "system" + }, + { + "label": "Assistant", + "name": "assistant" + }, + { + "label": "Developer", + "name": "developer" + }, + { + "label": "User", + "name": "user" + } + ] + }, + { + "label": "Content", + "name": "content", + "type": "string", + 
"acceptVariable": true, + "generateInstruction": true, + "rows": 4 + } + ], + "id": "llmAgentflow_3-input-llmMessages-array", + "display": true + }, + { + "label": "Enable Memory", + "name": "llmEnableMemory", + "type": "boolean", + "description": "Enable memory for the conversation thread", + "default": true, + "optional": true, + "id": "llmAgentflow_3-input-llmEnableMemory-boolean", + "display": true + }, + { + "label": "Memory Type", + "name": "llmMemoryType", + "type": "options", + "options": [ + { + "label": "All Messages", + "name": "allMessages", + "description": "Retrieve all messages from the conversation" + }, + { + "label": "Window Size", + "name": "windowSize", + "description": "Uses a fixed window size to surface the last N messages" + }, + { + "label": "Conversation Summary", + "name": "conversationSummary", + "description": "Summarizes the whole conversation" + }, + { + "label": "Conversation Summary Buffer", + "name": "conversationSummaryBuffer", + "description": "Summarize conversations once token limit is reached. Default to 2000" + } + ], + "optional": true, + "default": "allMessages", + "show": { + "llmEnableMemory": true + }, + "id": "llmAgentflow_3-input-llmMemoryType-options", + "display": true + }, + { + "label": "Window Size", + "name": "llmMemoryWindowSize", + "type": "number", + "default": "20", + "description": "Uses a fixed window size to surface the last N messages", + "show": { + "llmMemoryType": "windowSize" + }, + "id": "llmAgentflow_3-input-llmMemoryWindowSize-number", + "display": false + }, + { + "label": "Max Token Limit", + "name": "llmMemoryMaxTokenLimit", + "type": "number", + "default": "2000", + "description": "Summarize conversations once token limit is reached. 
Default to 2000", + "show": { + "llmMemoryType": "conversationSummaryBuffer" + }, + "id": "llmAgentflow_3-input-llmMemoryMaxTokenLimit-number", + "display": false + }, + { + "label": "Input Message", + "name": "llmUserMessage", + "type": "string", + "description": "Add an input message as user message at the end of the conversation", + "rows": 4, + "optional": true, + "acceptVariable": true, + "show": { + "llmEnableMemory": true + }, + "id": "llmAgentflow_3-input-llmUserMessage-string", + "display": true + }, + { + "label": "Return Response As", + "name": "llmReturnResponseAs", + "type": "options", + "options": [ + { + "label": "User Message", + "name": "userMessage" + }, + { + "label": "Assistant Message", + "name": "assistantMessage" + } + ], + "default": "userMessage", + "id": "llmAgentflow_3-input-llmReturnResponseAs-options", + "display": true + }, + { + "label": "JSON Structured Output", + "name": "llmStructuredOutput", + "description": "Instruct the LLM to give output in a JSON structured schema", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "string" + }, + { + "label": "Type", + "name": "type", + "type": "options", + "options": [ + { + "label": "String", + "name": "string" + }, + { + "label": "String Array", + "name": "stringArray" + }, + { + "label": "Number", + "name": "number" + }, + { + "label": "Boolean", + "name": "boolean" + }, + { + "label": "Enum", + "name": "enum" + }, + { + "label": "JSON Array", + "name": "jsonArray" + } + ] + }, + { + "label": "Enum Values", + "name": "enumValues", + "type": "string", + "placeholder": "value1, value2, value3", + "description": "Enum values. 
Separated by comma", + "optional": true, + "show": { + "llmStructuredOutput[$index].type": "enum" + } + }, + { + "label": "JSON Schema", + "name": "jsonSchema", + "type": "code", + "placeholder": "{\n \"answer\": {\n \"type\": \"string\",\n \"description\": \"Value of the answer\"\n },\n \"reason\": {\n \"type\": \"string\",\n \"description\": \"Reason for the answer\"\n },\n \"optional\": {\n \"type\": \"boolean\"\n },\n \"count\": {\n \"type\": \"number\"\n },\n \"children\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"object\",\n \"properties\": {\n \"value\": {\n \"type\": \"string\",\n \"description\": \"Value of the children's answer\"\n }\n }\n }\n }\n}", + "description": "JSON schema for the structured output", + "optional": true, + "show": { + "llmStructuredOutput[$index].type": "jsonArray" + } + }, + { + "label": "Description", + "name": "description", + "type": "string", + "placeholder": "Description of the key" + } + ], + "id": "llmAgentflow_3-input-llmStructuredOutput-array", + "display": true + }, + { + "label": "Update Flow State", + "name": "llmUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": "string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "llmAgentflow_3-input-llmUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "llmModel": "chatOpenAI", + "llmMessages": [ + { + "role": "system", + "content": "

You are a helpful assistant that can transform the query to produce a better question.

" + } + ], + "llmEnableMemory": true, + "llmMemoryType": "allMessages", + "llmUserMessage": "

Look at the input and try to reason about the underlying semantic intent / meaning.

Here is the initial question:

{{ $flow.state.query }}

Formulate an improved question:

", + "llmReturnResponseAs": "userMessage", + "llmStructuredOutput": "", + "llmUpdateState": [ + { + "key": "query", + "value": "

{{ output }}

" + } + ], + "llmModelConfig": { + "cache": "", + "modelName": "gpt-4o-mini", + "temperature": 0.9, + "streaming": true, + "maxTokens": "", + "topP": "", + "frequencyPenalty": "", + "presencePenalty": "", + "timeout": "", + "strictToolCalling": "", + "stopSequence": "", + "basepath": "", + "proxyUrl": "", + "baseOptions": "", + "allowImageUploads": "", + "imageResolution": "low", + "reasoningEffort": "medium", + "llmModel": "chatOpenAI" + } + }, + "outputAnchors": [ + { + "id": "llmAgentflow_3-output-llmAgentflow", + "label": "LLM", + "name": "llmAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 199, + "height": 71, + "selected": false, + "positionAbsolute": { + "x": 921.1014768144131, + "y": 26.898902739007895 + }, + "dragging": false + }, + { + "id": "loopAgentflow_0", + "position": { + "x": 1160.0553838519766, + "y": 30.06685001229809 + }, + "data": { + "id": "loopAgentflow_0", + "label": "Loop back to Retriever", + "version": 1, + "name": "loopAgentflow", + "type": "Loop", + "color": "#FFA07A", + "hideOutput": true, + "baseClasses": ["Loop"], + "category": "Agent Flows", + "description": "Loop back to a previous node", + "inputParams": [ + { + "label": "Loop Back To", + "name": "loopBackToNode", + "type": "asyncOptions", + "loadMethod": "listPreviousNodes", + "freeSolo": true, + "id": "loopAgentflow_0-input-loopBackToNode-asyncOptions", + "display": true + }, + { + "label": "Max Loop Count", + "name": "maxLoopCount", + "type": "number", + "default": 5, + "id": "loopAgentflow_0-input-maxLoopCount-number", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "loopBackToNode": "retrieverAgentflow_0-Retriever Vector DB", + "maxLoopCount": 5 + }, + "outputAnchors": [], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 208, + "height": 65, + "selected": false, + "positionAbsolute": { + "x": 1160.0553838519766, + "y": 30.06685001229809 + }, + "dragging": false + }, + { + "id": 
"stickyNoteAgentflow_0", + "position": { + "x": 145.5705985486235, + "y": -116.29641765720946 + }, + "data": { + "id": "stickyNoteAgentflow_0", + "label": "Sticky Note", + "version": 1, + "name": "stickyNoteAgentflow", + "type": "StickyNote", + "color": "#fee440", + "baseClasses": ["StickyNote"], + "category": "Agent Flows", + "description": "Add notes to the agent flow", + "inputParams": [ + { + "label": "", + "name": "note", + "type": "string", + "rows": 1, + "placeholder": "Type something here", + "optional": true, + "id": "stickyNoteAgentflow_0-input-note-string", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "note": "First update of the state.query" + }, + "outputAnchors": [ + { + "id": "stickyNoteAgentflow_0-output-stickyNoteAgentflow", + "label": "Sticky Note", + "name": "stickyNoteAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "stickyNote", + "width": 189, + "height": 81, + "selected": false, + "positionAbsolute": { + "x": 145.5705985486235, + "y": -116.29641765720946 + }, + "dragging": false + }, + { + "id": "stickyNoteAgentflow_1", + "position": { + "x": 923.4413972289242, + "y": 110.04672879978278 + }, + "data": { + "id": "stickyNoteAgentflow_1", + "label": "Sticky Note (1)", + "version": 1, + "name": "stickyNoteAgentflow", + "type": "StickyNote", + "color": "#fee440", + "baseClasses": ["StickyNote"], + "category": "Agent Flows", + "description": "Add notes to the agent flow", + "inputParams": [ + { + "label": "", + "name": "note", + "type": "string", + "rows": 1, + "placeholder": "Type something here", + "optional": true, + "id": "stickyNoteAgentflow_1-input-note-string", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "note": "Second update of state.query" + }, + "outputAnchors": [ + { + "id": "stickyNoteAgentflow_1-output-stickyNoteAgentflow", + "label": "Sticky Note", + "name": "stickyNoteAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "stickyNote", + "width": 189, + 
"height": 81, + "selected": false, + "positionAbsolute": { + "x": 923.4413972289242, + "y": 110.04672879978278 + }, + "dragging": false + } + ], + "edges": [ + { + "source": "conditionAgentAgentflow_0", + "sourceHandle": "conditionAgentAgentflow_0-output-0", + "target": "llmAgentflow_0", + "targetHandle": "llmAgentflow_0", + "data": { + "sourceColor": "#ff8fab", + "targetColor": "#64B5F6", + "edgeLabel": "0", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "conditionAgentAgentflow_0-conditionAgentAgentflow_0-output-0-llmAgentflow_0-llmAgentflow_0" + }, + { + "source": "conditionAgentAgentflow_0", + "sourceHandle": "conditionAgentAgentflow_0-output-1", + "target": "llmAgentflow_1", + "targetHandle": "llmAgentflow_1", + "data": { + "sourceColor": "#ff8fab", + "targetColor": "#64B5F6", + "edgeLabel": "1", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "conditionAgentAgentflow_0-conditionAgentAgentflow_0-output-1-llmAgentflow_1-llmAgentflow_1" + }, + { + "source": "startAgentflow_0", + "sourceHandle": "startAgentflow_0-output-startAgentflow", + "target": "conditionAgentAgentflow_0", + "targetHandle": "conditionAgentAgentflow_0", + "data": { + "sourceColor": "#7EE787", + "targetColor": "#ff8fab", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "startAgentflow_0-startAgentflow_0-output-startAgentflow-conditionAgentAgentflow_0-conditionAgentAgentflow_0" + }, + { + "source": "llmAgentflow_0", + "sourceHandle": "llmAgentflow_0-output-llmAgentflow", + "target": "retrieverAgentflow_0", + "targetHandle": "retrieverAgentflow_0", + "data": { + "sourceColor": "#64B5F6", + "targetColor": "#b8bedd", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "llmAgentflow_0-llmAgentflow_0-output-llmAgentflow-retrieverAgentflow_0-retrieverAgentflow_0" + }, + { + "source": "retrieverAgentflow_0", + "sourceHandle": "retrieverAgentflow_0-output-retrieverAgentflow", + "target": "conditionAgentAgentflow_1", + "targetHandle": 
"conditionAgentAgentflow_1", + "data": { + "sourceColor": "#b8bedd", + "targetColor": "#ff8fab", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "retrieverAgentflow_0-retrieverAgentflow_0-output-retrieverAgentflow-conditionAgentAgentflow_1-conditionAgentAgentflow_1" + }, + { + "source": "llmAgentflow_3", + "sourceHandle": "llmAgentflow_3-output-llmAgentflow", + "target": "loopAgentflow_0", + "targetHandle": "loopAgentflow_0", + "data": { + "sourceColor": "#64B5F6", + "targetColor": "#FFA07A", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "llmAgentflow_3-llmAgentflow_3-output-llmAgentflow-loopAgentflow_0-loopAgentflow_0" + }, + { + "source": "conditionAgentAgentflow_1", + "sourceHandle": "conditionAgentAgentflow_1-output-1", + "target": "llmAgentflow_3", + "targetHandle": "llmAgentflow_3", + "data": { + "sourceColor": "#ff8fab", + "targetColor": "#64B5F6", + "edgeLabel": "1", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "conditionAgentAgentflow_1-conditionAgentAgentflow_1-output-1-llmAgentflow_3-llmAgentflow_3" + }, + { + "source": "conditionAgentAgentflow_1", + "sourceHandle": "conditionAgentAgentflow_1-output-0", + "target": "llmAgentflow_2", + "targetHandle": "llmAgentflow_2", + "data": { + "sourceColor": "#ff8fab", + "targetColor": "#64B5F6", + "edgeLabel": "0", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "conditionAgentAgentflow_1-conditionAgentAgentflow_1-output-0-llmAgentflow_2-llmAgentflow_2" + } + ] +} diff --git a/packages/server/marketplaces/agentflowsv2/Agents Handoff.json b/packages/server/marketplaces/agentflowsv2/Agents Handoff.json new file mode 100644 index 000000000..72b4da969 --- /dev/null +++ b/packages/server/marketplaces/agentflowsv2/Agents Handoff.json @@ -0,0 +1,1474 @@ +{ + "description": "A customer support agent that can handoff tasks to different agents based on scenarios", + "usecases": ["Customer Support"], + "nodes": [ + { + "id": "startAgentflow_0", + "type": "agentFlow", + 
"position": { + "x": -162.58207424380598, + "y": 117.81335679543406 + }, + "data": { + "id": "startAgentflow_0", + "label": "Start", + "version": 1, + "name": "startAgentflow", + "type": "Start", + "color": "#7EE787", + "hideInput": true, + "baseClasses": ["Start"], + "category": "Agent Flows", + "description": "Starting point of the agentflow", + "inputParams": [ + { + "label": "Input Type", + "name": "startInputType", + "type": "options", + "options": [ + { + "label": "Chat Input", + "name": "chatInput", + "description": "Start the conversation with chat input" + }, + { + "label": "Form Input", + "name": "formInput", + "description": "Start the workflow with form inputs" + } + ], + "default": "chatInput", + "id": "startAgentflow_0-input-startInputType-options", + "display": true + }, + { + "label": "Form Title", + "name": "formTitle", + "type": "string", + "placeholder": "Please Fill Out The Form", + "show": { + "startInputType": "formInput" + }, + "id": "startAgentflow_0-input-formTitle-string", + "display": false + }, + { + "label": "Form Description", + "name": "formDescription", + "type": "string", + "placeholder": "Complete all fields below to continue", + "show": { + "startInputType": "formInput" + }, + "id": "startAgentflow_0-input-formDescription-string", + "display": false + }, + { + "label": "Form Input Types", + "name": "formInputTypes", + "description": "Specify the type of form input", + "type": "array", + "show": { + "startInputType": "formInput" + }, + "array": [ + { + "label": "Type", + "name": "type", + "type": "options", + "options": [ + { + "label": "String", + "name": "string" + }, + { + "label": "Number", + "name": "number" + }, + { + "label": "Boolean", + "name": "boolean" + }, + { + "label": "Options", + "name": "options" + } + ], + "default": "string" + }, + { + "label": "Label", + "name": "label", + "type": "string", + "placeholder": "Label for the input" + }, + { + "label": "Variable Name", + "name": "name", + "type": "string", + 
"placeholder": "Variable name for the input (must be camel case)", + "description": "Variable name must be camel case. For example: firstName, lastName, etc." + }, + { + "label": "Add Options", + "name": "addOptions", + "type": "array", + "show": { + "formInputTypes[$index].type": "options" + }, + "array": [ + { + "label": "Option", + "name": "option", + "type": "string" + } + ] + } + ], + "id": "startAgentflow_0-input-formInputTypes-array", + "display": false + }, + { + "label": "Ephemeral Memory", + "name": "startEphemeralMemory", + "type": "boolean", + "description": "Start fresh for every execution without past chat history", + "optional": true + }, + { + "label": "Flow State", + "name": "startState", + "description": "Runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "string", + "placeholder": "Foo" + }, + { + "label": "Value", + "name": "value", + "type": "string", + "placeholder": "Bar" + } + ], + "id": "startAgentflow_0-input-startState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "startInputType": "chatInput", + "formTitle": "", + "formDescription": "", + "formInputTypes": "", + "startState": "" + }, + "outputAnchors": [ + { + "id": "startAgentflow_0-output-startAgentflow", + "label": "Start", + "name": "startAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "width": 101, + "height": 65, + "selected": false, + "positionAbsolute": { + "x": -162.58207424380598, + "y": 117.81335679543406 + }, + "dragging": false + }, + { + "id": "conditionAgentAgentflow_0", + "position": { + "x": -11.580228601760105, + "y": 99.42548336780041 + }, + "data": { + "id": "conditionAgentAgentflow_0", + "label": "Detect User Intention", + "version": 1, + "name": "conditionAgentAgentflow", + "type": "ConditionAgent", + "color": "#ff8fab", + "baseClasses": ["ConditionAgent"], + "category": "Agent Flows", + "description": "Utilize an agent to 
split flows based on dynamic conditions", + "inputParams": [ + { + "label": "Model", + "name": "conditionAgentModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "conditionAgentAgentflow_0-input-conditionAgentModel-asyncOptions", + "display": true + }, + { + "label": "Instructions", + "name": "conditionAgentInstructions", + "type": "string", + "description": "A general instructions of what the condition agent should do", + "rows": 4, + "acceptVariable": true, + "placeholder": "Determine if the user is interested in learning about AI", + "id": "conditionAgentAgentflow_0-input-conditionAgentInstructions-string", + "display": true + }, + { + "label": "Input", + "name": "conditionAgentInput", + "type": "string", + "description": "Input to be used for the condition agent", + "rows": 4, + "acceptVariable": true, + "default": "

{{ question }}

", + "id": "conditionAgentAgentflow_0-input-conditionAgentInput-string", + "display": true + }, + { + "label": "Scenarios", + "name": "conditionAgentScenarios", + "description": "Define the scenarios that will be used as the conditions to split the flow", + "type": "array", + "array": [ + { + "label": "Scenario", + "name": "scenario", + "type": "string", + "placeholder": "User is asking for a pizza" + } + ], + "default": [ + { + "scenario": "User is asking for refund" + }, + { + "scenario": "User is looking for item" + } + ], + "id": "conditionAgentAgentflow_0-input-conditionAgentScenarios-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "conditionAgentModel": "chatOpenAI", + "conditionAgentInstructions": "

You are a customer support agent for ACME Inc.

Follow the following routine with the user:

1. First, greet the user and see how you can help the user

2. If the user is looking for items, hand off to the Sales Agent

3. If the user is looking for a refund, hand off to the Refund Agent

4. If the user is asking a general query, be helpful and answer the query

Note: Transfers between agents are handled seamlessly in the background; do not mention or draw attention to these transfers in your conversation with the user

", + "conditionAgentInput": "

{{ question }}

", + "conditionAgentScenarios": [ + { + "scenario": "User is asking for refund" + }, + { + "scenario": "User is looking for item" + }, + { + "scenario": "User is chatting casually or asking general question" + } + ], + "conditionAgentModelConfig": { + "cache": "", + "modelName": "gpt-4o-mini", + "temperature": 0.9, + "streaming": true, + "maxTokens": "", + "topP": "", + "frequencyPenalty": "", + "presencePenalty": "", + "timeout": "", + "strictToolCalling": "", + "stopSequence": "", + "basepath": "", + "proxyUrl": "", + "baseOptions": "", + "allowImageUploads": true, + "reasoningEffort": "medium", + "conditionAgentModel": "chatOpenAI" + } + }, + "outputAnchors": [ + { + "id": "conditionAgentAgentflow_0-output-0", + "label": 0, + "name": 0, + "description": "Condition 0" + }, + { + "id": "conditionAgentAgentflow_0-output-1", + "label": 1, + "name": 1, + "description": "Condition 1" + }, + { + "id": "conditionAgentAgentflow_0-output-2", + "label": 2, + "name": 2, + "description": "Condition 2" + } + ], + "outputs": { + "conditionAgentAgentflow": "" + }, + "selected": false + }, + "type": "agentFlow", + "width": 200, + "height": 100, + "selected": false, + "positionAbsolute": { + "x": -11.580228601760105, + "y": 99.42548336780041 + }, + "dragging": false + }, + { + "id": "agentAgentflow_0", + "position": { + "x": 253.4811075082052, + "y": 17.0330403645183 + }, + "data": { + "id": "agentAgentflow_0", + "label": "Refund Agent", + "version": 1, + "name": "agentAgentflow", + "type": "Agent", + "color": "#4DD0E1", + "baseClasses": ["Agent"], + "category": "Agent Flows", + "description": "Dynamically choose and utilize tools during runtime, enabling multi-step reasoning", + "inputParams": [ + { + "label": "Model", + "name": "agentModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "agentAgentflow_0-input-agentModel-asyncOptions", + "display": true + }, + { + "label": "Messages", + "name": "agentMessages", + "type": "array", + 
"optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Role", + "name": "role", + "type": "options", + "options": [ + { + "label": "System", + "name": "system" + }, + { + "label": "Assistant", + "name": "assistant" + }, + { + "label": "Developer", + "name": "developer" + }, + { + "label": "User", + "name": "user" + } + ] + }, + { + "label": "Content", + "name": "content", + "type": "string", + "acceptVariable": true, + "generateInstruction": true, + "rows": 4 + } + ], + "id": "agentAgentflow_0-input-agentMessages-array", + "display": true + }, + { + "label": "Tools", + "name": "agentTools", + "type": "array", + "optional": true, + "array": [ + { + "label": "Tool", + "name": "agentSelectedTool", + "type": "asyncOptions", + "loadMethod": "listTools", + "loadConfig": true + }, + { + "label": "Require Human Input", + "name": "agentSelectedToolRequiresHumanInput", + "type": "boolean", + "optional": true + } + ], + "id": "agentAgentflow_0-input-agentTools-array", + "display": true + }, + { + "label": "Knowledge (Document Stores)", + "name": "agentKnowledgeDocumentStores", + "type": "array", + "description": "Give your agent context about different document sources. 
Document stores must be upserted in advance.", + "array": [ + { + "label": "Document Store", + "name": "documentStore", + "type": "asyncOptions", + "loadMethod": "listStores" + }, + { + "label": "Describe Knowledge", + "name": "docStoreDescription", + "type": "string", + "generateDocStoreDescription": true, + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_0-input-agentKnowledgeDocumentStores-array", + "display": true + }, + { + "label": "Knowledge (Vector Embeddings)", + "name": "agentKnowledgeVSEmbeddings", + "type": "array", + "description": "Give your agent context about different document sources from existing vector stores and embeddings", + "array": [ + { + "label": "Vector Store", + "name": "vectorStore", + "type": "asyncOptions", + "loadMethod": "listVectorStores", + "loadConfig": true + }, + { + "label": "Embedding Model", + "name": "embeddingModel", + "type": "asyncOptions", + "loadMethod": "listEmbeddings", + "loadConfig": true + }, + { + "label": "Knowledge Name", + "name": "knowledgeName", + "type": "string", + "placeholder": "A short name for the knowledge base, this is useful for the AI to know when and how to search for correct information" + }, + { + "label": "Describe Knowledge", + "name": "knowledgeDescription", + "type": "string", + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_0-input-agentKnowledgeVSEmbeddings-array", + "display": true + }, + { + "label": "Enable Memory", + "name": 
"agentEnableMemory", + "type": "boolean", + "description": "Enable memory for the conversation thread", + "default": true, + "optional": true, + "id": "agentAgentflow_0-input-agentEnableMemory-boolean", + "display": true + }, + { + "label": "Memory Type", + "name": "agentMemoryType", + "type": "options", + "options": [ + { + "label": "All Messages", + "name": "allMessages", + "description": "Retrieve all messages from the conversation" + }, + { + "label": "Window Size", + "name": "windowSize", + "description": "Uses a fixed window size to surface the last N messages" + }, + { + "label": "Conversation Summary", + "name": "conversationSummary", + "description": "Summarizes the whole conversation" + }, + { + "label": "Conversation Summary Buffer", + "name": "conversationSummaryBuffer", + "description": "Summarize conversations once token limit is reached. Default to 2000" + } + ], + "optional": true, + "default": "allMessages", + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_0-input-agentMemoryType-options", + "display": true + }, + { + "label": "Window Size", + "name": "agentMemoryWindowSize", + "type": "number", + "default": "20", + "description": "Uses a fixed window size to surface the last N messages", + "show": { + "agentMemoryType": "windowSize" + }, + "id": "agentAgentflow_0-input-agentMemoryWindowSize-number", + "display": false + }, + { + "label": "Max Token Limit", + "name": "agentMemoryMaxTokenLimit", + "type": "number", + "default": "2000", + "description": "Summarize conversations once token limit is reached. 
Default to 2000", + "show": { + "agentMemoryType": "conversationSummaryBuffer" + }, + "id": "agentAgentflow_0-input-agentMemoryMaxTokenLimit-number", + "display": false + }, + { + "label": "Input Message", + "name": "agentUserMessage", + "type": "string", + "description": "Add an input message as user message at the end of the conversation", + "rows": 4, + "optional": true, + "acceptVariable": true, + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_0-input-agentUserMessage-string", + "display": true + }, + { + "label": "Return Response As", + "name": "agentReturnResponseAs", + "type": "options", + "options": [ + { + "label": "User Message", + "name": "userMessage" + }, + { + "label": "Assistant Message", + "name": "assistantMessage" + } + ], + "default": "userMessage", + "id": "agentAgentflow_0-input-agentReturnResponseAs-options", + "display": true + }, + { + "label": "Update Flow State", + "name": "agentUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": "string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "agentAgentflow_0-input-agentUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "agentModel": "chatGoogleGenerativeAI", + "agentMessages": [ + { + "role": "system", + "content": "

You are a refund agent. Help the user with refunds.

" + } + ], + "agentTools": "", + "agentKnowledgeDocumentStores": "", + "agentEnableMemory": true, + "agentMemoryType": "allMessages", + "agentUserMessage": "", + "agentReturnResponseAs": "userMessage", + "agentUpdateState": "", + "agentModelConfig": { + "credential": "", + "modelName": "gemini-2.0-flash", + "customModelName": "", + "temperature": 0.9, + "streaming": true, + "maxOutputTokens": "", + "topP": "", + "topK": "", + "harmCategory": "", + "harmBlockThreshold": "", + "allowImageUploads": "", + "agentModel": "chatGoogleGenerativeAI" + } + }, + "outputAnchors": [ + { + "id": "agentAgentflow_0-output-agentAgentflow", + "label": "Agent", + "name": "agentAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 191, + "height": 71, + "selected": false, + "positionAbsolute": { + "x": 253.4811075082052, + "y": 17.0330403645183 + }, + "dragging": false + }, + { + "id": "agentAgentflow_1", + "position": { + "x": 253.74384888466125, + "y": 113.94007038630222 + }, + "data": { + "id": "agentAgentflow_1", + "label": "Sales Agent", + "version": 1, + "name": "agentAgentflow", + "type": "Agent", + "color": "#4DD0E1", + "baseClasses": ["Agent"], + "category": "Agent Flows", + "description": "Dynamically choose and utilize tools during runtime, enabling multi-step reasoning", + "inputParams": [ + { + "label": "Model", + "name": "agentModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "agentAgentflow_1-input-agentModel-asyncOptions", + "display": true + }, + { + "label": "Messages", + "name": "agentMessages", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Role", + "name": "role", + "type": "options", + "options": [ + { + "label": "System", + "name": "system" + }, + { + "label": "Assistant", + "name": "assistant" + }, + { + "label": "Developer", + "name": "developer" + }, + { + "label": "User", + "name": "user" + } + ] + }, + { + "label": "Content", + 
"name": "content", + "type": "string", + "acceptVariable": true, + "generateInstruction": true, + "rows": 4 + } + ], + "id": "agentAgentflow_1-input-agentMessages-array", + "display": true + }, + { + "label": "Tools", + "name": "agentTools", + "type": "array", + "optional": true, + "array": [ + { + "label": "Tool", + "name": "agentSelectedTool", + "type": "asyncOptions", + "loadMethod": "listTools", + "loadConfig": true + }, + { + "label": "Require Human Input", + "name": "agentSelectedToolRequiresHumanInput", + "type": "boolean", + "optional": true + } + ], + "id": "agentAgentflow_1-input-agentTools-array", + "display": true + }, + { + "label": "Knowledge (Document Stores)", + "name": "agentKnowledgeDocumentStores", + "type": "array", + "description": "Give your agent context about different document sources. Document stores must be upserted in advance.", + "array": [ + { + "label": "Document Store", + "name": "documentStore", + "type": "asyncOptions", + "loadMethod": "listStores" + }, + { + "label": "Describe Knowledge", + "name": "docStoreDescription", + "type": "string", + "generateDocStoreDescription": true, + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_1-input-agentKnowledgeDocumentStores-array", + "display": true + }, + { + "label": "Knowledge (Vector Embeddings)", + "name": "agentKnowledgeVSEmbeddings", + "type": "array", + "description": "Give your agent context about different document sources from existing vector stores and embeddings", + "array": [ + { + "label": "Vector Store", + "name": "vectorStore", + "type": "asyncOptions", + "loadMethod": "listVectorStores", + "loadConfig": true + }, + { + "label": "Embedding Model", + "name": "embeddingModel", + "type": 
"asyncOptions", + "loadMethod": "listEmbeddings", + "loadConfig": true + }, + { + "label": "Knowledge Name", + "name": "knowledgeName", + "type": "string", + "placeholder": "A short name for the knowledge base, this is useful for the AI to know when and how to search for correct information" + }, + { + "label": "Describe Knowledge", + "name": "knowledgeDescription", + "type": "string", + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_1-input-agentKnowledgeVSEmbeddings-array", + "display": true + }, + { + "label": "Enable Memory", + "name": "agentEnableMemory", + "type": "boolean", + "description": "Enable memory for the conversation thread", + "default": true, + "optional": true, + "id": "agentAgentflow_1-input-agentEnableMemory-boolean", + "display": true + }, + { + "label": "Memory Type", + "name": "agentMemoryType", + "type": "options", + "options": [ + { + "label": "All Messages", + "name": "allMessages", + "description": "Retrieve all messages from the conversation" + }, + { + "label": "Window Size", + "name": "windowSize", + "description": "Uses a fixed window size to surface the last N messages" + }, + { + "label": "Conversation Summary", + "name": "conversationSummary", + "description": "Summarizes the whole conversation" + }, + { + "label": "Conversation Summary Buffer", + "name": "conversationSummaryBuffer", + "description": "Summarize conversations once token limit is reached. 
Default to 2000" + } + ], + "optional": true, + "default": "allMessages", + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_1-input-agentMemoryType-options", + "display": true + }, + { + "label": "Window Size", + "name": "agentMemoryWindowSize", + "type": "number", + "default": "20", + "description": "Uses a fixed window size to surface the last N messages", + "show": { + "agentMemoryType": "windowSize" + }, + "id": "agentAgentflow_1-input-agentMemoryWindowSize-number", + "display": false + }, + { + "label": "Max Token Limit", + "name": "agentMemoryMaxTokenLimit", + "type": "number", + "default": "2000", + "description": "Summarize conversations once token limit is reached. Default to 2000", + "show": { + "agentMemoryType": "conversationSummaryBuffer" + }, + "id": "agentAgentflow_1-input-agentMemoryMaxTokenLimit-number", + "display": false + }, + { + "label": "Input Message", + "name": "agentUserMessage", + "type": "string", + "description": "Add an input message as user message at the end of the conversation", + "rows": 4, + "optional": true, + "acceptVariable": true, + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_1-input-agentUserMessage-string", + "display": true + }, + { + "label": "Return Response As", + "name": "agentReturnResponseAs", + "type": "options", + "options": [ + { + "label": "User Message", + "name": "userMessage" + }, + { + "label": "Assistant Message", + "name": "assistantMessage" + } + ], + "default": "userMessage", + "id": "agentAgentflow_1-input-agentReturnResponseAs-options", + "display": true + }, + { + "label": "Update Flow State", + "name": "agentUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": 
"string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "agentAgentflow_1-input-agentUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "agentModel": "chatAnthropic", + "agentMessages": [ + { + "role": "system", + "content": "

You are a sales assistant. Help user search for the product.

" + } + ], + "agentTools": [ + { + "agentSelectedTool": "googleCustomSearch", + "agentSelectedToolConfig": { + "agentSelectedTool": "googleCustomSearch" + } + } + ], + "agentKnowledgeDocumentStores": "", + "agentEnableMemory": true, + "agentMemoryType": "allMessages", + "agentUserMessage": "", + "agentReturnResponseAs": "userMessage", + "agentUpdateState": "", + "agentModelConfig": { + "credential": "", + "modelName": "claude-3-7-sonnet-latest", + "temperature": 0.9, + "streaming": true, + "maxTokensToSample": "", + "topP": "", + "topK": "", + "extendedThinking": "", + "budgetTokens": 1024, + "allowImageUploads": "", + "agentModel": "chatAnthropic" + } + }, + "outputAnchors": [ + { + "id": "agentAgentflow_1-output-agentAgentflow", + "label": "Agent", + "name": "agentAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 231, + "height": 103, + "selected": false, + "positionAbsolute": { + "x": 253.74384888466125, + "y": 113.94007038630222 + }, + "dragging": false + }, + { + "id": "agentAgentflow_2", + "position": { + "x": 250.2139715995238, + "y": 234.20808458654034 + }, + "data": { + "id": "agentAgentflow_2", + "label": "General Agent", + "version": 1, + "name": "agentAgentflow", + "type": "Agent", + "color": "#4DD0E1", + "baseClasses": ["Agent"], + "category": "Agent Flows", + "description": "Dynamically choose and utilize tools during runtime, enabling multi-step reasoning", + "inputParams": [ + { + "label": "Model", + "name": "agentModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "agentAgentflow_2-input-agentModel-asyncOptions", + "display": true + }, + { + "label": "Messages", + "name": "agentMessages", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Role", + "name": "role", + "type": "options", + "options": [ + { + "label": "System", + "name": "system" + }, + { + "label": "Assistant", + "name": "assistant" + }, + { + "label": 
"Developer", + "name": "developer" + }, + { + "label": "User", + "name": "user" + } + ] + }, + { + "label": "Content", + "name": "content", + "type": "string", + "acceptVariable": true, + "generateInstruction": true, + "rows": 4 + } + ], + "id": "agentAgentflow_2-input-agentMessages-array", + "display": true + }, + { + "label": "Tools", + "name": "agentTools", + "type": "array", + "optional": true, + "array": [ + { + "label": "Tool", + "name": "agentSelectedTool", + "type": "asyncOptions", + "loadMethod": "listTools", + "loadConfig": true + }, + { + "label": "Require Human Input", + "name": "agentSelectedToolRequiresHumanInput", + "type": "boolean", + "optional": true + } + ], + "id": "agentAgentflow_2-input-agentTools-array", + "display": true + }, + { + "label": "Knowledge (Document Stores)", + "name": "agentKnowledgeDocumentStores", + "type": "array", + "description": "Give your agent context about different document sources. Document stores must be upserted in advance.", + "array": [ + { + "label": "Document Store", + "name": "documentStore", + "type": "asyncOptions", + "loadMethod": "listStores" + }, + { + "label": "Describe Knowledge", + "name": "docStoreDescription", + "type": "string", + "generateDocStoreDescription": true, + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_2-input-agentKnowledgeDocumentStores-array", + "display": true + }, + { + "label": "Knowledge (Vector Embeddings)", + "name": "agentKnowledgeVSEmbeddings", + "type": "array", + "description": "Give your agent context about different document sources from existing vector stores and embeddings", + "array": [ + { + "label": "Vector Store", + "name": "vectorStore", + "type": "asyncOptions", + "loadMethod": 
"listVectorStores", + "loadConfig": true + }, + { + "label": "Embedding Model", + "name": "embeddingModel", + "type": "asyncOptions", + "loadMethod": "listEmbeddings", + "loadConfig": true + }, + { + "label": "Knowledge Name", + "name": "knowledgeName", + "type": "string", + "placeholder": "A short name for the knowledge base, this is useful for the AI to know when and how to search for correct information" + }, + { + "label": "Describe Knowledge", + "name": "knowledgeDescription", + "type": "string", + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_2-input-agentKnowledgeVSEmbeddings-array", + "display": true + }, + { + "label": "Enable Memory", + "name": "agentEnableMemory", + "type": "boolean", + "description": "Enable memory for the conversation thread", + "default": true, + "optional": true, + "id": "agentAgentflow_2-input-agentEnableMemory-boolean", + "display": true + }, + { + "label": "Memory Type", + "name": "agentMemoryType", + "type": "options", + "options": [ + { + "label": "All Messages", + "name": "allMessages", + "description": "Retrieve all messages from the conversation" + }, + { + "label": "Window Size", + "name": "windowSize", + "description": "Uses a fixed window size to surface the last N messages" + }, + { + "label": "Conversation Summary", + "name": "conversationSummary", + "description": "Summarizes the whole conversation" + }, + { + "label": "Conversation Summary Buffer", + "name": "conversationSummaryBuffer", + "description": "Summarize conversations once token limit is reached. 
Default to 2000" + } + ], + "optional": true, + "default": "allMessages", + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_2-input-agentMemoryType-options", + "display": true + }, + { + "label": "Window Size", + "name": "agentMemoryWindowSize", + "type": "number", + "default": "20", + "description": "Uses a fixed window size to surface the last N messages", + "show": { + "agentMemoryType": "windowSize" + }, + "id": "agentAgentflow_2-input-agentMemoryWindowSize-number", + "display": false + }, + { + "label": "Max Token Limit", + "name": "agentMemoryMaxTokenLimit", + "type": "number", + "default": "2000", + "description": "Summarize conversations once token limit is reached. Default to 2000", + "show": { + "agentMemoryType": "conversationSummaryBuffer" + }, + "id": "agentAgentflow_2-input-agentMemoryMaxTokenLimit-number", + "display": false + }, + { + "label": "Input Message", + "name": "agentUserMessage", + "type": "string", + "description": "Add an input message as user message at the end of the conversation", + "rows": 4, + "optional": true, + "acceptVariable": true, + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_2-input-agentUserMessage-string", + "display": true + }, + { + "label": "Return Response As", + "name": "agentReturnResponseAs", + "type": "options", + "options": [ + { + "label": "User Message", + "name": "userMessage" + }, + { + "label": "Assistant Message", + "name": "assistantMessage" + } + ], + "default": "userMessage", + "id": "agentAgentflow_2-input-agentReturnResponseAs-options", + "display": true + }, + { + "label": "Update Flow State", + "name": "agentUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": 
"string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "agentAgentflow_2-input-agentUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "agentModel": "groqChat", + "agentMessages": [ + { + "role": "system", + "content": "

You are a helpful assistant

" + } + ], + "agentTools": "", + "agentKnowledgeDocumentStores": "", + "agentEnableMemory": true, + "agentMemoryType": "allMessages", + "agentUserMessage": "", + "agentReturnResponseAs": "userMessage", + "agentUpdateState": "", + "agentModelConfig": { + "credential": "", + "modelName": "llama-3.2-3b-preview", + "temperature": 0.9, + "streaming": true, + "agentModel": "groqChat" + } + }, + "outputAnchors": [ + { + "id": "agentAgentflow_2-output-agentAgentflow", + "label": "Agent", + "name": "agentAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 214, + "height": 71, + "selected": false, + "positionAbsolute": { + "x": 250.2139715995238, + "y": 234.20808458654034 + }, + "dragging": false + }, + { + "id": "stickyNoteAgentflow_0", + "position": { + "x": 246.81594867785896, + "y": -103.07943752447065 + }, + "data": { + "id": "stickyNoteAgentflow_0", + "label": "Sticky Note", + "version": 1, + "name": "stickyNoteAgentflow", + "type": "StickyNote", + "color": "#fee440", + "baseClasses": ["StickyNote"], + "category": "Agent Flows", + "description": "Add notes to the agent flow", + "inputParams": [ + { + "label": "", + "name": "note", + "type": "string", + "rows": 1, + "placeholder": "Type something here", + "optional": true, + "id": "stickyNoteAgentflow_0-input-note-string", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "note": "We can improve this by adding necessary tools for agents" + }, + "outputAnchors": [ + { + "id": "stickyNoteAgentflow_0-output-stickyNoteAgentflow", + "label": "Sticky Note", + "name": "stickyNoteAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "stickyNote", + "width": 189, + "height": 101, + "selected": false, + "positionAbsolute": { + "x": 246.81594867785896, + "y": -103.07943752447065 + }, + "dragging": false + } + ], + "edges": [ + { + "source": "startAgentflow_0", + "sourceHandle": "startAgentflow_0-output-startAgentflow", + "target": 
"conditionAgentAgentflow_0", + "targetHandle": "conditionAgentAgentflow_0", + "data": { + "sourceColor": "#7EE787", + "targetColor": "#ff8fab", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "startAgentflow_0-startAgentflow_0-output-startAgentflow-conditionAgentAgentflow_0-conditionAgentAgentflow_0" + }, + { + "source": "conditionAgentAgentflow_0", + "sourceHandle": "conditionAgentAgentflow_0-output-0", + "target": "agentAgentflow_0", + "targetHandle": "agentAgentflow_0", + "data": { + "sourceColor": "#ff8fab", + "targetColor": "#4DD0E1", + "edgeLabel": "0", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "conditionAgentAgentflow_0-conditionAgentAgentflow_0-output-0-agentAgentflow_0-agentAgentflow_0" + }, + { + "source": "conditionAgentAgentflow_0", + "sourceHandle": "conditionAgentAgentflow_0-output-1", + "target": "agentAgentflow_1", + "targetHandle": "agentAgentflow_1", + "data": { + "sourceColor": "#ff8fab", + "targetColor": "#4DD0E1", + "edgeLabel": "1", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "conditionAgentAgentflow_0-conditionAgentAgentflow_0-output-1-agentAgentflow_1-agentAgentflow_1" + }, + { + "source": "conditionAgentAgentflow_0", + "sourceHandle": "conditionAgentAgentflow_0-output-2", + "target": "agentAgentflow_2", + "targetHandle": "agentAgentflow_2", + "data": { + "sourceColor": "#ff8fab", + "targetColor": "#4DD0E1", + "edgeLabel": "2", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "conditionAgentAgentflow_0-conditionAgentAgentflow_0-output-2-agentAgentflow_2-agentAgentflow_2" + } + ] +} diff --git a/packages/server/marketplaces/agentflowsv2/Deep Research V2.json b/packages/server/marketplaces/agentflowsv2/Deep Research V2.json new file mode 100644 index 000000000..bd58656b8 --- /dev/null +++ b/packages/server/marketplaces/agentflowsv2/Deep Research V2.json @@ -0,0 +1,2142 @@ +{ + "description": "An agent capable of performing research, synthesizing information, and generating in-depth, 
well-structured white papers on any given topic", + "usecases": ["Agent"], + "nodes": [ + { + "id": "startAgentflow_0", + "type": "agentFlow", + "position": { + "x": -275.0799323960054, + "y": 31.301887150099603 + }, + "data": { + "id": "startAgentflow_0", + "label": "Start", + "version": 1, + "name": "startAgentflow", + "type": "Start", + "color": "#7EE787", + "hideInput": true, + "baseClasses": ["Start"], + "category": "Agent Flows", + "description": "Starting point of the agentflow", + "inputParams": [ + { + "label": "Input Type", + "name": "startInputType", + "type": "options", + "options": [ + { + "label": "Chat Input", + "name": "chatInput", + "description": "Start the conversation with chat input" + }, + { + "label": "Form Input", + "name": "formInput", + "description": "Start the workflow with form inputs" + } + ], + "default": "chatInput", + "id": "startAgentflow_0-input-startInputType-options", + "display": true + }, + { + "label": "Form Title", + "name": "formTitle", + "type": "string", + "placeholder": "Please Fill Out The Form", + "show": { + "startInputType": "formInput" + }, + "id": "startAgentflow_0-input-formTitle-string", + "display": false + }, + { + "label": "Form Description", + "name": "formDescription", + "type": "string", + "placeholder": "Complete all fields below to continue", + "show": { + "startInputType": "formInput" + }, + "id": "startAgentflow_0-input-formDescription-string", + "display": false + }, + { + "label": "Form Input Types", + "name": "formInputTypes", + "description": "Specify the type of form input", + "type": "array", + "show": { + "startInputType": "formInput" + }, + "array": [ + { + "label": "Type", + "name": "type", + "type": "options", + "options": [ + { + "label": "String", + "name": "string" + }, + { + "label": "Number", + "name": "number" + }, + { + "label": "Boolean", + "name": "boolean" + }, + { + "label": "Options", + "name": "options" + } + ], + "default": "string" + }, + { + "label": "Label", + "name": "label", 
+ "type": "string", + "placeholder": "Label for the input" + }, + { + "label": "Variable Name", + "name": "name", + "type": "string", + "placeholder": "Variable name for the input (must be camel case)", + "description": "Variable name must be camel case. For example: firstName, lastName, etc." + }, + { + "label": "Add Options", + "name": "addOptions", + "type": "array", + "show": { + "formInputTypes[$index].type": "options" + }, + "array": [ + { + "label": "Option", + "name": "option", + "type": "string" + } + ] + } + ], + "id": "startAgentflow_0-input-formInputTypes-array", + "display": false + }, + { + "label": "Ephemeral Memory", + "name": "startEphemeralMemory", + "type": "boolean", + "description": "Start fresh for every execution without past chat history", + "optional": true, + "id": "startAgentflow_0-input-startEphemeralMemory-boolean", + "display": true + }, + { + "label": "Flow State", + "name": "startState", + "description": "Runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "string", + "placeholder": "Foo" + }, + { + "label": "Value", + "name": "value", + "type": "string", + "placeholder": "Bar", + "optional": true + } + ], + "id": "startAgentflow_0-input-startState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "startInputType": "chatInput", + "startEphemeralMemory": true, + "startState": "" + }, + "outputAnchors": [ + { + "id": "startAgentflow_0-output-startAgentflow", + "label": "Start", + "name": "startAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "width": 103, + "height": 65, + "selected": false, + "positionAbsolute": { + "x": -275.0799323960054, + "y": 31.301887150099603 + }, + "dragging": false + }, + { + "id": "llmAgentflow_0", + "position": { + "x": -59.13383952997965, + "y": 28.495983624910906 + }, + "data": { + "id": "llmAgentflow_0", + "label": "Topic Enhancer", + "version": 1, + "name": 
"llmAgentflow", + "type": "LLM", + "color": "#64B5F6", + "baseClasses": ["LLM"], + "category": "Agent Flows", + "description": "Large language models to analyze user-provided inputs and generate responses", + "inputParams": [ + { + "label": "Model", + "name": "llmModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "llmAgentflow_0-input-llmModel-asyncOptions", + "display": true + }, + { + "label": "Messages", + "name": "llmMessages", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Role", + "name": "role", + "type": "options", + "options": [ + { + "label": "System", + "name": "system" + }, + { + "label": "Assistant", + "name": "assistant" + }, + { + "label": "Developer", + "name": "developer" + }, + { + "label": "User", + "name": "user" + } + ] + }, + { + "label": "Content", + "name": "content", + "type": "string", + "acceptVariable": true, + "generateInstruction": true, + "rows": 4 + } + ], + "id": "llmAgentflow_0-input-llmMessages-array", + "display": true + }, + { + "label": "Enable Memory", + "name": "llmEnableMemory", + "type": "boolean", + "description": "Enable memory for the conversation thread", + "default": true, + "optional": true, + "id": "llmAgentflow_0-input-llmEnableMemory-boolean", + "display": true + }, + { + "label": "Memory Type", + "name": "llmMemoryType", + "type": "options", + "options": [ + { + "label": "All Messages", + "name": "allMessages", + "description": "Retrieve all messages from the conversation" + }, + { + "label": "Window Size", + "name": "windowSize", + "description": "Uses a fixed window size to surface the last N messages" + }, + { + "label": "Conversation Summary", + "name": "conversationSummary", + "description": "Summarizes the whole conversation" + }, + { + "label": "Conversation Summary Buffer", + "name": "conversationSummaryBuffer", + "description": "Summarize conversations once token limit is reached. 
Default to 2000" + } + ], + "optional": true, + "default": "allMessages", + "show": { + "llmEnableMemory": true + }, + "id": "llmAgentflow_0-input-llmMemoryType-options", + "display": false + }, + { + "label": "Window Size", + "name": "llmMemoryWindowSize", + "type": "number", + "default": "20", + "description": "Uses a fixed window size to surface the last N messages", + "show": { + "llmMemoryType": "windowSize" + }, + "id": "llmAgentflow_0-input-llmMemoryWindowSize-number", + "display": false + }, + { + "label": "Max Token Limit", + "name": "llmMemoryMaxTokenLimit", + "type": "number", + "default": "2000", + "description": "Summarize conversations once token limit is reached. Default to 2000", + "show": { + "llmMemoryType": "conversationSummaryBuffer" + }, + "id": "llmAgentflow_0-input-llmMemoryMaxTokenLimit-number", + "display": false + }, + { + "label": "Input Message", + "name": "llmUserMessage", + "type": "string", + "description": "Add an input message as user message at the end of the conversation", + "rows": 4, + "optional": true, + "acceptVariable": true, + "show": { + "llmEnableMemory": true + }, + "id": "llmAgentflow_0-input-llmUserMessage-string", + "display": false + }, + { + "label": "Return Response As", + "name": "llmReturnResponseAs", + "type": "options", + "options": [ + { + "label": "User Message", + "name": "userMessage" + }, + { + "label": "Assistant Message", + "name": "assistantMessage" + } + ], + "default": "userMessage", + "id": "llmAgentflow_0-input-llmReturnResponseAs-options", + "display": true + }, + { + "label": "JSON Structured Output", + "name": "llmStructuredOutput", + "description": "Instruct the LLM to give output in a JSON structured schema", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "string" + }, + { + "label": "Type", + "name": "type", + "type": "options", + "options": [ + { + "label": "String", + "name": "string" + }, + { + "label": "String 
Array", + "name": "stringArray" + }, + { + "label": "Number", + "name": "number" + }, + { + "label": "Boolean", + "name": "boolean" + }, + { + "label": "Enum", + "name": "enum" + }, + { + "label": "JSON Array", + "name": "jsonArray" + } + ] + }, + { + "label": "Enum Values", + "name": "enumValues", + "type": "string", + "placeholder": "value1, value2, value3", + "description": "Enum values. Separated by comma", + "optional": true, + "show": { + "llmStructuredOutput[$index].type": "enum" + } + }, + { + "label": "JSON Schema", + "name": "jsonSchema", + "type": "code", + "placeholder": "{\n \"answer\": {\n \"type\": \"string\",\n \"description\": \"Value of the answer\"\n },\n \"reason\": {\n \"type\": \"string\",\n \"description\": \"Reason for the answer\"\n },\n \"optional\": {\n \"type\": \"boolean\"\n },\n \"count\": {\n \"type\": \"number\"\n },\n \"children\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"object\",\n \"properties\": {\n \"value\": {\n \"type\": \"string\",\n \"description\": \"Value of the children's answer\"\n }\n }\n }\n }\n}", + "description": "JSON schema for the structured output", + "optional": true, + "show": { + "llmStructuredOutput[$index].type": "jsonArray" + } + }, + { + "label": "Description", + "name": "description", + "type": "string", + "placeholder": "Description of the key" + } + ], + "id": "llmAgentflow_0-input-llmStructuredOutput-array", + "display": true + }, + { + "label": "Update Flow State", + "name": "llmUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": "string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "llmAgentflow_0-input-llmUpdateState-array", + "display": true + } + ], + 
"inputAnchors": [], + "inputs": { + "llmModel": "chatOpenAI", + "llmMessages": [ + { + "role": "developer", + "content": "

Your only role is to improve the user query for more clarity. Do not add any meta comments.

" + } + ], + "llmEnableMemory": false, + "llmReturnResponseAs": "userMessage", + "llmStructuredOutput": "", + "llmUpdateState": "", + "llmModelConfig": { + "cache": "", + "modelName": "gpt-4o-mini", + "temperature": "0.5", + "streaming": true, + "maxTokens": "", + "topP": "", + "frequencyPenalty": "", + "presencePenalty": "", + "timeout": "", + "strictToolCalling": "", + "stopSequence": "", + "basepath": "", + "proxyUrl": "", + "baseOptions": "", + "allowImageUploads": "", + "imageResolution": "low", + "reasoningEffort": "medium", + "llmModel": "chatOpenAI" + } + }, + "outputAnchors": [ + { + "id": "llmAgentflow_0-output-llmAgentflow", + "label": "LLM", + "name": "llmAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 175, + "height": 71, + "selected": false, + "positionAbsolute": { + "x": -59.13383952997965, + "y": 28.495983624910906 + }, + "dragging": false + }, + { + "id": "agentAgentflow_0", + "position": { + "x": 209.99147630894493, + "y": 100.7933285478893 + }, + "data": { + "id": "agentAgentflow_0", + "label": "Agent 0", + "version": 1, + "name": "agentAgentflow", + "type": "Agent", + "color": "#4DD0E1", + "baseClasses": ["Agent"], + "category": "Agent Flows", + "description": "Dynamically choose and utilize tools during runtime, enabling multi-step reasoning", + "inputParams": [ + { + "label": "Model", + "name": "agentModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "agentAgentflow_0-input-agentModel-asyncOptions", + "display": true + }, + { + "label": "Messages", + "name": "agentMessages", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Role", + "name": "role", + "type": "options", + "options": [ + { + "label": "System", + "name": "system" + }, + { + "label": "Assistant", + "name": "assistant" + }, + { + "label": "Developer", + "name": "developer" + }, + { + "label": "User", + "name": "user" + } + ] + }, + { + "label": 
"Content", + "name": "content", + "type": "string", + "acceptVariable": true, + "generateInstruction": true, + "rows": 4 + } + ], + "id": "agentAgentflow_0-input-agentMessages-array", + "display": true + }, + { + "label": "Tools", + "name": "agentTools", + "type": "array", + "optional": true, + "array": [ + { + "label": "Tool", + "name": "agentSelectedTool", + "type": "asyncOptions", + "loadMethod": "listTools", + "loadConfig": true + }, + { + "label": "Require Human Input", + "name": "agentSelectedToolRequiresHumanInput", + "type": "boolean", + "optional": true + } + ], + "id": "agentAgentflow_0-input-agentTools-array", + "display": true + }, + { + "label": "Knowledge (Document Stores)", + "name": "agentKnowledgeDocumentStores", + "type": "array", + "description": "Give your agent context about different document sources. Document stores must be upserted in advance.", + "array": [ + { + "label": "Document Store", + "name": "documentStore", + "type": "asyncOptions", + "loadMethod": "listStores" + }, + { + "label": "Describe Knowledge", + "name": "docStoreDescription", + "type": "string", + "generateDocStoreDescription": true, + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_0-input-agentKnowledgeDocumentStores-array", + "display": true + }, + { + "label": "Knowledge (Vector Embeddings)", + "name": "agentKnowledgeVSEmbeddings", + "type": "array", + "description": "Give your agent context about different document sources from existing vector stores and embeddings", + "array": [ + { + "label": "Vector Store", + "name": "vectorStore", + "type": "asyncOptions", + "loadMethod": "listVectorStores", + "loadConfig": true + }, + { + "label": "Embedding Model", + "name": "embeddingModel", + "type": 
"asyncOptions", + "loadMethod": "listEmbeddings", + "loadConfig": true + }, + { + "label": "Knowledge Name", + "name": "knowledgeName", + "type": "string", + "placeholder": "A short name for the knowledge base, this is useful for the AI to know when and how to search for correct information" + }, + { + "label": "Describe Knowledge", + "name": "knowledgeDescription", + "type": "string", + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_0-input-agentKnowledgeVSEmbeddings-array", + "display": true + }, + { + "label": "Enable Memory", + "name": "agentEnableMemory", + "type": "boolean", + "description": "Enable memory for the conversation thread", + "default": true, + "optional": true, + "id": "agentAgentflow_0-input-agentEnableMemory-boolean", + "display": true + }, + { + "label": "Memory Type", + "name": "agentMemoryType", + "type": "options", + "options": [ + { + "label": "All Messages", + "name": "allMessages", + "description": "Retrieve all messages from the conversation" + }, + { + "label": "Window Size", + "name": "windowSize", + "description": "Uses a fixed window size to surface the last N messages" + }, + { + "label": "Conversation Summary", + "name": "conversationSummary", + "description": "Summarizes the whole conversation" + }, + { + "label": "Conversation Summary Buffer", + "name": "conversationSummaryBuffer", + "description": "Summarize conversations once token limit is reached. 
Default to 2000" + } + ], + "optional": true, + "default": "allMessages", + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_0-input-agentMemoryType-options", + "display": true + }, + { + "label": "Window Size", + "name": "agentMemoryWindowSize", + "type": "number", + "default": "20", + "description": "Uses a fixed window size to surface the last N messages", + "show": { + "agentMemoryType": "windowSize" + }, + "id": "agentAgentflow_0-input-agentMemoryWindowSize-number", + "display": false + }, + { + "label": "Max Token Limit", + "name": "agentMemoryMaxTokenLimit", + "type": "number", + "default": "2000", + "description": "Summarize conversations once token limit is reached. Default to 2000", + "show": { + "agentMemoryType": "conversationSummaryBuffer" + }, + "id": "agentAgentflow_0-input-agentMemoryMaxTokenLimit-number", + "display": false + }, + { + "label": "Input Message", + "name": "agentUserMessage", + "type": "string", + "description": "Add an input message as user message at the end of the conversation", + "rows": 4, + "optional": true, + "acceptVariable": true, + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_0-input-agentUserMessage-string", + "display": true + }, + { + "label": "Return Response As", + "name": "agentReturnResponseAs", + "type": "options", + "options": [ + { + "label": "User Message", + "name": "userMessage" + }, + { + "label": "Assistant Message", + "name": "assistantMessage" + } + ], + "default": "userMessage", + "id": "agentAgentflow_0-input-agentReturnResponseAs-options", + "display": true + }, + { + "label": "Update Flow State", + "name": "agentUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": 
"string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "agentAgentflow_0-input-agentUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "agentModel": "chatGoogleGenerativeAI", + "agentMessages": [ + { + "role": "system", + "content": "

You are Agent 0. Your goal is to explore any topic provided by the user in depth with Agent 1.

  1. Start: Introduce the topic to Agent 1. Share your initial thoughts and any assumptions you have.

  2. Research & Share:

    • Use BraveSearch API to find a range of information and different viewpoints on the topic. Look for URLs that seem promising for more detail.

    • If a URL from BraveSearch API (or one you already know) seems particularly important, use the Web Scraper Tool to get its full content.

    • Present what you find to Agent 1, especially any complexities, counter-arguments, or conflicting data.

    • Clearly state your sources:

      • \"BraveSearch API found...\"

      • \"After scraping [URL], the content shows...\"

  3. Discuss & Deepen:

    • Listen to Agent 1. Ask probing questions.

    • If needed, use your tools again (BraveSearch API to find more, Web Scraper to analyze a specific page) during the conversation to verify points or explore new angles.

  4. Mindset: Be curious, analytical, and open to different perspectives. Aim for a thorough understanding, not just agreement.

" + } + ], + "agentTools": [ + { + "agentSelectedTool": "braveSearchAPI", + "agentSelectedToolRequiresHumanInput": "", + "agentSelectedToolConfig": { + "agentSelectedTool": "braveSearchAPI" + } + }, + { + "agentSelectedTool": "webScraperTool", + "agentSelectedToolRequiresHumanInput": "", + "agentSelectedToolConfig": { + "scrapeMode": "recursive", + "maxDepth": 1, + "maxPages": 10, + "timeoutS": 60, + "description": "", + "agentSelectedTool": "webScraperTool" + } + } + ], + "agentKnowledgeDocumentStores": "", + "agentKnowledgeVSEmbeddings": "", + "agentEnableMemory": true, + "agentMemoryType": "allMessages", + "agentUserMessage": "", + "agentReturnResponseAs": "assistantMessage", + "agentUpdateState": "", + "agentModelConfig": { + "cache": "", + "contextCache": "", + "modelName": "gemini-2.0-flash", + "customModelName": "", + "temperature": "0.5", + "streaming": true, + "maxOutputTokens": "", + "topP": "", + "topK": "", + "harmCategory": "", + "harmBlockThreshold": "", + "baseUrl": "", + "allowImageUploads": "", + "agentModel": "chatGoogleGenerativeAI" + } + }, + "outputAnchors": [ + { + "id": "agentAgentflow_0-output-agentAgentflow", + "label": "Agent", + "name": "agentAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 199, + "height": 103, + "selected": false, + "positionAbsolute": { + "x": 209.99147630894493, + "y": 100.7933285478893 + }, + "dragging": false + }, + { + "id": "agentAgentflow_1", + "position": { + "x": 203.50865583557328, + "y": -75.13070214403373 + }, + "data": { + "id": "agentAgentflow_1", + "label": "Agent 1", + "version": 1, + "name": "agentAgentflow", + "type": "Agent", + "color": "#4DD0E1", + "baseClasses": ["Agent"], + "category": "Agent Flows", + "description": "Dynamically choose and utilize tools during runtime, enabling multi-step reasoning", + "inputParams": [ + { + "label": "Model", + "name": "agentModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": 
"agentAgentflow_1-input-agentModel-asyncOptions", + "display": true + }, + { + "label": "Messages", + "name": "agentMessages", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Role", + "name": "role", + "type": "options", + "options": [ + { + "label": "System", + "name": "system" + }, + { + "label": "Assistant", + "name": "assistant" + }, + { + "label": "Developer", + "name": "developer" + }, + { + "label": "User", + "name": "user" + } + ] + }, + { + "label": "Content", + "name": "content", + "type": "string", + "acceptVariable": true, + "generateInstruction": true, + "rows": 4 + } + ], + "id": "agentAgentflow_1-input-agentMessages-array", + "display": true + }, + { + "label": "Tools", + "name": "agentTools", + "type": "array", + "optional": true, + "array": [ + { + "label": "Tool", + "name": "agentSelectedTool", + "type": "asyncOptions", + "loadMethod": "listTools", + "loadConfig": true + }, + { + "label": "Require Human Input", + "name": "agentSelectedToolRequiresHumanInput", + "type": "boolean", + "optional": true + } + ], + "id": "agentAgentflow_1-input-agentTools-array", + "display": true + }, + { + "label": "Knowledge (Document Stores)", + "name": "agentKnowledgeDocumentStores", + "type": "array", + "description": "Give your agent context about different document sources. 
Document stores must be upserted in advance.", + "array": [ + { + "label": "Document Store", + "name": "documentStore", + "type": "asyncOptions", + "loadMethod": "listStores" + }, + { + "label": "Describe Knowledge", + "name": "docStoreDescription", + "type": "string", + "generateDocStoreDescription": true, + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_1-input-agentKnowledgeDocumentStores-array", + "display": true + }, + { + "label": "Knowledge (Vector Embeddings)", + "name": "agentKnowledgeVSEmbeddings", + "type": "array", + "description": "Give your agent context about different document sources from existing vector stores and embeddings", + "array": [ + { + "label": "Vector Store", + "name": "vectorStore", + "type": "asyncOptions", + "loadMethod": "listVectorStores", + "loadConfig": true + }, + { + "label": "Embedding Model", + "name": "embeddingModel", + "type": "asyncOptions", + "loadMethod": "listEmbeddings", + "loadConfig": true + }, + { + "label": "Knowledge Name", + "name": "knowledgeName", + "type": "string", + "placeholder": "A short name for the knowledge base, this is useful for the AI to know when and how to search for correct information" + }, + { + "label": "Describe Knowledge", + "name": "knowledgeDescription", + "type": "string", + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_1-input-agentKnowledgeVSEmbeddings-array", + "display": true + }, + { + "label": "Enable Memory", + "name": 
"agentEnableMemory", + "type": "boolean", + "description": "Enable memory for the conversation thread", + "default": true, + "optional": true, + "id": "agentAgentflow_1-input-agentEnableMemory-boolean", + "display": true + }, + { + "label": "Memory Type", + "name": "agentMemoryType", + "type": "options", + "options": [ + { + "label": "All Messages", + "name": "allMessages", + "description": "Retrieve all messages from the conversation" + }, + { + "label": "Window Size", + "name": "windowSize", + "description": "Uses a fixed window size to surface the last N messages" + }, + { + "label": "Conversation Summary", + "name": "conversationSummary", + "description": "Summarizes the whole conversation" + }, + { + "label": "Conversation Summary Buffer", + "name": "conversationSummaryBuffer", + "description": "Summarize conversations once token limit is reached. Default to 2000" + } + ], + "optional": true, + "default": "allMessages", + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_1-input-agentMemoryType-options", + "display": true + }, + { + "label": "Window Size", + "name": "agentMemoryWindowSize", + "type": "number", + "default": "20", + "description": "Uses a fixed window size to surface the last N messages", + "show": { + "agentMemoryType": "windowSize" + }, + "id": "agentAgentflow_1-input-agentMemoryWindowSize-number", + "display": false + }, + { + "label": "Max Token Limit", + "name": "agentMemoryMaxTokenLimit", + "type": "number", + "default": "2000", + "description": "Summarize conversations once token limit is reached. 
Default to 2000", + "show": { + "agentMemoryType": "conversationSummaryBuffer" + }, + "id": "agentAgentflow_1-input-agentMemoryMaxTokenLimit-number", + "display": false + }, + { + "label": "Input Message", + "name": "agentUserMessage", + "type": "string", + "description": "Add an input message as user message at the end of the conversation", + "rows": 4, + "optional": true, + "acceptVariable": true, + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_1-input-agentUserMessage-string", + "display": true + }, + { + "label": "Return Response As", + "name": "agentReturnResponseAs", + "type": "options", + "options": [ + { + "label": "User Message", + "name": "userMessage" + }, + { + "label": "Assistant Message", + "name": "assistantMessage" + } + ], + "default": "userMessage", + "id": "agentAgentflow_1-input-agentReturnResponseAs-options", + "display": true + }, + { + "label": "Update Flow State", + "name": "agentUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": "string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "agentAgentflow_1-input-agentUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "agentModel": "chatGoogleGenerativeAI", + "agentMessages": [ + { + "role": "system", + "content": "

You are Agent 1. Your goal is to explore a topic in depth with Agent 0.

  1. Respond & Share:

    • Acknowledge the topic Agent 0 introduces.

    • Share your own thoughts and feelings, building on or respectfully challenging Agent 0's points. Consider your own assumptions.

  2. Research & Contribute:

    • Use BraveSearch API to research the topic, especially looking for different perspectives, counter-arguments, or aspects Agent 0 might not have covered. Identify URLs that seem promising for more detail.

    • If a URL from BraveSearch API (or one you already know) seems particularly important for your point or for adding nuance, use the Web Scraper Tool to get its full content.

    • Present your findings, especially any that introduce new angles, conflicts, or alternative views.

    • Clearly state your sources:

      • \"My BraveSearch API tool found...\"

      • \"After scraping [URL], the content suggests...\"

    • If you find conflicting info from different sources, point this out.

  3. Discuss & Deepen:

    • Listen carefully to Agent 0. Ask clarifying questions and questions that challenge their reasoning or explore alternatives.

    • If needed, use your tools again (BraveSearch API to find more, Web Scraper to analyze a specific page) during the conversation to support your points or investigate Agent 0's claims.

  4. Mindset: Be respectful, analytical, and open to different viewpoints. Aim for a thorough exploration and constructive disagreement, backed by research.

" + } + ], + "agentTools": [ + { + "agentSelectedTool": "braveSearchAPI", + "agentSelectedToolRequiresHumanInput": "", + "agentSelectedToolConfig": { + "agentSelectedTool": "braveSearchAPI" + } + }, + { + "agentSelectedTool": "webScraperTool", + "agentSelectedToolRequiresHumanInput": "", + "agentSelectedToolConfig": { + "scrapeMode": "recursive", + "maxDepth": 1, + "maxPages": 10, + "timeoutS": 60, + "description": "", + "agentSelectedTool": "webScraperTool" + } + } + ], + "agentKnowledgeDocumentStores": "", + "agentKnowledgeVSEmbeddings": "", + "agentEnableMemory": true, + "agentMemoryType": "allMessages", + "agentUserMessage": "", + "agentReturnResponseAs": "assistantMessage", + "agentUpdateState": "", + "agentModelConfig": { + "cache": "", + "contextCache": "", + "modelName": "gemini-2.0-flash", + "customModelName": "", + "temperature": "0.5", + "streaming": true, + "maxOutputTokens": "", + "topP": "", + "topK": "", + "harmCategory": "", + "harmBlockThreshold": "", + "baseUrl": "", + "allowImageUploads": "", + "agentModel": "chatGoogleGenerativeAI" + } + }, + "outputAnchors": [ + { + "id": "agentAgentflow_1-output-agentAgentflow", + "label": "Agent", + "name": "agentAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 199, + "height": 103, + "selected": false, + "positionAbsolute": { + "x": 203.50865583557328, + "y": -75.13070214403373 + }, + "dragging": false + }, + { + "id": "conditionAgentflow_0", + "position": { + "x": 497.07879661792845, + "y": 29.068421396935392 + }, + "data": { + "id": "conditionAgentflow_0", + "label": "Condition", + "version": 1, + "name": "conditionAgentflow", + "type": "Condition", + "color": "#FFB938", + "baseClasses": ["Condition"], + "category": "Agent Flows", + "description": "Split flows based on If Else conditions", + "inputParams": [ + { + "label": "Conditions", + "name": "conditions", + "type": "array", + "description": "Values to compare", + "acceptVariable": true, + "default": [ + { 
+ "type": "number", + "value1": "", + "operation": "equal", + "value2": "" + } + ], + "array": [ + { + "label": "Type", + "name": "type", + "type": "options", + "options": [ + { + "label": "String", + "name": "string" + }, + { + "label": "Number", + "name": "number" + }, + { + "label": "Boolean", + "name": "boolean" + } + ], + "default": "string" + }, + { + "label": "Value 1", + "name": "value1", + "type": "string", + "default": "", + "description": "First value to be compared with", + "acceptVariable": true, + "show": { + "conditions[$index].type": "string" + } + }, + { + "label": "Operation", + "name": "operation", + "type": "options", + "options": [ + { + "label": "Contains", + "name": "contains" + }, + { + "label": "Ends With", + "name": "endsWith" + }, + { + "label": "Equal", + "name": "equal" + }, + { + "label": "Not Contains", + "name": "notContains" + }, + { + "label": "Not Equal", + "name": "notEqual" + }, + { + "label": "Regex", + "name": "regex" + }, + { + "label": "Starts With", + "name": "startsWith" + }, + { + "label": "Is Empty", + "name": "isEmpty" + }, + { + "label": "Not Empty", + "name": "notEmpty" + } + ], + "default": "equal", + "description": "Type of operation", + "show": { + "conditions[$index].type": "string" + } + }, + { + "label": "Value 2", + "name": "value2", + "type": "string", + "default": "", + "description": "Second value to be compared with", + "acceptVariable": true, + "show": { + "conditions[$index].type": "string" + }, + "hide": { + "conditions[$index].operation": ["isEmpty", "notEmpty"] + } + }, + { + "label": "Value 1", + "name": "value1", + "type": "number", + "default": "", + "description": "First value to be compared with", + "acceptVariable": true, + "show": { + "conditions[$index].type": "number" + } + }, + { + "label": "Operation", + "name": "operation", + "type": "options", + "options": [ + { + "label": "Smaller", + "name": "smaller" + }, + { + "label": "Smaller Equal", + "name": "smallerEqual" + }, + { + "label": 
"Equal", + "name": "equal" + }, + { + "label": "Not Equal", + "name": "notEqual" + }, + { + "label": "Larger", + "name": "larger" + }, + { + "label": "Larger Equal", + "name": "largerEqual" + }, + { + "label": "Is Empty", + "name": "isEmpty" + }, + { + "label": "Not Empty", + "name": "notEmpty" + } + ], + "default": "equal", + "description": "Type of operation", + "show": { + "conditions[$index].type": "number" + } + }, + { + "label": "Value 2", + "name": "value2", + "type": "number", + "default": 0, + "description": "Second value to be compared with", + "acceptVariable": true, + "show": { + "conditions[$index].type": "number" + } + }, + { + "label": "Value 1", + "name": "value1", + "type": "boolean", + "default": false, + "description": "First value to be compared with", + "show": { + "conditions[$index].type": "boolean" + } + }, + { + "label": "Operation", + "name": "operation", + "type": "options", + "options": [ + { + "label": "Equal", + "name": "equal" + }, + { + "label": "Not Equal", + "name": "notEqual" + } + ], + "default": "equal", + "description": "Type of operation", + "show": { + "conditions[$index].type": "boolean" + } + }, + { + "label": "Value 2", + "name": "value2", + "type": "boolean", + "default": false, + "description": "Second value to be compared with", + "show": { + "conditions[$index].type": "boolean" + } + } + ], + "id": "conditionAgentflow_0-input-conditions-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "conditions": [ + { + "type": "number", + "value1": "

{{ runtime_messages_length }}

", + "operation": "smallerEqual", + "value2": "

11

" + } + ] + }, + "outputAnchors": [ + { + "id": "conditionAgentflow_0-output-0", + "label": "Condition", + "name": "conditionAgentflow" + }, + { + "id": "conditionAgentflow_0-output-1", + "label": "Condition", + "name": "conditionAgentflow" + } + ], + "outputs": { + "conditionAgentflow": "" + }, + "selected": false + }, + "type": "agentFlow", + "width": 134, + "height": 80, + "selected": false, + "positionAbsolute": { + "x": 497.07879661792845, + "y": 29.068421396935392 + }, + "dragging": false + }, + { + "id": "loopAgentflow_0", + "position": { + "x": 710.6354115635097, + "y": -61.015932400168076 + }, + "data": { + "id": "loopAgentflow_0", + "label": "Loop", + "version": 1, + "name": "loopAgentflow", + "type": "Loop", + "color": "#FFA07A", + "hideOutput": true, + "baseClasses": ["Loop"], + "category": "Agent Flows", + "description": "Loop back to a previous node", + "inputParams": [ + { + "label": "Loop Back To", + "name": "loopBackToNode", + "type": "asyncOptions", + "loadMethod": "listPreviousNodes", + "freeSolo": true, + "id": "loopAgentflow_0-input-loopBackToNode-asyncOptions", + "display": true + }, + { + "label": "Max Loop Count", + "name": "maxLoopCount", + "type": "number", + "default": 5, + "id": "loopAgentflow_0-input-maxLoopCount-number", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "loopBackToNode": "agentAgentflow_0-Agent 0", + "maxLoopCount": "10" + }, + "outputAnchors": [], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 104, + "height": 65, + "selected": false, + "dragging": false, + "positionAbsolute": { + "x": 710.6354115635097, + "y": -61.015932400168076 + } + }, + { + "id": "llmAgentflow_1", + "position": { + "x": 693.0529196789191, + "y": 133.0683091126315 + }, + "data": { + "id": "llmAgentflow_1", + "label": "Agent 2", + "version": 1, + "name": "llmAgentflow", + "type": "LLM", + "color": "#64B5F6", + "baseClasses": ["LLM"], + "category": "Agent Flows", + "description": "Large language models to 
analyze user-provided inputs and generate responses", + "inputParams": [ + { + "label": "Model", + "name": "llmModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "llmAgentflow_1-input-llmModel-asyncOptions", + "display": true + }, + { + "label": "Messages", + "name": "llmMessages", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Role", + "name": "role", + "type": "options", + "options": [ + { + "label": "System", + "name": "system" + }, + { + "label": "Assistant", + "name": "assistant" + }, + { + "label": "Developer", + "name": "developer" + }, + { + "label": "User", + "name": "user" + } + ] + }, + { + "label": "Content", + "name": "content", + "type": "string", + "acceptVariable": true, + "generateInstruction": true, + "rows": 4 + } + ], + "id": "llmAgentflow_1-input-llmMessages-array", + "display": true + }, + { + "label": "Enable Memory", + "name": "llmEnableMemory", + "type": "boolean", + "description": "Enable memory for the conversation thread", + "default": true, + "optional": true, + "id": "llmAgentflow_1-input-llmEnableMemory-boolean", + "display": true + }, + { + "label": "Memory Type", + "name": "llmMemoryType", + "type": "options", + "options": [ + { + "label": "All Messages", + "name": "allMessages", + "description": "Retrieve all messages from the conversation" + }, + { + "label": "Window Size", + "name": "windowSize", + "description": "Uses a fixed window size to surface the last N messages" + }, + { + "label": "Conversation Summary", + "name": "conversationSummary", + "description": "Summarizes the whole conversation" + }, + { + "label": "Conversation Summary Buffer", + "name": "conversationSummaryBuffer", + "description": "Summarize conversations once token limit is reached. 
Default to 2000" + } + ], + "optional": true, + "default": "allMessages", + "show": { + "llmEnableMemory": true + }, + "id": "llmAgentflow_1-input-llmMemoryType-options", + "display": false + }, + { + "label": "Window Size", + "name": "llmMemoryWindowSize", + "type": "number", + "default": "20", + "description": "Uses a fixed window size to surface the last N messages", + "show": { + "llmMemoryType": "windowSize" + }, + "id": "llmAgentflow_1-input-llmMemoryWindowSize-number", + "display": false + }, + { + "label": "Max Token Limit", + "name": "llmMemoryMaxTokenLimit", + "type": "number", + "default": "2000", + "description": "Summarize conversations once token limit is reached. Default to 2000", + "show": { + "llmMemoryType": "conversationSummaryBuffer" + }, + "id": "llmAgentflow_1-input-llmMemoryMaxTokenLimit-number", + "display": false + }, + { + "label": "Input Message", + "name": "llmUserMessage", + "type": "string", + "description": "Add an input message as user message at the end of the conversation", + "rows": 4, + "optional": true, + "acceptVariable": true, + "show": { + "llmEnableMemory": true + }, + "id": "llmAgentflow_1-input-llmUserMessage-string", + "display": false + }, + { + "label": "Return Response As", + "name": "llmReturnResponseAs", + "type": "options", + "options": [ + { + "label": "User Message", + "name": "userMessage" + }, + { + "label": "Assistant Message", + "name": "assistantMessage" + } + ], + "default": "userMessage", + "id": "llmAgentflow_1-input-llmReturnResponseAs-options", + "display": true + }, + { + "label": "JSON Structured Output", + "name": "llmStructuredOutput", + "description": "Instruct the LLM to give output in a JSON structured schema", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "string" + }, + { + "label": "Type", + "name": "type", + "type": "options", + "options": [ + { + "label": "String", + "name": "string" + }, + { + "label": "String 
Array", + "name": "stringArray" + }, + { + "label": "Number", + "name": "number" + }, + { + "label": "Boolean", + "name": "boolean" + }, + { + "label": "Enum", + "name": "enum" + }, + { + "label": "JSON Array", + "name": "jsonArray" + } + ] + }, + { + "label": "Enum Values", + "name": "enumValues", + "type": "string", + "placeholder": "value1, value2, value3", + "description": "Enum values. Separated by comma", + "optional": true, + "show": { + "llmStructuredOutput[$index].type": "enum" + } + }, + { + "label": "JSON Schema", + "name": "jsonSchema", + "type": "code", + "placeholder": "{\n \"answer\": {\n \"type\": \"string\",\n \"description\": \"Value of the answer\"\n },\n \"reason\": {\n \"type\": \"string\",\n \"description\": \"Reason for the answer\"\n },\n \"optional\": {\n \"type\": \"boolean\"\n },\n \"count\": {\n \"type\": \"number\"\n },\n \"children\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"object\",\n \"properties\": {\n \"value\": {\n \"type\": \"string\",\n \"description\": \"Value of the children's answer\"\n }\n }\n }\n }\n}", + "description": "JSON schema for the structured output", + "optional": true, + "show": { + "llmStructuredOutput[$index].type": "jsonArray" + } + }, + { + "label": "Description", + "name": "description", + "type": "string", + "placeholder": "Description of the key" + } + ], + "id": "llmAgentflow_1-input-llmStructuredOutput-array", + "display": true + }, + { + "label": "Update Flow State", + "name": "llmUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": "string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "llmAgentflow_1-input-llmUpdateState-array", + "display": true + } + ], + 
"inputAnchors": [], + "inputs": { + "llmModel": "chatGoogleGenerativeAI", + "llmMessages": [ + { + "role": "system", + "content": "

You are Agent 2. Your role is to transform the deep conversation between Agent 0 and Agent 1 into a comprehensive and extensive white paper on the subject they discussed.

Your goal is to produce an authoritative document that not only captures the essence of their dialogue but also expands upon it, providing a thorough exploration of the topic. This white paper should be suitable for an audience seeking a deep understanding of the subject.

The white paper must include, but is not limited to, the following sections and considerations:

  1. Title: A clear, compelling title for the white paper that reflects the core subject.

  2. Abstract/Executive Summary: A concise overview (approx. 200-300 words) of the white paper's main arguments, scope, and conclusions, derived from the conversation.

  3. Introduction:

    • Set the context and importance of the subject discussed by Agent 0 and Agent 1.

    • Clearly define the central problem, question, or theme that the white paper will address, based on their dialogue.

    • Outline the paper's structure and objectives.

  4. Main Body / Thematic Analysis (Multiple Sections):

    • Deconstruct and Synthesize Key Arguments: Detail the principal arguments, propositions, and evidence presented by both Agent 0 and Agent 1. Go beyond mere listing; analyze the strengths, weaknesses, and underlying assumptions of their positions.

    • Explore Core Themes and Concepts: Identify and elaborate on the major themes and concepts that emerged. For each theme, discuss how Agent 0 and Agent 1 approached it, their points of convergence, and their points of divergence.

    • Analyze the Evolution of the Discussion: Trace how the understanding of the subject evolved throughout their conversation. Highlight any shifts in perspective, critical turning points, challenged assumptions, or moments of significant clarification.

    • Evidence and Examples: Where the agents provided examples or evidence, incorporate and potentially expand upon these to support the white paper's analysis.

  5. Synthesis of Insights and Key Conclusions:

    • Draw together the most significant insights and conclusions that can be derived from the entirety of the conversation.

    • This section should offer a consolidated understanding of the subject, informed by the agents' interaction.

  6. Implications and Future Directions:

    • Discuss the broader implications of the insights and conclusions reached.

    • Identify any unresolved questions, ambiguities, or areas that the conversation indicated require further exploration or research.

    • Suggest potential next steps or future avenues of inquiry.

  7. Conclusion: A strong concluding section summarizing the white paper's main findings, their significance, and a final thought on the subject.

Style and Tone:

  • Extensive and In-depth: The paper should be thorough and detailed.

  • Well-Structured: Use clear headings, subheadings, and logical flow.

  • Analytical and Critical: Do not just report; analyze, interpret, and critically engage with the agents' ideas.

  • Objective and Authoritative: While based on the agents' dialogue, the white paper should present a balanced and well-reasoned perspective.

  • Clear Attribution: When discussing specific viewpoints or arguments, clearly attribute them to Agent 0 or Agent 1.

  • Formal and Professional Language: Maintain a tone appropriate for a white paper.

Your primary source material is the conversation between Agent 0 and Agent 1. Your task is to elevate their discourse into a structured, analytical, and extensive white paper.

" + }, + { + "role": "user", + "content": "

Here is the full conversation between Agent 0 and Agent 1. Please use this as the primary source material for generating the extensive white paper as per your instructions:
--
{{ chat_history }}
--

" + } + ], + "llmEnableMemory": false, + "llmReturnResponseAs": "assistantMessage", + "llmStructuredOutput": "", + "llmUpdateState": "", + "llmModelConfig": { + "cache": "", + "contextCache": "", + "modelName": "gemini-2.0-flash", + "customModelName": "", + "temperature": "0.5", + "streaming": true, + "maxOutputTokens": "", + "topP": "", + "topK": "", + "harmCategory": "", + "harmBlockThreshold": "", + "baseUrl": "", + "allowImageUploads": "", + "llmModel": "chatGoogleGenerativeAI" + } + }, + "outputAnchors": [ + { + "id": "llmAgentflow_1-output-llmAgentflow", + "label": "LLM", + "name": "llmAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 199, + "height": 71, + "selected": false, + "positionAbsolute": { + "x": 693.0529196789191, + "y": 133.0683091126315 + }, + "dragging": false + }, + { + "id": "stickyNoteAgentflow_0", + "position": { + "x": -320.62033146052283, + "y": -110.15285265313359 + }, + "data": { + "id": "stickyNoteAgentflow_0", + "label": "Sticky Note", + "version": 1, + "name": "stickyNoteAgentflow", + "type": "StickyNote", + "color": "#fee440", + "baseClasses": ["StickyNote"], + "category": "Agent Flows", + "description": "Add notes to the agent flow", + "inputParams": [ + { + "label": "", + "name": "note", + "type": "string", + "rows": 1, + "placeholder": "Type something here", + "optional": true, + "id": "stickyNoteAgentflow_0-input-note-string", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "note": "User provides a topic for research, for example: \"Humans in the Era of an ASI\"" + }, + "outputAnchors": [ + { + "id": "stickyNoteAgentflow_0-output-stickyNoteAgentflow", + "label": "Sticky Note", + "name": "stickyNoteAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "stickyNote", + "width": 203, + "height": 122, + "selected": false, + "positionAbsolute": { + "x": -320.62033146052283, + "y": -110.15285265313359 + }, + "dragging": false + }, + { + "id": 
"stickyNoteAgentflow_1", + "position": { + "x": 466.8306744858025, + "y": -189.9009582021492 + }, + "data": { + "id": "stickyNoteAgentflow_1", + "label": "Sticky Note (1)", + "version": 1, + "name": "stickyNoteAgentflow", + "type": "StickyNote", + "color": "#fee440", + "baseClasses": ["StickyNote"], + "category": "Agent Flows", + "description": "Add notes to the agent flow", + "inputParams": [ + { + "label": "", + "name": "note", + "type": "string", + "rows": 1, + "placeholder": "Type something here", + "optional": true, + "id": "stickyNoteAgentflow_1-input-note-string", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "note": "Determine the number of back-and-forth exchanges between Agent 0 and Agent 1 in a deep conversation about the user's topic. It is currently set for 5 iterations." + }, + "outputAnchors": [ + { + "id": "stickyNoteAgentflow_1-output-stickyNoteAgentflow", + "label": "Sticky Note", + "name": "stickyNoteAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "stickyNote", + "width": 203, + "height": 202, + "selected": false, + "positionAbsolute": { + "x": 466.8306744858025, + "y": -189.9009582021492 + }, + "dragging": false + }, + { + "id": "stickyNoteAgentflow_2", + "position": { + "x": 693.7511120802441, + "y": 221.75098356027857 + }, + "data": { + "id": "stickyNoteAgentflow_2", + "label": "Sticky Note (1) (2)", + "version": 1, + "name": "stickyNoteAgentflow", + "type": "StickyNote", + "color": "#fee440", + "baseClasses": ["StickyNote"], + "category": "Agent Flows", + "description": "Add notes to the agent flow", + "inputParams": [ + { + "label": "", + "name": "note", + "type": "string", + "rows": 1, + "placeholder": "Type something here", + "optional": true, + "id": "stickyNoteAgentflow_2-input-note-string", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "note": "This LLM Node transforms the in-depth conversation between Agent 0 and Agent 1 into a comprehensive white paper. 
It can be replaced with an Agent Node if you need to use tools such as sending the findings to your email, etc." + }, + "outputAnchors": [ + { + "id": "stickyNoteAgentflow_2-output-stickyNoteAgentflow", + "label": "Sticky Note", + "name": "stickyNoteAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "stickyNote", + "width": 203, + "height": 283, + "selected": false, + "positionAbsolute": { + "x": 693.7511120802441, + "y": 221.75098356027857 + }, + "dragging": false + } + ], + "edges": [ + { + "source": "startAgentflow_0", + "sourceHandle": "startAgentflow_0-output-startAgentflow", + "target": "llmAgentflow_0", + "targetHandle": "llmAgentflow_0", + "data": { + "sourceColor": "#7EE787", + "targetColor": "#64B5F6", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "startAgentflow_0-startAgentflow_0-output-startAgentflow-llmAgentflow_0-llmAgentflow_0" + }, + { + "source": "llmAgentflow_0", + "sourceHandle": "llmAgentflow_0-output-llmAgentflow", + "target": "agentAgentflow_0", + "targetHandle": "agentAgentflow_0", + "data": { + "sourceColor": "#64B5F6", + "targetColor": "#4DD0E1", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "llmAgentflow_0-llmAgentflow_0-output-llmAgentflow-agentAgentflow_0-agentAgentflow_0" + }, + { + "source": "agentAgentflow_0", + "sourceHandle": "agentAgentflow_0-output-agentAgentflow", + "target": "agentAgentflow_1", + "targetHandle": "agentAgentflow_1", + "data": { + "sourceColor": "#4DD0E1", + "targetColor": "#4DD0E1", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "agentAgentflow_0-agentAgentflow_0-output-agentAgentflow-agentAgentflow_1-agentAgentflow_1" + }, + { + "source": "agentAgentflow_1", + "sourceHandle": "agentAgentflow_1-output-agentAgentflow", + "target": "conditionAgentflow_0", + "targetHandle": "conditionAgentflow_0", + "data": { + "sourceColor": "#4DD0E1", + "targetColor": "#FFB938", + "isHumanInput": false + }, + "type": "agentFlow", + "id": 
"agentAgentflow_1-agentAgentflow_1-output-agentAgentflow-conditionAgentflow_0-conditionAgentflow_0" + }, + { + "source": "conditionAgentflow_0", + "sourceHandle": "conditionAgentflow_0-output-0", + "target": "loopAgentflow_0", + "targetHandle": "loopAgentflow_0", + "data": { + "sourceColor": "#FFB938", + "targetColor": "#FFA07A", + "edgeLabel": "0", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "conditionAgentflow_0-conditionAgentflow_0-output-0-loopAgentflow_0-loopAgentflow_0" + }, + { + "source": "conditionAgentflow_0", + "sourceHandle": "conditionAgentflow_0-output-1", + "target": "llmAgentflow_1", + "targetHandle": "llmAgentflow_1", + "data": { + "sourceColor": "#FFB938", + "targetColor": "#64B5F6", + "edgeLabel": "1", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "conditionAgentflow_0-conditionAgentflow_0-output-1-llmAgentflow_1-llmAgentflow_1" + } + ] +} diff --git a/packages/server/marketplaces/agentflowsv2/Email Reply HITL Agent.json b/packages/server/marketplaces/agentflowsv2/Email Reply HITL Agent.json new file mode 100644 index 000000000..0174b1a90 --- /dev/null +++ b/packages/server/marketplaces/agentflowsv2/Email Reply HITL Agent.json @@ -0,0 +1,847 @@ +{ + "description": "An email reply HITL (human in the loop) agent that can proceed or refine the email with user input", + "usecases": ["Human In Loop"], + "nodes": [ + { + "id": "startAgentflow_0", + "type": "agentFlow", + "position": { + "x": -212.0817769699585, + "y": 95.2304753249555 + }, + "data": { + "id": "startAgentflow_0", + "label": "Start", + "version": 1, + "name": "startAgentflow", + "type": "Start", + "color": "#7EE787", + "hideInput": true, + "baseClasses": ["Start"], + "category": "Agent Flows", + "description": "Starting point of the agentflow", + "inputParams": [ + { + "label": "Input Type", + "name": "startInputType", + "type": "options", + "options": [ + { + "label": "Chat Input", + "name": "chatInput", + "description": "Start the conversation with chat 
input" + }, + { + "label": "Form Input", + "name": "formInput", + "description": "Start the workflow with form inputs" + } + ], + "default": "chatInput", + "id": "startAgentflow_0-input-startInputType-options", + "display": true + }, + { + "label": "Form Title", + "name": "formTitle", + "type": "string", + "placeholder": "Please Fill Out The Form", + "show": { + "startInputType": "formInput" + }, + "id": "startAgentflow_0-input-formTitle-string", + "display": false + }, + { + "label": "Form Description", + "name": "formDescription", + "type": "string", + "placeholder": "Complete all fields below to continue", + "show": { + "startInputType": "formInput" + }, + "id": "startAgentflow_0-input-formDescription-string", + "display": false + }, + { + "label": "Form Input Types", + "name": "formInputTypes", + "description": "Specify the type of form input", + "type": "array", + "show": { + "startInputType": "formInput" + }, + "array": [ + { + "label": "Type", + "name": "type", + "type": "options", + "options": [ + { + "label": "String", + "name": "string" + }, + { + "label": "Number", + "name": "number" + }, + { + "label": "Boolean", + "name": "boolean" + }, + { + "label": "Options", + "name": "options" + } + ], + "default": "string" + }, + { + "label": "Label", + "name": "label", + "type": "string", + "placeholder": "Label for the input" + }, + { + "label": "Variable Name", + "name": "name", + "type": "string", + "placeholder": "Variable name for the input (must be camel case)", + "description": "Variable name must be camel case. For example: firstName, lastName, etc." 
+ }, + { + "label": "Add Options", + "name": "addOptions", + "type": "array", + "show": { + "formInputTypes[$index].type": "options" + }, + "array": [ + { + "label": "Option", + "name": "option", + "type": "string" + } + ] + } + ], + "id": "startAgentflow_0-input-formInputTypes-array", + "display": false + }, + { + "label": "Ephemeral Memory", + "name": "startEphemeralMemory", + "type": "boolean", + "description": "Start fresh for every execution without past chat history", + "optional": true + }, + { + "label": "Flow State", + "name": "startState", + "description": "Runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "string", + "placeholder": "Foo" + }, + { + "label": "Value", + "name": "value", + "type": "string", + "placeholder": "Bar" + } + ], + "id": "startAgentflow_0-input-startState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "startInputType": "chatInput", + "formTitle": "", + "formDescription": "", + "formInputTypes": "", + "startState": "" + }, + "outputAnchors": [ + { + "id": "startAgentflow_0-output-startAgentflow", + "label": "Start", + "name": "startAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "width": 101, + "height": 65, + "selected": false, + "positionAbsolute": { + "x": -212.0817769699585, + "y": 95.2304753249555 + }, + "dragging": false + }, + { + "id": "agentAgentflow_0", + "position": { + "x": -62.25, + "y": 76 + }, + "data": { + "id": "agentAgentflow_0", + "label": "Email Reply Agent", + "version": 1, + "name": "agentAgentflow", + "type": "Agent", + "color": "#4DD0E1", + "baseClasses": ["Agent"], + "category": "Agent Flows", + "description": "Dynamically choose and utilize tools during runtime, enabling multi-step reasoning", + "inputParams": [ + { + "label": "Model", + "name": "agentModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": 
"agentAgentflow_0-input-agentModel-asyncOptions", + "display": true + }, + { + "label": "Messages", + "name": "agentMessages", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Role", + "name": "role", + "type": "options", + "options": [ + { + "label": "System", + "name": "system" + }, + { + "label": "Assistant", + "name": "assistant" + }, + { + "label": "Developer", + "name": "developer" + }, + { + "label": "User", + "name": "user" + } + ] + }, + { + "label": "Content", + "name": "content", + "type": "string", + "acceptVariable": true, + "generateInstruction": true, + "rows": 4 + } + ], + "id": "agentAgentflow_0-input-agentMessages-array", + "display": true + }, + { + "label": "Tools", + "name": "agentTools", + "type": "array", + "optional": true, + "array": [ + { + "label": "Tool", + "name": "agentSelectedTool", + "type": "asyncOptions", + "loadMethod": "listTools", + "loadConfig": true + }, + { + "label": "Require Human Input", + "name": "agentSelectedToolRequiresHumanInput", + "type": "boolean", + "optional": true + } + ], + "id": "agentAgentflow_0-input-agentTools-array", + "display": true + }, + { + "label": "Knowledge (Document Stores)", + "name": "agentKnowledgeDocumentStores", + "type": "array", + "description": "Give your agent context about different document sources. 
Document stores must be upserted in advance.", + "array": [ + { + "label": "Document Store", + "name": "documentStore", + "type": "asyncOptions", + "loadMethod": "listStores" + }, + { + "label": "Describe Knowledge", + "name": "docStoreDescription", + "type": "string", + "generateDocStoreDescription": true, + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_0-input-agentKnowledgeDocumentStores-array", + "display": true + }, + { + "label": "Knowledge (Vector Embeddings)", + "name": "agentKnowledgeVSEmbeddings", + "type": "array", + "description": "Give your agent context about different document sources from existing vector stores and embeddings", + "array": [ + { + "label": "Vector Store", + "name": "vectorStore", + "type": "asyncOptions", + "loadMethod": "listVectorStores", + "loadConfig": true + }, + { + "label": "Embedding Model", + "name": "embeddingModel", + "type": "asyncOptions", + "loadMethod": "listEmbeddings", + "loadConfig": true + }, + { + "label": "Knowledge Name", + "name": "knowledgeName", + "type": "string", + "placeholder": "A short name for the knowledge base, this is useful for the AI to know when and how to search for correct information" + }, + { + "label": "Describe Knowledge", + "name": "knowledgeDescription", + "type": "string", + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_0-input-agentKnowledgeVSEmbeddings-array", + "display": true + }, + { + "label": "Enable Memory", + "name": 
"agentEnableMemory", + "type": "boolean", + "description": "Enable memory for the conversation thread", + "default": true, + "optional": true, + "id": "agentAgentflow_0-input-agentEnableMemory-boolean", + "display": true + }, + { + "label": "Memory Type", + "name": "agentMemoryType", + "type": "options", + "options": [ + { + "label": "All Messages", + "name": "allMessages", + "description": "Retrieve all messages from the conversation" + }, + { + "label": "Window Size", + "name": "windowSize", + "description": "Uses a fixed window size to surface the last N messages" + }, + { + "label": "Conversation Summary", + "name": "conversationSummary", + "description": "Summarizes the whole conversation" + }, + { + "label": "Conversation Summary Buffer", + "name": "conversationSummaryBuffer", + "description": "Summarize conversations once token limit is reached. Default to 2000" + } + ], + "optional": true, + "default": "allMessages", + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_0-input-agentMemoryType-options", + "display": true + }, + { + "label": "Window Size", + "name": "agentMemoryWindowSize", + "type": "number", + "default": "20", + "description": "Uses a fixed window size to surface the last N messages", + "show": { + "agentMemoryType": "windowSize" + }, + "id": "agentAgentflow_0-input-agentMemoryWindowSize-number", + "display": false + }, + { + "label": "Max Token Limit", + "name": "agentMemoryMaxTokenLimit", + "type": "number", + "default": "2000", + "description": "Summarize conversations once token limit is reached. 
Default to 2000", + "show": { + "agentMemoryType": "conversationSummaryBuffer" + }, + "id": "agentAgentflow_0-input-agentMemoryMaxTokenLimit-number", + "display": false + }, + { + "label": "Input Message", + "name": "agentUserMessage", + "type": "string", + "description": "Add an input message as user message at the end of the conversation", + "rows": 4, + "optional": true, + "acceptVariable": true, + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_0-input-agentUserMessage-string", + "display": true + }, + { + "label": "Return Response As", + "name": "agentReturnResponseAs", + "type": "options", + "options": [ + { + "label": "User Message", + "name": "userMessage" + }, + { + "label": "Assistant Message", + "name": "assistantMessage" + } + ], + "default": "userMessage", + "id": "agentAgentflow_0-input-agentReturnResponseAs-options", + "display": true + }, + { + "label": "Update Flow State", + "name": "agentUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": "string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "agentAgentflow_0-input-agentUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "agentModel": "chatOpenAI", + "agentMessages": [ + { + "role": "system", + "content": "

You are a customer support agent working in Flowise Inc. Write a professional email reply to user's query. Use the web search tools to get more details about the prospect.

" + } + ], + "agentTools": [ + { + "agentSelectedTool": "googleCustomSearch", + "agentSelectedToolConfig": { + "agentSelectedTool": "googleCustomSearch" + } + }, + { + "agentSelectedTool": "currentDateTime", + "agentSelectedToolConfig": { + "agentSelectedTool": "currentDateTime" + } + } + ], + "agentKnowledgeDocumentStores": "", + "agentEnableMemory": true, + "agentMemoryType": "allMessages", + "agentUserMessage": "", + "agentReturnResponseAs": "userMessage", + "agentUpdateState": "", + "agentModelConfig": { + "cache": "", + "modelName": "gpt-4o-mini", + "temperature": 0.9, + "streaming": true, + "maxTokens": "", + "topP": "", + "frequencyPenalty": "", + "presencePenalty": "", + "timeout": "", + "strictToolCalling": "", + "stopSequence": "", + "basepath": "", + "proxyUrl": "", + "baseOptions": "", + "allowImageUploads": "", + "imageResolution": "low", + "reasoningEffort": "medium", + "agentModel": "chatOpenAI" + } + }, + "outputAnchors": [ + { + "id": "agentAgentflow_0-output-agentAgentflow", + "label": "Agent", + "name": "agentAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 182, + "height": 103, + "selected": false, + "positionAbsolute": { + "x": -62.25, + "y": 76 + }, + "dragging": false + }, + { + "id": "humanInputAgentflow_0", + "position": { + "x": 156.05666363734434, + "y": 86.62266545493773 + }, + "data": { + "id": "humanInputAgentflow_0", + "label": "Human Input 0", + "version": 1, + "name": "humanInputAgentflow", + "type": "HumanInput", + "color": "#6E6EFD", + "baseClasses": ["HumanInput"], + "category": "Agent Flows", + "description": "Request human input, approval or rejection during execution", + "inputParams": [ + { + "label": "Description Type", + "name": "humanInputDescriptionType", + "type": "options", + "options": [ + { + "label": "Fixed", + "name": "fixed", + "description": "Specify a fixed description" + }, + { + "label": "Dynamic", + "name": "dynamic", + "description": "Use LLM to generate a 
description" + } + ], + "id": "humanInputAgentflow_0-input-humanInputDescriptionType-options", + "display": true + }, + { + "label": "Description", + "name": "humanInputDescription", + "type": "string", + "placeholder": "Are you sure you want to proceed?", + "acceptVariable": true, + "rows": 4, + "show": { + "humanInputDescriptionType": "fixed" + }, + "id": "humanInputAgentflow_0-input-humanInputDescription-string", + "display": true + }, + { + "label": "Model", + "name": "humanInputModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "show": { + "humanInputDescriptionType": "dynamic" + }, + "id": "humanInputAgentflow_0-input-humanInputModel-asyncOptions", + "display": false + }, + { + "label": "Prompt", + "name": "humanInputModelPrompt", + "type": "string", + "default": "

Summarize the conversation between the user and the assistant, reiterate the last message from the assistant, and ask if user would like to proceed or if they have any feedback.

\n
    \n
  • Begin by capturing the key points of the conversation, ensuring that you reflect the main ideas and themes discussed.
  • \n
  • Then, clearly reproduce the last message sent by the assistant to maintain continuity. Make sure the whole message is reproduced.
  • \n
  • Finally, ask the user if they would like to proceed, or provide any feedback on the last assistant message
  • \n
\n

Output Format The output should be structured in three parts in text:

\n
    \n
  • A summary of the conversation (1-3 sentences).
  • \n
  • The last assistant message (exactly as it appeared).
  • \n
  • Ask the user if they would like to proceed, or provide any feedback on last assistant message. No other explanation and elaboration is needed.
  • \n
\n", + "acceptVariable": true, + "generateInstruction": true, + "rows": 4, + "show": { + "humanInputDescriptionType": "dynamic" + }, + "id": "humanInputAgentflow_0-input-humanInputModelPrompt-string", + "display": false + }, + { + "label": "Enable Feedback", + "name": "humanInputEnableFeedback", + "type": "boolean", + "default": true, + "id": "humanInputAgentflow_0-input-humanInputEnableFeedback-boolean", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "humanInputDescriptionType": "fixed", + "humanInputEnableFeedback": true, + "humanInputModelConfig": { + "cache": "", + "modelName": "gpt-4o-mini", + "temperature": 0.9, + "streaming": true, + "maxTokens": "", + "topP": "", + "frequencyPenalty": "", + "presencePenalty": "", + "timeout": "", + "strictToolCalling": "", + "stopSequence": "", + "basepath": "", + "proxyUrl": "", + "baseOptions": "", + "allowImageUploads": "", + "imageResolution": "low", + "reasoningEffort": "medium", + "humanInputModel": "chatOpenAI" + }, + "humanInputDescription": "

Are you sure you want to proceed?

" + }, + "outputAnchors": [ + { + "id": "humanInputAgentflow_0-output-0", + "label": "Human Input", + "name": "humanInputAgentflow" + }, + { + "id": "humanInputAgentflow_0-output-1", + "label": "Human Input", + "name": "humanInputAgentflow" + } + ], + "outputs": { + "humanInputAgentflow": "" + }, + "selected": false + }, + "type": "agentFlow", + "width": 161, + "height": 80, + "selected": false, + "positionAbsolute": { + "x": 156.05666363734434, + "y": 86.62266545493773 + }, + "dragging": false + }, + { + "id": "directReplyAgentflow_0", + "position": { + "x": 363.0101864947954, + "y": 35.15053748988734 + }, + "data": { + "id": "directReplyAgentflow_0", + "label": "Direct Reply 0", + "version": 1, + "name": "directReplyAgentflow", + "type": "DirectReply", + "color": "#4DDBBB", + "hideOutput": true, + "baseClasses": ["DirectReply"], + "category": "Agent Flows", + "description": "Directly reply to the user with a message", + "inputParams": [ + { + "label": "Message", + "name": "directReplyMessage", + "type": "string", + "rows": 4, + "acceptVariable": true, + "id": "directReplyAgentflow_0-input-directReplyMessage-string", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "directReplyMessage": "

{{ agentAgentflow_0 }}

" + }, + "outputAnchors": [], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 155, + "height": 65, + "selected": false, + "positionAbsolute": { + "x": 363.0101864947954, + "y": 35.15053748988734 + }, + "dragging": false + }, + { + "id": "loopAgentflow_0", + "position": { + "x": 366.5975521223236, + "y": 130.12266545493773 + }, + "data": { + "id": "loopAgentflow_0", + "label": "Loop 0", + "version": 1, + "name": "loopAgentflow", + "type": "Loop", + "color": "#FFA07A", + "hideOutput": true, + "baseClasses": ["Loop"], + "category": "Agent Flows", + "description": "Loop back to a previous node", + "inputParams": [ + { + "label": "Loop Back To", + "name": "loopBackToNode", + "type": "asyncOptions", + "loadMethod": "listPreviousNodes", + "freeSolo": true, + "id": "loopAgentflow_0-input-loopBackToNode-asyncOptions", + "display": true + }, + { + "label": "Max Loop Count", + "name": "maxLoopCount", + "type": "number", + "default": 5, + "id": "loopAgentflow_0-input-maxLoopCount-number", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "loopBackToNode": "agentAgentflow_0-Email Reply Agent", + "maxLoopCount": 5 + }, + "outputAnchors": [], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 113, + "height": 65, + "selected": false, + "positionAbsolute": { + "x": 366.5975521223236, + "y": 130.12266545493773 + }, + "dragging": false + } + ], + "edges": [ + { + "source": "startAgentflow_0", + "sourceHandle": "startAgentflow_0-output-startAgentflow", + "target": "agentAgentflow_0", + "targetHandle": "agentAgentflow_0", + "data": { + "sourceColor": "#7EE787", + "targetColor": "#4DD0E1", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "startAgentflow_0-startAgentflow_0-output-startAgentflow-agentAgentflow_0-agentAgentflow_0" + }, + { + "source": "agentAgentflow_0", + "sourceHandle": "agentAgentflow_0-output-agentAgentflow", + "target": "humanInputAgentflow_0", + "targetHandle": "humanInputAgentflow_0", + 
"data": { + "sourceColor": "#4DD0E1", + "targetColor": "#6E6EFD", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "agentAgentflow_0-agentAgentflow_0-output-agentAgentflow-humanInputAgentflow_0-humanInputAgentflow_0" + }, + { + "source": "humanInputAgentflow_0", + "sourceHandle": "humanInputAgentflow_0-output-0", + "target": "directReplyAgentflow_0", + "targetHandle": "directReplyAgentflow_0", + "data": { + "sourceColor": "#6E6EFD", + "targetColor": "#4DDBBB", + "edgeLabel": "proceed", + "isHumanInput": true + }, + "type": "agentFlow", + "id": "humanInputAgentflow_0-humanInputAgentflow_0-output-0-directReplyAgentflow_0-directReplyAgentflow_0" + }, + { + "source": "humanInputAgentflow_0", + "sourceHandle": "humanInputAgentflow_0-output-1", + "target": "loopAgentflow_0", + "targetHandle": "loopAgentflow_0", + "data": { + "sourceColor": "#6E6EFD", + "targetColor": "#FFA07A", + "edgeLabel": "reject", + "isHumanInput": true + }, + "type": "agentFlow", + "id": "humanInputAgentflow_0-humanInputAgentflow_0-output-1-loopAgentflow_0-loopAgentflow_0" + } + ] +} diff --git a/packages/server/marketplaces/agentflowsv2/Financial Research Agent.json b/packages/server/marketplaces/agentflowsv2/Financial Research Agent.json new file mode 100644 index 000000000..f72312e75 --- /dev/null +++ b/packages/server/marketplaces/agentflowsv2/Financial Research Agent.json @@ -0,0 +1,1503 @@ +{ + "description": "A financial research agent that takes in a query, plan the steps, search the web, and return a detailed report", + "usecases": ["Finance & Accounting"], + "nodes": [ + { + "id": "startAgentflow_0", + "type": "agentFlow", + "position": { + "x": -234.94624728418063, + "y": 84.92919739582129 + }, + "data": { + "id": "startAgentflow_0", + "label": "Start", + "version": 1, + "name": "startAgentflow", + "type": "Start", + "color": "#7EE787", + "hideInput": true, + "baseClasses": ["Start"], + "category": "Agent Flows", + "description": "Starting point of the agentflow", + 
"inputParams": [ + { + "label": "Input Type", + "name": "startInputType", + "type": "options", + "options": [ + { + "label": "Chat Input", + "name": "chatInput", + "description": "Start the conversation with chat input" + }, + { + "label": "Form Input", + "name": "formInput", + "description": "Start the workflow with form inputs" + } + ], + "default": "chatInput", + "id": "startAgentflow_0-input-startInputType-options", + "display": true + }, + { + "label": "Form Title", + "name": "formTitle", + "type": "string", + "placeholder": "Please Fill Out The Form", + "show": { + "startInputType": "formInput" + }, + "id": "startAgentflow_0-input-formTitle-string", + "display": true + }, + { + "label": "Form Description", + "name": "formDescription", + "type": "string", + "placeholder": "Complete all fields below to continue", + "show": { + "startInputType": "formInput" + }, + "id": "startAgentflow_0-input-formDescription-string", + "display": true + }, + { + "label": "Form Input Types", + "name": "formInputTypes", + "description": "Specify the type of form input", + "type": "array", + "show": { + "startInputType": "formInput" + }, + "array": [ + { + "label": "Type", + "name": "type", + "type": "options", + "options": [ + { + "label": "String", + "name": "string" + }, + { + "label": "Number", + "name": "number" + }, + { + "label": "Boolean", + "name": "boolean" + }, + { + "label": "Options", + "name": "options" + } + ], + "default": "string" + }, + { + "label": "Label", + "name": "label", + "type": "string", + "placeholder": "Label for the input" + }, + { + "label": "Variable Name", + "name": "name", + "type": "string", + "placeholder": "Variable name for the input (must be camel case)", + "description": "Variable name must be camel case. For example: firstName, lastName, etc." 
+ }, + { + "label": "Add Options", + "name": "addOptions", + "type": "array", + "show": { + "formInputTypes[$index].type": "options" + }, + "array": [ + { + "label": "Option", + "name": "option", + "type": "string" + } + ] + } + ], + "id": "startAgentflow_0-input-formInputTypes-array", + "display": true + }, + { + "label": "Ephemeral Memory", + "name": "startEphemeralMemory", + "type": "boolean", + "description": "Start fresh for every execution without past chat history", + "optional": true + }, + { + "label": "Flow State", + "name": "startState", + "description": "Runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "string", + "placeholder": "Foo" + }, + { + "label": "Value", + "name": "value", + "type": "string", + "placeholder": "Bar" + } + ], + "id": "startAgentflow_0-input-startState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "startInputType": "formInput", + "formTitle": "Financial Research", + "formDescription": "A financial research agent that takes in a query and returns a detailed report", + "formInputTypes": [ + { + "type": "string", + "label": "Query", + "name": "query", + "addOptions": "" + } + ], + "startState": [ + { + "key": "search_key_reason", + "value": "" + } + ] + }, + "outputAnchors": [ + { + "id": "startAgentflow_0-output-startAgentflow", + "label": "Start", + "name": "startAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "width": 101, + "height": 65, + "selected": false, + "positionAbsolute": { + "x": -234.94624728418063, + "y": 84.92919739582129 + }, + "dragging": false + }, + { + "id": "llmAgentflow_0", + "position": { + "x": -92.42002168895628, + "y": 81.69973969492588 + }, + "data": { + "id": "llmAgentflow_0", + "label": "Planner", + "version": 1, + "name": "llmAgentflow", + "type": "LLM", + "color": "#64B5F6", + "baseClasses": ["LLM"], + "category": "Agent Flows", + "description": "Large
language models to analyze user-provided inputs and generate responses", + "inputParams": [ + { + "label": "Model", + "name": "llmModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "llmAgentflow_0-input-llmModel-asyncOptions", + "display": true + }, + { + "label": "Messages", + "name": "llmMessages", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Role", + "name": "role", + "type": "options", + "options": [ + { + "label": "System", + "name": "system" + }, + { + "label": "Assistant", + "name": "assistant" + }, + { + "label": "Developer", + "name": "developer" + }, + { + "label": "User", + "name": "user" + } + ] + }, + { + "label": "Content", + "name": "content", + "type": "string", + "acceptVariable": true, + "generateInstruction": true, + "rows": 4 + } + ], + "id": "llmAgentflow_0-input-llmMessages-array", + "display": true + }, + { + "label": "Enable Memory", + "name": "llmEnableMemory", + "type": "boolean", + "description": "Enable memory for the conversation thread", + "default": true, + "optional": true, + "id": "llmAgentflow_0-input-llmEnableMemory-boolean", + "display": true + }, + { + "label": "Memory Type", + "name": "llmMemoryType", + "type": "options", + "options": [ + { + "label": "All Messages", + "name": "allMessages", + "description": "Retrieve all messages from the conversation" + }, + { + "label": "Window Size", + "name": "windowSize", + "description": "Uses a fixed window size to surface the last N messages" + }, + { + "label": "Conversation Summary", + "name": "conversationSummary", + "description": "Summarizes the whole conversation" + }, + { + "label": "Conversation Summary Buffer", + "name": "conversationSummaryBuffer", + "description": "Summarize conversations once token limit is reached. 
Default to 2000" + } + ], + "optional": true, + "default": "allMessages", + "show": { + "llmEnableMemory": true + }, + "id": "llmAgentflow_0-input-llmMemoryType-options", + "display": true + }, + { + "label": "Window Size", + "name": "llmMemoryWindowSize", + "type": "number", + "default": "20", + "description": "Uses a fixed window size to surface the last N messages", + "show": { + "llmMemoryType": "windowSize" + }, + "id": "llmAgentflow_0-input-llmMemoryWindowSize-number", + "display": false + }, + { + "label": "Max Token Limit", + "name": "llmMemoryMaxTokenLimit", + "type": "number", + "default": "2000", + "description": "Summarize conversations once token limit is reached. Default to 2000", + "show": { + "llmMemoryType": "conversationSummaryBuffer" + }, + "id": "llmAgentflow_0-input-llmMemoryMaxTokenLimit-number", + "display": false + }, + { + "label": "Input Message", + "name": "llmUserMessage", + "type": "string", + "description": "Add an input message as user message at the end of the conversation", + "rows": 4, + "optional": true, + "acceptVariable": true, + "show": { + "llmEnableMemory": true + }, + "id": "llmAgentflow_0-input-llmUserMessage-string", + "display": true + }, + { + "label": "Return Response As", + "name": "llmReturnResponseAs", + "type": "options", + "options": [ + { + "label": "User Message", + "name": "userMessage" + }, + { + "label": "Assistant Message", + "name": "assistantMessage" + } + ], + "default": "userMessage", + "id": "llmAgentflow_0-input-llmReturnResponseAs-options", + "display": true + }, + { + "label": "JSON Structured Output", + "name": "llmStructuredOutput", + "description": "Instruct the LLM to give output in a JSON structured schema", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "string" + }, + { + "label": "Type", + "name": "type", + "type": "options", + "options": [ + { + "label": "String", + "name": "string" + }, + { + "label": "String 
Array", + "name": "stringArray" + }, + { + "label": "Number", + "name": "number" + }, + { + "label": "Boolean", + "name": "boolean" + }, + { + "label": "Enum", + "name": "enum" + }, + { + "label": "JSON Array", + "name": "jsonArray" + } + ] + }, + { + "label": "Enum Values", + "name": "enumValues", + "type": "string", + "placeholder": "value1, value2, value3", + "description": "Enum values. Separated by comma", + "optional": true, + "show": { + "llmStructuredOutput[$index].type": "enum" + } + }, + { + "label": "JSON Schema", + "name": "jsonSchema", + "type": "code", + "placeholder": "{\n \"answer\": {\n \"type\": \"string\",\n \"description\": \"Value of the answer\"\n },\n \"reason\": {\n \"type\": \"string\",\n \"description\": \"Reason for the answer\"\n },\n \"optional\": {\n \"type\": \"boolean\"\n },\n \"count\": {\n \"type\": \"number\"\n },\n \"children\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"object\",\n \"properties\": {\n \"value\": {\n \"type\": \"string\",\n \"description\": \"Value of the children's answer\"\n }\n }\n }\n }\n}", + "description": "JSON schema for the structured output", + "optional": true, + "show": { + "llmStructuredOutput[$index].type": "jsonArray" + } + }, + { + "label": "Description", + "name": "description", + "type": "string", + "placeholder": "Description of the key" + } + ], + "id": "llmAgentflow_0-input-llmStructuredOutput-array", + "display": true + }, + { + "label": "Update Flow State", + "name": "llmUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": "string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "llmAgentflow_0-input-llmUpdateState-array", + "display": true + } + ], + 
"inputAnchors": [], + "inputs": { + "llmModel": "chatOpenAI", + "llmMessages": [ + { + "role": "system", + "content": "

You are a financial research planner. Given a request for financial analysis, produce a set of web searches to gather the context needed. Aim for recent headlines, earnings calls or 10‑K snippets, analyst commentary, and industry background. Output between 1 and 2 search terms to query for.

" + }, + { + "role": "user", + "content": "

Query:

{{ $form.query }}

" + } + ], + "llmEnableMemory": true, + "llmReturnResponseAs": "userMessage", + "llmStructuredOutput": [ + { + "key": "searches", + "type": "jsonArray", + "enumValues": "", + "jsonSchema": "{\n \"query\": {\n \"type\": \"string\",\n \"description\": \"The search term to feed into a web (or file) search.\"\n },\n \"reason\": {\n \"type\": \"string\",\n \"description\": \"Your reasoning for why this search is relevant.\"\n }\n}", + "description": "A list of searches to perform" + } + ], + "llmUpdateState": [ + { + "key": "search_key_reason", + "value": "

{{ output.searches }}

" + } + ], + "llmModelConfig": { + "cache": "", + "modelName": "gpt-4o-mini", + "temperature": 0.9, + "streaming": true, + "maxTokens": "", + "topP": "", + "frequencyPenalty": "", + "presencePenalty": "", + "timeout": "", + "strictToolCalling": "", + "stopSequence": "", + "basepath": "", + "proxyUrl": "", + "baseOptions": "", + "allowImageUploads": "", + "imageResolution": "low", + "reasoningEffort": "medium", + "llmModel": "chatOpenAI" + }, + "llmUserMessage": "

" + }, + "outputAnchors": [ + { + "id": "llmAgentflow_0-output-llmAgentflow", + "label": "LLM", + "name": "llmAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 168, + "height": 71, + "selected": false, + "positionAbsolute": { + "x": -92.42002168895628, + "y": 81.69973969492588 + }, + "dragging": false + }, + { + "id": "iterationAgentflow_0", + "position": { + "x": 122.70987564816664, + "y": -7.337791594648152 + }, + "data": { + "id": "iterationAgentflow_0", + "label": "Iteration 0", + "version": 1, + "name": "iterationAgentflow", + "type": "Iteration", + "color": "#9C89B8", + "baseClasses": ["Iteration"], + "category": "Agent Flows", + "description": "Execute the nodes within the iteration block through N iterations", + "inputParams": [ + { + "label": "Array Input", + "name": "iterationInput", + "type": "string", + "description": "The input array to iterate over", + "acceptVariable": true, + "rows": 4, + "id": "iterationAgentflow_0-input-iterationInput-string", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "iterationInput": "

{{ $flow.state.search_key_reason }}

" + }, + "outputAnchors": [ + { + "id": "iterationAgentflow_0-output-iterationAgentflow", + "label": "Iteration", + "name": "iterationAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "iteration", + "width": 300, + "height": 250, + "selected": false, + "positionAbsolute": { + "x": 122.70987564816664, + "y": -7.337791594648152 + }, + "dragging": false + }, + { + "id": "agentAgentflow_0", + "position": { + "x": 67.5, + "y": 80.5 + }, + "data": { + "id": "agentAgentflow_0", + "label": "Search Agent", + "version": 1, + "name": "agentAgentflow", + "type": "Agent", + "color": "#4DD0E1", + "baseClasses": ["Agent"], + "category": "Agent Flows", + "description": "Dynamically choose and utilize tools during runtime, enabling multi-step reasoning", + "inputParams": [ + { + "label": "Model", + "name": "agentModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "agentAgentflow_0-input-agentModel-asyncOptions", + "display": true + }, + { + "label": "Messages", + "name": "agentMessages", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Role", + "name": "role", + "type": "options", + "options": [ + { + "label": "System", + "name": "system" + }, + { + "label": "Assistant", + "name": "assistant" + }, + { + "label": "Developer", + "name": "developer" + }, + { + "label": "User", + "name": "user" + } + ] + }, + { + "label": "Content", + "name": "content", + "type": "string", + "acceptVariable": true, + "generateInstruction": true, + "rows": 4 + } + ], + "id": "agentAgentflow_0-input-agentMessages-array", + "display": true + }, + { + "label": "Tools", + "name": "agentTools", + "type": "array", + "optional": true, + "array": [ + { + "label": "Tool", + "name": "agentSelectedTool", + "type": "asyncOptions", + "loadMethod": "listTools", + "loadConfig": true + }, + { + "label": "Require Human Input", + "name": "agentSelectedToolRequiresHumanInput", + "type": "boolean", + "optional": 
true + } + ], + "id": "agentAgentflow_0-input-agentTools-array", + "display": true + }, + { + "label": "Knowledge (Document Stores)", + "name": "agentKnowledgeDocumentStores", + "type": "array", + "description": "Give your agent context about different document sources. Document stores must be upserted in advance.", + "array": [ + { + "label": "Document Store", + "name": "documentStore", + "type": "asyncOptions", + "loadMethod": "listStores" + }, + { + "label": "Describe Knowledge", + "name": "docStoreDescription", + "type": "string", + "generateDocStoreDescription": true, + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_0-input-agentKnowledgeDocumentStores-array", + "display": true + }, + { + "label": "Knowledge (Vector Embeddings)", + "name": "agentKnowledgeVSEmbeddings", + "type": "array", + "description": "Give your agent context about different document sources from existing vector stores and embeddings", + "array": [ + { + "label": "Vector Store", + "name": "vectorStore", + "type": "asyncOptions", + "loadMethod": "listVectorStores", + "loadConfig": true + }, + { + "label": "Embedding Model", + "name": "embeddingModel", + "type": "asyncOptions", + "loadMethod": "listEmbeddings", + "loadConfig": true + }, + { + "label": "Knowledge Name", + "name": "knowledgeName", + "type": "string", + "placeholder": "A short name for the knowledge base, this is useful for the AI to know when and how to search for correct information" + }, + { + "label": "Describe Knowledge", + "name": "knowledgeDescription", + "type": "string", + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": 
"Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_0-input-agentKnowledgeVSEmbeddings-array", + "display": true + }, + { + "label": "Enable Memory", + "name": "agentEnableMemory", + "type": "boolean", + "description": "Enable memory for the conversation thread", + "default": true, + "optional": true, + "id": "agentAgentflow_0-input-agentEnableMemory-boolean", + "display": true + }, + { + "label": "Memory Type", + "name": "agentMemoryType", + "type": "options", + "options": [ + { + "label": "All Messages", + "name": "allMessages", + "description": "Retrieve all messages from the conversation" + }, + { + "label": "Window Size", + "name": "windowSize", + "description": "Uses a fixed window size to surface the last N messages" + }, + { + "label": "Conversation Summary", + "name": "conversationSummary", + "description": "Summarizes the whole conversation" + }, + { + "label": "Conversation Summary Buffer", + "name": "conversationSummaryBuffer", + "description": "Summarize conversations once token limit is reached. Default to 2000" + } + ], + "optional": true, + "default": "allMessages", + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_0-input-agentMemoryType-options", + "display": true + }, + { + "label": "Window Size", + "name": "agentMemoryWindowSize", + "type": "number", + "default": "20", + "description": "Uses a fixed window size to surface the last N messages", + "show": { + "agentMemoryType": "windowSize" + }, + "id": "agentAgentflow_0-input-agentMemoryWindowSize-number", + "display": false + }, + { + "label": "Max Token Limit", + "name": "agentMemoryMaxTokenLimit", + "type": "number", + "default": "2000", + "description": "Summarize conversations once token limit is reached. 
Default to 2000", + "show": { + "agentMemoryType": "conversationSummaryBuffer" + }, + "id": "agentAgentflow_0-input-agentMemoryMaxTokenLimit-number", + "display": false + }, + { + "label": "Input Message", + "name": "agentUserMessage", + "type": "string", + "description": "Add an input message as user message at the end of the conversation", + "rows": 4, + "optional": true, + "acceptVariable": true, + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_0-input-agentUserMessage-string", + "display": true + }, + { + "label": "Return Response As", + "name": "agentReturnResponseAs", + "type": "options", + "options": [ + { + "label": "User Message", + "name": "userMessage" + }, + { + "label": "Assistant Message", + "name": "assistantMessage" + } + ], + "default": "userMessage", + "id": "agentAgentflow_0-input-agentReturnResponseAs-options", + "display": true + }, + { + "label": "Update Flow State", + "name": "agentUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": "string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "agentAgentflow_0-input-agentUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "agentModel": "chatOpenAI", + "agentMessages": [ + { + "role": "system", + "content": "

You are a research assistant specializing in financial topics. Given a search term, use web search to retrieve up-to-date context and produce a short summary of at most 300 words. Focus on key numbers, events, or quotes that will be useful to a financial analyst.

" + } + ], + "agentTools": [ + { + "agentSelectedTool": "googleCustomSearch", + "agentSelectedToolConfig": { + "agentSelectedTool": "googleCustomSearch" + } + } + ], + "agentKnowledgeDocumentStores": "", + "agentEnableMemory": true, + "agentMemoryType": "allMessages", + "agentUserMessage": "

Search term: {{$iteration.query}}

Reason: {{$iteration.reason}}

", + "agentReturnResponseAs": "userMessage", + "agentUpdateState": "", + "agentModelConfig": { + "cache": "", + "modelName": "gpt-4o-mini", + "temperature": 0.9, + "streaming": true, + "maxTokens": "", + "topP": "", + "frequencyPenalty": "", + "presencePenalty": "", + "timeout": "", + "strictToolCalling": "", + "stopSequence": "", + "basepath": "", + "proxyUrl": "", + "baseOptions": "", + "allowImageUploads": "", + "imageResolution": "low", + "reasoningEffort": "medium", + "agentModel": "chatOpenAI" + } + }, + "outputAnchors": [ + { + "id": "agentAgentflow_0-output-agentAgentflow", + "label": "Agent", + "name": "agentAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "parentNode": "iterationAgentflow_0", + "extent": "parent", + "width": 168, + "height": 103, + "selected": false, + "positionAbsolute": { + "x": 190.20987564816664, + "y": 73.16220840535185 + }, + "dragging": false + }, + { + "id": "agentAgentflow_1", + "position": { + "x": 461.76351005035474, + "y": 81.71183989476083 + }, + "data": { + "id": "agentAgentflow_1", + "label": "Writer Agent", + "version": 1, + "name": "agentAgentflow", + "type": "Agent", + "color": "#4DD0E1", + "baseClasses": ["Agent"], + "category": "Agent Flows", + "description": "Dynamically choose and utilize tools during runtime, enabling multi-step reasoning", + "inputParams": [ + { + "label": "Model", + "name": "agentModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "agentAgentflow_1-input-agentModel-asyncOptions", + "display": true + }, + { + "label": "Messages", + "name": "agentMessages", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Role", + "name": "role", + "type": "options", + "options": [ + { + "label": "System", + "name": "system" + }, + { + "label": "Assistant", + "name": "assistant" + }, + { + "label": "Developer", + "name": "developer" + }, + { + "label": "User", + "name": "user" + } + ] + }, + { 
+ "label": "Content", + "name": "content", + "type": "string", + "acceptVariable": true, + "generateInstruction": true, + "rows": 4 + } + ], + "id": "agentAgentflow_1-input-agentMessages-array", + "display": true + }, + { + "label": "Tools", + "name": "agentTools", + "type": "array", + "optional": true, + "array": [ + { + "label": "Tool", + "name": "agentSelectedTool", + "type": "asyncOptions", + "loadMethod": "listTools", + "loadConfig": true + }, + { + "label": "Require Human Input", + "name": "agentSelectedToolRequiresHumanInput", + "type": "boolean", + "optional": true + } + ], + "id": "agentAgentflow_1-input-agentTools-array", + "display": true + }, + { + "label": "Knowledge (Document Stores)", + "name": "agentKnowledgeDocumentStores", + "type": "array", + "description": "Give your agent context about different document sources. Document stores must be upserted in advance.", + "array": [ + { + "label": "Document Store", + "name": "documentStore", + "type": "asyncOptions", + "loadMethod": "listStores" + }, + { + "label": "Describe Knowledge", + "name": "docStoreDescription", + "type": "string", + "generateDocStoreDescription": true, + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_1-input-agentKnowledgeDocumentStores-array", + "display": true + }, + { + "label": "Knowledge (Vector Embeddings)", + "name": "agentKnowledgeVSEmbeddings", + "type": "array", + "description": "Give your agent context about different document sources from existing vector stores and embeddings", + "array": [ + { + "label": "Vector Store", + "name": "vectorStore", + "type": "asyncOptions", + "loadMethod": "listVectorStores", + "loadConfig": true + }, + { + "label": "Embedding Model", + "name": "embeddingModel", 
+ "type": "asyncOptions", + "loadMethod": "listEmbeddings", + "loadConfig": true + }, + { + "label": "Knowledge Name", + "name": "knowledgeName", + "type": "string", + "placeholder": "A short name for the knowledge base, this is useful for the AI to know when and how to search for correct information" + }, + { + "label": "Describe Knowledge", + "name": "knowledgeDescription", + "type": "string", + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_1-input-agentKnowledgeVSEmbeddings-array", + "display": true + }, + { + "label": "Enable Memory", + "name": "agentEnableMemory", + "type": "boolean", + "description": "Enable memory for the conversation thread", + "default": true, + "optional": true, + "id": "agentAgentflow_1-input-agentEnableMemory-boolean", + "display": true + }, + { + "label": "Memory Type", + "name": "agentMemoryType", + "type": "options", + "options": [ + { + "label": "All Messages", + "name": "allMessages", + "description": "Retrieve all messages from the conversation" + }, + { + "label": "Window Size", + "name": "windowSize", + "description": "Uses a fixed window size to surface the last N messages" + }, + { + "label": "Conversation Summary", + "name": "conversationSummary", + "description": "Summarizes the whole conversation" + }, + { + "label": "Conversation Summary Buffer", + "name": "conversationSummaryBuffer", + "description": "Summarize conversations once token limit is reached. 
Default to 2000" + } + ], + "optional": true, + "default": "allMessages", + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_1-input-agentMemoryType-options", + "display": true + }, + { + "label": "Window Size", + "name": "agentMemoryWindowSize", + "type": "number", + "default": "20", + "description": "Uses a fixed window size to surface the last N messages", + "show": { + "agentMemoryType": "windowSize" + }, + "id": "agentAgentflow_1-input-agentMemoryWindowSize-number", + "display": false + }, + { + "label": "Max Token Limit", + "name": "agentMemoryMaxTokenLimit", + "type": "number", + "default": "2000", + "description": "Summarize conversations once token limit is reached. Default to 2000", + "show": { + "agentMemoryType": "conversationSummaryBuffer" + }, + "id": "agentAgentflow_1-input-agentMemoryMaxTokenLimit-number", + "display": false + }, + { + "label": "Input Message", + "name": "agentUserMessage", + "type": "string", + "description": "Add an input message as user message at the end of the conversation", + "rows": 4, + "optional": true, + "acceptVariable": true, + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_1-input-agentUserMessage-string", + "display": true + }, + { + "label": "Return Response As", + "name": "agentReturnResponseAs", + "type": "options", + "options": [ + { + "label": "User Message", + "name": "userMessage" + }, + { + "label": "Assistant Message", + "name": "assistantMessage" + } + ], + "default": "userMessage", + "id": "agentAgentflow_1-input-agentReturnResponseAs-options", + "display": true + }, + { + "label": "Update Flow State", + "name": "agentUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": 
"string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "agentAgentflow_1-input-agentUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "agentModel": "chatOpenAI", + "agentMessages": [ + { + "role": "system", + "content": "

You are a senior financial analyst. You will be provided with the original query and a set of raw search summaries. Your task is to synthesize these into a long-form markdown report (at least several paragraphs) including a short executive summary and follow-up questions.

" + } + ], + "agentTools": "", + "agentKnowledgeDocumentStores": "", + "agentEnableMemory": true, + "agentMemoryType": "allMessages", + "agentUserMessage": "

Original query: {{ $form.query }}

Summarized search results: {{ iterationAgentflow_0 }}

", + "agentReturnResponseAs": "userMessage", + "agentUpdateState": "", + "agentModelConfig": { + "cache": "", + "modelName": "gpt-4o-mini", + "temperature": 0.9, + "streaming": true, + "maxTokens": "", + "topP": "", + "frequencyPenalty": "", + "presencePenalty": "", + "timeout": "", + "strictToolCalling": "", + "stopSequence": "", + "basepath": "", + "proxyUrl": "", + "baseOptions": "", + "allowImageUploads": "", + "imageResolution": "low", + "reasoningEffort": "medium", + "agentModel": "chatOpenAI" + } + }, + "outputAnchors": [ + { + "id": "agentAgentflow_1-output-agentAgentflow", + "label": "Agent", + "name": "agentAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 168, + "height": 71, + "selected": false, + "positionAbsolute": { + "x": 461.76351005035474, + "y": 81.71183989476083 + }, + "dragging": false + }, + { + "id": "stickyNoteAgentflow_0", + "position": { + "x": 214.77714507955716, + "y": -165.2444952661696 + }, + "data": { + "id": "stickyNoteAgentflow_0", + "label": "Sticky Note", + "version": 1, + "name": "stickyNoteAgentflow", + "type": "StickyNote", + "color": "#fee440", + "baseClasses": ["StickyNote"], + "category": "Agent Flows", + "description": "Add notes to the agent flow", + "inputParams": [ + { + "label": "", + "name": "note", + "type": "string", + "rows": 1, + "placeholder": "Type something here", + "optional": true, + "id": "stickyNoteAgentflow_0-input-note-string", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "note": "Search Agent will iterate through the search terms and search the web using tool" + }, + "outputAnchors": [ + { + "id": "stickyNoteAgentflow_0-output-stickyNoteAgentflow", + "label": "Sticky Note", + "name": "stickyNoteAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "stickyNote", + "width": 189, + "height": 142, + "selected": false, + "positionAbsolute": { + "x": 214.77714507955716, + "y": -165.2444952661696 + }, + "dragging": false + }, + { 
+ "id": "stickyNoteAgentflow_1", + "position": { + "x": -100.05436009717414, + "y": -45.56902388417101 + }, + "data": { + "id": "stickyNoteAgentflow_1", + "label": "Sticky Note (1)", + "version": 1, + "name": "stickyNoteAgentflow", + "type": "StickyNote", + "color": "#fee440", + "baseClasses": ["StickyNote"], + "category": "Agent Flows", + "description": "Add notes to the agent flow", + "inputParams": [ + { + "label": "", + "name": "note", + "type": "string", + "rows": 1, + "placeholder": "Type something here", + "optional": true, + "id": "stickyNoteAgentflow_1-input-note-string", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "note": "Planner will generate list of search terms to query for" + }, + "outputAnchors": [ + { + "id": "stickyNoteAgentflow_1-output-stickyNoteAgentflow", + "label": "Sticky Note", + "name": "stickyNoteAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "stickyNote", + "width": 189, + "height": 101, + "selected": false, + "positionAbsolute": { + "x": -100.05436009717414, + "y": -45.56902388417101 + }, + "dragging": false + }, + { + "id": "stickyNoteAgentflow_2", + "position": { + "x": 457.98399139175314, + "y": -35.19227767879839 + }, + "data": { + "id": "stickyNoteAgentflow_2", + "label": "Sticky Note (2)", + "version": 1, + "name": "stickyNoteAgentflow", + "type": "StickyNote", + "color": "#fee440", + "baseClasses": ["StickyNote"], + "category": "Agent Flows", + "description": "Add notes to the agent flow", + "inputParams": [ + { + "label": "", + "name": "note", + "type": "string", + "rows": 1, + "placeholder": "Type something here", + "optional": true, + "id": "stickyNoteAgentflow_2-input-note-string", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "note": "Generate the final report from the search results" + }, + "outputAnchors": [ + { + "id": "stickyNoteAgentflow_2-output-stickyNoteAgentflow", + "label": "Sticky Note", + "name": "stickyNoteAgentflow" + } + ], + "outputs": {}, + 
"selected": false + }, + "type": "stickyNote", + "width": 189, + "height": 101, + "selected": false, + "positionAbsolute": { + "x": 457.98399139175314, + "y": -35.19227767879839 + }, + "dragging": false + } + ], + "edges": [ + { + "source": "startAgentflow_0", + "sourceHandle": "startAgentflow_0-output-startAgentflow", + "target": "llmAgentflow_0", + "targetHandle": "llmAgentflow_0", + "data": { + "sourceColor": "#7EE787", + "targetColor": "#64B5F6", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "startAgentflow_0-startAgentflow_0-output-startAgentflow-llmAgentflow_0-llmAgentflow_0" + }, + { + "source": "llmAgentflow_0", + "sourceHandle": "llmAgentflow_0-output-llmAgentflow", + "target": "iterationAgentflow_0", + "targetHandle": "iterationAgentflow_0", + "data": { + "sourceColor": "#64B5F6", + "targetColor": "#9C89B8", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "llmAgentflow_0-llmAgentflow_0-output-llmAgentflow-iterationAgentflow_0-iterationAgentflow_0" + }, + { + "source": "iterationAgentflow_0", + "sourceHandle": "iterationAgentflow_0-output-iterationAgentflow", + "target": "agentAgentflow_1", + "targetHandle": "agentAgentflow_1", + "data": { + "sourceColor": "#9C89B8", + "targetColor": "#4DD0E1", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "iterationAgentflow_0-iterationAgentflow_0-output-iterationAgentflow-agentAgentflow_1-agentAgentflow_1" + } + ] +} diff --git a/packages/server/marketplaces/agentflowsv2/Iterations.json b/packages/server/marketplaces/agentflowsv2/Iterations.json new file mode 100644 index 000000000..b33dd1a54 --- /dev/null +++ b/packages/server/marketplaces/agentflowsv2/Iterations.json @@ -0,0 +1,1278 @@ +{ + "description": "An agent that can iterate over a list of items and perform actions on each item", + "usecases": ["Agent"], + "nodes": [ + { + "id": "startAgentflow_0", + "type": "agentFlow", + "position": { + "x": -157.7434917749852, + "y": 100.77695246750446 + }, + "data": { + "id": 
"startAgentflow_0", + "label": "Start", + "version": 1, + "name": "startAgentflow", + "type": "Start", + "color": "#7EE787", + "hideInput": true, + "baseClasses": ["Start"], + "category": "Agent Flows", + "description": "Starting point of the agentflow", + "inputParams": [ + { + "label": "Input Type", + "name": "startInputType", + "type": "options", + "options": [ + { + "label": "Chat Input", + "name": "chatInput", + "description": "Start the conversation with chat input" + }, + { + "label": "Form Input", + "name": "formInput", + "description": "Start the workflow with form inputs" + } + ], + "default": "chatInput", + "id": "startAgentflow_0-input-startInputType-options", + "display": true + }, + { + "label": "Form Title", + "name": "formTitle", + "type": "string", + "placeholder": "Please Fill Out The Form", + "show": { + "startInputType": "formInput" + }, + "id": "startAgentflow_0-input-formTitle-string", + "display": false + }, + { + "label": "Form Description", + "name": "formDescription", + "type": "string", + "placeholder": "Complete all fields below to continue", + "show": { + "startInputType": "formInput" + }, + "id": "startAgentflow_0-input-formDescription-string", + "display": false + }, + { + "label": "Form Input Types", + "name": "formInputTypes", + "description": "Specify the type of form input", + "type": "array", + "show": { + "startInputType": "formInput" + }, + "array": [ + { + "label": "Type", + "name": "type", + "type": "options", + "options": [ + { + "label": "String", + "name": "string" + }, + { + "label": "Number", + "name": "number" + }, + { + "label": "Boolean", + "name": "boolean" + }, + { + "label": "Options", + "name": "options" + } + ], + "default": "string" + }, + { + "label": "Label", + "name": "label", + "type": "string", + "placeholder": "Label for the input" + }, + { + "label": "Variable Name", + "name": "name", + "type": "string", + "placeholder": "Variable name for the input (must be camel case)", + "description": "Variable name 
must be camel case. For example: firstName, lastName, etc." + }, + { + "label": "Add Options", + "name": "addOptions", + "type": "array", + "show": { + "formInputTypes[$index].type": "options" + }, + "array": [ + { + "label": "Option", + "name": "option", + "type": "string" + } + ] + } + ], + "id": "startAgentflow_0-input-formInputTypes-array", + "display": false + }, + { + "label": "Ephemeral Memory", + "name": "startEphemeralMemory", + "type": "boolean", + "description": "Start fresh for every execution without past chat history", + "optional": true + }, + { + "label": "Flow State", + "name": "startState", + "description": "Runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "string", + "placeholder": "Foo" + }, + { + "label": "Value", + "name": "value", + "type": "string", + "placeholder": "Bar" + } + ], + "id": "startAgentflow_0-input-startState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "startInputType": "chatInput", + "formTitle": "", + "formDescription": "", + "formInputTypes": "", + "startState": "" + }, + "outputAnchors": [ + { + "id": "startAgentflow_0-output-startAgentflow", + "label": "Start", + "name": "startAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "width": 101, + "height": 65, + "selected": false, + "positionAbsolute": { + "x": -157.7434917749852, + "y": 100.77695246750446 + }, + "dragging": false + }, + { + "id": "iterationAgentflow_0", + "position": { + "x": -13.75, + "y": 8.5 + }, + "data": { + "id": "iterationAgentflow_0", + "label": "Iteration 0", + "version": 1, + "name": "iterationAgentflow", + "type": "Iteration", + "color": "#9C89B8", + "baseClasses": ["Iteration"], + "category": "Agent Flows", + "description": "Execute the nodes within the iteration block through N iterations", + "inputParams": [ + { + "label": "Array Input", + "name": "iterationInput", + "type": "string", + "description": 
"The input array to iterate over", + "acceptVariable": true, + "rows": 4, + "id": "iterationAgentflow_0-input-iterationInput-string", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "iterationInput": "

[{\"item\": \"abc\"}, {\"item\": \"def\"}]

" + }, + "outputAnchors": [ + { + "id": "iterationAgentflow_0-output-iterationAgentflow", + "label": "Iteration", + "name": "iterationAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "iteration", + "width": 481, + "height": 250, + "selected": false, + "positionAbsolute": { + "x": -13.75, + "y": 8.5 + }, + "dragging": false, + "style": { + "width": 481, + "height": 250 + }, + "resizing": false + }, + { + "id": "llmAgentflow_0", + "position": { + "x": 56, + "y": 92 + }, + "data": { + "id": "llmAgentflow_0", + "label": "Gemini Agent", + "version": 1, + "name": "llmAgentflow", + "type": "LLM", + "color": "#64B5F6", + "baseClasses": ["LLM"], + "category": "Agent Flows", + "description": "Large language models to analyze user-provided inputs and generate responses", + "inputParams": [ + { + "label": "Model", + "name": "llmModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "llmAgentflow_0-input-llmModel-asyncOptions", + "display": true + }, + { + "label": "Messages", + "name": "llmMessages", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Role", + "name": "role", + "type": "options", + "options": [ + { + "label": "System", + "name": "system" + }, + { + "label": "Assistant", + "name": "assistant" + }, + { + "label": "Developer", + "name": "developer" + }, + { + "label": "User", + "name": "user" + } + ] + }, + { + "label": "Content", + "name": "content", + "type": "string", + "acceptVariable": true, + "generateInstruction": true, + "rows": 4 + } + ], + "id": "llmAgentflow_0-input-llmMessages-array", + "display": true + }, + { + "label": "Enable Memory", + "name": "llmEnableMemory", + "type": "boolean", + "description": "Enable memory for the conversation thread", + "default": true, + "optional": true, + "id": "llmAgentflow_0-input-llmEnableMemory-boolean", + "display": true + }, + { + "label": "Memory Type", + "name": "llmMemoryType", + "type": "options", + 
"options": [ + { + "label": "All Messages", + "name": "allMessages", + "description": "Retrieve all messages from the conversation" + }, + { + "label": "Window Size", + "name": "windowSize", + "description": "Uses a fixed window size to surface the last N messages" + }, + { + "label": "Conversation Summary", + "name": "conversationSummary", + "description": "Summarizes the whole conversation" + }, + { + "label": "Conversation Summary Buffer", + "name": "conversationSummaryBuffer", + "description": "Summarize conversations once token limit is reached. Default to 2000" + } + ], + "optional": true, + "default": "allMessages", + "show": { + "llmEnableMemory": true + }, + "id": "llmAgentflow_0-input-llmMemoryType-options", + "display": true + }, + { + "label": "Window Size", + "name": "llmMemoryWindowSize", + "type": "number", + "default": "20", + "description": "Uses a fixed window size to surface the last N messages", + "show": { + "llmMemoryType": "windowSize" + }, + "id": "llmAgentflow_0-input-llmMemoryWindowSize-number", + "display": false + }, + { + "label": "Max Token Limit", + "name": "llmMemoryMaxTokenLimit", + "type": "number", + "default": "2000", + "description": "Summarize conversations once token limit is reached. 
Default to 2000", + "show": { + "llmMemoryType": "conversationSummaryBuffer" + }, + "id": "llmAgentflow_0-input-llmMemoryMaxTokenLimit-number", + "display": false + }, + { + "label": "Input Message", + "name": "llmUserMessage", + "type": "string", + "description": "Add an input message as user message at the end of the conversation", + "rows": 4, + "optional": true, + "acceptVariable": true, + "show": { + "llmEnableMemory": true + }, + "id": "llmAgentflow_0-input-llmUserMessage-string", + "display": true + }, + { + "label": "Return Response As", + "name": "llmReturnResponseAs", + "type": "options", + "options": [ + { + "label": "User Message", + "name": "userMessage" + }, + { + "label": "Assistant Message", + "name": "assistantMessage" + } + ], + "default": "userMessage", + "id": "llmAgentflow_0-input-llmReturnResponseAs-options", + "display": true + }, + { + "label": "JSON Structured Output", + "name": "llmStructuredOutput", + "description": "Instruct the LLM to give output in a JSON structured schema", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "string" + }, + { + "label": "Type", + "name": "type", + "type": "options", + "options": [ + { + "label": "String", + "name": "string" + }, + { + "label": "String Array", + "name": "stringArray" + }, + { + "label": "Number", + "name": "number" + }, + { + "label": "Boolean", + "name": "boolean" + }, + { + "label": "Enum", + "name": "enum" + }, + { + "label": "JSON Array", + "name": "jsonArray" + } + ] + }, + { + "label": "Enum Values", + "name": "enumValues", + "type": "string", + "placeholder": "value1, value2, value3", + "description": "Enum values. 
Separated by comma", + "optional": true, + "show": { + "llmStructuredOutput[$index].type": "enum" + } + }, + { + "label": "JSON Schema", + "name": "jsonSchema", + "type": "code", + "placeholder": "{\n \"answer\": {\n \"type\": \"string\",\n \"description\": \"Value of the answer\"\n },\n \"reason\": {\n \"type\": \"string\",\n \"description\": \"Reason for the answer\"\n },\n \"optional\": {\n \"type\": \"boolean\"\n },\n \"count\": {\n \"type\": \"number\"\n },\n \"children\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"object\",\n \"properties\": {\n \"value\": {\n \"type\": \"string\",\n \"description\": \"Value of the children's answer\"\n }\n }\n }\n }\n}", + "description": "JSON schema for the structured output", + "optional": true, + "show": { + "llmStructuredOutput[$index].type": "jsonArray" + } + }, + { + "label": "Description", + "name": "description", + "type": "string", + "placeholder": "Description of the key" + } + ], + "id": "llmAgentflow_0-input-llmStructuredOutput-array", + "display": true + }, + { + "label": "Update Flow State", + "name": "llmUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": "string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "llmAgentflow_0-input-llmUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "llmModel": "chatGoogleGenerativeAI", + "llmMessages": "", + "llmEnableMemory": true, + "llmMemoryType": "allMessages", + "llmUserMessage": "

Reply only:

{{$iteration.item}}

", + "llmReturnResponseAs": "userMessage", + "llmStructuredOutput": "", + "llmUpdateState": "", + "llmModelConfig": { + "credential": "", + "modelName": "gemini-2.0-flash", + "customModelName": "", + "temperature": 0.9, + "streaming": true, + "maxOutputTokens": "", + "topP": "", + "topK": "", + "harmCategory": "", + "harmBlockThreshold": "", + "allowImageUploads": "", + "llmModel": "chatGoogleGenerativeAI" + } + }, + "outputAnchors": [ + { + "id": "llmAgentflow_0-output-llmAgentflow", + "label": "LLM", + "name": "llmAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "parentNode": "iterationAgentflow_0", + "extent": "parent", + "width": 191, + "height": 71, + "selected": false, + "positionAbsolute": { + "x": 42.25, + "y": 100.5 + }, + "dragging": false + }, + { + "id": "llmAgentflow_1", + "position": { + "x": 287.9621736478904, + "y": 92.25785828325522 + }, + "data": { + "id": "llmAgentflow_1", + "label": "Ollama Agent", + "version": 1, + "name": "llmAgentflow", + "type": "LLM", + "color": "#64B5F6", + "baseClasses": ["LLM"], + "category": "Agent Flows", + "description": "Large language models to analyze user-provided inputs and generate responses", + "inputParams": [ + { + "label": "Model", + "name": "llmModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "llmAgentflow_1-input-llmModel-asyncOptions", + "display": true + }, + { + "label": "Messages", + "name": "llmMessages", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Role", + "name": "role", + "type": "options", + "options": [ + { + "label": "System", + "name": "system" + }, + { + "label": "Assistant", + "name": "assistant" + }, + { + "label": "Developer", + "name": "developer" + }, + { + "label": "User", + "name": "user" + } + ] + }, + { + "label": "Content", + "name": "content", + "type": "string", + "acceptVariable": true, + "generateInstruction": true, + "rows": 4 + } + ], + "id": 
"llmAgentflow_1-input-llmMessages-array", + "display": true + }, + { + "label": "Enable Memory", + "name": "llmEnableMemory", + "type": "boolean", + "description": "Enable memory for the conversation thread", + "default": true, + "optional": true, + "id": "llmAgentflow_1-input-llmEnableMemory-boolean", + "display": true + }, + { + "label": "Memory Type", + "name": "llmMemoryType", + "type": "options", + "options": [ + { + "label": "All Messages", + "name": "allMessages", + "description": "Retrieve all messages from the conversation" + }, + { + "label": "Window Size", + "name": "windowSize", + "description": "Uses a fixed window size to surface the last N messages" + }, + { + "label": "Conversation Summary", + "name": "conversationSummary", + "description": "Summarizes the whole conversation" + }, + { + "label": "Conversation Summary Buffer", + "name": "conversationSummaryBuffer", + "description": "Summarize conversations once token limit is reached. Default to 2000" + } + ], + "optional": true, + "default": "allMessages", + "show": { + "llmEnableMemory": true + }, + "id": "llmAgentflow_1-input-llmMemoryType-options", + "display": true + }, + { + "label": "Window Size", + "name": "llmMemoryWindowSize", + "type": "number", + "default": "20", + "description": "Uses a fixed window size to surface the last N messages", + "show": { + "llmMemoryType": "windowSize" + }, + "id": "llmAgentflow_1-input-llmMemoryWindowSize-number", + "display": false + }, + { + "label": "Max Token Limit", + "name": "llmMemoryMaxTokenLimit", + "type": "number", + "default": "2000", + "description": "Summarize conversations once token limit is reached. 
Default to 2000", + "show": { + "llmMemoryType": "conversationSummaryBuffer" + }, + "id": "llmAgentflow_1-input-llmMemoryMaxTokenLimit-number", + "display": false + }, + { + "label": "Input Message", + "name": "llmUserMessage", + "type": "string", + "description": "Add an input message as user message at the end of the conversation", + "rows": 4, + "optional": true, + "acceptVariable": true, + "show": { + "llmEnableMemory": true + }, + "id": "llmAgentflow_1-input-llmUserMessage-string", + "display": true + }, + { + "label": "Return Response As", + "name": "llmReturnResponseAs", + "type": "options", + "options": [ + { + "label": "User Message", + "name": "userMessage" + }, + { + "label": "Assistant Message", + "name": "assistantMessage" + } + ], + "default": "userMessage", + "id": "llmAgentflow_1-input-llmReturnResponseAs-options", + "display": true + }, + { + "label": "JSON Structured Output", + "name": "llmStructuredOutput", + "description": "Instruct the LLM to give output in a JSON structured schema", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "string" + }, + { + "label": "Type", + "name": "type", + "type": "options", + "options": [ + { + "label": "String", + "name": "string" + }, + { + "label": "String Array", + "name": "stringArray" + }, + { + "label": "Number", + "name": "number" + }, + { + "label": "Boolean", + "name": "boolean" + }, + { + "label": "Enum", + "name": "enum" + }, + { + "label": "JSON Array", + "name": "jsonArray" + } + ] + }, + { + "label": "Enum Values", + "name": "enumValues", + "type": "string", + "placeholder": "value1, value2, value3", + "description": "Enum values. 
Separated by comma", + "optional": true, + "show": { + "llmStructuredOutput[$index].type": "enum" + } + }, + { + "label": "JSON Schema", + "name": "jsonSchema", + "type": "code", + "placeholder": "{\n \"answer\": {\n \"type\": \"string\",\n \"description\": \"Value of the answer\"\n },\n \"reason\": {\n \"type\": \"string\",\n \"description\": \"Reason for the answer\"\n },\n \"optional\": {\n \"type\": \"boolean\"\n },\n \"count\": {\n \"type\": \"number\"\n },\n \"children\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"object\",\n \"properties\": {\n \"value\": {\n \"type\": \"string\",\n \"description\": \"Value of the children's answer\"\n }\n }\n }\n }\n}", + "description": "JSON schema for the structured output", + "optional": true, + "show": { + "llmStructuredOutput[$index].type": "jsonArray" + } + }, + { + "label": "Description", + "name": "description", + "type": "string", + "placeholder": "Description of the key" + } + ], + "id": "llmAgentflow_1-input-llmStructuredOutput-array", + "display": true + }, + { + "label": "Update Flow State", + "name": "llmUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": "string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "llmAgentflow_1-input-llmUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "llmModel": "chatOllama", + "llmMessages": "", + "llmEnableMemory": true, + "llmMemoryType": "allMessages", + "llmUserMessage": "

Reply only:

{{$iteration.item}}

", + "llmReturnResponseAs": "userMessage", + "llmStructuredOutput": "", + "llmUpdateState": "", + "llmModelConfig": { + "baseUrl": "http://localhost:11434", + "modelName": "llama3.2", + "temperature": 0.9, + "allowImageUploads": "", + "streaming": true, + "jsonMode": "", + "keepAlive": "5m", + "topP": "", + "topK": "", + "mirostat": "", + "mirostatEta": "", + "mirostatTau": "", + "numCtx": "", + "numGpu": "", + "numThread": "", + "repeatLastN": "", + "repeatPenalty": "", + "stop": "", + "tfsZ": "", + "llmModel": "chatOllama" + } + }, + "outputAnchors": [ + { + "id": "llmAgentflow_1-output-llmAgentflow", + "label": "LLM", + "name": "llmAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "parentNode": "iterationAgentflow_0", + "extent": "parent", + "width": 154, + "height": 71, + "selected": false, + "positionAbsolute": { + "x": 274.2121736478904, + "y": 100.75785828325522 + }, + "dragging": false + }, + { + "id": "agentAgentflow_0", + "position": { + "x": 509.27738295829977, + "y": 97.28505776122253 + }, + "data": { + "id": "agentAgentflow_0", + "label": "Agent", + "version": 1, + "name": "agentAgentflow", + "type": "Agent", + "color": "#4DD0E1", + "baseClasses": ["Agent"], + "category": "Agent Flows", + "description": "Dynamically choose and utilize tools during runtime, enabling multi-step reasoning", + "inputParams": [ + { + "label": "Model", + "name": "agentModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "agentAgentflow_0-input-agentModel-asyncOptions", + "display": true + }, + { + "label": "Messages", + "name": "agentMessages", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Role", + "name": "role", + "type": "options", + "options": [ + { + "label": "System", + "name": "system" + }, + { + "label": "Assistant", + "name": "assistant" + }, + { + "label": "Developer", + "name": "developer" + }, + { + "label": "User", + "name": "user" + } + 
] + }, + { + "label": "Content", + "name": "content", + "type": "string", + "acceptVariable": true, + "generateInstruction": true, + "rows": 4 + } + ], + "id": "agentAgentflow_0-input-agentMessages-array", + "display": true + }, + { + "label": "Tools", + "name": "agentTools", + "type": "array", + "optional": true, + "array": [ + { + "label": "Tool", + "name": "agentSelectedTool", + "type": "asyncOptions", + "loadMethod": "listTools", + "loadConfig": true + }, + { + "label": "Require Human Input", + "name": "agentSelectedToolRequiresHumanInput", + "type": "boolean", + "optional": true + } + ], + "id": "agentAgentflow_0-input-agentTools-array", + "display": true + }, + { + "label": "Knowledge (Document Stores)", + "name": "agentKnowledgeDocumentStores", + "type": "array", + "description": "Give your agent context about different document sources. Document stores must be upserted in advance.", + "array": [ + { + "label": "Document Store", + "name": "documentStore", + "type": "asyncOptions", + "loadMethod": "listStores" + }, + { + "label": "Describe Knowledge", + "name": "docStoreDescription", + "type": "string", + "generateDocStoreDescription": true, + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_0-input-agentKnowledgeDocumentStores-array", + "display": true + }, + { + "label": "Knowledge (Vector Embeddings)", + "name": "agentKnowledgeVSEmbeddings", + "type": "array", + "description": "Give your agent context about different document sources from existing vector stores and embeddings", + "array": [ + { + "label": "Vector Store", + "name": "vectorStore", + "type": "asyncOptions", + "loadMethod": "listVectorStores", + "loadConfig": true + }, + { + "label": "Embedding Model", + "name": 
"embeddingModel", + "type": "asyncOptions", + "loadMethod": "listEmbeddings", + "loadConfig": true + }, + { + "label": "Knowledge Name", + "name": "knowledgeName", + "type": "string", + "placeholder": "A short name for the knowledge base, this is useful for the AI to know when and how to search for correct information" + }, + { + "label": "Describe Knowledge", + "name": "knowledgeDescription", + "type": "string", + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_0-input-agentKnowledgeVSEmbeddings-array", + "display": true + }, + { + "label": "Enable Memory", + "name": "agentEnableMemory", + "type": "boolean", + "description": "Enable memory for the conversation thread", + "default": true, + "optional": true, + "id": "agentAgentflow_0-input-agentEnableMemory-boolean", + "display": true + }, + { + "label": "Memory Type", + "name": "agentMemoryType", + "type": "options", + "options": [ + { + "label": "All Messages", + "name": "allMessages", + "description": "Retrieve all messages from the conversation" + }, + { + "label": "Window Size", + "name": "windowSize", + "description": "Uses a fixed window size to surface the last N messages" + }, + { + "label": "Conversation Summary", + "name": "conversationSummary", + "description": "Summarizes the whole conversation" + }, + { + "label": "Conversation Summary Buffer", + "name": "conversationSummaryBuffer", + "description": "Summarize conversations once token limit is reached. 
Default to 2000" + } + ], + "optional": true, + "default": "allMessages", + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_0-input-agentMemoryType-options", + "display": true + }, + { + "label": "Window Size", + "name": "agentMemoryWindowSize", + "type": "number", + "default": "20", + "description": "Uses a fixed window size to surface the last N messages", + "show": { + "agentMemoryType": "windowSize" + }, + "id": "agentAgentflow_0-input-agentMemoryWindowSize-number", + "display": false + }, + { + "label": "Max Token Limit", + "name": "agentMemoryMaxTokenLimit", + "type": "number", + "default": "2000", + "description": "Summarize conversations once token limit is reached. Default to 2000", + "show": { + "agentMemoryType": "conversationSummaryBuffer" + }, + "id": "agentAgentflow_0-input-agentMemoryMaxTokenLimit-number", + "display": false + }, + { + "label": "Input Message", + "name": "agentUserMessage", + "type": "string", + "description": "Add an input message as user message at the end of the conversation", + "rows": 4, + "optional": true, + "acceptVariable": true, + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_0-input-agentUserMessage-string", + "display": true + }, + { + "label": "Return Response As", + "name": "agentReturnResponseAs", + "type": "options", + "options": [ + { + "label": "User Message", + "name": "userMessage" + }, + { + "label": "Assistant Message", + "name": "assistantMessage" + } + ], + "default": "userMessage", + "id": "agentAgentflow_0-input-agentReturnResponseAs-options", + "display": true + }, + { + "label": "Update Flow State", + "name": "agentUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": 
"string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "agentAgentflow_0-input-agentUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "agentModel": "chatOpenAI", + "agentMessages": "", + "agentTools": "", + "agentKnowledgeDocumentStores": "", + "agentEnableMemory": true, + "agentMemoryType": "allMessages", + "agentUserMessage": "", + "agentReturnResponseAs": "userMessage", + "agentUpdateState": "", + "agentModelConfig": { + "cache": "", + "modelName": "gpt-4o-mini", + "temperature": 0.9, + "streaming": true, + "maxTokens": "", + "topP": "", + "frequencyPenalty": "", + "presencePenalty": "", + "timeout": "", + "strictToolCalling": "", + "stopSequence": "", + "basepath": "", + "proxyUrl": "", + "baseOptions": "", + "allowImageUploads": "", + "imageResolution": "low", + "reasoningEffort": "medium", + "agentModel": "chatOpenAI" + } + }, + "outputAnchors": [ + { + "id": "agentAgentflow_0-output-agentAgentflow", + "label": "Agent", + "name": "agentAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 168, + "height": 71, + "selected": false, + "positionAbsolute": { + "x": 509.27738295829977, + "y": 97.28505776122253 + }, + "dragging": false + } + ], + "edges": [ + { + "source": "startAgentflow_0", + "sourceHandle": "startAgentflow_0-output-startAgentflow", + "target": "iterationAgentflow_0", + "targetHandle": "iterationAgentflow_0", + "data": { + "sourceColor": "#7EE787", + "targetColor": "#9C89B8", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "startAgentflow_0-startAgentflow_0-output-startAgentflow-iterationAgentflow_0-iterationAgentflow_0" + }, + { + "source": "llmAgentflow_0", + "sourceHandle": "llmAgentflow_0-output-llmAgentflow", + "target": "llmAgentflow_1", + "targetHandle": "llmAgentflow_1", + "data": { + "sourceColor": "#64B5F6", + "targetColor": "#64B5F6", + "isHumanInput": false + }, + "zIndex": 9999, + "type": "agentFlow", + "id": 
"llmAgentflow_0-llmAgentflow_0-output-llmAgentflow-llmAgentflow_1-llmAgentflow_1" + }, + { + "source": "iterationAgentflow_0", + "sourceHandle": "iterationAgentflow_0-output-iterationAgentflow", + "target": "agentAgentflow_0", + "targetHandle": "agentAgentflow_0", + "data": { + "sourceColor": "#9C89B8", + "targetColor": "#4DD0E1", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "iterationAgentflow_0-iterationAgentflow_0-output-iterationAgentflow-agentAgentflow_0-agentAgentflow_0" + } + ] +} diff --git a/packages/server/marketplaces/agentflowsv2/Slack Agent.json b/packages/server/marketplaces/agentflowsv2/Slack Agent.json new file mode 100644 index 000000000..cd30db646 --- /dev/null +++ b/packages/server/marketplaces/agentflowsv2/Slack Agent.json @@ -0,0 +1,718 @@ +{ + "description": "An agent that can post message to Slack channel", + "usecases": ["Agent"], + "nodes": [ + { + "id": "startAgentflow_0", + "type": "agentFlow", + "position": { + "x": -192.5, + "y": 68 + }, + "data": { + "id": "startAgentflow_0", + "label": "Start", + "version": 1, + "name": "startAgentflow", + "type": "Start", + "color": "#7EE787", + "hideInput": true, + "baseClasses": ["Start"], + "category": "Agent Flows", + "description": "Starting point of the agentflow", + "inputParams": [ + { + "label": "Input Type", + "name": "startInputType", + "type": "options", + "options": [ + { + "label": "Chat Input", + "name": "chatInput", + "description": "Start the conversation with chat input" + }, + { + "label": "Form Input", + "name": "formInput", + "description": "Start the workflow with form inputs" + } + ], + "default": "chatInput", + "id": "startAgentflow_0-input-startInputType-options", + "display": true + }, + { + "label": "Form Title", + "name": "formTitle", + "type": "string", + "placeholder": "Please Fill Out The Form", + "show": { + "startInputType": "formInput" + }, + "id": "startAgentflow_0-input-formTitle-string", + "display": false + }, + { + "label": "Form Description", + 
"name": "formDescription", + "type": "string", + "placeholder": "Complete all fields below to continue", + "show": { + "startInputType": "formInput" + }, + "id": "startAgentflow_0-input-formDescription-string", + "display": false + }, + { + "label": "Form Input Types", + "name": "formInputTypes", + "description": "Specify the type of form input", + "type": "array", + "show": { + "startInputType": "formInput" + }, + "array": [ + { + "label": "Type", + "name": "type", + "type": "options", + "options": [ + { + "label": "String", + "name": "string" + }, + { + "label": "Number", + "name": "number" + }, + { + "label": "Boolean", + "name": "boolean" + }, + { + "label": "Options", + "name": "options" + } + ], + "default": "string" + }, + { + "label": "Label", + "name": "label", + "type": "string", + "placeholder": "Label for the input" + }, + { + "label": "Variable Name", + "name": "name", + "type": "string", + "placeholder": "Variable name for the input (must be camel case)", + "description": "Variable name must be camel case. For example: firstName, lastName, etc." 
+ }, + { + "label": "Add Options", + "name": "addOptions", + "type": "array", + "show": { + "formInputTypes[$index].type": "options" + }, + "array": [ + { + "label": "Option", + "name": "option", + "type": "string" + } + ] + } + ], + "id": "startAgentflow_0-input-formInputTypes-array", + "display": false + }, + { + "label": "Ephemeral Memory", + "name": "startEphemeralMemory", + "type": "boolean", + "description": "Start fresh for every execution without past chat history", + "optional": true + }, + { + "label": "Flow State", + "name": "startState", + "description": "Runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "string", + "placeholder": "Foo" + }, + { + "label": "Value", + "name": "value", + "type": "string", + "placeholder": "Bar" + } + ], + "id": "startAgentflow_0-input-startState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "startInputType": "chatInput", + "formTitle": "", + "formDescription": "", + "formInputTypes": "", + "startState": "" + }, + "outputAnchors": [ + { + "id": "startAgentflow_0-output-startAgentflow", + "label": "Start", + "name": "startAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "width": 101, + "height": 65, + "selected": false, + "positionAbsolute": { + "x": -192.5, + "y": 68 + }, + "dragging": false + }, + { + "id": "llmAgentflow_0", + "position": { + "x": -31.25, + "y": 64.5 + }, + "data": { + "id": "llmAgentflow_0", + "label": "General Agent", + "version": 1, + "name": "llmAgentflow", + "type": "LLM", + "color": "#64B5F6", + "baseClasses": ["LLM"], + "category": "Agent Flows", + "description": "Large language models to analyze user-provided inputs and generate responses", + "inputParams": [ + { + "label": "Model", + "name": "llmModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "llmAgentflow_0-input-llmModel-asyncOptions", + "display": true + 
}, + { + "label": "Messages", + "name": "llmMessages", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Role", + "name": "role", + "type": "options", + "options": [ + { + "label": "System", + "name": "system" + }, + { + "label": "Assistant", + "name": "assistant" + }, + { + "label": "Developer", + "name": "developer" + }, + { + "label": "User", + "name": "user" + } + ] + }, + { + "label": "Content", + "name": "content", + "type": "string", + "acceptVariable": true, + "generateInstruction": true, + "rows": 4 + } + ], + "id": "llmAgentflow_0-input-llmMessages-array", + "display": true + }, + { + "label": "Enable Memory", + "name": "llmEnableMemory", + "type": "boolean", + "description": "Enable memory for the conversation thread", + "default": true, + "optional": true, + "id": "llmAgentflow_0-input-llmEnableMemory-boolean", + "display": true + }, + { + "label": "Memory Type", + "name": "llmMemoryType", + "type": "options", + "options": [ + { + "label": "All Messages", + "name": "allMessages", + "description": "Retrieve all messages from the conversation" + }, + { + "label": "Window Size", + "name": "windowSize", + "description": "Uses a fixed window size to surface the last N messages" + }, + { + "label": "Conversation Summary", + "name": "conversationSummary", + "description": "Summarizes the whole conversation" + }, + { + "label": "Conversation Summary Buffer", + "name": "conversationSummaryBuffer", + "description": "Summarize conversations once token limit is reached. 
Default to 2000" + } + ], + "optional": true, + "default": "allMessages", + "show": { + "llmEnableMemory": true + }, + "id": "llmAgentflow_0-input-llmMemoryType-options", + "display": true + }, + { + "label": "Window Size", + "name": "llmMemoryWindowSize", + "type": "number", + "default": "20", + "description": "Uses a fixed window size to surface the last N messages", + "show": { + "llmMemoryType": "windowSize" + }, + "id": "llmAgentflow_0-input-llmMemoryWindowSize-number", + "display": false + }, + { + "label": "Max Token Limit", + "name": "llmMemoryMaxTokenLimit", + "type": "number", + "default": "2000", + "description": "Summarize conversations once token limit is reached. Default to 2000", + "show": { + "llmMemoryType": "conversationSummaryBuffer" + }, + "id": "llmAgentflow_0-input-llmMemoryMaxTokenLimit-number", + "display": false + }, + { + "label": "Input Message", + "name": "llmUserMessage", + "type": "string", + "description": "Add an input message as user message at the end of the conversation", + "rows": 4, + "optional": true, + "acceptVariable": true, + "show": { + "llmEnableMemory": true + }, + "id": "llmAgentflow_0-input-llmUserMessage-string", + "display": true + }, + { + "label": "Return Response As", + "name": "llmReturnResponseAs", + "type": "options", + "options": [ + { + "label": "User Message", + "name": "userMessage" + }, + { + "label": "Assistant Message", + "name": "assistantMessage" + } + ], + "default": "userMessage", + "id": "llmAgentflow_0-input-llmReturnResponseAs-options", + "display": true + }, + { + "label": "JSON Structured Output", + "name": "llmStructuredOutput", + "description": "Instruct the LLM to give output in a JSON structured schema", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "string" + }, + { + "label": "Type", + "name": "type", + "type": "options", + "options": [ + { + "label": "String", + "name": "string" + }, + { + "label": "String 
Array", + "name": "stringArray" + }, + { + "label": "Number", + "name": "number" + }, + { + "label": "Boolean", + "name": "boolean" + }, + { + "label": "Enum", + "name": "enum" + }, + { + "label": "JSON Array", + "name": "jsonArray" + } + ] + }, + { + "label": "Enum Values", + "name": "enumValues", + "type": "string", + "placeholder": "value1, value2, value3", + "description": "Enum values. Separated by comma", + "optional": true, + "show": { + "llmStructuredOutput[$index].type": "enum" + } + }, + { + "label": "JSON Schema", + "name": "jsonSchema", + "type": "code", + "placeholder": "{\n \"answer\": {\n \"type\": \"string\",\n \"description\": \"Value of the answer\"\n },\n \"reason\": {\n \"type\": \"string\",\n \"description\": \"Reason for the answer\"\n },\n \"optional\": {\n \"type\": \"boolean\"\n },\n \"count\": {\n \"type\": \"number\"\n },\n \"children\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"object\",\n \"properties\": {\n \"value\": {\n \"type\": \"string\",\n \"description\": \"Value of the children's answer\"\n }\n }\n }\n }\n}", + "description": "JSON schema for the structured output", + "optional": true, + "show": { + "llmStructuredOutput[$index].type": "jsonArray" + } + }, + { + "label": "Description", + "name": "description", + "type": "string", + "placeholder": "Description of the key" + } + ], + "id": "llmAgentflow_0-input-llmStructuredOutput-array", + "display": true + }, + { + "label": "Update Flow State", + "name": "llmUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": "string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "llmAgentflow_0-input-llmUpdateState-array", + "display": true + } + ], + 
"inputAnchors": [], + "inputs": { + "llmModel": "chatOpenAI", + "llmMessages": "", + "llmEnableMemory": true, + "llmMemoryType": "allMessages", + "llmUserMessage": "", + "llmReturnResponseAs": "userMessage", + "llmStructuredOutput": "", + "llmUpdateState": "", + "llmModelConfig": { + "credential": "", + "modelName": "gpt-4o-mini", + "temperature": 0.9, + "streaming": true, + "maxTokens": "", + "topP": "", + "frequencyPenalty": "", + "presencePenalty": "", + "timeout": "", + "strictToolCalling": "", + "stopSequence": "", + "basepath": "", + "proxyUrl": "", + "baseOptions": "", + "allowImageUploads": "", + "imageResolution": "low", + "reasoningEffort": "medium", + "llmModel": "chatOpenAI" + } + }, + "outputAnchors": [ + { + "id": "llmAgentflow_0-output-llmAgentflow", + "label": "LLM", + "name": "llmAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 168, + "height": 71, + "selected": false, + "positionAbsolute": { + "x": -31.25, + "y": 64.5 + }, + "dragging": false + }, + { + "id": "toolAgentflow_0", + "position": { + "x": 182.75, + "y": 64.5 + }, + "data": { + "id": "toolAgentflow_0", + "label": "Slack Reply", + "version": 1, + "name": "toolAgentflow", + "type": "Tool", + "color": "#d4a373", + "baseClasses": ["Tool"], + "category": "Agent Flows", + "description": "Tools allow LLM to interact with external systems", + "inputParams": [ + { + "label": "Tool", + "name": "selectedTool", + "type": "asyncOptions", + "loadMethod": "listTools", + "loadConfig": true, + "id": "toolAgentflow_0-input-selectedTool-asyncOptions", + "display": true + }, + { + "label": "Tool Input Arguments", + "name": "toolInputArgs", + "type": "array", + "acceptVariable": true, + "refresh": true, + "array": [ + { + "label": "Input Argument Name", + "name": "inputArgName", + "type": "asyncOptions", + "loadMethod": "listToolInputArgs", + "refresh": true + }, + { + "label": "Input Argument Value", + "name": "inputArgValue", + "type": "string", + 
"acceptVariable": true + } + ], + "show": { + "selectedTool": ".+" + }, + "id": "toolAgentflow_0-input-toolInputArgs-array", + "display": true + }, + { + "label": "Update Flow State", + "name": "toolUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": "string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "toolAgentflow_0-input-toolUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "selectedTool": "slackMCP", + "toolInputArgs": [ + { + "inputArgName": "channel_id", + "inputArgValue": "

ABCDEFG

" + }, + { + "inputArgName": "text", + "inputArgValue": "

{{ llmAgentflow_0 }}

" + } + ], + "toolUpdateState": "", + "selectedToolConfig": { + "mcpActions": "[\"slack_post_message\"]", + "selectedTool": "slackMCP" + } + }, + "outputAnchors": [ + { + "id": "toolAgentflow_0-output-toolAgentflow", + "label": "Tool", + "name": "toolAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 142, + "height": 71, + "selected": false, + "positionAbsolute": { + "x": 182.75, + "y": 64.5 + }, + "dragging": false + }, + { + "id": "directReplyAgentflow_0", + "position": { + "x": 366.75, + "y": 67.5 + }, + "data": { + "id": "directReplyAgentflow_0", + "label": "Direct Reply To Chat", + "version": 1, + "name": "directReplyAgentflow", + "type": "DirectReply", + "color": "#4DDBBB", + "hideOutput": true, + "baseClasses": ["DirectReply"], + "category": "Agent Flows", + "description": "Directly reply to the user with a message", + "inputParams": [ + { + "label": "Message", + "name": "directReplyMessage", + "type": "string", + "rows": 4, + "acceptVariable": true, + "id": "directReplyAgentflow_0-input-directReplyMessage-string", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "directReplyMessage": "

{{ llmAgentflow_0 }}

" + }, + "outputAnchors": [], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 194, + "height": 65, + "selected": false, + "positionAbsolute": { + "x": 366.75, + "y": 67.5 + }, + "dragging": false + } + ], + "edges": [ + { + "source": "startAgentflow_0", + "sourceHandle": "startAgentflow_0-output-startAgentflow", + "target": "llmAgentflow_0", + "targetHandle": "llmAgentflow_0", + "data": { + "sourceColor": "#7EE787", + "targetColor": "#64B5F6", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "startAgentflow_0-startAgentflow_0-output-startAgentflow-llmAgentflow_0-llmAgentflow_0" + }, + { + "source": "llmAgentflow_0", + "sourceHandle": "llmAgentflow_0-output-llmAgentflow", + "target": "toolAgentflow_0", + "targetHandle": "toolAgentflow_0", + "data": { + "sourceColor": "#64B5F6", + "targetColor": "#d4a373", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "llmAgentflow_0-llmAgentflow_0-output-llmAgentflow-toolAgentflow_0-toolAgentflow_0" + }, + { + "source": "toolAgentflow_0", + "sourceHandle": "toolAgentflow_0-output-toolAgentflow", + "target": "directReplyAgentflow_0", + "targetHandle": "directReplyAgentflow_0", + "data": { + "sourceColor": "#d4a373", + "targetColor": "#4DDBBB", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "toolAgentflow_0-toolAgentflow_0-output-toolAgentflow-directReplyAgentflow_0-directReplyAgentflow_0" + } + ] +} diff --git a/packages/server/marketplaces/agentflowsv2/Supervisor Worker.json b/packages/server/marketplaces/agentflowsv2/Supervisor Worker.json new file mode 100644 index 000000000..dbf60b335 --- /dev/null +++ b/packages/server/marketplaces/agentflowsv2/Supervisor Worker.json @@ -0,0 +1,2080 @@ +{ + "description": "A hierarchical supervisor agent that plan the steps, and delegate tasks to worker agents based on user query", + "usecases": ["Hierarchical Agent Teams"], + "nodes": [ + { + "id": "startAgentflow_0", + "type": "agentFlow", + "position": { + "x": 
-234.25083179589828, + "y": 89.8928676312403 + }, + "data": { + "id": "startAgentflow_0", + "label": "Start", + "version": 1, + "name": "startAgentflow", + "type": "Start", + "color": "#7EE787", + "hideInput": true, + "baseClasses": ["Start"], + "category": "Agent Flows", + "description": "Starting point of the agentflow", + "inputParams": [ + { + "label": "Input Type", + "name": "startInputType", + "type": "options", + "options": [ + { + "label": "Chat Input", + "name": "chatInput", + "description": "Start the conversation with chat input" + }, + { + "label": "Form Input", + "name": "formInput", + "description": "Start the workflow with form inputs" + } + ], + "default": "chatInput", + "id": "startAgentflow_0-input-startInputType-options", + "display": true + }, + { + "label": "Form Title", + "name": "formTitle", + "type": "string", + "placeholder": "Please Fill Out The Form", + "show": { + "startInputType": "formInput" + }, + "id": "startAgentflow_0-input-formTitle-string", + "display": false + }, + { + "label": "Form Description", + "name": "formDescription", + "type": "string", + "placeholder": "Complete all fields below to continue", + "show": { + "startInputType": "formInput" + }, + "id": "startAgentflow_0-input-formDescription-string", + "display": false + }, + { + "label": "Form Input Types", + "name": "formInputTypes", + "description": "Specify the type of form input", + "type": "array", + "show": { + "startInputType": "formInput" + }, + "array": [ + { + "label": "Type", + "name": "type", + "type": "options", + "options": [ + { + "label": "String", + "name": "string" + }, + { + "label": "Number", + "name": "number" + }, + { + "label": "Boolean", + "name": "boolean" + }, + { + "label": "Options", + "name": "options" + } + ], + "default": "string" + }, + { + "label": "Label", + "name": "label", + "type": "string", + "placeholder": "Label for the input" + }, + { + "label": "Variable Name", + "name": "name", + "type": "string", + "placeholder": "Variable name 
for the input (must be camel case)", + "description": "Variable name must be camel case. For example: firstName, lastName, etc." + }, + { + "label": "Add Options", + "name": "addOptions", + "type": "array", + "show": { + "formInputTypes[$index].type": "options" + }, + "array": [ + { + "label": "Option", + "name": "option", + "type": "string" + } + ] + } + ], + "id": "startAgentflow_0-input-formInputTypes-array", + "display": false + }, + { + "label": "Ephemeral Memory", + "name": "startEphemeralMemory", + "type": "boolean", + "description": "Start fresh for every execution without past chat history", + "optional": true + }, + { + "label": "Flow State", + "name": "startState", + "description": "Runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "string", + "placeholder": "Foo" + }, + { + "label": "Value", + "name": "value", + "type": "string", + "placeholder": "Bar" + } + ], + "id": "startAgentflow_0-input-startState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "startInputType": "chatInput", + "formTitle": "", + "formDescription": "", + "formInputTypes": "", + "startState": [ + { + "key": "next", + "value": "" + }, + { + "key": "instruction", + "value": "" + } + ] + }, + "outputAnchors": [ + { + "id": "startAgentflow_0-output-startAgentflow", + "label": "Start", + "name": "startAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "width": 101, + "height": 65, + "selected": false, + "positionAbsolute": { + "x": -234.25083179589828, + "y": 89.8928676312403 + }, + "dragging": false + }, + { + "id": "conditionAgentflow_0", + "position": { + "x": 128.47781848153903, + "y": 73.36847122134466 + }, + "data": { + "id": "conditionAgentflow_0", + "label": "Check next worker", + "version": 1, + "name": "conditionAgentflow", + "type": "Condition", + "color": "#FFB938", + "baseClasses": ["Condition"], + "category": "Agent Flows", + 
"description": "Split flows based on If Else conditions", + "inputParams": [ + { + "label": "Conditions", + "name": "conditions", + "type": "array", + "description": "Values to compare", + "acceptVariable": true, + "default": [ + { + "type": "string", + "value1": "

{{ $flow.state.next }}

", + "operation": "equal", + "value2": "

SOFTWARE

" + } + ], + "array": [ + { + "label": "Type", + "name": "type", + "type": "options", + "options": [ + { + "label": "String", + "name": "string" + }, + { + "label": "Number", + "name": "number" + }, + { + "label": "Boolean", + "name": "boolean" + } + ], + "default": "string" + }, + { + "label": "Value 1", + "name": "value1", + "type": "string", + "default": "", + "description": "First value to be compared with", + "acceptVariable": true, + "show": { + "conditions[$index].type": "string" + } + }, + { + "label": "Operation", + "name": "operation", + "type": "options", + "options": [ + { + "label": "Contains", + "name": "contains" + }, + { + "label": "Ends With", + "name": "endsWith" + }, + { + "label": "Equal", + "name": "equal" + }, + { + "label": "Not Contains", + "name": "notContains" + }, + { + "label": "Not Equal", + "name": "notEqual" + }, + { + "label": "Regex", + "name": "regex" + }, + { + "label": "Starts With", + "name": "startsWith" + }, + { + "label": "Is Empty", + "name": "isEmpty" + }, + { + "label": "Not Empty", + "name": "notEmpty" + } + ], + "default": "equal", + "description": "Type of operation", + "show": { + "conditions[$index].type": "string" + } + }, + { + "label": "Value 2", + "name": "value2", + "type": "string", + "default": "", + "description": "Second value to be compared with", + "acceptVariable": true, + "show": { + "conditions[$index].type": "string" + }, + "hide": { + "conditions[$index].operation": ["isEmpty", "notEmpty"] + } + }, + { + "label": "Value 1", + "name": "value1", + "type": "number", + "default": "", + "description": "First value to be compared with", + "acceptVariable": true, + "show": { + "conditions[$index].type": "number" + } + }, + { + "label": "Operation", + "name": "operation", + "type": "options", + "options": [ + { + "label": "Smaller", + "name": "smaller" + }, + { + "label": "Smaller Equal", + "name": "smallerEqual" + }, + { + "label": "Equal", + "name": "equal" + }, + { + "label": "Not Equal", + "name": 
"notEqual" + }, + { + "label": "Larger", + "name": "larger" + }, + { + "label": "Larger Equal", + "name": "largerEqual" + }, + { + "label": "Is Empty", + "name": "isEmpty" + }, + { + "label": "Not Empty", + "name": "notEmpty" + } + ], + "default": "equal", + "description": "Type of operation", + "show": { + "conditions[$index].type": "number" + } + }, + { + "label": "Value 2", + "name": "value2", + "type": "number", + "default": 0, + "description": "Second value to be compared with", + "acceptVariable": true, + "show": { + "conditions[$index].type": "number" + } + }, + { + "label": "Value 1", + "name": "value1", + "type": "boolean", + "default": false, + "description": "First value to be compared with", + "show": { + "conditions[$index].type": "boolean" + } + }, + { + "label": "Operation", + "name": "operation", + "type": "options", + "options": [ + { + "label": "Equal", + "name": "equal" + }, + { + "label": "Not Equal", + "name": "notEqual" + } + ], + "default": "equal", + "description": "Type of operation", + "show": { + "conditions[$index].type": "boolean" + } + }, + { + "label": "Value 2", + "name": "value2", + "type": "boolean", + "default": false, + "description": "Second value to be compared with", + "show": { + "conditions[$index].type": "boolean" + } + } + ], + "id": "conditionAgentflow_0-input-conditions-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "conditions": [ + { + "type": "string", + "value1": "

{{ $flow.state.next }}

", + "operation": "equal", + "value2": "

SOFTWARE

" + }, + { + "type": "string", + "value1": "

{{ $flow.state.next }}

", + "operation": "equal", + "value2": "

REVIEWER

" + } + ] + }, + "outputAnchors": [ + { + "id": "conditionAgentflow_0-output-0", + "label": 0, + "name": 0, + "description": "Condition 0" + }, + { + "id": "conditionAgentflow_0-output-1", + "label": 1, + "name": 1, + "description": "Condition 1" + }, + { + "id": "conditionAgentflow_0-output-2", + "label": 2, + "name": 2, + "description": "Else" + } + ], + "outputs": { + "conditionAgentflow": "" + }, + "selected": false + }, + "type": "agentFlow", + "width": 184, + "height": 100, + "selected": false, + "positionAbsolute": { + "x": 128.47781848153903, + "y": 73.36847122134466 + }, + "dragging": false + }, + { + "id": "agentAgentflow_1", + "position": { + "x": 352.5679347768288, + "y": -23.510778245391947 + }, + "data": { + "id": "agentAgentflow_1", + "label": "Software Engineer", + "version": 1, + "name": "agentAgentflow", + "type": "Agent", + "color": "#4DD0E1", + "baseClasses": ["Agent"], + "category": "Agent Flows", + "description": "Dynamically choose and utilize tools during runtime, enabling multi-step reasoning", + "inputParams": [ + { + "label": "Model", + "name": "agentModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "agentAgentflow_1-input-agentModel-asyncOptions", + "display": true + }, + { + "label": "Messages", + "name": "agentMessages", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Role", + "name": "role", + "type": "options", + "options": [ + { + "label": "System", + "name": "system" + }, + { + "label": "Assistant", + "name": "assistant" + }, + { + "label": "Developer", + "name": "developer" + }, + { + "label": "User", + "name": "user" + } + ] + }, + { + "label": "Content", + "name": "content", + "type": "string", + "acceptVariable": true, + "generateInstruction": true, + "rows": 4 + } + ], + "id": "agentAgentflow_1-input-agentMessages-array", + "display": true + }, + { + "label": "Tools", + "name": "agentTools", + "type": "array", + "optional": true, + 
"array": [ + { + "label": "Tool", + "name": "agentSelectedTool", + "type": "asyncOptions", + "loadMethod": "listTools", + "loadConfig": true + }, + { + "label": "Require Human Input", + "name": "agentSelectedToolRequiresHumanInput", + "type": "boolean", + "optional": true + } + ], + "id": "agentAgentflow_1-input-agentTools-array", + "display": true + }, + { + "label": "Knowledge (Document Stores)", + "name": "agentKnowledgeDocumentStores", + "type": "array", + "description": "Give your agent context about different document sources. Document stores must be upserted in advance.", + "array": [ + { + "label": "Document Store", + "name": "documentStore", + "type": "asyncOptions", + "loadMethod": "listStores" + }, + { + "label": "Describe Knowledge", + "name": "docStoreDescription", + "type": "string", + "generateDocStoreDescription": true, + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_1-input-agentKnowledgeDocumentStores-array", + "display": true + }, + { + "label": "Knowledge (Vector Embeddings)", + "name": "agentKnowledgeVSEmbeddings", + "type": "array", + "description": "Give your agent context about different document sources from existing vector stores and embeddings", + "array": [ + { + "label": "Vector Store", + "name": "vectorStore", + "type": "asyncOptions", + "loadMethod": "listVectorStores", + "loadConfig": true + }, + { + "label": "Embedding Model", + "name": "embeddingModel", + "type": "asyncOptions", + "loadMethod": "listEmbeddings", + "loadConfig": true + }, + { + "label": "Knowledge Name", + "name": "knowledgeName", + "type": "string", + "placeholder": "A short name for the knowledge base, this is useful for the AI to know when and how to search for correct information" + 
}, + { + "label": "Describe Knowledge", + "name": "knowledgeDescription", + "type": "string", + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_0-input-agentKnowledgeVSEmbeddings-array", + "display": true + }, + { + "label": "Enable Memory", + "name": "agentEnableMemory", + "type": "boolean", + "description": "Enable memory for the conversation thread", + "default": true, + "optional": true, + "id": "agentAgentflow_1-input-agentEnableMemory-boolean", + "display": true + }, + { + "label": "Memory Type", + "name": "agentMemoryType", + "type": "options", + "options": [ + { + "label": "All Messages", + "name": "allMessages", + "description": "Retrieve all messages from the conversation" + }, + { + "label": "Window Size", + "name": "windowSize", + "description": "Uses a fixed window size to surface the last N messages" + }, + { + "label": "Conversation Summary", + "name": "conversationSummary", + "description": "Summarizes the whole conversation" + }, + { + "label": "Conversation Summary Buffer", + "name": "conversationSummaryBuffer", + "description": "Summarize conversations once token limit is reached. 
Default to 2000" + } + ], + "optional": true, + "default": "allMessages", + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_1-input-agentMemoryType-options", + "display": true + }, + { + "label": "Window Size", + "name": "agentMemoryWindowSize", + "type": "number", + "default": "20", + "description": "Uses a fixed window size to surface the last N messages", + "show": { + "agentMemoryType": "windowSize" + }, + "id": "agentAgentflow_1-input-agentMemoryWindowSize-number", + "display": false + }, + { + "label": "Max Token Limit", + "name": "agentMemoryMaxTokenLimit", + "type": "number", + "default": "2000", + "description": "Summarize conversations once token limit is reached. Default to 2000", + "show": { + "agentMemoryType": "conversationSummaryBuffer" + }, + "id": "agentAgentflow_1-input-agentMemoryMaxTokenLimit-number", + "display": false + }, + { + "label": "Input Message", + "name": "agentUserMessage", + "type": "string", + "description": "Add an input message as user message at the end of the conversation", + "rows": 4, + "optional": true, + "acceptVariable": true, + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_1-input-agentUserMessage-string", + "display": true + }, + { + "label": "Return Response As", + "name": "agentReturnResponseAs", + "type": "options", + "options": [ + { + "label": "User Message", + "name": "userMessage" + }, + { + "label": "Assistant Message", + "name": "assistantMessage" + } + ], + "default": "userMessage", + "id": "agentAgentflow_1-input-agentReturnResponseAs-options", + "display": true + }, + { + "label": "Update Flow State", + "name": "agentUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": 
"string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "agentAgentflow_1-input-agentUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "agentModel": "azureChatOpenAI", + "agentMessages": [ + { + "role": "system", + "content": "

As a Senior Software Engineer, you are a pivotal part of our innovative development team. Your expertise and leadership drive the creation of robust, scalable software solutions that meet the needs of our diverse clientele. By applying best practices in software development, you ensure that our products are reliable, efficient, and maintainable.

Your goal is to lead the development of high-quality software solutions.

Utilize your deep technical knowledge and experience to architect, design, and implement software systems that address complex problems. Collaborate closely with other engineers and reviewers to ensure that the solutions you develop align with business objectives and user needs.

Design and implement a new feature for the given task, ensuring it integrates seamlessly with existing systems and meets performance requirements. Use your understanding of {technology} to build this feature. Make sure to adhere to our coding standards and follow best practices.

The output should be a fully functional, well-documented feature that enhances our product's capabilities. Include detailed comments in the code. Pass the code to the Quality Assurance Engineer for review if necessary. Once the review is good enough, produce a finalized version of the code.

" + } + ], + "agentTools": "", + "agentKnowledgeDocumentStores": "", + "agentEnableMemory": true, + "agentMemoryType": "allMessages", + "agentUserMessage": "

{{ $flow.state.instruction }}

", + "agentReturnResponseAs": "userMessage", + "agentUpdateState": "", + "agentModelConfig": { + "credential": "", + "modelName": "gpt-4.1", + "temperature": 0.9, + "maxTokens": "", + "streaming": true, + "topP": "", + "frequencyPenalty": "", + "presencePenalty": "", + "timeout": "", + "basepath": "", + "baseOptions": "", + "allowImageUploads": "", + "imageResolution": "low", + "reasoningEffort": "medium", + "agentModel": "azureChatOpenAI" + } + }, + "outputAnchors": [ + { + "id": "agentAgentflow_1-output-agentAgentflow", + "label": "Agent", + "name": "agentAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 183, + "height": 71, + "selected": false, + "positionAbsolute": { + "x": 352.5679347768288, + "y": -23.510778245391947 + }, + "dragging": false + }, + { + "id": "agentAgentflow_2", + "position": { + "x": 358.5981605238689, + "y": 87.38558154725587 + }, + "data": { + "id": "agentAgentflow_2", + "label": "Code Reviewer", + "version": 1, + "name": "agentAgentflow", + "type": "Agent", + "color": "#4DD0E1", + "baseClasses": ["Agent"], + "category": "Agent Flows", + "description": "Dynamically choose and utilize tools during runtime, enabling multi-step reasoning", + "inputParams": [ + { + "label": "Model", + "name": "agentModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "agentAgentflow_2-input-agentModel-asyncOptions", + "display": true + }, + { + "label": "Messages", + "name": "agentMessages", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Role", + "name": "role", + "type": "options", + "options": [ + { + "label": "System", + "name": "system" + }, + { + "label": "Assistant", + "name": "assistant" + }, + { + "label": "Developer", + "name": "developer" + }, + { + "label": "User", + "name": "user" + } + ] + }, + { + "label": "Content", + "name": "content", + "type": "string", + "acceptVariable": true, + "generateInstruction": true, + 
"rows": 4 + } + ], + "id": "agentAgentflow_2-input-agentMessages-array", + "display": true + }, + { + "label": "Tools", + "name": "agentTools", + "type": "array", + "optional": true, + "array": [ + { + "label": "Tool", + "name": "agentSelectedTool", + "type": "asyncOptions", + "loadMethod": "listTools", + "loadConfig": true + }, + { + "label": "Require Human Input", + "name": "agentSelectedToolRequiresHumanInput", + "type": "boolean", + "optional": true + } + ], + "id": "agentAgentflow_2-input-agentTools-array", + "display": true + }, + { + "label": "Knowledge (Document Stores)", + "name": "agentKnowledgeDocumentStores", + "type": "array", + "description": "Give your agent context about different document sources. Document stores must be upserted in advance.", + "array": [ + { + "label": "Document Store", + "name": "documentStore", + "type": "asyncOptions", + "loadMethod": "listStores" + }, + { + "label": "Describe Knowledge", + "name": "docStoreDescription", + "type": "string", + "generateDocStoreDescription": true, + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_2-input-agentKnowledgeDocumentStores-array", + "display": true + }, + { + "label": "Knowledge (Vector Embeddings)", + "name": "agentKnowledgeVSEmbeddings", + "type": "array", + "description": "Give your agent context about different document sources from existing vector stores and embeddings", + "array": [ + { + "label": "Vector Store", + "name": "vectorStore", + "type": "asyncOptions", + "loadMethod": "listVectorStores", + "loadConfig": true + }, + { + "label": "Embedding Model", + "name": "embeddingModel", + "type": "asyncOptions", + "loadMethod": "listEmbeddings", + "loadConfig": true + }, + { + "label": "Knowledge Name", + 
"name": "knowledgeName", + "type": "string", + "placeholder": "A short name for the knowledge base, this is useful for the AI to know when and how to search for correct information" + }, + { + "label": "Describe Knowledge", + "name": "knowledgeDescription", + "type": "string", + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_2-input-agentKnowledgeVSEmbeddings-array", + "display": true + }, + { + "label": "Enable Memory", + "name": "agentEnableMemory", + "type": "boolean", + "description": "Enable memory for the conversation thread", + "default": true, + "optional": true, + "id": "agentAgentflow_2-input-agentEnableMemory-boolean", + "display": true + }, + { + "label": "Memory Type", + "name": "agentMemoryType", + "type": "options", + "options": [ + { + "label": "All Messages", + "name": "allMessages", + "description": "Retrieve all messages from the conversation" + }, + { + "label": "Window Size", + "name": "windowSize", + "description": "Uses a fixed window size to surface the last N messages" + }, + { + "label": "Conversation Summary", + "name": "conversationSummary", + "description": "Summarizes the whole conversation" + }, + { + "label": "Conversation Summary Buffer", + "name": "conversationSummaryBuffer", + "description": "Summarize conversations once token limit is reached. 
Default to 2000" + } + ], + "optional": true, + "default": "allMessages", + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_2-input-agentMemoryType-options", + "display": true + }, + { + "label": "Window Size", + "name": "agentMemoryWindowSize", + "type": "number", + "default": "20", + "description": "Uses a fixed window size to surface the last N messages", + "show": { + "agentMemoryType": "windowSize" + }, + "id": "agentAgentflow_2-input-agentMemoryWindowSize-number", + "display": false + }, + { + "label": "Max Token Limit", + "name": "agentMemoryMaxTokenLimit", + "type": "number", + "default": "2000", + "description": "Summarize conversations once token limit is reached. Default to 2000", + "show": { + "agentMemoryType": "conversationSummaryBuffer" + }, + "id": "agentAgentflow_2-input-agentMemoryMaxTokenLimit-number", + "display": false + }, + { + "label": "Input Message", + "name": "agentUserMessage", + "type": "string", + "description": "Add an input message as user message at the end of the conversation", + "rows": 4, + "optional": true, + "acceptVariable": true, + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_2-input-agentUserMessage-string", + "display": true + }, + { + "label": "Return Response As", + "name": "agentReturnResponseAs", + "type": "options", + "options": [ + { + "label": "User Message", + "name": "userMessage" + }, + { + "label": "Assistant Message", + "name": "assistantMessage" + } + ], + "default": "userMessage", + "id": "agentAgentflow_2-input-agentReturnResponseAs-options", + "display": true + }, + { + "label": "Update Flow State", + "name": "agentUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": 
"string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "agentAgentflow_2-input-agentUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "agentModel": "chatDeepseek", + "agentMessages": [ + { + "role": "system", + "content": "

As a Quality Assurance Engineer, you are an integral part of our development team, ensuring that our software products are of the highest quality. Your meticulous attention to detail and expertise in testing methodologies are crucial in identifying defects and ensuring that our code meets the highest standards.

Your goal is to ensure the delivery of high-quality software through thorough code review and testing.

Review the codebase for the new feature designed and implemented by the Senior Software Engineer. Your expertise goes beyond mere code inspection; you are adept at ensuring that developments not only function as intended but also adhere to the team's coding standards, enhance maintainability, and seamlessly integrate with existing systems.

With a deep appreciation for collaborative development, you provide constructive feedback, guiding contributors towards best practices and fostering a culture of continuous improvement. Your meticulous approach to reviewing code, coupled with your ability to foresee potential issues and recommend proactive solutions, ensures the delivery of high-quality software that is robust, scalable, and aligned with the team's strategic goals.

Always pass back the review and feedback to the Senior Software Engineer.

" + } + ], + "agentTools": "", + "agentKnowledgeDocumentStores": "", + "agentEnableMemory": true, + "agentMemoryType": "allMessages", + "agentUserMessage": "

{{ $flow.state.instruction }}

", + "agentReturnResponseAs": "userMessage", + "agentUpdateState": "", + "agentModelConfig": { + "credential": "", + "modelName": "deepseek-reasoner", + "temperature": 0.7, + "streaming": true, + "maxTokens": "", + "topP": "", + "frequencyPenalty": "", + "presencePenalty": "", + "timeout": "", + "stopSequence": "", + "baseOptions": "", + "agentModel": "chatDeepseek" + } + }, + "outputAnchors": [ + { + "id": "agentAgentflow_2-output-agentAgentflow", + "label": "Agent", + "name": "agentAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 206, + "height": 71, + "selected": false, + "positionAbsolute": { + "x": 358.5981605238689, + "y": 87.38558154725587 + }, + "dragging": false + }, + { + "id": "agentAgentflow_3", + "position": { + "x": 357.60470406099364, + "y": 192.61532204982643 + }, + "data": { + "id": "agentAgentflow_3", + "label": "Generate Final Answer", + "version": 1, + "name": "agentAgentflow", + "type": "Agent", + "color": "#4DD0E1", + "baseClasses": ["Agent"], + "category": "Agent Flows", + "description": "Dynamically choose and utilize tools during runtime, enabling multi-step reasoning", + "inputParams": [ + { + "label": "Model", + "name": "agentModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "agentAgentflow_3-input-agentModel-asyncOptions", + "display": true + }, + { + "label": "Messages", + "name": "agentMessages", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Role", + "name": "role", + "type": "options", + "options": [ + { + "label": "System", + "name": "system" + }, + { + "label": "Assistant", + "name": "assistant" + }, + { + "label": "Developer", + "name": "developer" + }, + { + "label": "User", + "name": "user" + } + ] + }, + { + "label": "Content", + "name": "content", + "type": "string", + "acceptVariable": true, + "generateInstruction": true, + "rows": 4 + } + ], + "id": 
"agentAgentflow_3-input-agentMessages-array", + "display": true + }, + { + "label": "Tools", + "name": "agentTools", + "type": "array", + "optional": true, + "array": [ + { + "label": "Tool", + "name": "agentSelectedTool", + "type": "asyncOptions", + "loadMethod": "listTools", + "loadConfig": true + }, + { + "label": "Require Human Input", + "name": "agentSelectedToolRequiresHumanInput", + "type": "boolean", + "optional": true + } + ], + "id": "agentAgentflow_3-input-agentTools-array", + "display": true + }, + { + "label": "Knowledge (Document Stores)", + "name": "agentKnowledgeDocumentStores", + "type": "array", + "description": "Give your agent context about different document sources. Document stores must be upserted in advance.", + "array": [ + { + "label": "Document Store", + "name": "documentStore", + "type": "asyncOptions", + "loadMethod": "listStores" + }, + { + "label": "Describe Knowledge", + "name": "docStoreDescription", + "type": "string", + "generateDocStoreDescription": true, + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_3-input-agentKnowledgeDocumentStores-array", + "display": true + }, + { + "label": "Knowledge (Vector Embeddings)", + "name": "agentKnowledgeVSEmbeddings", + "type": "array", + "description": "Give your agent context about different document sources from existing vector stores and embeddings", + "array": [ + { + "label": "Vector Store", + "name": "vectorStore", + "type": "asyncOptions", + "loadMethod": "listVectorStores", + "loadConfig": true + }, + { + "label": "Embedding Model", + "name": "embeddingModel", + "type": "asyncOptions", + "loadMethod": "listEmbeddings", + "loadConfig": true + }, + { + "label": "Knowledge Name", + "name": "knowledgeName", + 
"type": "string", + "placeholder": "A short name for the knowledge base, this is useful for the AI to know when and how to search for correct information" + }, + { + "label": "Describe Knowledge", + "name": "knowledgeDescription", + "type": "string", + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_3-input-agentKnowledgeVSEmbeddings-array", + "display": true + }, + { + "label": "Enable Memory", + "name": "agentEnableMemory", + "type": "boolean", + "description": "Enable memory for the conversation thread", + "default": true, + "optional": true, + "id": "agentAgentflow_3-input-agentEnableMemory-boolean", + "display": true + }, + { + "label": "Memory Type", + "name": "agentMemoryType", + "type": "options", + "options": [ + { + "label": "All Messages", + "name": "allMessages", + "description": "Retrieve all messages from the conversation" + }, + { + "label": "Window Size", + "name": "windowSize", + "description": "Uses a fixed window size to surface the last N messages" + }, + { + "label": "Conversation Summary", + "name": "conversationSummary", + "description": "Summarizes the whole conversation" + }, + { + "label": "Conversation Summary Buffer", + "name": "conversationSummaryBuffer", + "description": "Summarize conversations once token limit is reached. 
Default to 2000" + } + ], + "optional": true, + "default": "allMessages", + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_3-input-agentMemoryType-options", + "display": true + }, + { + "label": "Window Size", + "name": "agentMemoryWindowSize", + "type": "number", + "default": "20", + "description": "Uses a fixed window size to surface the last N messages", + "show": { + "agentMemoryType": "windowSize" + }, + "id": "agentAgentflow_3-input-agentMemoryWindowSize-number", + "display": false + }, + { + "label": "Max Token Limit", + "name": "agentMemoryMaxTokenLimit", + "type": "number", + "default": "2000", + "description": "Summarize conversations once token limit is reached. Default to 2000", + "show": { + "agentMemoryType": "conversationSummaryBuffer" + }, + "id": "agentAgentflow_3-input-agentMemoryMaxTokenLimit-number", + "display": false + }, + { + "label": "Input Message", + "name": "agentUserMessage", + "type": "string", + "description": "Add an input message as user message at the end of the conversation", + "rows": 4, + "optional": true, + "acceptVariable": true, + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_3-input-agentUserMessage-string", + "display": true + }, + { + "label": "Return Response As", + "name": "agentReturnResponseAs", + "type": "options", + "options": [ + { + "label": "User Message", + "name": "userMessage" + }, + { + "label": "Assistant Message", + "name": "assistantMessage" + } + ], + "default": "userMessage", + "id": "agentAgentflow_3-input-agentReturnResponseAs-options", + "display": true + }, + { + "label": "Update Flow State", + "name": "agentUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": 
"string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "agentAgentflow_3-input-agentUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "agentModel": "chatAnthropic", + "agentMessages": "", + "agentTools": "", + "agentKnowledgeDocumentStores": "", + "agentEnableMemory": true, + "agentMemoryType": "allMessages", + "agentUserMessage": "

Given the above conversations, generate a detailed solution developed by the software engineer and code reviewer. Include the full code, improvements, and review.

", + "agentReturnResponseAs": "userMessage", + "agentUpdateState": "", + "agentModelConfig": { + "credential": "", + "modelName": "claude-3-7-sonnet-latest", + "temperature": 0.9, + "streaming": true, + "maxTokensToSample": "", + "topP": "", + "topK": "", + "extendedThinking": "", + "budgetTokens": 1024, + "allowImageUploads": "", + "agentModel": "chatAnthropic" + } + }, + "outputAnchors": [ + { + "id": "agentAgentflow_3-output-agentAgentflow", + "label": "Agent", + "name": "agentAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 231, + "height": 71, + "selected": false, + "positionAbsolute": { + "x": 357.60470406099364, + "y": 192.61532204982643 + }, + "dragging": false + }, + { + "id": "loopAgentflow_0", + "position": { + "x": 574.050701666824, + "y": -20.0960840521807 + }, + "data": { + "id": "loopAgentflow_0", + "label": "Loop to Supervisor", + "version": 1, + "name": "loopAgentflow", + "type": "Loop", + "color": "#FFA07A", + "hideOutput": true, + "baseClasses": ["Loop"], + "category": "Agent Flows", + "description": "Loop back to a previous node", + "inputParams": [ + { + "label": "Loop Back To", + "name": "loopBackToNode", + "type": "asyncOptions", + "loadMethod": "listPreviousNodes", + "freeSolo": true, + "id": "loopAgentflow_0-input-loopBackToNode-asyncOptions", + "display": true + }, + { + "label": "Max Loop Count", + "name": "maxLoopCount", + "type": "number", + "default": 5, + "id": "loopAgentflow_0-input-maxLoopCount-number", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "loopBackToNode": "llmAgentflow_0-Supervisor", + "maxLoopCount": 5 + }, + "outputAnchors": [], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 186, + "height": 65, + "selected": false, + "dragging": false, + "positionAbsolute": { + "x": 574.050701666824, + "y": -20.0960840521807 + } + }, + { + "id": "loopAgentflow_1", + "position": { + "x": 600.379151793432, + "y": 90.25732743474846 + }, + 
"data": { + "id": "loopAgentflow_1", + "label": "Loop to Supervisor", + "version": 1, + "name": "loopAgentflow", + "type": "Loop", + "color": "#FFA07A", + "hideOutput": true, + "baseClasses": ["Loop"], + "category": "Agent Flows", + "description": "Loop back to a previous node", + "inputParams": [ + { + "label": "Loop Back To", + "name": "loopBackToNode", + "type": "asyncOptions", + "loadMethod": "listPreviousNodes", + "freeSolo": true, + "id": "loopAgentflow_1-input-loopBackToNode-asyncOptions", + "display": true + }, + { + "label": "Max Loop Count", + "name": "maxLoopCount", + "type": "number", + "default": 5, + "id": "loopAgentflow_1-input-maxLoopCount-number", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "loopBackToNode": "llmAgentflow_0-Supervisor", + "maxLoopCount": 5 + }, + "outputAnchors": [], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 186, + "height": 65, + "selected": false, + "dragging": false, + "positionAbsolute": { + "x": 600.379151793432, + "y": 90.25732743474846 + } + }, + { + "id": "llmAgentflow_0", + "position": { + "x": -78.28788541792727, + "y": 87.1528514813091 + }, + "data": { + "id": "llmAgentflow_0", + "label": "Supervisor", + "version": 1, + "name": "llmAgentflow", + "type": "LLM", + "color": "#64B5F6", + "baseClasses": ["LLM"], + "category": "Agent Flows", + "description": "Large language models to analyze user-provided inputs and generate responses", + "inputParams": [ + { + "label": "Model", + "name": "llmModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "llmAgentflow_0-input-llmModel-asyncOptions", + "display": true + }, + { + "label": "Messages", + "name": "llmMessages", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Role", + "name": "role", + "type": "options", + "options": [ + { + "label": "System", + "name": "system" + }, + { + "label": "Assistant", + "name": "assistant" + }, + { + "label": 
"Developer", + "name": "developer" + }, + { + "label": "User", + "name": "user" + } + ] + }, + { + "label": "Content", + "name": "content", + "type": "string", + "acceptVariable": true, + "generateInstruction": true, + "rows": 4 + } + ], + "id": "llmAgentflow_0-input-llmMessages-array", + "display": true + }, + { + "label": "Enable Memory", + "name": "llmEnableMemory", + "type": "boolean", + "description": "Enable memory for the conversation thread", + "default": true, + "optional": true, + "id": "llmAgentflow_0-input-llmEnableMemory-boolean", + "display": true + }, + { + "label": "Memory Type", + "name": "llmMemoryType", + "type": "options", + "options": [ + { + "label": "All Messages", + "name": "allMessages", + "description": "Retrieve all messages from the conversation" + }, + { + "label": "Window Size", + "name": "windowSize", + "description": "Uses a fixed window size to surface the last N messages" + }, + { + "label": "Conversation Summary", + "name": "conversationSummary", + "description": "Summarizes the whole conversation" + }, + { + "label": "Conversation Summary Buffer", + "name": "conversationSummaryBuffer", + "description": "Summarize conversations once token limit is reached. Default to 2000" + } + ], + "optional": true, + "default": "allMessages", + "show": { + "llmEnableMemory": true + }, + "id": "llmAgentflow_0-input-llmMemoryType-options", + "display": true + }, + { + "label": "Window Size", + "name": "llmMemoryWindowSize", + "type": "number", + "default": "20", + "description": "Uses a fixed window size to surface the last N messages", + "show": { + "llmMemoryType": "windowSize" + }, + "id": "llmAgentflow_0-input-llmMemoryWindowSize-number", + "display": false + }, + { + "label": "Max Token Limit", + "name": "llmMemoryMaxTokenLimit", + "type": "number", + "default": "2000", + "description": "Summarize conversations once token limit is reached. 
Default to 2000", + "show": { + "llmMemoryType": "conversationSummaryBuffer" + }, + "id": "llmAgentflow_0-input-llmMemoryMaxTokenLimit-number", + "display": false + }, + { + "label": "Input Message", + "name": "llmUserMessage", + "type": "string", + "description": "Add an input message as user message at the end of the conversation", + "rows": 4, + "optional": true, + "acceptVariable": true, + "show": { + "llmEnableMemory": true + }, + "id": "llmAgentflow_0-input-llmUserMessage-string", + "display": true + }, + { + "label": "Return Response As", + "name": "llmReturnResponseAs", + "type": "options", + "options": [ + { + "label": "User Message", + "name": "userMessage" + }, + { + "label": "Assistant Message", + "name": "assistantMessage" + } + ], + "default": "userMessage", + "id": "llmAgentflow_0-input-llmReturnResponseAs-options", + "display": true + }, + { + "label": "JSON Structured Output", + "name": "llmStructuredOutput", + "description": "Instruct the LLM to give output in a JSON structured schema", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "string" + }, + { + "label": "Type", + "name": "type", + "type": "options", + "options": [ + { + "label": "String", + "name": "string" + }, + { + "label": "String Array", + "name": "stringArray" + }, + { + "label": "Number", + "name": "number" + }, + { + "label": "Boolean", + "name": "boolean" + }, + { + "label": "Enum", + "name": "enum" + }, + { + "label": "JSON Array", + "name": "jsonArray" + } + ] + }, + { + "label": "Enum Values", + "name": "enumValues", + "type": "string", + "placeholder": "value1, value2, value3", + "description": "Enum values. 
Separated by comma", + "optional": true, + "show": { + "llmStructuredOutput[$index].type": "enum" + } + }, + { + "label": "JSON Schema", + "name": "jsonSchema", + "type": "code", + "placeholder": "{\n \"answer\": {\n \"type\": \"string\",\n \"description\": \"Value of the answer\"\n },\n \"reason\": {\n \"type\": \"string\",\n \"description\": \"Reason for the answer\"\n },\n \"optional\": {\n \"type\": \"boolean\"\n },\n \"count\": {\n \"type\": \"number\"\n },\n \"children\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"object\",\n \"properties\": {\n \"value\": {\n \"type\": \"string\",\n \"description\": \"Value of the children's answer\"\n }\n }\n }\n }\n}", + "description": "JSON schema for the structured output", + "optional": true, + "show": { + "llmStructuredOutput[$index].type": "jsonArray" + } + }, + { + "label": "Description", + "name": "description", + "type": "string", + "placeholder": "Description of the key" + } + ], + "id": "llmAgentflow_0-input-llmStructuredOutput-array", + "display": true + }, + { + "label": "Update Flow State", + "name": "llmUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": "string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "llmAgentflow_0-input-llmUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "llmModel": "chatOpenAI", + "llmMessages": [ + { + "role": "system", + "content": "

You are a supervisor tasked with managing a conversation between the following workers:

- Software Engineer

- Code Reviewer

Given the following user request, respond with the worker to act next.

Each worker will perform a task and respond with their results and status.

When finished, respond with FINISH.

Select strategically to minimize the number of steps taken.

" + } + ], + "llmEnableMemory": true, + "llmMemoryType": "allMessages", + "llmUserMessage": "

Given the conversation above, who should act next? Or should we FINISH? Select one of: SOFTWARE, REVIEWER

", + "llmReturnResponseAs": "userMessage", + "llmStructuredOutput": [ + { + "key": "next", + "type": "enum", + "enumValues": "FINISH, SOFTWARE, REVIEWER", + "jsonSchema": "", + "description": "next worker to act" + }, + { + "key": "instructions", + "type": "string", + "enumValues": "", + "jsonSchema": "", + "description": "The specific instructions of the sub-task the next worker should accomplish." + }, + { + "key": "reasoning", + "type": "string", + "enumValues": "", + "jsonSchema": "", + "description": "The reason why next worker is tasked to do the job" + } + ], + "llmUpdateState": [ + { + "key": "next", + "value": "

{{ output.next }}

" + }, + { + "key": "instruction", + "value": "

{{ output.instructions }}

" + } + ], + "llmModelConfig": { + "cache": "", + "modelName": "gpt-4o-mini", + "temperature": 0.9, + "streaming": true, + "maxTokens": "", + "topP": "", + "frequencyPenalty": "", + "presencePenalty": "", + "timeout": "", + "strictToolCalling": "", + "stopSequence": "", + "basepath": "", + "proxyUrl": "", + "baseOptions": "", + "allowImageUploads": "", + "imageResolution": "low", + "reasoningEffort": "medium", + "llmModel": "chatOpenAI" + } + }, + "outputAnchors": [ + { + "id": "llmAgentflow_0-output-llmAgentflow", + "label": "LLM", + "name": "llmAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 168, + "height": 71, + "selected": false, + "positionAbsolute": { + "x": -78.28788541792727, + "y": 87.1528514813091 + }, + "dragging": false + } + ], + "edges": [ + { + "source": "startAgentflow_0", + "sourceHandle": "startAgentflow_0-output-startAgentflow", + "target": "llmAgentflow_0", + "targetHandle": "llmAgentflow_0", + "data": { + "sourceColor": "#7EE787", + "targetColor": "#64B5F6", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "startAgentflow_0-startAgentflow_0-output-startAgentflow-llmAgentflow_0-llmAgentflow_0" + }, + { + "source": "llmAgentflow_0", + "sourceHandle": "llmAgentflow_0-output-llmAgentflow", + "target": "conditionAgentflow_0", + "targetHandle": "conditionAgentflow_0", + "data": { + "sourceColor": "#64B5F6", + "targetColor": "#FFB938", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "llmAgentflow_0-llmAgentflow_0-output-llmAgentflow-conditionAgentflow_0-conditionAgentflow_0" + }, + { + "source": "conditionAgentflow_0", + "sourceHandle": "conditionAgentflow_0-output-0", + "target": "agentAgentflow_1", + "targetHandle": "agentAgentflow_1", + "data": { + "sourceColor": "#FFB938", + "targetColor": "#4DD0E1", + "edgeLabel": "0", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "conditionAgentflow_0-conditionAgentflow_0-output-0-agentAgentflow_1-agentAgentflow_1" + }, + { + 
"source": "conditionAgentflow_0", + "sourceHandle": "conditionAgentflow_0-output-1", + "target": "agentAgentflow_2", + "targetHandle": "agentAgentflow_2", + "data": { + "sourceColor": "#FFB938", + "targetColor": "#4DD0E1", + "edgeLabel": "1", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "conditionAgentflow_0-conditionAgentflow_0-output-1-agentAgentflow_2-agentAgentflow_2" + }, + { + "source": "conditionAgentflow_0", + "sourceHandle": "conditionAgentflow_0-output-2", + "target": "agentAgentflow_3", + "targetHandle": "agentAgentflow_3", + "data": { + "sourceColor": "#FFB938", + "targetColor": "#4DD0E1", + "edgeLabel": "2", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "conditionAgentflow_0-conditionAgentflow_0-output-2-agentAgentflow_3-agentAgentflow_3" + }, + { + "source": "agentAgentflow_1", + "sourceHandle": "agentAgentflow_1-output-agentAgentflow", + "target": "loopAgentflow_0", + "targetHandle": "loopAgentflow_0", + "data": { + "sourceColor": "#4DD0E1", + "targetColor": "#FFA07A", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "agentAgentflow_1-agentAgentflow_1-output-agentAgentflow-loopAgentflow_0-loopAgentflow_0" + }, + { + "source": "agentAgentflow_2", + "sourceHandle": "agentAgentflow_2-output-agentAgentflow", + "target": "loopAgentflow_1", + "targetHandle": "loopAgentflow_1", + "data": { + "sourceColor": "#4DD0E1", + "targetColor": "#FFA07A", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "agentAgentflow_2-agentAgentflow_2-output-agentAgentflow-loopAgentflow_1-loopAgentflow_1" + } + ] +} diff --git a/packages/server/marketplaces/chatflows/API Agent.json b/packages/server/marketplaces/chatflows/API Agent.json index 154fd3caa..5d3084bec 100644 --- a/packages/server/marketplaces/chatflows/API Agent.json +++ b/packages/server/marketplaces/chatflows/API Agent.json @@ -4,175 +4,116 @@ "usecases": ["Interacting with API"], "nodes": [ { - "width": 300, - "height": 460, - "id": "getApiChain_0", + "id": 
"toolAgent_0", "position": { - "x": 1222.6923202234623, - "y": 359.97676456347756 + "x": 2142.702888476286, + "y": 52.064582962824204 }, "type": "customNode", "data": { - "id": "getApiChain_0", - "label": "GET API Chain", - "version": 1, - "name": "getApiChain", - "type": "GETApiChain", - "baseClasses": ["GETApiChain", "BaseChain", "BaseLangChain"], - "category": "Chains", - "description": "Chain to run queries against GET API", + "id": "toolAgent_0", + "label": "Tool Agent", + "version": 2, + "name": "toolAgent", + "type": "AgentExecutor", + "baseClasses": ["AgentExecutor", "BaseChain", "Runnable"], + "category": "Agents", + "description": "Agent that uses Function Calling to pick the tools and args to call", "inputParams": [ { - "label": "API Documentation", - "name": "apiDocs", + "label": "System Message", + "name": "systemMessage", "type": "string", - "description": "Description of how API works. Please refer to more examples", + "default": "You are a helpful AI assistant.", + "description": "If Chat Prompt Template is provided, this will be ignored", "rows": 4, - "id": "getApiChain_0-input-apiDocs-string" - }, - { - "label": "Headers", - "name": "headers", - "type": "json", - "additionalParams": true, "optional": true, - "id": "getApiChain_0-input-headers-json" + "additionalParams": true, + "id": "toolAgent_0-input-systemMessage-string" }, { - "label": "URL Prompt", - "name": "urlPrompt", - "type": "string", - "description": "Prompt used to tell LLMs how to construct the URL. Must contains {api_docs} and {question}", - "default": "You are given the below API Documentation:\n{api_docs}\nUsing this documentation, generate the full API url to call for answering the user question.\nYou should build the API url in order to get a response that is as short as possible, while still getting the necessary information to answer the question. 
Pay attention to deliberately exclude any unnecessary pieces of data in the API call.\n\nQuestion:{question}\nAPI url:", - "rows": 4, + "label": "Max Iterations", + "name": "maxIterations", + "type": "number", + "optional": true, "additionalParams": true, - "id": "getApiChain_0-input-urlPrompt-string" - }, - { - "label": "Answer Prompt", - "name": "ansPrompt", - "type": "string", - "description": "Prompt used to tell LLMs how to return the API response. Must contains {api_response}, {api_url}, and {question}", - "default": "Given this {api_response} response for {api_url}. use the given response to answer this {question}", - "rows": 4, - "additionalParams": true, - "id": "getApiChain_0-input-ansPrompt-string" + "id": "toolAgent_0-input-maxIterations-number" } ], "inputAnchors": [ { - "label": "Language Model", + "label": "Tools", + "name": "tools", + "type": "Tool", + "list": true, + "id": "toolAgent_0-input-tools-Tool" + }, + { + "label": "Memory", + "name": "memory", + "type": "BaseChatMemory", + "id": "toolAgent_0-input-memory-BaseChatMemory" + }, + { + "label": "Tool Calling Chat Model", "name": "model", - "type": "BaseLanguageModel", - "id": "getApiChain_0-input-model-BaseLanguageModel" - } - ], - "inputs": { - "model": "{{chatOpenAI_1.data.instance}}", - "apiDocs": "BASE URL: https://api.open-meteo.com/\n\nAPI Documentation\nThe API endpoint /v1/forecast accepts a geographical coordinate, a list of weather variables and responds with a JSON hourly weather forecast for 7 days. Time always starts at 0:00 today and contains 168 hours. All URL parameters are listed below:\n\nParameter\tFormat\tRequired\tDefault\tDescription\nlatitude, longitude\tFloating point\tYes\t\tGeographical WGS84 coordinate of the location\nhourly\tString array\tNo\t\tA list of weather variables which should be returned. 
Values can be comma separated, or multiple &hourly= parameter in the URL can be used.\ndaily\tString array\tNo\t\tA list of daily weather variable aggregations which should be returned. Values can be comma separated, or multiple &daily= parameter in the URL can be used. If daily weather variables are specified, parameter timezone is required.\ncurrent_weather\tBool\tNo\tfalse\tInclude current weather conditions in the JSON output.\ntemperature_unit\tString\tNo\tcelsius\tIf fahrenheit is set, all temperature values are converted to Fahrenheit.\nwindspeed_unit\tString\tNo\tkmh\tOther wind speed speed units: ms, mph and kn\nprecipitation_unit\tString\tNo\tmm\tOther precipitation amount units: inch\ntimeformat\tString\tNo\tiso8601\tIf format unixtime is selected, all time values are returned in UNIX epoch time in seconds. Please note that all timestamp are in GMT+0! For daily values with unix timestamps, please apply utc_offset_seconds again to get the correct date.\ntimezone\tString\tNo\tGMT\tIf timezone is set, all timestamps are returned as local-time and data is returned starting at 00:00 local-time. Any time zone name from the time zone database is supported. If auto is set as a time zone, the coordinates will be automatically resolved to the local time zone.\npast_days\tInteger (0-2)\tNo\t0\tIf past_days is set, yesterday or the day before yesterday data are also returned.\nstart_date\nend_date\tString (yyyy-mm-dd)\tNo\t\tThe time interval to get weather data. A day must be specified as an ISO8601 date (e.g. 2022-06-30).\nmodels\tString array\tNo\tauto\tManually select one or more weather models. Per default, the best suitable weather models will be combined.\n\nHourly Parameter Definition\nThe parameter &hourly= accepts the following values. Most weather variables are given as an instantaneous value for the indicated hour. 
Some variables like precipitation are calculated from the preceding hour as an average or sum.\n\nVariable\tValid time\tUnit\tDescription\ntemperature_2m\tInstant\t°C (°F)\tAir temperature at 2 meters above ground\nsnowfall\tPreceding hour sum\tcm (inch)\tSnowfall amount of the preceding hour in centimeters. For the water equivalent in millimeter, divide by 7. E.g. 7 cm snow = 10 mm precipitation water equivalent\nrain\tPreceding hour sum\tmm (inch)\tRain from large scale weather systems of the preceding hour in millimeter\nshowers\tPreceding hour sum\tmm (inch)\tShowers from convective precipitation in millimeters from the preceding hour\nweathercode\tInstant\tWMO code\tWeather condition as a numeric code. Follow WMO weather interpretation codes. See table below for details.\nsnow_depth\tInstant\tmeters\tSnow depth on the ground\nfreezinglevel_height\tInstant\tmeters\tAltitude above sea level of the 0°C level\nvisibility\tInstant\tmeters\tViewing distance in meters. Influenced by low clouds, humidity and aerosols. Maximum visibility is approximately 24 km.", - "headers": "", - "urlPrompt": "You are given the below API Documentation:\n{api_docs}\nUsing this documentation, generate the full API url to call for answering the user question.\nYou should build the API url in order to get a response that is as short as possible, while still getting the necessary information to answer the question. Pay attention to deliberately exclude any unnecessary pieces of data in the API call.\n\nQuestion:{question}\nAPI url:", - "ansPrompt": "Given this {api_response} response for {api_url}. 
use the given response to answer this {question}" - }, - "outputAnchors": [ - { - "id": "getApiChain_0-output-getApiChain-GETApiChain|BaseChain|BaseLangChain", - "name": "getApiChain", - "label": "GETApiChain", - "type": "GETApiChain | BaseChain | BaseLangChain" - } - ], - "outputs": {}, - "selected": false - }, - "selected": false, - "positionAbsolute": { - "x": 1222.6923202234623, - "y": 359.97676456347756 - }, - "dragging": false - }, - { - "width": 300, - "height": 603, - "id": "chainTool_0", - "position": { - "x": 1600.1485877701232, - "y": 276.38970893436533 - }, - "type": "customNode", - "data": { - "id": "chainTool_0", - "label": "Chain Tool", - "version": 1, - "name": "chainTool", - "type": "ChainTool", - "baseClasses": ["ChainTool", "DynamicTool", "Tool", "StructuredTool", "BaseLangChain"], - "category": "Tools", - "description": "Use a chain as allowed tool for agent", - "inputParams": [ - { - "label": "Chain Name", - "name": "name", - "type": "string", - "placeholder": "state-of-union-qa", - "id": "chainTool_0-input-name-string" + "type": "BaseChatModel", + "description": "Only compatible with models that are capable of function calling: ChatOpenAI, ChatMistral, ChatAnthropic, ChatGoogleGenerativeAI, ChatVertexAI, GroqChat", + "id": "toolAgent_0-input-model-BaseChatModel" }, { - "label": "Chain Description", - "name": "description", - "type": "string", - "rows": 3, - "placeholder": "State of the Union QA - useful for when you need to ask questions about the most recent state of the union address.", - "id": "chainTool_0-input-description-string" - }, - { - "label": "Return Direct", - "name": "returnDirect", - "type": "boolean", + "label": "Chat Prompt Template", + "name": "chatPromptTemplate", + "type": "ChatPromptTemplate", + "description": "Override existing prompt with Chat Prompt Template. 
Human Message must includes {input} variable", "optional": true, - "id": "chainTool_0-input-returnDirect-boolean" - } - ], - "inputAnchors": [ + "id": "toolAgent_0-input-chatPromptTemplate-ChatPromptTemplate" + }, { - "label": "Base Chain", - "name": "baseChain", - "type": "BaseChain", - "id": "chainTool_0-input-baseChain-BaseChain" + "label": "Input Moderation", + "description": "Detect text that could generate harmful output and prevent it from being sent to the language model", + "name": "inputModeration", + "type": "Moderation", + "optional": true, + "list": true, + "id": "toolAgent_0-input-inputModeration-Moderation" } ], "inputs": { - "name": "weather-qa", - "description": "useful for when you need to ask question about weather", - "returnDirect": false, - "baseChain": "{{getApiChain_0.data.instance}}" + "tools": ["{{openAPIToolkit_0.data.instance}}"], + "memory": "{{bufferMemory_0.data.instance}}", + "model": "{{chatAnthropic_0.data.instance}}", + "chatPromptTemplate": "", + "systemMessage": "You are an agent that can interact with the API to perform specific tasks based on user requests.\n\nYour main goal is to understand the user's needs, make appropriate API calls, and return the results in a clear format. Ensure you verify inputs before making API requests and handle errors gracefully if the API fails.\n\n# Steps\n\n1. **Receive User Input:** Listen carefully to the user's request and identify key parameters needed for the API call.\n2. **Validate Input:** Ensure that the user input is in the correct format and contains all necessary information.\n3. 
**Make API Call:** Use the provided OpenAPI tools to call appropriate API endpoint with the validated input.\n", + "inputModeration": "", + "maxIterations": "" }, "outputAnchors": [ { - "id": "chainTool_0-output-chainTool-ChainTool|DynamicTool|Tool|StructuredTool|BaseLangChain", - "name": "chainTool", - "label": "ChainTool", - "type": "ChainTool | DynamicTool | Tool | StructuredTool | BaseLangChain" + "id": "toolAgent_0-output-toolAgent-AgentExecutor|BaseChain|Runnable", + "name": "toolAgent", + "label": "AgentExecutor", + "description": "Agent that uses Function Calling to pick the tools and args to call", + "type": "AgentExecutor | BaseChain | Runnable" } ], "outputs": {}, "selected": false }, + "width": 300, + "height": 483, "selected": false, "positionAbsolute": { - "x": 1600.1485877701232, - "y": 276.38970893436533 + "x": 2142.702888476286, + "y": 52.064582962824204 }, "dragging": false }, { - "width": 300, - "height": 253, "id": "bufferMemory_0", "position": { - "x": 1642.0644080121785, - "y": 1715.6131926891728 + "x": 1017.5366991719394, + "y": 70.40237946649512 }, "type": "customNode", "data": { @@ -214,909 +155,282 @@ "id": "bufferMemory_0-output-bufferMemory-BufferMemory|BaseChatMemory|BaseMemory", "name": "bufferMemory", "label": "BufferMemory", + "description": "Retrieve chat messages stored in database", "type": "BufferMemory | BaseChatMemory | BaseMemory" } ], "outputs": {}, "selected": false }, + "width": 300, + "height": 250, "selected": false, "positionAbsolute": { - "x": 1642.0644080121785, - "y": 1715.6131926891728 + "x": 1017.5366991719394, + "y": 70.40237946649512 }, "dragging": false }, { - "width": 300, - "height": 603, - "id": "chainTool_1", + "id": "chatAnthropic_0", "position": { - "x": 1284.7746596034926, - "y": 895.1444797047182 + "x": 1782.2489802995697, + "y": -97.03292069533617 }, "type": "customNode", "data": { - "id": "chainTool_1", - "label": "Chain Tool", - "version": 1, - "name": "chainTool", - "type": "ChainTool", - 
"baseClasses": ["ChainTool", "DynamicTool", "Tool", "StructuredTool", "BaseLangChain"], - "category": "Tools", - "description": "Use a chain as allowed tool for agent", + "id": "chatAnthropic_0", + "label": "ChatAnthropic", + "version": 8, + "name": "chatAnthropic", + "type": "ChatAnthropic", + "baseClasses": ["ChatAnthropic", "ChatAnthropicMessages", "BaseChatModel", "BaseLanguageModel", "Runnable"], + "category": "Chat Models", + "description": "Wrapper around ChatAnthropic large language models that use the Chat endpoint", "inputParams": [ { - "label": "Chain Name", - "name": "name", - "type": "string", - "placeholder": "state-of-union-qa", - "id": "chainTool_1-input-name-string" + "label": "Connect Credential", + "name": "credential", + "type": "credential", + "credentialNames": ["anthropicApi"], + "id": "chatAnthropic_0-input-credential-credential", + "display": true }, { - "label": "Chain Description", - "name": "description", - "type": "string", - "rows": 3, - "placeholder": "State of the Union QA - useful for when you need to ask questions about the most recent state of the union address.", - "id": "chainTool_1-input-description-string" + "label": "Model Name", + "name": "modelName", + "type": "asyncOptions", + "loadMethod": "listModels", + "default": "claude-3-haiku", + "id": "chatAnthropic_0-input-modelName-asyncOptions", + "display": true }, { - "label": "Return Direct", - "name": "returnDirect", - "type": "boolean", + "label": "Temperature", + "name": "temperature", + "type": "number", + "step": 0.1, + "default": 0.9, "optional": true, - "id": "chainTool_1-input-returnDirect-boolean" + "id": "chatAnthropic_0-input-temperature-number", + "display": true + }, + { + "label": "Streaming", + "name": "streaming", + "type": "boolean", + "default": true, + "optional": true, + "additionalParams": true, + "id": "chatAnthropic_0-input-streaming-boolean", + "display": true + }, + { + "label": "Max Tokens", + "name": "maxTokensToSample", + "type": "number", + 
"step": 1, + "optional": true, + "additionalParams": true, + "id": "chatAnthropic_0-input-maxTokensToSample-number", + "display": true + }, + { + "label": "Top P", + "name": "topP", + "type": "number", + "step": 0.1, + "optional": true, + "additionalParams": true, + "id": "chatAnthropic_0-input-topP-number", + "display": true + }, + { + "label": "Top K", + "name": "topK", + "type": "number", + "step": 0.1, + "optional": true, + "additionalParams": true, + "id": "chatAnthropic_0-input-topK-number", + "display": true + }, + { + "label": "Extended Thinking", + "name": "extendedThinking", + "type": "boolean", + "description": "Enable extended thinking for reasoning model such as Claude Sonnet 3.7", + "optional": true, + "additionalParams": true, + "id": "chatAnthropic_0-input-extendedThinking-boolean", + "display": true + }, + { + "label": "Budget Tokens", + "name": "budgetTokens", + "type": "number", + "step": 1, + "default": 1024, + "description": "Maximum number of tokens Claude is allowed use for its internal reasoning process", + "optional": true, + "additionalParams": true, + "id": "chatAnthropic_0-input-budgetTokens-number", + "display": true + }, + { + "label": "Allow Image Uploads", + "name": "allowImageUploads", + "type": "boolean", + "description": "Allow image input. 
Refer to the docs for more details.", + "default": false, + "optional": true, + "id": "chatAnthropic_0-input-allowImageUploads-boolean", + "display": true } ], "inputAnchors": [ { - "label": "Base Chain", - "name": "baseChain", - "type": "BaseChain", - "id": "chainTool_1-input-baseChain-BaseChain" + "label": "Cache", + "name": "cache", + "type": "BaseCache", + "optional": true, + "id": "chatAnthropic_0-input-cache-BaseCache", + "display": true } ], "inputs": { - "name": "discord-bot", - "description": "useful for when you need to send message to Discord", - "returnDirect": "", - "baseChain": "{{postApiChain_0.data.instance}}" + "cache": "", + "modelName": "claude-3-5-haiku-latest", + "temperature": 0.9, + "streaming": true, + "maxTokensToSample": "", + "topP": "", + "topK": "", + "extendedThinking": "", + "budgetTokens": 1024, + "allowImageUploads": "" }, "outputAnchors": [ { - "id": "chainTool_1-output-chainTool-ChainTool|DynamicTool|Tool|StructuredTool|BaseLangChain", - "name": "chainTool", - "label": "ChainTool", - "type": "ChainTool | DynamicTool | Tool | StructuredTool | BaseLangChain" + "id": "chatAnthropic_0-output-chatAnthropic-ChatAnthropic|ChatAnthropicMessages|BaseChatModel|BaseLanguageModel|Runnable", + "name": "chatAnthropic", + "label": "ChatAnthropic", + "description": "Wrapper around ChatAnthropic large language models that use the Chat endpoint", + "type": "ChatAnthropic | ChatAnthropicMessages | BaseChatModel | BaseLanguageModel | Runnable" } ], "outputs": {}, "selected": false }, + "width": 300, + "height": 668, "selected": false, "positionAbsolute": { - "x": 1284.7746596034926, - "y": 895.1444797047182 + "x": 1782.2489802995697, + "y": -97.03292069533617 }, "dragging": false }, { - "width": 300, - "height": 460, - "id": "postApiChain_0", + "id": "openAPIToolkit_0", "position": { - "x": 933.3631140153886, - "y": 974.8756002461283 + "x": 1406.3474125716532, + "y": -26.543208700976493 }, "type": "customNode", "data": { - "id": "postApiChain_0", - 
"label": "POST API Chain", - "version": 1, - "name": "postApiChain", - "type": "POSTApiChain", - "baseClasses": ["POSTApiChain", "BaseChain", "BaseLangChain"], - "category": "Chains", - "description": "Chain to run queries against POST API", + "id": "openAPIToolkit_0", + "label": "OpenAPI Toolkit", + "version": 2, + "name": "openAPIToolkit", + "type": "OpenAPIToolkit", + "baseClasses": ["OpenAPIToolkit", "Tool"], + "category": "Tools", + "description": "Load OpenAPI specification, and converts each API endpoint to a tool", "inputParams": [ { - "label": "API Documentation", - "name": "apiDocs", - "type": "string", - "description": "Description of how API works. Please refer to more examples", - "rows": 4, - "id": "postApiChain_0-input-apiDocs-string" + "label": "YAML File", + "name": "yamlFile", + "type": "file", + "fileType": ".yaml", + "id": "openAPIToolkit_0-input-yamlFile-file", + "display": true + }, + { + "label": "Return Direct", + "name": "returnDirect", + "description": "Return the output of the tool directly to the user", + "type": "boolean", + "optional": true, + "id": "openAPIToolkit_0-input-returnDirect-boolean", + "display": true }, { "label": "Headers", "name": "headers", "type": "json", + "description": "Request headers to be sent with the API request. For example, {\"Authorization\": \"Bearer token\"}", "additionalParams": true, "optional": true, - "id": "postApiChain_0-input-headers-json" + "id": "openAPIToolkit_0-input-headers-json", + "display": true }, { - "label": "URL Prompt", - "name": "urlPrompt", - "type": "string", - "description": "Prompt used to tell LLMs how to construct the URL. 
Must contains {api_docs} and {question}", - "default": "You are given the below API Documentation:\n{api_docs}\nUsing this documentation, generate a json string with two keys: \"url\" and \"data\".\nThe value of \"url\" should be a string, which is the API url to call for answering the user question.\nThe value of \"data\" should be a dictionary of key-value pairs you want to POST to the url as a JSON body.\nBe careful to always use double quotes for strings in the json string.\nYou should build the json string in order to get a response that is as short as possible, while still getting the necessary information to answer the question. Pay attention to deliberately exclude any unnecessary pieces of data in the API call.\n\nQuestion:{question}\njson string:", - "rows": 4, - "additionalParams": true, - "id": "postApiChain_0-input-urlPrompt-string" - }, - { - "label": "Answer Prompt", - "name": "ansPrompt", - "type": "string", - "description": "Prompt used to tell LLMs how to return the API response. Must contains {api_response}, {api_url}, and {question}", - "default": "You are given the below API Documentation:\n{api_docs}\nUsing this documentation, generate a json string with two keys: \"url\" and \"data\".\nThe value of \"url\" should be a string, which is the API url to call for answering the user question.\nThe value of \"data\" should be a dictionary of key-value pairs you want to POST to the url as a JSON body.\nBe careful to always use double quotes for strings in the json string.\nYou should build the json string in order to get a response that is as short as possible, while still getting the necessary information to answer the question. 
Pay attention to deliberately exclude any unnecessary pieces of data in the API call.\n\nQuestion:{question}\njson string: {api_url_body}\n\nHere is the response from the API:\n\n{api_response}\n\nSummarize this response to answer the original question.\n\nSummary:", - "rows": 4, - "additionalParams": true, - "id": "postApiChain_0-input-ansPrompt-string" - } - ], - "inputAnchors": [ - { - "label": "Language Model", - "name": "model", - "type": "BaseLanguageModel", - "id": "postApiChain_0-input-model-BaseLanguageModel" - } - ], - "inputs": { - "model": "{{chatOpenAI_2.data.instance}}", - "apiDocs": "API documentation:\nEndpoint: https://some-discord-webhook.com\n\nThis API is for sending Discord message\n\nQuery body table:\nmessage | string | Message to send | required\n\nResponse schema (string):\nresult | string", - "headers": "", - "urlPrompt": "You are given the below API Documentation:\n{api_docs}\nUsing this documentation, generate a json string with two keys: \"url\" and \"data\".\nThe value of \"url\" should be a string, which is the API url to call for answering the user question.\nThe value of \"data\" should be a dictionary of key-value pairs you want to POST to the url as a JSON body.\nBe careful to always use double quotes for strings in the json string.\nYou should build the json string in order to get a response that is as short as possible, while still getting the necessary information to answer the question. 
Pay attention to deliberately exclude any unnecessary pieces of data in the API call.\n\nQuestion:{question}\njson string:", - "ansPrompt": "You are given the below API Documentation:\n{api_docs}\nUsing this documentation, generate a json string with two keys: \"url\" and \"data\".\nThe value of \"url\" should be a string, which is the API url to call for answering the user question.\nThe value of \"data\" should be a dictionary of key-value pairs you want to POST to the url as a JSON body.\nBe careful to always use double quotes for strings in the json string.\nYou should build the json string in order to get a response that is as short as possible, while still getting the necessary information to answer the question. Pay attention to deliberately exclude any unnecessary pieces of data in the API call.\n\nQuestion:{question}\njson string: {api_url_body}\n\nHere is the response from the API:\n\n{api_response}\n\nSummarize this response to answer the original question.\n\nSummary:" - }, - "outputAnchors": [ - { - "id": "postApiChain_0-output-postApiChain-POSTApiChain|BaseChain|BaseLangChain", - "name": "postApiChain", - "label": "POSTApiChain", - "type": "POSTApiChain | BaseChain | BaseLangChain" - } - ], - "outputs": {}, - "selected": false - }, - "selected": false, - "positionAbsolute": { - "x": 933.3631140153886, - "y": 974.8756002461283 - }, - "dragging": false - }, - { - "width": 300, - "height": 670, - "id": "chatOpenAI_2", - "position": { - "x": 572.8941615312035, - "y": 937.8425220917356 - }, - "type": "customNode", - "data": { - "id": "chatOpenAI_2", - "label": "ChatOpenAI", - "version": 6, - "name": "chatOpenAI", - "type": "ChatOpenAI", - "baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"], - "category": "Chat Models", - "description": "Wrapper around OpenAI large language models that use the Chat endpoint", - "inputParams": [ - { - "label": "Connect Credential", - "name": "credential", - "type": "credential", - "credentialNames": 
["openAIApi"], - "id": "chatOpenAI_2-input-credential-credential" - }, - { - "label": "Model Name", - "name": "modelName", - "type": "asyncOptions", - "loadMethod": "listModels", - "default": "gpt-3.5-turbo", - "id": "chatOpenAI_2-input-modelName-options" - }, - { - "label": "Temperature", - "name": "temperature", - "type": "number", - "default": 0.9, - "optional": true, - "id": "chatOpenAI_2-input-temperature-number" - }, - { - "label": "Max Tokens", - "name": "maxTokens", - "type": "number", - "optional": true, - "additionalParams": true, - "id": "chatOpenAI_2-input-maxTokens-number" - }, - { - "label": "Top Probability", - "name": "topP", - "type": "number", - "optional": true, - "additionalParams": true, - "id": "chatOpenAI_2-input-topP-number" - }, - { - "label": "Frequency Penalty", - "name": "frequencyPenalty", - "type": "number", - "optional": true, - "additionalParams": true, - "id": "chatOpenAI_2-input-frequencyPenalty-number" - }, - { - "label": "Presence Penalty", - "name": "presencePenalty", - "type": "number", - "optional": true, - "additionalParams": true, - "id": "chatOpenAI_2-input-presencePenalty-number" - }, - { - "label": "Timeout", - "name": "timeout", - "type": "number", - "optional": true, - "additionalParams": true, - "id": "chatOpenAI_2-input-timeout-number" - }, - { - "label": "BasePath", - "name": "basepath", - "type": "string", - "optional": true, - "additionalParams": true, - "id": "chatOpenAI_2-input-basepath-string" - }, - { - "label": "BaseOptions", - "name": "baseOptions", - "type": "json", - "optional": true, - "additionalParams": true, - "id": "chatOpenAI_2-input-baseOptions-json" - }, - { - "label": "Allow Image Uploads", - "name": "allowImageUploads", + "label": "Remove null parameters", + "name": "removeNulls", "type": "boolean", - "description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. 
Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent", - "default": false, "optional": true, - "id": "chatOpenAI_2-input-allowImageUploads-boolean" + "description": "Remove all keys with null values from the parsed arguments", + "id": "openAPIToolkit_0-input-removeNulls-boolean", + "display": true }, { - "label": "Image Resolution", - "description": "This parameter controls the resolution in which the model views the image.", - "name": "imageResolution", - "type": "options", - "options": [ - { - "label": "Low", - "name": "low" - }, - { - "label": "High", - "name": "high" - }, - { - "label": "Auto", - "name": "auto" - } - ], - "default": "low", - "optional": false, + "label": "Custom Code", + "name": "customCode", + "type": "code", + "hint": { + "label": "How to use", + "value": "- **Libraries:** \n You can use any libraries imported in Flowise.\n\n- **Tool Input Arguments:** \n Tool input arguments are available as the following variables:\n - `$PathParameters`\n - `$QueryParameters`\n - `$RequestBody`\n\n- **HTTP Requests:** \n By default, you can get the following values for making HTTP requests:\n - `$url`\n - `$options`\n\n- **Default Flow Config:** \n You can access the default flow configuration using these variables:\n - `$flow.sessionId`\n - `$flow.chatId`\n - `$flow.chatflowId`\n - `$flow.input`\n - `$flow.state`\n\n- **Custom Variables:** \n You can get custom variables using the syntax:\n - `$vars.`\n\n- **Return Value:** \n The function must return a **string** value at the end.\n\n```js\nconst fetch = require('node-fetch');\nconst url = $url;\nconst options = $options;\n\ntry {\n\tconst response = await fetch(url, options);\n\tconst resp = await response.json();\n\treturn JSON.stringify(resp);\n} catch (error) {\n\tconsole.error(error);\n\treturn '';\n}\n\n```\n" + }, + "codeExample": "const fetch = require('node-fetch');\nconst url = $url;\nconst options = $options;\n\ntry {\n\tconst response = await fetch(url, 
options);\n\tconst resp = await response.json();\n\treturn JSON.stringify(resp);\n} catch (error) {\n\tconsole.error(error);\n\treturn '';\n}\n", + "description": "Custom code to return the output of the tool. The code should be a function that takes in the input and returns a string", + "hideCodeExecute": true, + "default": "const fetch = require('node-fetch');\nconst url = $url;\nconst options = $options;\n\ntry {\n\tconst response = await fetch(url, options);\n\tconst resp = await response.json();\n\treturn JSON.stringify(resp);\n} catch (error) {\n\tconsole.error(error);\n\treturn '';\n}\n", "additionalParams": true, - "id": "chatOpenAI_2-input-imageResolution-options" - } - ], - "inputAnchors": [ - { - "label": "Cache", - "name": "cache", - "type": "BaseCache", - "optional": true, - "id": "chatOpenAI_2-input-cache-BaseCache" - } - ], - "inputs": { - "modelName": "gpt-3.5-turbo", - "temperature": 0.9, - "maxTokens": "", - "topP": "", - "frequencyPenalty": "", - "presencePenalty": "", - "timeout": "", - "basepath": "", - "baseOptions": "", - "allowImageUploads": true, - "imageResolution": "low" - }, - "outputAnchors": [ - { - "id": "chatOpenAI_2-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel", - "name": "chatOpenAI", - "label": "ChatOpenAI", - "type": "ChatOpenAI | BaseChatModel | BaseLanguageModel" - } - ], - "outputs": {}, - "selected": false - }, - "selected": false, - "positionAbsolute": { - "x": 572.8941615312035, - "y": 937.8425220917356 - }, - "dragging": false - }, - { - "width": 300, - "height": 670, - "id": "chatOpenAI_1", - "position": { - "x": 859.9597222599807, - "y": 163.26344718821986 - }, - "type": "customNode", - "data": { - "id": "chatOpenAI_1", - "label": "ChatOpenAI", - "version": 6, - "name": "chatOpenAI", - "type": "ChatOpenAI", - "baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"], - "category": "Chat Models", - "description": "Wrapper around OpenAI large language models that use the Chat endpoint", - 
"inputParams": [ - { - "label": "Connect Credential", - "name": "credential", - "type": "credential", - "credentialNames": ["openAIApi"], - "id": "chatOpenAI_1-input-credential-credential" - }, - { - "label": "Model Name", - "name": "modelName", - "type": "asyncOptions", - "loadMethod": "listModels", - "default": "gpt-3.5-turbo", - "id": "chatOpenAI_1-input-modelName-options" - }, - { - "label": "Temperature", - "name": "temperature", - "type": "number", - "default": 0.9, - "optional": true, - "id": "chatOpenAI_1-input-temperature-number" - }, - { - "label": "Max Tokens", - "name": "maxTokens", - "type": "number", - "optional": true, - "additionalParams": true, - "id": "chatOpenAI_1-input-maxTokens-number" - }, - { - "label": "Top Probability", - "name": "topP", - "type": "number", - "optional": true, - "additionalParams": true, - "id": "chatOpenAI_1-input-topP-number" - }, - { - "label": "Frequency Penalty", - "name": "frequencyPenalty", - "type": "number", - "optional": true, - "additionalParams": true, - "id": "chatOpenAI_1-input-frequencyPenalty-number" - }, - { - "label": "Presence Penalty", - "name": "presencePenalty", - "type": "number", - "optional": true, - "additionalParams": true, - "id": "chatOpenAI_1-input-presencePenalty-number" - }, - { - "label": "Timeout", - "name": "timeout", - "type": "number", - "optional": true, - "additionalParams": true, - "id": "chatOpenAI_1-input-timeout-number" - }, - { - "label": "BasePath", - "name": "basepath", - "type": "string", - "optional": true, - "additionalParams": true, - "id": "chatOpenAI_1-input-basepath-string" - }, - { - "label": "BaseOptions", - "name": "baseOptions", - "type": "json", - "optional": true, - "additionalParams": true, - "id": "chatOpenAI_1-input-baseOptions-json" - }, - { - "label": "Allow Image Uploads", - "name": "allowImageUploads", - "type": "boolean", - "description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. 
Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent", - "default": false, - "optional": true, - "id": "chatOpenAI_1-input-allowImageUploads-boolean" - }, - { - "label": "Image Resolution", - "description": "This parameter controls the resolution in which the model views the image.", - "name": "imageResolution", - "type": "options", - "options": [ - { - "label": "Low", - "name": "low" - }, - { - "label": "High", - "name": "high" - }, - { - "label": "Auto", - "name": "auto" - } - ], - "default": "low", - "optional": false, - "additionalParams": true, - "id": "chatOpenAI_1-input-imageResolution-options" - } - ], - "inputAnchors": [ - { - "label": "Cache", - "name": "cache", - "type": "BaseCache", - "optional": true, - "id": "chatOpenAI_1-input-cache-BaseCache" - } - ], - "inputs": { - "modelName": "gpt-3.5-turbo", - "temperature": "0.6", - "maxTokens": "", - "topP": "", - "frequencyPenalty": "", - "presencePenalty": "", - "timeout": "", - "basepath": "", - "baseOptions": "", - "allowImageUploads": true, - "imageResolution": "low" - }, - "outputAnchors": [ - { - "id": "chatOpenAI_1-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel", - "name": "chatOpenAI", - "label": "ChatOpenAI", - "type": "ChatOpenAI | BaseChatModel | BaseLanguageModel" - } - ], - "outputs": {}, - "selected": false - }, - "selected": false, - "positionAbsolute": { - "x": 859.9597222599807, - "y": 163.26344718821986 - }, - "dragging": false - }, - { - "width": 300, - "height": 670, - "id": "chatOpenAI_3", - "position": { - "x": 1148.338912314111, - "y": 1561.0888070167944 - }, - "type": "customNode", - "data": { - "id": "chatOpenAI_3", - "label": "ChatOpenAI", - "version": 6, - "name": "chatOpenAI", - "type": "ChatOpenAI", - "baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"], - "category": "Chat Models", - "description": "Wrapper around OpenAI large language models that use the Chat endpoint", - "inputParams": [ - { - "label": "Connect 
Credential", - "name": "credential", - "type": "credential", - "credentialNames": ["openAIApi"], - "id": "chatOpenAI_3-input-credential-credential" - }, - { - "label": "Model Name", - "name": "modelName", - "type": "asyncOptions", - "loadMethod": "listModels", - "default": "gpt-3.5-turbo", - "id": "chatOpenAI_3-input-modelName-options" - }, - { - "label": "Temperature", - "name": "temperature", - "type": "number", - "default": 0.9, - "optional": true, - "id": "chatOpenAI_3-input-temperature-number" - }, - { - "label": "Max Tokens", - "name": "maxTokens", - "type": "number", - "optional": true, - "additionalParams": true, - "id": "chatOpenAI_3-input-maxTokens-number" - }, - { - "label": "Top Probability", - "name": "topP", - "type": "number", - "optional": true, - "additionalParams": true, - "id": "chatOpenAI_3-input-topP-number" - }, - { - "label": "Frequency Penalty", - "name": "frequencyPenalty", - "type": "number", - "optional": true, - "additionalParams": true, - "id": "chatOpenAI_3-input-frequencyPenalty-number" - }, - { - "label": "Presence Penalty", - "name": "presencePenalty", - "type": "number", - "optional": true, - "additionalParams": true, - "id": "chatOpenAI_3-input-presencePenalty-number" - }, - { - "label": "Timeout", - "name": "timeout", - "type": "number", - "optional": true, - "additionalParams": true, - "id": "chatOpenAI_3-input-timeout-number" - }, - { - "label": "BasePath", - "name": "basepath", - "type": "string", - "optional": true, - "additionalParams": true, - "id": "chatOpenAI_3-input-basepath-string" - }, - { - "label": "BaseOptions", - "name": "baseOptions", - "type": "json", - "optional": true, - "additionalParams": true, - "id": "chatOpenAI_3-input-baseOptions-json" - }, - { - "label": "Allow Image Uploads", - "name": "allowImageUploads", - "type": "boolean", - "description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. 
Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent", - "default": false, - "optional": true, - "id": "chatOpenAI_3-input-allowImageUploads-boolean" - }, - { - "label": "Image Resolution", - "description": "This parameter controls the resolution in which the model views the image.", - "name": "imageResolution", - "type": "options", - "options": [ - { - "label": "Low", - "name": "low" - }, - { - "label": "High", - "name": "high" - }, - { - "label": "Auto", - "name": "auto" - } - ], - "default": "low", - "optional": false, - "additionalParams": true, - "id": "chatOpenAI_3-input-imageResolution-options" - } - ], - "inputAnchors": [ - { - "label": "Cache", - "name": "cache", - "type": "BaseCache", - "optional": true, - "id": "chatOpenAI_3-input-cache-BaseCache" - } - ], - "inputs": { - "modelName": "gpt-3.5-turbo-16k", - "temperature": 0.9, - "maxTokens": "", - "topP": "", - "frequencyPenalty": "", - "presencePenalty": "", - "timeout": "", - "basepath": "", - "baseOptions": "", - "allowImageUploads": true, - "imageResolution": "low" - }, - "outputAnchors": [ - { - "id": "chatOpenAI_3-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel", - "name": "chatOpenAI", - "label": "ChatOpenAI", - "type": "ChatOpenAI | BaseChatModel | BaseLanguageModel" - } - ], - "outputs": {}, - "selected": false - }, - "selected": false, - "positionAbsolute": { - "x": 1148.338912314111, - "y": 1561.0888070167944 - }, - "dragging": false - }, - { - "id": "toolAgent_0", - "position": { - "x": 2087.462952706838, - "y": 974.6001334100872 - }, - "type": "customNode", - "data": { - "id": "toolAgent_0", - "label": "Tool Agent", - "version": 1, - "name": "toolAgent", - "type": "AgentExecutor", - "baseClasses": ["AgentExecutor", "BaseChain", "Runnable"], - "category": "Agents", - "description": "Agent that uses Function Calling to pick the tools and args to call", - "inputParams": [ - { - "label": "System Message", - "name": "systemMessage", - "type": "string", 
- "default": "You are a helpful AI assistant.", - "rows": 4, - "optional": true, - "additionalParams": true, - "id": "toolAgent_0-input-systemMessage-string" - }, - { - "label": "Max Iterations", - "name": "maxIterations", - "type": "number", - "optional": true, - "additionalParams": true, - "id": "toolAgent_0-input-maxIterations-number" - } - ], - "inputAnchors": [ - { - "label": "Tools", - "name": "tools", - "type": "Tool", - "list": true, - "id": "toolAgent_0-input-tools-Tool" - }, - { - "label": "Memory", - "name": "memory", - "type": "BaseChatMemory", - "id": "toolAgent_0-input-memory-BaseChatMemory" - }, - { - "label": "Tool Calling Chat Model", - "name": "model", - "type": "BaseChatModel", - "description": "Only compatible with models that are capable of function calling: ChatOpenAI, ChatMistral, ChatAnthropic, ChatGoogleGenerativeAI, ChatVertexAI, GroqChat", - "id": "toolAgent_0-input-model-BaseChatModel" - }, - { - "label": "Input Moderation", - "description": "Detect text that could generate harmful output and prevent it from being sent to the language model", - "name": "inputModeration", - "type": "Moderation", - "optional": true, - "list": true, - "id": "toolAgent_0-input-inputModeration-Moderation" - } - ], - "inputs": { - "tools": ["{{chainTool_0.data.instance}}", "{{chainTool_1.data.instance}}"], - "memory": "{{bufferMemory_0.data.instance}}", - "model": "{{chatOpenAI_3.data.instance}}", - "systemMessage": "You are a helpful AI assistant.", - "inputModeration": "", - "maxIterations": "" - }, - "outputAnchors": [ - { - "id": "toolAgent_0-output-toolAgent-AgentExecutor|BaseChain|Runnable", - "name": "toolAgent", - "label": "AgentExecutor", - "description": "Agent that uses Function Calling to pick the tools and args to call", - "type": "AgentExecutor | BaseChain | Runnable" - } - ], - "outputs": {}, - "selected": false - }, - "width": 300, - "height": 435, - "selected": false, - "positionAbsolute": { - "x": 2087.462952706838, - "y": 974.6001334100872 - 
}, - "dragging": false - }, - { - "id": "stickyNote_0", - "position": { - "x": 2081.8371244608006, - "y": 595.924073574161 - }, - "type": "stickyNote", - "data": { - "id": "stickyNote_0", - "label": "Sticky Note", - "version": 2, - "name": "stickyNote", - "type": "StickyNote", - "baseClasses": ["StickyNote"], - "tags": ["Utilities"], - "category": "Utilities", - "description": "Add a sticky note", - "inputParams": [ - { - "label": "", - "name": "note", - "type": "string", - "rows": 1, - "placeholder": "Type something here", - "optional": true, - "id": "stickyNote_0-input-note-string" + "id": "openAPIToolkit_0-input-customCode-code", + "display": true } ], "inputAnchors": [], "inputs": { - "note": "Using agent, we give it 2 tools that is each attached to a GET/POST API Chain.\n\nThe goal is to have the agent to decide when to use which tool. \n\nWhen the tool is being used, API Chain's task is to figure out the correct URL and params to make the HTTP call.\n\nHowever, it is recommended to use OpenAPI YML to give a more structured input to LLM, for better quality output.\n\nExample question:\nSend me the weather of SF today to my discord" + "returnDirect": "", + "headers": "", + "removeNulls": "", + "customCode": "const fetch = require('node-fetch');\nconst url = $url;\nconst options = $options;\n\ntry {\n\tconst response = await fetch(url, options);\n\tconst resp = await response.json();\n\treturn JSON.stringify(resp);\n} catch (error) {\n\tconsole.error(error);\n\treturn '';\n}\n" }, "outputAnchors": [ { - "id": "stickyNote_0-output-stickyNote-StickyNote", - "name": "stickyNote", - "label": "StickyNote", - "description": "Add a sticky note", - "type": "StickyNote" + "id": "openAPIToolkit_0-output-openAPIToolkit-OpenAPIToolkit|Tool", + "name": "openAPIToolkit", + "label": "OpenAPIToolkit", + "description": "Load OpenAPI specification, and converts each API endpoint to a tool", + "type": "OpenAPIToolkit | Tool" } ], "outputs": {}, "selected": false }, "width": 300, - 
"height": 364, + "height": 552, "selected": false, "positionAbsolute": { - "x": 2081.8371244608006, - "y": 595.924073574161 + "x": 1406.3474125716532, + "y": -26.543208700976493 }, "dragging": false } ], "edges": [ - { - "source": "getApiChain_0", - "sourceHandle": "getApiChain_0-output-getApiChain-GETApiChain|BaseChain|BaseLangChain", - "target": "chainTool_0", - "targetHandle": "chainTool_0-input-baseChain-BaseChain", - "type": "buttonedge", - "id": "getApiChain_0-getApiChain_0-output-getApiChain-GETApiChain|BaseChain|BaseLangChain-chainTool_0-chainTool_0-input-baseChain-BaseChain", - "data": { - "label": "" - } - }, - { - "source": "postApiChain_0", - "sourceHandle": "postApiChain_0-output-postApiChain-POSTApiChain|BaseChain|BaseLangChain", - "target": "chainTool_1", - "targetHandle": "chainTool_1-input-baseChain-BaseChain", - "type": "buttonedge", - "id": "postApiChain_0-postApiChain_0-output-postApiChain-POSTApiChain|BaseChain|BaseLangChain-chainTool_1-chainTool_1-input-baseChain-BaseChain", - "data": { - "label": "" - } - }, - { - "source": "chatOpenAI_2", - "sourceHandle": "chatOpenAI_2-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel", - "target": "postApiChain_0", - "targetHandle": "postApiChain_0-input-model-BaseLanguageModel", - "type": "buttonedge", - "id": "chatOpenAI_2-chatOpenAI_2-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel-postApiChain_0-postApiChain_0-input-model-BaseLanguageModel", - "data": { - "label": "" - } - }, - { - "source": "chatOpenAI_1", - "sourceHandle": "chatOpenAI_1-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel", - "target": "getApiChain_0", - "targetHandle": "getApiChain_0-input-model-BaseLanguageModel", - "type": "buttonedge", - "id": "chatOpenAI_1-chatOpenAI_1-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel-getApiChain_0-getApiChain_0-input-model-BaseLanguageModel", - "data": { - "label": "" - } - }, - { - "source": "chainTool_0", - "sourceHandle": 
"chainTool_0-output-chainTool-ChainTool|DynamicTool|Tool|StructuredTool|BaseLangChain", - "target": "toolAgent_0", - "targetHandle": "toolAgent_0-input-tools-Tool", - "type": "buttonedge", - "id": "chainTool_0-chainTool_0-output-chainTool-ChainTool|DynamicTool|Tool|StructuredTool|BaseLangChain-toolAgent_0-toolAgent_0-input-tools-Tool" - }, - { - "source": "chainTool_1", - "sourceHandle": "chainTool_1-output-chainTool-ChainTool|DynamicTool|Tool|StructuredTool|BaseLangChain", - "target": "toolAgent_0", - "targetHandle": "toolAgent_0-input-tools-Tool", - "type": "buttonedge", - "id": "chainTool_1-chainTool_1-output-chainTool-ChainTool|DynamicTool|Tool|StructuredTool|BaseLangChain-toolAgent_0-toolAgent_0-input-tools-Tool" - }, { "source": "bufferMemory_0", "sourceHandle": "bufferMemory_0-output-bufferMemory-BufferMemory|BaseChatMemory|BaseMemory", @@ -1126,12 +440,20 @@ "id": "bufferMemory_0-bufferMemory_0-output-bufferMemory-BufferMemory|BaseChatMemory|BaseMemory-toolAgent_0-toolAgent_0-input-memory-BaseChatMemory" }, { - "source": "chatOpenAI_3", - "sourceHandle": "chatOpenAI_3-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel", + "source": "openAPIToolkit_0", + "sourceHandle": "openAPIToolkit_0-output-openAPIToolkit-OpenAPIToolkit|Tool", + "target": "toolAgent_0", + "targetHandle": "toolAgent_0-input-tools-Tool", + "type": "buttonedge", + "id": "openAPIToolkit_0-openAPIToolkit_0-output-openAPIToolkit-OpenAPIToolkit|Tool-toolAgent_0-toolAgent_0-input-tools-Tool" + }, + { + "source": "chatAnthropic_0", + "sourceHandle": "chatAnthropic_0-output-chatAnthropic-ChatAnthropic|ChatAnthropicMessages|BaseChatModel|BaseLanguageModel|Runnable", "target": "toolAgent_0", "targetHandle": "toolAgent_0-input-model-BaseChatModel", "type": "buttonedge", - "id": "chatOpenAI_3-chatOpenAI_3-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel-toolAgent_0-toolAgent_0-input-model-BaseChatModel" + "id": 
"chatAnthropic_0-chatAnthropic_0-output-chatAnthropic-ChatAnthropic|ChatAnthropicMessages|BaseChatModel|BaseLanguageModel|Runnable-toolAgent_0-toolAgent_0-input-model-BaseChatModel" } ] } diff --git a/packages/server/marketplaces/chatflows/AutoGPT.json b/packages/server/marketplaces/chatflows/AutoGPT.json deleted file mode 100644 index b61aa980c..000000000 --- a/packages/server/marketplaces/chatflows/AutoGPT.json +++ /dev/null @@ -1,701 +0,0 @@ -{ - "description": "AutoGPT - Autonomous agent with chain of thoughts for self-guided task completion", - "framework": ["Langchain"], - "usecases": ["Reflective Agent"], - "nodes": [ - { - "width": 300, - "height": 679, - "id": "autoGPT_0", - "position": { - "x": 1566.5228556278, - "y": 48.800017192230115 - }, - "type": "customNode", - "data": { - "id": "autoGPT_0", - "label": "AutoGPT", - "version": 2, - "name": "autoGPT", - "type": "AutoGPT", - "baseClasses": ["AutoGPT"], - "category": "Agents", - "description": "Autonomous agent with chain of thoughts by GPT4", - "inputParams": [ - { - "label": "AutoGPT Name", - "name": "aiName", - "type": "string", - "placeholder": "Tom", - "optional": true, - "id": "autoGPT_0-input-aiName-string" - }, - { - "label": "AutoGPT Role", - "name": "aiRole", - "type": "string", - "placeholder": "Assistant", - "optional": true, - "id": "autoGPT_0-input-aiRole-string" - }, - { - "label": "Maximum Loop", - "name": "maxLoop", - "type": "number", - "default": 5, - "optional": true, - "id": "autoGPT_0-input-maxLoop-number" - } - ], - "inputAnchors": [ - { - "label": "Allowed Tools", - "name": "tools", - "type": "Tool", - "list": true, - "id": "autoGPT_0-input-tools-Tool" - }, - { - "label": "Chat Model", - "name": "model", - "type": "BaseChatModel", - "id": "autoGPT_0-input-model-BaseChatModel" - }, - { - "label": "Vector Store Retriever", - "name": "vectorStoreRetriever", - "type": "BaseRetriever", - "id": "autoGPT_0-input-vectorStoreRetriever-BaseRetriever" - }, - { - "label": "Input Moderation", 
- "description": "Detect text that could generate harmful output and prevent it from being sent to the language model", - "name": "inputModeration", - "type": "Moderation", - "optional": true, - "list": true, - "id": "autoGPT_0-input-inputModeration-Moderation" - } - ], - "inputs": { - "inputModeration": "", - "tools": ["{{serpAPI_0.data.instance}}"], - "model": "{{chatOpenAI_0.data.instance}}", - "vectorStoreRetriever": "{{pinecone_0.data.instance}}", - "aiName": "", - "aiRole": "", - "maxLoop": 5 - }, - "outputAnchors": [ - { - "id": "autoGPT_0-output-autoGPT-AutoGPT", - "name": "autoGPT", - "label": "AutoGPT", - "type": "AutoGPT" - } - ], - "outputs": {}, - "selected": false - }, - "selected": false, - "positionAbsolute": { - "x": 1566.5228556278, - "y": 48.800017192230115 - }, - "dragging": false - }, - { - "width": 300, - "height": 276, - "id": "serpAPI_0", - "position": { - "x": 1207.9685973743674, - "y": -216.77363417201138 - }, - "type": "customNode", - "data": { - "id": "serpAPI_0", - "label": "Serp API", - "version": 1, - "name": "serpAPI", - "type": "SerpAPI", - "baseClasses": ["SerpAPI", "Tool", "StructuredTool"], - "category": "Tools", - "description": "Wrapper around SerpAPI - a real-time API to access Google search results", - "inputParams": [ - { - "label": "Connect Credential", - "name": "credential", - "type": "credential", - "credentialNames": ["serpApi"], - "id": "serpAPI_0-input-credential-credential" - } - ], - "inputAnchors": [], - "inputs": {}, - "outputAnchors": [ - { - "id": "serpAPI_0-output-serpAPI-SerpAPI|Tool|StructuredTool", - "name": "serpAPI", - "label": "SerpAPI", - "type": "SerpAPI | Tool | StructuredTool" - } - ], - "outputs": {}, - "selected": false - }, - "selected": false, - "positionAbsolute": { - "x": 1207.9685973743674, - "y": -216.77363417201138 - }, - "dragging": false - }, - { - "width": 300, - "height": 670, - "id": "chatOpenAI_0", - "position": { - "x": 861.5955028972123, - "y": -322.72984118549857 - }, - "type": 
"customNode", - "data": { - "id": "chatOpenAI_0", - "label": "ChatOpenAI", - "version": 6, - "name": "chatOpenAI", - "type": "ChatOpenAI", - "baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"], - "category": "Chat Models", - "description": "Wrapper around OpenAI large language models that use the Chat endpoint", - "inputParams": [ - { - "label": "Connect Credential", - "name": "credential", - "type": "credential", - "credentialNames": ["openAIApi"], - "id": "chatOpenAI_0-input-credential-credential" - }, - { - "label": "Model Name", - "name": "modelName", - "type": "asyncOptions", - "loadMethod": "listModels", - "default": "gpt-3.5-turbo", - "id": "chatOpenAI_0-input-modelName-options" - }, - { - "label": "Temperature", - "name": "temperature", - "type": "number", - "default": 0.9, - "optional": true, - "id": "chatOpenAI_0-input-temperature-number" - }, - { - "label": "Max Tokens", - "name": "maxTokens", - "type": "number", - "optional": true, - "additionalParams": true, - "id": "chatOpenAI_0-input-maxTokens-number" - }, - { - "label": "Top Probability", - "name": "topP", - "type": "number", - "optional": true, - "additionalParams": true, - "id": "chatOpenAI_0-input-topP-number" - }, - { - "label": "Frequency Penalty", - "name": "frequencyPenalty", - "type": "number", - "optional": true, - "additionalParams": true, - "id": "chatOpenAI_0-input-frequencyPenalty-number" - }, - { - "label": "Presence Penalty", - "name": "presencePenalty", - "type": "number", - "optional": true, - "additionalParams": true, - "id": "chatOpenAI_0-input-presencePenalty-number" - }, - { - "label": "Timeout", - "name": "timeout", - "type": "number", - "optional": true, - "additionalParams": true, - "id": "chatOpenAI_0-input-timeout-number" - }, - { - "label": "BasePath", - "name": "basepath", - "type": "string", - "optional": true, - "additionalParams": true, - "id": "chatOpenAI_0-input-basepath-string" - }, - { - "label": "BaseOptions", - "name": "baseOptions", - "type": 
"json", - "optional": true, - "additionalParams": true, - "id": "chatOpenAI_0-input-baseOptions-json" - }, - { - "label": "Allow Image Uploads", - "name": "allowImageUploads", - "type": "boolean", - "description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent", - "default": false, - "optional": true, - "id": "chatOpenAI_0-input-allowImageUploads-boolean" - }, - { - "label": "Image Resolution", - "description": "This parameter controls the resolution in which the model views the image.", - "name": "imageResolution", - "type": "options", - "options": [ - { - "label": "Low", - "name": "low" - }, - { - "label": "High", - "name": "high" - }, - { - "label": "Auto", - "name": "auto" - } - ], - "default": "low", - "optional": false, - "additionalParams": true, - "id": "chatOpenAI_0-input-imageResolution-options" - } - ], - "inputAnchors": [ - { - "label": "Cache", - "name": "cache", - "type": "BaseCache", - "optional": true, - "id": "chatOpenAI_0-input-cache-BaseCache" - } - ], - "inputs": { - "modelName": "gpt-3.5-turbo", - "temperature": 0.9, - "maxTokens": "", - "topP": "", - "frequencyPenalty": "", - "presencePenalty": "", - "timeout": "", - "basepath": "", - "baseOptions": "", - "allowImageUploads": true, - "imageResolution": "low" - }, - "outputAnchors": [ - { - "id": "chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel", - "name": "chatOpenAI", - "label": "ChatOpenAI", - "type": "ChatOpenAI | BaseChatModel | BaseLanguageModel" - } - ], - "outputs": {}, - "selected": false - }, - "selected": false, - "positionAbsolute": { - "x": 861.5955028972123, - "y": -322.72984118549857 - }, - "dragging": false - }, - { - "width": 300, - "height": 424, - "id": "openAIEmbeddings_0", - "position": { - "x": 116.62153412789377, - "y": 52.465581131402246 - }, - "type": "customNode", - "data": { - "id": "openAIEmbeddings_0", - "label": "OpenAI 
Embeddings", - "version": 4, - "name": "openAIEmbeddings", - "type": "OpenAIEmbeddings", - "baseClasses": ["OpenAIEmbeddings", "Embeddings"], - "category": "Embeddings", - "description": "OpenAI API to generate embeddings for a given text", - "inputParams": [ - { - "label": "Connect Credential", - "name": "credential", - "type": "credential", - "credentialNames": ["openAIApi"], - "id": "openAIEmbeddings_0-input-credential-credential" - }, - { - "label": "Model Name", - "name": "modelName", - "type": "asyncOptions", - "loadMethod": "listModels", - "default": "text-embedding-ada-002", - "id": "openAIEmbeddings_0-input-modelName-asyncOptions" - }, - { - "label": "Strip New Lines", - "name": "stripNewLines", - "type": "boolean", - "optional": true, - "additionalParams": true, - "id": "openAIEmbeddings_0-input-stripNewLines-boolean" - }, - { - "label": "Batch Size", - "name": "batchSize", - "type": "number", - "optional": true, - "additionalParams": true, - "id": "openAIEmbeddings_0-input-batchSize-number" - }, - { - "label": "Timeout", - "name": "timeout", - "type": "number", - "optional": true, - "additionalParams": true, - "id": "openAIEmbeddings_0-input-timeout-number" - }, - { - "label": "BasePath", - "name": "basepath", - "type": "string", - "optional": true, - "additionalParams": true, - "id": "openAIEmbeddings_0-input-basepath-string" - }, - { - "label": "Dimensions", - "name": "dimensions", - "type": "number", - "optional": true, - "additionalParams": true, - "id": "openAIEmbeddings_0-input-dimensions-number" - } - ], - "inputAnchors": [], - "inputs": { - "modelName": "text-embedding-ada-002", - "stripNewLines": "", - "batchSize": "", - "timeout": "", - "basepath": "", - "dimensions": "" - }, - "outputAnchors": [ - { - "id": "openAIEmbeddings_0-output-openAIEmbeddings-OpenAIEmbeddings|Embeddings", - "name": "openAIEmbeddings", - "label": "OpenAIEmbeddings", - "description": "OpenAI API to generate embeddings for a given text", - "type": "OpenAIEmbeddings | 
Embeddings" - } - ], - "outputs": {}, - "selected": false - }, - "selected": false, - "positionAbsolute": { - "x": 116.62153412789377, - "y": 52.465581131402246 - }, - "dragging": false - }, - { - "width": 300, - "height": 606, - "id": "pinecone_0", - "position": { - "x": 512.2389361920059, - "y": -36.80102752360557 - }, - "type": "customNode", - "data": { - "id": "pinecone_0", - "label": "Pinecone", - "version": 3, - "name": "pinecone", - "type": "Pinecone", - "baseClasses": ["Pinecone", "VectorStoreRetriever", "BaseRetriever"], - "category": "Vector Stores", - "description": "Upsert embedded data and perform similarity or mmr search using Pinecone, a leading fully managed hosted vector database", - "inputParams": [ - { - "label": "Connect Credential", - "name": "credential", - "type": "credential", - "credentialNames": ["pineconeApi"], - "id": "pinecone_0-input-credential-credential" - }, - { - "label": "Pinecone Index", - "name": "pineconeIndex", - "type": "string", - "id": "pinecone_0-input-pineconeIndex-string" - }, - { - "label": "Pinecone Namespace", - "name": "pineconeNamespace", - "type": "string", - "placeholder": "my-first-namespace", - "additionalParams": true, - "optional": true, - "id": "pinecone_0-input-pineconeNamespace-string" - }, - { - "label": "Pinecone Metadata Filter", - "name": "pineconeMetadataFilter", - "type": "json", - "optional": true, - "additionalParams": true, - "id": "pinecone_0-input-pineconeMetadataFilter-json" - }, - { - "label": "Top K", - "name": "topK", - "description": "Number of top results to fetch. 
Default to 4", - "placeholder": "4", - "type": "number", - "additionalParams": true, - "optional": true, - "id": "pinecone_0-input-topK-number" - }, - { - "label": "Search Type", - "name": "searchType", - "type": "options", - "default": "similarity", - "options": [ - { - "label": "Similarity", - "name": "similarity" - }, - { - "label": "Max Marginal Relevance", - "name": "mmr" - } - ], - "additionalParams": true, - "optional": true, - "id": "pinecone_0-input-searchType-options" - }, - { - "label": "Fetch K (for MMR Search)", - "name": "fetchK", - "description": "Number of initial documents to fetch for MMR reranking. Default to 20. Used only when the search type is MMR", - "placeholder": "20", - "type": "number", - "additionalParams": true, - "optional": true, - "id": "pinecone_0-input-fetchK-number" - }, - { - "label": "Lambda (for MMR Search)", - "name": "lambda", - "description": "Number between 0 and 1 that determines the degree of diversity among the results, where 0 corresponds to maximum diversity and 1 to minimum diversity. 
Used only when the search type is MMR", - "placeholder": "0.5", - "type": "number", - "additionalParams": true, - "optional": true, - "id": "pinecone_0-input-lambda-number" - } - ], - "inputAnchors": [ - { - "label": "Document", - "name": "document", - "type": "Document", - "list": true, - "optional": true, - "id": "pinecone_0-input-document-Document" - }, - { - "label": "Embeddings", - "name": "embeddings", - "type": "Embeddings", - "id": "pinecone_0-input-embeddings-Embeddings" - }, - { - "label": "Record Manager", - "name": "recordManager", - "type": "RecordManager", - "description": "Keep track of the record to prevent duplication", - "optional": true, - "id": "pinecone_0-input-recordManager-RecordManager" - } - ], - "inputs": { - "document": "", - "embeddings": "{{openAIEmbeddings_0.data.instance}}", - "recordManager": "", - "pineconeIndex": "", - "pineconeNamespace": "", - "pineconeMetadataFilter": "", - "topK": "", - "searchType": "similarity", - "fetchK": "", - "lambda": "" - }, - "outputAnchors": [ - { - "name": "output", - "label": "Output", - "type": "options", - "description": "", - "options": [ - { - "id": "pinecone_0-output-retriever-Pinecone|VectorStoreRetriever|BaseRetriever", - "name": "retriever", - "label": "Pinecone Retriever", - "description": "", - "type": "Pinecone | VectorStoreRetriever | BaseRetriever" - }, - { - "id": "pinecone_0-output-vectorStore-Pinecone|VectorStore", - "name": "vectorStore", - "label": "Pinecone Vector Store", - "description": "", - "type": "Pinecone | VectorStore" - } - ], - "default": "retriever" - } - ], - "outputs": { - "output": "retriever" - }, - "selected": false - }, - "selected": false, - "positionAbsolute": { - "x": 512.2389361920059, - "y": -36.80102752360557 - }, - "dragging": false - }, - { - "id": "stickyNote_0", - "position": { - "x": 1565.5672914362437, - "y": -138.9994972608436 - }, - "type": "stickyNote", - "data": { - "id": "stickyNote_0", - "label": "Sticky Note", - "version": 2, - "name": 
"stickyNote", - "type": "StickyNote", - "baseClasses": ["StickyNote"], - "tags": ["Utilities"], - "category": "Utilities", - "description": "Add a sticky note", - "inputParams": [ - { - "label": "", - "name": "note", - "type": "string", - "rows": 1, - "placeholder": "Type something here", - "optional": true, - "id": "stickyNote_0-input-note-string" - } - ], - "inputAnchors": [], - "inputs": { - "note": "An agent that uses long-term memory (Pinecone in this example) together with a prompt for self-guided task completion.\n\nAgent has access to Serp API tool to search the web, and store the continuous results to Pinecone" - }, - "outputAnchors": [ - { - "id": "stickyNote_0-output-stickyNote-StickyNote", - "name": "stickyNote", - "label": "StickyNote", - "description": "Add a sticky note", - "type": "StickyNote" - } - ], - "outputs": {}, - "selected": false - }, - "width": 300, - "height": 163, - "selected": false, - "positionAbsolute": { - "x": 1565.5672914362437, - "y": -138.9994972608436 - }, - "dragging": false - } - ], - "edges": [ - { - "source": "chatOpenAI_0", - "sourceHandle": "chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel", - "target": "autoGPT_0", - "targetHandle": "autoGPT_0-input-model-BaseChatModel", - "type": "buttonedge", - "id": "chatOpenAI_0-chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel-autoGPT_0-autoGPT_0-input-model-BaseChatModel", - "data": { - "label": "" - } - }, - { - "source": "serpAPI_0", - "sourceHandle": "serpAPI_0-output-serpAPI-SerpAPI|Tool|StructuredTool", - "target": "autoGPT_0", - "targetHandle": "autoGPT_0-input-tools-Tool", - "type": "buttonedge", - "id": "serpAPI_0-serpAPI_0-output-serpAPI-SerpAPI|Tool|StructuredTool-autoGPT_0-autoGPT_0-input-tools-Tool", - "data": { - "label": "" - } - }, - { - "source": "openAIEmbeddings_0", - "sourceHandle": "openAIEmbeddings_0-output-openAIEmbeddings-OpenAIEmbeddings|Embeddings", - "target": "pinecone_0", - "targetHandle": 
"pinecone_0-input-embeddings-Embeddings", - "type": "buttonedge", - "id": "openAIEmbeddings_0-openAIEmbeddings_0-output-openAIEmbeddings-OpenAIEmbeddings|Embeddings-pinecone_0-pinecone_0-input-embeddings-Embeddings", - "data": { - "label": "" - } - }, - { - "source": "pinecone_0", - "sourceHandle": "pinecone_0-output-retriever-Pinecone|VectorStoreRetriever|BaseRetriever", - "target": "autoGPT_0", - "targetHandle": "autoGPT_0-input-vectorStoreRetriever-BaseRetriever", - "type": "buttonedge", - "id": "pinecone_0-pinecone_0-output-retriever-Pinecone|VectorStoreRetriever|BaseRetriever-autoGPT_0-autoGPT_0-input-vectorStoreRetriever-BaseRetriever", - "data": { - "label": "" - } - } - ] -} diff --git a/packages/server/marketplaces/chatflows/BabyAGI.json b/packages/server/marketplaces/chatflows/BabyAGI.json deleted file mode 100644 index 61e3a6ca9..000000000 --- a/packages/server/marketplaces/chatflows/BabyAGI.json +++ /dev/null @@ -1,623 +0,0 @@ -{ - "description": "Use BabyAGI to create tasks and reprioritize for a given objective", - "framework": ["Langchain"], - "usecases": ["Reflective Agent"], - "nodes": [ - { - "width": 300, - "height": 431, - "id": "babyAGI_1", - "position": { - "x": 950.8042093214954, - "y": 66.00028106865324 - }, - "type": "customNode", - "data": { - "id": "babyAGI_1", - "label": "BabyAGI", - "version": 2, - "name": "babyAGI", - "type": "BabyAGI", - "baseClasses": ["BabyAGI"], - "category": "Agents", - "description": "Task Driven Autonomous Agent which creates new task and reprioritizes task list based on objective", - "inputParams": [ - { - "label": "Task Loop", - "name": "taskLoop", - "type": "number", - "default": 3, - "id": "babyAGI_1-input-taskLoop-number" - } - ], - "inputAnchors": [ - { - "label": "Chat Model", - "name": "model", - "type": "BaseChatModel", - "id": "babyAGI_1-input-model-BaseChatModel" - }, - { - "label": "Vector Store", - "name": "vectorStore", - "type": "VectorStore", - "id": "babyAGI_1-input-vectorStore-VectorStore" - }, 
- { - "label": "Input Moderation", - "description": "Detect text that could generate harmful output and prevent it from being sent to the language model", - "name": "inputModeration", - "type": "Moderation", - "optional": true, - "list": true, - "id": "babyAGI_1-input-inputModeration-Moderation" - } - ], - "inputs": { - "inputModeration": "", - "model": "{{chatOpenAI_0.data.instance}}", - "vectorStore": "{{pinecone_0.data.instance}}", - "taskLoop": 3 - }, - "outputAnchors": [ - { - "id": "babyAGI_1-output-babyAGI-BabyAGI", - "name": "babyAGI", - "label": "BabyAGI", - "type": "BabyAGI" - } - ], - "outputs": {}, - "selected": false - }, - "selected": false, - "dragging": false, - "positionAbsolute": { - "x": 950.8042093214954, - "y": 66.00028106865324 - } - }, - { - "width": 300, - "height": 424, - "id": "openAIEmbeddings_0", - "position": { - "x": -111.82510263637522, - "y": -224.88655030419665 - }, - "type": "customNode", - "data": { - "id": "openAIEmbeddings_0", - "label": "OpenAI Embeddings", - "version": 4, - "name": "openAIEmbeddings", - "type": "OpenAIEmbeddings", - "baseClasses": ["OpenAIEmbeddings", "Embeddings"], - "category": "Embeddings", - "description": "OpenAI API to generate embeddings for a given text", - "inputParams": [ - { - "label": "Connect Credential", - "name": "credential", - "type": "credential", - "credentialNames": ["openAIApi"], - "id": "openAIEmbeddings_0-input-credential-credential" - }, - { - "label": "Model Name", - "name": "modelName", - "type": "asyncOptions", - "loadMethod": "listModels", - "default": "text-embedding-ada-002", - "id": "openAIEmbeddings_0-input-modelName-asyncOptions" - }, - { - "label": "Strip New Lines", - "name": "stripNewLines", - "type": "boolean", - "optional": true, - "additionalParams": true, - "id": "openAIEmbeddings_0-input-stripNewLines-boolean" - }, - { - "label": "Batch Size", - "name": "batchSize", - "type": "number", - "optional": true, - "additionalParams": true, - "id": 
"openAIEmbeddings_0-input-batchSize-number" - }, - { - "label": "Timeout", - "name": "timeout", - "type": "number", - "optional": true, - "additionalParams": true, - "id": "openAIEmbeddings_0-input-timeout-number" - }, - { - "label": "BasePath", - "name": "basepath", - "type": "string", - "optional": true, - "additionalParams": true, - "id": "openAIEmbeddings_0-input-basepath-string" - }, - { - "label": "Dimensions", - "name": "dimensions", - "type": "number", - "optional": true, - "additionalParams": true, - "id": "openAIEmbeddings_0-input-dimensions-number" - } - ], - "inputAnchors": [], - "inputs": { - "modelName": "text-embedding-ada-002", - "stripNewLines": "", - "batchSize": "", - "timeout": "", - "basepath": "", - "dimensions": "" - }, - "outputAnchors": [ - { - "id": "openAIEmbeddings_0-output-openAIEmbeddings-OpenAIEmbeddings|Embeddings", - "name": "openAIEmbeddings", - "label": "OpenAIEmbeddings", - "description": "OpenAI API to generate embeddings for a given text", - "type": "OpenAIEmbeddings | Embeddings" - } - ], - "outputs": {}, - "selected": false - }, - "selected": false, - "positionAbsolute": { - "x": -111.82510263637522, - "y": -224.88655030419665 - }, - "dragging": false - }, - { - "width": 300, - "height": 606, - "id": "pinecone_0", - "position": { - "x": 245.707825551803, - "y": -176.9243551667388 - }, - "type": "customNode", - "data": { - "id": "pinecone_0", - "label": "Pinecone", - "version": 3, - "name": "pinecone", - "type": "Pinecone", - "baseClasses": ["Pinecone", "VectorStoreRetriever", "BaseRetriever"], - "category": "Vector Stores", - "description": "Upsert embedded data and perform similarity or mmr search using Pinecone, a leading fully managed hosted vector database", - "inputParams": [ - { - "label": "Connect Credential", - "name": "credential", - "type": "credential", - "credentialNames": ["pineconeApi"], - "id": "pinecone_0-input-credential-credential" - }, - { - "label": "Pinecone Index", - "name": "pineconeIndex", - "type": 
"string", - "id": "pinecone_0-input-pineconeIndex-string" - }, - { - "label": "Pinecone Namespace", - "name": "pineconeNamespace", - "type": "string", - "placeholder": "my-first-namespace", - "additionalParams": true, - "optional": true, - "id": "pinecone_0-input-pineconeNamespace-string" - }, - { - "label": "Pinecone Metadata Filter", - "name": "pineconeMetadataFilter", - "type": "json", - "optional": true, - "additionalParams": true, - "id": "pinecone_0-input-pineconeMetadataFilter-json" - }, - { - "label": "Top K", - "name": "topK", - "description": "Number of top results to fetch. Default to 4", - "placeholder": "4", - "type": "number", - "additionalParams": true, - "optional": true, - "id": "pinecone_0-input-topK-number" - }, - { - "label": "Search Type", - "name": "searchType", - "type": "options", - "default": "similarity", - "options": [ - { - "label": "Similarity", - "name": "similarity" - }, - { - "label": "Max Marginal Relevance", - "name": "mmr" - } - ], - "additionalParams": true, - "optional": true, - "id": "pinecone_0-input-searchType-options" - }, - { - "label": "Fetch K (for MMR Search)", - "name": "fetchK", - "description": "Number of initial documents to fetch for MMR reranking. Default to 20. Used only when the search type is MMR", - "placeholder": "20", - "type": "number", - "additionalParams": true, - "optional": true, - "id": "pinecone_0-input-fetchK-number" - }, - { - "label": "Lambda (for MMR Search)", - "name": "lambda", - "description": "Number between 0 and 1 that determines the degree of diversity among the results, where 0 corresponds to maximum diversity and 1 to minimum diversity. 
Used only when the search type is MMR", - "placeholder": "0.5", - "type": "number", - "additionalParams": true, - "optional": true, - "id": "pinecone_0-input-lambda-number" - } - ], - "inputAnchors": [ - { - "label": "Document", - "name": "document", - "type": "Document", - "list": true, - "optional": true, - "id": "pinecone_0-input-document-Document" - }, - { - "label": "Embeddings", - "name": "embeddings", - "type": "Embeddings", - "id": "pinecone_0-input-embeddings-Embeddings" - }, - { - "label": "Record Manager", - "name": "recordManager", - "type": "RecordManager", - "description": "Keep track of the record to prevent duplication", - "optional": true, - "id": "pinecone_0-input-recordManager-RecordManager" - } - ], - "inputs": { - "document": "", - "embeddings": "{{openAIEmbeddings_0.data.instance}}", - "recordManager": "", - "pineconeIndex": "", - "pineconeNamespace": "", - "pineconeMetadataFilter": "", - "topK": "", - "searchType": "similarity", - "fetchK": "", - "lambda": "" - }, - "outputAnchors": [ - { - "name": "output", - "label": "Output", - "type": "options", - "description": "", - "options": [ - { - "id": "pinecone_0-output-retriever-Pinecone|VectorStoreRetriever|BaseRetriever", - "name": "retriever", - "label": "Pinecone Retriever", - "description": "", - "type": "Pinecone | VectorStoreRetriever | BaseRetriever" - }, - { - "id": "pinecone_0-output-vectorStore-Pinecone|VectorStore", - "name": "vectorStore", - "label": "Pinecone Vector Store", - "description": "", - "type": "Pinecone | VectorStore" - } - ], - "default": "retriever" - } - ], - "outputs": { - "output": "vectorStore" - }, - "selected": false - }, - "selected": false, - "positionAbsolute": { - "x": 245.707825551803, - "y": -176.9243551667388 - }, - "dragging": false - }, - { - "width": 300, - "height": 670, - "id": "chatOpenAI_0", - "position": { - "x": 597.7565040390853, - "y": -381.01461408909825 - }, - "type": "customNode", - "data": { - "id": "chatOpenAI_0", - "label": "ChatOpenAI", - 
"version": 6, - "name": "chatOpenAI", - "type": "ChatOpenAI", - "baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "Runnable"], - "category": "Chat Models", - "description": "Wrapper around OpenAI large language models that use the Chat endpoint", - "inputParams": [ - { - "label": "Connect Credential", - "name": "credential", - "type": "credential", - "credentialNames": ["openAIApi"], - "id": "chatOpenAI_0-input-credential-credential" - }, - { - "label": "Model Name", - "name": "modelName", - "type": "asyncOptions", - "loadMethod": "listModels", - "default": "gpt-3.5-turbo", - "id": "chatOpenAI_0-input-modelName-options" - }, - { - "label": "Temperature", - "name": "temperature", - "type": "number", - "step": 0.1, - "default": 0.9, - "optional": true, - "id": "chatOpenAI_0-input-temperature-number" - }, - { - "label": "Max Tokens", - "name": "maxTokens", - "type": "number", - "step": 1, - "optional": true, - "additionalParams": true, - "id": "chatOpenAI_0-input-maxTokens-number" - }, - { - "label": "Top Probability", - "name": "topP", - "type": "number", - "step": 0.1, - "optional": true, - "additionalParams": true, - "id": "chatOpenAI_0-input-topP-number" - }, - { - "label": "Frequency Penalty", - "name": "frequencyPenalty", - "type": "number", - "step": 0.1, - "optional": true, - "additionalParams": true, - "id": "chatOpenAI_0-input-frequencyPenalty-number" - }, - { - "label": "Presence Penalty", - "name": "presencePenalty", - "type": "number", - "step": 0.1, - "optional": true, - "additionalParams": true, - "id": "chatOpenAI_0-input-presencePenalty-number" - }, - { - "label": "Timeout", - "name": "timeout", - "type": "number", - "step": 1, - "optional": true, - "additionalParams": true, - "id": "chatOpenAI_0-input-timeout-number" - }, - { - "label": "BasePath", - "name": "basepath", - "type": "string", - "optional": true, - "additionalParams": true, - "id": "chatOpenAI_0-input-basepath-string" - }, - { - "label": "BaseOptions", - "name": 
"baseOptions", - "type": "json", - "optional": true, - "additionalParams": true, - "id": "chatOpenAI_0-input-baseOptions-json" - }, - { - "label": "Allow Image Uploads", - "name": "allowImageUploads", - "type": "boolean", - "description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent", - "default": false, - "optional": true, - "id": "chatOpenAI_0-input-allowImageUploads-boolean" - }, - { - "label": "Image Resolution", - "description": "This parameter controls the resolution in which the model views the image.", - "name": "imageResolution", - "type": "options", - "options": [ - { - "label": "Low", - "name": "low" - }, - { - "label": "High", - "name": "high" - }, - { - "label": "Auto", - "name": "auto" - } - ], - "default": "low", - "optional": false, - "additionalParams": true, - "id": "chatOpenAI_0-input-imageResolution-options" - } - ], - "inputAnchors": [ - { - "label": "Cache", - "name": "cache", - "type": "BaseCache", - "optional": true, - "id": "chatOpenAI_0-input-cache-BaseCache" - } - ], - "inputs": { - "cache": "", - "modelName": "gpt-3.5-turbo", - "temperature": 0.9, - "maxTokens": "", - "topP": "", - "frequencyPenalty": "", - "presencePenalty": "", - "timeout": "", - "basepath": "", - "baseOptions": "", - "allowImageUploads": true, - "imageResolution": "low" - }, - "outputAnchors": [ - { - "id": "chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable", - "name": "chatOpenAI", - "label": "ChatOpenAI", - "type": "ChatOpenAI | BaseChatModel | BaseLanguageModel | Runnable" - } - ], - "outputs": {}, - "selected": false - }, - "selected": false, - "positionAbsolute": { - "x": 597.7565040390853, - "y": -381.01461408909825 - }, - "dragging": false - }, - { - "id": "stickyNote_0", - "position": { - "x": 949.0763123880214, - "y": -172.0310628893923 - }, - "type": "stickyNote", - "data": { - "id": "stickyNote_0", - 
"label": "Sticky Note", - "version": 2, - "name": "stickyNote", - "type": "StickyNote", - "baseClasses": ["StickyNote"], - "tags": ["Utilities"], - "category": "Utilities", - "description": "Add a sticky note", - "inputParams": [ - { - "label": "", - "name": "note", - "type": "string", - "rows": 1, - "placeholder": "Type something here", - "optional": true, - "id": "stickyNote_0-input-note-string" - } - ], - "inputAnchors": [], - "inputs": { - "note": "BabyAGI is made up of 3 components:\n\n- A chain responsible for creating tasks\n- A chain responsible for prioritising tasks\n- A chain responsible for executing tasks\n\nThese chains are executed in sequence until the task list is empty or the maximum number of iterations is reached" - }, - "outputAnchors": [ - { - "id": "stickyNote_0-output-stickyNote-StickyNote", - "name": "stickyNote", - "label": "StickyNote", - "description": "Add a sticky note", - "type": "StickyNote" - } - ], - "outputs": {}, - "selected": false - }, - "width": 300, - "height": 203, - "selected": false, - "positionAbsolute": { - "x": 949.0763123880214, - "y": -172.0310628893923 - }, - "dragging": false - } - ], - "edges": [ - { - "source": "openAIEmbeddings_0", - "sourceHandle": "openAIEmbeddings_0-output-openAIEmbeddings-OpenAIEmbeddings|Embeddings", - "target": "pinecone_0", - "targetHandle": "pinecone_0-input-embeddings-Embeddings", - "type": "buttonedge", - "id": "openAIEmbeddings_0-openAIEmbeddings_0-output-openAIEmbeddings-OpenAIEmbeddings|Embeddings-pinecone_0-pinecone_0-input-embeddings-Embeddings", - "data": { - "label": "" - } - }, - { - "source": "chatOpenAI_0", - "sourceHandle": "chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable", - "target": "babyAGI_1", - "targetHandle": "babyAGI_1-input-model-BaseChatModel", - "type": "buttonedge", - "id": "chatOpenAI_0-chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable-babyAGI_1-babyAGI_1-input-model-BaseChatModel", - "data": { - 
"label": "" - } - }, - { - "source": "pinecone_0", - "sourceHandle": "pinecone_0-output-vectorStore-Pinecone|VectorStore", - "target": "babyAGI_1", - "targetHandle": "babyAGI_1-input-vectorStore-VectorStore", - "type": "buttonedge", - "id": "pinecone_0-pinecone_0-output-vectorStore-Pinecone|VectorStore-babyAGI_1-babyAGI_1-input-vectorStore-VectorStore", - "data": { - "label": "" - } - } - ] -} diff --git a/packages/server/marketplaces/chatflows/OpenAPI YAML Agent.json b/packages/server/marketplaces/chatflows/OpenAPI YAML Agent.json deleted file mode 100644 index 2e587a457..000000000 --- a/packages/server/marketplaces/chatflows/OpenAPI YAML Agent.json +++ /dev/null @@ -1,767 +0,0 @@ -{ - "description": "Tool Agent using OpenAPI yaml to automatically decide which API to call, generating url and body request from conversation", - "framework": ["Langchain"], - "usecases": ["Interacting with API"], - "nodes": [ - { - "width": 300, - "height": 544, - "id": "openApiChain_1", - "position": { - "x": 1203.1825726424859, - "y": 300.7226683414998 - }, - "type": "customNode", - "data": { - "id": "openApiChain_1", - "label": "OpenAPI Chain", - "version": 2, - "name": "openApiChain", - "type": "OpenAPIChain", - "baseClasses": ["OpenAPIChain", "BaseChain"], - "category": "Chains", - "description": "Chain that automatically select and call APIs based only on an OpenAPI spec", - "inputParams": [ - { - "label": "YAML Link", - "name": "yamlLink", - "type": "string", - "placeholder": "https://api.speak.com/openapi.yaml", - "description": "If YAML link is provided, uploaded YAML File will be ignored and YAML link will be used instead", - "id": "openApiChain_1-input-yamlLink-string" - }, - { - "label": "YAML File", - "name": "yamlFile", - "type": "file", - "fileType": ".yaml", - "description": "If YAML link is provided, uploaded YAML File will be ignored and YAML link will be used instead", - "id": "openApiChain_1-input-yamlFile-file" - }, - { - "label": "Headers", - "name": "headers", - 
"type": "json", - "additionalParams": true, - "optional": true, - "id": "openApiChain_1-input-headers-json" - } - ], - "inputAnchors": [ - { - "label": "ChatOpenAI Model", - "name": "model", - "type": "ChatOpenAI", - "id": "openApiChain_1-input-model-ChatOpenAI" - }, - { - "label": "Input Moderation", - "description": "Detect text that could generate harmful output and prevent it from being sent to the language model", - "name": "inputModeration", - "type": "Moderation", - "optional": true, - "list": true, - "id": "openApiChain_1-input-inputModeration-Moderation" - } - ], - "inputs": { - "inputModeration": "", - "model": "{{chatOpenAI_1.data.instance}}", - "yamlLink": "https://gist.githubusercontent.com/HenryHengZJ/b60f416c42cb9bcd3160fe797421119a/raw/0ef05b3aaf142e0423f71c19dec866178487dc10/klarna.yml", - "headers": "" - }, - "outputAnchors": [ - { - "id": "openApiChain_1-output-openApiChain-OpenAPIChain|BaseChain", - "name": "openApiChain", - "label": "OpenAPIChain", - "type": "OpenAPIChain | BaseChain" - } - ], - "outputs": {}, - "selected": false - }, - "selected": false, - "positionAbsolute": { - "x": 1203.1825726424859, - "y": 300.7226683414998 - }, - "dragging": false - }, - { - "width": 300, - "height": 670, - "id": "chatOpenAI_1", - "position": { - "x": 792.3201947594027, - "y": 293.61889966751846 - }, - "type": "customNode", - "data": { - "id": "chatOpenAI_1", - "label": "ChatOpenAI", - "version": 6, - "name": "chatOpenAI", - "type": "ChatOpenAI", - "baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"], - "category": "Chat Models", - "description": "Wrapper around OpenAI large language models that use the Chat endpoint", - "inputParams": [ - { - "label": "Connect Credential", - "name": "credential", - "type": "credential", - "credentialNames": ["openAIApi"], - "id": "chatOpenAI_1-input-credential-credential" - }, - { - "label": "Model Name", - "name": "modelName", - "type": "asyncOptions", - "loadMethod": "listModels", - "default": 
"gpt-3.5-turbo", - "id": "chatOpenAI_1-input-modelName-options" - }, - { - "label": "Temperature", - "name": "temperature", - "type": "number", - "default": 0.9, - "optional": true, - "id": "chatOpenAI_1-input-temperature-number" - }, - { - "label": "Max Tokens", - "name": "maxTokens", - "type": "number", - "optional": true, - "additionalParams": true, - "id": "chatOpenAI_1-input-maxTokens-number" - }, - { - "label": "Top Probability", - "name": "topP", - "type": "number", - "optional": true, - "additionalParams": true, - "id": "chatOpenAI_1-input-topP-number" - }, - { - "label": "Frequency Penalty", - "name": "frequencyPenalty", - "type": "number", - "optional": true, - "additionalParams": true, - "id": "chatOpenAI_1-input-frequencyPenalty-number" - }, - { - "label": "Presence Penalty", - "name": "presencePenalty", - "type": "number", - "optional": true, - "additionalParams": true, - "id": "chatOpenAI_1-input-presencePenalty-number" - }, - { - "label": "Timeout", - "name": "timeout", - "type": "number", - "optional": true, - "additionalParams": true, - "id": "chatOpenAI_1-input-timeout-number" - }, - { - "label": "BasePath", - "name": "basepath", - "type": "string", - "optional": true, - "additionalParams": true, - "id": "chatOpenAI_1-input-basepath-string" - }, - { - "label": "BaseOptions", - "name": "baseOptions", - "type": "json", - "optional": true, - "additionalParams": true, - "id": "chatOpenAI_1-input-baseOptions-json" - }, - { - "label": "Allow Image Uploads", - "name": "allowImageUploads", - "type": "boolean", - "description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. 
Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent", - "default": false, - "optional": true, - "id": "chatOpenAI_1-input-allowImageUploads-boolean" - }, - { - "label": "Image Resolution", - "description": "This parameter controls the resolution in which the model views the image.", - "name": "imageResolution", - "type": "options", - "options": [ - { - "label": "Low", - "name": "low" - }, - { - "label": "High", - "name": "high" - }, - { - "label": "Auto", - "name": "auto" - } - ], - "default": "low", - "optional": false, - "additionalParams": true, - "id": "chatOpenAI_1-input-imageResolution-options" - } - ], - "inputAnchors": [ - { - "label": "Cache", - "name": "cache", - "type": "BaseCache", - "optional": true, - "id": "chatOpenAI_1-input-cache-BaseCache" - } - ], - "inputs": { - "modelName": "gpt-3.5-turbo", - "temperature": 0.9, - "maxTokens": "", - "topP": "", - "frequencyPenalty": "", - "presencePenalty": "", - "timeout": "", - "basepath": "", - "baseOptions": "", - "allowImageUploads": true, - "imageResolution": "low" - }, - "outputAnchors": [ - { - "id": "chatOpenAI_1-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel", - "name": "chatOpenAI", - "label": "ChatOpenAI", - "type": "ChatOpenAI | BaseChatModel | BaseLanguageModel" - } - ], - "outputs": {}, - "selected": false - }, - "selected": false, - "positionAbsolute": { - "x": 792.3201947594027, - "y": 293.61889966751846 - }, - "dragging": false - }, - { - "width": 300, - "height": 603, - "id": "chainTool_0", - "position": { - "x": 1635.3466862861876, - "y": 272.3189405402944 - }, - "type": "customNode", - "data": { - "id": "chainTool_0", - "label": "Chain Tool", - "version": 1, - "name": "chainTool", - "type": "ChainTool", - "baseClasses": ["ChainTool", "DynamicTool", "Tool", "StructuredTool"], - "category": "Tools", - "description": "Use a chain as allowed tool for agent", - "inputParams": [ - { - "label": "Chain Name", - "name": "name", - "type": "string", - 
"placeholder": "state-of-union-qa", - "id": "chainTool_0-input-name-string" - }, - { - "label": "Chain Description", - "name": "description", - "type": "string", - "rows": 3, - "placeholder": "State of the Union QA - useful for when you need to ask questions about the most recent state of the union address.", - "id": "chainTool_0-input-description-string" - }, - { - "label": "Return Direct", - "name": "returnDirect", - "type": "boolean", - "optional": true, - "id": "chainTool_0-input-returnDirect-boolean" - } - ], - "inputAnchors": [ - { - "label": "Base Chain", - "name": "baseChain", - "type": "BaseChain", - "id": "chainTool_0-input-baseChain-BaseChain" - } - ], - "inputs": { - "name": "shopping-qa", - "description": "useful for when you need to search for e-commerce products like shirt, pants, dress, glasses, etc.", - "returnDirect": false, - "baseChain": "{{openApiChain_1.data.instance}}" - }, - "outputAnchors": [ - { - "id": "chainTool_0-output-chainTool-ChainTool|DynamicTool|Tool|StructuredTool", - "name": "chainTool", - "label": "ChainTool", - "type": "ChainTool | DynamicTool | Tool | StructuredTool" - } - ], - "outputs": {}, - "selected": false - }, - "selected": false, - "positionAbsolute": { - "x": 1635.3466862861876, - "y": 272.3189405402944 - }, - "dragging": false - }, - { - "width": 300, - "height": 670, - "id": "chatOpenAI_2", - "position": { - "x": 1566.5049234393214, - "y": 920.3787183665902 - }, - "type": "customNode", - "data": { - "id": "chatOpenAI_2", - "label": "ChatOpenAI", - "version": 6, - "name": "chatOpenAI", - "type": "ChatOpenAI", - "baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"], - "category": "Chat Models", - "description": "Wrapper around OpenAI large language models that use the Chat endpoint", - "inputParams": [ - { - "label": "Connect Credential", - "name": "credential", - "type": "credential", - "credentialNames": ["openAIApi"], - "id": "chatOpenAI_2-input-credential-credential" - }, - { - "label": "Model 
Name", - "name": "modelName", - "type": "asyncOptions", - "loadMethod": "listModels", - "default": "gpt-3.5-turbo", - "id": "chatOpenAI_2-input-modelName-options" - }, - { - "label": "Temperature", - "name": "temperature", - "type": "number", - "default": 0.9, - "optional": true, - "id": "chatOpenAI_2-input-temperature-number" - }, - { - "label": "Max Tokens", - "name": "maxTokens", - "type": "number", - "optional": true, - "additionalParams": true, - "id": "chatOpenAI_2-input-maxTokens-number" - }, - { - "label": "Top Probability", - "name": "topP", - "type": "number", - "optional": true, - "additionalParams": true, - "id": "chatOpenAI_2-input-topP-number" - }, - { - "label": "Frequency Penalty", - "name": "frequencyPenalty", - "type": "number", - "optional": true, - "additionalParams": true, - "id": "chatOpenAI_2-input-frequencyPenalty-number" - }, - { - "label": "Presence Penalty", - "name": "presencePenalty", - "type": "number", - "optional": true, - "additionalParams": true, - "id": "chatOpenAI_2-input-presencePenalty-number" - }, - { - "label": "Timeout", - "name": "timeout", - "type": "number", - "optional": true, - "additionalParams": true, - "id": "chatOpenAI_2-input-timeout-number" - }, - { - "label": "BasePath", - "name": "basepath", - "type": "string", - "optional": true, - "additionalParams": true, - "id": "chatOpenAI_2-input-basepath-string" - }, - { - "label": "BaseOptions", - "name": "baseOptions", - "type": "json", - "optional": true, - "additionalParams": true, - "id": "chatOpenAI_2-input-baseOptions-json" - }, - { - "label": "Allow Image Uploads", - "name": "allowImageUploads", - "type": "boolean", - "description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. 
Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent", - "default": false, - "optional": true, - "id": "chatOpenAI_2-input-allowImageUploads-boolean" - }, - { - "label": "Image Resolution", - "description": "This parameter controls the resolution in which the model views the image.", - "name": "imageResolution", - "type": "options", - "options": [ - { - "label": "Low", - "name": "low" - }, - { - "label": "High", - "name": "high" - }, - { - "label": "Auto", - "name": "auto" - } - ], - "default": "low", - "optional": false, - "additionalParams": true, - "id": "chatOpenAI_2-input-imageResolution-options" - } - ], - "inputAnchors": [ - { - "label": "Cache", - "name": "cache", - "type": "BaseCache", - "optional": true, - "id": "chatOpenAI_2-input-cache-BaseCache" - } - ], - "inputs": { - "modelName": "gpt-3.5-turbo", - "temperature": 0.9, - "maxTokens": "", - "topP": "", - "frequencyPenalty": "", - "presencePenalty": "", - "timeout": "", - "basepath": "", - "baseOptions": "", - "allowImageUploads": true, - "imageResolution": "low" - }, - "outputAnchors": [ - { - "id": "chatOpenAI_2-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel", - "name": "chatOpenAI", - "label": "ChatOpenAI", - "type": "ChatOpenAI | BaseChatModel | BaseLanguageModel" - } - ], - "outputs": {}, - "selected": false - }, - "selected": false, - "positionAbsolute": { - "x": 1566.5049234393214, - "y": 920.3787183665902 - }, - "dragging": false - }, - { - "width": 300, - "height": 253, - "id": "bufferMemory_0", - "position": { - "x": 1148.8461056155377, - "y": 967.8215757228843 - }, - "type": "customNode", - "data": { - "id": "bufferMemory_0", - "label": "Buffer Memory", - "version": 2, - "name": "bufferMemory", - "type": "BufferMemory", - "baseClasses": ["BufferMemory", "BaseChatMemory", "BaseMemory"], - "category": "Memory", - "description": "Retrieve chat messages stored in database", - "inputParams": [ - { - "label": "Session Id", - "name": "sessionId", - 
"type": "string", - "description": "If not specified, a random id will be used. Learn more", - "default": "", - "additionalParams": true, - "optional": true, - "id": "bufferMemory_0-input-sessionId-string" - }, - { - "label": "Memory Key", - "name": "memoryKey", - "type": "string", - "default": "chat_history", - "additionalParams": true, - "id": "bufferMemory_0-input-memoryKey-string" - } - ], - "inputAnchors": [], - "inputs": { - "sessionId": "", - "memoryKey": "chat_history" - }, - "outputAnchors": [ - { - "id": "bufferMemory_0-output-bufferMemory-BufferMemory|BaseChatMemory|BaseMemory", - "name": "bufferMemory", - "label": "BufferMemory", - "type": "BufferMemory | BaseChatMemory | BaseMemory" - } - ], - "outputs": {}, - "selected": false - }, - "positionAbsolute": { - "x": 1148.8461056155377, - "y": 967.8215757228843 - }, - "selected": false - }, - { - "id": "toolAgent_0", - "position": { - "x": 2054.7555242376347, - "y": 710.4140533942601 - }, - "type": "customNode", - "data": { - "id": "toolAgent_0", - "label": "Tool Agent", - "version": 1, - "name": "toolAgent", - "type": "AgentExecutor", - "baseClasses": ["AgentExecutor", "BaseChain", "Runnable"], - "category": "Agents", - "description": "Agent that uses Function Calling to pick the tools and args to call", - "inputParams": [ - { - "label": "System Message", - "name": "systemMessage", - "type": "string", - "default": "You are a helpful AI assistant.", - "rows": 4, - "optional": true, - "additionalParams": true, - "id": "toolAgent_0-input-systemMessage-string" - }, - { - "label": "Max Iterations", - "name": "maxIterations", - "type": "number", - "optional": true, - "additionalParams": true, - "id": "toolAgent_0-input-maxIterations-number" - } - ], - "inputAnchors": [ - { - "label": "Tools", - "name": "tools", - "type": "Tool", - "list": true, - "id": "toolAgent_0-input-tools-Tool" - }, - { - "label": "Memory", - "name": "memory", - "type": "BaseChatMemory", - "id": "toolAgent_0-input-memory-BaseChatMemory" - 
}, - { - "label": "Tool Calling Chat Model", - "name": "model", - "type": "BaseChatModel", - "description": "Only compatible with models that are capable of function calling: ChatOpenAI, ChatMistral, ChatAnthropic, ChatGoogleGenerativeAI, ChatVertexAI, GroqChat", - "id": "toolAgent_0-input-model-BaseChatModel" - }, - { - "label": "Input Moderation", - "description": "Detect text that could generate harmful output and prevent it from being sent to the language model", - "name": "inputModeration", - "type": "Moderation", - "optional": true, - "list": true, - "id": "toolAgent_0-input-inputModeration-Moderation" - } - ], - "inputs": { - "tools": ["{{chainTool_0.data.instance}}"], - "memory": "{{bufferMemory_0.data.instance}}", - "model": "{{chatOpenAI_2.data.instance}}", - "systemMessage": "You are a helpful AI assistant.", - "inputModeration": "", - "maxIterations": "" - }, - "outputAnchors": [ - { - "id": "toolAgent_0-output-toolAgent-AgentExecutor|BaseChain|Runnable", - "name": "toolAgent", - "label": "AgentExecutor", - "description": "Agent that uses Function Calling to pick the tools and args to call", - "type": "AgentExecutor | BaseChain | Runnable" - } - ], - "outputs": {}, - "selected": false - }, - "width": 300, - "height": 435, - "selected": false, - "positionAbsolute": { - "x": 2054.7555242376347, - "y": 710.4140533942601 - }, - "dragging": false - }, - { - "id": "stickyNote_0", - "position": { - "x": 2046.8203973748023, - "y": 399.1483966834255 - }, - "type": "stickyNote", - "data": { - "id": "stickyNote_0", - "label": "Sticky Note", - "version": 2, - "name": "stickyNote", - "type": "StickyNote", - "baseClasses": ["StickyNote"], - "tags": ["Utilities"], - "category": "Utilities", - "description": "Add a sticky note", - "inputParams": [ - { - "label": "", - "name": "note", - "type": "string", - "rows": 1, - "placeholder": "Type something here", - "optional": true, - "id": "stickyNote_0-input-note-string" - } - ], - "inputAnchors": [], - "inputs": { - "note": 
"Using agent, we give it a tool that is attached to an OpenAPI Chain.\n\nOpenAPI Chain uses a LLM to automatically figure out what is the correct URL and params to call given the YML spec file.\n\nResults are then fetched back to agent.\n\nExample question:\nI am looking for some blue tshirt, can u help me find some?" - }, - "outputAnchors": [ - { - "id": "stickyNote_0-output-stickyNote-StickyNote", - "name": "stickyNote", - "label": "StickyNote", - "description": "Add a sticky note", - "type": "StickyNote" - } - ], - "outputs": {}, - "selected": false - }, - "width": 300, - "height": 284, - "selected": false, - "positionAbsolute": { - "x": 2046.8203973748023, - "y": 399.1483966834255 - }, - "dragging": false - } - ], - "edges": [ - { - "source": "chatOpenAI_1", - "sourceHandle": "chatOpenAI_1-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel", - "target": "openApiChain_1", - "targetHandle": "openApiChain_1-input-model-ChatOpenAI", - "type": "buttonedge", - "id": "chatOpenAI_1-chatOpenAI_1-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel-openApiChain_1-openApiChain_1-input-model-ChatOpenAI", - "data": { - "label": "" - } - }, - { - "source": "openApiChain_1", - "sourceHandle": "openApiChain_1-output-openApiChain-OpenAPIChain|BaseChain", - "target": "chainTool_0", - "targetHandle": "chainTool_0-input-baseChain-BaseChain", - "type": "buttonedge", - "id": "openApiChain_1-openApiChain_1-output-openApiChain-OpenAPIChain|BaseChain-chainTool_0-chainTool_0-input-baseChain-BaseChain", - "data": { - "label": "" - } - }, - { - "source": "chainTool_0", - "sourceHandle": "chainTool_0-output-chainTool-ChainTool|DynamicTool|Tool|StructuredTool", - "target": "toolAgent_0", - "targetHandle": "toolAgent_0-input-tools-Tool", - "type": "buttonedge", - "id": "chainTool_0-chainTool_0-output-chainTool-ChainTool|DynamicTool|Tool|StructuredTool-toolAgent_0-toolAgent_0-input-tools-Tool" - }, - { - "source": "chatOpenAI_2", - "sourceHandle": 
"chatOpenAI_2-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel", - "target": "toolAgent_0", - "targetHandle": "toolAgent_0-input-model-BaseChatModel", - "type": "buttonedge", - "id": "chatOpenAI_2-chatOpenAI_2-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel-toolAgent_0-toolAgent_0-input-model-BaseChatModel" - }, - { - "source": "bufferMemory_0", - "sourceHandle": "bufferMemory_0-output-bufferMemory-BufferMemory|BaseChatMemory|BaseMemory", - "target": "toolAgent_0", - "targetHandle": "toolAgent_0-input-memory-BaseChatMemory", - "type": "buttonedge", - "id": "bufferMemory_0-bufferMemory_0-output-bufferMemory-BufferMemory|BaseChatMemory|BaseMemory-toolAgent_0-toolAgent_0-input-memory-BaseChatMemory" - } - ] -} diff --git a/packages/server/marketplaces/chatflows/WebBrowser.json b/packages/server/marketplaces/chatflows/WebBrowser.json deleted file mode 100644 index 68ce1b222..000000000 --- a/packages/server/marketplaces/chatflows/WebBrowser.json +++ /dev/null @@ -1,773 +0,0 @@ -{ - "description": "Conversational Agent with ability to visit a website and extract information", - "usecases": ["Agent"], - "framework": ["Langchain"], - "nodes": [ - { - "width": 300, - "height": 253, - "id": "bufferMemory_0", - "position": { - "x": 457.04304716743604, - "y": 362.4048129799687 - }, - "type": "customNode", - "data": { - "id": "bufferMemory_0", - "label": "Buffer Memory", - "version": 2, - "name": "bufferMemory", - "type": "BufferMemory", - "baseClasses": ["BufferMemory", "BaseChatMemory", "BaseMemory"], - "category": "Memory", - "description": "Retrieve chat messages stored in database", - "inputParams": [ - { - "label": "Session Id", - "name": "sessionId", - "type": "string", - "description": "If not specified, a random id will be used. 
Learn more", - "default": "", - "additionalParams": true, - "optional": true, - "id": "bufferMemory_0-input-sessionId-string" - }, - { - "label": "Memory Key", - "name": "memoryKey", - "type": "string", - "default": "chat_history", - "additionalParams": true, - "id": "bufferMemory_0-input-memoryKey-string" - } - ], - "inputAnchors": [], - "inputs": { - "sessionId": "", - "memoryKey": "chat_history" - }, - "outputAnchors": [ - { - "id": "bufferMemory_0-output-bufferMemory-BufferMemory|BaseChatMemory|BaseMemory", - "name": "bufferMemory", - "label": "BufferMemory", - "type": "BufferMemory | BaseChatMemory | BaseMemory" - } - ], - "outputs": {}, - "selected": false - }, - "selected": false, - "positionAbsolute": { - "x": 457.04304716743604, - "y": 362.4048129799687 - }, - "dragging": false - }, - { - "width": 300, - "height": 281, - "id": "webBrowser_0", - "position": { - "x": 1091.0866823400172, - "y": -16.43806989958216 - }, - "type": "customNode", - "data": { - "id": "webBrowser_0", - "label": "Web Browser", - "version": 1, - "name": "webBrowser", - "type": "WebBrowser", - "baseClasses": ["WebBrowser", "Tool", "StructuredTool", "BaseLangChain"], - "category": "Tools", - "description": "Gives agent the ability to visit a website and extract information", - "inputParams": [], - "inputAnchors": [ - { - "label": "Language Model", - "name": "model", - "type": "BaseLanguageModel", - "id": "webBrowser_0-input-model-BaseLanguageModel" - }, - { - "label": "Embeddings", - "name": "embeddings", - "type": "Embeddings", - "id": "webBrowser_0-input-embeddings-Embeddings" - } - ], - "inputs": { - "model": "{{chatOpenAI_0.data.instance}}", - "embeddings": "{{openAIEmbeddings_0.data.instance}}" - }, - "outputAnchors": [ - { - "id": "webBrowser_0-output-webBrowser-WebBrowser|Tool|StructuredTool|BaseLangChain", - "name": "webBrowser", - "label": "WebBrowser", - "type": "WebBrowser | Tool | StructuredTool | BaseLangChain" - } - ], - "outputs": {}, - "selected": false - }, - 
"selected": false, - "positionAbsolute": { - "x": 1091.0866823400172, - "y": -16.43806989958216 - }, - "dragging": false - }, - { - "width": 300, - "height": 670, - "id": "chatOpenAI_0", - "position": { - "x": 741.9540879250319, - "y": -534.6535148852278 - }, - "type": "customNode", - "data": { - "id": "chatOpenAI_0", - "label": "ChatOpenAI", - "version": 6, - "name": "chatOpenAI", - "type": "ChatOpenAI", - "baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"], - "category": "Chat Models", - "description": "Wrapper around OpenAI large language models that use the Chat endpoint", - "inputParams": [ - { - "label": "Connect Credential", - "name": "credential", - "type": "credential", - "credentialNames": ["openAIApi"], - "id": "chatOpenAI_0-input-credential-credential" - }, - { - "label": "Model Name", - "name": "modelName", - "type": "asyncOptions", - "loadMethod": "listModels", - "default": "gpt-3.5-turbo", - "id": "chatOpenAI_0-input-modelName-options" - }, - { - "label": "Temperature", - "name": "temperature", - "type": "number", - "default": 0.9, - "optional": true, - "id": "chatOpenAI_0-input-temperature-number" - }, - { - "label": "Max Tokens", - "name": "maxTokens", - "type": "number", - "optional": true, - "additionalParams": true, - "id": "chatOpenAI_0-input-maxTokens-number" - }, - { - "label": "Top Probability", - "name": "topP", - "type": "number", - "optional": true, - "additionalParams": true, - "id": "chatOpenAI_0-input-topP-number" - }, - { - "label": "Frequency Penalty", - "name": "frequencyPenalty", - "type": "number", - "optional": true, - "additionalParams": true, - "id": "chatOpenAI_0-input-frequencyPenalty-number" - }, - { - "label": "Presence Penalty", - "name": "presencePenalty", - "type": "number", - "optional": true, - "additionalParams": true, - "id": "chatOpenAI_0-input-presencePenalty-number" - }, - { - "label": "Timeout", - "name": "timeout", - "type": "number", - "optional": true, - "additionalParams": true, - "id": 
"chatOpenAI_0-input-timeout-number" - }, - { - "label": "BasePath", - "name": "basepath", - "type": "string", - "optional": true, - "additionalParams": true, - "id": "chatOpenAI_0-input-basepath-string" - }, - { - "label": "BaseOptions", - "name": "baseOptions", - "type": "json", - "optional": true, - "additionalParams": true, - "id": "chatOpenAI_0-input-baseOptions-json" - }, - { - "label": "Allow Image Uploads", - "name": "allowImageUploads", - "type": "boolean", - "description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent", - "default": false, - "optional": true, - "id": "chatOpenAI_0-input-allowImageUploads-boolean" - }, - { - "label": "Image Resolution", - "description": "This parameter controls the resolution in which the model views the image.", - "name": "imageResolution", - "type": "options", - "options": [ - { - "label": "Low", - "name": "low" - }, - { - "label": "High", - "name": "high" - }, - { - "label": "Auto", - "name": "auto" - } - ], - "default": "low", - "optional": false, - "additionalParams": true, - "id": "chatOpenAI_0-input-imageResolution-options" - } - ], - "inputAnchors": [ - { - "label": "Cache", - "name": "cache", - "type": "BaseCache", - "optional": true, - "id": "chatOpenAI_0-input-cache-BaseCache" - } - ], - "inputs": { - "modelName": "gpt-3.5-turbo", - "temperature": 0.9, - "maxTokens": "", - "topP": "", - "frequencyPenalty": "", - "presencePenalty": "", - "timeout": "", - "basepath": "", - "baseOptions": "", - "allowImageUploads": true, - "imageResolution": "low" - }, - "outputAnchors": [ - { - "id": "chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel", - "name": "chatOpenAI", - "label": "ChatOpenAI", - "type": "ChatOpenAI | BaseChatModel | BaseLanguageModel" - } - ], - "outputs": {}, - "selected": false - }, - "selected": false, - "positionAbsolute": { - "x": 741.9540879250319, - "y": 
-534.6535148852278 - }, - "dragging": false - }, - { - "width": 300, - "height": 424, - "id": "openAIEmbeddings_0", - "position": { - "x": 403.72014625628697, - "y": -103.82540449681527 - }, - "type": "customNode", - "data": { - "id": "openAIEmbeddings_0", - "label": "OpenAI Embeddings", - "version": 4, - "name": "openAIEmbeddings", - "type": "OpenAIEmbeddings", - "baseClasses": ["OpenAIEmbeddings", "Embeddings"], - "category": "Embeddings", - "description": "OpenAI API to generate embeddings for a given text", - "inputParams": [ - { - "label": "Connect Credential", - "name": "credential", - "type": "credential", - "credentialNames": ["openAIApi"], - "id": "openAIEmbeddings_0-input-credential-credential" - }, - { - "label": "Model Name", - "name": "modelName", - "type": "asyncOptions", - "loadMethod": "listModels", - "default": "text-embedding-ada-002", - "id": "openAIEmbeddings_0-input-modelName-asyncOptions" - }, - { - "label": "Strip New Lines", - "name": "stripNewLines", - "type": "boolean", - "optional": true, - "additionalParams": true, - "id": "openAIEmbeddings_0-input-stripNewLines-boolean" - }, - { - "label": "Batch Size", - "name": "batchSize", - "type": "number", - "optional": true, - "additionalParams": true, - "id": "openAIEmbeddings_0-input-batchSize-number" - }, - { - "label": "Timeout", - "name": "timeout", - "type": "number", - "optional": true, - "additionalParams": true, - "id": "openAIEmbeddings_0-input-timeout-number" - }, - { - "label": "BasePath", - "name": "basepath", - "type": "string", - "optional": true, - "additionalParams": true, - "id": "openAIEmbeddings_0-input-basepath-string" - }, - { - "label": "Dimensions", - "name": "dimensions", - "type": "number", - "optional": true, - "additionalParams": true, - "id": "openAIEmbeddings_0-input-dimensions-number" - } - ], - "inputAnchors": [], - "inputs": { - "modelName": "text-embedding-ada-002", - "stripNewLines": "", - "batchSize": "", - "timeout": "", - "basepath": "", - "dimensions": "" - 
}, - "outputAnchors": [ - { - "id": "openAIEmbeddings_0-output-openAIEmbeddings-OpenAIEmbeddings|Embeddings", - "name": "openAIEmbeddings", - "label": "OpenAIEmbeddings", - "description": "OpenAI API to generate embeddings for a given text", - "type": "OpenAIEmbeddings | Embeddings" - } - ], - "outputs": {}, - "selected": false - }, - "selected": false, - "positionAbsolute": { - "x": 403.72014625628697, - "y": -103.82540449681527 - }, - "dragging": false - }, - { - "width": 300, - "height": 670, - "id": "chatOpenAI_1", - "position": { - "x": 68.312124033115, - "y": -239.65476709991256 - }, - "type": "customNode", - "data": { - "id": "chatOpenAI_1", - "label": "ChatOpenAI", - "version": 6, - "name": "chatOpenAI", - "type": "ChatOpenAI", - "baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"], - "category": "Chat Models", - "description": "Wrapper around OpenAI large language models that use the Chat endpoint", - "inputParams": [ - { - "label": "Connect Credential", - "name": "credential", - "type": "credential", - "credentialNames": ["openAIApi"], - "id": "chatOpenAI_1-input-credential-credential" - }, - { - "label": "Model Name", - "name": "modelName", - "type": "asyncOptions", - "loadMethod": "listModels", - "default": "gpt-3.5-turbo", - "id": "chatOpenAI_1-input-modelName-options" - }, - { - "label": "Temperature", - "name": "temperature", - "type": "number", - "default": 0.9, - "optional": true, - "id": "chatOpenAI_1-input-temperature-number" - }, - { - "label": "Max Tokens", - "name": "maxTokens", - "type": "number", - "optional": true, - "additionalParams": true, - "id": "chatOpenAI_1-input-maxTokens-number" - }, - { - "label": "Top Probability", - "name": "topP", - "type": "number", - "optional": true, - "additionalParams": true, - "id": "chatOpenAI_1-input-topP-number" - }, - { - "label": "Frequency Penalty", - "name": "frequencyPenalty", - "type": "number", - "optional": true, - "additionalParams": true, - "id": 
"chatOpenAI_1-input-frequencyPenalty-number" - }, - { - "label": "Presence Penalty", - "name": "presencePenalty", - "type": "number", - "optional": true, - "additionalParams": true, - "id": "chatOpenAI_1-input-presencePenalty-number" - }, - { - "label": "Timeout", - "name": "timeout", - "type": "number", - "optional": true, - "additionalParams": true, - "id": "chatOpenAI_1-input-timeout-number" - }, - { - "label": "BasePath", - "name": "basepath", - "type": "string", - "optional": true, - "additionalParams": true, - "id": "chatOpenAI_1-input-basepath-string" - }, - { - "label": "BaseOptions", - "name": "baseOptions", - "type": "json", - "optional": true, - "additionalParams": true, - "id": "chatOpenAI_1-input-baseOptions-json" - }, - { - "label": "Allow Image Uploads", - "name": "allowImageUploads", - "type": "boolean", - "description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent", - "default": false, - "optional": true, - "id": "chatOpenAI_1-input-allowImageUploads-boolean" - }, - { - "label": "Image Resolution", - "description": "This parameter controls the resolution in which the model views the image.", - "name": "imageResolution", - "type": "options", - "options": [ - { - "label": "Low", - "name": "low" - }, - { - "label": "High", - "name": "high" - }, - { - "label": "Auto", - "name": "auto" - } - ], - "default": "low", - "optional": false, - "additionalParams": true, - "id": "chatOpenAI_1-input-imageResolution-options" - } - ], - "inputAnchors": [ - { - "label": "Cache", - "name": "cache", - "type": "BaseCache", - "optional": true, - "id": "chatOpenAI_1-input-cache-BaseCache" - } - ], - "inputs": { - "modelName": "gpt-3.5-turbo-16k", - "temperature": 0.9, - "maxTokens": "", - "topP": "", - "frequencyPenalty": "", - "presencePenalty": "", - "timeout": "", - "basepath": "", - "baseOptions": "", - "allowImageUploads": true, - 
"imageResolution": "low" - }, - "outputAnchors": [ - { - "id": "chatOpenAI_1-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel", - "name": "chatOpenAI", - "label": "ChatOpenAI", - "type": "ChatOpenAI | BaseChatModel | BaseLanguageModel" - } - ], - "outputs": {}, - "selected": false - }, - "selected": false, - "positionAbsolute": { - "x": 68.312124033115, - "y": -239.65476709991256 - }, - "dragging": false - }, - { - "width": 300, - "height": 435, - "id": "conversationalAgent_0", - "position": { - "x": 1518.944765840293, - "y": 212.2513364217197 - }, - "type": "customNode", - "data": { - "id": "conversationalAgent_0", - "label": "Conversational Agent", - "version": 3, - "name": "conversationalAgent", - "type": "AgentExecutor", - "baseClasses": ["AgentExecutor", "BaseChain", "Runnable"], - "category": "Agents", - "description": "Conversational agent for a chat model. It will utilize chat specific prompts", - "inputParams": [ - { - "label": "System Message", - "name": "systemMessage", - "type": "string", - "rows": 4, - "default": "Assistant is a large language model trained by OpenAI.\n\nAssistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n\nAssistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. 
Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n\nOverall, Assistant is a powerful system that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.", - "optional": true, - "additionalParams": true, - "id": "conversationalAgent_0-input-systemMessage-string" - }, - { - "label": "Max Iterations", - "name": "maxIterations", - "type": "number", - "optional": true, - "additionalParams": true, - "id": "conversationalAgent_0-input-maxIterations-number" - } - ], - "inputAnchors": [ - { - "label": "Allowed Tools", - "name": "tools", - "type": "Tool", - "list": true, - "id": "conversationalAgent_0-input-tools-Tool" - }, - { - "label": "Chat Model", - "name": "model", - "type": "BaseChatModel", - "id": "conversationalAgent_0-input-model-BaseChatModel" - }, - { - "label": "Memory", - "name": "memory", - "type": "BaseChatMemory", - "id": "conversationalAgent_0-input-memory-BaseChatMemory" - }, - { - "label": "Input Moderation", - "description": "Detect text that could generate harmful output and prevent it from being sent to the language model", - "name": "inputModeration", - "type": "Moderation", - "optional": true, - "list": true, - "id": "conversationalAgent_0-input-inputModeration-Moderation" - } - ], - "inputs": { - "inputModeration": "", - "tools": ["{{webBrowser_0.data.instance}}"], - "model": "{{chatOpenAI_1.data.instance}}", - "memory": "{{bufferMemory_0.data.instance}}", - "systemMessage": "Assistant is a large language model trained by OpenAI.\n\nAssistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. 
As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n\nAssistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n\nOverall, Assistant is a powerful system that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist." - }, - "outputAnchors": [ - { - "id": "conversationalAgent_0-output-conversationalAgent-AgentExecutor|BaseChain|Runnable", - "name": "conversationalAgent", - "label": "AgentExecutor", - "type": "AgentExecutor | BaseChain | Runnable" - } - ], - "outputs": {}, - "selected": false - }, - "selected": false, - "positionAbsolute": { - "x": 1518.944765840293, - "y": 212.2513364217197 - }, - "dragging": false - }, - { - "id": "stickyNote_0", - "position": { - "x": 1086.284843942572, - "y": -110.93321070573408 - }, - "type": "stickyNote", - "data": { - "id": "stickyNote_0", - "label": "Sticky Note", - "version": 2, - "name": "stickyNote", - "type": "StickyNote", - "baseClasses": ["StickyNote"], - "tags": ["Utilities"], - "category": "Utilities", - "description": "Add a sticky note", - "inputParams": [ - { - "label": "", - "name": "note", - "type": "string", - "rows": 1, - "placeholder": "Type something here", - "optional": true, - "id": "stickyNote_0-input-note-string" - } - ], - "inputAnchors": [], - "inputs": 
{ - "note": "Web Browser Tool gives agent the ability to visit a website and extract information" - }, - "outputAnchors": [ - { - "id": "stickyNote_0-output-stickyNote-StickyNote", - "name": "stickyNote", - "label": "StickyNote", - "description": "Add a sticky note", - "type": "StickyNote" - } - ], - "outputs": {}, - "selected": false - }, - "width": 300, - "height": 62, - "selected": false, - "positionAbsolute": { - "x": 1086.284843942572, - "y": -110.93321070573408 - }, - "dragging": false - } - ], - "edges": [ - { - "source": "openAIEmbeddings_0", - "sourceHandle": "openAIEmbeddings_0-output-openAIEmbeddings-OpenAIEmbeddings|Embeddings", - "target": "webBrowser_0", - "targetHandle": "webBrowser_0-input-embeddings-Embeddings", - "type": "buttonedge", - "id": "openAIEmbeddings_0-openAIEmbeddings_0-output-openAIEmbeddings-OpenAIEmbeddings|Embeddings-webBrowser_0-webBrowser_0-input-embeddings-Embeddings", - "data": { - "label": "" - } - }, - { - "source": "chatOpenAI_0", - "sourceHandle": "chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel", - "target": "webBrowser_0", - "targetHandle": "webBrowser_0-input-model-BaseLanguageModel", - "type": "buttonedge", - "id": "chatOpenAI_0-chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel-webBrowser_0-webBrowser_0-input-model-BaseLanguageModel", - "data": { - "label": "" - } - }, - { - "source": "webBrowser_0", - "sourceHandle": "webBrowser_0-output-webBrowser-WebBrowser|Tool|StructuredTool|BaseLangChain", - "target": "conversationalAgent_0", - "targetHandle": "conversationalAgent_0-input-tools-Tool", - "type": "buttonedge", - "id": "webBrowser_0-webBrowser_0-output-webBrowser-WebBrowser|Tool|StructuredTool|BaseLangChain-conversationalAgent_0-conversationalAgent_0-input-tools-Tool", - "data": { - "label": "" - } - }, - { - "source": "chatOpenAI_1", - "sourceHandle": "chatOpenAI_1-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel", - "target": "conversationalAgent_0", - 
"targetHandle": "conversationalAgent_0-input-model-BaseChatModel", - "type": "buttonedge", - "id": "chatOpenAI_1-chatOpenAI_1-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel-conversationalAgent_0-conversationalAgent_0-input-model-BaseChatModel", - "data": { - "label": "" - } - }, - { - "source": "bufferMemory_0", - "sourceHandle": "bufferMemory_0-output-bufferMemory-BufferMemory|BaseChatMemory|BaseMemory", - "target": "conversationalAgent_0", - "targetHandle": "conversationalAgent_0-input-memory-BaseChatMemory", - "type": "buttonedge", - "id": "bufferMemory_0-bufferMemory_0-output-bufferMemory-BufferMemory|BaseChatMemory|BaseMemory-conversationalAgent_0-conversationalAgent_0-input-memory-BaseChatMemory", - "data": { - "label": "" - } - } - ] -} diff --git a/packages/server/nodemon.json b/packages/server/nodemon.json index d4688a54d..d9f26ec57 100644 --- a/packages/server/nodemon.json +++ b/packages/server/nodemon.json @@ -1,6 +1,6 @@ { "ignore": ["**/*.spec.ts", ".git", "node_modules"], - "watch": ["commands", "index.ts", "src", "../components/nodes", "../components/src"], + "watch": ["commands", "index.ts", "src"], "exec": "pnpm start", "ext": "ts" } diff --git a/packages/server/package.json b/packages/server/package.json index ad002dc1c..2075f3949 100644 --- a/packages/server/package.json +++ b/packages/server/package.json @@ -1,6 +1,6 @@ { "name": "flowise", - "version": "2.2.5", + "version": "3.0.0", "description": "Flowiseai Server", "main": "dist/index", "types": "dist/index.d.ts", @@ -28,7 +28,7 @@ "start:default": "cd bin && ./run start", "start-worker:windows": "cd bin && run worker", "start-worker:default": "cd bin && ./run worker", - "dev": "tsc-watch --noClear -p ./tsconfig.json --onSuccess \"pnpm start\"", + "dev": "nodemon", "oclif-dev": "run-script-os", "oclif-dev:windows": "cd bin && dev start", "oclif-dev:default": "cd bin && ./dev start", @@ -57,6 +57,7 @@ "license": "SEE LICENSE IN LICENSE.md", "dependencies": { 
"@aws-sdk/client-secrets-manager": "^3.699.0", + "@google-cloud/logging-winston": "^6.0.0", "@oclif/core": "4.0.7", "@opentelemetry/api": "^1.3.0", "@opentelemetry/auto-instrumentations-node": "^0.52.0", @@ -75,9 +76,9 @@ "@types/lodash": "^4.14.202", "@types/uuid": "^9.0.7", "async-mutex": "^0.4.0", - "axios": "1.6.2", + "axios": "1.7.9", "bull-board": "^2.1.3", - "bullmq": "^5.13.2", + "bullmq": "^5.42.0", "content-disposition": "0.5.4", "cors": "^2.8.5", "crypto-js": "^4.1.1", @@ -95,9 +96,11 @@ "moment": "^2.29.3", "moment-timezone": "^0.5.34", "multer": "^1.4.5-lts.1", + "multer-cloud-storage": "^4.0.0", "multer-s3": "^3.0.1", "mysql2": "^3.11.3", - "openai": "^4.82.0", + "flowise-nim-container-manager": "^1.0.11", + "openai": "^4.96.0", "pg": "^8.11.1", "posthog-node": "^3.5.0", "prom-client": "^15.1.3", @@ -106,6 +109,7 @@ "s3-streamlogger": "^1.11.0", "sanitize-html": "^2.11.0", "sqlite3": "^5.1.6", + "turndown": "^7.2.0", "typeorm": "^0.3.6", "uuid": "^9.0.1", "winston": "^3.9.0" @@ -117,6 +121,7 @@ "@types/multer": "^1.4.7", "@types/multer-s3": "^3.0.3", "@types/sanitize-html": "^2.9.5", + "@types/turndown": "^5.0.5", "concurrently": "^7.1.0", "cypress": "^13.13.0", "nodemon": "^2.0.22", diff --git a/packages/server/src/CachePool.ts b/packages/server/src/CachePool.ts index b8662a8e9..e978d89de 100644 --- a/packages/server/src/CachePool.ts +++ b/packages/server/src/CachePool.ts @@ -12,7 +12,12 @@ export class CachePool { constructor() { if (process.env.MODE === MODE.QUEUE) { if (process.env.REDIS_URL) { - this.redisClient = new Redis(process.env.REDIS_URL) + this.redisClient = new Redis(process.env.REDIS_URL, { + keepAlive: + process.env.REDIS_KEEP_ALIVE && !isNaN(parseInt(process.env.REDIS_KEEP_ALIVE, 10)) + ? parseInt(process.env.REDIS_KEEP_ALIVE, 10) + : undefined + }) } else { this.redisClient = new Redis({ host: process.env.REDIS_HOST || 'localhost', @@ -26,6 +31,10 @@ export class CachePool { key: process.env.REDIS_KEY ? 
Buffer.from(process.env.REDIS_KEY, 'base64') : undefined, ca: process.env.REDIS_CA ? Buffer.from(process.env.REDIS_CA, 'base64') : undefined } + : undefined, + keepAlive: + process.env.REDIS_KEEP_ALIVE && !isNaN(parseInt(process.env.REDIS_KEEP_ALIVE, 10)) + ? parseInt(process.env.REDIS_KEEP_ALIVE, 10) : undefined }) } diff --git a/packages/server/src/Interface.DocumentStore.ts b/packages/server/src/Interface.DocumentStore.ts index e882e0052..34d92978f 100644 --- a/packages/server/src/Interface.DocumentStore.ts +++ b/packages/server/src/Interface.DocumentStore.ts @@ -188,6 +188,7 @@ export const addLoaderSource = (loader: IDocumentStoreLoader, isGetFileNameOnly switch (loader.loaderId) { case 'pdfFile': + case 'docxFile': case 'jsonFile': case 'csvFile': case 'file': diff --git a/packages/server/src/Interface.ts b/packages/server/src/Interface.ts index fbf4ea129..7865727ce 100644 --- a/packages/server/src/Interface.ts +++ b/packages/server/src/Interface.ts @@ -2,8 +2,10 @@ import { IAction, ICommonObject, IFileUpload, + IHumanInput, INode, INodeData as INodeDataFromComponent, + INodeExecutionData, INodeParams, IServerSideEventStreamer } from 'flowise-components' @@ -13,10 +15,12 @@ import { Telemetry } from './utils/telemetry' export type MessageType = 'apiMessage' | 'userMessage' -export type ChatflowType = 'CHATFLOW' | 'MULTIAGENT' | 'ASSISTANT' +export type ChatflowType = 'CHATFLOW' | 'MULTIAGENT' | 'ASSISTANT' | 'AGENTFLOW' export type AssistantType = 'CUSTOM' | 'OPENAI' | 'AZURE' +export type ExecutionState = 'INPROGRESS' | 'FINISHED' | 'ERROR' | 'TERMINATED' | 'TIMEOUT' | 'STOPPED' + export enum MODE { QUEUE = 'queue', MAIN = 'main' @@ -57,6 +61,7 @@ export interface IChatMessage { role: MessageType content: string chatflowid: string + executionId?: string sourceDocuments?: string usedTools?: string fileAnnotations?: string @@ -140,6 +145,19 @@ export interface IUpsertHistory { date: Date } +export interface IExecution { + id: string + executionData: string + 
state: ExecutionState + agentflowId: string + sessionId: string + isPublic?: boolean + action?: string + createdDate: Date + updatedDate: Date + stoppedDate: Date +} + export interface IComponentNodes { [key: string]: INode } @@ -187,6 +205,8 @@ export interface IReactFlowNode { height: number selected: boolean dragging: boolean + parentNode?: string + extent?: string } export interface IReactFlowEdge { @@ -227,6 +247,14 @@ export interface IDepthQueue { [key: string]: number } +export interface IAgentflowExecutedData { + nodeLabel: string + nodeId: string + data: INodeExecutionData + previousNodeIds: string[] + status?: ExecutionState +} + export interface IMessage { message: string type: MessageType @@ -238,6 +266,7 @@ export interface IncomingInput { question: string overrideConfig?: ICommonObject chatId?: string + sessionId?: string stopNodeId?: string uploads?: IFileUpload[] leadEmail?: string @@ -246,6 +275,12 @@ export interface IncomingInput { streaming?: boolean } +export interface IncomingAgentflowInput extends Omit { + question?: string + form?: Record + humanInput?: IHumanInput +} + export interface IActiveChatflows { [key: string]: { startingNodes: IReactFlowNode[] @@ -266,6 +301,7 @@ export interface IOverrideConfig { label: string name: string type: string + schema?: ICommonObject[] } export type ICredentialDataDecrypted = ICommonObject @@ -315,6 +351,8 @@ export interface IFlowConfig { chatHistory: IMessage[] apiMessageId: string overrideConfig?: ICommonObject + state?: ICommonObject + runtimeChatHistoryLength?: number } export interface IPredictionQueueAppServer { @@ -333,7 +371,13 @@ export interface IExecuteFlowParams extends IPredictionQueueAppServer { isInternal: boolean signal?: AbortController files?: Express.Multer.File[] + fileUploads?: IFileUpload[] + uploadedFilesContent?: string isUpsert?: boolean + isRecursive?: boolean + parentExecutionId?: string + iterationContext?: ICommonObject + isTool?: boolean } export interface INodeOverrides { 
diff --git a/packages/server/src/commands/base.ts b/packages/server/src/commands/base.ts index 5bed81e56..9f0cd46f2 100644 --- a/packages/server/src/commands/base.ts +++ b/packages/server/src/commands/base.ts @@ -40,7 +40,6 @@ export abstract class BaseCommand extends Command { LANGCHAIN_ENDPOINT: Flags.string(), LANGCHAIN_API_KEY: Flags.string(), LANGCHAIN_PROJECT: Flags.string(), - DISABLE_FLOWISE_TELEMETRY: Flags.string(), MODEL_LIST_CONFIG_JSON: Flags.string(), STORAGE_TYPE: Flags.string(), S3_STORAGE_BUCKET_NAME: Flags.string(), @@ -49,6 +48,10 @@ export abstract class BaseCommand extends Command { S3_STORAGE_REGION: Flags.string(), S3_ENDPOINT_URL: Flags.string(), S3_FORCE_PATH_STYLE: Flags.string(), + GOOGLE_CLOUD_STORAGE_CREDENTIAL: Flags.string(), + GOOGLE_CLOUD_STORAGE_PROJ_ID: Flags.string(), + GOOGLE_CLOUD_STORAGE_BUCKET_NAME: Flags.string(), + GOOGLE_CLOUD_UNIFORM_BUCKET_ACCESS: Flags.string(), SHOW_COMMUNITY_NODES: Flags.string(), SECRETKEY_STORAGE_TYPE: Flags.string(), SECRETKEY_PATH: Flags.string(), @@ -61,6 +64,8 @@ export abstract class BaseCommand extends Command { WORKER_CONCURRENCY: Flags.string(), QUEUE_NAME: Flags.string(), QUEUE_REDIS_EVENT_STREAM_MAX_LEN: Flags.string(), + REMOVE_ON_AGE: Flags.string(), + REMOVE_ON_COUNT: Flags.string(), REDIS_URL: Flags.string(), REDIS_HOST: Flags.string(), REDIS_PORT: Flags.string(), @@ -69,7 +74,9 @@ export abstract class BaseCommand extends Command { REDIS_TLS: Flags.string(), REDIS_CERT: Flags.string(), REDIS_KEY: Flags.string(), - REDIS_CA: Flags.string() + REDIS_CA: Flags.string(), + REDIS_KEEP_ALIVE: Flags.string(), + ENABLE_BULLMQ_DASHBOARD: Flags.string() } protected async stopProcess() { @@ -167,9 +174,6 @@ export abstract class BaseCommand extends Command { if (flags.LANGCHAIN_API_KEY) process.env.LANGCHAIN_API_KEY = flags.LANGCHAIN_API_KEY if (flags.LANGCHAIN_PROJECT) process.env.LANGCHAIN_PROJECT = flags.LANGCHAIN_PROJECT - // Telemetry - if (flags.DISABLE_FLOWISE_TELEMETRY) 
process.env.DISABLE_FLOWISE_TELEMETRY = flags.DISABLE_FLOWISE_TELEMETRY - // Model list config if (flags.MODEL_LIST_CONFIG_JSON) process.env.MODEL_LIST_CONFIG_JSON = flags.MODEL_LIST_CONFIG_JSON @@ -182,6 +186,11 @@ export abstract class BaseCommand extends Command { if (flags.S3_STORAGE_REGION) process.env.S3_STORAGE_REGION = flags.S3_STORAGE_REGION if (flags.S3_ENDPOINT_URL) process.env.S3_ENDPOINT_URL = flags.S3_ENDPOINT_URL if (flags.S3_FORCE_PATH_STYLE) process.env.S3_FORCE_PATH_STYLE = flags.S3_FORCE_PATH_STYLE + if (flags.GOOGLE_CLOUD_STORAGE_CREDENTIAL) process.env.GOOGLE_CLOUD_STORAGE_CREDENTIAL = flags.GOOGLE_CLOUD_STORAGE_CREDENTIAL + if (flags.GOOGLE_CLOUD_STORAGE_PROJ_ID) process.env.GOOGLE_CLOUD_STORAGE_PROJ_ID = flags.GOOGLE_CLOUD_STORAGE_PROJ_ID + if (flags.GOOGLE_CLOUD_STORAGE_BUCKET_NAME) process.env.GOOGLE_CLOUD_STORAGE_BUCKET_NAME = flags.GOOGLE_CLOUD_STORAGE_BUCKET_NAME + if (flags.GOOGLE_CLOUD_UNIFORM_BUCKET_ACCESS) + process.env.GOOGLE_CLOUD_UNIFORM_BUCKET_ACCESS = flags.GOOGLE_CLOUD_UNIFORM_BUCKET_ACCESS // Queue if (flags.MODE) process.env.MODE = flags.MODE @@ -196,6 +205,10 @@ export abstract class BaseCommand extends Command { if (flags.REDIS_CA) process.env.REDIS_CA = flags.REDIS_CA if (flags.WORKER_CONCURRENCY) process.env.WORKER_CONCURRENCY = flags.WORKER_CONCURRENCY if (flags.QUEUE_NAME) process.env.QUEUE_NAME = flags.QUEUE_NAME - if (flags.QUEUE_REDIS_EVENT_STREAM_MAX_LEN) process.env.QUEUE_REDIS_EVENT_STREAM_MAX_LEN = flags.QUEUE_REDIS_EVENT_STREAM + if (flags.QUEUE_REDIS_EVENT_STREAM_MAX_LEN) process.env.QUEUE_REDIS_EVENT_STREAM_MAX_LEN = flags.QUEUE_REDIS_EVENT_STREAM_MAX_LEN + if (flags.REMOVE_ON_AGE) process.env.REMOVE_ON_AGE = flags.REMOVE_ON_AGE + if (flags.REMOVE_ON_COUNT) process.env.REMOVE_ON_COUNT = flags.REMOVE_ON_COUNT + if (flags.REDIS_KEEP_ALIVE) process.env.REDIS_KEEP_ALIVE = flags.REDIS_KEEP_ALIVE + if (flags.ENABLE_BULLMQ_DASHBOARD) process.env.ENABLE_BULLMQ_DASHBOARD = flags.ENABLE_BULLMQ_DASHBOARD } } diff --git 
a/packages/server/src/controllers/agentflowv2-generator/index.ts b/packages/server/src/controllers/agentflowv2-generator/index.ts new file mode 100644 index 000000000..7d4100403 --- /dev/null +++ b/packages/server/src/controllers/agentflowv2-generator/index.ts @@ -0,0 +1,18 @@ +import { Request, Response, NextFunction } from 'express' +import agentflowv2Service from '../../services/agentflowv2-generator' + +const generateAgentflowv2 = async (req: Request, res: Response, next: NextFunction) => { + try { + if (!req.body.question || !req.body.selectedChatModel) { + throw new Error('Question and selectedChatModel are required') + } + const apiResponse = await agentflowv2Service.generateAgentflowv2(req.body.question, req.body.selectedChatModel) + return res.json(apiResponse) + } catch (error) { + next(error) + } +} + +export default { + generateAgentflowv2 +} diff --git a/packages/server/src/controllers/documentstore/index.ts b/packages/server/src/controllers/documentstore/index.ts index ccf451ac9..36b1402e1 100644 --- a/packages/server/src/controllers/documentstore/index.ts +++ b/packages/server/src/controllers/documentstore/index.ts @@ -201,7 +201,8 @@ const processLoader = async (req: Request, res: Response, next: NextFunction) => } const docLoaderId = req.params.loaderId const body = req.body - const apiResponse = await documentStoreService.processLoaderMiddleware(body, docLoaderId) + const isInternalRequest = req.headers['x-request-from'] === 'internal' + const apiResponse = await documentStoreService.processLoaderMiddleware(body, docLoaderId, isInternalRequest) return res.json(apiResponse) } catch (error) { next(error) @@ -334,8 +335,7 @@ const saveVectorStoreConfig = async (req: Request, res: Response, next: NextFunc } const body = req.body const appDataSource = getRunningExpressApp().AppDataSource - const componentNodes = getRunningExpressApp().nodesPool.componentNodes - const apiResponse = await documentStoreService.saveVectorStoreConfig(appDataSource, 
componentNodes, body) + const apiResponse = await documentStoreService.saveVectorStoreConfig(appDataSource, body) return res.json(apiResponse) } catch (error) { next(error) diff --git a/packages/server/src/controllers/executions/index.ts b/packages/server/src/controllers/executions/index.ts new file mode 100644 index 000000000..85ba3c729 --- /dev/null +++ b/packages/server/src/controllers/executions/index.ts @@ -0,0 +1,114 @@ +import { Request, Response, NextFunction } from 'express' +import executionsService from '../../services/executions' +import { ExecutionState } from '../../Interface' + +const getExecutionById = async (req: Request, res: Response, next: NextFunction) => { + try { + const executionId = req.params.id + const execution = await executionsService.getExecutionById(executionId) + return res.json(execution) + } catch (error) { + next(error) + } +} + +const getPublicExecutionById = async (req: Request, res: Response, next: NextFunction) => { + try { + const executionId = req.params.id + const execution = await executionsService.getPublicExecutionById(executionId) + return res.json(execution) + } catch (error) { + next(error) + } +} + +const updateExecution = async (req: Request, res: Response, next: NextFunction) => { + try { + const executionId = req.params.id + const execution = await executionsService.updateExecution(executionId, req.body) + return res.json(execution) + } catch (error) { + next(error) + } +} + +const getAllExecutions = async (req: Request, res: Response, next: NextFunction) => { + try { + // Extract all possible filters from query params + const filters: any = {} + + // ID filter + if (req.query.id) filters.id = req.query.id as string + + // Flow and session filters + if (req.query.agentflowId) filters.agentflowId = req.query.agentflowId as string + if (req.query.sessionId) filters.sessionId = req.query.sessionId as string + + // State filter + if (req.query.state) { + const stateValue = req.query.state as string + if 
(['INPROGRESS', 'FINISHED', 'ERROR', 'TERMINATED', 'TIMEOUT', 'STOPPED'].includes(stateValue)) { + filters.state = stateValue as ExecutionState + } + } + + // Date filters + if (req.query.startDate) { + filters.startDate = new Date(req.query.startDate as string) + } + + if (req.query.endDate) { + filters.endDate = new Date(req.query.endDate as string) + } + + // Pagination + if (req.query.page) { + filters.page = parseInt(req.query.page as string, 10) + } + + if (req.query.limit) { + filters.limit = parseInt(req.query.limit as string, 10) + } + + const apiResponse = await executionsService.getAllExecutions(filters) + + return res.json(apiResponse) + } catch (error) { + next(error) + } +} + +/** + * Delete multiple executions by their IDs + * If a single ID is provided in the URL params, it will delete that execution + * If an array of IDs is provided in the request body, it will delete all those executions + */ +const deleteExecutions = async (req: Request, res: Response, next: NextFunction) => { + try { + let executionIds: string[] = [] + + // Check if we're deleting a single execution from URL param + if (req.params.id) { + executionIds = [req.params.id] + } + // Check if we're deleting multiple executions from request body + else if (req.body.executionIds && Array.isArray(req.body.executionIds)) { + executionIds = req.body.executionIds + } else { + return res.status(400).json({ success: false, message: 'No execution IDs provided' }) + } + + const result = await executionsService.deleteExecutions(executionIds) + return res.json(result) + } catch (error) { + next(error) + } +} + +export default { + getAllExecutions, + deleteExecutions, + getExecutionById, + getPublicExecutionById, + updateExecution +} diff --git a/packages/server/src/controllers/feedback/index.ts b/packages/server/src/controllers/feedback/index.ts index 936a3b879..a7286cf15 100644 --- a/packages/server/src/controllers/feedback/index.ts +++ b/packages/server/src/controllers/feedback/index.ts @@ 
-1,5 +1,6 @@ import { Request, Response, NextFunction } from 'express' import feedbackService from '../../services/feedback' +import { validateFeedbackForCreation, validateFeedbackForUpdate } from '../../services/feedback/validation' import { InternalFlowiseError } from '../../errors/internalFlowiseError' import { StatusCodes } from 'http-status-codes' @@ -31,6 +32,7 @@ const createChatMessageFeedbackForChatflow = async (req: Request, res: Response, `Error: feedbackController.createChatMessageFeedbackForChatflow - body not provided!` ) } + await validateFeedbackForCreation(req.body) const apiResponse = await feedbackService.createChatMessageFeedbackForChatflow(req.body) return res.json(apiResponse) } catch (error) { @@ -52,6 +54,7 @@ const updateChatMessageFeedbackForChatflow = async (req: Request, res: Response, `Error: feedbackController.updateChatMessageFeedbackForChatflow - id not provided!` ) } + await validateFeedbackForUpdate(req.params.id, req.body) const apiResponse = await feedbackService.updateChatMessageFeedbackForChatflow(req.params.id, req.body) return res.json(apiResponse) } catch (error) { diff --git a/packages/server/src/controllers/nvidia-nim/index.ts b/packages/server/src/controllers/nvidia-nim/index.ts new file mode 100644 index 000000000..54bc4a3f6 --- /dev/null +++ b/packages/server/src/controllers/nvidia-nim/index.ts @@ -0,0 +1,148 @@ +import axios from 'axios' +import { NextFunction, Request, Response } from 'express' + +const { NimContainerManager } = require('flowise-nim-container-manager') + +const getToken = async (req: Request, res: Response, next: NextFunction) => { + try { + const headers = { + 'Content-Type': 'application/json', + Accept: 'application/json' + } + const data = { + client_id: 'Flowise', + pdi: '0x1234567890abcdeg', + access_policy_name: 'nim-dev' + } + const response = await axios.post('https://nts.ngc.nvidia.com/v1/token', data, { headers }) + const responseJson = response.data + return res.json(responseJson) + } 
catch (error) { + next(error) + } +} + +const preload = async (req: Request, res: Response, next: NextFunction) => { + try { + await NimContainerManager.preload() + return res.send('Preloaded NIM') + } catch (error) { + next(error) + } +} + +const downloadInstaller = async (req: Request, res: Response, next: NextFunction) => { + try { + await NimContainerManager.downloadInstaller() + return res.send('NIM Installer completed successfully!') + } catch (error) { + next(error) + } +} + +const pullImage = async (req: Request, res: Response, next: NextFunction) => { + try { + const imageTag = req.body.imageTag + const apiKey = req.body.apiKey + await NimContainerManager.pullImage(imageTag, apiKey) + return res.send(`Pulling image ${imageTag}`) + } catch (error) { + next(error) + } +} + +const startContainer = async (req: Request, res: Response, next: NextFunction) => { + try { + const imageTag = req.body.imageTag + const apiKey = req.body.apiKey + const hostPort = req.body.hostPort + const nimRelaxMemConstraints = parseInt(req.body.nimRelaxMemConstraints) + // Validate nimRelaxMemConstraints + if (isNaN(nimRelaxMemConstraints) || (nimRelaxMemConstraints !== 0 && nimRelaxMemConstraints !== 1)) { + return res.status(400).send('nimRelaxMemConstraints must be 0 or 1') + } + await NimContainerManager.startContainer(imageTag, apiKey, hostPort, nimRelaxMemConstraints) + return res.send(`Starting container ${imageTag}`) + } catch (error) { + next(error) + } +} + +const getImage = async (req: Request, res: Response, next: NextFunction) => { + try { + const imageTag = req.body.imageTag + const images = await NimContainerManager.userImageLibrary() + const image = images.find((img: any) => img.tag === imageTag) + if (!image) { + return res.status(404).send(`Image ${imageTag} not found`) + } + return res.json(image) + } catch (error) { + next(error) + } +} + +const getContainer = async (req: Request, res: Response, next: NextFunction) => { + try { + const imageTag = req.body.imageTag 
+ const port = req.body.port + + // First check if the image exists + const images = await NimContainerManager.userImageLibrary() + const image = images.find((img: any) => img.tag === imageTag) + if (!image) { + return res.status(404).send(`Image ${imageTag} not found`) + } + + const containers = await NimContainerManager.listRunningContainers() + const portInUse = containers.find((cont: any) => cont.port === port) + if (portInUse) { + const isModelContainer = portInUse.image === image.tag + if (isModelContainer) { + portInUse.image = image.name + return res.json(portInUse) + } else { + return res.status(409).send({ + message: `Port ${port} is already in use by another container`, + container: portInUse + }) + } + } + + // If no container found with matching port, return 404 + return res.status(404).send(`Container of ${imageTag} with port ${port} not found`) + } catch (error) { + next(error) + } +} + +const listRunningContainers = async (req: Request, res: Response, next: NextFunction) => { + try { + const containers = await NimContainerManager.listRunningContainers() + return res.json(containers) + } catch (error) { + next(error) + } +} + +const stopContainer = async (req: Request, res: Response, next: NextFunction) => { + try { + const containerId = req.body.containerId + const containerInfo = await NimContainerManager.stopContainer(containerId) + return res.json(containerInfo) + } catch (error) { + next(error) + } +} + +export default { + preload, + getToken, + downloadInstaller, + pullImage, + startContainer, + getImage, + getContainer, + listRunningContainers, + stopContainer +} diff --git a/packages/server/src/controllers/validation/index.ts b/packages/server/src/controllers/validation/index.ts new file mode 100644 index 000000000..a73c5c71e --- /dev/null +++ b/packages/server/src/controllers/validation/index.ts @@ -0,0 +1,24 @@ +import { Request, Response, NextFunction } from 'express' +import validationService from '../../services/validation' +import { 
InternalFlowiseError } from '../../errors/internalFlowiseError' +import { StatusCodes } from 'http-status-codes' + +const checkFlowValidation = async (req: Request, res: Response, next: NextFunction) => { + try { + const flowId = req.params?.id as string | undefined + if (!flowId) { + throw new InternalFlowiseError( + StatusCodes.PRECONDITION_FAILED, + `Error: validationController.checkFlowValidation - id not provided!` + ) + } + const apiResponse = await validationService.checkFlowValidation(flowId) + return res.json(apiResponse) + } catch (error) { + next(error) + } +} + +export default { + checkFlowValidation +} diff --git a/packages/server/src/database/entities/ChatMessage.ts b/packages/server/src/database/entities/ChatMessage.ts index 5ab161afe..e44f05a9d 100644 --- a/packages/server/src/database/entities/ChatMessage.ts +++ b/packages/server/src/database/entities/ChatMessage.ts @@ -1,6 +1,7 @@ /* eslint-disable */ -import { Entity, Column, CreateDateColumn, PrimaryGeneratedColumn, Index } from 'typeorm' +import { Entity, Column, CreateDateColumn, PrimaryGeneratedColumn, Index, JoinColumn, OneToOne } from 'typeorm' import { IChatMessage, MessageType } from '../../Interface' +import { Execution } from './Execution' @Entity() export class ChatMessage implements IChatMessage { @@ -14,6 +15,13 @@ export class ChatMessage implements IChatMessage { @Column({ type: 'uuid' }) chatflowid: string + @Column({ nullable: true, type: 'uuid' }) + executionId?: string + + @OneToOne(() => Execution) + @JoinColumn({ name: 'executionId' }) + execution: Execution + @Column({ type: 'text' }) content: string diff --git a/packages/server/src/database/entities/Execution.ts b/packages/server/src/database/entities/Execution.ts new file mode 100644 index 000000000..483a10ff1 --- /dev/null +++ b/packages/server/src/database/entities/Execution.ts @@ -0,0 +1,44 @@ +import { Entity, Column, Index, PrimaryGeneratedColumn, CreateDateColumn, UpdateDateColumn, ManyToOne, JoinColumn } from 
'typeorm' +import { IExecution, ExecutionState } from '../../Interface' +import { ChatFlow } from './ChatFlow' + +@Entity() +export class Execution implements IExecution { + @PrimaryGeneratedColumn('uuid') + id: string + + @Column({ type: 'text' }) + executionData: string + + @Column() + state: ExecutionState + + @Index() + @Column({ type: 'uuid' }) + agentflowId: string + + @Index() + @Column({ type: 'uuid' }) + sessionId: string + + @Column({ nullable: true, type: 'text' }) + action?: string + + @Column({ nullable: true }) + isPublic?: boolean + + @Column({ type: 'timestamp' }) + @CreateDateColumn() + createdDate: Date + + @Column({ type: 'timestamp' }) + @UpdateDateColumn() + updatedDate: Date + + @Column() + stoppedDate: Date + + @ManyToOne(() => ChatFlow) + @JoinColumn({ name: 'agentflowId' }) + agentflow: ChatFlow +} diff --git a/packages/server/src/database/entities/index.ts b/packages/server/src/database/entities/index.ts index 4cb079b8b..c9152a1d7 100644 --- a/packages/server/src/database/entities/index.ts +++ b/packages/server/src/database/entities/index.ts @@ -11,6 +11,7 @@ import { Lead } from './Lead' import { UpsertHistory } from './UpsertHistory' import { ApiKey } from './ApiKey' import { CustomTemplate } from './CustomTemplate' +import { Execution } from './Execution' export const entities = { ChatFlow, @@ -25,5 +26,6 @@ export const entities = { Lead, UpsertHistory, ApiKey, - CustomTemplate + CustomTemplate, + Execution } diff --git a/packages/server/src/database/migrations/mariadb/1738090872625-AddExecutionEntity.ts b/packages/server/src/database/migrations/mariadb/1738090872625-AddExecutionEntity.ts new file mode 100644 index 000000000..3dd2cdcf8 --- /dev/null +++ b/packages/server/src/database/migrations/mariadb/1738090872625-AddExecutionEntity.ts @@ -0,0 +1,31 @@ +import { MigrationInterface, QueryRunner } from 'typeorm' + +export class AddExecutionEntity1738090872625 implements MigrationInterface { + public async up(queryRunner: QueryRunner): 
Promise { + await queryRunner.query( + `CREATE TABLE IF NOT EXISTS \`execution\` ( + \`id\` varchar(36) NOT NULL, + \`executionData\` text NOT NULL, + \`action\` text, + \`state\` varchar(255) NOT NULL, + \`agentflowId\` varchar(255) NOT NULL, + \`sessionId\` varchar(255) NOT NULL, + \`isPublic\` boolean, + \`createdDate\` datetime(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6), + \`updatedDate\` datetime(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6), + \`stoppedDate\` datetime(6), + PRIMARY KEY (\`id\`) + ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_520_ci;` + ) + + const columnExists = await queryRunner.hasColumn('chat_message', 'executionId') + if (!columnExists) { + await queryRunner.query(`ALTER TABLE \`chat_message\` ADD COLUMN \`executionId\` TEXT;`) + } + } + + public async down(queryRunner: QueryRunner): Promise { + await queryRunner.query(`DROP TABLE IF EXISTS \`execution\``) + await queryRunner.query(`ALTER TABLE \`chat_message\` DROP COLUMN \`executionId\`;`) + } +} diff --git a/packages/server/src/database/migrations/mariadb/index.ts b/packages/server/src/database/migrations/mariadb/index.ts index 00e1237e4..11d774178 100644 --- a/packages/server/src/database/migrations/mariadb/index.ts +++ b/packages/server/src/database/migrations/mariadb/index.ts @@ -28,6 +28,7 @@ import { AddCustomTemplate1725629836652 } from './1725629836652-AddCustomTemplat import { AddArtifactsToChatMessage1726156258465 } from './1726156258465-AddArtifactsToChatMessage' import { AddFollowUpPrompts1726666318346 } from './1726666318346-AddFollowUpPrompts' import { AddTypeToAssistant1733011290987 } from './1733011290987-AddTypeToAssistant' +import { AddExecutionEntity1738090872625 } from './1738090872625-AddExecutionEntity' export const mariadbMigrations = [ Init1693840429259, @@ -59,5 +60,6 @@ export const mariadbMigrations = [ AddCustomTemplate1725629836652, AddArtifactsToChatMessage1726156258465, AddFollowUpPrompts1726666318346, - 
AddTypeToAssistant1733011290987 + AddTypeToAssistant1733011290987, + AddExecutionEntity1738090872625 ] diff --git a/packages/server/src/database/migrations/mysql/1738090872625-AddExecutionEntity.ts b/packages/server/src/database/migrations/mysql/1738090872625-AddExecutionEntity.ts new file mode 100644 index 000000000..3364d3e25 --- /dev/null +++ b/packages/server/src/database/migrations/mysql/1738090872625-AddExecutionEntity.ts @@ -0,0 +1,31 @@ +import { MigrationInterface, QueryRunner } from 'typeorm' + +export class AddExecutionEntity1738090872625 implements MigrationInterface { + public async up(queryRunner: QueryRunner): Promise { + await queryRunner.query( + `CREATE TABLE IF NOT EXISTS \`execution\` ( + \`id\` varchar(36) NOT NULL, + \`executionData\` text NOT NULL, + \`action\` text, + \`state\` varchar(255) NOT NULL, + \`agentflowId\` varchar(255) NOT NULL, + \`sessionId\` varchar(255) NOT NULL, + \`isPublic\` boolean, + \`createdDate\` datetime(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6), + \`updatedDate\` datetime(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6), + \`stoppedDate\` datetime(6), + PRIMARY KEY (\`id\`) + ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci;` + ) + + const columnExists = await queryRunner.hasColumn('chat_message', 'executionId') + if (!columnExists) { + await queryRunner.query(`ALTER TABLE \`chat_message\` ADD COLUMN \`executionId\` TEXT;`) + } + } + + public async down(queryRunner: QueryRunner): Promise { + await queryRunner.query(`DROP TABLE IF EXISTS \`execution\``) + await queryRunner.query(`ALTER TABLE \`chat_message\` DROP COLUMN \`executionId\`;`) + } +} diff --git a/packages/server/src/database/migrations/mysql/index.ts b/packages/server/src/database/migrations/mysql/index.ts index 4b0bab415..fcd054131 100644 --- a/packages/server/src/database/migrations/mysql/index.ts +++ b/packages/server/src/database/migrations/mysql/index.ts @@ -28,6 +28,7 @@ import { AddCustomTemplate1725629836652 
} from './1725629836652-AddCustomTemplat import { AddArtifactsToChatMessage1726156258465 } from './1726156258465-AddArtifactsToChatMessage' import { AddFollowUpPrompts1726666302024 } from './1726666302024-AddFollowUpPrompts' import { AddTypeToAssistant1733011290987 } from './1733011290987-AddTypeToAssistant' +import { AddExecutionEntity1738090872625 } from './1738090872625-AddExecutionEntity' export const mysqlMigrations = [ Init1693840429259, @@ -59,5 +60,6 @@ export const mysqlMigrations = [ AddCustomTemplate1725629836652, AddArtifactsToChatMessage1726156258465, AddFollowUpPrompts1726666302024, - AddTypeToAssistant1733011290987 + AddTypeToAssistant1733011290987, + AddExecutionEntity1738090872625 ] diff --git a/packages/server/src/database/migrations/postgres/1738090872625-AddExecutionEntity.ts b/packages/server/src/database/migrations/postgres/1738090872625-AddExecutionEntity.ts new file mode 100644 index 000000000..a463fb254 --- /dev/null +++ b/packages/server/src/database/migrations/postgres/1738090872625-AddExecutionEntity.ts @@ -0,0 +1,31 @@ +import { MigrationInterface, QueryRunner } from 'typeorm' + +export class AddExecutionEntity1738090872625 implements MigrationInterface { + public async up(queryRunner: QueryRunner): Promise { + await queryRunner.query( + `CREATE TABLE IF NOT EXISTS execution ( + id uuid NOT NULL DEFAULT uuid_generate_v4(), + "executionData" text NOT NULL, + "action" text, + "state" varchar NOT NULL, + "agentflowId" uuid NOT NULL, + "sessionId" uuid NOT NULL, + "isPublic" boolean, + "createdDate" timestamp NOT NULL DEFAULT now(), + "updatedDate" timestamp NOT NULL DEFAULT now(), + "stoppedDate" timestamp, + CONSTRAINT "PK_936a419c3b8044598d72d95da61" PRIMARY KEY (id) + );` + ) + + const columnExists = await queryRunner.hasColumn('chat_message', 'executionId') + if (!columnExists) { + await queryRunner.query(`ALTER TABLE "chat_message" ADD COLUMN "executionId" uuid;`) + } + } + + public async down(queryRunner: QueryRunner): Promise { + 
await queryRunner.query(`DROP TABLE execution`) + await queryRunner.query(`ALTER TABLE "chat_message" DROP COLUMN "executionId";`) + } +} diff --git a/packages/server/src/database/migrations/postgres/index.ts b/packages/server/src/database/migrations/postgres/index.ts index 0b5718db6..ac8fef734 100644 --- a/packages/server/src/database/migrations/postgres/index.ts +++ b/packages/server/src/database/migrations/postgres/index.ts @@ -28,6 +28,7 @@ import { AddCustomTemplate1725629836652 } from './1725629836652-AddCustomTemplat import { AddArtifactsToChatMessage1726156258465 } from './1726156258465-AddArtifactsToChatMessage' import { AddFollowUpPrompts1726666309552 } from './1726666309552-AddFollowUpPrompts' import { AddTypeToAssistant1733011290987 } from './1733011290987-AddTypeToAssistant' +import { AddExecutionEntity1738090872625 } from './1738090872625-AddExecutionEntity' export const postgresMigrations = [ Init1693891895163, @@ -59,5 +60,6 @@ export const postgresMigrations = [ AddCustomTemplate1725629836652, AddArtifactsToChatMessage1726156258465, AddFollowUpPrompts1726666309552, - AddTypeToAssistant1733011290987 + AddTypeToAssistant1733011290987, + AddExecutionEntity1738090872625 ] diff --git a/packages/server/src/database/migrations/sqlite/1738090872625-AddExecutionEntity.ts b/packages/server/src/database/migrations/sqlite/1738090872625-AddExecutionEntity.ts new file mode 100644 index 000000000..ffec43990 --- /dev/null +++ b/packages/server/src/database/migrations/sqlite/1738090872625-AddExecutionEntity.ts @@ -0,0 +1,15 @@ +import { MigrationInterface, QueryRunner } from 'typeorm' + +export class AddExecutionEntity1738090872625 implements MigrationInterface { + public async up(queryRunner: QueryRunner): Promise { + await queryRunner.query( + `CREATE TABLE IF NOT EXISTS "execution" ("id" varchar PRIMARY KEY NOT NULL, "executionData" text NOT NULL, "action" text, "state" varchar NOT NULL, "agentflowId" varchar NOT NULL, "sessionId" varchar NOT NULL, "isPublic" 
boolean, "createdDate" datetime NOT NULL DEFAULT (datetime('now')), "updatedDate" datetime NOT NULL DEFAULT (datetime('now')), "stoppedDate" datetime);` + ) + await queryRunner.query(`ALTER TABLE "chat_message" ADD COLUMN "executionId" varchar;`) + } + + public async down(queryRunner: QueryRunner): Promise { + await queryRunner.query(`DROP TABLE execution`) + await queryRunner.query(`ALTER TABLE "chat_message" DROP COLUMN "executionId";`) + } +} diff --git a/packages/server/src/database/migrations/sqlite/index.ts b/packages/server/src/database/migrations/sqlite/index.ts index b7b1d2b7d..4ebcbb921 100644 --- a/packages/server/src/database/migrations/sqlite/index.ts +++ b/packages/server/src/database/migrations/sqlite/index.ts @@ -27,6 +27,7 @@ import { AddArtifactsToChatMessage1726156258465 } from './1726156258465-AddArtif import { AddCustomTemplate1725629836652 } from './1725629836652-AddCustomTemplate' import { AddFollowUpPrompts1726666294213 } from './1726666294213-AddFollowUpPrompts' import { AddTypeToAssistant1733011290987 } from './1733011290987-AddTypeToAssistant' +import { AddExecutionEntity1738090872625 } from './1738090872625-AddExecutionEntity' export const sqliteMigrations = [ Init1693835579790, @@ -57,5 +58,6 @@ export const sqliteMigrations = [ AddArtifactsToChatMessage1726156258465, AddCustomTemplate1725629836652, AddFollowUpPrompts1726666294213, - AddTypeToAssistant1733011290987 + AddTypeToAssistant1733011290987, + AddExecutionEntity1738090872625 ] diff --git a/packages/server/src/index.ts b/packages/server/src/index.ts index 40007677d..fb7618b5f 100644 --- a/packages/server/src/index.ts +++ b/packages/server/src/index.ts @@ -251,7 +251,7 @@ export class App { }) }) - if (process.env.MODE === MODE.QUEUE) { + if (process.env.MODE === MODE.QUEUE && process.env.ENABLE_BULLMQ_DASHBOARD === 'true') { this.app.use('/admin/queues', this.queueManager.getBullBoardRouter()) } diff --git a/packages/server/src/metrics/OpenTelemetry.ts 
b/packages/server/src/metrics/OpenTelemetry.ts index 7686225db..a9a3d9c4f 100644 --- a/packages/server/src/metrics/OpenTelemetry.ts +++ b/packages/server/src/metrics/OpenTelemetry.ts @@ -6,6 +6,9 @@ import { diag, DiagLogLevel, DiagConsoleLogger, Attributes, Counter } from '@ope import { getVersion } from 'flowise-components' import express from 'express' +// Create a static map to track created metrics and prevent duplicates +const createdMetrics = new Map() + export class OpenTelemetry implements IMetricsProvider { private app: express.Application private resource: Resource @@ -30,6 +33,9 @@ export class OpenTelemetry implements IMetricsProvider { if (process.env.METRICS_OPEN_TELEMETRY_DEBUG === 'true') { diag.setLogger(new DiagConsoleLogger(), DiagLogLevel.DEBUG) } + + // Clear metrics tracking on new instance + createdMetrics.clear() } public getName(): string { @@ -37,121 +43,215 @@ export class OpenTelemetry implements IMetricsProvider { } async initializeCounters(): Promise { - // Define the resource with the service name for trace grouping - const flowiseVersion = await getVersion() + try { + // Define the resource with the service name for trace grouping + const flowiseVersion = await getVersion() - this.resource = new Resource({ - [ATTR_SERVICE_NAME]: process.env.METRICS_SERVICE_NAME || 'FlowiseAI', - [ATTR_SERVICE_VERSION]: flowiseVersion.version // Version as a label - }) + this.resource = new Resource({ + [ATTR_SERVICE_NAME]: process.env.METRICS_SERVICE_NAME || 'FlowiseAI', + [ATTR_SERVICE_VERSION]: flowiseVersion.version // Version as a label + }) - const metricProtocol = process.env.METRICS_OPEN_TELEMETRY_PROTOCOL || 'http' // Default to 'http' - // Conditionally import the correct OTLP exporters based on protocol - let OTLPMetricExporter - if (metricProtocol === 'http') { - OTLPMetricExporter = require('@opentelemetry/exporter-metrics-otlp-http').OTLPMetricExporter - } else if (metricProtocol === 'grpc') { - OTLPMetricExporter = 
require('@opentelemetry/exporter-metrics-otlp-grpc').OTLPMetricExporter - } else if (metricProtocol === 'proto') { - OTLPMetricExporter = require('@opentelemetry/exporter-metrics-otlp-proto').OTLPMetricExporter - } else { - console.error('Invalid METRICS_OPEN_TELEMETRY_PROTOCOL specified. Please set it to "http", "grpc", or "proto".') - process.exit(1) // Exit if invalid protocol type is specified + const metricProtocol = process.env.METRICS_OPEN_TELEMETRY_PROTOCOL || 'http' // Default to 'http' + // Conditionally import the correct OTLP exporters based on protocol + let OTLPMetricExporter + if (metricProtocol === 'http') { + OTLPMetricExporter = require('@opentelemetry/exporter-metrics-otlp-http').OTLPMetricExporter + } else if (metricProtocol === 'grpc') { + OTLPMetricExporter = require('@opentelemetry/exporter-metrics-otlp-grpc').OTLPMetricExporter + } else if (metricProtocol === 'proto') { + OTLPMetricExporter = require('@opentelemetry/exporter-metrics-otlp-proto').OTLPMetricExporter + } else { + console.error('Invalid METRICS_OPEN_TELEMETRY_PROTOCOL specified. 
Please set it to "http", "grpc", or "proto".') + process.exit(1) // Exit if invalid protocol type is specified + } + + // Handle any existing metric exporter + if (this.otlpMetricExporter) { + try { + await this.otlpMetricExporter.shutdown() + } catch (error) { + // Ignore shutdown errors + } + } + + this.otlpMetricExporter = new OTLPMetricExporter({ + url: process.env.METRICS_OPEN_TELEMETRY_METRIC_ENDPOINT // OTLP endpoint for metrics + }) + + // Clean up any existing metric reader + if (this.metricReader) { + try { + await this.metricReader.shutdown() + } catch (error) { + // Ignore shutdown errors + } + } + + this.metricReader = new PeriodicExportingMetricReader({ + exporter: this.otlpMetricExporter, + exportIntervalMillis: 5000 // Export metrics every 5 seconds + }) + + // Clean up any existing meter provider + if (this.meterProvider) { + try { + await this.meterProvider.shutdown() + } catch (error) { + // Ignore shutdown errors + } + } + + this.meterProvider = new MeterProvider({ resource: this.resource, readers: [this.metricReader] }) + + const meter = this.meterProvider.getMeter('flowise-metrics') + // look at the FLOWISE_COUNTER enum in Interface.Metrics.ts and get all values + // for each counter in the enum, create a new promClient.Counter and add it to the registry + const enumEntries = Object.entries(FLOWISE_METRIC_COUNTERS) + enumEntries.forEach(([name, value]) => { + try { + // Check if we've already created this metric + if (!createdMetrics.has(value)) { + // derive proper counter name from the enum value (chatflow_created = Chatflow Created) + const properCounterName: string = name.replace(/_/g, ' ').replace(/\b\w/g, (l) => l.toUpperCase()) + this.counters.set( + value, + meter.createCounter(value, { + description: properCounterName + }) + ) + createdMetrics.set(value, true) + } + } catch (error) { + // Log error but continue with other metrics + console.error(`Error creating metric ${value}:`, error) + } + }) + + try { + // Add version gauge if not 
already created + if (!createdMetrics.has('flowise_version')) { + const versionGuage = meter.createGauge('flowise_version', { + description: 'Flowise version' + }) + // remove the last dot from the version string, e.g. 2.1.3 -> 2.13 (gauge needs a number - float) + const formattedVersion = flowiseVersion.version.replace(/\.(\d+)$/, '$1') + versionGuage.record(parseFloat(formattedVersion)) + createdMetrics.set('flowise_version', true) + } + } catch (error) { + console.error('Error creating version gauge:', error) + } + + try { + // HTTP requests counter + if (!createdMetrics.has('http_requests_total')) { + this.httpRequestCounter = meter.createCounter('http_requests_total', { + description: 'Counts the number of HTTP requests received' + }) + createdMetrics.set('http_requests_total', true) + } + } catch (error) { + console.error('Error creating HTTP request counter:', error) + } + + try { + // HTTP request duration histogram + if (!createdMetrics.has('http_request_duration_ms')) { + this.httpRequestDuration = meter.createHistogram('http_request_duration_ms', { + description: 'Records the duration of HTTP requests in ms' + }) + createdMetrics.set('http_request_duration_ms', true) + } + } catch (error) { + console.error('Error creating HTTP request duration histogram:', error) + } + + await this.setupMetricsEndpoint() + } catch (error) { + console.error('Error initializing OpenTelemetry metrics:', error) + // Don't throw - allow app to continue without metrics } - - this.otlpMetricExporter = new OTLPMetricExporter({ - url: process.env.METRICS_OPEN_TELEMETRY_METRIC_ENDPOINT // OTLP endpoint for metrics - }) - - this.metricReader = new PeriodicExportingMetricReader({ - exporter: this.otlpMetricExporter, - exportIntervalMillis: 5000 // Export metrics every 5 seconds - }) - this.meterProvider = new MeterProvider({ resource: this.resource, readers: [this.metricReader] }) - - const meter = this.meterProvider.getMeter('flowise-metrics') - // look at the FLOWISE_COUNTER enum 
in Interface.Metrics.ts and get all values - // for each counter in the enum, create a new promClient.Counter and add it to the registry - const enumEntries = Object.entries(FLOWISE_METRIC_COUNTERS) - enumEntries.forEach(([name, value]) => { - // derive proper counter name from the enum value (chatflow_created = Chatflow Created) - const properCounterName: string = name.replace(/_/g, ' ').replace(/\b\w/g, (l) => l.toUpperCase()) - this.counters.set( - value, - meter.createCounter(value, { - description: properCounterName - }) - ) - }) - - // in addition to the enum counters, add a few more custom counters - - const versionGuage = meter.createGauge('flowise_version', { - description: 'Flowise version' - }) - // remove the last dot from the version string, e.g. 2.1.3 -> 2.13 (guage needs a number - float) - const formattedVersion = flowiseVersion.version.replace(/\.(\d+)$/, '$1') - versionGuage.record(parseFloat(formattedVersion)) - - // Counter for HTTP requests with method, path, and status as labels - this.httpRequestCounter = meter.createCounter('http_requests_total', { - description: 'Counts the number of HTTP requests received' - }) - - // Histogram to measure HTTP request duration in milliseconds - this.httpRequestDuration = meter.createHistogram('http_request_duration_ms', { - description: 'Records the duration of HTTP requests in ms' - }) } // Function to record HTTP request duration private recordHttpRequestDuration(durationMs: number, method: string, path: string, status: number) { - this.httpRequestDuration.record(durationMs, { - method, - path, - status: status.toString() - }) + try { + if (this.httpRequestDuration) { + this.httpRequestDuration.record(durationMs, { + method, + path, + status: status.toString() + }) + } + } catch (error) { + // Log error but don't crash the application + console.error('Error recording HTTP request duration:', error) + } } // Function to record HTTP requests with specific labels private recordHttpRequest(method: string, 
path: string, status: number) { - this.httpRequestCounter.add(1, { - method, - path, - status: status.toString() - }) + try { + if (this.httpRequestCounter) { + this.httpRequestCounter.add(1, { + method, + path, + status: status.toString() + }) + } + } catch (error) { + // Log error but don't crash the application + console.error('Error recording HTTP request:', error) + } } async setupMetricsEndpoint(): Promise { - // Graceful shutdown for telemetry data flushing - process.on('SIGTERM', async () => { - await this.metricReader.shutdown() - await this.meterProvider.shutdown() - }) - - // Runs before each requests - this.app.use((req, res, next) => { - res.locals.startEpoch = Date.now() - next() - }) - - // Runs after each requests - this.app.use((req, res, next) => { - res.on('finish', async () => { - if (res.locals.startEpoch) { - const responseTimeInMs = Date.now() - res.locals.startEpoch - this.recordHttpRequest(req.method, req.path, res.statusCode) - this.recordHttpRequestDuration(responseTimeInMs, req.method, req.path, res.statusCode) + try { + // Graceful shutdown for telemetry data flushing + process.on('SIGTERM', async () => { + try { + if (this.metricReader) await this.metricReader.shutdown() + if (this.meterProvider) await this.meterProvider.shutdown() + } catch (error) { + console.error('Error during metrics shutdown:', error) } }) - next() - }) + + // Runs before each requests + this.app.use((req, res, next) => { + res.locals.startEpoch = Date.now() + next() + }) + + // Runs after each requests + this.app.use((req, res, next) => { + res.on('finish', async () => { + try { + if (res.locals.startEpoch) { + const responseTimeInMs = Date.now() - res.locals.startEpoch + this.recordHttpRequest(req.method, req.path, res.statusCode) + this.recordHttpRequestDuration(responseTimeInMs, req.method, req.path, res.statusCode) + } + } catch (error) { + console.error('Error in metrics middleware:', error) + } + }) + next() + }) + } catch (error) { + console.error('Error 
setting up metrics endpoint:', error) + } } async incrementCounter(counter: string, payload: any): Promise { - // Increment OpenTelemetry counter with the payload - if (this.counters.has(counter)) { - ;(this.counters.get(counter) as Counter).add(1, payload) + try { + // Increment OpenTelemetry counter with the payload + if (this.counters.has(counter)) { + ;(this.counters.get(counter) as Counter).add(1, payload) + } + } catch (error) { + console.error(`Error incrementing counter ${counter}:`, error) } } } diff --git a/packages/server/src/metrics/Prometheus.ts b/packages/server/src/metrics/Prometheus.ts index 15eaafeac..56b4da3ff 100644 --- a/packages/server/src/metrics/Prometheus.ts +++ b/packages/server/src/metrics/Prometheus.ts @@ -12,6 +12,9 @@ export class Prometheus implements IMetricsProvider { constructor(app: express.Application) { this.app = app + // Clear any existing default registry metrics to avoid conflicts + promClient.register.clear() + // Create a separate registry for our metrics this.register = new promClient.Registry() } @@ -27,48 +30,87 @@ export class Prometheus implements IMetricsProvider { // look at the FLOWISE_COUNTER enum in Interface.Metrics.ts and get all values // for each counter in the enum, create a new promClient.Counter and add it to the registry - this.counters = new Map>() + this.counters = new Map | promClient.Gauge | promClient.Histogram>() const enumEntries = Object.entries(FLOWISE_METRIC_COUNTERS) enumEntries.forEach(([name, value]) => { // derive proper counter name from the enum value (chatflow_created = Chatflow Created) const properCounterName: string = name.replace(/_/g, ' ').replace(/\b\w/g, (l) => l.toUpperCase()) - this.counters.set( - value, - new promClient.Counter({ - name: value, - help: `Total number of ${properCounterName}`, - labelNames: ['status'] - }) - ) + try { + this.counters.set( + value, + new promClient.Counter({ + name: value, + help: `Total number of ${properCounterName}`, + labelNames: ['status'], + 
registers: [this.register] // Explicitly set the registry + }) + ) + } catch (error) { + // If metric already exists, get it from the registry instead + const existingMetrics = this.register.getSingleMetric(value) + if (existingMetrics) { + this.counters.set(value, existingMetrics as promClient.Counter) + } + } }) // in addition to the enum counters, add a few more custom counters // version, http_request_duration_ms, http_requests_total - const versionGaugeCounter = new promClient.Gauge({ - name: 'flowise_version_info', - help: 'Flowise version info.', - labelNames: ['version'] - }) + try { + const versionGaugeCounter = new promClient.Gauge({ + name: 'flowise_version_info', + help: 'Flowise version info.', + labelNames: ['version'], + registers: [this.register] // Explicitly set the registry + }) - const { version } = await getVersion() - versionGaugeCounter.set({ version: 'v' + version }, 1) - this.counters.set('flowise_version', versionGaugeCounter) + const { version } = await getVersion() + versionGaugeCounter.set({ version: 'v' + version }, 1) + this.counters.set('flowise_version', versionGaugeCounter) + } catch (error) { + // If metric already exists, get it from the registry + const existingMetric = this.register.getSingleMetric('flowise_version') + if (existingMetric) { + this.counters.set('flowise_version', existingMetric as promClient.Gauge) + } + } - this.httpRequestDurationMicroseconds = new promClient.Histogram({ - name: 'http_request_duration_ms', - help: 'Duration of HTTP requests in ms', - labelNames: ['method', 'route', 'code'], - buckets: [1, 5, 15, 50, 100, 200, 300, 400, 500] // buckets for response time from 0.1ms to 500ms - }) - this.counters.set('http_request_duration_ms', this.httpRequestDurationMicroseconds) + try { + this.httpRequestDurationMicroseconds = new promClient.Histogram({ + name: 'http_request_duration_ms', + help: 'Duration of HTTP requests in ms', + labelNames: ['method', 'route', 'code'], + buckets: [1, 5, 15, 50, 100, 200, 
300, 400, 500], // buckets for response time from 0.1ms to 500ms + registers: [this.register] // Explicitly set the registry + }) + this.counters.set('http_request_duration_ms', this.httpRequestDurationMicroseconds) + } catch (error) { + // If metric already exists, get it from the registry + const existingMetric = this.register.getSingleMetric('http_request_duration_ms') + if (existingMetric) { + this.httpRequestDurationMicroseconds = existingMetric as Histogram + this.counters.set('http_request_duration_ms', this.httpRequestDurationMicroseconds) + } + } - this.requestCounter = new Counter({ - name: 'http_requests_total', - help: 'Total number of HTTP requests', - labelNames: ['method', 'path', 'status'] - }) - this.counters.set('http_requests_total', this.requestCounter) + try { + this.requestCounter = new Counter({ + name: 'http_requests_total', + help: 'Total number of HTTP requests', + labelNames: ['method', 'path', 'status'], + registers: [this.register] // Explicitly set the registry + }) + this.counters.set('http_requests_total', this.requestCounter) + } catch (error) { + // If metric already exists, get it from the registry + const existingMetric = this.register.getSingleMetric('http_requests_total') + if (existingMetric) { + this.requestCounter = existingMetric as Counter + this.counters.set('http_requests_total', this.requestCounter) + } + } + // Only register metrics that aren't already in the registry this.registerMetrics() await this.setupMetricsEndpoint() } @@ -111,12 +153,28 @@ export class Prometheus implements IMetricsProvider { private registerMetrics() { if (process.env.METRICS_INCLUDE_NODE_METRICS !== 'false') { + // Clear any existing default metrics to avoid conflicts + promClient.register.clear() // enable default metrics like CPU usage, memory usage, etc. 
- promClient.collectDefaultMetrics({ register: this.register }) + // and ensure they're only registered with our custom registry + promClient.collectDefaultMetrics({ + register: this.register, + prefix: 'flowise_' // Add a prefix to avoid conflicts + }) } - // Add our custom metrics to the registry + + // Add only the custom metrics that haven't been registered yet for (const counter of this.counters.values()) { - this.register.registerMetric(counter) + try { + // Type assertion to access the name property + const metricName = (counter as any).name + if (!this.register.getSingleMetric(metricName)) { + this.register.registerMetric(counter) + } + } catch (error) { + // If we can't register the metric, it probably already exists + // Just continue with the next one + } } } } diff --git a/packages/server/src/middlewares/errors/index.ts b/packages/server/src/middlewares/errors/index.ts index 75cd2c21b..88b3dd80c 100644 --- a/packages/server/src/middlewares/errors/index.ts +++ b/packages/server/src/middlewares/errors/index.ts @@ -5,6 +5,8 @@ import { InternalFlowiseError } from '../../errors/internalFlowiseError' // we need eslint because we have to pass next arg for the error middleware // eslint-disable-next-line async function errorHandlerMiddleware(err: InternalFlowiseError, req: Request, res: Response, next: NextFunction) { + if (err.message.includes('401 Incorrect API key provided')) + err.message = '401 Invalid model key or Incorrect local model configuration.' let displayedError = { statusCode: err.statusCode || StatusCodes.INTERNAL_SERVER_ERROR, success: false, @@ -12,7 +14,7 @@ async function errorHandlerMiddleware(err: InternalFlowiseError, req: Request, r // Provide error stack trace only in development stack: process.env.NODE_ENV === 'development' ? 
err.stack : {} } - if (!req.body.streaming || req.body.streaming === 'false') { + if (!req.body || !req.body.streaming || req.body.streaming === 'false') { res.setHeader('Content-Type', 'application/json') res.status(displayedError.statusCode).json(displayedError) } diff --git a/packages/server/src/queue/BaseQueue.ts b/packages/server/src/queue/BaseQueue.ts index 0c3003ea6..d3bf18d29 100644 --- a/packages/server/src/queue/BaseQueue.ts +++ b/packages/server/src/queue/BaseQueue.ts @@ -1,4 +1,4 @@ -import { Queue, Worker, Job, QueueEvents, RedisOptions } from 'bullmq' +import { Queue, Worker, Job, QueueEvents, RedisOptions, KeepJobs } from 'bullmq' import { v4 as uuidv4 } from 'uuid' import logger from '../utils/logger' @@ -6,6 +6,8 @@ const QUEUE_REDIS_EVENT_STREAM_MAX_LEN = process.env.QUEUE_REDIS_EVENT_STREAM_MA ? parseInt(process.env.QUEUE_REDIS_EVENT_STREAM_MAX_LEN) : 10000 const WORKER_CONCURRENCY = process.env.WORKER_CONCURRENCY ? parseInt(process.env.WORKER_CONCURRENCY) : 100000 +const REMOVE_ON_AGE = process.env.REMOVE_ON_AGE ? parseInt(process.env.REMOVE_ON_AGE) : -1 +const REMOVE_ON_COUNT = process.env.REMOVE_ON_COUNT ? 
parseInt(process.env.REMOVE_ON_COUNT) : -1 export abstract class BaseQueue { protected queue: Queue @@ -34,7 +36,24 @@ export abstract class BaseQueue { public async addJob(jobData: any): Promise { const jobId = jobData.id || uuidv4() - return await this.queue.add(jobId, jobData, { removeOnFail: true }) + + let removeOnFail: number | boolean | KeepJobs | undefined = true + let removeOnComplete: number | boolean | KeepJobs | undefined = undefined + + // Only override removal options if age or count is specified + if (REMOVE_ON_AGE !== -1 || REMOVE_ON_COUNT !== -1) { + const keepJobObj: KeepJobs = {} + if (REMOVE_ON_AGE !== -1) { + keepJobObj.age = REMOVE_ON_AGE + } + if (REMOVE_ON_COUNT !== -1) { + keepJobObj.count = REMOVE_ON_COUNT + } + removeOnFail = keepJobObj + removeOnComplete = keepJobObj + } + + return await this.queue.add(jobId, jobData, { removeOnFail, removeOnComplete }) } public createWorker(concurrency: number = WORKER_CONCURRENCY): Worker { diff --git a/packages/server/src/queue/PredictionQueue.ts b/packages/server/src/queue/PredictionQueue.ts index 3a4f74145..2b5575712 100644 --- a/packages/server/src/queue/PredictionQueue.ts +++ b/packages/server/src/queue/PredictionQueue.ts @@ -7,6 +7,10 @@ import { RedisEventPublisher } from './RedisEventPublisher' import { AbortControllerPool } from '../AbortControllerPool' import { BaseQueue } from './BaseQueue' import { RedisOptions } from 'bullmq' +import logger from '../utils/logger' +import { generateAgentflowv2 as generateAgentflowv2_json } from 'flowise-components' +import { databaseEntities } from '../utils' +import { executeCustomNodeFunction } from '../utils/executeCustomNodeFunction' interface PredictionQueueOptions { appDataSource: DataSource @@ -16,6 +20,15 @@ interface PredictionQueueOptions { abortControllerPool: AbortControllerPool } +interface IGenerateAgentflowv2Params extends IExecuteFlowParams { + prompt: string + componentNodes: IComponentNodes + toolNodes: IComponentNodes + selectedChatModel: 
Record + question: string + isAgentFlowGenerator: boolean +} + export class PredictionQueue extends BaseQueue { private componentNodes: IComponentNodes private telemetry: Telemetry @@ -45,13 +58,34 @@ export class PredictionQueue extends BaseQueue { return this.queue } - async processJob(data: IExecuteFlowParams) { + async processJob(data: IExecuteFlowParams | IGenerateAgentflowv2Params) { if (this.appDataSource) data.appDataSource = this.appDataSource if (this.telemetry) data.telemetry = this.telemetry if (this.cachePool) data.cachePool = this.cachePool if (this.componentNodes) data.componentNodes = this.componentNodes if (this.redisPublisher) data.sseStreamer = this.redisPublisher + if (Object.prototype.hasOwnProperty.call(data, 'isAgentFlowGenerator')) { + logger.info(`Generating Agentflow...`) + const { prompt, componentNodes, toolNodes, selectedChatModel, question } = data as IGenerateAgentflowv2Params + const options: Record = { + appDataSource: this.appDataSource, + databaseEntities: databaseEntities, + logger: logger + } + return await generateAgentflowv2_json({ prompt, componentNodes, toolNodes, selectedChatModel }, question, options) + } + + if (Object.prototype.hasOwnProperty.call(data, 'isExecuteCustomFunction')) { + const executeCustomFunctionData = data as any + logger.info(`Executing Custom Function...`) + return await executeCustomNodeFunction({ + appDataSource: this.appDataSource, + componentNodes: this.componentNodes, + data: executeCustomFunctionData.data + }) + } + if (this.abortControllerPool) { const abortControllerId = `${data.chatflow.id}_${data.chatId}` const signal = new AbortController() diff --git a/packages/server/src/queue/QueueManager.ts b/packages/server/src/queue/QueueManager.ts index 166a4125d..abd657ac6 100644 --- a/packages/server/src/queue/QueueManager.ts +++ b/packages/server/src/queue/QueueManager.ts @@ -41,7 +41,12 @@ export class QueueManager { port: parseInt(process.env.REDIS_PORT || '6379'), username: 
process.env.REDIS_USERNAME || undefined, password: process.env.REDIS_PASSWORD || undefined, - tls: tlsOpts + tls: tlsOpts, + enableReadyCheck: true, + keepAlive: + process.env.REDIS_KEEP_ALIVE && !isNaN(parseInt(process.env.REDIS_KEEP_ALIVE, 10)) + ? parseInt(process.env.REDIS_KEEP_ALIVE, 10) + : undefined } } diff --git a/packages/server/src/queue/RedisEventPublisher.ts b/packages/server/src/queue/RedisEventPublisher.ts index 946aae93a..c0fce60c5 100644 --- a/packages/server/src/queue/RedisEventPublisher.ts +++ b/packages/server/src/queue/RedisEventPublisher.ts @@ -7,7 +7,13 @@ export class RedisEventPublisher implements IServerSideEventStreamer { constructor() { if (process.env.REDIS_URL) { this.redisPublisher = createClient({ - url: process.env.REDIS_URL + url: process.env.REDIS_URL, + socket: { + keepAlive: + process.env.REDIS_KEEP_ALIVE && !isNaN(parseInt(process.env.REDIS_KEEP_ALIVE, 10)) + ? parseInt(process.env.REDIS_KEEP_ALIVE, 10) + : undefined + } }) } else { this.redisPublisher = createClient({ @@ -19,7 +25,11 @@ export class RedisEventPublisher implements IServerSideEventStreamer { tls: process.env.REDIS_TLS === 'true', cert: process.env.REDIS_CERT ? Buffer.from(process.env.REDIS_CERT, 'base64') : undefined, key: process.env.REDIS_KEY ? Buffer.from(process.env.REDIS_KEY, 'base64') : undefined, - ca: process.env.REDIS_CA ? Buffer.from(process.env.REDIS_CA, 'base64') : undefined + ca: process.env.REDIS_CA ? Buffer.from(process.env.REDIS_CA, 'base64') : undefined, + keepAlive: + process.env.REDIS_KEEP_ALIVE && !isNaN(parseInt(process.env.REDIS_KEEP_ALIVE, 10)) + ? 
parseInt(process.env.REDIS_KEEP_ALIVE, 10) + : undefined } }) } @@ -119,6 +129,21 @@ export class RedisEventPublisher implements IServerSideEventStreamer { } } + streamCalledToolsEvent(chatId: string, data: any) { + try { + this.redisPublisher.publish( + chatId, + JSON.stringify({ + chatId, + eventType: 'calledTools', + data + }) + ) + } catch (error) { + console.error('Error streaming calledTools event:', error) + } + } + streamFileAnnotationsEvent(chatId: string, data: any) { try { this.redisPublisher.publish( @@ -164,6 +189,36 @@ export class RedisEventPublisher implements IServerSideEventStreamer { } } + streamAgentFlowEvent(chatId: string, data: any): void { + try { + this.redisPublisher.publish( + chatId, + JSON.stringify({ + chatId, + eventType: 'agentFlowEvent', + data + }) + ) + } catch (error) { + console.error('Error streaming agentFlow event:', error) + } + } + + streamAgentFlowExecutedDataEvent(chatId: string, data: any): void { + try { + this.redisPublisher.publish( + chatId, + JSON.stringify({ + chatId, + eventType: 'agentFlowExecutedData', + data + }) + ) + } catch (error) { + console.error('Error streaming agentFlowExecutedData event:', error) + } + } + streamNextAgentEvent(chatId: string, data: any): void { try { this.redisPublisher.publish( @@ -179,6 +234,21 @@ export class RedisEventPublisher implements IServerSideEventStreamer { } } + streamNextAgentFlowEvent(chatId: string, data: any): void { + try { + this.redisPublisher.publish( + chatId, + JSON.stringify({ + chatId, + eventType: 'nextAgentFlow', + data + }) + ) + } catch (error) { + console.error('Error streaming nextAgentFlow event:', error) + } + } + streamActionEvent(chatId: string, data: any): void { try { this.redisPublisher.publish( @@ -254,6 +324,21 @@ export class RedisEventPublisher implements IServerSideEventStreamer { } } + streamUsageMetadataEvent(chatId: string, data: any): void { + try { + this.redisPublisher.publish( + chatId, + JSON.stringify({ + chatId, + eventType: 
'usageMetadata', + data + }) + ) + } catch (error) { + console.error('Error streaming usage metadata event:', error) + } + } + async disconnect() { if (this.redisPublisher) { await this.redisPublisher.quit() diff --git a/packages/server/src/queue/RedisEventSubscriber.ts b/packages/server/src/queue/RedisEventSubscriber.ts index 81b18e3cc..5b0331a72 100644 --- a/packages/server/src/queue/RedisEventSubscriber.ts +++ b/packages/server/src/queue/RedisEventSubscriber.ts @@ -9,7 +9,13 @@ export class RedisEventSubscriber { constructor(sseStreamer: SSEStreamer) { if (process.env.REDIS_URL) { this.redisSubscriber = createClient({ - url: process.env.REDIS_URL + url: process.env.REDIS_URL, + socket: { + keepAlive: + process.env.REDIS_KEEP_ALIVE && !isNaN(parseInt(process.env.REDIS_KEEP_ALIVE, 10)) + ? parseInt(process.env.REDIS_KEEP_ALIVE, 10) + : undefined + } }) } else { this.redisSubscriber = createClient({ @@ -21,7 +27,11 @@ export class RedisEventSubscriber { tls: process.env.REDIS_TLS === 'true', cert: process.env.REDIS_CERT ? Buffer.from(process.env.REDIS_CERT, 'base64') : undefined, key: process.env.REDIS_KEY ? Buffer.from(process.env.REDIS_KEY, 'base64') : undefined, - ca: process.env.REDIS_CA ? Buffer.from(process.env.REDIS_CA, 'base64') : undefined + ca: process.env.REDIS_CA ? Buffer.from(process.env.REDIS_CA, 'base64') : undefined, + keepAlive: + process.env.REDIS_KEEP_ALIVE && !isNaN(parseInt(process.env.REDIS_KEEP_ALIVE, 10)) + ? 
parseInt(process.env.REDIS_KEEP_ALIVE, 10) + : undefined } }) } @@ -85,6 +95,15 @@ export class RedisEventSubscriber { case 'nextAgent': this.sseStreamer.streamNextAgentEvent(chatId, data) break + case 'agentFlowEvent': + this.sseStreamer.streamAgentFlowEvent(chatId, data) + break + case 'agentFlowExecutedData': + this.sseStreamer.streamAgentFlowExecutedDataEvent(chatId, data) + break + case 'nextAgentFlow': + this.sseStreamer.streamNextAgentFlowEvent(chatId, data) + break case 'action': this.sseStreamer.streamActionEvent(chatId, data) break diff --git a/packages/server/src/routes/agentflowv2-generator/index.ts b/packages/server/src/routes/agentflowv2-generator/index.ts new file mode 100644 index 000000000..6d2895509 --- /dev/null +++ b/packages/server/src/routes/agentflowv2-generator/index.ts @@ -0,0 +1,7 @@ +import express from 'express' +import agentflowv2GeneratorController from '../../controllers/agentflowv2-generator' +const router = express.Router() + +router.post('/generate', agentflowv2GeneratorController.generateAgentflowv2) + +export default router diff --git a/packages/server/src/routes/executions/index.ts b/packages/server/src/routes/executions/index.ts new file mode 100644 index 000000000..589bbf13a --- /dev/null +++ b/packages/server/src/routes/executions/index.ts @@ -0,0 +1,16 @@ +import express from 'express' +import executionController from '../../controllers/executions' +const router = express.Router() + +// READ +router.get('/', executionController.getAllExecutions) +router.get(['/', '/:id'], executionController.getExecutionById) + +// PUT +router.put(['/', '/:id'], executionController.updateExecution) + +// DELETE - single execution or multiple executions +router.delete('/:id', executionController.deleteExecutions) +router.delete('/', executionController.deleteExecutions) + +export default router diff --git a/packages/server/src/routes/index.ts b/packages/server/src/routes/index.ts index 89fb7350a..42a5d2312 100644 --- 
a/packages/server/src/routes/index.ts +++ b/packages/server/src/routes/index.ts @@ -35,6 +35,7 @@ import predictionRouter from './predictions' import promptListsRouter from './prompts-lists' import publicChatbotRouter from './public-chatbots' import publicChatflowsRouter from './public-chatflows' +import publicExecutionsRouter from './public-executions' import statsRouter from './stats' import toolsRouter from './tools' import upsertHistoryRouter from './upsert-history' @@ -42,6 +43,10 @@ import variablesRouter from './variables' import vectorRouter from './vectors' import verifyRouter from './verify' import versionRouter from './versions' +import nvidiaNimRouter from './nvidia-nim' +import executionsRouter from './executions' +import validationRouter from './validation' +import agentflowv2GeneratorRouter from './agentflowv2-generator' const router = express.Router() @@ -81,6 +86,7 @@ router.use('/prediction', predictionRouter) router.use('/prompts-list', promptListsRouter) router.use('/public-chatbotConfig', publicChatbotRouter) router.use('/public-chatflows', publicChatflowsRouter) +router.use('/public-executions', publicExecutionsRouter) router.use('/stats', statsRouter) router.use('/tools', toolsRouter) router.use('/variables', variablesRouter) @@ -88,5 +94,9 @@ router.use('/vector', vectorRouter) router.use('/verify', verifyRouter) router.use('/version', versionRouter) router.use('/upsert-history', upsertHistoryRouter) +router.use('/nvidia-nim', nvidiaNimRouter) +router.use('/executions', executionsRouter) +router.use('/validation', validationRouter) +router.use('/agentflowv2-generator', agentflowv2GeneratorRouter) export default router diff --git a/packages/server/src/routes/nvidia-nim/index.ts b/packages/server/src/routes/nvidia-nim/index.ts new file mode 100644 index 000000000..473b57156 --- /dev/null +++ b/packages/server/src/routes/nvidia-nim/index.ts @@ -0,0 +1,16 @@ +import express from 'express' +import nimController from '../../controllers/nvidia-nim' 
+const router = express.Router() + +// READ +router.get('/preload', nimController.preload) +router.get('/get-token', nimController.getToken) +router.get('/download-installer', nimController.downloadInstaller) +router.get('/list-running-containers', nimController.listRunningContainers) +router.post('/pull-image', nimController.pullImage) +router.post('/start-container', nimController.startContainer) +router.post('/stop-container', nimController.stopContainer) +router.post('/get-image', nimController.getImage) +router.post('/get-container', nimController.getContainer) + +export default router diff --git a/packages/server/src/routes/public-executions/index.ts b/packages/server/src/routes/public-executions/index.ts new file mode 100644 index 000000000..eaabe5a1c --- /dev/null +++ b/packages/server/src/routes/public-executions/index.ts @@ -0,0 +1,14 @@ +import express from 'express' +import executionController from '../../controllers/executions' +const router = express.Router() + +// CREATE + +// READ +router.get(['/', '/:id'], executionController.getPublicExecutionById) + +// UPDATE + +// DELETE + +export default router diff --git a/packages/server/src/routes/validation/index.ts b/packages/server/src/routes/validation/index.ts new file mode 100644 index 000000000..14cefc815 --- /dev/null +++ b/packages/server/src/routes/validation/index.ts @@ -0,0 +1,8 @@ +import express from 'express' +import validationController from '../../controllers/validation' +const router = express.Router() + +// READ +router.get('/:id', validationController.checkFlowValidation) + +export default router diff --git a/packages/server/src/services/agentflowv2-generator/index.ts b/packages/server/src/services/agentflowv2-generator/index.ts new file mode 100644 index 000000000..4d987b90e --- /dev/null +++ b/packages/server/src/services/agentflowv2-generator/index.ts @@ -0,0 +1,248 @@ +import { StatusCodes } from 'http-status-codes' +import { InternalFlowiseError } from 
'../../errors/internalFlowiseError' +import { getErrorMessage } from '../../errors/utils' +import { getRunningExpressApp } from '../../utils/getRunningExpressApp' +import path from 'path' +import * as fs from 'fs' +import { generateAgentflowv2 as generateAgentflowv2_json } from 'flowise-components' +import { z } from 'zod' +import { sysPrompt } from './prompt' +import { databaseEntities } from '../../utils' +import logger from '../../utils/logger' +import { MODE } from '../../Interface' + +// Define the Zod schema for Agentflowv2 data structure +const NodeType = z.object({ + id: z.string(), + type: z.string(), + position: z.object({ + x: z.number(), + y: z.number() + }), + width: z.number(), + height: z.number(), + selected: z.boolean().optional(), + positionAbsolute: z + .object({ + x: z.number(), + y: z.number() + }) + .optional(), + dragging: z.boolean().optional(), + data: z.any().optional(), + parentNode: z.string().optional() +}) + +const EdgeType = z.object({ + source: z.string(), + sourceHandle: z.string(), + target: z.string(), + targetHandle: z.string(), + data: z + .object({ + sourceColor: z.string().optional(), + targetColor: z.string().optional(), + edgeLabel: z.string().optional(), + isHumanInput: z.boolean().optional() + }) + .optional(), + type: z.string().optional(), + id: z.string() +}) + +const AgentFlowV2Type = z + .object({ + description: z.string().optional(), + usecases: z.array(z.string()).optional(), + nodes: z.array(NodeType), + edges: z.array(EdgeType) + }) + .describe('Generate Agentflowv2 nodes and edges') + +// Type for the templates array +type AgentFlowV2Template = z.infer + +const getAllAgentFlow2Nodes = async () => { + const appServer = getRunningExpressApp() + const nodes = appServer.nodesPool.componentNodes + const agentFlow2Nodes = [] + for (const node in nodes) { + if (nodes[node].category === 'Agent Flows') { + agentFlow2Nodes.push({ + name: nodes[node].name, + label: nodes[node].label, + description: nodes[node].description + 
}) + } + } + return JSON.stringify(agentFlow2Nodes, null, 2) +} + +const getAllToolNodes = async () => { + const appServer = getRunningExpressApp() + const nodes = appServer.nodesPool.componentNodes + const toolNodes = [] + const disabled_nodes = process.env.DISABLED_NODES ? process.env.DISABLED_NODES.split(',') : [] + const removeTools = ['chainTool', 'retrieverTool', 'webBrowser', ...disabled_nodes] + + for (const node in nodes) { + if (nodes[node].category.includes('Tools')) { + if (removeTools.includes(nodes[node].name)) { + continue + } + toolNodes.push({ + name: nodes[node].name, + description: nodes[node].description + }) + } + } + return JSON.stringify(toolNodes, null, 2) +} + +const getAllAgentflowv2Marketplaces = async () => { + const templates: AgentFlowV2Template[] = [] + let marketplaceDir = path.join(__dirname, '..', '..', '..', 'marketplaces', 'agentflowsv2') + let jsonsInDir = fs.readdirSync(marketplaceDir).filter((file) => path.extname(file) === '.json') + jsonsInDir.forEach((file) => { + try { + const filePath = path.join(__dirname, '..', '..', '..', 'marketplaces', 'agentflowsv2', file) + const fileData = fs.readFileSync(filePath) + const fileDataObj = JSON.parse(fileData.toString()) + // get rid of the node.data, remain all other properties + const filteredNodes = fileDataObj.nodes.map((node: any) => { + return { + ...node, + data: undefined + } + }) + + const template = { + title: file.split('.json')[0], + description: fileDataObj.description || `Template from ${file}`, + usecases: fileDataObj.usecases || [], + nodes: filteredNodes, + edges: fileDataObj.edges + } + + // Validate template against schema + const validatedTemplate = AgentFlowV2Type.parse(template) + templates.push(validatedTemplate) + } catch (error) { + console.error(`Error processing template file ${file}:`, error) + // Continue with next file instead of failing completely + } + }) + + // Format templates into the requested string format + let formattedTemplates = '' + 
templates.forEach((template: AgentFlowV2Template, index: number) => { + formattedTemplates += `Example ${index + 1}: <<${(template as any).title}>> - ${template.description}\n` + formattedTemplates += `"nodes": [\n` + + // Format nodes with proper indentation + const nodesJson = JSON.stringify(template.nodes, null, 3) + // Split by newlines and add 3 spaces to the beginning of each line except the first and last + const nodesLines = nodesJson.split('\n') + if (nodesLines.length > 2) { + formattedTemplates += ` ${nodesLines[0]}\n` + for (let i = 1; i < nodesLines.length - 1; i++) { + formattedTemplates += ` ${nodesLines[i]}\n` + } + formattedTemplates += ` ${nodesLines[nodesLines.length - 1]}\n` + } else { + formattedTemplates += ` ${nodesJson}\n` + } + + formattedTemplates += `]\n` + formattedTemplates += `"edges": [\n` + + // Format edges with proper indentation + const edgesJson = JSON.stringify(template.edges, null, 3) + // Split by newlines and add tab to the beginning of each line except the first and last + const edgesLines = edgesJson.split('\n') + if (edgesLines.length > 2) { + formattedTemplates += `\t${edgesLines[0]}\n` + for (let i = 1; i < edgesLines.length - 1; i++) { + formattedTemplates += `\t${edgesLines[i]}\n` + } + formattedTemplates += `\t${edgesLines[edgesLines.length - 1]}\n` + } else { + formattedTemplates += `\t${edgesJson}\n` + } + + formattedTemplates += `]\n\n` + }) + + return formattedTemplates +} + +const generateAgentflowv2 = async (question: string, selectedChatModel: Record) => { + try { + const agentFlow2Nodes = await getAllAgentFlow2Nodes() + const toolNodes = await getAllToolNodes() + const marketplaceTemplates = await getAllAgentflowv2Marketplaces() + + const prompt = sysPrompt + .replace('{agentFlow2Nodes}', agentFlow2Nodes) + .replace('{marketplaceTemplates}', marketplaceTemplates) + .replace('{userRequest}', question) + const options: Record = { + appDataSource: getRunningExpressApp().AppDataSource, + databaseEntities: 
databaseEntities, + logger: logger + } + + let response + + if (process.env.MODE === MODE.QUEUE) { + const predictionQueue = getRunningExpressApp().queueManager.getQueue('prediction') + const job = await predictionQueue.addJob({ + prompt, + question, + toolNodes, + selectedChatModel, + isAgentFlowGenerator: true + }) + logger.debug(`[server]: Generated Agentflowv2 Job added to queue: ${job.id}`) + const queueEvents = predictionQueue.getQueueEvents() + response = await job.waitUntilFinished(queueEvents) + } else { + response = await generateAgentflowv2_json( + { prompt, componentNodes: getRunningExpressApp().nodesPool.componentNodes, toolNodes, selectedChatModel }, + question, + options + ) + } + + try { + // Try to parse and validate the response if it's a string + if (typeof response === 'string') { + const parsedResponse = JSON.parse(response) + const validatedResponse = AgentFlowV2Type.parse(parsedResponse) + return validatedResponse + } + // If response is already an object + else if (typeof response === 'object') { + const validatedResponse = AgentFlowV2Type.parse(response) + return validatedResponse + } + // Unexpected response type + else { + throw new Error(`Unexpected response type: ${typeof response}`) + } + } catch (parseError) { + console.error('Failed to parse or validate response:', parseError) + // If parsing fails, return an error object + return { + error: 'Failed to validate response format', + rawResponse: response + } as any // Type assertion to avoid type errors + } + } catch (error) { + throw new InternalFlowiseError(StatusCodes.INTERNAL_SERVER_ERROR, `Error: generateAgentflowv2 - ${getErrorMessage(error)}`) + } +} + +export default { + generateAgentflowv2 +} diff --git a/packages/server/src/services/agentflowv2-generator/prompt.ts b/packages/server/src/services/agentflowv2-generator/prompt.ts new file mode 100644 index 000000000..eef732c49 --- /dev/null +++ b/packages/server/src/services/agentflowv2-generator/prompt.ts @@ -0,0 +1,66 @@ 
+export const sysPromptBackup = `You are a workflow orchestrator that is designed to make agent coordination and execution easy. Workflow consists of nodes and edges. Your goal is to generate nodes and edges needed for the workflow to achieve the given task. + +Here are the nodes to choose from: +{agentFlow2Nodes} + +Here's some examples of workflows, take a look at which nodes are most relevant to the task and how the nodes and edges are connected: +{marketplaceTemplates} + +Now, let's generate the nodes and edges for the user's request. +The response should be in JSON format with "nodes" and "edges" arrays, following the structure shown in the examples. + +Think carefully, break down the task into smaller steps and think about which nodes are needed for each step. +1. First, take a look at the examples and use them as references to think about which nodes are needed to achieve the task. It must always start with startAgentflow node, and have at least 2 nodes in total. You MUST only use nodes that are in the list of nodes above. Each node must have a unique incrementing id. +2. Then, think about the edges between the nodes. +3. An agentAgentflow is an AI Agent that can use tools to accomplish goals, executing decisions, automating tasks, and interacting with the real world autonomously such as web search, interact with database and API, send messages, book appointments, etc. Always place higher priority to this and see if the tasks can be accomplished by this node. Use this node if you are asked to create an agent that can perform multiple tasks autonomously. +4. A llmAgentflow is excel at processing, understanding, and generating human-like language. It can be used for generating text, summarizing, translating, returning JSON outputs, etc. +5. If you need to execute the tool sequentially after another, you can use the toolAgentflow node. +6. If you need to iterate over a set of data, you can use the iteration node. 
You must have at least 1 node inside the iteration node. The children nodes will be executed N times, where N is the number of items in the iterationInput array. The children nodes must have the property "parentNode" and the value must be the id of the iteration node. +7. If you can't find a node that fits the task, you can use the httpAgentflow node to execute a http request. For example, to retrieve data from 3rd party APIs, or to send data to a webhook +8. If you need to dynamically choose between user intention, for example classifying the user's intent, you can use the conditionAgentAgentflow node. For defined conditions, you can use the conditionAgentflow node. +` + +export const sysPrompt = `You are an advanced workflow orchestrator designed to generate nodes and edges for complex tasks. Your goal is to create a workflow that accomplishes the given user request efficiently and effectively. + +Your task is to generate a workflow for the following user request: + + +{userRequest} + + +First, review the available nodes for this system: + + +{agentFlow2Nodes} + + +Now, examine these workflow examples to understand how nodes are typically connected and which are most relevant for different tasks: + + +{marketplaceTemplates} + + +To create this workflow, follow these steps and wrap your thought process in tags inside your thinking block: + +1. List out all the key components of the user request. +2. Analyze the user request and break it down into smaller steps. +3. For each step, consider which nodes are most appropriate and match each component with potential nodes. Remember: + - Always start with a startAgentflow node. + - Include at least 2 nodes in total. + - Only use nodes from the available nodes list. + - Assign each node a unique, incrementing ID. +4. Outline the overall structure of the workflow. +5. Determine the logical connections (edges) between the nodes. +6. Consider special cases: + - Use agentAgentflow for multiple autonomous tasks. 
+ - Use llmAgentflow for language processing tasks. + - Use toolAgentflow for sequential tool execution. + - Use iteration node when you need to iterate over a set of data (must include at least one child node with a "parentNode" property). + - Use httpAgentflow for API requests or webhooks. + - Use conditionAgentAgentflow for dynamic choices or conditionAgentflow for defined conditions. + - Use humanInputAgentflow for human input and review. + - Use loopAgentflow for repetitive tasks, or when back and forth communication is needed such as hierarchical workflows. + +After your analysis, provide the final workflow as a JSON object with "nodes" and "edges" arrays. + +Begin your analysis and workflow creation process now. Your final output should consist only of the JSON object with the workflow and should not duplicate or rehash any of the work you did in the workflow planning section.` diff --git a/packages/server/src/services/assistants/index.ts b/packages/server/src/services/assistants/index.ts index 0681376bc..1ac9fff56 100644 --- a/packages/server/src/services/assistants/index.ts +++ b/packages/server/src/services/assistants/index.ts @@ -16,6 +16,7 @@ import { ICommonObject } from 'flowise-components' import logger from '../../utils/logger' import { ASSISTANT_PROMPT_GENERATOR } from '../../utils/prompt' import { INPUT_PARAMS_TYPE } from '../../utils/constants' +import { validate } from 'uuid' const createAssistant = async (requestBody: any): Promise => { try { @@ -339,6 +340,12 @@ const updateAssistant = async (assistantId: string, requestBody: any): Promise[], queryRunner?: QueryRunner): Promise => { try { + for (const data of newAssistants) { + if (data.id && !validate(data.id)) { + throw new InternalFlowiseError(StatusCodes.PRECONDITION_FAILED, `Error: importAssistants - invalid id!`) + } + } + const appServer = getRunningExpressApp() const repository = queryRunner ? 
queryRunner.manager.getRepository(Assistant) : appServer.AppDataSource.getRepository(Assistant) @@ -426,9 +433,10 @@ const getDocumentStores = async (): Promise => { const getTools = async (): Promise => { try { const tools = await nodesService.getAllNodesForCategory('Tools') + const mcpTools = await nodesService.getAllNodesForCategory('Tools (MCP)') // filter out those tools that input params type are not in the list - const filteredTools = tools.filter((tool) => { + const filteredTools = [...tools, ...mcpTools].filter((tool) => { const inputs = tool.inputs || [] return inputs.every((input) => INPUT_PARAMS_TYPE.includes(input.type)) }) diff --git a/packages/server/src/services/chat-messages/index.ts b/packages/server/src/services/chat-messages/index.ts index dc3a9690d..1ee804e0b 100644 --- a/packages/server/src/services/chat-messages/index.ts +++ b/packages/server/src/services/chat-messages/index.ts @@ -1,15 +1,15 @@ -import { DeleteResult, FindOptionsWhere } from 'typeorm' -import { StatusCodes } from 'http-status-codes' -import { ChatMessageRatingType, ChatType, IChatMessage, MODE } from '../../Interface' -import { utilGetChatMessage } from '../../utils/getChatMessage' -import { utilAddChatMessage } from '../../utils/addChatMesage' -import { getRunningExpressApp } from '../../utils/getRunningExpressApp' -import { ChatMessageFeedback } from '../../database/entities/ChatMessageFeedback' import { removeFilesFromStorage } from 'flowise-components' -import logger from '../../utils/logger' +import { StatusCodes } from 'http-status-codes' +import { DeleteResult, FindOptionsWhere } from 'typeorm' import { ChatMessage } from '../../database/entities/ChatMessage' +import { ChatMessageFeedback } from '../../database/entities/ChatMessageFeedback' import { InternalFlowiseError } from '../../errors/internalFlowiseError' import { getErrorMessage } from '../../errors/utils' +import { ChatMessageRatingType, ChatType, IChatMessage, MODE } from '../../Interface' +import { 
utilAddChatMessage } from '../../utils/addChatMesage' +import { utilGetChatMessage } from '../../utils/getChatMessage' +import { getRunningExpressApp } from '../../utils/getRunningExpressApp' +import logger from '../../utils/logger' // Add chatmessages for chatflowid const createChatMessage = async (chatMessage: Partial) => { @@ -118,6 +118,7 @@ const removeAllChatMessages = async ( logger.error(`[server]: Error deleting file storage for chatflow ${chatflowid}, chatId ${chatId}: ${e}`) } } + const dbResponse = await appServer.AppDataSource.getRepository(ChatMessage).delete(deleteOptions) return dbResponse } catch (error) { @@ -136,6 +137,10 @@ const removeChatMessagesByMessageIds = async ( try { const appServer = getRunningExpressApp() + // Get messages before deletion to check for executionId + const messages = await appServer.AppDataSource.getRepository(ChatMessage).findByIds(messageIds) + const executionIds = messages.map((msg) => msg.executionId).filter(Boolean) + for (const [composite_key] of chatIdMap) { const [chatId] = composite_key.split('_') @@ -147,6 +152,11 @@ const removeChatMessagesByMessageIds = async ( await removeFilesFromStorage(chatflowid, chatId) } + // Delete executions if they exist + if (executionIds.length > 0) { + await appServer.AppDataSource.getRepository('Execution').delete(executionIds) + } + const dbResponse = await appServer.AppDataSource.getRepository(ChatMessage).delete(messageIds) return dbResponse } catch (error) { @@ -178,11 +188,23 @@ const abortChatMessage = async (chatId: string, chatflowid: string) => { } } +async function getAllMessages(): Promise { + const appServer = getRunningExpressApp() + return await appServer.AppDataSource.getRepository(ChatMessage).find() +} + +async function getAllMessagesFeedback(): Promise { + const appServer = getRunningExpressApp() + return await appServer.AppDataSource.getRepository(ChatMessageFeedback).find() +} + export default { createChatMessage, getAllChatMessages, 
getAllInternalChatMessages, removeAllChatMessages, removeChatMessagesByMessageIds, - abortChatMessage + abortChatMessage, + getAllMessages, + getAllMessagesFeedback } diff --git a/packages/server/src/services/chatflows/index.ts b/packages/server/src/services/chatflows/index.ts index edabc3b29..1367f42fc 100644 --- a/packages/server/src/services/chatflows/index.ts +++ b/packages/server/src/services/chatflows/index.ts @@ -1,6 +1,8 @@ -import { removeFolderFromStorage } from 'flowise-components' +import { ICommonObject, removeFolderFromStorage } from 'flowise-components' import { StatusCodes } from 'http-status-codes' +import { QueryRunner } from 'typeorm' import { ChatflowType, IReactFlowObject } from '../../Interface' +import { FLOWISE_COUNTER_STATUS, FLOWISE_METRIC_COUNTERS } from '../../Interface.Metrics' import { ChatFlow } from '../../database/entities/ChatFlow' import { ChatMessage } from '../../database/entities/ChatMessage' import { ChatMessageFeedback } from '../../database/entities/ChatMessageFeedback' @@ -13,8 +15,7 @@ import { containsBase64File, updateFlowDataWithFilePaths } from '../../utils/fil import { getRunningExpressApp } from '../../utils/getRunningExpressApp' import { utilGetUploadsConfig } from '../../utils/getUploadsConfig' import logger from '../../utils/logger' -import { FLOWISE_METRIC_COUNTERS, FLOWISE_COUNTER_STATUS } from '../../Interface.Metrics' -import { QueryRunner } from 'typeorm' +import { validate } from 'uuid' // Check if chatflow valid for streaming const checkIfChatflowIsValidForStreaming = async (chatflowId: string): Promise => { @@ -28,6 +29,19 @@ const checkIfChatflowIsValidForStreaming = async (chatflowId: string): Promise => { const dbResponse = await appServer.AppDataSource.getRepository(ChatFlow).find() if (type === 'MULTIAGENT') { return dbResponse.filter((chatflow) => chatflow.type === 'MULTIAGENT') + } else if (type === 'AGENTFLOW') { + return dbResponse.filter((chatflow) => chatflow.type === 'AGENTFLOW') + } else if 
(type === 'ASSISTANT') { + return dbResponse.filter((chatflow) => chatflow.type === 'ASSISTANT') } else if (type === 'CHATFLOW') { // fetch all chatflows that are not agentflow return dbResponse.filter((chatflow) => chatflow.type === 'CHATFLOW' || !chatflow.type) @@ -209,6 +227,12 @@ const saveChatflow = async (newChatFlow: ChatFlow): Promise => { const importChatflows = async (newChatflows: Partial[], queryRunner?: QueryRunner): Promise => { try { + for (const data of newChatflows) { + if (data.id && !validate(data.id)) { + throw new InternalFlowiseError(StatusCodes.PRECONDITION_FAILED, `Error: importChatflows - invalid id!`) + } + } + const appServer = getRunningExpressApp() const repository = queryRunner ? queryRunner.manager.getRepository(ChatFlow) : appServer.AppDataSource.getRepository(ChatFlow) @@ -318,7 +342,7 @@ const getSinglePublicChatbotConfig = async (chatflowId: string): Promise => if (dbResponse.chatbotConfig || uploadsConfig) { try { const parsedConfig = dbResponse.chatbotConfig ? 
JSON.parse(dbResponse.chatbotConfig) : {} - return { ...parsedConfig, uploads: uploadsConfig } + return { ...parsedConfig, uploads: uploadsConfig, flowData: dbResponse.flowData } } catch (e) { throw new InternalFlowiseError(StatusCodes.INTERNAL_SERVER_ERROR, `Error parsing Chatbot Config for Chatflow ${chatflowId}`) } diff --git a/packages/server/src/services/documentstore/index.ts b/packages/server/src/services/documentstore/index.ts index 1d893cc6d..adea69bae 100644 --- a/packages/server/src/services/documentstore/index.ts +++ b/packages/server/src/services/documentstore/index.ts @@ -740,7 +740,7 @@ export const processLoader = async ({ appDataSource, componentNodes, data, docLo return getDocumentStoreFileChunks(appDataSource, data.storeId as string, docLoaderId) } -const processLoaderMiddleware = async (data: IDocumentStoreLoaderForPreview, docLoaderId: string) => { +const processLoaderMiddleware = async (data: IDocumentStoreLoaderForPreview, docLoaderId: string, isInternalRequest = false) => { try { const appServer = getRunningExpressApp() const appDataSource = appServer.AppDataSource @@ -761,6 +761,12 @@ const processLoaderMiddleware = async (data: IDocumentStoreLoaderForPreview, doc const job = await upsertQueue.addJob(omit(executeData, OMIT_QUEUE_JOB_DATA)) logger.debug(`[server]: Job added to queue: ${job.id}`) + if (isInternalRequest) { + return { + jobId: job.id + } + } + const queueEvents = upsertQueue.getQueueEvents() const result = await job.waitUntilFinished(queueEvents) @@ -843,7 +849,7 @@ const _saveChunksToStorage = async ( filesWithMetadata.push(fileMetadata) } } - data.loaderConfig[keys[i]] = 'FILE-STORAGE::' + JSON.stringify(fileNames) + if (fileNames.length) data.loaderConfig[keys[i]] = 'FILE-STORAGE::' + JSON.stringify(fileNames) } else if (re.test(input)) { const fileNames: string[] = [] const fileMetadata = await _saveFileToStorage(input, entity) diff --git a/packages/server/src/services/executions/index.ts 
b/packages/server/src/services/executions/index.ts new file mode 100644 index 000000000..899d6a092 --- /dev/null +++ b/packages/server/src/services/executions/index.ts @@ -0,0 +1,156 @@ +import { StatusCodes } from 'http-status-codes' +import { InternalFlowiseError } from '../../errors/internalFlowiseError' +import { getErrorMessage } from '../../errors/utils' +import { getRunningExpressApp } from '../../utils/getRunningExpressApp' +import { Execution } from '../../database/entities/Execution' +import { ExecutionState, IAgentflowExecutedData } from '../../Interface' +import { In } from 'typeorm' +import { ChatMessage } from '../../database/entities/ChatMessage' +import { _removeCredentialId } from '../../utils/buildAgentflow' + +interface ExecutionFilters { + id?: string + agentflowId?: string + sessionId?: string + state?: ExecutionState + startDate?: Date + endDate?: Date + page?: number + limit?: number +} + +const getExecutionById = async (executionId: string): Promise => { + try { + const appServer = getRunningExpressApp() + const executionRepository = appServer.AppDataSource.getRepository(Execution) + const res = await executionRepository.findOne({ where: { id: executionId } }) + if (!res) { + throw new InternalFlowiseError(StatusCodes.NOT_FOUND, `Execution ${executionId} not found`) + } + return res + } catch (error) { + throw new InternalFlowiseError( + StatusCodes.INTERNAL_SERVER_ERROR, + `Error: executionsService.getExecutionById - ${getErrorMessage(error)}` + ) + } +} + +const getPublicExecutionById = async (executionId: string): Promise => { + try { + const appServer = getRunningExpressApp() + const executionRepository = appServer.AppDataSource.getRepository(Execution) + const res = await executionRepository.findOne({ where: { id: executionId, isPublic: true } }) + if (!res) { + throw new InternalFlowiseError(StatusCodes.NOT_FOUND, `Execution ${executionId} not found`) + } + const executionData = typeof res?.executionData === 'string' ? 
JSON.parse(res?.executionData) : res?.executionData + const executionDataWithoutCredentialId = executionData.map((data: IAgentflowExecutedData) => _removeCredentialId(data)) + const stringifiedExecutionData = JSON.stringify(executionDataWithoutCredentialId) + return { ...res, executionData: stringifiedExecutionData } + } catch (error) { + throw new InternalFlowiseError( + StatusCodes.INTERNAL_SERVER_ERROR, + `Error: executionsService.getPublicExecutionById - ${getErrorMessage(error)}` + ) + } +} + +const getAllExecutions = async (filters: ExecutionFilters = {}): Promise<{ data: Execution[]; total: number }> => { + try { + const appServer = getRunningExpressApp() + const { id, agentflowId, sessionId, state, startDate, endDate, page = 1, limit = 10 } = filters + + // Handle UUID fields properly using raw parameters to avoid type conversion issues + // This uses the query builder instead of direct objects for compatibility with UUID fields + const queryBuilder = appServer.AppDataSource.getRepository(Execution) + .createQueryBuilder('execution') + .leftJoinAndSelect('execution.agentflow', 'agentflow') + .orderBy('execution.createdDate', 'DESC') + .skip((page - 1) * limit) + .take(limit) + + if (id) queryBuilder.andWhere('execution.id = :id', { id }) + if (agentflowId) queryBuilder.andWhere('execution.agentflowId = :agentflowId', { agentflowId }) + if (sessionId) queryBuilder.andWhere('execution.sessionId = :sessionId', { sessionId }) + if (state) queryBuilder.andWhere('execution.state = :state', { state }) + + // Date range conditions + if (startDate && endDate) { + queryBuilder.andWhere('execution.createdDate BETWEEN :startDate AND :endDate', { startDate, endDate }) + } else if (startDate) { + queryBuilder.andWhere('execution.createdDate >= :startDate', { startDate }) + } else if (endDate) { + queryBuilder.andWhere('execution.createdDate <= :endDate', { endDate }) + } + + const [data, total] = await queryBuilder.getManyAndCount() + + return { data, total } + } catch 
(error) { + throw new InternalFlowiseError( + StatusCodes.INTERNAL_SERVER_ERROR, + `Error: executionsService.getAllExecutions - ${getErrorMessage(error)}` + ) + } +} + +const updateExecution = async (executionId: string, data: Partial): Promise => { + try { + const appServer = getRunningExpressApp() + const execution = await appServer.AppDataSource.getRepository(Execution).findOneBy({ + id: executionId + }) + if (!execution) { + throw new InternalFlowiseError(StatusCodes.NOT_FOUND, `Execution ${executionId} not found`) + } + const updateExecution = new Execution() + Object.assign(updateExecution, data) + await appServer.AppDataSource.getRepository(Execution).merge(execution, updateExecution) + const dbResponse = await appServer.AppDataSource.getRepository(Execution).save(execution) + return dbResponse + } catch (error) { + throw new InternalFlowiseError( + StatusCodes.INTERNAL_SERVER_ERROR, + `Error: executionsService.updateExecution - ${getErrorMessage(error)}` + ) + } +} + +/** + * Delete multiple executions by their IDs + * @param executionIds Array of execution IDs to delete + * @returns Object with success status and count of deleted executions + */ +const deleteExecutions = async (executionIds: string[]): Promise<{ success: boolean; deletedCount: number }> => { + try { + const appServer = getRunningExpressApp() + const executionRepository = appServer.AppDataSource.getRepository(Execution) + + // Delete executions where id is in the provided array + const result = await executionRepository.delete({ + id: In(executionIds) + }) + + // Update chat message executionId column to NULL + await appServer.AppDataSource.getRepository(ChatMessage).update({ executionId: In(executionIds) }, { executionId: null as any }) + + return { + success: true, + deletedCount: result.affected || 0 + } + } catch (error) { + throw new InternalFlowiseError( + StatusCodes.INTERNAL_SERVER_ERROR, + `Error: executionsService.deleteExecutions - ${getErrorMessage(error)}` + ) + } +} + +export 
default { + getExecutionById, + getAllExecutions, + deleteExecutions, + getPublicExecutionById, + updateExecution +} diff --git a/packages/server/src/services/export-import/index.ts b/packages/server/src/services/export-import/index.ts index c113476a3..3eebc1766 100644 --- a/packages/server/src/services/export-import/index.ts +++ b/packages/server/src/services/export-import/index.ts @@ -1,40 +1,80 @@ import { StatusCodes } from 'http-status-codes' +import { In, QueryRunner } from 'typeorm' +import { v4 as uuidv4 } from 'uuid' +import { Assistant } from '../../database/entities/Assistant' import { ChatFlow } from '../../database/entities/ChatFlow' +import { ChatMessage } from '../../database/entities/ChatMessage' +import { ChatMessageFeedback } from '../../database/entities/ChatMessageFeedback' +import { CustomTemplate } from '../../database/entities/CustomTemplate' +import { DocumentStore } from '../../database/entities/DocumentStore' +import { DocumentStoreFileChunk } from '../../database/entities/DocumentStoreFileChunk' +import { Execution } from '../../database/entities/Execution' import { Tool } from '../../database/entities/Tool' import { Variable } from '../../database/entities/Variable' -import { Assistant } from '../../database/entities/Assistant' import { InternalFlowiseError } from '../../errors/internalFlowiseError' import { getErrorMessage } from '../../errors/utils' import { getRunningExpressApp } from '../../utils/getRunningExpressApp' +import assistantService from '../assistants' +import chatMessagesService from '../chat-messages' import chatflowService from '../chatflows' +import documenStoreService from '../documentstore' +import executionService from '../executions' +import marketplacesService from '../marketplaces' import toolsService from '../tools' import variableService from '../variables' -import assistantService from '../assistants' type ExportInput = { - tool: boolean - chatflow: boolean agentflow: boolean + agentflowv2: boolean + 
assistantCustom: boolean + assistantOpenAI: boolean + assistantAzure: boolean + chatflow: boolean + chat_message: boolean + chat_feedback: boolean + custom_template: boolean + document_store: boolean + execution: boolean + tool: boolean variable: boolean - assistant: boolean } type ExportData = { - Tool: Tool[] - ChatFlow: ChatFlow[] AgentFlow: ChatFlow[] + AgentFlowV2: ChatFlow[] + AssistantCustom: Assistant[] + AssistantFlow: ChatFlow[] + AssistantOpenAI: Assistant[] + AssistantAzure: Assistant[] + ChatFlow: ChatFlow[] + ChatMessage: ChatMessage[] + ChatMessageFeedback: ChatMessageFeedback[] + CustomTemplate: CustomTemplate[] + DocumentStore: DocumentStore[] + DocumentStoreFileChunk: DocumentStoreFileChunk[] + Execution: Execution[] + Tool: Tool[] Variable: Variable[] - Assistant: Assistant[] } const convertExportInput = (body: any): ExportInput => { try { if (!body || typeof body !== 'object') throw new Error('Invalid ExportInput object in request body') - if (body.tool && typeof body.tool !== 'boolean') throw new Error('Invalid tool property in ExportInput object') - if (body.chatflow && typeof body.chatflow !== 'boolean') throw new Error('Invalid chatflow property in ExportInput object') if (body.agentflow && typeof body.agentflow !== 'boolean') throw new Error('Invalid agentflow property in ExportInput object') - if (body.variable && typeof body.variable !== 'boolean') throw new Error('Invalid variable property in ExportInput object') + if (body.agentflowv2 && typeof body.agentflowv2 !== 'boolean') throw new Error('Invalid agentflowv2 property in ExportInput object') if (body.assistant && typeof body.assistant !== 'boolean') throw new Error('Invalid assistant property in ExportInput object') + if (body.chatflow && typeof body.chatflow !== 'boolean') throw new Error('Invalid chatflow property in ExportInput object') + if (body.chat_message && typeof body.chat_message !== 'boolean') + throw new Error('Invalid chat_message property in ExportInput object') + if 
(body.chat_feedback && typeof body.chat_feedback !== 'boolean') + throw new Error('Invalid chat_feedback property in ExportInput object') + if (body.custom_template && typeof body.custom_template !== 'boolean') + throw new Error('Invalid custom_template property in ExportInput object') + if (body.document_store && typeof body.document_store !== 'boolean') + throw new Error('Invalid document_store property in ExportInput object') + if (body.execution && typeof body.execution !== 'boolean') throw new Error('Invalid execution property in ExportInput object') + if (body.tool && typeof body.tool !== 'boolean') throw new Error('Invalid tool property in ExportInput object') + if (body.variable && typeof body.variable !== 'boolean') throw new Error('Invalid variable property in ExportInput object') return body as ExportInput } catch (error) { throw new InternalFlowiseError( @@ -47,31 +87,55 @@ const convertExportInput = (body: any): ExportInput => { const FileDefaultName = 'ExportData.json' const exportData = async (exportInput: ExportInput): Promise<{ FileDefaultName: string } & ExportData> => { try { - // step 1 - get all Tool - let allTool: Tool[] = [] - if (exportInput.tool === true) allTool = await toolsService.getAllTools() + let AgentFlow: ChatFlow[] = exportInput.agentflow === true ? await chatflowService.getAllChatflows('MULTIAGENT') : [] + let AgentFlowV2: ChatFlow[] = exportInput.agentflowv2 === true ? await chatflowService.getAllChatflows('AGENTFLOW') : [] - // step 2 - get all ChatFlow - let allChatflow: ChatFlow[] = [] - if (exportInput.chatflow === true) allChatflow = await chatflowService.getAllChatflows('CHATFLOW') + let AssistantCustom: Assistant[] = exportInput.assistantCustom === true ? await assistantService.getAllAssistants('CUSTOM') : [] + let AssistantFlow: ChatFlow[] = exportInput.assistantCustom === true ? 
await chatflowService.getAllChatflows('ASSISTANT') : [] - // step 3 - get all MultiAgent - let allMultiAgent: ChatFlow[] = [] - if (exportInput.agentflow === true) allMultiAgent = await chatflowService.getAllChatflows('MULTIAGENT') + let AssistantOpenAI: Assistant[] = exportInput.assistantOpenAI === true ? await assistantService.getAllAssistants('OPENAI') : [] - let allVars: Variable[] = [] - if (exportInput.variable === true) allVars = await variableService.getAllVariables() + let AssistantAzure: Assistant[] = exportInput.assistantAzure === true ? await assistantService.getAllAssistants('AZURE') : [] - let allAssistants: Assistant[] = [] - if (exportInput.assistant === true) allAssistants = await assistantService.getAllAssistants() + let ChatFlow: ChatFlow[] = exportInput.chatflow === true ? await chatflowService.getAllChatflows('CHATFLOW') : [] + + let ChatMessage: ChatMessage[] = exportInput.chat_message === true ? await chatMessagesService.getAllMessages() : [] + + let ChatMessageFeedback: ChatMessageFeedback[] = + exportInput.chat_feedback === true ? await chatMessagesService.getAllMessagesFeedback() : [] + + let CustomTemplate: CustomTemplate[] = exportInput.custom_template === true ? await marketplacesService.getAllCustomTemplates() : [] + CustomTemplate = CustomTemplate.map((customTemplate) => ({ ...customTemplate, usecases: JSON.stringify(customTemplate.usecases) })) + + let DocumentStore: DocumentStore[] = exportInput.document_store === true ? await documenStoreService.getAllDocumentStores() : [] + + let DocumentStoreFileChunk: DocumentStoreFileChunk[] = + exportInput.document_store === true ? await documenStoreService.getAllDocumentFileChunks() : [] + + const { data: totalExecutions } = exportInput.execution === true ? await executionService.getAllExecutions() : { data: [] } + let Execution: Execution[] = exportInput.execution === true ? totalExecutions : [] + + let Tool: Tool[] = exportInput.tool === true ? 
await toolsService.getAllTools() : [] + + let Variable: Variable[] = exportInput.variable === true ? await variableService.getAllVariables() : [] return { FileDefaultName, - Tool: allTool, - ChatFlow: allChatflow, - AgentFlow: allMultiAgent, - Variable: allVars, - Assistant: allAssistants + AgentFlow, + AgentFlowV2, + AssistantCustom, + AssistantFlow, + AssistantOpenAI, + AssistantAzure, + ChatFlow, + ChatMessage, + ChatMessageFeedback, + CustomTemplate, + DocumentStore, + DocumentStoreFileChunk, + Execution, + Tool, + Variable } } catch (error) { throw new InternalFlowiseError( @@ -81,28 +145,454 @@ const exportData = async (exportInput: ExportInput): Promise<{ FileDefaultName: } } -const importData = async (importData: ExportData) => { +async function replaceDuplicateIdsForChatFlow(queryRunner: QueryRunner, originalData: ExportData, chatflows: ChatFlow[]) { try { - const appServer = getRunningExpressApp() - const queryRunner = appServer.AppDataSource.createQueryRunner() + const ids = chatflows.map((chatflow) => chatflow.id) + const records = await queryRunner.manager.find(ChatFlow, { + where: { id: In(ids) } + }) + if (records.length < 0) return originalData + for (let record of records) { + const oldId = record.id + const newId = uuidv4() + originalData = JSON.parse(JSON.stringify(originalData).replaceAll(oldId, newId)) + } + return originalData + } catch (error) { + throw new InternalFlowiseError( + StatusCodes.INTERNAL_SERVER_ERROR, + `Error: exportImportService.replaceDuplicateIdsForChatflow - ${getErrorMessage(error)}` + ) + } +} + +async function replaceDuplicateIdsForAssistant(queryRunner: QueryRunner, originalData: ExportData, assistants: Assistant[]) { + try { + const ids = assistants.map((assistant) => assistant.id) + const records = await queryRunner.manager.find(Assistant, { + where: { id: In(ids) } + }) + if (records.length < 0) return originalData + for (let record of records) { + const oldId = record.id + const newId = uuidv4() + originalData = 
JSON.parse(JSON.stringify(originalData).replaceAll(oldId, newId)) + } + return originalData + } catch (error) { + throw new InternalFlowiseError( + StatusCodes.INTERNAL_SERVER_ERROR, + `Error: exportImportService.replaceDuplicateIdsForAssistant - ${getErrorMessage(error)}` + ) + } +} + +async function replaceDuplicateIdsForChatMessage(queryRunner: QueryRunner, originalData: ExportData, chatMessages: ChatMessage[]) { + try { + const chatmessageChatflowIds = chatMessages.map((chatMessage) => { + return { id: chatMessage.chatflowid, qty: 0 } + }) + const originalDataChatflowIds = [ + ...originalData.AssistantFlow.map((assistantFlow) => assistantFlow.id), + ...originalData.AgentFlow.map((agentFlow) => agentFlow.id), + ...originalData.AgentFlowV2.map((agentFlowV2) => agentFlowV2.id), + ...originalData.ChatFlow.map((chatFlow) => chatFlow.id) + ] + chatmessageChatflowIds.forEach((item) => { + if (originalDataChatflowIds.includes(item.id)) { + item.qty += 1 + } + }) + const databaseChatflowIds = await ( + await queryRunner.manager.find(ChatFlow, { + where: { id: In(chatmessageChatflowIds.map((chatmessageChatflowId) => chatmessageChatflowId.id)) } + }) + ).map((chatflow) => chatflow.id) + chatmessageChatflowIds.forEach((item) => { + if (databaseChatflowIds.includes(item.id)) { + item.qty += 1 + } + }) + + const missingChatflowIds = chatmessageChatflowIds.filter((item) => item.qty === 0).map((item) => item.id) + if (missingChatflowIds.length > 0) { + chatMessages = chatMessages.filter((chatMessage) => !missingChatflowIds.includes(chatMessage.chatflowid)) + originalData.ChatMessage = chatMessages + } + + const ids = chatMessages.map((chatMessage) => chatMessage.id) + const records = await queryRunner.manager.find(ChatMessage, { + where: { id: In(ids) } + }) + if (records.length < 0) return originalData + for (let record of records) { + const oldId = record.id + const newId = uuidv4() + originalData = JSON.parse(JSON.stringify(originalData).replaceAll(oldId, newId)) + } + 
return originalData + } catch (error) { + throw new InternalFlowiseError( + StatusCodes.INTERNAL_SERVER_ERROR, + `Error: exportImportService.replaceDuplicateIdsForChatMessage - ${getErrorMessage(error)}` + ) + } +} + +async function replaceExecutionIdForChatMessage(queryRunner: QueryRunner, originalData: ExportData, chatMessages: ChatMessage[]) { + try { + // step 1 - get all execution ids from chatMessages + const chatMessageExecutionIds = chatMessages + .map((chatMessage) => { + return { id: chatMessage.executionId, qty: 0 } + }) + .filter((item): item is { id: string; qty: number } => item !== undefined) + + // step 2 - increase qty if execution id is in importData.Execution + const originalDataExecutionIds = originalData.Execution.map((execution) => execution.id) + chatMessageExecutionIds.forEach((item) => { + if (originalDataExecutionIds.includes(item.id)) { + item.qty += 1 + } + }) + + // step 3 - increase qty if execution id is in database + const databaseExecutionIds = await ( + await queryRunner.manager.find(Execution, { + where: { id: In(chatMessageExecutionIds.map((chatMessageExecutionId) => chatMessageExecutionId.id)) } + }) + ).map((execution) => execution.id) + chatMessageExecutionIds.forEach((item) => { + if (databaseExecutionIds.includes(item.id)) { + item.qty += 1 + } + }) + + // step 4 - if executionIds not found replace with NULL + const missingExecutionIds = chatMessageExecutionIds.filter((item) => item.qty === 0).map((item) => item.id) + chatMessages.forEach((chatMessage) => { + if (chatMessage.executionId && missingExecutionIds.includes(chatMessage.executionId)) { + delete chatMessage.executionId + } + }) + + originalData.ChatMessage = chatMessages + + return originalData + } catch (error) { + throw new InternalFlowiseError( + StatusCodes.INTERNAL_SERVER_ERROR, + `Error: exportImportService.replaceExecutionIdForChatMessage - ${getErrorMessage(error)}` + ) + } +} + +async function replaceDuplicateIdsForChatMessageFeedback( + queryRunner: 
QueryRunner, + originalData: ExportData, + chatMessageFeedbacks: ChatMessageFeedback[] +) { + try { + const feedbackChatflowIds = chatMessageFeedbacks.map((feedback) => { + return { id: feedback.chatflowid, qty: 0 } + }) + const originalDataChatflowIds = [ + ...originalData.AssistantFlow.map((assistantFlow) => assistantFlow.id), + ...originalData.AgentFlow.map((agentFlow) => agentFlow.id), + ...originalData.AgentFlowV2.map((agentFlowV2) => agentFlowV2.id), + ...originalData.ChatFlow.map((chatFlow) => chatFlow.id) + ] + feedbackChatflowIds.forEach((item) => { + if (originalDataChatflowIds.includes(item.id)) { + item.qty += 1 + } + }) + const databaseChatflowIds = await ( + await queryRunner.manager.find(ChatFlow, { + where: { id: In(feedbackChatflowIds.map((feedbackChatflowId) => feedbackChatflowId.id)) } + }) + ).map((chatflow) => chatflow.id) + feedbackChatflowIds.forEach((item) => { + if (databaseChatflowIds.includes(item.id)) { + item.qty += 1 + } + }) + + const feedbackMessageIds = chatMessageFeedbacks.map((feedback) => { + return { id: feedback.messageId, qty: 0 } + }) + const originalDataMessageIds = originalData.ChatMessage.map((chatMessage) => chatMessage.id) + feedbackMessageIds.forEach((item) => { + if (originalDataMessageIds.includes(item.id)) { + item.qty += 1 + } + }) + const databaseMessageIds = await ( + await queryRunner.manager.find(ChatMessage, { + where: { id: In(feedbackMessageIds.map((feedbackMessageId) => feedbackMessageId.id)) } + }) + ).map((chatMessage) => chatMessage.id) + feedbackMessageIds.forEach((item) => { + if (databaseMessageIds.includes(item.id)) { + item.qty += 1 + } + }) + + const missingChatflowIds = feedbackChatflowIds.filter((item) => item.qty === 0).map((item) => item.id) + const missingMessageIds = feedbackMessageIds.filter((item) => item.qty === 0).map((item) => item.id) + + if (missingChatflowIds.length > 0 || missingMessageIds.length > 0) { + chatMessageFeedbacks = chatMessageFeedbacks.filter( + (feedback) => 
!missingChatflowIds.includes(feedback.chatflowid) && !missingMessageIds.includes(feedback.messageId) + ) + originalData.ChatMessageFeedback = chatMessageFeedbacks + } + + const ids = chatMessageFeedbacks.map((chatMessageFeedback) => chatMessageFeedback.id) + const records = await queryRunner.manager.find(ChatMessageFeedback, { + where: { id: In(ids) } + }) + if (records.length < 0) return originalData + for (let record of records) { + const oldId = record.id + const newId = uuidv4() + originalData = JSON.parse(JSON.stringify(originalData).replaceAll(oldId, newId)) + } + return originalData + } catch (error) { + throw new InternalFlowiseError( + StatusCodes.INTERNAL_SERVER_ERROR, + `Error: exportImportService.replaceDuplicateIdsForChatMessageFeedback - ${getErrorMessage(error)}` + ) + } +} + +async function replaceDuplicateIdsForCustomTemplate(queryRunner: QueryRunner, originalData: ExportData, customTemplates: CustomTemplate[]) { + try { + const ids = customTemplates.map((customTemplate) => customTemplate.id) + const records = await queryRunner.manager.find(CustomTemplate, { + where: { id: In(ids) } + }) + if (records.length < 0) return originalData + for (let record of records) { + const oldId = record.id + const newId = uuidv4() + originalData = JSON.parse(JSON.stringify(originalData).replaceAll(oldId, newId)) + } + return originalData + } catch (error) { + throw new InternalFlowiseError( + StatusCodes.INTERNAL_SERVER_ERROR, + `Error: exportImportService.replaceDuplicateIdsForCustomTemplate - ${getErrorMessage(error)}` + ) + } +} + +async function replaceDuplicateIdsForDocumentStore(queryRunner: QueryRunner, originalData: ExportData, documentStores: DocumentStore[]) { + try { + const ids = documentStores.map((documentStore) => documentStore.id) + const records = await queryRunner.manager.find(DocumentStore, { + where: { id: In(ids) } + }) + if (records.length < 0) return originalData + for (let record of records) { + const oldId = record.id + const newId = 
uuidv4() + originalData = JSON.parse(JSON.stringify(originalData).replaceAll(oldId, newId)) + } + return originalData + } catch (error) { + throw new InternalFlowiseError( + StatusCodes.INTERNAL_SERVER_ERROR, + `Error: exportImportService.replaceDuplicateIdsForDocumentStore - ${getErrorMessage(error)}` + ) + } +} + +async function replaceDuplicateIdsForDocumentStoreFileChunk( + queryRunner: QueryRunner, + originalData: ExportData, + documentStoreFileChunks: DocumentStoreFileChunk[] +) { + try { + const ids = documentStoreFileChunks.map((documentStoreFileChunk) => documentStoreFileChunk.id) + const records = await queryRunner.manager.find(DocumentStoreFileChunk, { + where: { id: In(ids) } + }) + if (records.length < 0) return originalData + for (let record of records) { + const oldId = record.id + const newId = uuidv4() + originalData = JSON.parse(JSON.stringify(originalData).replaceAll(oldId, newId)) + } + return originalData + } catch (error) { + throw new InternalFlowiseError( + StatusCodes.INTERNAL_SERVER_ERROR, + `Error: exportImportService.replaceDuplicateIdsForDocumentStoreFileChunk - ${getErrorMessage(error)}` + ) + } +} + +async function replaceDuplicateIdsForTool(queryRunner: QueryRunner, originalData: ExportData, tools: Tool[]) { + try { + const ids = tools.map((tool) => tool.id) + const records = await queryRunner.manager.find(Tool, { + where: { id: In(ids) } + }) + if (records.length < 0) return originalData + for (let record of records) { + const oldId = record.id + const newId = uuidv4() + originalData = JSON.parse(JSON.stringify(originalData).replaceAll(oldId, newId)) + } + return originalData + } catch (error) { + throw new InternalFlowiseError( + StatusCodes.INTERNAL_SERVER_ERROR, + `Error: exportImportService.replaceDuplicateIdsForTool - ${getErrorMessage(error)}` + ) + } +} + +async function replaceDuplicateIdsForVariable(queryRunner: QueryRunner, originalData: ExportData, variables: Variable[]) { + try { + const ids = variables.map((variable) => 
variable.id) + const records = await queryRunner.manager.find(Variable, { + where: { id: In(ids) } + }) + if (records.length < 0) return originalData + for (let record of records) { + const oldId = record.id + const newId = uuidv4() + originalData = JSON.parse(JSON.stringify(originalData).replaceAll(oldId, newId)) + } + return originalData + } catch (error) { + throw new InternalFlowiseError( + StatusCodes.INTERNAL_SERVER_ERROR, + `Error: exportImportService.replaceDuplicateIdsForVariable - ${getErrorMessage(error)}` + ) + } +} + +async function replaceDuplicateIdsForExecution(queryRunner: QueryRunner, originalData: ExportData, executions: Execution[]) { + try { + const ids = executions.map((execution) => execution.id) + const records = await queryRunner.manager.find(Execution, { + where: { id: In(ids) } + }) + if (records.length < 0) return originalData + for (let record of records) { + const oldId = record.id + const newId = uuidv4() + originalData = JSON.parse(JSON.stringify(originalData).replaceAll(oldId, newId)) + } + return originalData + } catch (error) { + throw new InternalFlowiseError( + StatusCodes.INTERNAL_SERVER_ERROR, + `Error: exportImportService.replaceDuplicateIdsForExecution - ${getErrorMessage(error)}` + ) + } +} + +function reduceSpaceForChatflowFlowData(chatflows: ChatFlow[]) { + return chatflows.map((chatflow) => { + return { ...chatflow, flowData: JSON.stringify(JSON.parse(chatflow.flowData)) } + }) +} + +const importData = async (importData: ExportData) => { + // Initialize missing properties with empty arrays to avoid "undefined" errors + importData.AgentFlow = importData.AgentFlow || [] + importData.AgentFlowV2 = importData.AgentFlowV2 || [] + importData.AssistantCustom = importData.AssistantCustom || [] + importData.AssistantFlow = importData.AssistantFlow || [] + importData.AssistantOpenAI = importData.AssistantOpenAI || [] + importData.AssistantAzure = importData.AssistantAzure || [] + importData.ChatFlow = importData.ChatFlow || [] + 
importData.ChatMessage = importData.ChatMessage || [] + importData.ChatMessageFeedback = importData.ChatMessageFeedback || [] + importData.CustomTemplate = importData.CustomTemplate || [] + importData.DocumentStore = importData.DocumentStore || [] + importData.DocumentStoreFileChunk = importData.DocumentStoreFileChunk || [] + importData.Execution = importData.Execution || [] + importData.Tool = importData.Tool || [] + importData.Variable = importData.Variable || [] + + let queryRunner + try { + queryRunner = getRunningExpressApp().AppDataSource.createQueryRunner() + await queryRunner.connect() try { + if (importData.AgentFlow.length > 0) { + importData.AgentFlow = reduceSpaceForChatflowFlowData(importData.AgentFlow) + importData = await replaceDuplicateIdsForChatFlow(queryRunner, importData, importData.AgentFlow) + } + if (importData.AgentFlowV2.length > 0) { + importData.AgentFlowV2 = reduceSpaceForChatflowFlowData(importData.AgentFlowV2) + importData = await replaceDuplicateIdsForChatFlow(queryRunner, importData, importData.AgentFlowV2) + } + if (importData.AssistantCustom.length > 0) + importData = await replaceDuplicateIdsForAssistant(queryRunner, importData, importData.AssistantCustom) + if (importData.AssistantFlow.length > 0) { + importData.AssistantFlow = reduceSpaceForChatflowFlowData(importData.AssistantFlow) + importData = await replaceDuplicateIdsForChatFlow(queryRunner, importData, importData.AssistantFlow) + } + if (importData.AssistantOpenAI.length > 0) + importData = await replaceDuplicateIdsForAssistant(queryRunner, importData, importData.AssistantOpenAI) + if (importData.AssistantAzure.length > 0) + importData = await replaceDuplicateIdsForAssistant(queryRunner, importData, importData.AssistantAzure) + if (importData.ChatFlow.length > 0) { + importData.ChatFlow = reduceSpaceForChatflowFlowData(importData.ChatFlow) + importData = await replaceDuplicateIdsForChatFlow(queryRunner, importData, importData.ChatFlow) + } + if 
(importData.ChatMessage.length > 0) { + importData = await replaceDuplicateIdsForChatMessage(queryRunner, importData, importData.ChatMessage) + importData = await replaceExecutionIdForChatMessage(queryRunner, importData, importData.ChatMessage) + } + if (importData.ChatMessageFeedback.length > 0) + importData = await replaceDuplicateIdsForChatMessageFeedback(queryRunner, importData, importData.ChatMessageFeedback) + if (importData.CustomTemplate.length > 0) + importData = await replaceDuplicateIdsForCustomTemplate(queryRunner, importData, importData.CustomTemplate) + if (importData.DocumentStore.length > 0) + importData = await replaceDuplicateIdsForDocumentStore(queryRunner, importData, importData.DocumentStore) + if (importData.DocumentStoreFileChunk.length > 0) + importData = await replaceDuplicateIdsForDocumentStoreFileChunk(queryRunner, importData, importData.DocumentStoreFileChunk) + if (importData.Tool.length > 0) importData = await replaceDuplicateIdsForTool(queryRunner, importData, importData.Tool) + if (importData.Execution.length > 0) + importData = await replaceDuplicateIdsForExecution(queryRunner, importData, importData.Execution) + if (importData.Variable.length > 0) + importData = await replaceDuplicateIdsForVariable(queryRunner, importData, importData.Variable) + await queryRunner.startTransaction() - if (importData.Tool.length > 0) await toolsService.importTools(importData.Tool, queryRunner) - if (importData.ChatFlow.length > 0) await chatflowService.importChatflows(importData.ChatFlow, queryRunner) - if (importData.AgentFlow.length > 0) await chatflowService.importChatflows(importData.AgentFlow, queryRunner) - if (importData.Variable.length > 0) await variableService.importVariables(importData.Variable, queryRunner) - if (importData.Assistant.length > 0) await assistantService.importAssistants(importData.Assistant, queryRunner) + if (importData.AgentFlow.length > 0) await queryRunner.manager.save(ChatFlow, importData.AgentFlow) + if 
(importData.AgentFlowV2.length > 0) await queryRunner.manager.save(ChatFlow, importData.AgentFlowV2) + if (importData.AssistantFlow.length > 0) await queryRunner.manager.save(ChatFlow, importData.AssistantFlow) + if (importData.AssistantCustom.length > 0) await queryRunner.manager.save(Assistant, importData.AssistantCustom) + if (importData.AssistantOpenAI.length > 0) await queryRunner.manager.save(Assistant, importData.AssistantOpenAI) + if (importData.AssistantAzure.length > 0) await queryRunner.manager.save(Assistant, importData.AssistantAzure) + if (importData.ChatFlow.length > 0) await queryRunner.manager.save(ChatFlow, importData.ChatFlow) + if (importData.ChatMessage.length > 0) await queryRunner.manager.save(ChatMessage, importData.ChatMessage) + if (importData.ChatMessageFeedback.length > 0) + await queryRunner.manager.save(ChatMessageFeedback, importData.ChatMessageFeedback) + if (importData.CustomTemplate.length > 0) await queryRunner.manager.save(CustomTemplate, importData.CustomTemplate) + if (importData.DocumentStore.length > 0) await queryRunner.manager.save(DocumentStore, importData.DocumentStore) + if (importData.DocumentStoreFileChunk.length > 0) + await queryRunner.manager.save(DocumentStoreFileChunk, importData.DocumentStoreFileChunk) + if (importData.Tool.length > 0) await queryRunner.manager.save(Tool, importData.Tool) + if (importData.Execution.length > 0) await queryRunner.manager.save(Execution, importData.Execution) + if (importData.Variable.length > 0) await queryRunner.manager.save(Variable, importData.Variable) await queryRunner.commitTransaction() } catch (error) { - await queryRunner.rollbackTransaction() + if (queryRunner && !queryRunner.isTransactionActive) await queryRunner.rollbackTransaction() throw error } finally { - if (!queryRunner.isReleased) { - await queryRunner.release() - } + if (queryRunner && !queryRunner.isReleased) await queryRunner.release() } } catch (error) { throw new InternalFlowiseError( diff --git 
a/packages/server/src/services/feedback/validation.ts b/packages/server/src/services/feedback/validation.ts new file mode 100644 index 000000000..03db24ec9 --- /dev/null +++ b/packages/server/src/services/feedback/validation.ts @@ -0,0 +1,127 @@ +import { StatusCodes } from 'http-status-codes' +import { IChatMessageFeedback } from '../../Interface' +import { InternalFlowiseError } from '../../errors/internalFlowiseError' +import { getRunningExpressApp } from '../../utils/getRunningExpressApp' +import { ChatMessage } from '../../database/entities/ChatMessage' +import { ChatMessageFeedback } from '../../database/entities/ChatMessageFeedback' + +/** + * Validates that the message ID exists + * @param {string} messageId + */ +export const validateMessageExists = async (messageId: string): Promise => { + const appServer = getRunningExpressApp() + const message = await appServer.AppDataSource.getRepository(ChatMessage).findOne({ + where: { id: messageId } + }) + + if (!message) { + throw new InternalFlowiseError(StatusCodes.NOT_FOUND, `Message with ID ${messageId} not found`) + } + + return message +} + +/** + * Validates that the feedback ID exists + * @param {string} feedbackId + */ +export const validateFeedbackExists = async (feedbackId: string): Promise => { + const appServer = getRunningExpressApp() + const feedbackExists = await appServer.AppDataSource.getRepository(ChatMessageFeedback).findOne({ + where: { id: feedbackId } + }) + + if (!feedbackExists) { + throw new InternalFlowiseError(StatusCodes.NOT_FOUND, `Feedback with ID ${feedbackId} not found`) + } + + return feedbackExists +} + +/** + * Validates a feedback object for creation + * @param {Partial} feedback + */ +export const validateFeedbackForCreation = async (feedback: Partial): Promise> => { + // If messageId is provided, validate it exists and get the message + let message: ChatMessage | null = null + if (feedback.messageId) { + message = await validateMessageExists(feedback.messageId) + } else { + 
throw new InternalFlowiseError(StatusCodes.BAD_REQUEST, 'Message ID is required') + } + + // If chatId is provided, validate it matches the message's chatId + if (feedback.chatId) { + if (message.chatId !== feedback.chatId) { + throw new InternalFlowiseError( + StatusCodes.BAD_REQUEST, + `Inconsistent chat ID: message with ID ${message.id} does not belong to chat with ID ${feedback.chatId}` + ) + } + } else { + // If not provided, use the message's chatId + feedback.chatId = message.chatId + } + + // If chatflowid is provided, validate it matches the message's chatflowid + if (feedback.chatflowid) { + if (message.chatflowid !== feedback.chatflowid) { + throw new InternalFlowiseError( + StatusCodes.BAD_REQUEST, + `Inconsistent chatflow ID: message with ID ${message.id} does not belong to chatflow with ID ${feedback.chatflowid}` + ) + } + } else { + // If not provided, use the message's chatflowid + feedback.chatflowid = message.chatflowid + } + + return feedback +} + +/** + * Validates a feedback object for update + * @param {string} feedbackId + * @param {Partial} feedback + */ +export const validateFeedbackForUpdate = async ( + feedbackId: string, + feedback: Partial +): Promise> => { + // First validate the feedback exists + const existingFeedback = await validateFeedbackExists(feedbackId) + + feedback.messageId = feedback.messageId ?? existingFeedback.messageId + feedback.chatId = feedback.chatId ?? existingFeedback.chatId + feedback.chatflowid = feedback.chatflowid ?? 
existingFeedback.chatflowid + + // If messageId is provided, validate it exists and get the message + let message: ChatMessage | null = null + if (feedback.messageId) { + message = await validateMessageExists(feedback.messageId) + } + + // If chatId is provided and we have a message, validate it matches the message's chatId + if (feedback.chatId) { + if (message?.chatId !== feedback.chatId) { + throw new InternalFlowiseError( + StatusCodes.BAD_REQUEST, + `Inconsistent chat ID: message with ID ${message?.id} does not belong to chat with ID ${feedback.chatId}` + ) + } + } + + // If chatflowid is provided and we have a message, validate it matches the message's chatflowid + if (feedback.chatflowid && message) { + if (message?.chatflowid !== feedback.chatflowid) { + throw new InternalFlowiseError( + StatusCodes.BAD_REQUEST, + `Inconsistent chatflow ID: message with ID ${message?.id} does not belong to chatflow with ID ${feedback.chatflowid}` + ) + } + } + + return feedback +} diff --git a/packages/server/src/services/marketplaces/index.ts b/packages/server/src/services/marketplaces/index.ts index ee5d13132..33bc54920 100644 --- a/packages/server/src/services/marketplaces/index.ts +++ b/packages/server/src/services/marketplaces/index.ts @@ -7,6 +7,7 @@ import { IReactFlowEdge, IReactFlowNode } from '../../Interface' import { getRunningExpressApp } from '../../utils/getRunningExpressApp' import { DeleteResult } from 'typeorm' import { CustomTemplate } from '../../database/entities/CustomTemplate' +import { v4 as uuidv4 } from 'uuid' import chatflowsService from '../chatflows' @@ -29,13 +30,13 @@ const getAllTemplates = async () => { let marketplaceDir = path.join(__dirname, '..', '..', '..', 'marketplaces', 'chatflows') let jsonsInDir = fs.readdirSync(marketplaceDir).filter((file) => path.extname(file) === '.json') let templates: any[] = [] - jsonsInDir.forEach((file, index) => { + jsonsInDir.forEach((file) => { const filePath = path.join(__dirname, '..', '..', '..', 
'marketplaces', 'chatflows', file) const fileData = fs.readFileSync(filePath) const fileDataObj = JSON.parse(fileData.toString()) as ITemplate const template = { - id: index, + id: uuidv4(), templateName: file.split('.json')[0], flowData: fileData.toString(), badge: fileDataObj?.badge, @@ -50,13 +51,13 @@ const getAllTemplates = async () => { marketplaceDir = path.join(__dirname, '..', '..', '..', 'marketplaces', 'tools') jsonsInDir = fs.readdirSync(marketplaceDir).filter((file) => path.extname(file) === '.json') - jsonsInDir.forEach((file, index) => { + jsonsInDir.forEach((file) => { const filePath = path.join(__dirname, '..', '..', '..', 'marketplaces', 'tools', file) const fileData = fs.readFileSync(filePath) const fileDataObj = JSON.parse(fileData.toString()) const template = { ...fileDataObj, - id: index, + id: uuidv4(), type: 'Tool', framework: fileDataObj?.framework, badge: fileDataObj?.badge, @@ -69,12 +70,12 @@ const getAllTemplates = async () => { marketplaceDir = path.join(__dirname, '..', '..', '..', 'marketplaces', 'agentflows') jsonsInDir = fs.readdirSync(marketplaceDir).filter((file) => path.extname(file) === '.json') - jsonsInDir.forEach((file, index) => { + jsonsInDir.forEach((file) => { const filePath = path.join(__dirname, '..', '..', '..', 'marketplaces', 'agentflows', file) const fileData = fs.readFileSync(filePath) const fileDataObj = JSON.parse(fileData.toString()) const template = { - id: index, + id: uuidv4(), templateName: file.split('.json')[0], flowData: fileData.toString(), badge: fileDataObj?.badge, @@ -86,6 +87,26 @@ const getAllTemplates = async () => { } templates.push(template) }) + + marketplaceDir = path.join(__dirname, '..', '..', '..', 'marketplaces', 'agentflowsv2') + jsonsInDir = fs.readdirSync(marketplaceDir).filter((file) => path.extname(file) === '.json') + jsonsInDir.forEach((file) => { + const filePath = path.join(__dirname, '..', '..', '..', 'marketplaces', 'agentflowsv2', file) + const fileData = 
fs.readFileSync(filePath) + const fileDataObj = JSON.parse(fileData.toString()) + const template = { + id: uuidv4(), + templateName: file.split('.json')[0], + flowData: fileData.toString(), + badge: fileDataObj?.badge, + framework: fileDataObj?.framework, + usecases: fileDataObj?.usecases, + categories: getCategories(fileDataObj), + type: 'AgentflowV2', + description: fileDataObj?.description || '' + } + templates.push(template) + }) const sortedTemplates = templates.sort((a, b) => a.templateName.localeCompare(b.templateName)) const FlowiseDocsQnAIndex = sortedTemplates.findIndex((tmp) => tmp.templateName === 'Flowise Docs QnA') if (FlowiseDocsQnAIndex > 0) { @@ -200,6 +221,9 @@ const _generateExportFlowData = (flowData: any) => { version: node.data.version, name: node.data.name, type: node.data.type, + color: node.data.color, + hideOutput: node.data.hideOutput, + hideInput: node.data.hideInput, baseClasses: node.data.baseClasses, tags: node.data.tags, category: node.data.category, diff --git a/packages/server/src/services/nodes/index.ts b/packages/server/src/services/nodes/index.ts index 25838de9c..f0e8b3f53 100644 --- a/packages/server/src/services/nodes/index.ts +++ b/packages/server/src/services/nodes/index.ts @@ -1,12 +1,14 @@ -import { cloneDeep } from 'lodash' +import { cloneDeep, omit } from 'lodash' import { StatusCodes } from 'http-status-codes' import { getRunningExpressApp } from '../../utils/getRunningExpressApp' -import { INodeData } from '../../Interface' -import { INodeOptionsValue, ICommonObject, handleEscapeCharacters } from 'flowise-components' +import { INodeData, MODE } from '../../Interface' +import { INodeOptionsValue } from 'flowise-components' import { databaseEntities } from '../../utils' import logger from '../../utils/logger' import { InternalFlowiseError } from '../../errors/internalFlowiseError' import { getErrorMessage } from '../../errors/utils' +import { OMIT_QUEUE_JOB_DATA } from '../../utils/constants' +import { 
executeCustomNodeFunction } from '../../utils/executeCustomNodeFunction' // Get all component nodes const getAllNodes = async () => { @@ -97,7 +99,10 @@ const getSingleNodeAsyncOptions = async (nodeName: string, requestBody: any): Pr const dbResponse: INodeOptionsValue[] = await nodeInstance.loadMethods![methodName]!.call(nodeInstance, nodeData, { appDataSource: appServer.AppDataSource, - databaseEntities: databaseEntities + databaseEntities: databaseEntities, + componentNodes: appServer.nodesPool.componentNodes, + previousNodes: requestBody.previousNodes, + currentNode: requestBody.currentNode }) return dbResponse @@ -117,47 +122,29 @@ const getSingleNodeAsyncOptions = async (nodeName: string, requestBody: any): Pr // execute custom function node const executeCustomFunction = async (requestBody: any) => { - try { - const appServer = getRunningExpressApp() - const body = requestBody - const functionInputVariables = Object.fromEntries( - [...(body?.javascriptFunction ?? '').matchAll(/\$([a-zA-Z0-9_]+)/g)].map((g) => [g[1], undefined]) - ) - if (functionInputVariables && Object.keys(functionInputVariables).length) { - for (const key in functionInputVariables) { - if (key.includes('vars')) { - delete functionInputVariables[key] - } - } + const appServer = getRunningExpressApp() + const executeData = { + appDataSource: appServer.AppDataSource, + componentNodes: appServer.nodesPool.componentNodes, + data: requestBody, + isExecuteCustomFunction: true + } + + if (process.env.MODE === MODE.QUEUE) { + const predictionQueue = appServer.queueManager.getQueue('prediction') + + const job = await predictionQueue.addJob(omit(executeData, OMIT_QUEUE_JOB_DATA)) + logger.debug(`[server]: Execute Custom Function Job added to queue: ${job.id}`) + + const queueEvents = predictionQueue.getQueueEvents() + const result = await job.waitUntilFinished(queueEvents) + if (!result) { + throw new Error('Failed to execute custom function') } - const nodeData = { inputs: { functionInputVariables, 
...body } } - if (Object.prototype.hasOwnProperty.call(appServer.nodesPool.componentNodes, 'customFunction')) { - try { - const nodeInstanceFilePath = appServer.nodesPool.componentNodes['customFunction'].filePath as string - const nodeModule = await import(nodeInstanceFilePath) - const newNodeInstance = new nodeModule.nodeClass() - const options: ICommonObject = { - appDataSource: appServer.AppDataSource, - databaseEntities, - logger - } - - const returnData = await newNodeInstance.init(nodeData, '', options) - const dbResponse = typeof returnData === 'string' ? handleEscapeCharacters(returnData, true) : returnData - - return dbResponse - } catch (error) { - throw new InternalFlowiseError(StatusCodes.INTERNAL_SERVER_ERROR, `Error running custom function: ${error}`) - } - } else { - throw new InternalFlowiseError(StatusCodes.NOT_FOUND, `Node customFunction not found`) - } - } catch (error) { - throw new InternalFlowiseError( - StatusCodes.INTERNAL_SERVER_ERROR, - `Error: nodesService.executeCustomFunction - ${getErrorMessage(error)}` - ) + return result + } else { + return await executeCustomNodeFunction(executeData) } } diff --git a/packages/server/src/services/openai-assistants-vector-store/index.ts b/packages/server/src/services/openai-assistants-vector-store/index.ts index 671e18d90..c7b082620 100644 --- a/packages/server/src/services/openai-assistants-vector-store/index.ts +++ b/packages/server/src/services/openai-assistants-vector-store/index.ts @@ -24,7 +24,7 @@ const getAssistantVectorStore = async (credentialId: string, vectorStoreId: stri } const openai = new OpenAI({ apiKey: openAIApiKey }) - const dbResponse = await openai.beta.vectorStores.retrieve(vectorStoreId) + const dbResponse = await openai.vectorStores.retrieve(vectorStoreId) return dbResponse } catch (error) { throw new InternalFlowiseError( @@ -51,7 +51,7 @@ const listAssistantVectorStore = async (credentialId: string) => { } const openai = new OpenAI({ apiKey: openAIApiKey }) - const 
dbResponse = await openai.beta.vectorStores.list() + const dbResponse = await openai.vectorStores.list() return dbResponse.data } catch (error) { throw new InternalFlowiseError( @@ -61,7 +61,7 @@ const listAssistantVectorStore = async (credentialId: string) => { } } -const createAssistantVectorStore = async (credentialId: string, obj: OpenAI.Beta.VectorStores.VectorStoreCreateParams) => { +const createAssistantVectorStore = async (credentialId: string, obj: OpenAI.VectorStores.VectorStoreCreateParams) => { try { const appServer = getRunningExpressApp() const credential = await appServer.AppDataSource.getRepository(Credential).findOneBy({ @@ -78,7 +78,7 @@ const createAssistantVectorStore = async (credentialId: string, obj: OpenAI.Beta } const openai = new OpenAI({ apiKey: openAIApiKey }) - const dbResponse = await openai.beta.vectorStores.create(obj) + const dbResponse = await openai.vectorStores.create(obj) return dbResponse } catch (error) { throw new InternalFlowiseError( @@ -91,7 +91,7 @@ const createAssistantVectorStore = async (credentialId: string, obj: OpenAI.Beta const updateAssistantVectorStore = async ( credentialId: string, vectorStoreId: string, - obj: OpenAI.Beta.VectorStores.VectorStoreUpdateParams + obj: OpenAI.VectorStores.VectorStoreUpdateParams ) => { try { const appServer = getRunningExpressApp() @@ -109,8 +109,8 @@ const updateAssistantVectorStore = async ( } const openai = new OpenAI({ apiKey: openAIApiKey }) - const dbResponse = await openai.beta.vectorStores.update(vectorStoreId, obj) - const vectorStoreFiles = await openai.beta.vectorStores.files.list(vectorStoreId) + const dbResponse = await openai.vectorStores.update(vectorStoreId, obj) + const vectorStoreFiles = await openai.vectorStores.files.list(vectorStoreId) if (vectorStoreFiles.data?.length) { const files = [] for (const file of vectorStoreFiles.data) { @@ -145,7 +145,7 @@ const deleteAssistantVectorStore = async (credentialId: string, vectorStoreId: s } const openai = new OpenAI({ 
apiKey: openAIApiKey }) - const dbResponse = await openai.beta.vectorStores.del(vectorStoreId) + const dbResponse = await openai.vectorStores.del(vectorStoreId) return dbResponse } catch (error) { throw new InternalFlowiseError( @@ -190,7 +190,7 @@ const uploadFilesToAssistantVectorStore = async ( const file_ids = [...uploadedFiles.map((file) => file.id)] - const res = await openai.beta.vectorStores.fileBatches.createAndPoll(vectorStoreId, { + const res = await openai.vectorStores.fileBatches.createAndPoll(vectorStoreId, { file_ids }) if (res.status === 'completed' && res.file_counts.completed === uploadedFiles.length) return uploadedFiles @@ -232,7 +232,7 @@ const deleteFilesFromAssistantVectorStore = async (credentialId: string, vectorS const deletedFileIds = [] let count = 0 for (const file of file_ids) { - const res = await openai.beta.vectorStores.files.del(vectorStoreId, file) + const res = await openai.vectorStores.files.del(vectorStoreId, file) if (res.deleted) { deletedFileIds.push(file) count += 1 diff --git a/packages/server/src/services/openai-assistants/index.ts b/packages/server/src/services/openai-assistants/index.ts index e842b0456..e9ccf43d4 100644 --- a/packages/server/src/services/openai-assistants/index.ts +++ b/packages/server/src/services/openai-assistants/index.ts @@ -68,10 +68,10 @@ const getSingleOpenaiAssistant = async (credentialId: string, assistantId: strin if (dbResponse.tool_resources?.file_search?.vector_store_ids?.length) { // Since there can only be 1 vector store per assistant const vectorStoreId = dbResponse.tool_resources.file_search.vector_store_ids[0] - const vectorStoreFiles = await openai.beta.vectorStores.files.list(vectorStoreId) + const vectorStoreFiles = await openai.vectorStores.files.list(vectorStoreId) const fileIds = vectorStoreFiles.data?.map((file) => file.id) ?? 
[] ;(dbResponse.tool_resources.file_search as any).files = [...existingFiles.filter((file) => fileIds.includes(file.id))] - ;(dbResponse.tool_resources.file_search as any).vector_store_object = await openai.beta.vectorStores.retrieve(vectorStoreId) + ;(dbResponse.tool_resources.file_search as any).vector_store_object = await openai.vectorStores.retrieve(vectorStoreId) } return dbResponse } catch (error) { diff --git a/packages/server/src/services/tools/index.ts b/packages/server/src/services/tools/index.ts index 9ba60d8b3..0dbf69b7f 100644 --- a/packages/server/src/services/tools/index.ts +++ b/packages/server/src/services/tools/index.ts @@ -6,6 +6,7 @@ import { getAppVersion } from '../../utils' import { getRunningExpressApp } from '../../utils/getRunningExpressApp' import { FLOWISE_METRIC_COUNTERS, FLOWISE_COUNTER_STATUS } from '../../Interface.Metrics' import { QueryRunner } from 'typeorm' +import { validate } from 'uuid' const createTool = async (requestBody: any): Promise => { try { @@ -84,6 +85,12 @@ const updateTool = async (toolId: string, toolBody: any): Promise => { const importTools = async (newTools: Partial[], queryRunner?: QueryRunner) => { try { + for (const data of newTools) { + if (data.id && !validate(data.id)) { + throw new InternalFlowiseError(StatusCodes.PRECONDITION_FAILED, `Error: importTools - invalid id!`) + } + } + const appServer = getRunningExpressApp() const repository = queryRunner ? 
queryRunner.manager.getRepository(Tool) : appServer.AppDataSource.getRepository(Tool) diff --git a/packages/server/src/services/validation/index.ts b/packages/server/src/services/validation/index.ts new file mode 100644 index 000000000..5ac4ea528 --- /dev/null +++ b/packages/server/src/services/validation/index.ts @@ -0,0 +1,326 @@ +import { StatusCodes } from 'http-status-codes' +import { InternalFlowiseError } from '../../errors/internalFlowiseError' +import { getErrorMessage } from '../../errors/utils' +import { getRunningExpressApp } from '../../utils/getRunningExpressApp' +import { ChatFlow } from '../../database/entities/ChatFlow' +import { INodeParams } from 'flowise-components' +import { IReactFlowEdge, IReactFlowNode } from '../../Interface' + +interface IValidationResult { + id: string + label: string + name: string + issues: string[] +} + +const checkFlowValidation = async (flowId: string): Promise => { + try { + const appServer = getRunningExpressApp() + + const componentNodes = appServer.nodesPool.componentNodes + + const flow = await appServer.AppDataSource.getRepository(ChatFlow).findOne({ + where: { + id: flowId + } + }) + + if (!flow) { + throw new InternalFlowiseError(StatusCodes.NOT_FOUND, `Error: validationService.checkFlowValidation - flow not found!`) + } + + const flowData = JSON.parse(flow.flowData) + const nodes = flowData.nodes + const edges = flowData.edges + + // Store validation results + const validationResults = [] + + // Create a map of connected nodes + const connectedNodes = new Set() + edges.forEach((edge: IReactFlowEdge) => { + connectedNodes.add(edge.source) + connectedNodes.add(edge.target) + }) + + // Validate each node + for (const node of nodes) { + if (node.data.name === 'stickyNoteAgentflow') continue + + const nodeIssues = [] + + // Check if node is connected + if (!connectedNodes.has(node.id)) { + nodeIssues.push('This node is not connected to anything') + } + + // Validate input parameters + if (node.data && 
node.data.inputParams && node.data.inputs) { + for (const param of node.data.inputParams) { + // Skip validation if the parameter has show condition that doesn't match + if (param.show) { + let shouldShow = true + for (const [key, value] of Object.entries(param.show)) { + if (node.data.inputs[key] !== value) { + shouldShow = false + break + } + } + if (!shouldShow) continue + } + + // Skip validation if the parameter has hide condition that matches + if (param.hide) { + let shouldHide = true + for (const [key, value] of Object.entries(param.hide)) { + if (node.data.inputs[key] !== value) { + shouldHide = false + break + } + } + if (shouldHide) continue + } + + // Check if required parameter has a value + if (!param.optional) { + const inputValue = node.data.inputs[param.name] + if (inputValue === undefined || inputValue === null || inputValue === '') { + nodeIssues.push(`${param.label} is required`) + } + } + + // Check array type parameters (even if the array itself is optional) + if (param.type === 'array' && Array.isArray(node.data.inputs[param.name])) { + const inputValue = node.data.inputs[param.name] + + // Only validate non-empty arrays (if array is required but empty, it's caught above) + if (inputValue.length > 0) { + // Check each item in the array + inputValue.forEach((item: Record, index: number) => { + if (param.array) { + param.array.forEach((arrayParam: INodeParams) => { + // Evaluate if this parameter should be shown based on current values + // First check show conditions + let shouldValidate = true + + if (arrayParam.show) { + // Default to not showing unless conditions match + shouldValidate = false + + // Each key in show is a condition that must be satisfied + for (const [conditionKey, expectedValue] of Object.entries(arrayParam.show)) { + const isIndexCondition = conditionKey.includes('$index') + let actualValue + + if (isIndexCondition) { + // Replace $index with actual index and evaluate + const normalizedKey = 
conditionKey.replace(/conditions\[\$index\]\.(\w+)/, '$1') + actualValue = item[normalizedKey] + } else { + // Direct property in the current item + actualValue = item[conditionKey] + } + + // Check if condition is satisfied + let conditionMet = false + if (Array.isArray(expectedValue)) { + conditionMet = expectedValue.includes(actualValue) + } else { + conditionMet = actualValue === expectedValue + } + + if (conditionMet) { + shouldValidate = true + break // One matching condition is enough + } + } + } + + // Then check hide conditions (they override show conditions) + if (shouldValidate && arrayParam.hide) { + for (const [conditionKey, expectedValue] of Object.entries(arrayParam.hide)) { + const isIndexCondition = conditionKey.includes('$index') + let actualValue + + if (isIndexCondition) { + // Replace $index with actual index and evaluate + const normalizedKey = conditionKey.replace(/conditions\[\$index\]\.(\w+)/, '$1') + actualValue = item[normalizedKey] + } else { + // Direct property in the current item + actualValue = item[conditionKey] + } + + // Check if hide condition is met + let shouldHide = false + if (Array.isArray(expectedValue)) { + shouldHide = expectedValue.includes(actualValue) + } else { + shouldHide = actualValue === expectedValue + } + + if (shouldHide) { + shouldValidate = false + break // One matching hide condition is enough to hide + } + } + } + + // Only validate if field should be shown + if (shouldValidate) { + // Check if value is required and missing + if ( + (arrayParam.optional === undefined || !arrayParam.optional) && + (item[arrayParam.name] === undefined || + item[arrayParam.name] === null || + item[arrayParam.name] === '' || + item[arrayParam.name] === '

') + ) { + nodeIssues.push(`${param.label} item #${index + 1}: ${arrayParam.label} is required`) + } + } + }) + } + }) + } + } + + // Check for credential requirements + if (param.name === 'credential' && !param.optional) { + const credentialValue = node.data.inputs[param.name] + if (!credentialValue) { + nodeIssues.push(`Credential is required`) + } + } + + // Check for nested config parameters + const configKey = `${param.name}Config` + if (node.data.inputs[configKey] && node.data.inputs[param.name]) { + const componentName = node.data.inputs[param.name] + const configValue = node.data.inputs[configKey] + + // Check if the component exists in the componentNodes pool + if (componentNodes[componentName] && componentNodes[componentName].inputs) { + const componentInputParams = componentNodes[componentName].inputs + + // Validate each required input parameter in the component + for (const componentParam of componentInputParams) { + // Skip validation if the parameter has show condition that doesn't match + if (componentParam.show) { + let shouldShow = true + for (const [key, value] of Object.entries(componentParam.show)) { + if (configValue[key] !== value) { + shouldShow = false + break + } + } + if (!shouldShow) continue + } + + // Skip validation if the parameter has hide condition that matches + if (componentParam.hide) { + let shouldHide = true + for (const [key, value] of Object.entries(componentParam.hide)) { + if (configValue[key] !== value) { + shouldHide = false + break + } + } + if (shouldHide) continue + } + + if (!componentParam.optional) { + const nestedValue = configValue[componentParam.name] + if (nestedValue === undefined || nestedValue === null || nestedValue === '') { + nodeIssues.push(`${param.label} configuration: ${componentParam.label} is required`) + } + } + } + + // Check for credential requirement in the component + if (componentNodes[componentName].credential && !componentNodes[componentName].credential.optional) { + if 
(!configValue.FLOWISE_CREDENTIAL_ID && !configValue.credential) { + nodeIssues.push(`${param.label} requires a credential`) + } + } + } + } + } + } + + // Add node to validation results if it has issues + if (nodeIssues.length > 0) { + validationResults.push({ + id: node.id, + label: node.data.label, + name: node.data.name, + issues: nodeIssues + }) + } + } + + // Check for hanging edges + for (const edge of edges) { + const sourceExists = nodes.some((node: IReactFlowNode) => node.id === edge.source) + const targetExists = nodes.some((node: IReactFlowEdge) => node.id === edge.target) + + if (!sourceExists || !targetExists) { + // Find the existing node that is connected to this hanging edge + if (!sourceExists && targetExists) { + // Target exists but source doesn't - add issue to target node + const targetNode = nodes.find((node: IReactFlowNode) => node.id === edge.target) + const targetNodeResult = validationResults.find((result) => result.id === edge.target) + + if (targetNodeResult) { + // Add to existing validation result + targetNodeResult.issues.push(`Connected to non-existent source node ${edge.source}`) + } else { + // Create new validation result for this node + validationResults.push({ + id: targetNode.id, + label: targetNode.data.label, + name: targetNode.data.name, + issues: [`Connected to non-existent source node ${edge.source}`] + }) + } + } else if (sourceExists && !targetExists) { + // Source exists but target doesn't - add issue to source node + const sourceNode = nodes.find((node: IReactFlowNode) => node.id === edge.source) + const sourceNodeResult = validationResults.find((result) => result.id === edge.source) + + if (sourceNodeResult) { + // Add to existing validation result + sourceNodeResult.issues.push(`Connected to non-existent target node ${edge.target}`) + } else { + // Create new validation result for this node + validationResults.push({ + id: sourceNode.id, + label: sourceNode.data.label, + name: sourceNode.data.name, + issues: 
[`Connected to non-existent target node ${edge.target}`] + }) + } + } else { + // Both source and target don't exist - create a generic edge issue + validationResults.push({ + id: edge.id, + label: `Edge ${edge.id}`, + name: 'edge', + issues: ['Disconnected edge - both source and target nodes do not exist'] + }) + } + } + } + + return validationResults + } catch (error) { + throw new InternalFlowiseError( + StatusCodes.INTERNAL_SERVER_ERROR, + `Error: validationService.checkFlowValidation - ${getErrorMessage(error)}` + ) + } +} + +export default { + checkFlowValidation +} diff --git a/packages/server/src/services/variables/index.ts b/packages/server/src/services/variables/index.ts index a01d5b3dc..d06e8c6c7 100644 --- a/packages/server/src/services/variables/index.ts +++ b/packages/server/src/services/variables/index.ts @@ -4,6 +4,7 @@ import { Variable } from '../../database/entities/Variable' import { InternalFlowiseError } from '../../errors/internalFlowiseError' import { getErrorMessage } from '../../errors/utils' import { QueryRunner } from 'typeorm' +import { validate } from 'uuid' const createVariable = async (newVariable: Variable) => { try { @@ -76,6 +77,12 @@ const updateVariable = async (variable: Variable, updatedVariable: Variable) => const importVariables = async (newVariables: Partial[], queryRunner?: QueryRunner): Promise => { try { + for (const data of newVariables) { + if (data.id && !validate(data.id)) { + throw new InternalFlowiseError(StatusCodes.PRECONDITION_FAILED, `Error: importVariables - invalid id!`) + } + } + const appServer = getRunningExpressApp() const repository = queryRunner ? 
queryRunner.manager.getRepository(Variable) : appServer.AppDataSource.getRepository(Variable) diff --git a/packages/server/src/utils/SSEStreamer.ts b/packages/server/src/utils/SSEStreamer.ts index a5327bad1..2b950579c 100644 --- a/packages/server/src/utils/SSEStreamer.ts +++ b/packages/server/src/utils/SSEStreamer.ts @@ -99,6 +99,16 @@ export class SSEStreamer implements IServerSideEventStreamer { client.response.write('message:\ndata:' + JSON.stringify(clientResponse) + '\n\n') } } + streamCalledToolsEvent(chatId: string, data: any): void { + const client = this.clients[chatId] + if (client) { + const clientResponse = { + event: 'calledTools', + data: data + } + client.response.write('message:\ndata:' + JSON.stringify(clientResponse) + '\n\n') + } + } streamFileAnnotationsEvent(chatId: string, data: any): void { const client = this.clients[chatId] if (client) { @@ -139,6 +149,36 @@ export class SSEStreamer implements IServerSideEventStreamer { client.response.write('message:\ndata:' + JSON.stringify(clientResponse) + '\n\n') } } + streamAgentFlowEvent(chatId: string, data: any): void { + const client = this.clients[chatId] + if (client) { + const clientResponse = { + event: 'agentFlowEvent', + data: data + } + client.response.write('message:\ndata:' + JSON.stringify(clientResponse) + '\n\n') + } + } + streamAgentFlowExecutedDataEvent(chatId: string, data: any): void { + const client = this.clients[chatId] + if (client) { + const clientResponse = { + event: 'agentFlowExecutedData', + data: data + } + client.response.write('message:\ndata:' + JSON.stringify(clientResponse) + '\n\n') + } + } + streamNextAgentFlowEvent(chatId: string, data: any): void { + const client = this.clients[chatId] + if (client) { + const clientResponse = { + event: 'nextAgentFlow', + data: data + } + client.response.write('message:\ndata:' + JSON.stringify(clientResponse) + '\n\n') + } + } streamActionEvent(chatId: string, data: any): void { const client = this.clients[chatId] if (client) { 
@@ -166,6 +206,7 @@ export class SSEStreamer implements IServerSideEventStreamer { } streamErrorEvent(chatId: string, msg: string) { + if (msg.includes('401 Incorrect API key provided')) msg = '401 Invalid model key or Incorrect local model configuration.' const client = this.clients[chatId] if (client) { const clientResponse = { @@ -205,4 +246,15 @@ export class SSEStreamer implements IServerSideEventStreamer { this.streamCustomEvent(chatId, 'metadata', metadataJson) } } + + streamUsageMetadataEvent(chatId: string, data: any): void { + const client = this.clients[chatId] + if (client) { + const clientResponse = { + event: 'usageMetadata', + data: data + } + client.response.write('message:\ndata:' + JSON.stringify(clientResponse) + '\n\n') + } + } } diff --git a/packages/server/src/utils/buildAgentflow.ts b/packages/server/src/utils/buildAgentflow.ts new file mode 100644 index 000000000..28c117057 --- /dev/null +++ b/packages/server/src/utils/buildAgentflow.ts @@ -0,0 +1,1785 @@ +import { DataSource } from 'typeorm' +import { v4 as uuidv4 } from 'uuid' +import { cloneDeep, get } from 'lodash' +import TurndownService from 'turndown' +import { + AnalyticHandler, + ICommonObject, + ICondition, + IFileUpload, + IHumanInput, + IMessage, + IServerSideEventStreamer, + convertChatHistoryToText, + generateFollowUpPrompts +} from 'flowise-components' +import { + IncomingAgentflowInput, + INodeData, + IReactFlowObject, + IExecuteFlowParams, + IFlowConfig, + IAgentflowExecutedData, + ExecutionState, + IExecution, + IChatMessage, + ChatType, + IReactFlowNode, + IReactFlowEdge, + IComponentNodes, + INodeOverrides, + IVariableOverride, + INodeDirectedGraph +} from '../Interface' +import { + RUNTIME_MESSAGES_LENGTH_VAR_PREFIX, + CHAT_HISTORY_VAR_PREFIX, + databaseEntities, + FILE_ATTACHMENT_PREFIX, + getAppVersion, + getGlobalVariable, + getStartingNode, + getTelemetryFlowObj, + QUESTION_VAR_PREFIX +} from '.' 
+import { ChatFlow } from '../database/entities/ChatFlow' +import { Variable } from '../database/entities/Variable' +import { replaceInputsWithConfig, constructGraphs, getAPIOverrideConfig } from '../utils' +import logger from './logger' +import { getErrorMessage } from '../errors/utils' +import { Execution } from '../database/entities/Execution' +import { utilAddChatMessage } from './addChatMesage' +import { CachePool } from '../CachePool' +import { ChatMessage } from '../database/entities/ChatMessage' +import { Telemetry } from './telemetry' + +interface IWaitingNode { + nodeId: string + receivedInputs: Map + expectedInputs: Set + isConditional: boolean + conditionalGroups: Map +} + +interface INodeQueue { + nodeId: string + data: any + inputs: Record +} + +interface IProcessNodeOutputsParams { + nodeId: string + nodeName: string + result: any + humanInput?: IHumanInput + graph: Record + nodes: IReactFlowNode[] + edges: IReactFlowEdge[] + nodeExecutionQueue: INodeQueue[] + waitingNodes: Map + loopCounts: Map + abortController?: AbortController +} + +interface IAgentFlowRuntime { + state?: ICommonObject + chatHistory?: IMessage[] + form?: Record +} + +interface IExecuteNodeParams { + nodeId: string + reactFlowNode: IReactFlowNode + nodes: IReactFlowNode[] + edges: IReactFlowEdge[] + graph: INodeDirectedGraph + reversedGraph: INodeDirectedGraph + incomingInput: IncomingAgentflowInput + chatflow: ChatFlow + chatId: string + sessionId: string + apiMessageId: string + isInternal: boolean + pastChatHistory: IMessage[] + appDataSource: DataSource + telemetry: Telemetry + componentNodes: IComponentNodes + cachePool: CachePool + sseStreamer: IServerSideEventStreamer + baseURL: string + overrideConfig?: ICommonObject + apiOverrideStatus?: boolean + nodeOverrides?: INodeOverrides + variableOverrides?: IVariableOverride[] + uploadedFilesContent?: string + fileUploads?: IFileUpload[] + humanInput?: IHumanInput + agentFlowExecutedData?: IAgentflowExecutedData[] + 
agentflowRuntime: IAgentFlowRuntime + abortController?: AbortController + parentTraceIds?: ICommonObject + analyticHandlers?: AnalyticHandler + parentExecutionId?: string + isRecursive?: boolean + iterationContext?: ICommonObject +} + +interface IExecuteAgentFlowParams extends Omit { + incomingInput: IncomingAgentflowInput +} + +const MAX_LOOP_COUNT = process.env.MAX_LOOP_COUNT ? parseInt(process.env.MAX_LOOP_COUNT) : 10 + +/** + * Add execution to database + * @param {DataSource} appDataSource + * @param {string} agentflowId + * @param {IAgentflowExecutedData[]} agentFlowExecutedData + * @param {string} sessionId + * @returns {Promise} + */ +const addExecution = async ( + appDataSource: DataSource, + agentflowId: string, + agentFlowExecutedData: IAgentflowExecutedData[], + sessionId: string +) => { + const newExecution = new Execution() + const bodyExecution = { + agentflowId, + state: 'INPROGRESS', + sessionId, + executionData: JSON.stringify(agentFlowExecutedData) + } + Object.assign(newExecution, bodyExecution) + + const execution = appDataSource.getRepository(Execution).create(newExecution) + return await appDataSource.getRepository(Execution).save(execution) +} + +/** + * Update execution in database + * @param {DataSource} appDataSource + * @param {string} executionId + * @param {Partial} data + * @returns {Promise} + */ +const updateExecution = async (appDataSource: DataSource, executionId: string, data?: Partial) => { + const execution = await appDataSource.getRepository(Execution).findOneBy({ + id: executionId + }) + + if (!execution) { + throw new Error(`Execution ${executionId} not found`) + } + + const updateExecution = new Execution() + const bodyExecution: ICommonObject = {} + if (data && data.executionData) { + bodyExecution.executionData = typeof data.executionData === 'string' ? 
data.executionData : JSON.stringify(data.executionData) + } + if (data && data.state) { + bodyExecution.state = data.state + + if (data.state === 'STOPPED') { + bodyExecution.stoppedDate = new Date() + } + } + + Object.assign(updateExecution, bodyExecution) + + appDataSource.getRepository(Execution).merge(execution, updateExecution) + await appDataSource.getRepository(Execution).save(execution) +} + +export const _removeCredentialId = (obj: any): any => { + if (!obj || typeof obj !== 'object') return obj + + if (Array.isArray(obj)) { + return obj.map((item) => _removeCredentialId(item)) + } + + const newObj: Record = {} + for (const [key, value] of Object.entries(obj)) { + if (key === 'FLOWISE_CREDENTIAL_ID') continue + newObj[key] = _removeCredentialId(value) + } + return newObj +} + +export const resolveVariables = async ( + reactFlowNodeData: INodeData, + question: string, + form: Record, + flowConfig: IFlowConfig | undefined, + availableVariables: Variable[], + variableOverrides: IVariableOverride[], + uploadedFilesContent: string, + chatHistory: IMessage[], + agentFlowExecutedData?: IAgentflowExecutedData[], + iterationContext?: ICommonObject +): Promise => { + let flowNodeData = cloneDeep(reactFlowNodeData) + const types = 'inputs' + + const resolveNodeReference = async (value: any): Promise => { + // If value is an array, process each element + if (Array.isArray(value)) { + return Promise.all(value.map((item) => resolveNodeReference(item))) + } + + // If value is an object, process each property + if (typeof value === 'object' && value !== null) { + const resolvedObj: any = {} + for (const [key, val] of Object.entries(value)) { + resolvedObj[key] = await resolveNodeReference(val) + } + return resolvedObj + } + + // If value is not a string, return as is + if (typeof value !== 'string') return value + + const turndownService = new TurndownService() + value = turndownService.turndown(value) + // After conversion, replace any escaped underscores with regular 
underscores + value = value.replace(/\\_/g, '_') + + const matches = value.match(/{{(.*?)}}/g) + + if (!matches) return value + + let resolvedValue = value + for (const match of matches) { + // Remove {{ }} and trim whitespace + const reference = match.replace(/[{}]/g, '').trim() + const variableFullPath = reference + + if (variableFullPath === QUESTION_VAR_PREFIX) { + resolvedValue = resolvedValue.replace(match, question) + resolvedValue = uploadedFilesContent ? `${uploadedFilesContent}\n\n${resolvedValue}` : resolvedValue + } + + if (variableFullPath.startsWith('$form.')) { + const variableValue = get(form, variableFullPath.replace('$form.', '')) + if (variableValue != null) { + // For arrays and objects, stringify them to prevent toString() conversion issues + const formattedValue = + Array.isArray(variableValue) || (typeof variableValue === 'object' && variableValue !== null) + ? JSON.stringify(variableValue) + : variableValue + resolvedValue = resolvedValue.replace(match, formattedValue) + } + } + + if (variableFullPath === FILE_ATTACHMENT_PREFIX) { + resolvedValue = resolvedValue.replace(match, uploadedFilesContent) + } + + if (variableFullPath === CHAT_HISTORY_VAR_PREFIX) { + resolvedValue = resolvedValue.replace(match, convertChatHistoryToText(chatHistory)) + } + + if (variableFullPath === RUNTIME_MESSAGES_LENGTH_VAR_PREFIX) { + resolvedValue = resolvedValue.replace(match, flowConfig?.runtimeChatHistoryLength ?? 
0) + } + + if (variableFullPath.startsWith('$iteration')) { + if (iterationContext && iterationContext.value) { + if (typeof iterationContext.value === 'string') { + resolvedValue = resolvedValue.replace(match, iterationContext?.value) + } else if (typeof iterationContext.value === 'object') { + const iterationValue = get(iterationContext.value, variableFullPath.replace('$iteration.', '')) + // For arrays and objects, stringify them to prevent toString() conversion issues + const formattedValue = + Array.isArray(iterationValue) || (typeof iterationValue === 'object' && iterationValue !== null) + ? JSON.stringify(iterationValue) + : iterationValue + resolvedValue = resolvedValue.replace(match, formattedValue) + } + } + } + + if (variableFullPath.startsWith('$vars.')) { + const vars = await getGlobalVariable(flowConfig, availableVariables, variableOverrides) + const variableValue = get(vars, variableFullPath.replace('$vars.', '')) + if (variableValue != null) { + // For arrays and objects, stringify them to prevent toString() conversion issues + const formattedValue = + Array.isArray(variableValue) || (typeof variableValue === 'object' && variableValue !== null) + ? JSON.stringify(variableValue) + : variableValue + resolvedValue = resolvedValue.replace(match, formattedValue) + } + } + + if (variableFullPath.startsWith('$flow.') && flowConfig) { + const variableValue = get(flowConfig, variableFullPath.replace('$flow.', '')) + if (variableValue != null) { + // For arrays and objects, stringify them to prevent toString() conversion issues + const formattedValue = + Array.isArray(variableValue) || (typeof variableValue === 'object' && variableValue !== null) + ? 
JSON.stringify(variableValue) + : variableValue + resolvedValue = resolvedValue.replace(match, formattedValue) + } + } + + // Find node data in executed data + // sometimes turndown value returns a backslash like `llmAgentflow\_1`, remove the backslash + const cleanNodeId = variableFullPath.replace('\\', '') + // Find the last (most recent) matching node data instead of the first one + const nodeData = agentFlowExecutedData + ? [...agentFlowExecutedData].reverse().find((data) => data.nodeId === cleanNodeId) + : undefined + if (nodeData && nodeData.data) { + // Replace the reference with actual value + const actualValue = (nodeData.data['output'] as ICommonObject)?.content + // For arrays and objects, stringify them to prevent toString() conversion issues + const formattedValue = + Array.isArray(actualValue) || (typeof actualValue === 'object' && actualValue !== null) + ? JSON.stringify(actualValue) + : actualValue?.toString() ?? match + resolvedValue = resolvedValue.replace(match, formattedValue) + } + } + + return resolvedValue + } + + const getParamValues = async (paramsObj: ICommonObject) => { + for (const key in paramsObj) { + const paramValue = paramsObj[key] + const isAcceptVariable = reactFlowNodeData.inputParams.find((param) => param.name === key)?.acceptVariable ?? false + if (isAcceptVariable) { + paramsObj[key] = await resolveNodeReference(paramValue) + } + } + } + + const paramsObj = flowNodeData[types] ?? 
{} + await getParamValues(paramsObj) + + return flowNodeData +} + +/* + * Gets all input connections for a specific node + * @param {IEdge[]} edges - Array of all edges (connections) in the workflow + * @param {string} nodeId - ID of the node to get input connections for + * @returns {IEdge[]} Array of input connections for the specified node + * + * @example + * // For llmAgentflow_2 which has two inputs from llmAgentflow_0 and llmAgentflow_1 + * const connections = getNodeInputConnections(nodes, edges, 'llmAgentflow_2'); + * // Returns array of two edge objects connecting to llmAgentflow_2 + */ +function getNodeInputConnections(edges: IReactFlowEdge[], nodeId: string): IReactFlowEdge[] { + // Filter edges where target matches the nodeId + const inputConnections = edges.filter((edge) => edge.target === nodeId) + + // Sort connections by sourceHandle to maintain consistent order + // This is important for nodes that have multiple inputs that need to be processed in order + inputConnections.sort((a, b) => { + // Extract index from sourceHandle (e.g., "output-0" vs "output-1") + const indexA = parseInt(a.sourceHandle.split('-').find((part) => !isNaN(parseInt(part))) || '0') + const indexB = parseInt(b.sourceHandle.split('-').find((part) => !isNaN(parseInt(part))) || '0') + return indexA - indexB + }) + + return inputConnections +} + +/** + * Analyzes node dependencies and sets up expected inputs + */ +function setupNodeDependencies(nodeId: string, edges: IReactFlowEdge[], nodes: IReactFlowNode[]): IWaitingNode { + logger.debug(`\n🔍 Analyzing dependencies for node: ${nodeId}`) + const inputConnections = getNodeInputConnections(edges, nodeId) + const waitingNode: IWaitingNode = { + nodeId, + receivedInputs: new Map(), + expectedInputs: new Set(), + isConditional: false, + conditionalGroups: new Map() + } + + // Group inputs by their parent condition nodes + const inputsByCondition = new Map() + + for (const connection of inputConnections) { + const sourceNode = 
nodes.find((n) => n.id === connection.source) + if (!sourceNode) continue + + // Find if this input comes from a conditional branch + const conditionParent = findConditionParent(connection.source, edges, nodes) + + if (conditionParent) { + logger.debug(` 📌 Found conditional input from ${connection.source} (condition: ${conditionParent})`) + waitingNode.isConditional = true + const group = inputsByCondition.get(conditionParent) || [] + group.push(connection.source) + inputsByCondition.set(conditionParent, group) + } else { + logger.debug(` 📌 Found required input from ${connection.source}`) + waitingNode.expectedInputs.add(connection.source) + } + } + + // Set up conditional groups + inputsByCondition.forEach((sources, conditionId) => { + if (conditionId) { + logger.debug(` 📋 Conditional group ${conditionId}: [${sources.join(', ')}]`) + waitingNode.conditionalGroups.set(conditionId, sources) + } + }) + + return waitingNode +} + +/** + * Finds the parent condition node for a given node, if any + */ +function findConditionParent(nodeId: string, edges: IReactFlowEdge[], nodes: IReactFlowNode[]): string | null { + const currentNode = nodes.find((n) => n.id === nodeId) + if (!currentNode) return null + if ( + currentNode.data.name === 'conditionAgentflow' || + currentNode.data.name === 'conditionAgentAgentflow' || + currentNode.data.name === 'humanInputAgentflow' + ) { + return currentNode.id + } + + let currentId = nodeId + const visited = new Set() + + let shouldContinue = true + while (shouldContinue) { + if (visited.has(currentId)) { + shouldContinue = false + continue + } + visited.add(currentId) + + const parentEdge = edges.find((edge) => edge.target === currentId) + if (!parentEdge) { + shouldContinue = false + continue + } + + const parentNode = nodes.find((n) => n.id === parentEdge.source) + if (!parentNode) { + shouldContinue = false + continue + } + + if ( + parentNode.data.name === 'conditionAgentflow' || + parentNode.data.name === 'conditionAgentAgentflow' || 
+ parentNode.data.name === 'humanInputAgentflow' + ) { + return parentNode.id + } + + currentId = parentNode.id + } + + return null +} + +/** + * Checks if a node has received all required inputs + */ +function hasReceivedRequiredInputs(waitingNode: IWaitingNode): boolean { + logger.debug(`\n✨ Checking inputs for node: ${waitingNode.nodeId}`) + + // Check non-conditional required inputs + for (const required of waitingNode.expectedInputs) { + const hasInput = waitingNode.receivedInputs.has(required) + logger.debug(` 📊 Required input ${required}: ${hasInput ? '✅' : '❌'}`) + if (!hasInput) return false + } + + // Check conditional groups + for (const [groupId, possibleSources] of waitingNode.conditionalGroups) { + // Need at least one input from each conditional group + const hasInputFromGroup = possibleSources.some((source) => waitingNode.receivedInputs.has(source)) + logger.debug(` 📊 Conditional group ${groupId}: ${hasInputFromGroup ? '✅' : '❌'}`) + if (!hasInputFromGroup) return false + } + + return true +} + +/** + * Determines which nodes should be ignored based on condition results + * @param currentNode - The node being processed + * @param result - The execution result from the node + * @param edges - All edges in the workflow + * @param nodeId - Current node ID + * @returns Array of node IDs that should be ignored + */ +async function determineNodesToIgnore( + currentNode: IReactFlowNode, + result: any, + humanInput: IHumanInput | undefined, + edges: IReactFlowEdge[], + nodeId: string +): Promise { + const ignoreNodeIds: string[] = [] + + // Check if this is a decision node + const isDecisionNode = + currentNode.data.name === 'conditionAgentflow' || + currentNode.data.name === 'conditionAgentAgentflow' || + (currentNode.data.name === 'humanInputAgentflow' && humanInput) + + if (isDecisionNode && result.output?.conditions) { + const outputConditions: ICondition[] = result.output.conditions + + // Find indexes of unfulfilled conditions + const 
unfulfilledIndexes = outputConditions + .map((condition: any, index: number) => + condition.isFulfilled === false || !Object.prototype.hasOwnProperty.call(condition, 'isFulfilled') ? index : -1 + ) + .filter((index: number) => index !== -1) + + // Find nodes to ignore based on unfulfilled conditions + for (const index of unfulfilledIndexes) { + const ignoreEdge = edges.find((edge) => edge.source === nodeId && edge.sourceHandle === `${nodeId}-output-${index}`) + + if (ignoreEdge) { + ignoreNodeIds.push(ignoreEdge.target) + } + } + } + + return ignoreNodeIds +} + +/** + * Process node outputs and handle branching logic + */ +async function processNodeOutputs({ + nodeId, + nodeName, + result, + humanInput, + graph, + nodes, + edges, + nodeExecutionQueue, + waitingNodes, + loopCounts +}: IProcessNodeOutputsParams): Promise<{ humanInput?: IHumanInput }> { + logger.debug(`\n🔄 Processing outputs from node: ${nodeId}`) + + let updatedHumanInput = humanInput + + const childNodeIds = graph[nodeId] || [] + logger.debug(` 👉 Child nodes: [${childNodeIds.join(', ')}]`) + + const currentNode = nodes.find((n) => n.id === nodeId) + if (!currentNode) return { humanInput: updatedHumanInput } + + // Get nodes to ignore based on conditions + const ignoreNodeIds = await determineNodesToIgnore(currentNode, result, humanInput, edges, nodeId) + if (ignoreNodeIds.length) { + logger.debug(` ⏭️ Skipping nodes: [${ignoreNodeIds.join(', ')}]`) + } + + for (const childId of childNodeIds) { + if (ignoreNodeIds.includes(childId)) continue + + const childNode = nodes.find((n) => n.id === childId) + if (!childNode) continue + + logger.debug(` 📝 Processing child node: ${childId}`) + + let waitingNode = waitingNodes.get(childId) + + if (!waitingNode) { + logger.debug(` 🆕 First time seeing node ${childId} - analyzing dependencies`) + waitingNode = setupNodeDependencies(childId, edges, nodes) + waitingNodes.set(childId, waitingNode) + } + + waitingNode.receivedInputs.set(nodeId, result) + logger.debug(` 
/**
 * Process node outputs and handle branching logic.
 *
 * For every child of the just-finished node: records the result as a received
 * input on the child's waiting entry (creating it on first sight), and enqueues
 * the child once all its required/conditional inputs have arrived. Also handles
 * loop-back scheduling for `loopAgentflow` nodes, bounded by a max loop count.
 *
 * @returns The (possibly cleared) humanInput to carry into the next scheduler iteration
 */
async function processNodeOutputs({
    nodeId,
    nodeName,
    result,
    humanInput,
    graph,
    nodes,
    edges,
    nodeExecutionQueue,
    waitingNodes,
    loopCounts
}: IProcessNodeOutputsParams): Promise<{ humanInput?: IHumanInput }> {
    logger.debug(`\n🔄 Processing outputs from node: ${nodeId}`)

    let updatedHumanInput = humanInput

    // Children of this node in the (forward) adjacency graph
    const childNodeIds = graph[nodeId] || []
    logger.debug(` 👉 Child nodes: [${childNodeIds.join(', ')}]`)

    const currentNode = nodes.find((n) => n.id === nodeId)
    if (!currentNode) return { humanInput: updatedHumanInput }

    // Get nodes to ignore based on conditions (unfulfilled branches of decision nodes)
    const ignoreNodeIds = await determineNodesToIgnore(currentNode, result, humanInput, edges, nodeId)
    if (ignoreNodeIds.length) {
        logger.debug(` ⏭️ Skipping nodes: [${ignoreNodeIds.join(', ')}]`)
    }

    for (const childId of childNodeIds) {
        if (ignoreNodeIds.includes(childId)) continue

        const childNode = nodes.find((n) => n.id === childId)
        if (!childNode) continue

        logger.debug(` 📝 Processing child node: ${childId}`)

        let waitingNode = waitingNodes.get(childId)

        if (!waitingNode) {
            // First time this child is seen: analyze its dependencies lazily
            logger.debug(` 🆕 First time seeing node ${childId} - analyzing dependencies`)
            waitingNode = setupNodeDependencies(childId, edges, nodes)
            waitingNodes.set(childId, waitingNode)
        }

        waitingNode.receivedInputs.set(nodeId, result)
        logger.debug(` ➕ Added input from ${nodeId}`)

        // Check if node is ready to execute
        if (hasReceivedRequiredInputs(waitingNode)) {
            logger.debug(` ✅ Node ${childId} ready for execution!`)
            // Remove from waiting set before enqueueing so it is not re-armed
            waitingNodes.delete(childId)
            nodeExecutionQueue.push({
                nodeId: childId,
                data: combineNodeInputs(waitingNode.receivedInputs),
                inputs: Object.fromEntries(waitingNode.receivedInputs)
            })
        } else {
            logger.debug(` ⏳ Node ${childId} still waiting for inputs`)
            logger.debug(` Has: [${Array.from(waitingNode.receivedInputs.keys()).join(', ')}]`)
            logger.debug(` Needs: [${Array.from(waitingNode.expectedInputs).join(', ')}]`)
            if (waitingNode.conditionalGroups.size > 0) {
                logger.debug(' Conditional groups:')
                waitingNode.conditionalGroups.forEach((sources, groupId) => {
                    logger.debug(` ${groupId}: [${sources.join(', ')}]`)
                })
            }
        }
    }

    // Loop node: re-enqueue the loop target until the max loop count is hit
    if (nodeName === 'loopAgentflow' && result.output?.nodeID) {
        logger.debug(` 🔄 Looping back to node: ${result.output.nodeID}`)

        const loopCount = (loopCounts.get(nodeId) || 0) + 1
        const maxLoop = result.output.maxLoopCount || MAX_LOOP_COUNT

        if (loopCount < maxLoop) {
            logger.debug(` Loop count: ${loopCount}/${maxLoop}`)
            loopCounts.set(nodeId, loopCount)
            nodeExecutionQueue.push({
                nodeId: result.output.nodeID,
                data: result.output,
                inputs: {}
            })

            // Clear humanInput when looping to prevent it from being reused
            if (updatedHumanInput) {
                logger.debug(` 🧹 Clearing humanInput for loop iteration`)
                updatedHumanInput = undefined
            }
        } else {
            logger.debug(` ⚠️ Maximum loop count (${maxLoop}) reached, stopping loop`)
        }
    }

    return { humanInput: updatedHumanInput }
}
}, text: 'World' }); + * + * const combined = combineNodeInputs(inputs); + * Result: + * { + * json: { + * node1: { value: 1 }, + * node2: { value: 2 } + * }, + * text: 'Hello\nWorld' + * } + */ +function combineNodeInputs(receivedInputs: Map): any { + // Filter out null/undefined inputs + const validInputs = new Map(Array.from(receivedInputs.entries()).filter(([_, value]) => value !== null && value !== undefined)) + + if (validInputs.size === 0) { + return null + } + + if (validInputs.size === 1) { + return Array.from(validInputs.values())[0] + } + + // Initialize result object to store combined data + const result: { + json: any + text?: string + binary?: any + error?: Error + } = { + json: {} + } + + // Sort inputs by source node ID to ensure consistent ordering + const sortedInputs = Array.from(validInputs.entries()).sort((a, b) => a[0].localeCompare(b[0])) + + for (const [sourceNodeId, inputData] of sortedInputs) { + if (!inputData) continue + + try { + // Handle different types of input data + if (typeof inputData === 'object') { + // Merge JSON data + if (inputData.json) { + result.json = { + ...result.json, + [sourceNodeId]: inputData.json + } + } + + // Combine text data if present + if (inputData.text) { + result.text = result.text ? 
`${result.text}\n${inputData.text}` : inputData.text + } + + // Merge binary data if present + if (inputData.binary) { + result.binary = { + ...result.binary, + [sourceNodeId]: inputData.binary + } + } + + // Handle error data + if (inputData.error) { + result.error = inputData.error + } + } else { + // Handle primitive data types + result.json[sourceNodeId] = inputData + } + } catch (error) { + // Log error but continue processing other inputs + console.error(`Error combining input from node ${sourceNodeId}:`, error) + result.error = error as Error + } + } + + // Special handling for text-only nodes + if (Object.keys(result.json).length === 0 && result.text) { + result.json = { text: result.text } + } + + return result +} + +/** + * Executes a single node in the workflow + * @param params - Parameters needed for node execution + * @returns The result of the node execution + */ +const executeNode = async ({ + nodeId, + reactFlowNode, + nodes, + edges, + graph, + reversedGraph, + incomingInput, + chatflow, + chatId, + sessionId, + apiMessageId, + parentExecutionId, + pastChatHistory, + appDataSource, + telemetry, + componentNodes, + cachePool, + sseStreamer, + baseURL, + overrideConfig = {}, + apiOverrideStatus = false, + nodeOverrides = {}, + variableOverrides = [], + uploadedFilesContent = '', + fileUploads, + humanInput, + agentFlowExecutedData = [], + agentflowRuntime, + abortController, + parentTraceIds, + analyticHandlers, + isInternal, + isRecursive, + iterationContext +}: IExecuteNodeParams): Promise<{ + result: any + shouldStop?: boolean + agentFlowExecutedData?: IAgentflowExecutedData[] +}> => { + try { + if (abortController?.signal?.aborted) { + throw new Error('Aborted') + } + + // Stream progress event + sseStreamer?.streamNextAgentFlowEvent(chatId, { + nodeId, + nodeLabel: reactFlowNode.data.label, + status: 'INPROGRESS' + }) + + // Get node implementation + const nodeInstanceFilePath = componentNodes[reactFlowNode.data.name].filePath as string + const 
/**
 * Executes a single node in the workflow.
 *
 * Loads the node implementation, resolves its input variables against the
 * current runtime state, runs it, and handles the special node kinds:
 * iteration nodes (recursive sub-flow execution per item), human-input nodes
 * (stop and wait for approval/rejection), and agent nodes awaiting human
 * input before tool use.
 *
 * @param params - Parameters needed for node execution
 * @returns The result of the node execution; `shouldStop: true` when the flow
 *          must pause (human input required)
 * @throws Error('Aborted') when the abort signal fired; rethrows node errors
 */
const executeNode = async ({
    nodeId,
    reactFlowNode,
    nodes,
    edges,
    graph,
    reversedGraph,
    incomingInput,
    chatflow,
    chatId,
    sessionId,
    apiMessageId,
    parentExecutionId,
    pastChatHistory,
    appDataSource,
    telemetry,
    componentNodes,
    cachePool,
    sseStreamer,
    baseURL,
    overrideConfig = {},
    apiOverrideStatus = false,
    nodeOverrides = {},
    variableOverrides = [],
    uploadedFilesContent = '',
    fileUploads,
    humanInput,
    agentFlowExecutedData = [],
    agentflowRuntime,
    abortController,
    parentTraceIds,
    analyticHandlers,
    isInternal,
    isRecursive,
    iterationContext
}: IExecuteNodeParams): Promise<{
    result: any
    shouldStop?: boolean
    agentFlowExecutedData?: IAgentflowExecutedData[]
}> => {
    try {
        // Bail out early if the caller cancelled the run
        if (abortController?.signal?.aborted) {
            throw new Error('Aborted')
        }

        // Stream progress event
        sseStreamer?.streamNextAgentFlowEvent(chatId, {
            nodeId,
            nodeLabel: reactFlowNode.data.label,
            status: 'INPROGRESS'
        })

        // Get node implementation (dynamically imported from its registered file path)
        const nodeInstanceFilePath = componentNodes[reactFlowNode.data.name].filePath as string
        const nodeModule = await import(nodeInstanceFilePath)
        const newNodeInstance = new nodeModule.nodeClass()

        // Prepare node data (deep copy so the stored flow definition is never mutated)
        let flowNodeData = cloneDeep(reactFlowNode.data)

        // Apply config overrides if needed
        if (overrideConfig && apiOverrideStatus) {
            flowNodeData = replaceInputsWithConfig(flowNodeData, overrideConfig, nodeOverrides, variableOverrides)
        }

        // Get available variables and resolve them
        const availableVariables = await appDataSource.getRepository(Variable).find()

        // Prepare flow config
        let updatedState = cloneDeep(agentflowRuntime.state)
        const runtimeChatHistory = agentflowRuntime.chatHistory || []
        const chatHistory = [...pastChatHistory, ...runtimeChatHistory]
        const flowConfig: IFlowConfig = {
            chatflowid: chatflow.id,
            chatId,
            sessionId,
            apiMessageId,
            chatHistory,
            runtimeChatHistoryLength: Math.max(0, runtimeChatHistory.length - 1),
            state: updatedState,
            ...overrideConfig
        }
        // Iteration sub-flows carry their own runtime state; merge it on top
        if (
            iterationContext &&
            iterationContext.agentflowRuntime &&
            iterationContext.agentflowRuntime.state &&
            Object.keys(iterationContext.agentflowRuntime.state).length > 0
        ) {
            updatedState = {
                ...updatedState,
                ...iterationContext.agentflowRuntime.state
            }
            flowConfig.state = updatedState
        }

        // Resolve variables in node data
        const reactFlowNodeData: INodeData = await resolveVariables(
            flowNodeData,
            incomingInput.question ?? '',
            incomingInput.form ?? agentflowRuntime.form ?? {},
            flowConfig,
            availableVariables,
            variableOverrides,
            uploadedFilesContent,
            chatHistory,
            agentFlowExecutedData,
            iterationContext
        )

        // Handle human input if present: carry forward the pending action, if any,
        // from the most recently executed node
        let humanInputAction: Record | undefined

        if (agentFlowExecutedData.length) {
            const lastNodeOutput = agentFlowExecutedData[agentFlowExecutedData.length - 1]?.data?.output as ICommonObject | undefined
            humanInputAction = lastNodeOutput?.humanInputAction
        }

        if (humanInput && nodeId === humanInput.startNodeId) {
            reactFlowNodeData.inputs = { ...reactFlowNodeData.inputs, humanInput }
            // Remove the stopped humanInput from execution data
            agentFlowExecutedData = agentFlowExecutedData.filter((execData) => execData.nodeId !== nodeId)
        }

        // Check if this is the last node for streaming purpose
        const isLastNode =
            !isRecursive &&
            (!graph[nodeId] || graph[nodeId].length === 0 || (!humanInput && reactFlowNode.data.name === 'humanInputAgentflow'))

        if (incomingInput.question && incomingInput.form) {
            throw new Error('Question and form cannot be provided at the same time')
        }

        let finalInput: string | Record | undefined
        if (incomingInput.question) {
            // Prepare final question with uploaded content if any
            finalInput = uploadedFilesContent ? `${uploadedFilesContent}\n\n${incomingInput.question}` : incomingInput.question
        } else if (incomingInput.form) {
            // Flatten form fields into "key: value" lines
            finalInput = Object.entries(incomingInput.form || {})
                .map(([key, value]) => `${key}: ${value}`)
                .join('\n')
        }

        // Prepare run parameters
        const runParams = {
            chatId,
            sessionId,
            chatflowid: chatflow.id,
            apiMessageId: flowConfig.apiMessageId,
            logger,
            appDataSource,
            databaseEntities,
            componentNodes,
            cachePool,
            analytic: chatflow.analytic,
            uploads: fileUploads,
            baseURL,
            isLastNode,
            sseStreamer,
            pastChatHistory,
            agentflowRuntime,
            abortController,
            analyticHandlers,
            parentTraceIds,
            humanInputAction,
            iterationContext
        }

        // Execute node
        let results = await newNodeInstance.run(reactFlowNodeData, finalInput, runParams)

        // Handle iteration node with recursive execution
        if (
            reactFlowNode.data.name === 'iterationAgentflow' &&
            results?.input?.iterationInput &&
            Array.isArray(results.input.iterationInput)
        ) {
            logger.debug(` 🔄 Processing iteration node with ${results.input.iterationInput.length} items using recursive execution`)

            // Get child nodes for this iteration
            const childNodes = nodes.filter((node) => node.parentNode === nodeId)

            if (childNodes.length > 0) {
                logger.debug(` 📦 Found ${childNodes.length} child nodes for iteration`)

                // Create a new flow object containing only the nodes in this iteration block
                const iterationFlowData: IReactFlowObject = {
                    nodes: childNodes,
                    edges: edges.filter((edge: IReactFlowEdge) => {
                        const sourceNode = nodes.find((n) => n.id === edge.source)
                        const targetNode = nodes.find((n) => n.id === edge.target)
                        return sourceNode?.parentNode === nodeId && targetNode?.parentNode === nodeId
                    }),
                    viewport: { x: 0, y: 0, zoom: 1 }
                }

                // Create a modified chatflow for this iteration
                const iterationChatflow = {
                    ...chatflow,
                    flowData: JSON.stringify(iterationFlowData)
                }

                // Initialize array to collect results from iterations
                const iterationResults: string[] = []

                // Execute sub-flow for each item in the iteration array
                for (let i = 0; i < results.input.iterationInput.length; i++) {
                    const item = results.input.iterationInput[i]
                    logger.debug(` 🔄 Processing iteration ${i + 1}/${results.input.iterationInput.length} recursively`)

                    // Create iteration context (shadows the outer iterationContext on purpose)
                    const iterationContext = {
                        index: i,
                        value: item,
                        isFirst: i === 0,
                        isLast: i === results.input.iterationInput.length - 1
                    }

                    try {
                        // Execute sub-flow recursively
                        const subFlowResult = await executeAgentFlow({
                            componentNodes,
                            incomingInput,
                            chatflow: iterationChatflow,
                            chatId,
                            appDataSource,
                            telemetry,
                            cachePool,
                            sseStreamer,
                            baseURL,
                            isInternal,
                            uploadedFilesContent,
                            fileUploads,
                            signal: abortController,
                            isRecursive: true,
                            parentExecutionId,
                            iterationContext: {
                                ...iterationContext,
                                agentflowRuntime
                            }
                        })

                        // Store the result
                        if (subFlowResult?.text) {
                            iterationResults.push(subFlowResult.text)
                        }

                        // Add executed data from sub-flow to main execution data with appropriate iteration context
                        if (subFlowResult?.agentFlowExecutedData) {
                            const subflowExecutedData = subFlowResult.agentFlowExecutedData.map((data: IAgentflowExecutedData) => ({
                                ...data,
                                data: {
                                    ...data.data,
                                    iterationIndex: i,
                                    iterationContext,
                                    parentNodeId: reactFlowNode.data.id
                                }
                            }))

                            // Add executed data to parent execution
                            agentFlowExecutedData.push(...subflowExecutedData)

                            // Update parent execution record with combined data if we have a parent execution ID
                            if (parentExecutionId) {
                                try {
                                    logger.debug(` 📝 Updating parent execution ${parentExecutionId} with iteration ${i + 1} data`)
                                    await updateExecution(appDataSource, parentExecutionId, {
                                        executionData: JSON.stringify(agentFlowExecutedData)
                                    })
                                } catch (error) {
                                    console.error(` ❌ Error updating parent execution: ${getErrorMessage(error)}`)
                                }
                            }
                        }

                        // Merge the child iteration's runtime state back to parent
                        if (
                            subFlowResult?.agentflowRuntime &&
                            subFlowResult.agentflowRuntime.state &&
                            Object.keys(subFlowResult.agentflowRuntime.state).length > 0
                        ) {
                            logger.debug(` 🔄 Merging iteration ${i + 1} runtime state back to parent`)

                            updatedState = {
                                ...updatedState,
                                ...subFlowResult.agentflowRuntime.state
                            }

                            // Update next iteration's runtime state
                            agentflowRuntime.state = updatedState

                            // Update parent execution's runtime state
                            results.state = updatedState
                        }
                    } catch (error) {
                        // A failed iteration is recorded but does not abort the remaining ones
                        console.error(` ❌ Error in iteration ${i + 1}: ${getErrorMessage(error)}`)
                        iterationResults.push(`Error in iteration ${i + 1}: ${getErrorMessage(error)}`)
                    }
                }

                // Update the output with combined results
                results.output = {
                    ...(results.output || {}),
                    iterationResults,
                    content: iterationResults.join('\n')
                }

                logger.debug(` 📊 Completed all iterations. Total results: ${iterationResults.length}`)
            }
        }

        // Stop going through the current route if the node is a human task
        if (!humanInput && reactFlowNode.data.name === 'humanInputAgentflow') {
            const humanInputAction = {
                id: uuidv4(),
                mapping: {
                    approve: 'Proceed',
                    reject: 'Reject'
                },
                elements: [
                    { type: 'agentflowv2-approve-button', label: 'Proceed' },
                    { type: 'agentflowv2-reject-button', label: 'Reject' }
                ],
                data: {
                    nodeId,
                    nodeLabel: reactFlowNode.data.label,
                    input: results.input
                }
            }

            const newWorkflowExecutedData: IAgentflowExecutedData = {
                nodeId,
                nodeLabel: reactFlowNode.data.label,
                data: {
                    ...results,
                    output: {
                        ...results.output,
                        humanInputAction
                    }
                },
                previousNodeIds: reversedGraph[nodeId] || [],
                status: 'STOPPED'
            }
            agentFlowExecutedData.push(newWorkflowExecutedData)

            sseStreamer?.streamNextAgentFlowEvent(chatId, {
                nodeId,
                nodeLabel: reactFlowNode.data.label,
                status: 'STOPPED'
            })
            sseStreamer?.streamAgentFlowExecutedDataEvent(chatId, agentFlowExecutedData)
            sseStreamer?.streamAgentFlowEvent(chatId, 'STOPPED')

            sseStreamer?.streamActionEvent(chatId, humanInputAction)

            return { result: results, shouldStop: true, agentFlowExecutedData }
        }

        // Stop going through the current route if the node is a agent node waiting for human input before using the tool
        if (reactFlowNode.data.name === 'agentAgentflow' && results?.output?.isWaitingForHumanInput) {
            const humanInputAction = {
                id: uuidv4(),
                mapping: {
                    approve: 'Proceed',
                    reject: 'Reject'
                },
                elements: [
                    { type: 'agentflowv2-approve-button', label: 'Proceed' },
                    { type: 'agentflowv2-reject-button', label: 'Reject' }
                ],
                data: {
                    nodeId,
                    nodeLabel: reactFlowNode.data.label,
                    input: results.input
                }
            }

            const newWorkflowExecutedData: IAgentflowExecutedData = {
                nodeId,
                nodeLabel: reactFlowNode.data.label,
                data: {
                    ...results,
                    output: {
                        ...results.output,
                        humanInputAction
                    }
                },
                previousNodeIds: reversedGraph[nodeId] || [],
                status: 'STOPPED'
            }
            agentFlowExecutedData.push(newWorkflowExecutedData)

            sseStreamer?.streamNextAgentFlowEvent(chatId, {
                nodeId,
                nodeLabel: reactFlowNode.data.label,
                status: 'STOPPED'
            })
            sseStreamer?.streamAgentFlowExecutedDataEvent(chatId, agentFlowExecutedData)
            sseStreamer?.streamAgentFlowEvent(chatId, 'STOPPED')

            sseStreamer?.streamActionEvent(chatId, humanInputAction)

            return { result: results, shouldStop: true, agentFlowExecutedData }
        }

        return { result: results, agentFlowExecutedData }
    } catch (error) {
        logger.error(`[server]: Error executing node ${nodeId}: ${getErrorMessage(error)}`)
        throw error
    }
}
+ + sseStreamer?.streamActionEvent(chatId, humanInputAction) + + return { result: results, shouldStop: true, agentFlowExecutedData } + } + + // Stop going through the current route if the node is a agent node waiting for human input before using the tool + if (reactFlowNode.data.name === 'agentAgentflow' && results?.output?.isWaitingForHumanInput) { + const humanInputAction = { + id: uuidv4(), + mapping: { + approve: 'Proceed', + reject: 'Reject' + }, + elements: [ + { type: 'agentflowv2-approve-button', label: 'Proceed' }, + { type: 'agentflowv2-reject-button', label: 'Reject' } + ], + data: { + nodeId, + nodeLabel: reactFlowNode.data.label, + input: results.input + } + } + + const newWorkflowExecutedData: IAgentflowExecutedData = { + nodeId, + nodeLabel: reactFlowNode.data.label, + data: { + ...results, + output: { + ...results.output, + humanInputAction + } + }, + previousNodeIds: reversedGraph[nodeId] || [], + status: 'STOPPED' + } + agentFlowExecutedData.push(newWorkflowExecutedData) + + sseStreamer?.streamNextAgentFlowEvent(chatId, { + nodeId, + nodeLabel: reactFlowNode.data.label, + status: 'STOPPED' + }) + sseStreamer?.streamAgentFlowExecutedDataEvent(chatId, agentFlowExecutedData) + sseStreamer?.streamAgentFlowEvent(chatId, 'STOPPED') + + sseStreamer?.streamActionEvent(chatId, humanInputAction) + + return { result: results, shouldStop: true, agentFlowExecutedData } + } + + return { result: results, agentFlowExecutedData } + } catch (error) { + logger.error(`[server]: Error executing node ${nodeId}: ${getErrorMessage(error)}`) + throw error + } +} + +const checkForMultipleStartNodes = (startingNodeIds: string[], isRecursive: boolean, nodes: IReactFlowNode[]) => { + // For non-recursive, loop through and check if each starting node is inside an iteration node, if yes, delete it + const clonedStartingNodeIds = [...startingNodeIds] + for (const nodeId of clonedStartingNodeIds) { + const node = nodes.find((node) => node.id === nodeId) + if (node?.extent === 
'parent' && !isRecursive) { + startingNodeIds.splice(startingNodeIds.indexOf(nodeId), 1) + } + } + + if (!isRecursive && startingNodeIds.length > 1) { + throw new Error('Multiple starting nodes are not allowed') + } +} + +/* + * Function to traverse the flow graph and execute the nodes + */ +export const executeAgentFlow = async ({ + componentNodes, + incomingInput, + chatflow, + chatId, + appDataSource, + telemetry, + cachePool, + sseStreamer, + baseURL, + isInternal, + uploadedFilesContent, + fileUploads, + signal: abortController, + isRecursive = false, + parentExecutionId, + iterationContext, + isTool = false +}: IExecuteAgentFlowParams) => { + logger.debug('\n🚀 Starting flow execution') + + const question = incomingInput.question + const form = incomingInput.form + let overrideConfig = incomingInput.overrideConfig ?? {} + const uploads = incomingInput.uploads + const userMessageDateTime = new Date() + const chatflowid = chatflow.id + const sessionId = incomingInput.sessionId ?? chatId + const humanInput: IHumanInput | undefined = incomingInput.humanInput + const apiMessageId = uuidv4() + + /*** Get chatflows and prepare data ***/ + const flowData = chatflow.flowData + const parsedFlowData: IReactFlowObject = JSON.parse(flowData) + const nodes = (parsedFlowData.nodes || []).filter((node) => node.data.name !== 'stickyNoteAgentflow') + const edges = parsedFlowData.edges + const { graph, nodeDependencies } = constructGraphs(nodes, edges) + const { graph: reversedGraph } = constructGraphs(nodes, edges, { isReversed: true }) + const startInputType = nodes.find((node) => node.data.name === 'startAgentflow')?.data.inputs?.startInputType as + | 'chatInput' + | 'formInput' + if (!startInputType && !isRecursive) { + throw new Error('Start input type not found') + } + // @ts-ignore + if (isTool) sseStreamer = undefined // If the request is from ChatflowTool, don't stream the response + + /*** Get API Config ***/ + const { nodeOverrides, variableOverrides, 
apiOverrideStatus } = getAPIOverrideConfig(chatflow) + + /* + graph { + startAgentflow_0: [ 'conditionAgentflow_0' ], + conditionAgentflow_0: [ 'llmAgentflow_0', 'llmAgentflow_1' ], + llmAgentflow_0: [ 'llmAgentflow_2' ], + llmAgentflow_1: [ 'llmAgentflow_2' ], + llmAgentflow_2: [] + } + */ + + /* + nodeDependencies { + startAgentflow_0: 0, + conditionAgentflow_0: 1, + llmAgentflow_0: 1, + llmAgentflow_1: 1, + llmAgentflow_2: 2 + } + */ + + let status: ExecutionState = 'INPROGRESS' + let agentFlowExecutedData: IAgentflowExecutedData[] = [] + let newExecution: Execution + const startingNodeIds: string[] = [] + + // Initialize execution queue + const nodeExecutionQueue: INodeQueue[] = [] + const waitingNodes: Map = new Map() + const loopCounts: Map = new Map() + + // Initialize runtime state for new execution + let agentflowRuntime: IAgentFlowRuntime = { + state: {}, + chatHistory: [], + form: {} + } + + let previousExecution: Execution | undefined + + // If not a recursive call or parent execution not found, proceed normally + if (!isRecursive) { + const previousExecutions = await appDataSource.getRepository(Execution).find({ + where: { + sessionId, + agentflowId: chatflowid + }, + order: { + createdDate: 'DESC' + } + }) + + if (previousExecutions.length) { + previousExecution = previousExecutions[0] + } + } + + // If the start input type is form input, get the form values from the previous execution (form values are persisted in the same session) + if (startInputType === 'formInput' && previousExecution) { + const previousExecutionData = (JSON.parse(previousExecution.executionData) as IAgentflowExecutedData[]) ?? 
[] + + const previousStartAgent = previousExecutionData.find((execData) => execData.data.name === 'startAgentflow') + + if (previousStartAgent) { + const previousStartAgentOutput = previousStartAgent.data.output + if (previousStartAgentOutput && typeof previousStartAgentOutput === 'object' && 'form' in previousStartAgentOutput) { + agentflowRuntime.form = previousStartAgentOutput.form + } + } + } + + // If it is human input, find the last checkpoint and resume + if (humanInput?.startNodeId) { + if (!previousExecution) { + throw new Error(`No previous execution found for session ${sessionId}`) + } + + if (previousExecution.state !== 'STOPPED') { + throw new Error( + `Cannot resume execution ${previousExecution.id} because it is in '${previousExecution.state}' state. ` + + `Only executions in 'STOPPED' state can be resumed.` + ) + } + + startingNodeIds.push(humanInput.startNodeId) + checkForMultipleStartNodes(startingNodeIds, isRecursive, nodes) + + const executionData = JSON.parse(previousExecution.executionData) as IAgentflowExecutedData[] + + // Verify that the humanInputAgentflow node exists in previous execution + const humanInputNodeExists = executionData.some((data) => data.nodeId === humanInput.startNodeId) + + if (!humanInputNodeExists) { + throw new Error( + `Human Input node ${humanInput.startNodeId} not found in previous execution. ` + + `This could indicate an invalid resume attempt or a modified flow.` + ) + } + + agentFlowExecutedData.push(...executionData) + + // Get last state + const lastState = executionData[executionData.length - 1].data.state + + // Update agentflow runtime state + agentflowRuntime.state = (lastState as ICommonObject) ?? 
{} + + // Update execution state to INPROGRESS + await updateExecution(appDataSource, previousExecution.id, { + state: 'INPROGRESS' + }) + newExecution = previousExecution + parentExecutionId = previousExecution.id + } else if (isRecursive && parentExecutionId) { + const { startingNodeIds: startingNodeIdsFromFlow } = getStartingNode(nodeDependencies) + startingNodeIds.push(...startingNodeIdsFromFlow) + checkForMultipleStartNodes(startingNodeIds, isRecursive, nodes) + + // For recursive calls with a valid parent execution ID, don't create a new execution + // Instead, fetch the parent execution to use it + const parentExecution = await appDataSource.getRepository(Execution).findOne({ + where: { id: parentExecutionId } + }) + + if (parentExecution) { + logger.debug(` 📝 Using parent execution ID: ${parentExecutionId} for recursive call`) + newExecution = parentExecution + } else { + console.warn(` ⚠️ Parent execution ID ${parentExecutionId} not found, will create new execution`) + newExecution = await addExecution(appDataSource, chatflowid, agentFlowExecutedData, sessionId) + parentExecutionId = newExecution.id + } + } else { + const { startingNodeIds: startingNodeIdsFromFlow } = getStartingNode(nodeDependencies) + startingNodeIds.push(...startingNodeIdsFromFlow) + checkForMultipleStartNodes(startingNodeIds, isRecursive, nodes) + + // Only create a new execution if this is not a recursive call + newExecution = await addExecution(appDataSource, chatflowid, agentFlowExecutedData, sessionId) + parentExecutionId = newExecution.id + } + + // Add starting nodes to queue + startingNodeIds.forEach((nodeId) => { + nodeExecutionQueue.push({ + nodeId, + data: {}, + inputs: {} + }) + }) + + const maxIterations = process.env.MAX_ITERATIONS ? 
parseInt(process.env.MAX_ITERATIONS) : 1000 + + // Get chat history from ChatMessage table + const pastChatHistory = (await appDataSource + .getRepository(ChatMessage) + .find({ + where: { + chatflowid, + chatId + }, + order: { + createdDate: 'ASC' + } + }) + .then((messages) => + messages.map((message) => { + const mappedMessage: any = { + content: message.content, + role: message.role === 'userMessage' ? 'user' : 'assistant' + } + + // Only add additional_kwargs when fileUploads or artifacts exists and is not empty + if ((message.fileUploads && message.fileUploads !== '') || (message.artifacts && message.artifacts !== '')) { + mappedMessage.additional_kwargs = {} + + if (message.fileUploads && message.fileUploads !== '') { + mappedMessage.additional_kwargs.fileUploads = message.fileUploads + } + } + + return mappedMessage + }) + )) as IMessage[] + + let iterations = 0 + let currentHumanInput = humanInput + + let analyticHandlers: AnalyticHandler | undefined + let parentTraceIds: ICommonObject | undefined + + try { + if (chatflow.analytic) { + analyticHandlers = AnalyticHandler.getInstance({ inputs: {} } as any, { + appDataSource, + databaseEntities, + componentNodes, + analytic: chatflow.analytic, + chatId + }) + await analyticHandlers.init() + parentTraceIds = await analyticHandlers.onChainStart( + 'Agentflow', + form && Object.keys(form).length > 0 ? 
JSON.stringify(form) : question || '' + ) + } + } catch (error) { + logger.error(`[server]: Error initializing analytic handlers: ${getErrorMessage(error)}`) + } + + while (nodeExecutionQueue.length > 0 && status === 'INPROGRESS') { + logger.debug(`\n▶️ Iteration ${iterations + 1}:`) + logger.debug(` Queue: [${nodeExecutionQueue.map((n) => n.nodeId).join(', ')}]`) + + if (iterations === 0 && !isRecursive) { + sseStreamer?.streamAgentFlowEvent(chatId, 'INPROGRESS') + } + + if (iterations++ > maxIterations) { + throw new Error('Maximum iteration limit reached') + } + + const currentNode = nodeExecutionQueue.shift() + if (!currentNode) continue + + const reactFlowNode = nodes.find((nd) => nd.id === currentNode.nodeId) + if (!reactFlowNode || reactFlowNode === undefined || reactFlowNode.data.name === 'stickyNoteAgentflow') continue + + let nodeResult + try { + // Check for abort signal early in the loop + if (abortController?.signal?.aborted) { + throw new Error('Aborted') + } + + logger.debug(` 🎯 Executing node: ${reactFlowNode?.data.label}`) + + // Execute current node + const executionResult = await executeNode({ + nodeId: currentNode.nodeId, + reactFlowNode, + nodes, + edges, + graph, + reversedGraph, + incomingInput, + chatflow, + chatId, + sessionId, + apiMessageId, + parentExecutionId, + isInternal, + pastChatHistory, + appDataSource, + telemetry, + componentNodes, + cachePool, + sseStreamer, + baseURL, + overrideConfig, + apiOverrideStatus, + nodeOverrides, + variableOverrides, + uploadedFilesContent, + fileUploads, + humanInput: currentHumanInput, + agentFlowExecutedData, + agentflowRuntime, + abortController, + parentTraceIds, + analyticHandlers, + isRecursive, + iterationContext + }) + + if (executionResult.agentFlowExecutedData) { + agentFlowExecutedData = executionResult.agentFlowExecutedData + } + + if (executionResult.shouldStop) { + status = 'STOPPED' + break + } + + nodeResult = executionResult.result + + // Add execution data + 
agentFlowExecutedData.push({ + nodeId: currentNode.nodeId, + nodeLabel: reactFlowNode.data.label, + data: nodeResult, + previousNodeIds: reversedGraph[currentNode.nodeId], + status: 'FINISHED' + }) + + sseStreamer?.streamNextAgentFlowEvent(chatId, { + nodeId: currentNode.nodeId, + nodeLabel: reactFlowNode.data.label, + status: 'FINISHED' + }) + + if (!isRecursive) sseStreamer?.streamAgentFlowExecutedDataEvent(chatId, agentFlowExecutedData) + + // Add to agentflow runtime state + if (nodeResult && nodeResult.state) { + agentflowRuntime.state = nodeResult.state + } + + if (nodeResult && nodeResult.chatHistory) { + agentflowRuntime.chatHistory = [...(agentflowRuntime.chatHistory ?? []), ...nodeResult.chatHistory] + } + + if (nodeResult && nodeResult.output && nodeResult.output.form) { + agentflowRuntime.form = nodeResult.output.form + } + + if (nodeResult && nodeResult.output && nodeResult.output.ephemeralMemory) { + pastChatHistory.length = 0 + } + + // Process node outputs and handle branching + const processResult = await processNodeOutputs({ + nodeId: currentNode.nodeId, + nodeName: reactFlowNode.data.name, + result: nodeResult, + humanInput: currentHumanInput, + graph, + nodes, + edges, + nodeExecutionQueue, + waitingNodes, + loopCounts, + abortController + }) + + // Update humanInput if it was changed + if (processResult.humanInput !== currentHumanInput) { + currentHumanInput = processResult.humanInput + } + } catch (error) { + const isAborted = getErrorMessage(error).includes('Aborted') + const errorStatus = isAborted ? 'TERMINATED' : 'ERROR' + const errorMessage = isAborted ? 
'Flow execution was cancelled' : getErrorMessage(error) + + status = errorStatus + + // Add error info to execution data + agentFlowExecutedData.push({ + nodeId: currentNode.nodeId, + nodeLabel: reactFlowNode.data.label, + previousNodeIds: reversedGraph[currentNode.nodeId] || [], + data: { + id: currentNode.nodeId, + name: reactFlowNode.data.name, + error: errorMessage + }, + status: errorStatus + }) + + // Stream events to client + sseStreamer?.streamNextAgentFlowEvent(chatId, { + nodeId: currentNode.nodeId, + nodeLabel: reactFlowNode.data.label, + status: errorStatus, + error: isAborted ? undefined : errorMessage + }) + + // Only update execution record if this is not a recursive call + if (!isRecursive) { + sseStreamer?.streamAgentFlowExecutedDataEvent(chatId, agentFlowExecutedData) + + await updateExecution(appDataSource, newExecution.id, { + executionData: JSON.stringify(agentFlowExecutedData), + state: errorStatus + }) + + sseStreamer?.streamAgentFlowEvent(chatId, errorStatus) + } + + if (parentTraceIds && analyticHandlers) { + await analyticHandlers.onChainError(parentTraceIds, errorMessage, true) + } + + throw new Error(errorMessage) + } + + logger.debug(`/////////////////////////////////////////////////////////////////////////////`) + } + + // check if there is any status stopped from agentFlowExecutedData + const terminatedNode = agentFlowExecutedData.find((data) => data.status === 'TERMINATED') + const errorNode = agentFlowExecutedData.find((data) => data.status === 'ERROR') + const stoppedNode = agentFlowExecutedData.find((data) => data.status === 'STOPPED') + + if (terminatedNode) { + status = 'TERMINATED' + } else if (errorNode) { + status = 'ERROR' + } else if (stoppedNode) { + status = 'STOPPED' + } else { + status = 'FINISHED' + } + + // Only update execution record if this is not a recursive call + if (!isRecursive) { + await updateExecution(appDataSource, newExecution.id, { + executionData: JSON.stringify(agentFlowExecutedData), + state: status + 
}) + + sseStreamer?.streamAgentFlowEvent(chatId, status) + } + + logger.debug(`\n🏁 Flow execution completed`) + logger.debug(` Status: ${status}`) + + // check if last agentFlowExecutedData.data.output contains the key "content" + const lastNodeOutput = agentFlowExecutedData[agentFlowExecutedData.length - 1].data?.output as ICommonObject | undefined + const content = (lastNodeOutput?.content as string) ?? ' ' + + // remove credentialId from agentFlowExecutedData + agentFlowExecutedData = agentFlowExecutedData.map((data) => _removeCredentialId(data)) + + if (parentTraceIds && analyticHandlers) { + await analyticHandlers.onChainEnd(parentTraceIds, content, true) + } + + if (isRecursive) { + return { + agentFlowExecutedData, + agentflowRuntime, + status, + text: content + } + } + + // Find the previous chat message with the same session/chat id and remove the action + if (humanInput && Object.keys(humanInput).length) { + let query = await appDataSource + .getRepository(ChatMessage) + .createQueryBuilder('chat_message') + .where('chat_message.chatId = :chatId', { chatId }) + .orWhere('chat_message.sessionId = :sessionId', { sessionId }) + .orderBy('chat_message.createdDate', 'DESC') + .getMany() + + for (const result of query) { + if (result.action) { + try { + const newChatMessage = new ChatMessage() + Object.assign(newChatMessage, result) + newChatMessage.action = null + const cm = await appDataSource.getRepository(ChatMessage).create(newChatMessage) + await appDataSource.getRepository(ChatMessage).save(cm) + break + } catch (e) { + // error converting action to JSON + } + } + } + } + + let finalUserInput = incomingInput.question || ' ' + + if (startInputType === 'chatInput') { + finalUserInput = question || humanInput?.feedback || ' ' + } else if (startInputType === 'formInput') { + if (form) { + finalUserInput = Object.entries(form || {}) + .map(([key, value]) => `${key}: ${value}`) + .join('\n') + } else { + finalUserInput = question || humanInput?.feedback || ' ' 
+ } + } + + const userMessage: Omit = { + role: 'userMessage', + content: finalUserInput, + chatflowid, + chatType: isInternal ? ChatType.INTERNAL : ChatType.EXTERNAL, + chatId, + sessionId, + createdDate: userMessageDateTime, + fileUploads: uploads ? JSON.stringify(fileUploads) : undefined, + leadEmail: incomingInput.leadEmail, + executionId: newExecution.id + } + await utilAddChatMessage(userMessage, appDataSource) + + const apiMessage: Omit = { + id: apiMessageId, + role: 'apiMessage', + content: content, + chatflowid, + chatType: isInternal ? ChatType.INTERNAL : ChatType.EXTERNAL, + chatId, + sessionId, + executionId: newExecution.id + } + if (lastNodeOutput?.sourceDocuments) apiMessage.sourceDocuments = JSON.stringify(lastNodeOutput.sourceDocuments) + if (lastNodeOutput?.usedTools) apiMessage.usedTools = JSON.stringify(lastNodeOutput.usedTools) + if (lastNodeOutput?.fileAnnotations) apiMessage.fileAnnotations = JSON.stringify(lastNodeOutput.fileAnnotations) + if (lastNodeOutput?.artifacts) apiMessage.artifacts = JSON.stringify(lastNodeOutput.artifacts) + if (chatflow.followUpPrompts) { + const followUpPromptsConfig = JSON.parse(chatflow.followUpPrompts) + const followUpPrompts = await generateFollowUpPrompts(followUpPromptsConfig, apiMessage.content, { + chatId, + chatflowid, + appDataSource, + databaseEntities + }) + if (followUpPrompts?.questions) { + apiMessage.followUpPrompts = JSON.stringify(followUpPrompts.questions) + } + } + if (lastNodeOutput?.humanInputAction && Object.keys(lastNodeOutput.humanInputAction).length) + apiMessage.action = JSON.stringify(lastNodeOutput.humanInputAction) + + const chatMessage = await utilAddChatMessage(apiMessage, appDataSource) + + logger.debug(`[server]: Finished running agentflow ${chatflowid}`) + + await telemetry.sendTelemetry('prediction_sent', { + version: await getAppVersion(), + chatflowId: chatflowid, + chatId, + type: isInternal ? 
ChatType.INTERNAL : ChatType.EXTERNAL, + flowGraph: getTelemetryFlowObj(nodes, edges) + }) + + /*** Prepare response ***/ + let result: ICommonObject = {} + result.text = content + result.question = incomingInput.question // return the question in the response, this is used when input text is empty but question is in audio format + result.form = form + result.chatId = chatId + result.chatMessageId = chatMessage?.id + result.followUpPrompts = JSON.stringify(apiMessage.followUpPrompts) + result.executionId = newExecution.id + result.agentFlowExecutedData = agentFlowExecutedData + + if (sessionId) result.sessionId = sessionId + + return result +} diff --git a/packages/server/src/utils/buildChatflow.ts b/packages/server/src/utils/buildChatflow.ts index 0ee001df2..d0b5e0fbe 100644 --- a/packages/server/src/utils/buildChatflow.ts +++ b/packages/server/src/utils/buildChatflow.ts @@ -14,7 +14,8 @@ import { mapMimeTypeToInputField, mapExtToInputField, getFileFromUpload, - removeSpecificFileFromUpload + removeSpecificFileFromUpload, + handleEscapeCharacters } from 'flowise-components' import { StatusCodes } from 'http-status-codes' import { @@ -62,6 +63,7 @@ import { buildAgentGraph } from './buildAgentGraph' import { getErrorMessage } from '../errors/utils' import { FLOWISE_METRIC_COUNTERS, FLOWISE_COUNTER_STATUS, IMetricsProvider } from '../Interface.Metrics' import { OMIT_QUEUE_JOB_DATA } from './constants' +import { executeAgentFlow } from './buildAgentflow' /* * Initialize the ending node to be executed @@ -156,9 +158,9 @@ const getChatHistory = async ({ if (isAgentFlow) { const startNode = nodes.find((node) => node.data.name === 'seqStart') - if (!startNode?.data?.inputs?.memory) return [] + if (!startNode?.data?.inputs?.agentMemory) return prependMessages - const memoryNodeId = startNode.data.inputs.memory.split('.')[0].replace('{{', '') + const memoryNodeId = startNode.data.inputs.agentMemory.split('.')[0].replace('{{', '') const memoryNode = nodes.find((node) => 
node.data.id === memoryNodeId) if (memoryNode) { @@ -235,13 +237,21 @@ export const executeFlow = async ({ baseURL, isInternal, files, - signal + signal, + isTool }: IExecuteFlowParams) => { - const question = incomingInput.question + // Ensure incomingInput has all required properties with default values + incomingInput = { + history: [], + streaming: false, + ...incomingInput + } + + let question = incomingInput.question || '' // Ensure question is never undefined let overrideConfig = incomingInput.overrideConfig ?? {} const uploads = incomingInput.uploads const prependMessages = incomingInput.history ?? [] - const streaming = incomingInput.streaming + const streaming = incomingInput.streaming ?? false const userMessageDateTime = new Date() const chatflowid = chatflow.id @@ -252,8 +262,8 @@ export const executeFlow = async ({ */ let fileUploads: IFileUpload[] = [] let uploadedFilesContent = '' - if (incomingInput.uploads) { - fileUploads = incomingInput.uploads + if (uploads) { + fileUploads = uploads for (let i = 0; i < fileUploads.length; i += 1) { const upload = fileUploads[i] @@ -301,6 +311,7 @@ export const executeFlow = async ({ logger.debug(`Speech to text result: ${speechToTextResult}`) if (speechToTextResult) { incomingInput.question = speechToTextResult + question = speechToTextResult } } } @@ -364,6 +375,26 @@ export const executeFlow = async ({ } } + const isAgentFlowV2 = chatflow.type === 'AGENTFLOW' + if (isAgentFlowV2) { + return executeAgentFlow({ + componentNodes, + incomingInput, + chatflow, + chatId, + appDataSource, + telemetry, + cachePool, + sseStreamer, + baseURL, + isInternal, + uploadedFilesContent, + fileUploads, + signal, + isTool + }) + } + /*** Get chatflows and prepare data ***/ const flowData = chatflow.flowData const parsedFlowData: IReactFlowObject = JSON.parse(flowData) @@ -489,7 +520,7 @@ export const executeFlow = async ({ memoryType, sessionId, createdDate: userMessageDateTime, - fileUploads: incomingInput.uploads ? 
JSON.stringify(fileUploads) : undefined, + fileUploads: uploads ? JSON.stringify(fileUploads) : undefined, leadEmail: incomingInput.leadEmail } await utilAddChatMessage(userMessage, appDataSource) @@ -579,7 +610,19 @@ export const executeFlow = async ({ } return undefined } else { - const isStreamValid = await checkIfStreamValid(endingNodes, nodes, streaming) + let chatflowConfig: ICommonObject = {} + if (chatflow.chatbotConfig) { + chatflowConfig = JSON.parse(chatflow.chatbotConfig) + } + + let isStreamValid = false + + /* Check for post-processing settings, if available isStreamValid is always false */ + if (chatflowConfig?.postProcessing?.enabled === true) { + isStreamValid = false + } else { + isStreamValid = await checkIfStreamValid(endingNodes, nodes, streaming) + } /*** Find the last node to execute ***/ const { endingNodeData, endingNodeInstance } = await initEndingNode({ @@ -637,8 +680,44 @@ export const executeFlow = async ({ await utilAddChatMessage(userMessage, appDataSource) let resultText = '' - if (result.text) resultText = result.text - else if (result.json) resultText = '```json\n' + JSON.stringify(result.json, null, 2) + if (result.text) { + resultText = result.text + /* Check for post-processing settings */ + if (chatflowConfig?.postProcessing?.enabled === true) { + try { + const postProcessingFunction = JSON.parse(chatflowConfig?.postProcessing?.customFunction) + const nodeInstanceFilePath = componentNodes['customFunction'].filePath as string + const nodeModule = await import(nodeInstanceFilePath) + //set the outputs.output to EndingNode to prevent json escaping of content... 
+ const nodeData = { + inputs: { javascriptFunction: postProcessingFunction }, + outputs: { output: 'output' } + } + const options: ICommonObject = { + chatflowid: chatflow.id, + sessionId, + chatId, + input: question, + rawOutput: resultText, + appDataSource, + databaseEntities, + logger + } + const customFuncNodeInstance = new nodeModule.nodeClass() + let moderatedResponse = await customFuncNodeInstance.init(nodeData, question, options) + if (typeof moderatedResponse === 'string') { + result.text = handleEscapeCharacters(moderatedResponse, true) + } else if (typeof moderatedResponse === 'object') { + result.text = '```json\n' + JSON.stringify(moderatedResponse, null, 2) + '\n```' + } else { + result.text = moderatedResponse + } + resultText = result.text + } catch (e) { + logger.log('[server]: Post Processing Error:', e) + } + } + } else if (result.json) resultText = '```json\n' + JSON.stringify(result.json, null, 2) else resultText = JSON.stringify(result, null, 2) const apiMessage: Omit = { @@ -707,13 +786,18 @@ const checkIfStreamValid = async ( nodes: IReactFlowNode[], streaming: boolean | string | undefined ): Promise => { + // If streaming is undefined, set to false by default + if (streaming === undefined) { + streaming = false + } + // Once custom function ending node exists, flow is always unavailable to stream const isCustomFunctionEndingNode = endingNodes.some((node) => node.data?.outputs?.output === 'EndingNode') if (isCustomFunctionEndingNode) return false let isStreamValid = false for (const endingNode of endingNodes) { - const endingNodeData = endingNode.data + const endingNodeData = endingNode.data || {} // Ensure endingNodeData is never undefined const isEndingNode = endingNodeData?.outputs?.output === 'EndingNode' @@ -757,12 +841,14 @@ export const utilBuildChatflow = async (req: Request, isInternal: boolean = fals } const isAgentFlow = chatflow.type === 'MULTIAGENT' + const httpProtocol = req.get('x-forwarded-proto') || req.protocol const 
baseURL = `${httpProtocol}://${req.get('host')}` - const incomingInput: IncomingInput = req.body + const incomingInput: IncomingInput = req.body || {} // Ensure incomingInput is never undefined const chatId = incomingInput.chatId ?? incomingInput.overrideConfig?.sessionId ?? uuidv4() const files = (req.files as Express.Multer.File[]) || [] const abortControllerId = `${chatflow.id}_${chatId}` + const isTool = req.get('flowise-tool') === 'true' try { // Validate API Key if its external API request @@ -774,7 +860,7 @@ export const utilBuildChatflow = async (req: Request, isInternal: boolean = fals } const executeData: IExecuteFlowParams = { - incomingInput: req.body, + incomingInput, // Use the defensively created incomingInput variable chatflow, chatId, baseURL, @@ -784,7 +870,8 @@ export const utilBuildChatflow = async (req: Request, isInternal: boolean = fals sseStreamer: appServer.sseStreamer, telemetry: appServer.telemetry, cachePool: appServer.cachePool, - componentNodes: appServer.nodesPool.componentNodes + componentNodes: appServer.nodesPool.componentNodes, + isTool // used to disable streaming if incoming request its from ChatflowTool } if (process.env.MODE === MODE.QUEUE) { @@ -806,7 +893,6 @@ export const utilBuildChatflow = async (req: Request, isInternal: boolean = fals const signal = new AbortController() appServer.abortControllerPool.add(abortControllerId, signal) executeData.signal = signal - const result = await executeFlow(executeData) appServer.abortControllerPool.remove(abortControllerId) diff --git a/packages/server/src/utils/constants.ts b/packages/server/src/utils/constants.ts index d423ff394..247446057 100644 --- a/packages/server/src/utils/constants.ts +++ b/packages/server/src/utils/constants.ts @@ -3,6 +3,7 @@ export const WHITELIST_URLS = [ '/api/v1/chatflows/apikey/', '/api/v1/public-chatflows', '/api/v1/public-chatbotConfig', + '/api/v1/public-executions', '/api/v1/prediction/', '/api/v1/vector/upsert/', '/api/v1/node-icon/', @@ -17,15 
+18,18 @@ export const WHITELIST_URLS = [ '/api/v1/ping', '/api/v1/version', '/api/v1/attachments', - '/api/v1/metrics' + '/api/v1/metrics', + '/api/v1/nvidia-nim' ] export const OMIT_QUEUE_JOB_DATA = ['componentNodes', 'appDataSource', 'sseStreamer', 'telemetry', 'cachePool'] export const INPUT_PARAMS_TYPE = [ 'asyncOptions', + 'asyncMultiOptions', 'options', 'multiOptions', + 'array', 'datagrid', 'string', 'number', diff --git a/packages/server/src/utils/createAttachment.ts b/packages/server/src/utils/createAttachment.ts index 3a2e691a8..8c9bb9958 100644 --- a/packages/server/src/utils/createAttachment.ts +++ b/packages/server/src/utils/createAttachment.ts @@ -6,10 +6,15 @@ import { IDocument, mapExtToInputField, mapMimeTypeToInputField, - removeSpecificFileFromUpload + removeSpecificFileFromUpload, + isValidUUID, + isPathTraversal } from 'flowise-components' import { getRunningExpressApp } from './getRunningExpressApp' import { getErrorMessage } from '../errors/utils' +import { InternalFlowiseError } from '../errors/internalFlowiseError' +import { StatusCodes } from 'http-status-codes' +import { ChatFlow } from '../database/entities/ChatFlow' /** * Create attachment @@ -19,17 +24,48 @@ export const createFileAttachment = async (req: Request) => { const appServer = getRunningExpressApp() const chatflowid = req.params.chatflowId - if (!chatflowid) { - throw new Error( - 'Params chatflowId is required! Please provide chatflowId and chatId in the URL: /api/v1/attachments/:chatflowId/:chatId' - ) + if (!chatflowid || !isValidUUID(chatflowid)) { + throw new InternalFlowiseError(StatusCodes.BAD_REQUEST, 'Invalid chatflowId format - must be a valid UUID') } const chatId = req.params.chatId - if (!chatId) { - throw new Error( - 'Params chatId is required! 
Please provide chatflowId and chatId in the URL: /api/v1/attachments/:chatflowId/:chatId' - ) + if (!chatId || !isValidUUID(chatId)) { + throw new InternalFlowiseError(StatusCodes.BAD_REQUEST, 'Invalid chatId format - must be a valid UUID') + } + + // Check for path traversal attempts + if (isPathTraversal(chatflowid) || isPathTraversal(chatId)) { + throw new InternalFlowiseError(StatusCodes.BAD_REQUEST, 'Invalid path characters detected') + } + + // Validate chatflow exists and check API key + const chatflow = await appServer.AppDataSource.getRepository(ChatFlow).findOneBy({ + id: chatflowid + }) + if (!chatflow) { + throw new InternalFlowiseError(StatusCodes.NOT_FOUND, `Chatflow ${chatflowid} not found`) + } + + // Parse chatbot configuration to get file upload settings + let pdfConfig = { + usage: 'perPage', + legacyBuild: false + } + + if (chatflow.chatbotConfig) { + try { + const chatbotConfig = JSON.parse(chatflow.chatbotConfig) + if (chatbotConfig?.fullFileUpload?.pdfFile) { + if (chatbotConfig.fullFileUpload.pdfFile.usage) { + pdfConfig.usage = chatbotConfig.fullFileUpload.pdfFile.usage + } + if (chatbotConfig.fullFileUpload.pdfFile.legacyBuild !== undefined) { + pdfConfig.legacyBuild = chatbotConfig.fullFileUpload.pdfFile.legacyBuild + } + } + } catch (e) { + // Use default PDF config if parsing fails + } } // Find FileLoader node @@ -79,6 +115,12 @@ export const createFileAttachment = async (req: Request) => { outputs: { output: 'document' } } + // Apply PDF specific configuration if this is a PDF file + if (fileInputField === 'pdfFile') { + nodeData.inputs.usage = pdfConfig.usage + nodeData.inputs.legacyBuild = pdfConfig.legacyBuild as unknown as string + } + let content = '' if (isBase64) { diff --git a/packages/server/src/utils/executeCustomNodeFunction.ts b/packages/server/src/utils/executeCustomNodeFunction.ts new file mode 100644 index 000000000..a22a0291a --- /dev/null +++ b/packages/server/src/utils/executeCustomNodeFunction.ts @@ -0,0 +1,59 @@ 
+import { handleEscapeCharacters, ICommonObject } from 'flowise-components' +import { databaseEntities } from '.' +import { InternalFlowiseError } from '../errors/internalFlowiseError' +import { StatusCodes } from 'http-status-codes' +import { getErrorMessage } from '../errors/utils' +import { DataSource } from 'typeorm' +import { IComponentNodes } from '../Interface' + +export const executeCustomNodeFunction = async ({ + appDataSource, + componentNodes, + data +}: { + appDataSource: DataSource + componentNodes: IComponentNodes + data: any +}) => { + try { + const body = data + const jsFunction = typeof body?.javascriptFunction === 'string' ? body.javascriptFunction : '' + const matches = jsFunction.matchAll(/\$([a-zA-Z0-9_]+)/g) + const matchesArray: RegExpMatchArray[] = Array.from(matches) + const functionInputVariables = Object.fromEntries(matchesArray.map((g) => [g[1], undefined])) + if (functionInputVariables && Object.keys(functionInputVariables).length) { + for (const key in functionInputVariables) { + if (key.includes('vars')) { + delete functionInputVariables[key] + } + } + } + const nodeData = { inputs: { functionInputVariables, ...body } } + if (Object.prototype.hasOwnProperty.call(componentNodes, 'customFunction')) { + try { + const nodeInstanceFilePath = componentNodes['customFunction'].filePath as string + const nodeModule = await import(nodeInstanceFilePath) + const newNodeInstance = new nodeModule.nodeClass() + + const options: ICommonObject = { + appDataSource, + databaseEntities + } + + const returnData = await newNodeInstance.init(nodeData, '', options) + const dbResponse = typeof returnData === 'string' ? 
handleEscapeCharacters(returnData, true) : returnData + + return dbResponse + } catch (error) { + throw new InternalFlowiseError(StatusCodes.INTERNAL_SERVER_ERROR, `Error running custom function: ${error}`) + } + } else { + throw new InternalFlowiseError(StatusCodes.NOT_FOUND, `Node customFunction not found`) + } + } catch (error) { + throw new InternalFlowiseError( + StatusCodes.INTERNAL_SERVER_ERROR, + `Error: nodesService.executeCustomFunction - ${getErrorMessage(error)}` + ) + } +} diff --git a/packages/server/src/utils/getChatMessage.ts b/packages/server/src/utils/getChatMessage.ts index f5244aa60..9d8726f0e 100644 --- a/packages/server/src/utils/getChatMessage.ts +++ b/packages/server/src/utils/getChatMessage.ts @@ -52,6 +52,7 @@ export const utilGetChatMessage = async ({ // do the join with chat message feedback based on messageId for each chat message in the chatflow query + .leftJoinAndSelect('chat_message.execution', 'execution') .leftJoinAndMapOne('chat_message.feedback', ChatMessageFeedback, 'feedback', 'feedback.messageId = chat_message.id') .where('chat_message.chatflowid = :chatflowid', { chatflowid }) @@ -121,6 +122,9 @@ export const utilGetChatMessage = async ({ createdDate: createdDateQuery, id: messageId ?? undefined }, + relations: { + execution: true + }, order: { createdDate: sortOrder === 'DESC' ? 
'DESC' : 'ASC' } diff --git a/packages/server/src/utils/getUploadsConfig.ts b/packages/server/src/utils/getUploadsConfig.ts index 96df07a15..937fcab02 100644 --- a/packages/server/src/utils/getUploadsConfig.ts +++ b/packages/server/src/utils/getUploadsConfig.ts @@ -93,22 +93,43 @@ export const utilGetUploadsConfig = async (chatflowid: string): Promise imgUploadAllowedNodes.includes(node.data.name))) { - nodes.forEach((node: IReactFlowNode) => { - const data = node.data - if (data.category === 'Chat Models' && data.inputs?.['allowImageUploads'] === true) { - // TODO: for now the maxUploadSize is hardcoded to 5MB, we need to add it to the node properties - node.data.inputParams.map((param: INodeParams) => { - if (param.name === 'allowImageUploads' && node.data.inputs?.['allowImageUploads']) { - imgUploadSizeAndTypes.push({ - fileTypes: 'image/gif;image/jpeg;image/png;image/webp;'.split(';'), - maxUploadSize: 5 - }) - isImageUploadAllowed = true - } - }) + const isAgentflow = nodes.some((node) => node.data.category === 'Agent Flows') + + if (isAgentflow) { + // check through all the nodes and check if any of the nodes data inputs agentModelConfig or llmModelConfig or conditionAgentModelConfig has allowImageUploads + nodes.forEach((node) => { + if (node.data.category === 'Agent Flows') { + if ( + node.data.inputs?.agentModelConfig?.allowImageUploads || + node.data.inputs?.llmModelConfig?.allowImageUploads || + node.data.inputs?.conditionAgentModelConfig?.allowImageUploads + ) { + imgUploadSizeAndTypes.push({ + fileTypes: 'image/gif;image/jpeg;image/png;image/webp;'.split(';'), + maxUploadSize: 5 + }) + isImageUploadAllowed = true + } } }) + } else { + if (nodes.some((node) => imgUploadAllowedNodes.includes(node.data.name))) { + nodes.forEach((node: IReactFlowNode) => { + const data = node.data + if (data.category === 'Chat Models' && data.inputs?.['allowImageUploads'] === true) { + // TODO: for now the maxUploadSize is hardcoded to 5MB, we need to add it to the node 
properties + node.data.inputParams.map((param: INodeParams) => { + if (param.name === 'allowImageUploads' && node.data.inputs?.['allowImageUploads']) { + imgUploadSizeAndTypes.push({ + fileTypes: 'image/gif;image/jpeg;image/png;image/webp;'.split(';'), + maxUploadSize: 5 + }) + isImageUploadAllowed = true + } + }) + } + }) + } } return { diff --git a/packages/server/src/utils/index.ts b/packages/server/src/utils/index.ts index cfb70ec1e..48303a911 100644 --- a/packages/server/src/utils/index.ts +++ b/packages/server/src/utils/index.ts @@ -43,6 +43,7 @@ import { randomBytes } from 'crypto' import { AES, enc } from 'crypto-js' import multer from 'multer' import multerS3 from 'multer-s3' +import MulterGoogleCloudStorage from 'multer-cloud-storage' import { ChatFlow } from '../database/entities/ChatFlow' import { ChatMessage } from '../database/entities/ChatMessage' import { Credential } from '../database/entities/Credential' @@ -59,15 +60,15 @@ import { StatusCodes } from 'http-status-codes' import { CreateSecretCommand, GetSecretValueCommand, - PutSecretValueCommand, SecretsManagerClient, SecretsManagerClientConfig } from '@aws-sdk/client-secrets-manager' -const QUESTION_VAR_PREFIX = 'question' -const FILE_ATTACHMENT_PREFIX = 'file_attachment' -const CHAT_HISTORY_VAR_PREFIX = 'chat_history' -const REDACTED_CREDENTIAL_VALUE = '_FLOWISE_BLANK_07167752-1a71-43b1-bf8f-4f32252165db' +export const QUESTION_VAR_PREFIX = 'question' +export const FILE_ATTACHMENT_PREFIX = 'file_attachment' +export const CHAT_HISTORY_VAR_PREFIX = 'chat_history' +export const RUNTIME_MESSAGES_LENGTH_VAR_PREFIX = 'runtime_messages_length' +export const REDACTED_CREDENTIAL_VALUE = '_FLOWISE_BLANK_07167752-1a71-43b1-bf8f-4f32252165db' let secretsManagerClient: SecretsManagerClient | null = null const USE_AWS_SECRETS_MANAGER = process.env.SECRETKEY_STORAGE_TYPE === 'aws' @@ -76,14 +77,17 @@ if (USE_AWS_SECRETS_MANAGER) { const accessKeyId = process.env.SECRETKEY_AWS_ACCESS_KEY const secretAccessKey 
= process.env.SECRETKEY_AWS_SECRET_KEY - let credentials: SecretsManagerClientConfig['credentials'] | undefined + const secretManagerConfig: SecretsManagerClientConfig = { + region: region + } + if (accessKeyId && secretAccessKey) { - credentials = { + secretManagerConfig.credentials = { accessKeyId, secretAccessKey } } - secretsManagerClient = new SecretsManagerClient({ credentials, region }) + secretsManagerClient = new SecretsManagerClient(secretManagerConfig) } export const databaseEntities: IDatabaseEntity = { @@ -235,6 +239,22 @@ export const getStartingNodes = (graph: INodeDirectedGraph, endNodeId: string) = return { startingNodeIds, depthQueue: depthQueueReversed } } +/** + * Get starting node and check if flow is valid + * @param {INodeDependencies} nodeDependencies + */ +export const getStartingNode = (nodeDependencies: INodeDependencies) => { + // Find starting node + const startingNodeIds = [] as string[] + Object.keys(nodeDependencies).forEach((nodeId) => { + if (nodeDependencies[nodeId] === 0) { + startingNodeIds.push(nodeId) + } + }) + + return { startingNodeIds } +} + /** * Get all connected nodes from startnode * @param {INodeDependencies} graph @@ -760,7 +780,7 @@ export const clearSessionMemory = async ( } } -const getGlobalVariable = async ( +export const getGlobalVariable = async ( overrideConfig?: ICommonObject, availableVariables: IVariable[] = [], variableOverrides: ICommonObject[] = [] @@ -987,7 +1007,6 @@ export const resolveVariables = async ( variableOverrides: ICommonObject[] = [] ): Promise => { let flowNodeData = cloneDeep(reactFlowNodeData) - const types = 'inputs' const getParamValues = async (paramsObj: ICommonObject) => { for (const key in paramsObj) { @@ -1027,7 +1046,7 @@ export const resolveVariables = async ( } } - const paramsObj = flowNodeData[types] ?? {} + const paramsObj = flowNodeData['inputs'] ?? 
{} await getParamValues(paramsObj) return flowNodeData @@ -1241,7 +1260,8 @@ export const findAvailableConfigs = (reactFlowNodes: IReactFlowNode[], component for (const flowNode of reactFlowNodes) { for (const inputParam of flowNode.data.inputParams) { - let obj: IOverrideConfig + let obj: IOverrideConfig | undefined + if (inputParam.type === 'file') { obj = { node: flowNode.data.label, @@ -1282,6 +1302,34 @@ export const findAvailableConfigs = (reactFlowNodes: IReactFlowNode[], component } } continue + } else if (inputParam.type === 'array') { + // get array item schema + const arrayItem = inputParam.array + if (Array.isArray(arrayItem)) { + const arraySchema = [] + // Each array item is a field definition + for (const item of arrayItem) { + let itemType = item.type + if (itemType === 'options') { + const availableOptions = item.options?.map((option) => option.name).join(', ') + itemType = `(${availableOptions})` + } else if (itemType === 'file') { + itemType = item.fileType ?? item.type + } + arraySchema.push({ + name: item.name, + type: itemType + }) + } + obj = { + node: flowNode.data.label, + nodeId: flowNode.data.id, + label: inputParam.label, + name: inputParam.name, + type: inputParam.type, + schema: arraySchema + } + } } else { obj = { node: flowNode.data.label, @@ -1291,7 +1339,7 @@ export const findAvailableConfigs = (reactFlowNodes: IReactFlowNode[], component type: inputParam.type === 'password' ? 
'string' : inputParam.type } } - if (!configs.some((config) => JSON.stringify(config) === JSON.stringify(obj))) { + if (obj && !configs.some((config) => JSON.stringify(config) === JSON.stringify(obj))) { configs.push(obj) } } @@ -1391,6 +1439,29 @@ export const getEncryptionKey = async (): Promise => { if (process.env.FLOWISE_SECRETKEY_OVERWRITE !== undefined && process.env.FLOWISE_SECRETKEY_OVERWRITE !== '') { return process.env.FLOWISE_SECRETKEY_OVERWRITE } + if (USE_AWS_SECRETS_MANAGER && secretsManagerClient) { + const secretId = process.env.SECRETKEY_AWS_NAME || 'FlowiseEncryptionKey' + try { + const command = new GetSecretValueCommand({ SecretId: secretId }) + const response = await secretsManagerClient.send(command) + + if (response.SecretString) { + return response.SecretString + } + } catch (error: any) { + if (error.name === 'ResourceNotFoundException') { + // Secret doesn't exist, create it + const newKey = generateEncryptKey() + const createCommand = new CreateSecretCommand({ + Name: secretId, + SecretString: newKey + }) + await secretsManagerClient.send(createCommand) + return newKey + } + throw error + } + } try { return await fs.promises.readFile(getEncryptionKeyPath(), 'utf8') } catch (error) { @@ -1409,39 +1480,7 @@ export const getEncryptionKey = async (): Promise => { * @returns {Promise} */ export const encryptCredentialData = async (plainDataObj: ICredentialDataDecrypted): Promise => { - if (USE_AWS_SECRETS_MANAGER && secretsManagerClient) { - const secretName = `FlowiseCredential_${randomBytes(12).toString('hex')}` - - logger.info(`[server]: Upserting AWS Secret: ${secretName}`) - - const secretString = JSON.stringify({ ...plainDataObj }) - - try { - // Try to update the secret if it exists - const putCommand = new PutSecretValueCommand({ - SecretId: secretName, - SecretString: secretString - }) - await secretsManagerClient.send(putCommand) - } catch (error: any) { - if (error.name === 'ResourceNotFoundException') { - // Secret doesn't exist, 
so create it - const createCommand = new CreateSecretCommand({ - Name: secretName, - SecretString: secretString - }) - await secretsManagerClient.send(createCommand) - } else { - // Rethrow any other errors - throw error - } - } - return secretName - } - const encryptKey = await getEncryptionKey() - - // Fallback to existing code return AES.encrypt(JSON.stringify(plainDataObj), encryptKey).toString() } @@ -1462,14 +1501,20 @@ export const decryptCredentialData = async ( if (USE_AWS_SECRETS_MANAGER && secretsManagerClient) { try { logger.info(`[server]: Reading AWS Secret: ${encryptedData}`) - const command = new GetSecretValueCommand({ SecretId: encryptedData }) - const response = await secretsManagerClient.send(command) + if (encryptedData.startsWith('FlowiseCredential_')) { + const command = new GetSecretValueCommand({ SecretId: encryptedData }) + const response = await secretsManagerClient.send(command) - if (response.SecretString) { - const secretObj = JSON.parse(response.SecretString) - decryptedDataStr = JSON.stringify(secretObj) + if (response.SecretString) { + const secretObj = JSON.parse(response.SecretString) + decryptedDataStr = JSON.stringify(secretObj) + } else { + throw new Error('Failed to retrieve secret value.') + } } else { - throw new Error('Failed to retrieve secret value.') + const encryptKey = await getEncryptionKey() + const decryptedData = AES.decrypt(encryptedData, encryptKey) + decryptedDataStr = decryptedData.toString(enc.Utf8) } } catch (error) { console.error(error) @@ -1800,7 +1845,84 @@ export const getMulterStorage = () => { }) }) return upload + } else if (storageType === 'gcs') { + return multer({ + storage: new MulterGoogleCloudStorage({ + projectId: process.env.GOOGLE_CLOUD_STORAGE_PROJ_ID, + bucket: process.env.GOOGLE_CLOUD_STORAGE_BUCKET_NAME, + keyFilename: process.env.GOOGLE_CLOUD_STORAGE_CREDENTIAL, + uniformBucketLevelAccess: Boolean(process.env.GOOGLE_CLOUD_UNIFORM_BUCKET_ACCESS) ?? 
true, + destination: `uploads/${getOrgId()}` + }) + }) } else { return multer({ dest: getUploadPath() }) } } + +/** + * Calculate depth of each node from starting nodes + * @param {INodeDirectedGraph} graph + * @param {string[]} startingNodeIds + * @returns {Record} Map of nodeId to its depth + */ +export const calculateNodesDepth = (graph: INodeDirectedGraph, startingNodeIds: string[]): Record => { + const depths: Record = {} + const visited = new Set() + + // Initialize all nodes with depth -1 (unvisited) + for (const nodeId in graph) { + depths[nodeId] = -1 + } + + // BFS queue with [nodeId, depth] + const queue: [string, number][] = startingNodeIds.map((id) => [id, 0]) + + // Set starting nodes depth to 0 + startingNodeIds.forEach((id) => { + depths[id] = 0 + }) + + while (queue.length > 0) { + const [currentNode, currentDepth] = queue.shift()! + + if (visited.has(currentNode)) continue + visited.add(currentNode) + + // Process all neighbors + for (const neighbor of graph[currentNode]) { + if (!visited.has(neighbor)) { + // Update depth if unvisited or found shorter path + if (depths[neighbor] === -1 || depths[neighbor] > currentDepth + 1) { + depths[neighbor] = currentDepth + 1 + } + queue.push([neighbor, currentDepth + 1]) + } + } + } + + return depths +} + +/** + * Helper function to get all nodes in a path starting from a node + * @param {INodeDirectedGraph} graph + * @param {string} startNode + * @returns {string[]} + */ +export const getAllNodesInPath = (startNode: string, graph: INodeDirectedGraph): string[] => { + const nodes = new Set() + const queue = [startNode] + + while (queue.length > 0) { + const current = queue.shift()! 
+ if (nodes.has(current)) continue + + nodes.add(current) + if (graph[current]) { + queue.push(...graph[current]) + } + } + + return Array.from(nodes) +} diff --git a/packages/server/src/utils/logger.ts b/packages/server/src/utils/logger.ts index 207f7ce34..7ad5b58a4 100644 --- a/packages/server/src/utils/logger.ts +++ b/packages/server/src/utils/logger.ts @@ -5,6 +5,7 @@ import config from './config' // should be replaced by node-config or similar import { createLogger, transports, format } from 'winston' import { NextFunction, Request, Response } from 'express' import { S3ClientConfig } from '@aws-sdk/client-s3' +import { LoggingWinston } from '@google-cloud/logging-winston' const { S3StreamLogger } = require('s3-streamlogger') @@ -13,6 +14,11 @@ const { combine, timestamp, printf, errors } = format let s3ServerStream: any let s3ErrorStream: any let s3ServerReqStream: any + +let gcsServerStream: any +let gcsErrorStream: any +let gcsServerReqStream: any + if (process.env.STORAGE_TYPE === 's3') { const accessKeyId = process.env.S3_STORAGE_ACCESS_KEY_ID const secretAccessKey = process.env.S3_STORAGE_SECRET_ACCESS_KEY @@ -21,13 +27,20 @@ if (process.env.STORAGE_TYPE === 's3') { const customURL = process.env.S3_ENDPOINT_URL const forcePathStyle = process.env.S3_FORCE_PATH_STYLE === 'true' + if (!region || !s3Bucket) { + throw new Error('S3 storage configuration is missing') + } + const s3Config: S3ClientConfig = { region: region, endpoint: customURL, - forcePathStyle: forcePathStyle, - credentials: { - accessKeyId: accessKeyId as string, - secretAccessKey: secretAccessKey as string + forcePathStyle: forcePathStyle + } + + if (accessKeyId && secretAccessKey) { + s3Config.credentials = { + accessKeyId: accessKeyId, + secretAccessKey: secretAccessKey } } @@ -53,6 +66,29 @@ if (process.env.STORAGE_TYPE === 's3') { }) } +if (process.env.STORAGE_TYPE === 'gcs') { + const config = { + projectId: process.env.GOOGLE_CLOUD_STORAGE_PROJ_ID, + keyFilename: 
process.env.GOOGLE_CLOUD_STORAGE_CREDENTIAL, + defaultCallback: (err: any) => { + if (err) { + console.error('Error logging to GCS: ' + err) + } + } + } + gcsServerStream = new LoggingWinston({ + ...config, + logName: 'server' + }) + gcsErrorStream = new LoggingWinston({ + ...config, + logName: 'error' + }) + gcsServerReqStream = new LoggingWinston({ + ...config, + logName: 'requests' + }) +} // expect the log dir be relative to the projects root const logDir = config.logging.dir @@ -94,7 +130,8 @@ const logger = createLogger({ stream: s3ServerStream }) ] - : []) + : []), + ...(process.env.STORAGE_TYPE === 'gcs' ? [gcsServerStream] : []) ], exceptionHandlers: [ ...(!process.env.STORAGE_TYPE || process.env.STORAGE_TYPE === 'local' @@ -110,7 +147,8 @@ const logger = createLogger({ stream: s3ErrorStream }) ] - : []) + : []), + ...(process.env.STORAGE_TYPE === 'gcs' ? [gcsErrorStream] : []) ], rejectionHandlers: [ ...(!process.env.STORAGE_TYPE || process.env.STORAGE_TYPE === 'local' @@ -126,12 +164,13 @@ const logger = createLogger({ stream: s3ErrorStream }) ] - : []) + : []), + ...(process.env.STORAGE_TYPE === 'gcs' ? [gcsErrorStream] : []) ] }) export function expressRequestLogger(req: Request, res: Response, next: NextFunction): void { - const unwantedLogURLs = ['/api/v1/node-icon/', '/api/v1/components-credentials-icon/'] + const unwantedLogURLs = ['/api/v1/node-icon/', '/api/v1/components-credentials-icon/', '/api/v1/ping'] if (/\/api\/v1\//i.test(req.url) && !unwantedLogURLs.some((url) => new RegExp(url, 'i').test(req.url))) { const fileLogger = createLogger({ format: combine(timestamp({ format: 'YYYY-MM-DD HH:mm:ss' }), format.json(), errors({ stack: true })), @@ -161,7 +200,8 @@ export function expressRequestLogger(req: Request, res: Response, next: NextFunc stream: s3ServerReqStream }) ] - : []) + : []), + ...(process.env.STORAGE_TYPE === 'gcs' ? 
[gcsServerReqStream] : []) ] }) diff --git a/packages/server/src/utils/rateLimit.ts b/packages/server/src/utils/rateLimit.ts index 4e8e9db4b..d4dd168a6 100644 --- a/packages/server/src/utils/rateLimit.ts +++ b/packages/server/src/utils/rateLimit.ts @@ -24,7 +24,12 @@ export class RateLimiterManager { constructor() { if (process.env.MODE === MODE.QUEUE) { if (process.env.REDIS_URL) { - this.redisClient = new Redis(process.env.REDIS_URL) + this.redisClient = new Redis(process.env.REDIS_URL, { + keepAlive: + process.env.REDIS_KEEP_ALIVE && !isNaN(parseInt(process.env.REDIS_KEEP_ALIVE, 10)) + ? parseInt(process.env.REDIS_KEEP_ALIVE, 10) + : undefined + }) } else { this.redisClient = new Redis({ host: process.env.REDIS_HOST || 'localhost', @@ -38,6 +43,10 @@ export class RateLimiterManager { key: process.env.REDIS_KEY ? Buffer.from(process.env.REDIS_KEY, 'base64') : undefined, ca: process.env.REDIS_CA ? Buffer.from(process.env.REDIS_CA, 'base64') : undefined } + : undefined, + keepAlive: + process.env.REDIS_KEEP_ALIVE && !isNaN(parseInt(process.env.REDIS_KEEP_ALIVE, 10)) + ? parseInt(process.env.REDIS_KEEP_ALIVE, 10) : undefined }) } @@ -65,7 +74,13 @@ export class RateLimiterManager { port: parseInt(process.env.REDIS_PORT || '6379'), username: process.env.REDIS_USERNAME || undefined, password: process.env.REDIS_PASSWORD || undefined, - tls: tlsOpts + tls: tlsOpts, + maxRetriesPerRequest: null, + enableReadyCheck: true, + keepAlive: + process.env.REDIS_KEEP_ALIVE && !isNaN(parseInt(process.env.REDIS_KEEP_ALIVE, 10)) + ? 
parseInt(process.env.REDIS_KEEP_ALIVE, 10) + : undefined } } diff --git a/packages/server/src/utils/telemetry.ts b/packages/server/src/utils/telemetry.ts index 4b033f209..cd26c8c93 100644 --- a/packages/server/src/utils/telemetry.ts +++ b/packages/server/src/utils/telemetry.ts @@ -8,8 +8,8 @@ export class Telemetry { postHog?: PostHog constructor() { - if (process.env.DISABLE_FLOWISE_TELEMETRY !== 'true') { - this.postHog = new PostHog('phc_jEDuFYnOnuXsws986TLWzuisbRjwFqTl9JL8tDMgqme') + if (process.env.POSTHOG_PUBLIC_API_KEY) { + this.postHog = new PostHog(process.env.POSTHOG_PUBLIC_API_KEY) } else { this.postHog = undefined } diff --git a/packages/ui/README-ZH.md b/packages/ui/README-ZH.md index e34c3de0a..4f6211d68 100644 --- a/packages/ui/README-ZH.md +++ b/packages/ui/README-ZH.md @@ -6,7 +6,7 @@ Flowise 的 React 前端界面。 -![Flowise](https://github.com/FlowiseAI/Flowise/blob/main/images/flowise.gif?raw=true) +![Flowise](https://github.com/FlowiseAI/Flowise/blob/main/images/flowise_agentflow.gif?raw=true) 安装: diff --git a/packages/ui/README.md b/packages/ui/README.md index b476d47f5..7ec334d71 100644 --- a/packages/ui/README.md +++ b/packages/ui/README.md @@ -6,7 +6,7 @@ English | [中文](./README-ZH.md) React frontend ui for Flowise. 
-![Flowise](https://github.com/FlowiseAI/Flowise/blob/main/images/flowise.gif?raw=true) +![Flowise](https://github.com/FlowiseAI/Flowise/blob/main/images/flowise_agentflow.gif?raw=true) Install: diff --git a/packages/ui/index.html b/packages/ui/index.html index 64875b7bd..1992cea06 100644 --- a/packages/ui/index.html +++ b/packages/ui/index.html @@ -1,14 +1,17 @@ - Flowise - Low-code LLM apps builder + Flowise - Build AI Agents, Visually - - + + @@ -16,13 +19,19 @@ - - + + - - + + diff --git a/packages/ui/package.json b/packages/ui/package.json index bddc9e6ea..a29bccc55 100644 --- a/packages/ui/package.json +++ b/packages/ui/package.json @@ -1,6 +1,6 @@ { "name": "flowise-ui", - "version": "2.2.5", + "version": "3.0.0", "license": "SEE LICENSE IN LICENSE.md", "homepage": "https://flowiseai.com", "author": { @@ -14,17 +14,25 @@ "@emotion/cache": "^11.4.0", "@emotion/react": "^11.10.6", "@emotion/styled": "^11.10.6", + "@lezer/highlight": "^1.2.1", "@microsoft/fetch-event-source": "^2.0.1", "@mui/base": "5.0.0-beta.40", "@mui/icons-material": "5.0.3", "@mui/lab": "5.0.0-alpha.156", "@mui/material": "5.15.0", + "@mui/system": "^6.4.3", "@mui/x-data-grid": "6.8.0", - "@tabler/icons-react": "^3.3.0", + "@mui/x-tree-view": "^7.25.0", + "@tabler/icons-react": "^3.30.0", + "@tiptap/extension-mention": "^2.11.5", + "@tiptap/extension-placeholder": "^2.11.5", + "@tiptap/pm": "^2.11.5", + "@tiptap/react": "^2.11.5", + "@tiptap/starter-kit": "^2.11.5", "@uiw/codemirror-theme-sublime": "^4.21.21", "@uiw/codemirror-theme-vscode": "^4.21.21", "@uiw/react-codemirror": "^4.21.21", - "axios": "1.6.2", + "axios": "1.7.9", "clsx": "^1.1.1", "dotenv": "^16.0.0", "flowise-embed": "latest", @@ -39,7 +47,7 @@ "notistack": "^2.0.4", "prop-types": "^15.7.2", "react": "^18.2.0", - "react-code-blocks": "^0.0.9-0", + "react-code-blocks": "^0.1.6", "react-color": "^2.19.3", "react-datepicker": "^4.21.0", "react-device-detect": "^1.17.0", @@ -47,6 +55,7 @@ "react-markdown": "^8.0.6", 
"react-perfect-scrollbar": "^1.5.8", "react-redux": "^8.0.5", + "react-rewards": "^2.1.0", "react-router": "~6.3.0", "react-router-dom": "~6.3.0", "react-syntax-highlighter": "^15.5.0", @@ -56,6 +65,7 @@ "rehype-raw": "^7.0.0", "remark-gfm": "^3.0.1", "remark-math": "^5.1.1", + "tippy.js": "^6.3.7", "uuid": "^9.0.1", "yup": "^0.32.9" }, diff --git a/packages/ui/public/favicon-16x16.png b/packages/ui/public/favicon-16x16.png index c056f6a9a..73e3e1fcb 100644 Binary files a/packages/ui/public/favicon-16x16.png and b/packages/ui/public/favicon-16x16.png differ diff --git a/packages/ui/public/favicon-32x32.png b/packages/ui/public/favicon-32x32.png index 857abed06..4c1d7f3ab 100644 Binary files a/packages/ui/public/favicon-32x32.png and b/packages/ui/public/favicon-32x32.png differ diff --git a/packages/ui/public/favicon.ico b/packages/ui/public/favicon.ico index ee99d2664..df8dba4c1 100644 Binary files a/packages/ui/public/favicon.ico and b/packages/ui/public/favicon.ico differ diff --git a/packages/ui/public/index.html b/packages/ui/public/index.html index 4f8cd033c..fd8636463 100644 --- a/packages/ui/public/index.html +++ b/packages/ui/public/index.html @@ -1,15 +1,21 @@ - Flowise - Low-code LLM apps builder + Flowise - Build AI Agents, Visually - - - + + + @@ -17,19 +23,19 @@ - + - + diff --git a/packages/ui/src/ErrorBoundary.jsx b/packages/ui/src/ErrorBoundary.jsx index bbe4fafe3..9745013fa 100644 --- a/packages/ui/src/ErrorBoundary.jsx +++ b/packages/ui/src/ErrorBoundary.jsx @@ -16,7 +16,7 @@ const ErrorBoundary = ({ error }) => { Oh snap! - The following error occured when loading this page. + The following error occurred when loading this page. @@ -27,7 +27,7 @@ const ErrorBoundary = ({ error }) => { > -
+                        
                             {`Status: ${error.response.status}`}
                             
{error.response.data.message} diff --git a/packages/ui/src/api/chatflows.js b/packages/ui/src/api/chatflows.js index 6bdd61285..8bd47f4f7 100644 --- a/packages/ui/src/api/chatflows.js +++ b/packages/ui/src/api/chatflows.js @@ -2,7 +2,7 @@ import client from './client' const getAllChatflows = () => client.get('/chatflows?type=CHATFLOW') -const getAllAgentflows = () => client.get('/chatflows?type=MULTIAGENT') +const getAllAgentflows = (type) => client.get(`/chatflows?type=${type}`) const getSpecificChatflow = (id) => client.get(`/chatflows/${id}`) @@ -20,6 +20,8 @@ const getIsChatflowStreaming = (id) => client.get(`/chatflows-streaming/${id}`) const getAllowChatflowUploads = (id) => client.get(`/chatflows-uploads/${id}`) +const generateAgentflow = (body) => client.post(`/agentflowv2-generator/generate`, body) + export default { getAllChatflows, getAllAgentflows, @@ -30,5 +32,6 @@ export default { updateChatflow, deleteChatflow, getIsChatflowStreaming, - getAllowChatflowUploads + getAllowChatflowUploads, + generateAgentflow } diff --git a/packages/ui/src/api/executions.js b/packages/ui/src/api/executions.js new file mode 100644 index 000000000..9135fa7cd --- /dev/null +++ b/packages/ui/src/api/executions.js @@ -0,0 +1,15 @@ +import client from './client' + +const getAllExecutions = (params = {}) => client.get('/executions', { params }) +const deleteExecutions = (executionIds) => client.delete('/executions', { data: { executionIds } }) +const getExecutionById = (executionId) => client.get(`/executions/${executionId}`) +const getExecutionByIdPublic = (executionId) => client.get(`/public-executions/${executionId}`) +const updateExecution = (executionId, body) => client.put(`/executions/${executionId}`, body) + +export default { + getAllExecutions, + deleteExecutions, + getExecutionById, + getExecutionByIdPublic, + updateExecution +} diff --git a/packages/ui/src/api/prediction.js b/packages/ui/src/api/prediction.js index 207d222f1..2219b1792 100644 --- 
a/packages/ui/src/api/prediction.js +++ b/packages/ui/src/api/prediction.js @@ -2,8 +2,10 @@ import client from './client' const sendMessageAndGetPrediction = (id, input) => client.post(`/internal-prediction/${id}`, input) const sendMessageAndStreamPrediction = (id, input) => client.post(`/internal-prediction/stream/${id}`, input) +const sendMessageAndGetPredictionPublic = (id, input) => client.post(`/prediction/${id}`, input) export default { sendMessageAndGetPrediction, - sendMessageAndStreamPrediction + sendMessageAndStreamPrediction, + sendMessageAndGetPredictionPublic } diff --git a/packages/ui/src/api/validation.js b/packages/ui/src/api/validation.js new file mode 100644 index 000000000..78999cd6d --- /dev/null +++ b/packages/ui/src/api/validation.js @@ -0,0 +1,7 @@ +import client from './client' + +const checkValidation = (id) => client.get(`/validation/${id}`) + +export default { + checkValidation +} diff --git a/packages/ui/src/assets/images/agentflow-generator.gif b/packages/ui/src/assets/images/agentflow-generator.gif new file mode 100644 index 000000000..de0614d95 Binary files /dev/null and b/packages/ui/src/assets/images/agentflow-generator.gif differ diff --git a/packages/ui/src/assets/images/executions_empty.svg b/packages/ui/src/assets/images/executions_empty.svg new file mode 100644 index 000000000..ef946b5f3 --- /dev/null +++ b/packages/ui/src/assets/images/executions_empty.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/packages/ui/src/assets/images/flowise_dark.svg b/packages/ui/src/assets/images/flowise_dark.svg new file mode 100644 index 000000000..f5c0725fa --- /dev/null +++ b/packages/ui/src/assets/images/flowise_dark.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/packages/ui/src/assets/images/flowise_white.svg b/packages/ui/src/assets/images/flowise_white.svg new file mode 100644 index 000000000..2a93a7449 --- /dev/null +++ b/packages/ui/src/assets/images/flowise_white.svg @@ -0,0 +1 @@ + \ No newline at end of 
file diff --git a/packages/ui/src/assets/images/key.svg b/packages/ui/src/assets/images/key.svg new file mode 100644 index 000000000..00b240296 --- /dev/null +++ b/packages/ui/src/assets/images/key.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/packages/ui/src/assets/images/opik.png b/packages/ui/src/assets/images/opik.png new file mode 100644 index 000000000..20de0c39d Binary files /dev/null and b/packages/ui/src/assets/images/opik.png differ diff --git a/packages/ui/src/assets/images/tool.svg b/packages/ui/src/assets/images/tool.svg new file mode 100644 index 000000000..e8947d469 --- /dev/null +++ b/packages/ui/src/assets/images/tool.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/packages/ui/src/assets/images/validate_empty.svg b/packages/ui/src/assets/images/validate_empty.svg new file mode 100644 index 000000000..f3cc2039e --- /dev/null +++ b/packages/ui/src/assets/images/validate_empty.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/packages/ui/src/assets/scss/_themes-vars.module.scss b/packages/ui/src/assets/scss/_themes-vars.module.scss index 6304b7071..7235b8447 100644 --- a/packages/ui/src/assets/scss/_themes-vars.module.scss +++ b/packages/ui/src/assets/scss/_themes-vars.module.scss @@ -46,6 +46,7 @@ $grey50: #fafafa; $grey100: #f5f5f5; $grey200: #eeeeee; $grey300: #e0e0e0; +$grey400: #c4c4c4; $grey500: #9e9e9e; $grey600: #757575; $grey700: #616161; @@ -134,6 +135,7 @@ $darkTextSecondary: #8492c4; grey100: $grey100; grey200: $grey200; grey300: $grey300; + grey400: $grey400; grey500: $grey500; grey600: $grey600; grey700: $grey700; diff --git a/packages/ui/src/assets/scss/style.scss b/packages/ui/src/assets/scss/style.scss index 901c919ab..52e700403 100644 --- a/packages/ui/src/assets/scss/style.scss +++ b/packages/ui/src/assets/scss/style.scss @@ -120,3 +120,29 @@ transform: translateX(10px); } } + +.tiptap { + .variable { + background-color: #b3f0b8; + border-radius: 0.4rem; + box-decoration-break: clone; + color: 
#0d7115; + padding: 0.1rem 0.3rem; + &::after { + content: '\200B'; + } + } +} + +.spin-animation { + animation: spin 1s linear infinite; +} + +@keyframes spin { + from { + transform: rotate(0deg); + } + to { + transform: rotate(360deg); + } +} diff --git a/packages/ui/src/config.js b/packages/ui/src/config.js index 34e5f157a..4f0aab10e 100644 --- a/packages/ui/src/config.js +++ b/packages/ui/src/config.js @@ -2,7 +2,8 @@ const config = { // basename: only at build time to set, and Don't add '/' at end off BASENAME for breadcrumbs, also Don't put only '/' use blank('') instead, basename: '', defaultPath: '/chatflows', - fontFamily: `'Roboto', sans-serif`, + // You can specify multiple fallback fonts + fontFamily: `'Inter', 'Roboto', 'Arial', sans-serif`, borderRadius: 12 } diff --git a/packages/ui/src/layout/MainLayout/Header/ProfileSection/index.jsx b/packages/ui/src/layout/MainLayout/Header/ProfileSection/index.jsx index 5caec1463..5e008be92 100644 --- a/packages/ui/src/layout/MainLayout/Header/ProfileSection/index.jsx +++ b/packages/ui/src/layout/MainLayout/Header/ProfileSection/index.jsx @@ -3,8 +3,8 @@ import { exportData, stringify } from '@/utils/exportImport' import useNotifier from '@/utils/useNotifier' import PropTypes from 'prop-types' import { useEffect, useRef, useState } from 'react' -import { useDispatch, useSelector } from 'react-redux' import { createPortal } from 'react-dom' +import { useDispatch, useSelector } from 'react-redux' // material-ui import { @@ -12,22 +12,22 @@ import { Box, Button, ButtonBase, + Checkbox, ClickAwayListener, + Dialog, + DialogActions, + DialogContent, + DialogTitle, Divider, + FormControlLabel, List, ListItemButton, ListItemIcon, ListItemText, Paper, Popper, - Typography, - Dialog, - DialogTitle, - DialogContent, Stack, - FormControlLabel, - Checkbox, - DialogActions + Typography } from '@mui/material' import { useTheme } from '@mui/material/styles' @@ -40,9 +40,9 @@ import AboutDialog from 
'@/ui-component/dialog/AboutDialog' import Transitions from '@/ui-component/extended/Transitions' // assets +import ExportingGIF from '@/assets/images/Exporting.gif' import { IconFileExport, IconFileUpload, IconInfoCircle, IconLogout, IconSettings, IconX } from '@tabler/icons-react' import './index.css' -import ExportingGIF from '@/assets/images/Exporting.gif' //API import exportImportApi from '@/api/exportimport' @@ -52,12 +52,26 @@ import useApi from '@/hooks/useApi' import { getErrorMessage } from '@/utils/errorHandler' import { useNavigate } from 'react-router-dom' -const dataToExport = ['Chatflows', 'Agentflows', 'Tools', 'Variables', 'Assistants'] +const dataToExport = [ + 'Agentflows', + 'Agentflows V2', + 'Assistants Custom', + 'Assistants OpenAI', + 'Assistants Azure', + 'Chatflows', + 'Chat Messages', + 'Chat Feedbacks', + 'Custom Templates', + 'Document Stores', + 'Executions', + 'Tools', + 'Variables' +] const ExportDialog = ({ show, onCancel, onExport }) => { const portalElement = document.getElementById('portal') - const [selectedData, setSelectedData] = useState(['Chatflows', 'Agentflows', 'Tools', 'Variables', 'Assistants']) + const [selectedData, setSelectedData] = useState(dataToExport) const [isExporting, setIsExporting] = useState(false) useEffect(() => { @@ -243,11 +257,19 @@ const ProfileSection = ({ username, handleLogout }) => { const onExport = (data) => { const body = {} - if (data.includes('Chatflows')) body.chatflow = true if (data.includes('Agentflows')) body.agentflow = true + if (data.includes('Agentflows V2')) body.agentflowv2 = true + if (data.includes('Assistants Custom')) body.assistantCustom = true + if (data.includes('Assistants OpenAI')) body.assistantOpenAI = true + if (data.includes('Assistants Azure')) body.assistantAzure = true + if (data.includes('Chatflows')) body.chatflow = true + if (data.includes('Chat Messages')) body.chat_message = true + if (data.includes('Chat Feedbacks')) body.chat_feedback = true + if 
(data.includes('Custom Templates')) body.custom_template = true + if (data.includes('Document Stores')) body.document_store = true + if (data.includes('Executions')) body.execution = true if (data.includes('Tools')) body.tool = true if (data.includes('Variables')) body.variable = true - if (data.includes('Assistants')) body.assistant = true exportAllApi.request(body) } diff --git a/packages/ui/src/layout/MainLayout/ViewHeader.jsx b/packages/ui/src/layout/MainLayout/ViewHeader.jsx index d6c10e819..2be32d8a5 100644 --- a/packages/ui/src/layout/MainLayout/ViewHeader.jsx +++ b/packages/ui/src/layout/MainLayout/ViewHeader.jsx @@ -54,7 +54,7 @@ const ViewHeader = ({ import('@/views/canvas'))) const MarketplaceCanvas = Loadable(lazy(() => import('@/views/marketplaces/MarketplaceCanvas'))) +const CanvasV2 = Loadable(lazy(() => import('@/views/agentflowsv2/Canvas'))) +const MarketplaceCanvasV2 = Loadable(lazy(() => import('@/views/agentflowsv2/MarketplaceCanvas'))) // ==============================|| CANVAS ROUTING ||============================== // @@ -30,9 +32,21 @@ const CanvasRoutes = { path: '/agentcanvas/:id', element: }, + { + path: '/v2/agentcanvas', + element: + }, + { + path: '/v2/agentcanvas/:id', + element: + }, { path: '/marketplace/:id', element: + }, + { + path: '/v2/marketplace/:id', + element: } ] } diff --git a/packages/ui/src/routes/ExecutionRoutes.jsx b/packages/ui/src/routes/ExecutionRoutes.jsx new file mode 100644 index 000000000..a70232e3a --- /dev/null +++ b/packages/ui/src/routes/ExecutionRoutes.jsx @@ -0,0 +1,23 @@ +import { lazy } from 'react' + +// project imports +import Loadable from '@/ui-component/loading/Loadable' +import MinimalLayout from '@/layout/MinimalLayout' + +// canvas routing +const PublicExecutionDetails = Loadable(lazy(() => import('@/views/agentexecutions/PublicExecutionDetails'))) + +// ==============================|| CANVAS ROUTING ||============================== // + +const ExecutionRoutes = { + path: '/', + element: , + 
children: [ + { + path: '/execution/:id', + element: + } + ] +} + +export default ExecutionRoutes diff --git a/packages/ui/src/routes/MainRoutes.jsx b/packages/ui/src/routes/MainRoutes.jsx index b4b9bbe4a..f50873de3 100644 --- a/packages/ui/src/routes/MainRoutes.jsx +++ b/packages/ui/src/routes/MainRoutes.jsx @@ -39,6 +39,9 @@ const LoaderConfigPreviewChunks = Loadable(lazy(() => import('@/views/docstore/L const VectorStoreConfigure = Loadable(lazy(() => import('@/views/docstore/VectorStoreConfigure'))) const VectorStoreQuery = Loadable(lazy(() => import('@/views/docstore/VectorStoreQuery'))) +// execution routing +const Executions = Loadable(lazy(() => import('@/views/agentexecutions'))) + // ==============================|| MAIN ROUTING ||============================== // const MainRoutes = { @@ -57,6 +60,10 @@ const MainRoutes = { path: '/agentflows', element: }, + { + path: '/executions', + element: + }, { path: '/marketplaces', element: diff --git a/packages/ui/src/routes/index.jsx b/packages/ui/src/routes/index.jsx index f0d9d5e96..3d40f2f9a 100644 --- a/packages/ui/src/routes/index.jsx +++ b/packages/ui/src/routes/index.jsx @@ -4,10 +4,11 @@ import { useRoutes } from 'react-router-dom' import MainRoutes from './MainRoutes' import CanvasRoutes from './CanvasRoutes' import ChatbotRoutes from './ChatbotRoutes' +import ExecutionRoutes from './ExecutionRoutes' import config from '@/config' // ==============================|| ROUTING RENDER ||============================== // export default function ThemeRoutes() { - return useRoutes([MainRoutes, CanvasRoutes, ChatbotRoutes], config.basename) + return useRoutes([MainRoutes, CanvasRoutes, ChatbotRoutes, ExecutionRoutes], config.basename) } diff --git a/packages/ui/src/store/constant.js b/packages/ui/src/store/constant.js index de700ebe1..15a468955 100644 --- a/packages/ui/src/store/constant.js +++ b/packages/ui/src/store/constant.js @@ -1,4 +1,22 @@ // constant +import { + IconLibrary, + IconTools, + 
IconFunctionFilled, + IconMessageCircleFilled, + IconRobot, + IconArrowsSplit, + IconPlayerPlayFilled, + IconSparkles, + IconReplaceUser, + IconRepeat, + IconSubtask, + IconNote, + IconWorld, + IconRelationOneToManyFilled, + IconVectorBezier2 +} from '@tabler/icons-react' + export const gridSpacing = 3 export const drawerWidth = 260 export const appDrawerWidth = 320 @@ -8,3 +26,80 @@ export const baseURL = import.meta.env.VITE_API_BASE_URL || window.location.orig export const uiBaseURL = import.meta.env.VITE_UI_BASE_URL || window.location.origin export const FLOWISE_CREDENTIAL_ID = 'FLOWISE_CREDENTIAL_ID' export const REDACTED_CREDENTIAL_VALUE = '_FLOWISE_BLANK_07167752-1a71-43b1-bf8f-4f32252165db' +export const AGENTFLOW_ICONS = [ + { + name: 'conditionAgentflow', + icon: IconArrowsSplit, + color: '#FFB938' + }, + { + name: 'startAgentflow', + icon: IconPlayerPlayFilled, + color: '#7EE787' + }, + { + name: 'llmAgentflow', + icon: IconSparkles, + color: '#64B5F6' + }, + { + name: 'agentAgentflow', + icon: IconRobot, + color: '#4DD0E1' + }, + { + name: 'humanInputAgentflow', + icon: IconReplaceUser, + color: '#6E6EFD' + }, + { + name: 'loopAgentflow', + icon: IconRepeat, + color: '#FFA07A' + }, + { + name: 'directReplyAgentflow', + icon: IconMessageCircleFilled, + color: '#4DDBBB' + }, + { + name: 'customFunctionAgentflow', + icon: IconFunctionFilled, + color: '#E4B7FF' + }, + { + name: 'toolAgentflow', + icon: IconTools, + color: '#d4a373' + }, + { + name: 'retrieverAgentflow', + icon: IconLibrary, + color: '#b8bedd' + }, + { + name: 'conditionAgentAgentflow', + icon: IconSubtask, + color: '#ff8fab' + }, + { + name: 'stickyNoteAgentflow', + icon: IconNote, + color: '#fee440' + }, + { + name: 'httpAgentflow', + icon: IconWorld, + color: '#FF7F7F' + }, + { + name: 'iterationAgentflow', + icon: IconRelationOneToManyFilled, + color: '#9C89B8' + }, + { + name: 'executeFlowAgentflow', + icon: IconVectorBezier2, + color: '#a3b18a' + } +] diff --git 
a/packages/ui/src/store/context/ReactFlowContext.jsx b/packages/ui/src/store/context/ReactFlowContext.jsx index 99f371386..d59f28d50 100644 --- a/packages/ui/src/store/context/ReactFlowContext.jsx +++ b/packages/ui/src/store/context/ReactFlowContext.jsx @@ -1,8 +1,8 @@ import { createContext, useState } from 'react' import { useDispatch } from 'react-redux' import PropTypes from 'prop-types' -import { getUniqueNodeId } from '@/utils/genericHelper' -import { cloneDeep } from 'lodash' +import { getUniqueNodeId, showHideInputParams } from '@/utils/genericHelper' +import { cloneDeep, isEqual } from 'lodash' import { SET_DIRTY } from '@/store/actions' const initialValue = { @@ -10,7 +10,8 @@ const initialValue = { setReactFlowInstance: () => {}, duplicateNode: () => {}, deleteNode: () => {}, - deleteEdge: () => {} + deleteEdge: () => {}, + onNodeDataChange: () => {} } export const flowContext = createContext(initialValue) @@ -19,10 +20,112 @@ export const ReactFlowContext = ({ children }) => { const dispatch = useDispatch() const [reactFlowInstance, setReactFlowInstance] = useState(null) + const onAgentflowNodeStatusUpdate = ({ nodeId, status, error }) => { + reactFlowInstance.setNodes((nds) => + nds.map((node) => { + if (node.id === nodeId) { + node.data = { + ...node.data, + status, + error + } + } + return node + }) + ) + } + + const clearAgentflowNodeStatus = () => { + reactFlowInstance.setNodes((nds) => + nds.map((node) => { + node.data = { + ...node.data, + status: undefined, + error: undefined + } + return node + }) + ) + } + + const onNodeDataChange = ({ nodeId, inputParam, newValue }) => { + const updatedNodes = reactFlowInstance.getNodes().map((node) => { + if (node.id === nodeId) { + const updatedInputs = { ...node.data.inputs } + + updatedInputs[inputParam.name] = newValue + + const updatedInputParams = showHideInputParams({ + ...node.data, + inputs: updatedInputs + }) + + // Remove inputs with display set to false + Object.keys(updatedInputs).forEach((key) 
=> { + const input = updatedInputParams.find((param) => param.name === key) + if (input && input.display === false) { + delete updatedInputs[key] + } + }) + + return { + ...node, + data: { + ...node.data, + inputParams: updatedInputParams, + inputs: updatedInputs + } + } + } + return node + }) + + // Check if any node's inputParams have changed before updating + const hasChanges = updatedNodes.some( + (node, index) => !isEqual(node.data.inputParams, reactFlowInstance.getNodes()[index].data.inputParams) + ) + + if (hasChanges) { + reactFlowInstance.setNodes(updatedNodes) + } + } + const deleteNode = (nodeid) => { deleteConnectedInput(nodeid, 'node') - reactFlowInstance.setNodes(reactFlowInstance.getNodes().filter((n) => n.id !== nodeid)) - reactFlowInstance.setEdges(reactFlowInstance.getEdges().filter((ns) => ns.source !== nodeid && ns.target !== nodeid)) + + // Gather all nodes to be deleted (parent and all descendants) + const nodesToDelete = new Set() + + // Helper function to collect all descendant nodes recursively + const collectDescendants = (parentId) => { + const childNodes = reactFlowInstance.getNodes().filter((node) => node.parentNode === parentId) + + childNodes.forEach((childNode) => { + nodesToDelete.add(childNode.id) + collectDescendants(childNode.id) + }) + } + + // Collect all descendants first + collectDescendants(nodeid) + + // Add the parent node itself last + nodesToDelete.add(nodeid) + + // Clean up inputs for all nodes to be deleted + nodesToDelete.forEach((id) => { + if (id !== nodeid) { + // Skip parent node as it's already processed at the beginning + deleteConnectedInput(id, 'node') + } + }) + + // Filter out all nodes and edges in a single operation + reactFlowInstance.setNodes((nodes) => nodes.filter((node) => !nodesToDelete.has(node.id))) + + // Remove all edges connected to any of the deleted nodes + reactFlowInstance.setEdges((edges) => edges.filter((edge) => !nodesToDelete.has(edge.source) && !nodesToDelete.has(edge.target))) + 
dispatch({ type: SET_DIRTY }) } @@ -72,7 +175,7 @@ export const ReactFlowContext = ({ children }) => { } } - const duplicateNode = (id) => { + const duplicateNode = (id, distance = 50) => { const nodes = reactFlowInstance.getNodes() const originalNode = nodes.find((n) => n.id === id) if (originalNode) { @@ -83,16 +186,17 @@ export const ReactFlowContext = ({ children }) => { ...clonedNode, id: newNodeId, position: { - x: clonedNode.position.x + 400, + x: clonedNode.position.x + clonedNode.width + distance, y: clonedNode.position.y }, positionAbsolute: { - x: clonedNode.positionAbsolute.x + 400, + x: clonedNode.positionAbsolute.x + clonedNode.width + distance, y: clonedNode.positionAbsolute.y }, data: { ...clonedNode.data, - id: newNodeId + id: newNodeId, + label: clonedNode.data.label + ` (${newNodeId.split('_').pop()})` }, selected: false } @@ -147,7 +251,10 @@ export const ReactFlowContext = ({ children }) => { setReactFlowInstance, deleteNode, deleteEdge, - duplicateNode + duplicateNode, + onAgentflowNodeStatusUpdate, + clearAgentflowNodeStatus, + onNodeDataChange }} > {children} diff --git a/packages/ui/src/themes/palette.js b/packages/ui/src/themes/palette.js index 750fe1be3..838822b28 100644 --- a/packages/ui/src/themes/palette.js +++ b/packages/ui/src/themes/palette.js @@ -78,6 +78,10 @@ export default function themePalette(theme) { paper: theme.paper, default: theme.backgroundDefault }, + textBackground: { + main: theme.customization.isDarkMode ? theme.colors?.darkPrimary800 : theme.colors?.grey50, + border: theme.customization.isDarkMode ? theme.colors?.transparent : theme.colors?.grey400 + }, card: { main: theme.customization.isDarkMode ? theme.colors?.darkPrimaryMain : theme.colors?.paper, light: theme.customization.isDarkMode ? 
theme.colors?.darkPrimary200 : theme.colors?.paper, diff --git a/packages/ui/src/ui-component/array/ArrayRenderer.jsx b/packages/ui/src/ui-component/array/ArrayRenderer.jsx new file mode 100644 index 000000000..c38499547 --- /dev/null +++ b/packages/ui/src/ui-component/array/ArrayRenderer.jsx @@ -0,0 +1,261 @@ +import { useState, useEffect, useContext } from 'react' +import { useSelector } from 'react-redux' +import PropTypes from 'prop-types' +import { Chip, Box, Button, IconButton } from '@mui/material' +import { useTheme } from '@mui/material/styles' +import { IconTrash, IconPlus } from '@tabler/icons-react' +import NodeInputHandler from '@/views/canvas/NodeInputHandler' +import { showHideInputs } from '@/utils/genericHelper' +import { cloneDeep } from 'lodash' +import { flowContext } from '@/store/context/ReactFlowContext' + +export const ArrayRenderer = ({ inputParam, data, disabled }) => { + const [arrayItems, setArrayItems] = useState([]) // these are the actual values. Ex: [{name: 'John', age: 30}, {name: 'Jane', age: 25}] + const [itemParameters, setItemParameters] = useState([]) // these are the input parameters for each array item. 
Ex: [{label: 'Name', type: 'string', display: true}, {label: 'age', type: 'number', display: false}] + const theme = useTheme() + const customization = useSelector((state) => state.customization) + const { reactFlowInstance } = useContext(flowContext) + + // Handler for when input values change within array items + const handleItemInputChange = ({ inputParam: changedParam, newValue }, itemIndex) => { + // Create deep copy to avoid mutating state directly + let clonedData = cloneDeep(data) + + // Update the specific array item that changed + const updatedArrayItems = [...arrayItems] + const updatedItem = { ...updatedArrayItems[itemIndex] } + + // Reset the value of fields which has show/hide rules, so the old values don't persist + for (let i = 0; i < inputParam.array.length; i += 1) { + const fieldDef = inputParam.array[i] + if (fieldDef.show || fieldDef.hide) { + updatedItem[fieldDef.name] = fieldDef.default || '' + } + } + + // Set the new value for the changed field + updatedItem[changedParam.name] = newValue + updatedArrayItems[itemIndex] = updatedItem + + // Update local state and parent data + setArrayItems(updatedArrayItems) + data.inputs[inputParam.name] = updatedArrayItems + clonedData.inputs[inputParam.name] = updatedArrayItems + + // Recalculate display parameters based on new values + const newItemParams = showHideInputs(clonedData, 'inputParams', cloneDeep(inputParam.array), itemIndex) + + if (newItemParams.length) { + const updatedItemParams = [...itemParameters] + updatedItemParams[itemIndex] = newItemParams + setItemParameters(updatedItemParams) + } + } + + // Initialize array items and parameters when component mounts or data changes + useEffect(() => { + const initialArrayItems = data.inputs[inputParam.name] || [] + setArrayItems(initialArrayItems) + + // Calculate initial display parameters for each array item + const initialItemParameters = [] + for (let i = 0; i < initialArrayItems.length; i += 1) { + const itemParams = showHideInputs(data, 
'inputParams', cloneDeep(inputParam.array), i) + if (itemParams.length) { + initialItemParameters.push(itemParams) + } + } + + setItemParameters(initialItemParameters) + }, [data, inputParam]) + + const updateOutputAnchors = (items, type, indexToDelete) => { + if (data.name !== 'conditionAgentflow' && data.name !== 'conditionAgentAgentflow') return + + const updatedOutputs = items.map((_, i) => ({ + id: `${data.id}-output-${i}`, + label: i, + name: i, + description: `Condition ${i}` + })) + + // always append additional output anchor for ELSE for condition + if (data.name === 'conditionAgentflow') { + updatedOutputs.push({ + id: `${data.id}-output-${items.length}`, + label: items.length, + name: items.length, + description: 'Else' + }) + } + data.outputAnchors = updatedOutputs + + const nodes = reactFlowInstance.getNodes() + + // Update the current node with new output anchors + const updatedNodes = nodes.map((node) => { + if (node.id === data.id) { + return { + ...node, + data: { + ...node.data, + outputAnchors: updatedOutputs + } + } + } + return node + }) + + reactFlowInstance.setNodes(updatedNodes) + + // Update edges if an item is deleted + if (type === 'DELETE') { + const edges = reactFlowInstance.getEdges() + const updatedEdges = edges.filter((edge) => { + if (edge.sourceHandle && edge.sourceHandle.includes(data.id)) { + const sourceHandleIndex = edge.sourceHandle.split('-').pop() + if (sourceHandleIndex === indexToDelete.toString()) { + return false + } + } + return true + }) + reactFlowInstance.setEdges(updatedEdges) + } + } + + // Handler for adding new array items + const handleAddItem = () => { + // Initialize new item with default values + let newItem = {} + + for (const fieldDef of inputParam.array) { + newItem[fieldDef.name] = fieldDef.default || '' + } + + /*if (inputParam.default?.length) { + newItem = inputParam.default[0] + }*/ + + // Update array items + const updatedArrayItems = [...arrayItems, newItem] + setArrayItems(updatedArrayItems) + 
data.inputs[inputParam.name] = updatedArrayItems + + // Calculate display parameters for all items including new one + const updatedItemParameters = [] + for (let i = 0; i < updatedArrayItems.length; i += 1) { + const itemParams = showHideInputs(data, 'inputParams', cloneDeep(inputParam.array), i) + if (itemParams.length) { + updatedItemParameters.push(itemParams) + } + } + setItemParameters(updatedItemParameters) + + updateOutputAnchors(updatedArrayItems, 'ADD') + } + + // Handler for deleting array items + const handleDeleteItem = (indexToDelete) => { + const updatedArrayItems = arrayItems.filter((_, i) => i !== indexToDelete) + setArrayItems(updatedArrayItems) + data.inputs[inputParam.name] = updatedArrayItems + + const updatedItemParameters = itemParameters.filter((_, i) => i !== indexToDelete) + setItemParameters(updatedItemParameters) + + updateOutputAnchors(updatedArrayItems, 'DELETE', indexToDelete) + } + + const isDeleteButtonVisible = (data.name !== 'conditionAgentflow' && data.name !== 'conditionAgentAgentflow') || arrayItems.length > 1 + + return ( + <> + {/* Render each array item */} + {arrayItems.map((itemValues, index) => { + // Create item data directly from parent data + const itemData = { + ...data, + inputs: itemValues, + inputParams: itemParameters[index] || [] + } + + return ( + + {/* Delete button for array item */} + {isDeleteButtonVisible && ( + handleDeleteItem(index)} + sx={{ + position: 'absolute', + height: '35px', + width: '35px', + right: 10, + top: 10, + color: customization?.isDarkMode ? 
theme.palette.grey[300] : 'inherit', + '&:hover': { color: 'red' } + }} + > + + + )} + + + + {/* Render input fields for array item */} + {itemParameters[index] + .filter((param) => param.display !== false) + .map((param, _index) => ( + { + handleItemInputChange({ inputParam, newValue }, index) + }} + /> + ))} + + ) + })} + + {/* Add new item button */} + + + ) +} + +ArrayRenderer.propTypes = { + inputParam: PropTypes.object.isRequired, + data: PropTypes.object.isRequired, + disabled: PropTypes.bool +} diff --git a/packages/ui/src/ui-component/button/FlowListMenu.jsx b/packages/ui/src/ui-component/button/FlowListMenu.jsx index f63365597..35b644da7 100644 --- a/packages/ui/src/ui-component/button/FlowListMenu.jsx +++ b/packages/ui/src/ui-component/button/FlowListMenu.jsx @@ -166,7 +166,11 @@ export default function FlowListMenu({ chatflow, isAgentCanvas, setError, update } try { await updateChatflowApi.request(chatflow.id, updateBody) - await updateFlowsApi.request() + if (isAgentCanvas && localStorage.getItem('agentFlowVersion') === 'v2') { + await updateFlowsApi.request('AGENTFLOW') + } else { + await updateFlowsApi.request(isAgentCanvas ? 'MULTIAGENT' : undefined) + } } catch (error) { if (setError) setError(error) enqueueSnackbar({ @@ -205,7 +209,7 @@ export default function FlowListMenu({ chatflow, isAgentCanvas, setError, update } try { await updateChatflowApi.request(chatflow.id, updateBody) - await updateFlowsApi.request() + await updateFlowsApi.request(isAgentCanvas ? 'AGENTFLOW' : undefined) } catch (error) { if (setError) setError(error) enqueueSnackbar({ @@ -237,7 +241,11 @@ export default function FlowListMenu({ chatflow, isAgentCanvas, setError, update if (isConfirmed) { try { await chatflowsApi.deleteChatflow(chatflow.id) - await updateFlowsApi.request() + if (isAgentCanvas && localStorage.getItem('agentFlowVersion') === 'v2') { + await updateFlowsApi.request('AGENTFLOW') + } else { + await updateFlowsApi.request(isAgentCanvas ? 
'MULTIAGENT' : undefined) + } } catch (error) { if (setError) setError(error) enqueueSnackbar({ diff --git a/packages/ui/src/ui-component/cards/FollowUpPromptsCard.jsx b/packages/ui/src/ui-component/cards/FollowUpPromptsCard.jsx index 75aceb85b..0ad96fb36 100644 --- a/packages/ui/src/ui-component/cards/FollowUpPromptsCard.jsx +++ b/packages/ui/src/ui-component/cards/FollowUpPromptsCard.jsx @@ -12,25 +12,26 @@ const FollowUpPromptsCard = ({ isGrid, followUpPrompts, sx, onPromptClick }) => className={'button-container'} sx={{ width: '100%', maxWidth: isGrid ? 'inherit' : '400px', p: 1.5, display: 'flex', gap: 1, ...sx }} > - {followUpPrompts.map((fp, index) => ( - onPromptClick(fp, e)} - sx={{ - backgroundColor: 'transparent', - border: '1px solid', - boxShadow: '0px 2px 1px -1px rgba(0,0,0,0.2)', - color: '#2196f3', - transition: 'all 300ms cubic-bezier(0.4, 0, 0.2, 1) 0ms', - '&:hover': { - backgroundColor: customization.isDarkMode ? 'rgba(0, 0, 0, 0.12)' : 'rgba(0, 0, 0, 0.05)', - border: '1px solid' - } - }} - /> - ))} + {Array.isArray(followUpPrompts) && + followUpPrompts.map((fp, index) => ( + onPromptClick(fp, e)} + sx={{ + backgroundColor: 'transparent', + border: '1px solid', + boxShadow: '0px 2px 1px -1px rgba(0,0,0,0.2)', + color: '#2196f3', + transition: 'all 300ms cubic-bezier(0.4, 0, 0.2, 1) 0ms', + '&:hover': { + backgroundColor: customization.isDarkMode ? 
'rgba(0, 0, 0, 0.12)' : 'rgba(0, 0, 0, 0.05)', + border: '1px solid' + } + }} + /> + ))} ) } diff --git a/packages/ui/src/ui-component/cards/ItemCard.jsx b/packages/ui/src/ui-component/cards/ItemCard.jsx index d51f5fead..c2342c763 100644 --- a/packages/ui/src/ui-component/cards/ItemCard.jsx +++ b/packages/ui/src/ui-component/cards/ItemCard.jsx @@ -29,7 +29,7 @@ const CardWrapper = styled(MainCard)(({ theme }) => ({ // ===========================|| CONTRACT CARD ||=========================== // -const ItemCard = ({ data, images, onClick }) => { +const ItemCard = ({ data, images, icons, onClick }) => { const theme = useTheme() const customization = useSelector((state) => state.customization) @@ -106,7 +106,7 @@ const ItemCard = ({ data, images, onClick }) => { )} - {images && ( + {(images?.length > 0 || icons?.length > 0) && ( { gap: 1 }} > - {images.slice(0, images.length > 3 ? 3 : images.length).map((img) => ( - - - - ))} - {images.length > 3 && ( + {[ + ...(images || []).map((img) => ({ type: 'image', src: img })), + ...(icons || []).map((ic) => ({ type: 'icon', icon: ic.icon, color: ic.color })) + ] + .slice(0, 3) + .map((item, index) => + item.type === 'image' ? ( + + + + ) : ( +
+ +
+ ) + )} + {images?.length + (icons?.length || 0) > 3 && ( - + {images.length - 3} More + + {images?.length + (icons?.length || 0) - 3} More )}
@@ -146,6 +170,7 @@ const ItemCard = ({ data, images, onClick }) => { ItemCard.propTypes = { data: PropTypes.object, images: PropTypes.array, + icons: PropTypes.array, onClick: PropTypes.func } diff --git a/packages/ui/src/ui-component/cards/NodeCardWrapper.js b/packages/ui/src/ui-component/cards/NodeCardWrapper.js index 7d7cafe59..968d0aa94 100644 --- a/packages/ui/src/ui-component/cards/NodeCardWrapper.js +++ b/packages/ui/src/ui-component/cards/NodeCardWrapper.js @@ -7,14 +7,14 @@ import MainCard from './MainCard' const NodeCardWrapper = styled(MainCard)(({ theme }) => ({ background: theme.palette.card.main, color: theme.darkTextPrimary, - border: 'solid 1px', - borderColor: theme.palette.primary[200] + 75, + border: `1px solid ${theme.customization?.isDarkMode ? theme.palette.grey[900] + 25 : theme.palette.primary[200] + 75}`, width: '300px', height: 'auto', padding: '10px', - boxShadow: '0 2px 14px 0 rgb(32 40 45 / 8%)', + boxShadow: `rgba(0, 0, 0, 0.05) 0px 0px 0px 1px`, '&:hover': { - borderColor: theme.palette.primary.main + borderColor: theme.palette.primary.main, + boxShadow: `rgba(0, 0, 0, 0.1) 0px 10px 15px -3px, rgba(0, 0, 0, 0.05) 0px 4px 6px -2px` } })) diff --git a/packages/ui/src/ui-component/dialog/AdditionalParamsDialog.jsx b/packages/ui/src/ui-component/dialog/AdditionalParamsDialog.jsx index 53cbebd8f..63035e53f 100644 --- a/packages/ui/src/ui-component/dialog/AdditionalParamsDialog.jsx +++ b/packages/ui/src/ui-component/dialog/AdditionalParamsDialog.jsx @@ -47,15 +47,17 @@ const AdditionalParamsDialog = ({ show, dialogProps, onCancel }) => { overflowX: 'hidden' }} > - {inputParams.map((inputParam, index) => ( - - ))} + {inputParams + .filter((inputParam) => inputParam.display !== false) + .map((inputParam, index) => ( + + ))} diff --git a/packages/ui/src/ui-component/dialog/AgentflowGeneratorDialog.jsx b/packages/ui/src/ui-component/dialog/AgentflowGeneratorDialog.jsx new file mode 100644 index 000000000..2819e2e62 --- /dev/null +++ 
b/packages/ui/src/ui-component/dialog/AgentflowGeneratorDialog.jsx @@ -0,0 +1,369 @@ +import { createPortal } from 'react-dom' +import { cloneDeep } from 'lodash' +import { useState, useEffect, useContext } from 'react' +import { useDispatch, useSelector } from 'react-redux' +import PropTypes from 'prop-types' +import { Box, Typography, OutlinedInput, DialogActions, Button, Dialog, DialogContent, DialogTitle, LinearProgress } from '@mui/material' +import chatflowsApi from '@/api/chatflows' +import { closeSnackbar as closeSnackbarAction, enqueueSnackbar as enqueueSnackbarAction } from '@/store/actions' +import { IconX, IconSparkles, IconArrowLeft } from '@tabler/icons-react' +import useNotifier from '@/utils/useNotifier' +import { LoadingButton } from '@mui/lab' +import generatorGIF from '@/assets/images/agentflow-generator.gif' +import { flowContext } from '@/store/context/ReactFlowContext' +import { Dropdown } from '@/ui-component/dropdown/Dropdown' +import { useTheme } from '@mui/material/styles' +import assistantsApi from '@/api/assistants' +import { baseURL } from '@/store/constant' +import { initNode } from '@/utils/genericHelper' +import DocStoreInputHandler from '@/views/docstore/DocStoreInputHandler' +import useApi from '@/hooks/useApi' + +const defaultInstructions = [ + { + text: 'An agent that can autonomously search the web and generate report' + }, + { + text: 'Summarize a document' + }, + { + text: 'Generate response to user queries and send it to Slack' + }, + { + text: 'A team of agents that can handle all customer queries' + } +] + +const AgentflowGeneratorDialog = ({ show, dialogProps, onCancel, onConfirm }) => { + const portalElement = document.getElementById('portal') + const [customAssistantInstruction, setCustomAssistantInstruction] = useState('') + const [generatedInstruction, setGeneratedInstruction] = useState('') + const [loading, setLoading] = useState(false) + const [progress, setProgress] = useState(0) + const [chatModelsComponents, 
setChatModelsComponents] = useState([]) + const [chatModelsOptions, setChatModelsOptions] = useState([]) + const [selectedChatModel, setSelectedChatModel] = useState({}) + const customization = useSelector((state) => state.customization) + + const getChatModelsApi = useApi(assistantsApi.getChatModels) + const { reactFlowInstance } = useContext(flowContext) + const theme = useTheme() + + // ==============================|| Snackbar ||============================== // + const dispatch = useDispatch() + useNotifier() + const enqueueSnackbar = (...args) => dispatch(enqueueSnackbarAction(...args)) + const closeSnackbar = (...args) => dispatch(closeSnackbarAction(...args)) + + useEffect(() => { + if (getChatModelsApi.data) { + setChatModelsComponents(getChatModelsApi.data) + + // Set options + const options = getChatModelsApi.data.map((chatModel) => ({ + label: chatModel.label, + name: chatModel.name, + imageSrc: `${baseURL}/api/v1/node-icon/${chatModel.name}` + })) + setChatModelsOptions(options) + } + + // eslint-disable-next-line react-hooks/exhaustive-deps + }, [getChatModelsApi.data]) + + // Simulate progress for the fake progress bar + useEffect(() => { + let timer + if (loading) { + setProgress(0) + timer = setInterval(() => { + setProgress((prevProgress) => { + // Slowly increase to 95% to give the impression of work happening + // Last 5% will complete when the actual work is done + if (prevProgress >= 95) { + clearInterval(timer) + return 95 + } + // Speed up in the middle, slow at the beginning and end + const increment = prevProgress < 30 ? 3 : prevProgress < 60 ? 5 : prevProgress < 80 ? 
2 : 0.5 + return Math.min(prevProgress + increment, 95) + }) + }, 500) + } else { + // When loading is done, immediately set to 100% + setProgress(100) + } + + return () => { + if (timer) { + clearInterval(timer) + } + } + }, [loading]) + + const onGenerate = async () => { + if (!customAssistantInstruction.trim()) return + + try { + setLoading(true) + + const response = await chatflowsApi.generateAgentflow({ + question: customAssistantInstruction.trim(), + selectedChatModel: selectedChatModel + }) + + if (response.data && response.data.nodes && response.data.edges) { + reactFlowInstance.setNodes(response.data.nodes) + reactFlowInstance.setEdges(response.data.edges) + onConfirm() + } else { + enqueueSnackbar({ + message: response.error || 'Failed to generate agentflow', + options: { + key: new Date().getTime() + Math.random(), + variant: 'error', + persist: true, + action: (key) => ( + + ) + } + }) + } + } catch (error) { + enqueueSnackbar({ + message: error.response?.data?.message || 'Failed to generate agentflow', + options: { + key: new Date().getTime() + Math.random(), + variant: 'error', + persist: true, + action: (key) => ( + + ) + } + }) + } finally { + setLoading(false) + } + } + + // clear the state when dialog is closed + useEffect(() => { + if (!show) { + setCustomAssistantInstruction('') + setGeneratedInstruction('') + setProgress(0) + } else { + getChatModelsApi.request() + } + + // eslint-disable-next-line react-hooks/exhaustive-deps + }, [show]) + + const component = show ? ( + <> + + + {dialogProps.title} + + + {loading ? ( +
+ Generating Agentflow + + Generating your Agentflow... + + + + + {`${Math.round(progress)}%`} + + +
+ ) : ( + <> + {dialogProps.description} +
+ {defaultInstructions.map((instruction, index) => { + return ( + + ) + })} +
+ {!generatedInstruction && ( + setCustomAssistantInstruction(event.target.value)} + /> + )} + {generatedInstruction && ( + setGeneratedInstruction(event.target.value)} + /> + )} + +
+ + Select model to generate agentflow * + +
+ { + if (!newValue) { + setSelectedChatModel({}) + } else { + const foundChatComponent = chatModelsComponents.find((chatModel) => chatModel.name === newValue) + if (foundChatComponent) { + const chatModelId = `${foundChatComponent.name}_0` + const clonedComponent = cloneDeep(foundChatComponent) + const initChatModelData = initNode(clonedComponent, chatModelId) + setSelectedChatModel(initChatModelData) + } + } + }} + value={selectedChatModel ? selectedChatModel?.name : 'choose an option'} + /> +
+ {selectedChatModel && Object.keys(selectedChatModel).length > 0 && ( + + {(selectedChatModel.inputParams ?? []) + .filter((inputParam) => !inputParam.hidden) + .map((inputParam, index) => ( + + ))} + + )} + + )} +
+ + {loading ? null : ( + <> + {!generatedInstruction && ( + { + onGenerate() + }} + sx={{ + background: 'linear-gradient(45deg, #FF6B6B 30%, #FF8E53 90%)', + '&:hover': { background: 'linear-gradient(45deg, #FF8E53 30%, #FF6B6B 90%)' } + }} + startIcon={} + disabled={ + loading || + !customAssistantInstruction.trim() || + !selectedChatModel || + !Object.keys(selectedChatModel).length + } + > + Generate + + )} + {generatedInstruction && ( + + )} + + )} + +
+ + ) : null + + return createPortal(component, portalElement) +} + +AgentflowGeneratorDialog.propTypes = { + show: PropTypes.bool, + dialogProps: PropTypes.object, + onConfirm: PropTypes.func, + onCancel: PropTypes.func +} + +export default AgentflowGeneratorDialog diff --git a/packages/ui/src/ui-component/dialog/ChatflowConfigurationDialog.jsx b/packages/ui/src/ui-component/dialog/ChatflowConfigurationDialog.jsx index 081f6c91e..cfa10cfbe 100644 --- a/packages/ui/src/ui-component/dialog/ChatflowConfigurationDialog.jsx +++ b/packages/ui/src/ui-component/dialog/ChatflowConfigurationDialog.jsx @@ -11,6 +11,7 @@ import StarterPrompts from '@/ui-component/extended/StarterPrompts' import Leads from '@/ui-component/extended/Leads' import FollowUpPrompts from '@/ui-component/extended/FollowUpPrompts' import FileUpload from '@/ui-component/extended/FileUpload' +import PostProcessing from '@/ui-component/extended/PostProcessing' const CHATFLOW_CONFIGURATION_TABS = [ { @@ -44,6 +45,11 @@ const CHATFLOW_CONFIGURATION_TABS = [ { label: 'File Upload', id: 'fileUpload' + }, + { + label: 'Post Processing', + id: 'postProcessing', + hideInAgentFlow: true } ] @@ -76,10 +82,12 @@ function a11yProps(index) { } } -const ChatflowConfigurationDialog = ({ show, dialogProps, onCancel }) => { +const ChatflowConfigurationDialog = ({ show, isAgentCanvas, dialogProps, onCancel }) => { const portalElement = document.getElementById('portal') const [tabValue, setTabValue] = useState(0) + const filteredTabs = CHATFLOW_CONFIGURATION_TABS.filter((tab) => !isAgentCanvas || !tab.hideInAgentFlow) + const component = show ? ( { variant='scrollable' scrollButtons='auto' > - {CHATFLOW_CONFIGURATION_TABS.map((item, index) => ( + {filteredTabs.map((item, index) => ( { > ))} - {CHATFLOW_CONFIGURATION_TABS.map((item, index) => ( + {filteredTabs.map((item, index) => ( {item.id === 'security' && } {item.id === 'conversationStarters' ? 
: null} @@ -133,6 +141,7 @@ const ChatflowConfigurationDialog = ({ show, dialogProps, onCancel }) => { {item.id === 'analyseChatflow' ? : null} {item.id === 'leads' ? : null} {item.id === 'fileUpload' ? : null} + {item.id === 'postProcessing' ? : null} ))} @@ -144,6 +153,7 @@ const ChatflowConfigurationDialog = ({ show, dialogProps, onCancel }) => { ChatflowConfigurationDialog.propTypes = { show: PropTypes.bool, + isAgentCanvas: PropTypes.bool, dialogProps: PropTypes.object, onCancel: PropTypes.func } diff --git a/packages/ui/src/ui-component/dialog/ConditionDialog.jsx b/packages/ui/src/ui-component/dialog/ConditionDialog.jsx index 5c5367f02..546ff1abe 100644 --- a/packages/ui/src/ui-component/dialog/ConditionDialog.jsx +++ b/packages/ui/src/ui-component/dialog/ConditionDialog.jsx @@ -58,17 +58,19 @@ const ConditionDialog = ({ show, dialogProps, onCancel, onConfirm }) => { ))} - {inputParam.tabs.map((inputChildParam, index) => ( - - - - ))} + {inputParam.tabs + .filter((inputParam) => inputParam.display !== false) + .map((inputChildParam, index) => ( + + + + ))} )} diff --git a/packages/ui/src/ui-component/dialog/ExpandRichInputDialog.jsx b/packages/ui/src/ui-component/dialog/ExpandRichInputDialog.jsx new file mode 100644 index 000000000..4fb5204e7 --- /dev/null +++ b/packages/ui/src/ui-component/dialog/ExpandRichInputDialog.jsx @@ -0,0 +1,231 @@ +import { createPortal } from 'react-dom' +import { useState, useEffect } from 'react' +import { useDispatch } from 'react-redux' +import PropTypes from 'prop-types' +import PerfectScrollbar from 'react-perfect-scrollbar' + +// MUI +import { Button, Dialog, DialogActions, DialogContent, Typography, Box } from '@mui/material' +import { styled } from '@mui/material/styles' + +// Project Import +import { StyledButton } from '@/ui-component/button/StyledButton' + +// TipTap +import { useEditor, EditorContent } from '@tiptap/react' +import Placeholder from '@tiptap/extension-placeholder' +import { mergeAttributes } from 
'@tiptap/core' +import StarterKit from '@tiptap/starter-kit' +import Mention from '@tiptap/extension-mention' +import { suggestionOptions } from '@/ui-component/input/suggestionOption' +import { getAvailableNodesForVariable } from '@/utils/genericHelper' + +// Store +import { HIDE_CANVAS_DIALOG, SHOW_CANVAS_DIALOG } from '@/store/actions' + +// Add styled component for editor wrapper +const StyledEditorContent = styled(EditorContent)(({ theme, rows }) => ({ + '& .ProseMirror': { + padding: '0px 14px', + height: rows ? `${rows * 1.4375}rem` : '2.4rem', + overflowY: rows ? 'auto' : 'hidden', + overflowX: rows ? 'auto' : 'hidden', + lineHeight: rows ? '1.4375em' : '0.875em', + fontWeight: 500, + color: theme.palette.grey[900], + border: `1px solid ${theme.palette.textBackground.border}`, + borderRadius: '10px', + backgroundColor: theme.palette.textBackground.main, + boxSizing: 'border-box', + whiteSpace: rows ? 'pre-wrap' : 'nowrap', + '&:hover': { + borderColor: theme.palette.text.primary, + cursor: 'text' + }, + '&:focus': { + borderColor: theme.palette.primary.main, + boxShadow: `0 0 0 0px ${theme.palette.primary.main}`, + outline: 'none' + }, + '&[disabled]': { + backgroundColor: theme.palette.action.disabledBackground, + color: theme.palette.action.disabled + }, + // Placeholder for first paragraph when editor is empty + '& p.is-editor-empty:first-of-type::before': { + content: 'attr(data-placeholder)', + float: 'left', + color: theme.palette.text.primary, + opacity: 0.4, + pointerEvents: 'none', + height: 0 + } + } +})) + +// define your extension array +const extensions = (availableNodesForVariable, availableState, acceptNodeOutputAsVariable, nodes, nodeData, isNodeInsideInteration) => [ + StarterKit, + Mention.configure({ + HTMLAttributes: { + class: 'variable' + }, + renderHTML({ options, node }) { + return [ + 'span', + mergeAttributes(this.HTMLAttributes, options.HTMLAttributes), + `${options.suggestion.char} ${node.attrs.label ?? 
node.attrs.id} }}` + ] + }, + suggestion: suggestionOptions( + availableNodesForVariable, + availableState, + acceptNodeOutputAsVariable, + nodes, + nodeData, + isNodeInsideInteration + ), + deleteTriggerWithBackspace: true + }) +] + +const ExpandRichInputDialog = ({ show, dialogProps, onCancel, onInputHintDialogClicked, onConfirm }) => { + const portalElement = document.getElementById('portal') + + const dispatch = useDispatch() + + const [inputValue, setInputValue] = useState('') + const [inputParam, setInputParam] = useState(null) + const [availableNodesForVariable, setAvailableNodesForVariable] = useState([]) + const [availableState, setAvailableState] = useState([]) + const [nodeData, setNodeData] = useState({}) + const [isNodeInsideInteration, setIsNodeInsideInteration] = useState(false) + + useEffect(() => { + if (dialogProps.value) { + setInputValue(dialogProps.value) + } + if (dialogProps.inputParam) { + setInputParam(dialogProps.inputParam) + } + + return () => { + setInputValue('') + setInputParam(null) + } + }, [dialogProps]) + + useEffect(() => { + if (show) dispatch({ type: SHOW_CANVAS_DIALOG }) + else dispatch({ type: HIDE_CANVAS_DIALOG }) + return () => dispatch({ type: HIDE_CANVAS_DIALOG }) + }, [show, dispatch]) + + useEffect(() => { + if (!dialogProps.disabled && dialogProps.nodes && dialogProps.edges && dialogProps.nodeId && inputParam) { + const nodesForVariable = inputParam?.acceptVariable + ? 
getAvailableNodesForVariable(dialogProps.nodes, dialogProps.edges, dialogProps.nodeId, inputParam.id) + : [] + setAvailableNodesForVariable(nodesForVariable) + + const startAgentflowNode = dialogProps.nodes.find((node) => node.data.name === 'startAgentflow') + const state = startAgentflowNode?.data?.inputs?.startState + setAvailableState(state) + + const agentflowNode = dialogProps.nodes.find((node) => node.data.id === dialogProps.nodeId) + setNodeData(agentflowNode?.data) + + setIsNodeInsideInteration(dialogProps.nodes.find((node) => node.data.id === dialogProps.nodeId)?.extent === 'parent') + } + }, [dialogProps.disabled, inputParam, dialogProps.nodes, dialogProps.edges, dialogProps.nodeId]) + + const editor = useEditor( + { + extensions: [ + ...extensions( + availableNodesForVariable, + availableState, + inputParam?.acceptNodeOutputAsVariable, + dialogProps.nodes, + nodeData, + isNodeInsideInteration + ), + Placeholder.configure({ placeholder: inputParam?.placeholder }) + ], + content: inputValue, + onUpdate: ({ editor }) => { + setInputValue(editor.getHTML()) + }, + editable: !dialogProps.disabled + }, + [availableNodesForVariable] + ) + + // Focus the editor when dialog opens + useEffect(() => { + if (show && editor) { + setTimeout(() => { + editor.commands.focus() + }, 100) + } + }, [show, editor]) + + const component = show ? ( + + +
+ {inputParam && ( +
+
+ {inputParam.label} +
+ {inputParam.hint && ( + + )} +
+ + + + + +
+ )} +
+ + + + onConfirm(inputValue, inputParam.name)}> + {dialogProps.confirmButtonName} + + +
+ ) : null + + return createPortal(component, portalElement) +} + +ExpandRichInputDialog.propTypes = { + show: PropTypes.bool, + dialogProps: PropTypes.object, + onCancel: PropTypes.func, + onConfirm: PropTypes.func, + onInputHintDialogClicked: PropTypes.func +} + +export default ExpandRichInputDialog diff --git a/packages/ui/src/ui-component/dialog/ExportAsTemplateDialog.jsx b/packages/ui/src/ui-component/dialog/ExportAsTemplateDialog.jsx index e7251c257..539517682 100644 --- a/packages/ui/src/ui-component/dialog/ExportAsTemplateDialog.jsx +++ b/packages/ui/src/ui-component/dialog/ExportAsTemplateDialog.jsx @@ -44,7 +44,13 @@ const ExportAsTemplateDialog = ({ show, dialogProps, onCancel }) => { useEffect(() => { if (dialogProps.chatflow) { setName(dialogProps.chatflow.name) - setFlowType(dialogProps.chatflow.type === 'MULTIAGENT' ? 'Agentflow' : 'Chatflow') + if (dialogProps.chatflow.type === 'AGENTFLOW') { + setFlowType('AgentflowV2') + } else if (dialogProps.chatflow.type === 'MULTIAGENT') { + setFlowType('Agentflow') + } else if (dialogProps.chatflow.type === 'CHATFLOW') { + setFlowType('Chatflow') + } } if (dialogProps.tool) { diff --git a/packages/ui/src/ui-component/dialog/InputHintDialog.jsx b/packages/ui/src/ui-component/dialog/InputHintDialog.jsx index 0eb865141..498dfb139 100644 --- a/packages/ui/src/ui-component/dialog/InputHintDialog.jsx +++ b/packages/ui/src/ui-component/dialog/InputHintDialog.jsx @@ -1,11 +1,6 @@ import { createPortal } from 'react-dom' import PropTypes from 'prop-types' -import rehypeMathjax from 'rehype-mathjax' -import rehypeRaw from 'rehype-raw' -import remarkGfm from 'remark-gfm' -import remarkMath from 'remark-math' import { MemoizedReactMarkdown } from '@/ui-component/markdown/MemoizedReactMarkdown' -import { CodeBlock } from '@/ui-component/markdown/CodeBlock' import { Dialog, DialogContent, DialogTitle } from '@mui/material' const InputHintDialog = ({ show, dialogProps, onCancel }) => { @@ -24,29 +19,7 @@ const 
InputHintDialog = ({ show, dialogProps, onCancel }) => { {dialogProps.label} - - ) : ( - - {children} - - ) - } - }} - > - {dialogProps?.value} - + {dialogProps?.value}
) : null diff --git a/packages/ui/src/ui-component/dialog/NodeInfoDialog.jsx b/packages/ui/src/ui-component/dialog/NodeInfoDialog.jsx index a0bc8aed5..58affea28 100644 --- a/packages/ui/src/ui-component/dialog/NodeInfoDialog.jsx +++ b/packages/ui/src/ui-component/dialog/NodeInfoDialog.jsx @@ -7,10 +7,11 @@ import PropTypes from 'prop-types' import { Button, Dialog, DialogContent, DialogTitle } from '@mui/material' import { TableViewOnly } from '@/ui-component/table/Table' import { IconBook2 } from '@tabler/icons-react' +import { useTheme } from '@mui/material/styles' // Store import { HIDE_CANVAS_DIALOG, SHOW_CANVAS_DIALOG } from '@/store/actions' -import { baseURL } from '@/store/constant' +import { baseURL, AGENTFLOW_ICONS } from '@/store/constant' // API import configApi from '@/api/config' @@ -19,9 +20,17 @@ import useApi from '@/hooks/useApi' const NodeInfoDialog = ({ show, dialogProps, onCancel }) => { const portalElement = document.getElementById('portal') const dispatch = useDispatch() + const theme = useTheme() const getNodeConfigApi = useApi(configApi.getNodeConfig) + const renderIcon = (node) => { + const foundIcon = AGENTFLOW_ICONS.find((icon) => icon.name === node.name) + + if (!foundIcon) return null + return + } + useEffect(() => { if (dialogProps.data) { getNodeConfigApi.request(dialogProps.data) @@ -48,27 +57,46 @@ const NodeInfoDialog = ({ show, dialogProps, onCancel }) => { {dialogProps.data && dialogProps.data.name && dialogProps.data.label && (
-
- {dialogProps.data.name} -
+ > + {renderIcon(dialogProps.data)} +
+ ) : ( +
+ {dialogProps.data.name} +
+ )}
{dialogProps.data.label}
diff --git a/packages/ui/src/ui-component/dialog/NvidiaNIMDialog.jsx b/packages/ui/src/ui-component/dialog/NvidiaNIMDialog.jsx new file mode 100644 index 000000000..a0f7b9e1d --- /dev/null +++ b/packages/ui/src/ui-component/dialog/NvidiaNIMDialog.jsx @@ -0,0 +1,477 @@ +import { + Button, + CircularProgress, + Dialog, + DialogActions, + DialogContent, + DialogTitle, + FormControl, + InputLabel, + MenuItem, + Select, + Step, + StepLabel, + Stepper, + TextField +} from '@mui/material' +import axios from 'axios' +import PropTypes from 'prop-types' +import { useEffect, useState } from 'react' +import { createPortal } from 'react-dom' + +const NvidiaNIMDialog = ({ open, onClose, onComplete }) => { + const portalElement = document.getElementById('portal') + + const modelOptions = { + 'nvcr.io/nim/meta/llama-3.1-8b-instruct:1.8.0-RTX': { + label: 'Llama 3.1 8B Instruct', + licenseUrl: 'https://catalog.ngc.nvidia.com/orgs/nim/teams/meta/containers/llama-3.1-8b-instruct' + }, + 'nvcr.io/nim/deepseek-ai/deepseek-r1-distill-llama-8b:1.8.0-RTX': { + label: 'DeepSeek R1 Distill Llama 8B', + licenseUrl: 'https://catalog.ngc.nvidia.com/orgs/nim/teams/deepseek-ai/containers/deepseek-r1-distill-llama-8b' + }, + 'nvcr.io/nim/nv-mistralai/mistral-nemo-12b-instruct:1.8.0-rtx': { + label: 'Mistral Nemo 12B Instruct', + licenseUrl: 'https://catalog.ngc.nvidia.com/orgs/nim/teams/nv-mistralai/containers/mistral-nemo-12b-instruct' + } + } + + const [activeStep, setActiveStep] = useState(0) + const [loading, setLoading] = useState(false) + const [imageTag, setImageTag] = useState('') + const [pollInterval, setPollInterval] = useState(null) + const [nimRelaxMemConstraints, setNimRelaxMemConstraints] = useState('0') + const [hostPort, setHostPort] = useState('8080') + const [showContainerConfirm, setShowContainerConfirm] = useState(false) + const [existingContainer, setExistingContainer] = useState(null) + + const steps = ['Download Installer', 'Pull Image', 'Start Container'] + + const 
handleDownloadInstaller = async () => { + try { + setLoading(true) + await axios.get('/api/v1/nvidia-nim/download-installer') + setLoading(false) + } catch (err) { + let errorData = err.message + if (typeof err === 'string') { + errorData = err + } else if (err.response?.data) { + errorData = err.response.data.message + } + alert('Failed to download installer: ' + errorData) + setLoading(false) + } + } + + const preload = async () => { + try { + setLoading(true) + await axios.get('/api/v1/nvidia-nim/preload') + setLoading(false) + setActiveStep(1) + } catch (err) { + let errorData = err.message + if (typeof err === 'string') { + errorData = err + } else if (err.response?.data) { + errorData = err.response.data.message + } + alert('Failed to preload: ' + errorData) + setLoading(false) + } + } + + const handlePullImage = async () => { + try { + setLoading(true) + try { + const imageResponse = await axios.post('/api/v1/nvidia-nim/get-image', { imageTag }) + if (imageResponse.data && imageResponse.data.tag === imageTag) { + setLoading(false) + setActiveStep(2) + return + } + } catch (err) { + // Continue if image not found + if (err.response?.status !== 404) { + throw err + } + } + + // Get token first + const tokenResponse = await axios.get('/api/v1/nvidia-nim/get-token') + const apiKey = tokenResponse.data.access_token + + // Pull image + await axios.post('/api/v1/nvidia-nim/pull-image', { + imageTag, + apiKey + }) + + // Start polling for image status + const interval = setInterval(async () => { + try { + const imageResponse = await axios.post('/api/v1/nvidia-nim/get-image', { imageTag }) + if (imageResponse.data) { + clearInterval(interval) + setLoading(false) + setActiveStep(2) + } + } catch (err) { + // Continue polling if image not found + if (err.response?.status !== 404) { + clearInterval(interval) + alert('Failed to check image status: ' + err.message) + setLoading(false) + } + } + }, 5000) + + setPollInterval(interval) + } catch (err) { + let errorData = 
err.message + if (typeof err === 'string') { + errorData = err + } else if (err.response?.data) { + errorData = err.response.data.message + } + alert('Failed to pull image: ' + errorData) + setLoading(false) + } + } + + const handleStartContainer = async () => { + try { + setLoading(true) + try { + const containerResponse = await axios.post('/api/v1/nvidia-nim/get-container', { + imageTag, + port: parseInt(hostPort) + }) + if (containerResponse.data) { + setExistingContainer(containerResponse.data) + setShowContainerConfirm(true) + setLoading(false) + return + } + } catch (err) { + // Handle port in use by non-model container + if (err.response?.status === 409) { + alert(`Port ${hostPort} is already in use by another container. Please choose a different port.`) + setLoading(false) + return + } + // Continue if container not found + if (err.response?.status !== 404) { + throw err + } + } + + // No container found with this port, proceed with starting new container + await startNewContainer() + } catch (err) { + let errorData = err.message + if (typeof err === 'string') { + errorData = err + } else if (err.response?.data) { + errorData = err.response.data.message + } + alert('Failed to check container status: ' + errorData) + setLoading(false) + } + } + + const startNewContainer = async () => { + try { + setLoading(true) + const tokenResponse = await axios.get('/api/v1/nvidia-nim/get-token') + const apiKey = tokenResponse.data.access_token + + await axios.post('/api/v1/nvidia-nim/start-container', { + imageTag, + apiKey, + nimRelaxMemConstraints: parseInt(nimRelaxMemConstraints), + hostPort: parseInt(hostPort) + }) + + // Start polling for container status + const interval = setInterval(async () => { + try { + const containerResponse = await axios.post('/api/v1/nvidia-nim/get-container', { + imageTag, + port: parseInt(hostPort) + }) + if (containerResponse.data) { + clearInterval(interval) + setLoading(false) + onComplete(containerResponse.data) + onClose() + } + } 
catch (err) { + // Continue polling if container not found + if (err.response?.status !== 404) { + clearInterval(interval) + alert('Failed to check container status: ' + err.message) + setLoading(false) + } + } + }, 5000) + + setPollInterval(interval) + } catch (err) { + let errorData = err.message + if (typeof err === 'string') { + errorData = err + } else if (err.response?.data) { + errorData = err.response.data.message + } + alert('Failed to start container: ' + errorData) + setLoading(false) + } + } + + const handleUseExistingContainer = async () => { + try { + setLoading(true) + // Start polling for container status + const interval = setInterval(async () => { + try { + const containerResponse = await axios.post('/api/v1/nvidia-nim/get-container', { + imageTag, + port: parseInt(hostPort) + }) + if (containerResponse.data) { + clearInterval(interval) + setLoading(false) + onComplete(containerResponse.data) + onClose() + } + } catch (err) { + // Continue polling if container not found + if (err.response?.status !== 404) { + clearInterval(interval) + alert('Failed to check container status: ' + err.message) + setLoading(false) + } + } + }, 5000) + + setPollInterval(interval) + } catch (err) { + let errorData = err.message + if (typeof err === 'string') { + errorData = err + } else if (err.response?.data) { + errorData = err.response.data.message + } + alert('Failed to check container status: ' + errorData) + setLoading(false) + } + } + + const handleNext = () => { + if (activeStep === 1 && !imageTag) { + alert('Please enter an image tag') + return + } + + if (activeStep === 2) { + const port = parseInt(hostPort) + if (isNaN(port) || port < 1 || port > 65535) { + alert('Please enter a valid port number between 1 and 65535') + return + } + } + + switch (activeStep) { + case 0: + preload() + break + case 1: + handlePullImage() + break + case 2: + handleStartContainer() + break + default: + setActiveStep((prev) => prev + 1) + } + } + + // Cleanup polling on unmount + 
useEffect(() => { + return () => { + if (pollInterval) { + clearInterval(pollInterval) + } + } + }, [pollInterval]) + + // clear state on close + useEffect(() => { + if (!open) { + setActiveStep(0) + setLoading(false) + setImageTag('') + } + }, [open]) + + const component = open ? ( + <> + + NIM Setup + + + {steps.map((label) => ( + + {label} + + ))} + + + {activeStep === 0 && ( +
+

+ Would you like to download the NIM installer? Click Next if it has been installed +

+ {loading && } +
+ )} + + {activeStep === 1 && ( +
+ + Model + + + {imageTag && ( + + )} + {loading && ( +
+
+ +

Pulling image...

+
+ )} +
+ )} + + {activeStep === 2 && ( +
+ {loading ? ( + <> +
+ +

Starting container...

+ + ) : ( + <> + + Relax Memory Constraints + + + setHostPort(e.target.value)} + inputProps={{ min: 1, max: 65535 }} + sx={{ mt: 2 }} + /> +

Click Next to start the container.

+ + )} +
+ )} + + + + {activeStep === 0 && ( + + )} + + +
+ setShowContainerConfirm(false)}> + Container Already Exists + +

A container for this image already exists:

+
+

+ Name: {existingContainer?.name || 'N/A'} +

+

+ Status: {existingContainer?.status || 'N/A'} +

+
+

You can:

+
    +
  • Use the existing container (recommended)
  • +
  • Change the port and try again
  • +
+
+ + + + +
+ + ) : null + + return createPortal(component, portalElement) +} + +NvidiaNIMDialog.propTypes = { + open: PropTypes.bool, + onClose: PropTypes.func, + onComplete: PropTypes.func +} + +export default NvidiaNIMDialog diff --git a/packages/ui/src/ui-component/dialog/PromptLangsmithHubDialog.jsx b/packages/ui/src/ui-component/dialog/PromptLangsmithHubDialog.jsx index acc246fa9..54ce99e13 100644 --- a/packages/ui/src/ui-component/dialog/PromptLangsmithHubDialog.jsx +++ b/packages/ui/src/ui-component/dialog/PromptLangsmithHubDialog.jsx @@ -2,12 +2,6 @@ import { createPortal } from 'react-dom' import { useState, useEffect } from 'react' import { useDispatch, useSelector } from 'react-redux' import PropTypes from 'prop-types' - -import rehypeMathjax from 'rehype-mathjax' -import rehypeRaw from 'rehype-raw' -import remarkGfm from 'remark-gfm' -import remarkMath from 'remark-math' - // MUI import { Box, @@ -44,7 +38,6 @@ import { styled } from '@mui/material/styles' //Project Import import { StyledButton } from '@/ui-component/button/StyledButton' import { MemoizedReactMarkdown } from '@/ui-component/markdown/MemoizedReactMarkdown' -import { CodeBlock } from '@/ui-component/markdown/CodeBlock' import promptEmptySVG from '@/assets/images/prompt_empty.svg' import useApi from '@/hooks/useApi' @@ -536,30 +529,7 @@ const PromptLangsmithHubDialog = ({ promptType, show, onCancel, onSubmit }) => { } }} > - - ) : ( - - {children} - - ) - } - }} - > - {selectedPrompt?.readme} - + {selectedPrompt?.readme}
diff --git a/packages/ui/src/ui-component/dialog/SourceDocDialog.jsx b/packages/ui/src/ui-component/dialog/SourceDocDialog.jsx index f2a231e65..02589a523 100644 --- a/packages/ui/src/ui-component/dialog/SourceDocDialog.jsx +++ b/packages/ui/src/ui-component/dialog/SourceDocDialog.jsx @@ -2,7 +2,7 @@ import { createPortal } from 'react-dom' import { useState, useEffect } from 'react' import { useSelector } from 'react-redux' import PropTypes from 'prop-types' -import { Dialog, DialogContent, DialogTitle } from '@mui/material' +import { Box, Dialog, DialogContent, DialogTitle, Typography } from '@mui/material' import ReactJson from 'flowise-react-json-view' const SourceDocDialog = ({ show, dialogProps, onCancel }) => { @@ -32,6 +32,25 @@ const SourceDocDialog = ({ show, dialogProps, onCancel }) => { {dialogProps.title ?? 'Source Documents'} + {data.error && ( + + + Error: + + + {data.error} + + + )} {
) } else { - return ( - - ) : ( - - {children} - - ) - } - }} - > - {item.data} - - ) + return {item.data} } } @@ -1214,10 +1183,30 @@ const ViewMessagesDialog = ({ show, dialogProps, onCancel }) => { key={index} label={tool.tool} component='a' - sx={{ mr: 1, mt: 1 }} + sx={{ + mr: 1, + mt: 1, + borderColor: tool.error + ? 'error.main' + : undefined, + color: tool.error + ? 'error.main' + : undefined + }} variant='outlined' clickable - icon={} + icon={ + + } onClick={() => onSourceDialogClick( tool, @@ -1284,44 +1273,7 @@ const ViewMessagesDialog = ({ show, dialogProps, onCancel }) => { )} {agent.messages.length > 0 && ( - ) : ( - - {children} - - ) - } - }} + chatflowid={dialogProps.chatflow.id} > {agent.messages.length > 1 ? agent.messages.join('\\n') @@ -1407,9 +1359,24 @@ const ViewMessagesDialog = ({ show, dialogProps, onCancel }) => { key={index} label={tool.tool} component='a' - sx={{ mr: 1, mt: 1 }} + sx={{ + mr: 1, + mt: 1, + borderColor: tool.error ? 'error.main' : undefined, + color: tool.error ? 'error.main' : undefined + }} variant='outlined' clickable + icon={ + + } onClick={() => onSourceDialogClick(tool, 'Used Tools')} /> ) @@ -1433,30 +1400,7 @@ const ViewMessagesDialog = ({ show, dialogProps, onCancel }) => { )}
- {/* Messages are being rendered in Markdown format */} - - ) : ( - - {children} - - ) - } - }} - > + {message.message}
diff --git a/packages/ui/src/ui-component/dropdown/AsyncDropdown.jsx b/packages/ui/src/ui-component/dropdown/AsyncDropdown.jsx index 4469fdf99..f8ad6f91e 100644 --- a/packages/ui/src/ui-component/dropdown/AsyncDropdown.jsx +++ b/packages/ui/src/ui-component/dropdown/AsyncDropdown.jsx @@ -1,4 +1,4 @@ -import { useState, useEffect, Fragment } from 'react' +import { useState, useEffect, useContext, Fragment } from 'react' import { useSelector } from 'react-redux' import PropTypes from 'prop-types' import axios from 'axios' @@ -6,13 +6,15 @@ import axios from 'axios' // Material import Autocomplete, { autocompleteClasses } from '@mui/material/Autocomplete' import { Popper, CircularProgress, TextField, Box, Typography } from '@mui/material' -import { styled } from '@mui/material/styles' +import { useTheme, styled } from '@mui/material/styles' // API import credentialsApi from '@/api/credentials' // const import { baseURL } from '@/store/constant' +import { flowContext } from '@/store/context/ReactFlowContext' +import { getAvailableNodesForVariable } from '@/utils/genericHelper' const StyledPopper = styled(Popper)({ boxShadow: '0px 8px 10px -5px rgb(0 0 0 / 20%), 0px 16px 24px 2px rgb(0 0 0 / 14%), 0px 6px 30px 5px rgb(0 0 0 / 12%)', @@ -26,15 +28,16 @@ const StyledPopper = styled(Popper)({ } }) -const fetchList = async ({ name, nodeData }) => { - const loadMethod = nodeData.inputParams.find((param) => param.name === name)?.loadMethod +const fetchList = async ({ name, nodeData, previousNodes, currentNode }) => { + const selectedParam = nodeData.inputParams.find((param) => param.name === name) + const loadMethod = selectedParam?.loadMethod const username = localStorage.getItem('username') const password = localStorage.getItem('password') let lists = await axios .post( `${baseURL}/api/v1/node-load-method/${nodeData.name}`, - { ...nodeData, loadMethod }, + { ...nodeData, loadMethod, previousNodes, currentNode }, { auth: username && password ? 
{ username, password } : undefined, headers: { 'Content-type': 'application/json', 'x-request-from': 'internal' } @@ -59,17 +62,31 @@ export const AsyncDropdown = ({ credentialNames = [], disabled = false, freeSolo = false, - disableClearable = false + disableClearable = false, + multiple = false }) => { const customization = useSelector((state) => state.customization) + const theme = useTheme() const [open, setOpen] = useState(false) const [options, setOptions] = useState([]) const [loading, setLoading] = useState(false) - const findMatchingOptions = (options = [], value) => options.find((option) => option.name === value) - const getDefaultOptionValue = () => '' + const findMatchingOptions = (options = [], value) => { + if (multiple) { + let values = [] + if ('choose an option' !== value && value && typeof value === 'string') { + values = JSON.parse(value) + } else { + values = value + } + return options.filter((option) => values.includes(option.name)) + } + return options.find((option) => option.name === value) + } + const getDefaultOptionValue = () => (multiple ? [] : '') const addNewOption = [{ label: '- Create New -', name: '-create-' }] let [internalValue, setInternalValue] = useState(value ?? 'choose an option') + const { reactFlowInstance } = useContext(flowContext) const fetchCredentialList = async () => { try { @@ -100,7 +117,45 @@ export const AsyncDropdown = ({ setLoading(true) ;(async () => { const fetchData = async () => { - let response = credentialNames.length ? 
await fetchCredentialList() : await fetchList({ name, nodeData }) + let response = [] + if (credentialNames.length) { + response = await fetchCredentialList() + } else { + const body = { + name, + nodeData + } + if (reactFlowInstance) { + const previousNodes = getAvailableNodesForVariable( + reactFlowInstance.getNodes(), + reactFlowInstance.getEdges(), + nodeData.id, + `${nodeData.id}-input-${name}-${nodeData.inputParams.find((param) => param.name === name)?.type || ''}`, + true + ).map((node) => ({ id: node.id, name: node.data.name, label: node.data.label, inputs: node.data.inputs })) + + let currentNode = reactFlowInstance.getNodes().find((node) => node.id === nodeData.id) + if (currentNode) { + currentNode = { + id: currentNode.id, + name: currentNode.data.name, + label: currentNode.data.label, + inputs: currentNode.data.inputs + } + body.currentNode = currentNode + } + + body.previousNodes = previousNodes + } + + response = await fetchList(body) + } + for (let j = 0; j < response.length; j += 1) { + if (response[j].imageSrc) { + const imageSrc = `${baseURL}/api/v1/node-icon/${response[j].name}` + response[j].imageSrc = imageSrc + } + } if (isCreateNewOption) setOptions([...response, ...addNewOption]) else setOptions([...response]) setLoading(false) @@ -118,6 +173,8 @@ export const AsyncDropdown = ({ freeSolo={freeSolo} disabled={disabled} disableClearable={disableClearable} + multiple={multiple} + filterSelectedOptions={multiple} size='small' sx={{ mt: 1, width: '100%' }} open={open} @@ -130,34 +187,90 @@ export const AsyncDropdown = ({ options={options} value={findMatchingOptions(options, internalValue) || getDefaultOptionValue()} onChange={(e, selection) => { - const value = selection ? 
selection.name : '' - if (isCreateNewOption && value === '-create-') { - onCreateNew() - } else { + if (multiple) { + let value = '' + if (selection.length) { + const selectionNames = selection.map((item) => item.name) + value = JSON.stringify(selectionNames) + } setInternalValue(value) onSelect(value) + } else { + const value = selection ? selection.name : '' + if (isCreateNewOption && value === '-create-') { + onCreateNew() + } else { + setInternalValue(value) + onSelect(value) + } } }} PopperComponent={StyledPopper} loading={loading} - renderInput={(params) => ( - - {loading ? : null} - {params.InputProps.endAdornment} - - ) - }} - sx={{ height: '100%', '& .MuiInputBase-root': { height: '100%' } }} - /> - )} + renderInput={(params) => { + const matchingOptions = multiple + ? findMatchingOptions(options, internalValue) + : [findMatchingOptions(options, internalValue)].filter(Boolean) + return ( + + {matchingOptions.map((option) => + option?.imageSrc ? ( + + ) : null + )} + {params.InputProps.startAdornment} + + ), + endAdornment: ( + + {loading ? : null} + {params.InputProps.endAdornment} + + ) + }} + /> + ) + }} renderOption={(props, option) => ( - + + {option.imageSrc && ( + {option.description} + )}
{option.label} {option.description && ( @@ -181,5 +294,6 @@ AsyncDropdown.propTypes = { freeSolo: PropTypes.bool, credentialNames: PropTypes.array, disableClearable: PropTypes.bool, - isCreateNewOption: PropTypes.bool + isCreateNewOption: PropTypes.bool, + multiple: PropTypes.bool } diff --git a/packages/ui/src/ui-component/dropdown/Dropdown.jsx b/packages/ui/src/ui-component/dropdown/Dropdown.jsx index 333b3cca1..c18f52c94 100644 --- a/packages/ui/src/ui-component/dropdown/Dropdown.jsx +++ b/packages/ui/src/ui-component/dropdown/Dropdown.jsx @@ -3,7 +3,7 @@ import { useSelector } from 'react-redux' import { Popper, FormControl, TextField, Box, Typography } from '@mui/material' import Autocomplete, { autocompleteClasses } from '@mui/material/Autocomplete' -import { styled } from '@mui/material/styles' +import { useTheme, styled } from '@mui/material/styles' import PropTypes from 'prop-types' const StyledPopper = styled(Popper)({ @@ -23,6 +23,7 @@ export const Dropdown = ({ name, value, loading, options, onSelect, disabled = f const findMatchingOptions = (options = [], value) => options.find((option) => option.name === value) const getDefaultOptionValue = () => '' let [internalValue, setInternalValue] = useState(value ?? 
'choose an option') + const theme = useTheme() return ( @@ -49,7 +50,12 @@ export const Dropdown = ({ name, value, loading, options, onSelect, disabled = f value={internalValue} sx={{ height: '100%', - '& .MuiInputBase-root': { height: '100%' } + '& .MuiInputBase-root': { + height: '100%', + '& fieldset': { + borderColor: theme.palette.grey[900] + 25 + } + } }} InputProps={{ ...params.InputProps, diff --git a/packages/ui/src/ui-component/dropdown/MultiDropdown.jsx b/packages/ui/src/ui-component/dropdown/MultiDropdown.jsx index b1c125b35..e8a0e3ac1 100644 --- a/packages/ui/src/ui-component/dropdown/MultiDropdown.jsx +++ b/packages/ui/src/ui-component/dropdown/MultiDropdown.jsx @@ -3,7 +3,7 @@ import { useSelector } from 'react-redux' import { Popper, FormControl, TextField, Box, Typography } from '@mui/material' import Autocomplete, { autocompleteClasses } from '@mui/material/Autocomplete' -import { styled } from '@mui/material/styles' +import { useTheme, styled } from '@mui/material/styles' import PropTypes from 'prop-types' const StyledPopper = styled(Popper)({ @@ -28,6 +28,7 @@ export const MultiDropdown = ({ name, value, options, onSelect, formControlSx = } const getDefaultOptionValue = () => [] let [internalValue, setInternalValue] = useState(value ?? []) + const theme = useTheme() return ( @@ -54,7 +55,19 @@ export const MultiDropdown = ({ name, value, options, onSelect, formControlSx = }} PopperComponent={StyledPopper} renderInput={(params) => ( - + )} renderOption={(props, option) => ( diff --git a/packages/ui/src/ui-component/editor/CodeEditor.jsx b/packages/ui/src/ui-component/editor/CodeEditor.jsx index 13cf2b466..98f9ec9d1 100644 --- a/packages/ui/src/ui-component/editor/CodeEditor.jsx +++ b/packages/ui/src/ui-component/editor/CodeEditor.jsx @@ -31,7 +31,7 @@ export const CodeEditor = ({ '.cm-content': lang !== 'js' ? 
{ - fontFamily: 'Roboto, sans-serif', + fontFamily: `'Inter', 'Roboto', 'Arial', sans-serif`, fontSize: '0.95rem', letterSpacing: '0em', fontWeight: 400, @@ -47,11 +47,7 @@ export const CodeEditor = ({ value={value} height={height ?? 'calc(100vh - 220px)'} theme={theme === 'dark' ? (lang === 'js' ? vscodeDark : sublime) : 'none'} - extensions={ - lang === 'js' - ? [javascript({ jsx: true }), EditorView.lineWrapping, customStyle] - : [json(), EditorView.lineWrapping, customStyle] - } + extensions={[lang === 'js' ? javascript({ jsx: true }) : json(), EditorView.lineWrapping, customStyle]} onChange={onValueChange} readOnly={disabled} editable={!disabled} diff --git a/packages/ui/src/ui-component/extended/AnalyseFlow.jsx b/packages/ui/src/ui-component/extended/AnalyseFlow.jsx index d9001368e..de162e51a 100644 --- a/packages/ui/src/ui-component/extended/AnalyseFlow.jsx +++ b/packages/ui/src/ui-component/extended/AnalyseFlow.jsx @@ -30,6 +30,7 @@ import lunarySVG from '@/assets/images/lunary.svg' import langwatchSVG from '@/assets/images/langwatch.svg' import arizePNG from '@/assets/images/arize.png' import phoenixPNG from '@/assets/images/phoenix.png' +import opikPNG from '@/assets/images/opik.png' // store import useNotifier from '@/utils/useNotifier' @@ -188,6 +189,33 @@ const analyticProviders = [ optional: true } ] + }, + { + label: 'Opik', + name: 'opik', + icon: opikPNG, + url: 'https://www.comet.com/opik', + inputs: [ + { + label: 'Connect Credential', + name: 'credential', + type: 'credential', + credentialNames: ['opikApi'] + }, + { + label: 'Project Name', + name: 'opikProjectName', + type: 'string', + description: 'Name of your Opik project', + placeholder: 'default' + }, + { + label: 'On/Off', + name: 'status', + type: 'boolean', + optional: true + } + ] } ] diff --git a/packages/ui/src/ui-component/extended/FileUpload.jsx b/packages/ui/src/ui-component/extended/FileUpload.jsx index d6fc8f02a..02063775b 100644 --- 
a/packages/ui/src/ui-component/extended/FileUpload.jsx +++ b/packages/ui/src/ui-component/extended/FileUpload.jsx @@ -5,7 +5,7 @@ import { enqueueSnackbar as enqueueSnackbarAction, closeSnackbar as closeSnackba import parser from 'html-react-parser' // material-ui -import { Button, Box } from '@mui/material' +import { Button, Box, Typography, FormControl, RadioGroup, FormControlLabel, Radio } from '@mui/material' import { IconX, IconBulb } from '@tabler/icons-react' // Project import @@ -22,6 +22,20 @@ const message = `Uploaded files will be parsed as strings and sent to the LLM. I
Refer
docs for more details.` +const availableFileTypes = [ + { name: 'CSS', ext: 'text/css' }, + { name: 'CSV', ext: 'text/csv' }, + { name: 'HTML', ext: 'text/html' }, + { name: 'JSON', ext: 'application/json' }, + { name: 'Markdown', ext: 'text/markdown' }, + { name: 'PDF', ext: 'application/pdf' }, + { name: 'SQL', ext: 'application/sql' }, + { name: 'Text File', ext: 'text/plain' }, + { name: 'XML', ext: 'application/xml' }, + { name: 'DOC', ext: 'application/msword' }, + { name: 'DOCX', ext: 'application/vnd.openxmlformats-officedocument.wordprocessingml.document' } +] + const FileUpload = ({ dialogProps }) => { const dispatch = useDispatch() @@ -31,16 +45,41 @@ const FileUpload = ({ dialogProps }) => { const closeSnackbar = (...args) => dispatch(closeSnackbarAction(...args)) const [fullFileUpload, setFullFileUpload] = useState(false) + const [allowedFileTypes, setAllowedFileTypes] = useState([]) const [chatbotConfig, setChatbotConfig] = useState({}) + const [pdfUsage, setPdfUsage] = useState('perPage') + const [pdfLegacyBuild, setPdfLegacyBuild] = useState(false) const handleChange = (value) => { setFullFileUpload(value) } + const handleAllowedFileTypesChange = (event) => { + const { checked, value } = event.target + if (checked) { + setAllowedFileTypes((prev) => [...prev, value]) + } else { + setAllowedFileTypes((prev) => prev.filter((item) => item !== value)) + } + } + + const handlePdfUsageChange = (event) => { + setPdfUsage(event.target.value) + } + + const handleLegacyBuildChange = (value) => { + setPdfLegacyBuild(value) + } + const onSave = async () => { try { const value = { - status: fullFileUpload + status: fullFileUpload, + allowedUploadFileTypes: allowedFileTypes.join(','), + pdfFile: { + usage: pdfUsage, + legacyBuild: pdfLegacyBuild + } } chatbotConfig.fullFileUpload = value @@ -82,6 +121,9 @@ const FileUpload = ({ dialogProps }) => { } useEffect(() => { + /* backward compatibility - by default, allow all */ + const allowedFileTypes = 
availableFileTypes.map((fileType) => fileType.ext) + setAllowedFileTypes(allowedFileTypes) if (dialogProps.chatflow) { if (dialogProps.chatflow.chatbotConfig) { try { @@ -90,6 +132,18 @@ const FileUpload = ({ dialogProps }) => { if (chatbotConfig.fullFileUpload) { setFullFileUpload(chatbotConfig.fullFileUpload.status) } + if (chatbotConfig.fullFileUpload?.allowedUploadFileTypes) { + const allowedFileTypes = chatbotConfig.fullFileUpload.allowedUploadFileTypes.split(',') + setAllowedFileTypes(allowedFileTypes) + } + if (chatbotConfig.fullFileUpload?.pdfFile) { + if (chatbotConfig.fullFileUpload.pdfFile.usage) { + setPdfUsage(chatbotConfig.fullFileUpload.pdfFile.usage) + } + if (chatbotConfig.fullFileUpload.pdfFile.legacyBuild !== undefined) { + setPdfLegacyBuild(chatbotConfig.fullFileUpload.pdfFile.legacyBuild) + } + } } catch (e) { setChatbotConfig({}) } @@ -135,8 +189,64 @@ const FileUpload = ({ dialogProps }) => {
- {/* TODO: Allow selection of allowed file types*/} - + + Allow Uploads of Type +
+ {availableFileTypes.map((fileType) => ( +
+ + +
+ ))} +
+ + + PDF Usage + + + } label='One document per page' /> + } label='One document per file' /> + + + + + + + + + Save diff --git a/packages/ui/src/ui-component/extended/FollowUpPrompts.jsx b/packages/ui/src/ui-component/extended/FollowUpPrompts.jsx index 44da9bc61..680e4f11c 100644 --- a/packages/ui/src/ui-component/extended/FollowUpPrompts.jsx +++ b/packages/ui/src/ui-component/extended/FollowUpPrompts.jsx @@ -2,6 +2,7 @@ import PropTypes from 'prop-types' import { Box, Button, FormControl, ListItem, ListItemAvatar, ListItemText, MenuItem, Select, Typography } from '@mui/material' import { useEffect, useState } from 'react' import { useDispatch } from 'react-redux' +import { useTheme } from '@mui/material/styles' // Project Imports import { StyledButton } from '@/ui-component/button/StyledButton' @@ -269,6 +270,14 @@ const followUpPromptsOptions = { name: FollowUpPromptProviders.OLLAMA, icon: ollamaIcon, inputs: [ + { + label: 'Base URL', + name: 'baseUrl', + type: 'string', + placeholder: 'http://127.0.0.1:11434', + description: 'Base URL of your Ollama instance', + default: 'http://127.0.0.1:11434' + }, { label: 'Model Name', name: 'modelName', @@ -302,6 +311,7 @@ const FollowUpPrompts = ({ dialogProps }) => { const dispatch = useDispatch() useNotifier() + const theme = useTheme() const enqueueSnackbar = (...args) => dispatch(enqueueSnackbarAction(...args)) const closeSnackbar = (...args) => dispatch(closeSnackbarAction(...args)) @@ -465,7 +475,16 @@ const FollowUpPrompts = ({ dialogProps }) => { <> Providers - {Object.values(followUpPromptsOptions).map((provider) => ( {provider.label} diff --git a/packages/ui/src/ui-component/extended/Logo.jsx b/packages/ui/src/ui-component/extended/Logo.jsx index c7ea54fda..677f03687 100644 --- a/packages/ui/src/ui-component/extended/Logo.jsx +++ b/packages/ui/src/ui-component/extended/Logo.jsx @@ -1,5 +1,5 @@ -import logo from '@/assets/images/flowise_logo.png' -import logoDark from '@/assets/images/flowise_logo_dark.png' 
+import logo from '@/assets/images/flowise_white.svg' +import logoDark from '@/assets/images/flowise_dark.svg' import { useSelector } from 'react-redux' @@ -9,7 +9,7 @@ const Logo = () => { const customization = useSelector((state) => state.customization) return ( -
+
{ onToggle(row, enabled) } + const renderCellContent = (key, row) => { + if (key === 'enabled') { + return handleChange(enabled, row)} value={row.enabled} /> + } else if (key === 'type' && row.schema) { + // If there's schema information, add a tooltip + const schemaContent = + '[
' + + row.schema + .map( + (item) => + `  ${JSON.stringify( + { + [item.name]: item.type + }, + null, + 2 + )}` + ) + .join(',
') + + '
]' + + return ( + + {row[key]} + Schema:
${schemaContent}
`} /> + + ) + } else { + return row[key] + } + } + return ( @@ -57,16 +89,8 @@ const OverrideConfigTable = ({ columns, onToggle, rows, sx }) => { {rows.map((row, index) => ( {Object.keys(row).map((key, index) => { - if (key !== 'id') { - return ( - - {key === 'enabled' ? ( - handleChange(enabled, row)} value={row.enabled} /> - ) : ( - row[key] - )} - - ) + if (key !== 'id' && key !== 'schema') { + return {renderCellContent(key, row)} } })} @@ -116,25 +140,27 @@ const OverrideConfig = ({ dialogProps }) => { } const formatObj = () => { - const obj = { - overrideConfig: { status: overrideConfigStatus } + let apiConfig = JSON.parse(dialogProps.chatflow.apiConfig) + if (apiConfig === null || apiConfig === undefined) { + apiConfig = {} } + let overrideConfig = { status: overrideConfigStatus } if (overrideConfigStatus) { - // loop through each key in nodeOverrides and filter out the enabled ones const filteredNodeOverrides = {} for (const key in nodeOverrides) { filteredNodeOverrides[key] = nodeOverrides[key].filter((node) => node.enabled) } - obj.overrideConfig = { - ...obj.overrideConfig, + overrideConfig = { + ...overrideConfig, nodes: filteredNodeOverrides, variables: variableOverrides.filter((node) => node.enabled) } } + apiConfig.overrideConfig = overrideConfig - return obj + return apiConfig } const onNodeOverrideToggle = (node, property, status) => { @@ -167,7 +193,7 @@ const OverrideConfig = ({ dialogProps }) => { const seenNodes = new Set() nodes.forEach((item) => { - const { node, nodeId, label, name, type } = item + const { node, nodeId, label, name, type, schema } = item seenNodes.add(node) if (!result[node]) { @@ -184,7 +210,7 @@ const OverrideConfig = ({ dialogProps }) => { if (!result[node].nodeIds.includes(nodeId)) result[node].nodeIds.push(nodeId) - const param = { label, name, type } + const param = { label, name, type, schema } if (!result[node].params.some((existingParam) => JSON.stringify(existingParam) === JSON.stringify(param))) { 
result[node].params.push(param) @@ -206,7 +232,7 @@ const OverrideConfig = ({ dialogProps }) => { if (!overrideConfigStatus) { setNodeOverrides(newNodeOverrides) } else { - const updatedNodeOverrides = { ...nodeOverrides } + const updatedNodeOverrides = { ...newNodeOverrides } Object.keys(updatedNodeOverrides).forEach((node) => { if (!seenNodes.has(node)) { @@ -393,7 +419,9 @@ const OverrideConfig = ({ dialogProps }) => { rows={nodeOverrides[nodeLabel]} columns={ nodeOverrides[nodeLabel].length > 0 - ? Object.keys(nodeOverrides[nodeLabel][0]) + ? Object.keys(nodeOverrides[nodeLabel][0]).filter( + (key) => key !== 'schema' && key !== 'id' + ) : [] } onToggle={(property, status) => diff --git a/packages/ui/src/ui-component/extended/PostProcessing.jsx b/packages/ui/src/ui-component/extended/PostProcessing.jsx new file mode 100644 index 000000000..fd56a3eb6 --- /dev/null +++ b/packages/ui/src/ui-component/extended/PostProcessing.jsx @@ -0,0 +1,228 @@ +import { useDispatch } from 'react-redux' +import { useState, useEffect } from 'react' +import PropTypes from 'prop-types' +import { useSelector } from 'react-redux' + +// material-ui +import { IconButton, Button, Box, Typography } from '@mui/material' +import { IconArrowsMaximize, IconBulb, IconX } from '@tabler/icons-react' +import { useTheme } from '@mui/material/styles' + +// Project import +import { StyledButton } from '@/ui-component/button/StyledButton' +import { SwitchInput } from '@/ui-component/switch/Switch' +import { CodeEditor } from '@/ui-component/editor/CodeEditor' +import ExpandTextDialog from '@/ui-component/dialog/ExpandTextDialog' + +// store +import { enqueueSnackbar as enqueueSnackbarAction, closeSnackbar as closeSnackbarAction, SET_CHATFLOW } from '@/store/actions' +import useNotifier from '@/utils/useNotifier' + +// API +import chatflowsApi from '@/api/chatflows' + +const sampleFunction = `return $flow.rawOutput + " This is a post processed response!";` + +const PostProcessing = ({ dialogProps }) 
=> { + const dispatch = useDispatch() + + useNotifier() + const theme = useTheme() + const customization = useSelector((state) => state.customization) + + const enqueueSnackbar = (...args) => dispatch(enqueueSnackbarAction(...args)) + const closeSnackbar = (...args) => dispatch(closeSnackbarAction(...args)) + + const [postProcessingEnabled, setPostProcessingEnabled] = useState(false) + const [postProcessingFunction, setPostProcessingFunction] = useState('') + const [chatbotConfig, setChatbotConfig] = useState({}) + const [showExpandDialog, setShowExpandDialog] = useState(false) + const [expandDialogProps, setExpandDialogProps] = useState({}) + + const handleChange = (value) => { + setPostProcessingEnabled(value) + } + + const onExpandDialogClicked = (value) => { + const dialogProps = { + value, + inputParam: { + label: 'Post Processing Function', + name: 'postProcessingFunction', + type: 'code', + placeholder: sampleFunction, + hideCodeExecute: true + }, + languageType: 'js', + confirmButtonName: 'Save', + cancelButtonName: 'Cancel' + } + setExpandDialogProps(dialogProps) + setShowExpandDialog(true) + } + + const onSave = async () => { + try { + let value = { + postProcessing: { + enabled: postProcessingEnabled, + customFunction: JSON.stringify(postProcessingFunction) + } + } + chatbotConfig.postProcessing = value.postProcessing + const saveResp = await chatflowsApi.updateChatflow(dialogProps.chatflow.id, { + chatbotConfig: JSON.stringify(chatbotConfig) + }) + if (saveResp.data) { + enqueueSnackbar({ + message: 'Post Processing Settings Saved', + options: { + key: new Date().getTime() + Math.random(), + variant: 'success', + action: (key) => ( + + ) + } + }) + dispatch({ type: SET_CHATFLOW, chatflow: saveResp.data }) + } + } catch (error) { + enqueueSnackbar({ + message: `Failed to save Post Processing Settings: ${ + typeof error.response.data === 'object' ? 
error.response.data.message : error.response.data + }`, + options: { + key: new Date().getTime() + Math.random(), + variant: 'error', + persist: true, + action: (key) => ( + + ) + } + }) + } + } + + useEffect(() => { + if (dialogProps.chatflow && dialogProps.chatflow.chatbotConfig) { + let chatbotConfig = JSON.parse(dialogProps.chatflow.chatbotConfig) + setChatbotConfig(chatbotConfig || {}) + if (chatbotConfig.postProcessing) { + setPostProcessingEnabled(chatbotConfig.postProcessing.enabled) + if (chatbotConfig.postProcessing.customFunction) { + setPostProcessingFunction(JSON.parse(chatbotConfig.postProcessing.customFunction)) + } + } + } + + return () => {} + }, [dialogProps]) + + return ( + <> + + + + + + JS Function + +
+ onExpandDialogClicked(postProcessingFunction)} + > + + + + +
+ setPostProcessingFunction(code)} + basicSetup={{ highlightActiveLine: false, highlightActiveLineGutter: false }} + /> +
+ +
+
+ + + The following variables are available to use in the custom function:{' '} +
$flow.rawOutput, $flow.input, $flow.chatflowId, $flow.sessionId, $flow.chatId
+
+
+
+ + Save + + setShowExpandDialog(false)} + onConfirm={(newValue) => { + setPostProcessingFunction(newValue) + setShowExpandDialog(false) + }} + > + + ) +} + +PostProcessing.propTypes = { + dialogProps: PropTypes.object +} + +export default PostProcessing diff --git a/packages/ui/src/ui-component/extended/RateLimit.jsx b/packages/ui/src/ui-component/extended/RateLimit.jsx index c57b20e79..1b3ca3b01 100644 --- a/packages/ui/src/ui-component/extended/RateLimit.jsx +++ b/packages/ui/src/ui-component/extended/RateLimit.jsx @@ -19,7 +19,7 @@ import chatflowsApi from '@/api/chatflows' // utils import useNotifier from '@/utils/useNotifier' -const RateLimit = () => { +const RateLimit = ({ dialogProps }) => { const dispatch = useDispatch() const chatflow = useSelector((state) => state.canvas.chatflow) const chatflowid = chatflow.id @@ -36,9 +36,11 @@ const RateLimit = () => { const [limitMsg, setLimitMsg] = useState(apiConfig?.rateLimit?.limitMsg ?? '') const formatObj = () => { - const obj = { - rateLimit: { status: rateLimitStatus } + let apiConfig = JSON.parse(dialogProps.chatflow.apiConfig) + if (apiConfig === null || apiConfig === undefined) { + apiConfig = {} } + let obj = { status: rateLimitStatus } if (rateLimitStatus) { const rateLimitValuesBoolean = [!limitMax, !limitDuration, !limitMsg] @@ -46,16 +48,16 @@ const RateLimit = () => { if (rateLimitFilledValues.length >= 1 && rateLimitFilledValues.length <= 2) { throw new Error('Need to fill all rate limit input fields') } else if (rateLimitFilledValues.length === 3) { - obj.rateLimit = { - ...obj.rateLimit, + obj = { + ...obj, limitMax, limitDuration, limitMsg } } } - - return obj + apiConfig.rateLimit = obj + return apiConfig } const handleChange = (value) => { @@ -173,7 +175,8 @@ const RateLimit = () => { } RateLimit.propTypes = { - isSessionMemory: PropTypes.bool + isSessionMemory: PropTypes.bool, + dialogProps: PropTypes.object } export default RateLimit diff --git 
a/packages/ui/src/ui-component/extended/Security.jsx b/packages/ui/src/ui-component/extended/Security.jsx index b46847fcb..57fff04ba 100644 --- a/packages/ui/src/ui-component/extended/Security.jsx +++ b/packages/ui/src/ui-component/extended/Security.jsx @@ -12,7 +12,7 @@ const Security = ({ dialogProps }) => { return ( } spacing={4}> - + diff --git a/packages/ui/src/ui-component/extended/SpeechToText.jsx b/packages/ui/src/ui-component/extended/SpeechToText.jsx index 23a5a1e9a..d119e5f89 100644 --- a/packages/ui/src/ui-component/extended/SpeechToText.jsx +++ b/packages/ui/src/ui-component/extended/SpeechToText.jsx @@ -6,6 +6,7 @@ import { enqueueSnackbar as enqueueSnackbarAction, closeSnackbar as closeSnackba // material-ui import { Typography, Box, Button, FormControl, ListItem, ListItemAvatar, ListItemText, MenuItem, Select } from '@mui/material' import { IconX } from '@tabler/icons-react' +import { useTheme } from '@mui/material/styles' // Project import import CredentialInputHandler from '@/views/canvas/CredentialInputHandler' @@ -242,6 +243,7 @@ const SpeechToText = ({ dialogProps }) => { const dispatch = useDispatch() useNotifier() + const theme = useTheme() const enqueueSnackbar = (...args) => dispatch(enqueueSnackbarAction(...args)) const closeSnackbar = (...args) => dispatch(closeSnackbarAction(...args)) @@ -349,7 +351,16 @@ const SpeechToText = ({ dialogProps }) => { Providers - None {Object.values(speechToTextProviders).map((provider) => ( diff --git a/packages/ui/src/ui-component/input/Input.jsx b/packages/ui/src/ui-component/input/Input.jsx index 79f515ddd..7571726f9 100644 --- a/packages/ui/src/ui-component/input/Input.jsx +++ b/packages/ui/src/ui-component/input/Input.jsx @@ -1,10 +1,12 @@ import { useState, useEffect, useRef } from 'react' import PropTypes from 'prop-types' import { FormControl, OutlinedInput, InputBase, Popover } from '@mui/material' +import { useTheme } from '@mui/material/styles' import SelectVariable from 
'@/ui-component/json/SelectVariable' import { getAvailableNodesForVariable } from '@/utils/genericHelper' export const Input = ({ inputParam, value, nodes, edges, nodeId, onChange, disabled = false }) => { + const theme = useTheme() const [myValue, setMyValue] = useState(value ?? '') const [anchorEl, setAnchorEl] = useState(null) const [availableNodesForVariable, setAvailableNodesForVariable] = useState([]) @@ -71,7 +73,7 @@ export const Input = ({ inputParam, value, nodes, edges, nodeId, onChange, disab style: { border: 'none', background: 'none', - color: '#212121' + color: 'inherit' } }} sx={{ @@ -108,6 +110,11 @@ export const Input = ({ inputParam, value, nodes, edges, nodeId, onChange, disab height: inputParam.rows ? '90px' : 'inherit' } }} + sx={{ + '& .MuiOutlinedInput-notchedOutline': { + borderColor: theme.palette.grey[900] + 25 + } + }} /> )} diff --git a/packages/ui/src/ui-component/input/RichInput.jsx b/packages/ui/src/ui-component/input/RichInput.jsx new file mode 100644 index 000000000..50c7e7cf3 --- /dev/null +++ b/packages/ui/src/ui-component/input/RichInput.jsx @@ -0,0 +1,137 @@ +import { useState, useEffect } from 'react' +import PropTypes from 'prop-types' +import { useEditor, EditorContent } from '@tiptap/react' +import Placeholder from '@tiptap/extension-placeholder' +import { mergeAttributes } from '@tiptap/core' +import StarterKit from '@tiptap/starter-kit' +import { styled } from '@mui/material/styles' +import { Box } from '@mui/material' +import Mention from '@tiptap/extension-mention' +import { suggestionOptions } from './suggestionOption' +import { getAvailableNodesForVariable } from '@/utils/genericHelper' + +// define your extension array +const extensions = (availableNodesForVariable, availableState, acceptNodeOutputAsVariable, nodes, nodeData, isNodeInsideInteration) => [ + StarterKit, + Mention.configure({ + HTMLAttributes: { + class: 'variable' + }, + renderHTML({ options, node }) { + return [ + 'span', + 
mergeAttributes(this.HTMLAttributes, options.HTMLAttributes), + `${options.suggestion.char} ${node.attrs.label ?? node.attrs.id} }}` + ] + }, + suggestion: suggestionOptions( + availableNodesForVariable, + availableState, + acceptNodeOutputAsVariable, + nodes, + nodeData, + isNodeInsideInteration + ), + deleteTriggerWithBackspace: true + }) +] + +// Add styled component for editor wrapper +const StyledEditorContent = styled(EditorContent)(({ theme, rows }) => ({ + '& .ProseMirror': { + padding: '0px 14px', + height: rows ? `${rows * 1.4375}rem` : '2.4rem', + overflowY: rows ? 'auto' : 'hidden', + overflowX: rows ? 'auto' : 'hidden', + lineHeight: rows ? '1.4375em' : '0.875em', + fontWeight: 500, + color: theme.palette.grey[900], + border: `1px solid ${theme.palette.grey[900] + 25}`, + borderRadius: '10px', + backgroundColor: theme.palette.textBackground.main, + boxSizing: 'border-box', + whiteSpace: rows ? 'pre-wrap' : 'nowrap', + '&:hover': { + borderColor: theme.palette.text.primary, + cursor: 'text' + }, + '&:focus': { + borderColor: theme.palette.primary.main, + outline: 'none' + }, + '&[disabled]': { + backgroundColor: theme.palette.action.disabledBackground, + color: theme.palette.action.disabled + }, + // Placeholder for first paragraph when editor is empty + '& p.is-editor-empty:first-of-type::before': { + content: 'attr(data-placeholder)', + float: 'left', + color: theme.palette.text.primary, + opacity: 0.4, + pointerEvents: 'none', + height: 0 + } + } +})) + +export const RichInput = ({ inputParam, value, nodes, edges, nodeId, onChange, disabled = false }) => { + const [availableNodesForVariable, setAvailableNodesForVariable] = useState([]) + const [availableState, setAvailableState] = useState([]) + const [nodeData, setNodeData] = useState({}) + const [isNodeInsideInteration, setIsNodeInsideInteration] = useState(false) + + useEffect(() => { + if (!disabled && nodes && edges && nodeId && inputParam) { + const nodesForVariable = inputParam?.acceptVariable 
? getAvailableNodesForVariable(nodes, edges, nodeId, inputParam.id) : [] + setAvailableNodesForVariable(nodesForVariable) + + const startAgentflowNode = nodes.find((node) => node.data.name === 'startAgentflow') + const state = startAgentflowNode?.data?.inputs?.startState + setAvailableState(state) + + const agentflowNode = nodes.find((node) => node.data.id === nodeId) + setNodeData(agentflowNode?.data) + + setIsNodeInsideInteration(nodes.find((node) => node.data.id === nodeId)?.extent === 'parent') + } + }, [disabled, inputParam, nodes, edges, nodeId]) + + const editor = useEditor( + { + extensions: [ + ...extensions( + availableNodesForVariable, + availableState, + inputParam?.acceptNodeOutputAsVariable, + nodes, + nodeData, + isNodeInsideInteration + ), + Placeholder.configure({ placeholder: inputParam?.placeholder }) + ], + content: value, + onUpdate: ({ editor }) => { + onChange(editor.getHTML()) + }, + editable: !disabled + }, + [availableNodesForVariable] + ) + + return ( + + + + ) +} + +RichInput.propTypes = { + inputParam: PropTypes.object, + value: PropTypes.oneOfType([PropTypes.string, PropTypes.number]), + onChange: PropTypes.func, + disabled: PropTypes.bool, + nodes: PropTypes.array, + edges: PropTypes.array, + nodeId: PropTypes.string +} diff --git a/packages/ui/src/ui-component/input/SuggestionList.jsx b/packages/ui/src/ui-component/input/SuggestionList.jsx new file mode 100644 index 000000000..cae004af5 --- /dev/null +++ b/packages/ui/src/ui-component/input/SuggestionList.jsx @@ -0,0 +1,192 @@ +import { List, ListItem, ListItemButton, Paper, Typography, Divider } from '@mui/material' +import { forwardRef, useEffect, useImperativeHandle, useState } from 'react' +import { useSelector } from 'react-redux' +import { useTheme } from '@mui/material/styles' +import PropTypes from 'prop-types' + +const SuggestionList = forwardRef((props, ref) => { + const [selectedIndex, setSelectedIndex] = useState(0) + const customization = useSelector((state) => 
state.customization) + const theme = useTheme() + + useEffect(() => { + // Configure tippy to auto-adjust placement + const tippyOptions = { + placement: 'bottom-start', + flip: true, + flipOnUpdate: true, + // Optional: you can add an offset to give some spacing + offset: [0, 8] + } + + // Update tippy instance with new options + if (props.tippyInstance) { + Object.assign(props.tippyInstance, tippyOptions) + } + }, [props.tippyInstance]) + + const selectItem = (index) => { + if (index >= props.items.length) { + // Make sure we actually have enough items to select the given index. For + // instance, if a user presses "Enter" when there are no options, the index will + // be 0 but there won't be any items, so just ignore the callback here + return + } + + const suggestion = props.items[index] + + // Set all of the attributes of our Mention node based on the suggestion + // data. The fields of `suggestion` will depend on whatever data you + // return from your `items` function in your "suggestion" options handler. + // Our suggestion handler returns `MentionSuggestion`s (which we've + // indicated via SuggestionProps). We are passing an + // object of the `MentionNodeAttrs` shape when calling `command` (utilized + // by the Mention extension to create a Mention Node). + const mentionItem = { + id: suggestion.id, + label: suggestion.mentionLabel + } + // @ts-expect-error there is currently a bug in the Tiptap SuggestionProps + // type where if you specify the suggestion type (like + // `SuggestionProps`), it will incorrectly require that + // type variable for `command`'s argument as well (whereas instead the + // type of that argument should be the Mention Node attributes). This + // should be fixed once https://github.com/ueberdosis/tiptap/pull/4136 is + // merged and we can add a separate type arg to `SuggestionProps` to + // specify the type of the commanded selected item. 
+ props.command(mentionItem) + } + + const upHandler = () => { + setSelectedIndex((selectedIndex + props.items.length - 1) % props.items.length) + } + + const downHandler = () => { + setSelectedIndex((selectedIndex + 1) % props.items.length) + } + + const enterHandler = () => { + selectItem(selectedIndex) + } + + useEffect(() => setSelectedIndex(0), [props.items]) + + useImperativeHandle(ref, () => ({ + onKeyDown: ({ event }) => { + if (event.key === 'ArrowUp') { + upHandler() + return true + } + + if (event.key === 'ArrowDown') { + downHandler() + return true + } + + if (event.key === 'Enter') { + enterHandler() + return true + } + + return false + } + })) + + // Group items by category + const groupedItems = props.items.reduce((acc, item) => { + const category = item.category || 'Other' + if (!acc[category]) { + acc[category] = [] + } + acc[category].push(item) + return acc + }, {}) + + return props.items.length > 0 ? ( + + + {Object.entries(groupedItems).map(([category, items], categoryIndex) => ( +
+ {/* Add divider before each category except the first one */} + {categoryIndex > 0 && } + + {/* Category header */} + + + {category} + + + + {/* Category items */} + {items.map((item) => { + const itemIndex = props.items.findIndex((i) => i.id === item.id) + return ( + + selectItem(itemIndex)} + sx={{ + display: 'flex', + flexDirection: 'column', + alignItems: 'flex-start' + }} + > + + {item.label || item.mentionLabel} + + {item.description && ( + + {item.description} + + )} + + + ) + })} +
+ ))} +
+
+ ) : null +}) + +SuggestionList.displayName = 'SuggestionList' + +// Add PropTypes validation +SuggestionList.propTypes = { + items: PropTypes.arrayOf( + PropTypes.shape({ + id: PropTypes.string.isRequired, + mentionLabel: PropTypes.string.isRequired, + label: PropTypes.string, + description: PropTypes.string, + category: PropTypes.string + }) + ).isRequired, + command: PropTypes.func.isRequired, + tippyInstance: PropTypes.object +} + +export default SuggestionList diff --git a/packages/ui/src/ui-component/input/suggestionOption.js b/packages/ui/src/ui-component/input/suggestionOption.js new file mode 100644 index 000000000..229870e2b --- /dev/null +++ b/packages/ui/src/ui-component/input/suggestionOption.js @@ -0,0 +1,223 @@ +import { ReactRenderer } from '@tiptap/react' +import tippy from 'tippy.js' +import SuggestionList from './SuggestionList' +import variablesApi from '@/api/variables' + +/** + * Workaround for the current typing incompatibility between Tippy.js and Tiptap + * Suggestion utility. 
/**
 * Workaround for the current typing incompatibility between Tippy.js and Tiptap
 * Suggestion utility.
 *
 * @see https://github.com/ueberdosis/tiptap/issues/2795#issuecomment-1160623792
 *
 * Adopted from
 * https://github.com/Doist/typist/blob/a1726a6be089e3e1452def641dfcfc622ac3e942/stories/typist-editor/constants/suggestions.ts#L169-L186
 */
const DOM_RECT_FALLBACK = {
    bottom: 0,
    height: 0,
    left: 0,
    right: 0,
    top: 0,
    width: 0,
    x: 0,
    y: 0,
    toJSON() {
        return {}
    }
}

// Module-level cache so custom variables are fetched at most once per page
// load (see refreshVariablesCache below to force a re-fetch).
let cachedVariables = []

/**
 * Fetch all custom variables from the API and refresh the cache.
 * Returns [] (and logs the error) on failure so suggestion rendering
 * never throws while the user is typing.
 * @returns {Promise<Array>} the fetched variables (possibly empty)
 */
const fetchVariables = async () => {
    try {
        const response = await variablesApi.getAllVariables()
        cachedVariables = response.data || []
        return cachedVariables
    } catch (error) {
        console.error('Failed to fetch variables:', error)
        return []
    }
}

/**
 * Build the Tiptap Mention `suggestion` options object for the `{{` trigger.
 *
 * @param {Array}   availableNodesForVariable - upstream nodes whose outputs can be referenced
 * @param {Array}   availableState            - startAgentflow state entries ({ key, ... })
 * @param {boolean} acceptNodeOutputAsVariable - whether to offer `output` / structured-output items
 * @param {Array}   nodes                     - all canvas nodes (used to locate startAgentflow form inputs)
 * @param {Object}  nodeData                  - data of the node being edited (for llmStructuredOutput)
 * @param {boolean} isNodeInsideIteration     - whether the node is nested in an iteration block
 * @returns {Object} options consumed by Mention.configure({ suggestion })
 */
export const suggestionOptions = (
    availableNodesForVariable,
    availableState,
    acceptNodeOutputAsVariable,
    nodes,
    nodeData,
    isNodeInsideIteration
) => ({
    char: '{{',
    items: async ({ query }) => {
        // Static chat-context and flow-variable suggestions, always offered
        const defaultItems = [
            { id: 'question', mentionLabel: 'question', description: "User's question from chatbox", category: 'Chat Context' },
            {
                id: 'chat_history',
                mentionLabel: 'chat_history',
                description: 'Past conversation history between user and AI',
                category: 'Chat Context'
            },
            {
                id: 'runtime_messages_length',
                mentionLabel: 'runtime_messages_length',
                // fixed typo: "messsages" -> "messages"
                description: 'Total messages between LLM and Agent',
                category: 'Chat Context'
            },
            {
                id: 'file_attachment',
                mentionLabel: 'file_attachment',
                description: 'Files uploaded from the chat',
                category: 'Chat Context'
            },
            { id: '$flow.sessionId', mentionLabel: '$flow.sessionId', description: 'Current session ID', category: 'Flow Variables' },
            { id: '$flow.chatId', mentionLabel: '$flow.chatId', description: 'Current chat ID', category: 'Flow Variables' },
            { id: '$flow.chatflowId', mentionLabel: '$flow.chatflowId', description: 'Current chatflow ID', category: 'Flow Variables' }
        ]

        // One item per startAgentflow state key
        const stateItems = (availableState || []).map((state) => ({
            id: `$flow.state.${state.key}`,
            mentionLabel: `$flow.state.${state.key}`,
            category: 'Flow State'
        }))

        if (isNodeInsideIteration) {
            defaultItems.unshift({
                id: '$iteration',
                mentionLabel: '$iteration',
                description: 'Iteration item. For JSON, use dot notation: $iteration.name',
                category: 'Iteration'
            })
        }

        // Add output option if acceptNodeOutputAsVariable is true
        if (acceptNodeOutputAsVariable) {
            defaultItems.unshift({
                id: 'output',
                mentionLabel: 'output',
                description: 'Output from the current node',
                category: 'Node Outputs'
            })

            // Also expose each key of a structured LLM output, if configured
            const structuredOutputs = nodeData?.inputs?.llmStructuredOutput ?? []
            if (structuredOutputs && structuredOutputs.length > 0) {
                structuredOutputs.forEach((item) => {
                    defaultItems.unshift({
                        id: `output.${item.key}`,
                        mentionLabel: `output.${item.key}`,
                        description: `${item.description}`,
                        category: 'Node Outputs'
                    })
                })
            }
        }

        // Fetch variables if cache is empty
        if (cachedVariables.length === 0) {
            await fetchVariables()
        }

        const variableItems = cachedVariables.map((variable) => ({
            id: `$vars.${variable.name}`,
            mentionLabel: `$vars.${variable.name}`,
            description: `Variable: ${variable.value} (${variable.type})`,
            category: 'Custom Variables'
        }))

        // Form inputs declared on the startAgentflow node, if any
        const startAgentflowNode = nodes.find((node) => node.data.name === 'startAgentflow')
        const formInputTypes = startAgentflowNode?.data?.inputs?.formInputTypes

        let formItems = []
        if (formInputTypes) {
            formItems = (formInputTypes || []).map((input) => ({
                id: `$form.${input.name}`,
                mentionLabel: `$form.${input.name}`,
                description: `Form Input: ${input.label}`,
                category: 'Form Inputs'
            }))
        }

        // Upstream node outputs that can be referenced from this node
        const nodeItems = (availableNodesForVariable || []).map((node) => {
            const selectedOutputAnchor = node.data.outputAnchors?.[0]?.options?.find((ancr) => ancr.name === node.data.outputs['output'])

            return {
                id: `${node.id}`,
                mentionLabel: node.data.inputs.chainName ?? node.data.inputs.functionName ?? node.data.inputs.variableName ?? node.data.id,
                description:
                    node.data.name === 'ifElseFunction'
                        ? node.data.description
                        : `${selectedOutputAnchor?.label ?? 'Output'} from ${node.data.label}`,
                category: 'Node Outputs'
            }
        })

        const allItems = [...defaultItems, ...formItems, ...nodeItems, ...stateItems, ...variableItems]

        // Case-insensitive filter on both the label and the raw id
        return allItems.filter(
            (item) => item.mentionLabel.toLowerCase().includes(query.toLowerCase()) || item.id.toLowerCase().includes(query.toLowerCase())
        )
    },
    render: () => {
        let component
        let popup

        return {
            onStart: (props) => {
                component = new ReactRenderer(SuggestionList, {
                    props,
                    editor: props.editor
                })

                popup = tippy('body', {
                    getReferenceClientRect: () => props.clientRect?.() ?? DOM_RECT_FALLBACK,
                    appendTo: () => document.body,
                    content: component.element,
                    showOnCreate: true,
                    interactive: true,
                    trigger: 'manual',
                    placement: 'bottom-start'
                })[0]
            },

            onUpdate(props) {
                component?.updateProps(props)

                popup?.setProps({
                    getReferenceClientRect: () => props.clientRect?.() ?? DOM_RECT_FALLBACK
                })
            },

            onKeyDown(props) {
                if (props.event.key === 'Escape') {
                    popup?.hide()
                    return true
                }

                if (!component?.ref) {
                    return false
                }

                return component.ref.onKeyDown(props)
            },

            onExit() {
                popup?.destroy()
                component?.destroy()

                // Remove references to the old popup and component upon destruction/exit.
                // (This should prevent redundant calls to `popup.destroy()`, which Tippy
                // warns in the console is a sign of a memory leak, as the `suggestion`
                // plugin seems to call `onExit` both when a suggestion menu is closed after
                // a user chooses an option, *and* when the editor itself is destroyed.)
                popup = undefined
                component = undefined
            }
        }
    }
})

// Export function to refresh variables cache
export const refreshVariablesCache = () => {
    return fetchVariables()
}
+
e.stopPropagation()} + onKeyDown={(e) => { + if (e.key === 'Enter' || e.key === ' ') { + e.stopPropagation() + } + }} + role='button' + aria-label='JSON Editor' + tabIndex={0} + key={JSON.stringify(myValue)} + > /g, '>') + + return json.replace( + // eslint-disable-next-line + /("(\\u[a-zA-Z0-9]{4}|\\[^u]|[^\\"])*"(\s*:)?|\b(true|false|null)\b|-?\d+(?:\.\d*)?(?:[eE][+\-]?\d+)?)/g, + function (match) { + let cls = 'number' + if (/^"/.test(match)) { + if (/:$/.test(match)) { + cls = 'key' + } else { + cls = 'string' + } + } else if (/true|false/.test(match)) { + cls = 'boolean' + } else if (/null/.test(match)) { + cls = 'null' + } + return '' + match + '' + } + ) +} + +export const JSONViewer = ({ data, maxHeight = '400px' }) => { + const theme = useTheme() + const customization = useSelector((state) => state.customization) + const isDarkMode = customization.isDarkMode + + return ( + + +
+        
+    )
+}
+
+JSONViewer.propTypes = {
+    data: PropTypes.object,
+    maxHeight: PropTypes.string
+}
diff --git a/packages/ui/src/ui-component/markdown/CodeBlock.jsx b/packages/ui/src/ui-component/markdown/CodeBlock.jsx
index e6c503807..670a8ebe0 100644
--- a/packages/ui/src/ui-component/markdown/CodeBlock.jsx
+++ b/packages/ui/src/ui-component/markdown/CodeBlock.jsx
@@ -32,7 +32,7 @@ const programmingLanguages = {
     css: '.css'
 }
 
-export const CodeBlock = memo(({ language, chatflowid, isDialog, value }) => {
+export const CodeBlock = memo(({ language, chatflowid, isFullWidth, value }) => {
     const theme = useTheme()
     const [anchorEl, setAnchorEl] = useState(null)
     const openPopOver = Boolean(anchorEl)
@@ -76,7 +76,7 @@ export const CodeBlock = memo(({ language, chatflowid, isDialog, value }) => {
     }
 
     return (
-        
+
{language} @@ -118,6 +118,6 @@ CodeBlock.displayName = 'CodeBlock' CodeBlock.propTypes = { language: PropTypes.string, chatflowid: PropTypes.string, - isDialog: PropTypes.bool, + isFullWidth: PropTypes.bool, value: PropTypes.string } diff --git a/packages/ui/src/ui-component/markdown/MemoizedReactMarkdown.jsx b/packages/ui/src/ui-component/markdown/MemoizedReactMarkdown.jsx index 523585fd7..c0e3b2054 100644 --- a/packages/ui/src/ui-component/markdown/MemoizedReactMarkdown.jsx +++ b/packages/ui/src/ui-component/markdown/MemoizedReactMarkdown.jsx @@ -1,19 +1,165 @@ -import { memo } from 'react' +import { memo, useMemo } from 'react' import PropTypes from 'prop-types' import ReactMarkdown from 'react-markdown' import './Markdown.css' +import { CodeBlock } from '../markdown/CodeBlock' +import remarkGfm from 'remark-gfm' +import remarkMath from 'remark-math' +import rehypeMathjax from 'rehype-mathjax' +import rehypeRaw from 'rehype-raw' +/** + * Checks if text likely contains LaTeX math notation + * @param {string} text - Text to check for LaTeX math + * @param {Object[]} customPatterns - Additional regex patterns to check + * @returns {boolean} - Whether LaTeX math is likely present + */ +const containsLaTeX = (text, customPatterns = []) => { + if (!text || typeof text !== 'string') return false + + // Common LaTeX patterns - more permissive to catch edge cases + const defaultPatterns = [ + { regex: /\$\$.+?\$\$/s, name: 'Block math: $$...$$' }, + { regex: /\\\(.+?\\\)/s, name: 'Inline math: \\(...\\)' }, + { regex: /\\\[[\s\S]*?\\\]/, name: 'Display math: \\[...\\]' }, + { regex: /\\begin{(equation|align|gather|math|matrix|bmatrix|pmatrix|vmatrix|cases)}.+?\\end{\1}/s, name: 'Environment math' }, + { regex: /\$(.*?[\\{}_^].*?)\$/, name: 'Inline math with $' }, + { regex: /\\frac/, name: 'LaTeX command: \\frac' }, + { regex: /\\sqrt/, name: 'LaTeX command: \\sqrt' }, + { regex: /\\pm/, name: 'LaTeX command: \\pm' }, + { regex: /\\cdot/, name: 'LaTeX command: \\cdot' }, 
+ { regex: /\\text/, name: 'LaTeX command: \\text' }, + { regex: /\\sum/, name: 'LaTeX command: \\sum' }, + { regex: /\\prod/, name: 'LaTeX command: \\prod' }, + { regex: /\\int/, name: 'LaTeX command: \\int' } + ] + + // Combine default and custom patterns + const patterns = [...defaultPatterns, ...customPatterns] + + for (const pattern of patterns) { + if (pattern.regex.test(text)) { + return true + } + } + + return false +} + +/** + * Preprocesses text to make LaTeX syntax more compatible with Markdown + * @param {string} text - Original text with potentially problematic LaTeX syntax + * @returns {string} - Text with LaTeX syntax adjusted for better compatibility + */ +const preprocessLatex = (text) => { + if (!text || typeof text !== 'string') return text + + // Replace problematic LaTeX patterns with more compatible alternatives + const processedText = text + // Convert display math with indentation to dollar-dollar format + .replace(/(\n\s*)\\\[([\s\S]*?)\\\](\s*\n|$)/g, (match, before, content, after) => { + // Preserve indentation but use $$ format which is more reliably handled + return `${before}$$${content}$$${after}` + }) + // Convert inline math to dollar format with spaces to avoid conflicts + .replace(/\\\(([\s\S]*?)\\\)/g, '$ $1 $') + + return processedText +} + +/** + * Enhanced Markdown component with memoization for better performance + * Supports various plugins and custom rendering components + */ export const MemoizedReactMarkdown = memo( - ({ children, ...props }) => ( -
- {children} -
- ), - (prevProps, nextProps) => prevProps.children === nextProps.children + ({ children, ...props }) => { + // Preprocess text to improve LaTeX compatibility + const processedChildren = useMemo(() => (typeof children === 'string' ? preprocessLatex(children) : children), [children]) + + // Enable math by default unless explicitly disabled + const shouldEnableMath = useMemo(() => { + const hasLatex = processedChildren && containsLaTeX(processedChildren, props.mathPatterns || []) + + return props.disableMath === true ? false : props.forceMath || hasLatex + }, [processedChildren, props.forceMath, props.disableMath, props.mathPatterns]) + + // Configure plugins based on content + const remarkPlugins = useMemo(() => { + if (props.remarkPlugins) return props.remarkPlugins + return shouldEnableMath ? [remarkGfm, remarkMath] : [remarkGfm] + }, [props.remarkPlugins, shouldEnableMath]) + + const rehypePlugins = useMemo(() => { + if (props.rehypePlugins) return props.rehypePlugins + return shouldEnableMath ? [rehypeMathjax, rehypeRaw] : [rehypeRaw] + }, [props.rehypePlugins, shouldEnableMath]) + + return ( +
+ + ) : ( + + {children} + + ) + }, + p({ children }) { + return

{children}

+ }, + ...props.components + }} + {...props} + > + {processedChildren} +
+
+ ) + }, + (prevProps, nextProps) => { + // More detailed comparison for better memoization + if (prevProps.children !== nextProps.children) return false + + // Check if other props have changed + const prevEntries = Object.entries(prevProps).filter(([key]) => key !== 'children') + const nextEntries = Object.entries(nextProps).filter(([key]) => key !== 'children') + + if (prevEntries.length !== nextEntries.length) return false + + // Simple shallow comparison of remaining props + for (const [key, value] of prevEntries) { + if (key === 'components' || key === 'remarkPlugins' || key === 'rehypePlugins') continue // Skip complex objects + + if (nextProps[key] !== value) return false + } + + return true + } ) MemoizedReactMarkdown.displayName = 'MemoizedReactMarkdown' MemoizedReactMarkdown.propTypes = { - children: PropTypes.any + children: PropTypes.any, + chatflowid: PropTypes.string, + isFullWidth: PropTypes.bool, + remarkPlugins: PropTypes.array, + rehypePlugins: PropTypes.array, + components: PropTypes.object, + forceMath: PropTypes.bool, + disableMath: PropTypes.bool, + mathPatterns: PropTypes.array } diff --git a/packages/ui/src/ui-component/table/ExecutionsListTable.jsx b/packages/ui/src/ui-component/table/ExecutionsListTable.jsx new file mode 100644 index 000000000..1b7dd68f9 --- /dev/null +++ b/packages/ui/src/ui-component/table/ExecutionsListTable.jsx @@ -0,0 +1,306 @@ +import { useState } from 'react' +import PropTypes from 'prop-types' +import { useSelector } from 'react-redux' +import moment from 'moment' +import { styled } from '@mui/material/styles' +import { + Box, + Paper, + Skeleton, + Table, + TableBody, + TableCell, + TableContainer, + TableHead, + TableRow, + TableSortLabel, + useTheme, + Checkbox +} from '@mui/material' +import { tableCellClasses } from '@mui/material/TableCell' +import CheckCircleIcon from '@mui/icons-material/CheckCircle' +import StopCircleIcon from '@mui/icons-material/StopCircle' +import ErrorIcon from 
'@mui/icons-material/Error' +import { IconLoader, IconCircleXFilled } from '@tabler/icons-react' + +const StyledTableCell = styled(TableCell)(({ theme }) => ({ + borderColor: theme.palette.grey[900] + 25, + + [`&.${tableCellClasses.head}`]: { + color: theme.palette.grey[900] + }, + [`&.${tableCellClasses.body}`]: { + fontSize: 14, + height: 64 + } +})) + +const StyledTableRow = styled(TableRow)(() => ({ + // hide last border + '&:last-child td, &:last-child th': { + border: 0 + } +})) + +const getIconFromStatus = (state, theme) => { + switch (state) { + case 'FINISHED': + return CheckCircleIcon + case 'ERROR': + case 'TIMEOUT': + return ErrorIcon + case 'TERMINATED': + // eslint-disable-next-line react/display-name + return (props) => { + const IconWrapper = (props) => + IconWrapper.displayName = 'TerminatedIcon' + return + } + case 'STOPPED': + return StopCircleIcon + case 'INPROGRESS': + // eslint-disable-next-line react/display-name + return (props) => { + const IconWrapper = (props) => ( + // eslint-disable-next-line + + ) + IconWrapper.displayName = 'InProgressIcon' + return + } + } +} + +const getIconColor = (state) => { + switch (state) { + case 'FINISHED': + return 'success.dark' + case 'ERROR': + case 'TIMEOUT': + return 'error.main' + case 'TERMINATED': + case 'STOPPED': + return 'error.main' + case 'INPROGRESS': + return 'warning.main' + } +} + +export const ExecutionsListTable = ({ data, isLoading, onExecutionRowClick, onSelectionChange }) => { + const theme = useTheme() + const customization = useSelector((state) => state.customization) + + const localStorageKeyOrder = 'executions_order' + const localStorageKeyOrderBy = 'executions_orderBy' + + const [order, setOrder] = useState(localStorage.getItem(localStorageKeyOrder) || 'desc') + const [orderBy, setOrderBy] = useState(localStorage.getItem(localStorageKeyOrderBy) || 'updatedDate') + const [selected, setSelected] = useState([]) + + const handleRequestSort = (property) => { + const isAsc = orderBy === 
property && order === 'asc' + const newOrder = isAsc ? 'desc' : 'asc' + setOrder(newOrder) + setOrderBy(property) + localStorage.setItem(localStorageKeyOrder, newOrder) + localStorage.setItem(localStorageKeyOrderBy, property) + } + + const handleSelectAllClick = (event) => { + if (event.target.checked) { + const newSelected = data.map((n) => n.id) + setSelected(newSelected) + onSelectionChange && onSelectionChange(newSelected) + } else { + setSelected([]) + onSelectionChange && onSelectionChange([]) + } + } + + const handleClick = (event, id) => { + event.stopPropagation() + const selectedIndex = selected.indexOf(id) + let newSelected = [] + + if (selectedIndex === -1) { + newSelected = newSelected.concat(selected, id) + } else if (selectedIndex === 0) { + newSelected = newSelected.concat(selected.slice(1)) + } else if (selectedIndex === selected.length - 1) { + newSelected = newSelected.concat(selected.slice(0, -1)) + } else if (selectedIndex > 0) { + newSelected = newSelected.concat(selected.slice(0, selectedIndex), selected.slice(selectedIndex + 1)) + } + + setSelected(newSelected) + onSelectionChange && onSelectionChange(newSelected) + } + + const isSelected = (id) => selected.indexOf(id) !== -1 + + const sortedData = data + ? [...data].sort((a, b) => { + if (orderBy === 'name') { + return order === 'asc' ? (a.name || '').localeCompare(b.name || '') : (b.name || '').localeCompare(a.name || '') + } else if (orderBy === 'updatedDate') { + return order === 'asc' + ? new Date(a.updatedDate) - new Date(b.updatedDate) + : new Date(b.updatedDate) - new Date(a.updatedDate) + } + return 0 + }) + : [] + + return ( + <> + +
+ + + + 0 && selected.length < data.length} + checked={data.length > 0 && selected.length === data.length} + onChange={handleSelectAllClick} + inputProps={{ + 'aria-label': 'select all executions' + }} + /> + + Status + + handleRequestSort('updatedDate')} + > + Last Updated + + + + handleRequestSort('name')}> + Agentflow + + + Session + + handleRequestSort('createdDate')} + > + Created + + + + + + {isLoading ? ( + <> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + ) : ( + <> + {sortedData.map((row, index) => { + const isItemSelected = isSelected(row.id) + const labelId = `enhanced-table-checkbox-${index}` + + return ( + + + handleClick(event, row.id)} + inputProps={{ + 'aria-labelledby': labelId + }} + /> + + onExecutionRowClick(row)}> + + + onExecutionRowClick(row)}> + {moment(row.updatedDate).format('MMM D, YYYY h:mm A')} + + onExecutionRowClick(row)}> + {row.agentflow?.name} + + onExecutionRowClick(row)}>{row.sessionId} + onExecutionRowClick(row)}> + {moment(row.createdDate).format('MMM D, YYYY h:mm A')} + + + ) + })} + + )} + +
+
+ + ) +} + +ExecutionsListTable.propTypes = { + data: PropTypes.array, + isLoading: PropTypes.bool, + onExecutionRowClick: PropTypes.func, + onSelectionChange: PropTypes.func, + className: PropTypes.string +} + +ExecutionsListTable.displayName = 'ExecutionsListTable' diff --git a/packages/ui/src/ui-component/table/FlowListTable.jsx b/packages/ui/src/ui-component/table/FlowListTable.jsx index dea00791c..01ab92436 100644 --- a/packages/ui/src/ui-component/table/FlowListTable.jsx +++ b/packages/ui/src/ui-component/table/FlowListTable.jsx @@ -47,7 +47,7 @@ const getLocalStorageKeyName = (name, isAgentCanvas) => { return (isAgentCanvas ? 'agentcanvas' : 'chatflowcanvas') + '_' + name } -export const FlowListTable = ({ data, images, isLoading, filterFunction, updateFlowsApi, setError, isAgentCanvas }) => { +export const FlowListTable = ({ data, images = {}, icons = {}, isLoading, filterFunction, updateFlowsApi, setError, isAgentCanvas }) => { const theme = useTheme() const customization = useSelector((state) => state.customization) @@ -66,6 +66,14 @@ export const FlowListTable = ({ data, images, isLoading, filterFunction, updateF localStorage.setItem(localStorageKeyOrderBy, property) } + const onFlowClick = (row) => { + if (!isAgentCanvas) { + return `/canvas/${row.id}` + } else { + return localStorage.getItem('agentFlowVersion') === 'v2' ? `/v2/agentcanvas/${row.id}` : `/agentcanvas/${row.id}` + } + } + const sortedData = data ? [...data].sort((a, b) => { if (orderBy === 'name') { @@ -170,10 +178,7 @@ export const FlowListTable = ({ data, images, isLoading, filterFunction, updateF overflow: 'hidden' }} > - + {row.templateName || row.name} @@ -198,7 +203,7 @@ export const FlowListTable = ({ data, images, isLoading, filterFunction, updateF
- {images[row.id] && ( + {(images[row.id] || icons[row.id]) && ( - {images[row.id] - .slice(0, images[row.id].length > 5 ? 5 : images[row.id].length) - .map((img) => ( - - ({ type: 'image', src: img })), + ...(icons[row.id] || []).map((ic) => ({ + type: 'icon', + icon: ic.icon, + color: ic.color + })) + ] + .slice(0, 5) + .map((item, index) => + item.type === 'image' ? ( + - - ))} - {images[row.id].length > 5 && ( + > + + + ) : ( +
+ +
+ ) + )} + {(images[row.id]?.length || 0) + (icons[row.id]?.length || 0) > 5 && ( - + {images[row.id].length - 5} More + + {(images[row.id]?.length || 0) + (icons[row.id]?.length || 0) - 5} More )}
)}
- {moment(row.updatedDate).format('MMMM Do, YYYY')} + + {moment(row.updatedDate).format('MMMM Do, YYYY HH:mm:ss')} + { + // Helper function to safely render cell content + const renderCellContent = (key, row) => { + if (row[key] === null || row[key] === undefined) { + return '' + } else if (key === 'enabled') { + return row[key] ? : + } else if (key === 'type' && row.schema) { + // If there's schema information, add a tooltip + const schemaContent = + '[
' + + row.schema + .map( + (item) => + `  ${JSON.stringify( + { + [item.name]: item.type + }, + null, + 2 + )}` + ) + .join(',
') + + '
]' + + return ( + + {row[key]} + Schema:
${schemaContent}`} /> +
+ ) + } else if (typeof row[key] === 'object') { + // For other objects (that are not handled by special cases above) + return JSON.stringify(row[key]) + } else { + return row[key] + } + } + return ( <> @@ -32,20 +70,8 @@ export const TableViewOnly = ({ columns, rows, sx }) => { {rows.map((row, index) => ( {Object.keys(row).map((key, index) => { - if (key !== 'id') { - return ( - - {key === 'enabled' ? ( - row[key] ? ( - - ) : ( - - ) - ) : ( - row[key] - )} - - ) + if (key !== 'id' && key !== 'schema') { + return {renderCellContent(key, row)} } })} diff --git a/packages/ui/src/utils/exportImport.js b/packages/ui/src/utils/exportImport.js index b160583d0..1d8b3005f 100644 --- a/packages/ui/src/utils/exportImport.js +++ b/packages/ui/src/utils/exportImport.js @@ -57,7 +57,8 @@ const sanitizeAssistant = (Assistant) => { id: assistant.id, details: assistant.details, credential: assistant.credential, - iconSrc: assistant.iconSrc + iconSrc: assistant.iconSrc, + type: assistant.type } }) } catch (error) { @@ -76,11 +77,21 @@ export const stringify = (object) => { export const exportData = (exportAllData) => { try { return { - Tool: sanitizeTool(exportAllData.Tool), - ChatFlow: sanitizeChatflow(exportAllData.ChatFlow), AgentFlow: sanitizeChatflow(exportAllData.AgentFlow), - Variable: sanitizeVariable(exportAllData.Variable), - Assistant: sanitizeAssistant(exportAllData.Assistant) + AgentFlowV2: sanitizeChatflow(exportAllData.AgentFlowV2), + AssistantFlow: sanitizeChatflow(exportAllData.AssistantFlow), + AssistantCustom: sanitizeAssistant(exportAllData.AssistantCustom), + AssistantOpenAI: sanitizeAssistant(exportAllData.AssistantOpenAI), + AssistantAzure: sanitizeAssistant(exportAllData.AssistantAzure), + ChatFlow: sanitizeChatflow(exportAllData.ChatFlow), + ChatMessage: exportAllData.ChatMessage, + ChatMessageFeedback: exportAllData.ChatMessageFeedback, + CustomTemplate: exportAllData.CustomTemplate, + DocumentStore: exportAllData.DocumentStore, + DocumentStoreFileChunk: 
exportAllData.DocumentStoreFileChunk, + Execution: exportAllData.Execution, + Tool: sanitizeTool(exportAllData.Tool), + Variable: sanitizeVariable(exportAllData.Variable) } } catch (error) { throw new Error(`exportImport.exportData ${getErrorMessage(error)}`) diff --git a/packages/ui/src/utils/genericHelper.js b/packages/ui/src/utils/genericHelper.js index 5b6e87a96..97542114e 100644 --- a/packages/ui/src/utils/genericHelper.js +++ b/packages/ui/src/utils/genericHelper.js @@ -1,4 +1,4 @@ -import { uniq } from 'lodash' +import { uniq, get, isEqual } from 'lodash' import moment from 'moment' export const getUniqueNodeId = (nodeData, nodes) => { @@ -16,6 +16,94 @@ export const getUniqueNodeId = (nodeData, nodes) => { return baseId } +export const getUniqueNodeLabel = (nodeData, nodes) => { + if (nodeData.type === 'StickyNote') return nodeData.label + if (nodeData.name === 'startAgentflow') return nodeData.label + + let suffix = 0 + + // Construct base ID + let baseId = `${nodeData.name}_${suffix}` + + // Increment suffix until a unique ID is found + while (nodes.some((node) => node.id === baseId)) { + suffix += 1 + baseId = `${nodeData.name}_${suffix}` + } + + return `${nodeData.label} ${suffix}` +} + +const createAgentFlowOutputs = (nodeData, newNodeId) => { + if (nodeData.hideOutput) return [] + + if (nodeData.outputs?.length) { + return nodeData.outputs.map((_, index) => ({ + id: `${newNodeId}-output-${index}`, + label: nodeData.label, + name: nodeData.name + })) + } + + return [ + { + id: `${newNodeId}-output-${nodeData.name}`, + label: nodeData.label, + name: nodeData.name + } + ] +} + +const createOutputOption = (output, newNodeId) => { + const outputBaseClasses = output.baseClasses ?? [] + const baseClasses = outputBaseClasses.length > 1 ? outputBaseClasses.join('|') : outputBaseClasses[0] || '' + + const type = outputBaseClasses.length > 1 ? 
outputBaseClasses.join(' | ') : outputBaseClasses[0] || '' + + return { + id: `${newNodeId}-output-${output.name}-${baseClasses}`, + name: output.name, + label: output.label, + description: output.description ?? '', + type, + isAnchor: output?.isAnchor, + hidden: output?.hidden + } +} + +const createStandardOutputs = (nodeData, newNodeId) => { + if (nodeData.hideOutput) return [] + + if (nodeData.outputs?.length) { + const outputOptions = nodeData.outputs.map((output) => createOutputOption(output, newNodeId)) + + return [ + { + name: 'output', + label: 'Output', + type: 'options', + description: nodeData.outputs[0].description ?? '', + options: outputOptions, + default: nodeData.outputs[0].name + } + ] + } + + return [ + { + id: `${newNodeId}-output-${nodeData.name}-${nodeData.baseClasses.join('|')}`, + name: nodeData.name, + label: nodeData.type, + description: nodeData.description ?? '', + type: nodeData.baseClasses.join(' | ') + } + ] +} + +const initializeOutputAnchors = (nodeData, newNodeId, isAgentflow) => { + return isAgentflow ? createAgentFlowOutputs(nodeData, newNodeId) : createStandardOutputs(nodeData, newNodeId) +} + export const initializeDefaultNodeData = (nodeParams) => { const initialValues = {} @@ -27,16 +115,17 @@ export const initializeDefaultNodeData = (nodeParams) => { return initialValues } -export const initNode = (nodeData, newNodeId) => { +export const initNode = (nodeData, newNodeId, isAgentflow) => { const inputAnchors = [] const inputParams = [] const incoming = nodeData.inputs ? 
nodeData.inputs.length : 0 - const outgoing = 1 const whitelistTypes = [ 'asyncOptions', + 'asyncMultiOptions', 'options', 'multiOptions', + 'array', 'datagrid', 'string', 'number', @@ -74,55 +163,7 @@ export const initNode = (nodeData, newNodeId) => { } // Outputs - const outputAnchors = [] - for (let i = 0; i < outgoing; i += 1) { - if (nodeData.hideOutput) continue - if (nodeData.outputs && nodeData.outputs.length) { - const options = [] - for (let j = 0; j < nodeData.outputs.length; j += 1) { - let baseClasses = '' - let type = '' - - const outputBaseClasses = nodeData.outputs[j].baseClasses ?? [] - if (outputBaseClasses.length > 1) { - baseClasses = outputBaseClasses.join('|') - type = outputBaseClasses.join(' | ') - } else if (outputBaseClasses.length === 1) { - baseClasses = outputBaseClasses[0] - type = outputBaseClasses[0] - } - - const newOutputOption = { - id: `${newNodeId}-output-${nodeData.outputs[j].name}-${baseClasses}`, - name: nodeData.outputs[j].name, - label: nodeData.outputs[j].label, - description: nodeData.outputs[j].description ?? '', - type, - isAnchor: nodeData.outputs[j]?.isAnchor, - hidden: nodeData.outputs[j]?.hidden - } - options.push(newOutputOption) - } - const newOutput = { - name: 'output', - label: 'Output', - type: 'options', - description: nodeData.outputs[0].description ?? '', - options, - default: nodeData.outputs[0].name - } - outputAnchors.push(newOutput) - } else { - const newOutput = { - id: `${newNodeId}-output-${nodeData.name}-${nodeData.baseClasses.join('|')}`, - name: nodeData.name, - label: nodeData.type, - description: nodeData.description ?? 
'', - type: nodeData.baseClasses.join(' | ') - } - outputAnchors.push(newOutput) - } - } + let outputAnchors = initializeOutputAnchors(nodeData, newNodeId, isAgentflow) /* Initial inputs = [ @@ -159,9 +200,10 @@ export const initNode = (nodeData, newNodeId) => { // Inputs if (nodeData.inputs) { - nodeData.inputAnchors = inputAnchors - nodeData.inputParams = inputParams - nodeData.inputs = initializeDefaultNodeData(nodeData.inputs) + const defaultInputs = initializeDefaultNodeData(nodeData.inputs) + nodeData.inputAnchors = showHideInputAnchors({ ...nodeData, inputAnchors, inputs: defaultInputs }) + nodeData.inputParams = showHideInputParams({ ...nodeData, inputParams, inputs: defaultInputs }) + nodeData.inputs = defaultInputs } else { nodeData.inputAnchors = [] nodeData.inputParams = [] @@ -184,8 +226,10 @@ export const initNode = (nodeData, newNodeId) => { return nodeData } -export const updateOutdatedNodeData = (newComponentNodeData, existingComponentNodeData) => { - const initNewComponentNodeData = initNode(newComponentNodeData, existingComponentNodeData.id) +export const updateOutdatedNodeData = (newComponentNodeData, existingComponentNodeData, isAgentflow) => { + const initNewComponentNodeData = initNode(newComponentNodeData, existingComponentNodeData.id, isAgentflow) + + const isAgentFlowV2 = newComponentNodeData.category === 'Agent Flows' || existingComponentNodeData.category === 'Agent Flows' // Update credentials with existing credentials if (existingComponentNodeData.credential) { @@ -219,6 +263,11 @@ export const updateOutdatedNodeData = (newComponentNodeData, existingComponentNo } } + if (isAgentFlowV2) { + // persists the label from the existing node + initNewComponentNodeData.label = existingComponentNodeData.label + } + // Special case for Condition node to update outputAnchors if (initNewComponentNodeData.name.includes('seqCondition')) { const options = existingComponentNodeData.outputAnchors[0].options || [] @@ -242,22 +291,34 @@ export const 
updateOutdatedNodeData = (newComponentNodeData, existingComponentNo export const updateOutdatedNodeEdge = (newComponentNodeData, edges) => { const removedEdges = [] + + const isAgentFlowV2 = newComponentNodeData.category === 'Agent Flows' + for (const edge of edges) { const targetNodeId = edge.targetHandle.split('-')[0] const sourceNodeId = edge.sourceHandle.split('-')[0] if (targetNodeId === newComponentNodeData.id) { - // Check if targetHandle is in inputParams or inputAnchors - const inputParam = newComponentNodeData.inputParams.find((param) => param.id === edge.targetHandle) - const inputAnchor = newComponentNodeData.inputAnchors.find((param) => param.id === edge.targetHandle) + if (isAgentFlowV2) { + if (edge.targetHandle !== newComponentNodeData.id) { + removedEdges.push(edge) + } + } else { + // Check if targetHandle is in inputParams or inputAnchors + const inputParam = newComponentNodeData.inputParams.find((param) => param.id === edge.targetHandle) + const inputAnchor = newComponentNodeData.inputAnchors.find((param) => param.id === edge.targetHandle) - if (!inputParam && !inputAnchor) { - removedEdges.push(edge) + if (!inputParam && !inputAnchor) { + removedEdges.push(edge) + } } } if (sourceNodeId === newComponentNodeData.id) { - if (newComponentNodeData.outputAnchors?.length) { + if (isAgentFlowV2) { + // AgentFlow v2 doesn't have specific output anchors, connections are directly from node + // No need to remove edges for AgentFlow v2 outputs + } else if (newComponentNodeData.outputAnchors?.length) { for (const outputAnchor of newComponentNodeData.outputAnchors) { const outputAnchorType = outputAnchor.type if (outputAnchorType === 'options') { @@ -314,6 +375,63 @@ export const isValidConnection = (connection, reactFlowInstance) => { return false } +export const isValidConnectionAgentflowV2 = (connection, reactFlowInstance) => { + const source = connection.source + const target = connection.target + + // Prevent self connections + if (source === target) { 
+ return false + } + + // Check if this connection would create a cycle in the graph + if (wouldCreateCycle(source, target, reactFlowInstance)) { + return false + } + + return true +} + +// Function to check if a new connection would create a cycle +const wouldCreateCycle = (sourceId, targetId, reactFlowInstance) => { + // The most direct cycle check: if target connects back to source + if (sourceId === targetId) { + return true + } + + // Build directed graph from existing edges + const graph = {} + const edges = reactFlowInstance.getEdges() + + // Initialize graph + edges.forEach((edge) => { + if (!graph[edge.source]) graph[edge.source] = [] + graph[edge.source].push(edge.target) + }) + + // Check if there's a path from target to source (which would create a cycle when we add source → target) + const visited = new Set() + + function hasPath(current, destination) { + if (current === destination) return true + if (visited.has(current)) return false + + visited.add(current) + + const neighbors = graph[current] || [] + for (const neighbor of neighbors) { + if (hasPath(neighbor, destination)) { + return true + } + } + + return false + } + + // If there's a path from target to source, adding an edge from source to target will create a cycle + return hasPath(targetId, sourceId) +} + export const convertDateStringToDateObject = (dateString) => { if (dateString === undefined || !dateString) return undefined @@ -366,6 +484,21 @@ export const getFolderName = (base64ArrayStr) => { } } +const _removeCredentialId = (obj) => { + if (!obj || typeof obj !== 'object') return obj + + if (Array.isArray(obj)) { + return obj.map((item) => _removeCredentialId(item)) + } + + const newObj = {} + for (const [key, value] of Object.entries(obj)) { + if (key === 'FLOWISE_CREDENTIAL_ID') continue + newObj[key] = _removeCredentialId(value) + } + return newObj +} + export const generateExportFlowData = (flowData) => { const nodes = flowData.nodes const edges = flowData.edges @@ -380,6 +513,9 @@ 
export const generateExportFlowData = (flowData) => { version: node.data.version, name: node.data.name, type: node.data.type, + color: node.data.color, + hideOutput: node.data.hideOutput, + hideInput: node.data.hideInput, baseClasses: node.data.baseClasses, tags: node.data.tags, category: node.data.category, @@ -405,7 +541,7 @@ export const generateExportFlowData = (flowData) => { newNodeData.inputs = nodeDataInputs } - nodes[i].data = newNodeData + nodes[i].data = _removeCredentialId(newNodeData) } const exportJson = { nodes, @@ -414,11 +550,13 @@ export const generateExportFlowData = (flowData) => { return exportJson } -export const getAvailableNodesForVariable = (nodes, edges, target, targetHandle) => { +export const getAvailableNodesForVariable = (nodes, edges, target, targetHandle, includesStart = false) => { // example edge id = "llmChain_0-llmChain_0-output-outputPrediction-string|json-llmChain_1-llmChain_1-input-promptValues-string" // {source} -{sourceHandle} -{target} -{targetHandle} const parentNodes = [] + const isAgentFlowV2 = nodes.find((nd) => nd.id === target)?.data?.category === 'Agent Flows' + const isSeqAgent = nodes.find((nd) => nd.id === target)?.data?.category === 'Sequential Agents' function collectParentNodes(targetNodeId, nodes, edges) { @@ -441,10 +579,35 @@ export const getAvailableNodesForVariable = (nodes, edges, target, targetHandle) } }) } + function collectAgentFlowV2ParentNodes(targetNodeId, nodes, edges) { + const inputEdges = edges.filter((edg) => edg.target === targetNodeId && edg.targetHandle === targetNodeId) + + // Traverse each edge found + inputEdges.forEach((edge) => { + const parentNode = nodes.find((nd) => nd.id === edge.source) + if (!parentNode) return + + // Recursive call to explore further up the tree + collectAgentFlowV2ParentNodes(parentNode.id, nodes, edges) + + // Check and add the parent node to the list if it does not include specific names + const excludeNodeNames = ['startAgentflow'] + if 
(!excludeNodeNames.includes(parentNode.data.name) || includesStart) { + parentNodes.push(parentNode) + } + }) + } if (isSeqAgent) { collectParentNodes(target, nodes, edges) return uniq(parentNodes) + } else if (isAgentFlowV2) { + collectAgentFlowV2ParentNodes(target, nodes, edges) + const parentNodeId = nodes.find((nd) => nd.id === target)?.parentNode + if (parentNodeId) { + collectAgentFlowV2ParentNodes(parentNodeId, nodes, edges) + } + return uniq(parentNodes) } else { const inputEdges = edges.filter((edg) => edg.target === target && edg.targetHandle === targetHandle) if (inputEdges && inputEdges.length) { @@ -930,3 +1093,84 @@ export const getCustomConditionOutputs = (value, nodeId, existingEdges, isDataGr return { outputAnchors, toBeRemovedEdgeIds } } + +const _showHideOperation = (nodeData, inputParam, displayType, index) => { + const displayOptions = inputParam[displayType] + /* For example: + show: { + enableMemory: true + } + */ + Object.keys(displayOptions).forEach((path) => { + const comparisonValue = displayOptions[path] + if (path.includes('$index')) { + path = path.replace('$index', index) + } + const groundValue = get(nodeData.inputs, path, '') + + if (Array.isArray(comparisonValue)) { + if (displayType === 'show' && !comparisonValue.includes(groundValue)) { + inputParam.display = false + } + if (displayType === 'hide' && comparisonValue.includes(groundValue)) { + inputParam.display = false + } + } else if (typeof comparisonValue === 'string') { + if (displayType === 'show' && !(comparisonValue === groundValue || new RegExp(comparisonValue).test(groundValue))) { + inputParam.display = false + } + if (displayType === 'hide' && (comparisonValue === groundValue || new RegExp(comparisonValue).test(groundValue))) { + inputParam.display = false + } + } else if (typeof comparisonValue === 'boolean') { + if (displayType === 'show' && comparisonValue !== groundValue) { + inputParam.display = false + } + if (displayType === 'hide' && comparisonValue === 
groundValue) { + inputParam.display = false + } + } else if (typeof comparisonValue === 'object') { + if (displayType === 'show' && !isEqual(comparisonValue, groundValue)) { + inputParam.display = false + } + if (displayType === 'hide' && isEqual(comparisonValue, groundValue)) { + inputParam.display = false + } + } else if (typeof comparisonValue === 'number') { + if (displayType === 'show' && comparisonValue !== groundValue) { + inputParam.display = false + } + if (displayType === 'hide' && comparisonValue === groundValue) { + inputParam.display = false + } + } + }) +} + +export const showHideInputs = (nodeData, inputType, overrideParams, arrayIndex) => { + const params = overrideParams ?? nodeData[inputType] ?? [] + + for (let i = 0; i < params.length; i += 1) { + const inputParam = params[i] + + // Reset display flag to false for each inputParam + inputParam.display = true + + if (inputParam.show) { + _showHideOperation(nodeData, inputParam, 'show', arrayIndex) + } + if (inputParam.hide) { + _showHideOperation(nodeData, inputParam, 'hide', arrayIndex) + } + } + + return params +} + +export const showHideInputParams = (nodeData) => { + return showHideInputs(nodeData, 'inputParams') +} + +export const showHideInputAnchors = (nodeData) => { + return showHideInputs(nodeData, 'inputAnchors') +} diff --git a/packages/ui/src/views/agentexecutions/ExecutionDetails.jsx b/packages/ui/src/views/agentexecutions/ExecutionDetails.jsx new file mode 100644 index 000000000..21e81d64c --- /dev/null +++ b/packages/ui/src/views/agentexecutions/ExecutionDetails.jsx @@ -0,0 +1,983 @@ +import { useEffect, useState, useCallback, forwardRef } from 'react' +import PropTypes from 'prop-types' +import moment from 'moment' +import { useSelector, useDispatch } from 'react-redux' + +// MUI +import { RichTreeView } from '@mui/x-tree-view/RichTreeView' +import { Typography, Box, Drawer, Chip, Button, Tooltip } from '@mui/material' +import { styled, alpha } from '@mui/material/styles' +import { 
useTreeItem2 } from '@mui/x-tree-view/useTreeItem2' +import { + TreeItem2Content, + TreeItem2IconContainer, + TreeItem2GroupTransition, + TreeItem2Label, + TreeItem2Root, + TreeItem2Checkbox +} from '@mui/x-tree-view/TreeItem2' +import { TreeItem2Icon } from '@mui/x-tree-view/TreeItem2Icon' +import { TreeItem2Provider } from '@mui/x-tree-view/TreeItem2Provider' +import { TreeItem2DragAndDropOverlay } from '@mui/x-tree-view/TreeItem2DragAndDropOverlay' +import DragHandleIcon from '@mui/icons-material/DragHandle' +import CheckCircleIcon from '@mui/icons-material/CheckCircle' +import StopCircleIcon from '@mui/icons-material/StopCircle' +import ErrorIcon from '@mui/icons-material/Error' +import { IconButton } from '@mui/material' +import { + IconRefresh, + IconExternalLink, + IconCopy, + IconLoader, + IconCircleXFilled, + IconRelationOneToManyFilled, + IconShare, + IconWorld, + IconX +} from '@tabler/icons-react' + +// Project imports +import { useTheme } from '@mui/material/styles' +import { FLOWISE_CREDENTIAL_ID, AGENTFLOW_ICONS } from '@/store/constant' +import { NodeExecutionDetails } from '@/views/agentexecutions/NodeExecutionDetails' +import ShareExecutionDialog from './ShareExecutionDialog' +import { enqueueSnackbar as enqueueSnackbarAction, closeSnackbar as closeSnackbarAction } from '@/store/actions' + +// API +import executionsApi from '@/api/executions' + +// Hooks +import useApi from '@/hooks/useApi' + +const getIconColor = (status) => { + switch (status) { + case 'FINISHED': + return 'success.dark' + case 'ERROR': + case 'TIMEOUT': + return 'error.main' + case 'TERMINATED': + case 'STOPPED': + return 'error.main' + case 'INPROGRESS': + return 'warning.dark' + } +} + +const StyledTreeItemRoot = styled(TreeItem2Root)(({ theme }) => ({ + color: theme.palette.grey[400] +})) + +const CustomTreeItemContent = styled(TreeItem2Content)(({ theme }) => ({ + flexDirection: 'row-reverse', + borderRadius: theme.spacing(0.7), + marginBottom: theme.spacing(0.5), + 
marginTop: theme.spacing(0.5), + padding: theme.spacing(0.5), + paddingRight: theme.spacing(1), + fontWeight: 500, + [`&.Mui-expanded `]: { + '&:not(.Mui-focused, .Mui-selected, .Mui-selected.Mui-focused) .labelIcon': { + color: theme.palette.primary.dark, + ...theme.applyStyles('light', { + color: theme.palette.primary.main + }) + }, + '&::before': { + content: '""', + display: 'block', + position: 'absolute', + left: '16px', + top: '44px', + height: 'calc(100% - 48px)', + width: '1.5px', + backgroundColor: theme.palette.grey[700], + ...theme.applyStyles('light', { + backgroundColor: theme.palette.grey[300] + }) + } + }, + '&:hover': { + backgroundColor: alpha(theme.palette.primary.main, 0.1), + color: 'white', + ...theme.applyStyles('light', { + color: theme.palette.primary.main + }) + }, + [`&.Mui-focused, &.Mui-selected, &.Mui-selected.Mui-focused`]: { + backgroundColor: theme.palette.primary.dark, + color: theme.palette.primary.contrastText, + ...theme.applyStyles('light', { + backgroundColor: theme.palette.primary.main + }) + } +})) + +const StyledTreeItemLabelText = styled(Typography)(({ theme }) => ({ + color: theme.palette.text.primary +})) + +function CustomLabel({ icon: Icon, itemStatus, children, name, ...other }) { + // Check if this is an iteration node + const isIterationNode = name === 'iterationAgentflow' + + return ( + + {(() => { + // Display iteration icon for iteration nodes + if (isIterationNode) { + return ( + + + + ) + } + + // Otherwise display the node icon + const foundIcon = AGENTFLOW_ICONS.find((icon) => icon.name === name) + if (foundIcon) { + return ( + + + + ) + } + return null + })()} + + {children} + + {Icon && } + + ) +} + +CustomLabel.propTypes = { + icon: PropTypes.func, + itemStatus: PropTypes.string, + children: PropTypes.node, + name: PropTypes.string +} + +CustomLabel.displayName = 'CustomLabel' + +const isExpandable = (reactChildren) => { + if (Array.isArray(reactChildren)) { + return reactChildren.length > 0 && 
reactChildren.some(isExpandable) + } + return Boolean(reactChildren) +} + +const getIconFromStatus = (status, theme) => { + switch (status) { + case 'FINISHED': + return CheckCircleIcon + case 'ERROR': + case 'TIMEOUT': + return ErrorIcon + case 'TERMINATED': + // eslint-disable-next-line react/display-name + return (props) => { + const IconWrapper = (props) => + IconWrapper.displayName = 'TerminatedIcon' + return + } + case 'STOPPED': + return StopCircleIcon + case 'INPROGRESS': + // eslint-disable-next-line react/display-name + return (props) => { + const IconWrapper = (props) => ( + // eslint-disable-next-line + + ) + IconWrapper.displayName = 'InProgressIcon' + return + } + } +} + +const CustomTreeItem = forwardRef(function CustomTreeItem(props, ref) { + const { id, itemId, label, disabled, children, ...other } = props + const theme = useTheme() + + const { + getRootProps, + getContentProps, + getIconContainerProps, + getCheckboxProps, + getLabelProps, + getGroupTransitionProps, + getDragAndDropOverlayProps, + status, + publicAPI + } = useTreeItem2({ id, itemId, children, label, disabled, rootRef: ref }) + + const item = publicAPI.getItem(itemId) + const expandable = isExpandable(children) + let icon + if (item.status) { + icon = getIconFromStatus(item.status, theme) + } + + return ( + + + + + + + + + + + {children && ( + { + const nodeName = item.name || item.id?.split('_')[0] + const foundIcon = AGENTFLOW_ICONS.find((icon) => icon.name === nodeName) + return foundIcon ? 
foundIcon.color : theme.palette.primary.main + })()}`, + marginLeft: '13px', + paddingLeft: '8px' + }} + /> + )} + + + ) +}) + +CustomTreeItem.propTypes = { + id: PropTypes.string, + itemId: PropTypes.string, + label: PropTypes.string, + disabled: PropTypes.bool, + children: PropTypes.node, + className: PropTypes.string +} + +const MIN_DRAWER_WIDTH = 400 +const DEFAULT_DRAWER_WIDTH = window.innerWidth - 400 +const MAX_DRAWER_WIDTH = window.innerWidth + +export const ExecutionDetails = ({ open, isPublic, execution, metadata, onClose, onProceedSuccess, onUpdateSharing, onRefresh }) => { + const [drawerWidth, setDrawerWidth] = useState(Math.min(DEFAULT_DRAWER_WIDTH, MAX_DRAWER_WIDTH)) + const [executionTree, setExecution] = useState([]) + const [expandedItems, setExpandedItems] = useState([]) + const [selectedItem, setSelectedItem] = useState(null) + const [showShareDialog, setShowShareDialog] = useState(false) + const [copied, setCopied] = useState(false) + const [localMetadata, setLocalMetadata] = useState({}) + const theme = useTheme() + const customization = useSelector((state) => state.customization) + const updateExecutionApi = useApi(executionsApi.updateExecution) + + const dispatch = useDispatch() + + // useEffect to initialize localMetadata when metadata changes + useEffect(() => { + if (metadata) { + setLocalMetadata(metadata) + } + }, [metadata]) + + const copyToClipboard = () => { + navigator.clipboard.writeText(localMetadata?.id) + setCopied(true) + + // Show success message + dispatch( + enqueueSnackbarAction({ + message: 'ID copied to clipboard', + options: { + key: new Date().getTime() + Math.random(), + variant: 'success', + action: (key) => ( + + ) + } + }) + ) + + // Reset copied state after 2 seconds + setTimeout(() => { + setCopied(false) + }, 2000) + } + + const handleMouseDown = () => { + document.addEventListener('mousemove', handleMouseMove) + document.addEventListener('mouseup', handleMouseUp) + } + + const handleMouseMove = useCallback((e) 
=> { + const newWidth = document.body.offsetWidth - e.clientX + if (newWidth >= MIN_DRAWER_WIDTH && newWidth <= MAX_DRAWER_WIDTH) { + setDrawerWidth(newWidth) + } + }, []) + + const handleMouseUp = () => { + document.removeEventListener('mousemove', handleMouseMove) + document.removeEventListener('mouseup', handleMouseUp) + } + + const getAllNodeIds = (nodes) => { + let ids = [] + nodes.forEach((node) => { + ids.push(node.id) + if (node.children && node.children.length > 0) { + ids = [...ids, ...getAllNodeIds(node.children)] + } + }) + return ids + } + + // Transform the execution data into a tree structure + const buildTreeData = (nodes) => { + // for each node, loop through each and every nested key of node.data, and remove the key if it is equal to FLOWISE_CREDENTIAL_ID + nodes.forEach((node) => { + const removeFlowiseCredentialId = (data) => { + for (const key in data) { + if (key === FLOWISE_CREDENTIAL_ID) { + delete data[key] + } + if (typeof data[key] === 'object') { + removeFlowiseCredentialId(data[key]) + } + } + } + removeFlowiseCredentialId(node.data) + }) + + // Create a map for quick node lookup + // Use execution index to make each node instance unique + const nodeMap = new Map() + nodes.forEach((node, index) => { + const uniqueNodeId = `${node.nodeId}_${index}` + nodeMap.set(uniqueNodeId, { ...node, uniqueNodeId, children: [], executionIndex: index }) + }) + + // Identify iteration nodes and their children + const iterationGroups = new Map() // parentId -> Map of iterationIndex -> nodes + + // Group iteration child nodes by their parent and iteration index + nodes.forEach((node, index) => { + if (node.data?.parentNodeId && node.data?.iterationIndex !== undefined) { + const parentId = node.data.parentNodeId + const iterationIndex = node.data.iterationIndex + + if (!iterationGroups.has(parentId)) { + iterationGroups.set(parentId, new Map()) + } + + const iterationMap = iterationGroups.get(parentId) + if (!iterationMap.has(iterationIndex)) { + 
iterationMap.set(iterationIndex, []) + } + + iterationMap.get(iterationIndex).push(`${node.nodeId}_${index}`) + } + }) + + // Create virtual iteration container nodes + iterationGroups.forEach((iterationMap, parentId) => { + iterationMap.forEach((nodeIds, iterationIndex) => { + // Find the parent iteration node + let parentNode = null + for (let i = 0; i < nodes.length; i++) { + if (nodes[i].nodeId === parentId) { + parentNode = nodes[i] + break + } + } + + if (!parentNode) return + + // Get iteration context from first child node + const firstChildId = nodeIds[0] + const firstChild = nodeMap.get(firstChildId) + const iterationContext = firstChild?.data?.iterationContext || { index: iterationIndex } + + // Create a virtual node for this iteration + const iterationNodeId = `${parentId}_${iterationIndex}` + const iterationLabel = `Iteration #${iterationIndex}` + + // Determine status based on child nodes + const childNodes = nodeIds.map((id) => nodeMap.get(id)) + const iterationStatus = childNodes.some((n) => n.status === 'ERROR') + ? 'ERROR' + : childNodes.some((n) => n.status === 'INPROGRESS') + ? 'INPROGRESS' + : childNodes.every((n) => n.status === 'FINISHED') + ? 
'FINISHED' + : 'UNKNOWN' + + // Create the virtual node and add to nodeMap + const virtualNode = { + nodeId: iterationNodeId, + nodeLabel: iterationLabel, + data: { + name: 'iterationAgentflow', + iterationIndex, + iterationContext, + isVirtualNode: true, + parentIterationId: parentId + }, + previousNodeIds: [], // Will be handled in the main tree building + status: iterationStatus, + uniqueNodeId: iterationNodeId, + children: [], + executionIndex: -1 // Flag as a virtual node + } + + nodeMap.set(iterationNodeId, virtualNode) + + // Set this virtual node as the parent for all nodes in this iteration + nodeIds.forEach((childId) => { + const childNode = nodeMap.get(childId) + if (childNode) { + childNode.virtualParentId = iterationNodeId + } + }) + }) + }) + + // Root nodes have no previous nodes + const rootNodes = [] + const processedNodes = new Set() + + // First pass: Build the main tree structure (excluding iteration children) + nodes.forEach((node, index) => { + const uniqueNodeId = `${node.nodeId}_${index}` + const treeNode = nodeMap.get(uniqueNodeId) + + // Skip nodes that belong to an iteration (they'll be added to their virtual parent) + if (node.data?.parentNodeId && node.data?.iterationIndex !== undefined) { + return + } + + if (node.previousNodeIds.length === 0) { + rootNodes.push(treeNode) + } else { + // Find the most recent (latest) parent node among all previous nodes + let mostRecentParentIndex = -1 + let mostRecentParentId = null + + node.previousNodeIds.forEach((parentId) => { + // Find the most recent instance of this parent node + for (let i = 0; i < index; i++) { + if (nodes[i].nodeId === parentId && i > mostRecentParentIndex) { + mostRecentParentIndex = i + mostRecentParentId = parentId + } + } + }) + + // Only add to the most recent parent + if (mostRecentParentIndex !== -1) { + const parentUniqueId = `${mostRecentParentId}_${mostRecentParentIndex}` + const parentNode = nodeMap.get(parentUniqueId) + if (parentNode) { + 
parentNode.children.push(treeNode) + processedNodes.add(uniqueNodeId) + } + } + } + }) + + // Second pass: Build the iteration sub-trees + iterationGroups.forEach((iterationMap, parentId) => { + // Find all instances of the parent node + const parentInstances = [] + nodes.forEach((node, index) => { + if (node.nodeId === parentId) { + parentInstances.push(`${node.nodeId}_${index}`) + } + }) + + // Find the latest instance of the parent node that exists in the tree + let latestParent = null + for (let i = parentInstances.length - 1; i >= 0; i--) { + const parentId = parentInstances[i] + const parent = nodeMap.get(parentId) + if (parent) { + latestParent = parent + break + } + } + + if (!latestParent) return + + // Add all virtual iteration nodes to the parent + iterationMap.forEach((nodeIds, iterationIndex) => { + const iterationNodeId = `${parentId}_${iterationIndex}` + const virtualNode = nodeMap.get(iterationNodeId) + if (virtualNode) { + latestParent.children.push(virtualNode) + } + }) + }) + + // Third pass: Build the structure inside each virtual iteration node + nodeMap.forEach((node) => { + if (node.virtualParentId) { + const virtualParent = nodeMap.get(node.virtualParentId) + if (virtualParent) { + if (node.previousNodeIds.length === 0) { + // This is a root node within the iteration + virtualParent.children.push(node) + } else { + // Find its parent within the same iteration + let parentFound = false + for (const prevNodeId of node.previousNodeIds) { + // Look for nodes with the same previous node ID in the same iteration + nodeMap.forEach((potentialParent) => { + if ( + potentialParent.nodeId === prevNodeId && + potentialParent.data?.iterationIndex === node.data?.iterationIndex && + potentialParent.data?.parentNodeId === node.data?.parentNodeId && + !parentFound + ) { + potentialParent.children.push(node) + parentFound = true + } + }) + } + + // If no parent was found within the iteration, add directly to virtual parent + if (!parentFound) { + 
virtualParent.children.push(node) + } + } + } + } + }) + + // Final pass: Sort all children arrays to ensure iteration nodes appear first + const sortChildrenNodes = (node) => { + if (node.children && node.children.length > 0) { + // Sort children: iteration nodes first, then others by their original execution order + node.children.sort((a, b) => { + // Check if a is an iteration node + const aIsIteration = a.data?.name === 'iterationAgentflow' || a.data?.isVirtualNode + // Check if b is an iteration node + const bIsIteration = b.data?.name === 'iterationAgentflow' || b.data?.isVirtualNode + + // If both are iterations or both are not iterations, preserve original order + if (aIsIteration === bIsIteration) { + return a.executionIndex - b.executionIndex + } + + // Otherwise, put iterations first + return aIsIteration ? -1 : 1 + }) + + // Recursively sort children's children + node.children.forEach(sortChildrenNodes) + } + } + + // Apply sorting to all root nodes and their children + rootNodes.forEach(sortChildrenNodes) + + // Transform to the required format + const transformNode = (node) => ({ + id: node.uniqueNodeId, + label: node.nodeLabel, + name: node.data?.name, + status: node.status, + data: node.data, + children: node.children.map(transformNode) + }) + + return rootNodes.map(transformNode) + } + + const handleExpandedItemsChange = (event, itemIds) => { + setExpandedItems(itemIds) + } + + const onSharePublicly = () => { + const newIsPublic = !localMetadata.isPublic + updateExecutionApi.request(localMetadata.id, { isPublic: newIsPublic }).then(() => { + // Update local metadata to reflect the change + setLocalMetadata((prev) => ({ + ...prev, + isPublic: newIsPublic + })) + + // Show success message + dispatch( + enqueueSnackbarAction({ + message: newIsPublic ? 
'Execution shared publicly' : 'Execution is no longer public', + options: { + key: new Date().getTime() + Math.random(), + variant: 'success', + action: (key) => ( + + ) + } + }) + ) + + // Notify parent component to refresh data + if (onUpdateSharing) { + onUpdateSharing() + } + }) + } + + useEffect(() => { + if (execution) { + const newTree = buildTreeData(execution) + + // Find first stopped item if metadata state is STOPPED + if (metadata?.state === 'STOPPED') { + const findFirstStoppedNode = (nodes) => { + for (const node of nodes) { + if (node.status === 'STOPPED') return node + if (node.children) { + const found = findFirstStoppedNode(node.children) + if (found) return found + } + } + return null + } + const stoppedNode = findFirstStoppedNode(newTree) + + if (stoppedNode) { + setExpandedItems(getAllNodeIds(newTree)) + setSelectedItem(stoppedNode) + } else { + setExpandedItems(getAllNodeIds(newTree)) + // Set the first item as default selected item + if (newTree.length > 0) { + setSelectedItem(newTree[0]) + } + } + } else { + setExpandedItems(getAllNodeIds(newTree)) + // Set the first item as default selected item + if (newTree.length > 0) { + setSelectedItem(newTree[0]) + } + } + setExecution(newTree) + } + // eslint-disable-next-line react-hooks/exhaustive-deps + }, [execution, metadata]) + + const handleNodeSelect = (event, itemId) => { + const findNode = (nodes, id) => { + for (const node of nodes) { + if (node.id === id) return node + if (node.children) { + const found = findNode(node.children, id) + if (found) return found + } + } + return null + } + const selectedNode = findNode(executionTree, itemId) + setSelectedItem(selectedNode) + } + + // Content to be rendered in both drawer and full page modes + const contentComponent = ( + + + theme.palette.background.paper, + borderBottom: 1, + borderColor: 'divider' + }} + > + + {!isPublic && ( + } + variant='outlined' + label={metadata?.agentflow?.name || metadata?.agentflow?.id || 'Go to AgentFlow'} + 
className={'button'} + onClick={() => window.open(`/v2/agentcanvas/${metadata?.agentflow?.id}`, '_blank')} + /> + )} + + {!isPublic && ( + + } + variant='outlined' + label={copied ? 'Copied!' : 'Copy ID'} + className={'button'} + onClick={copyToClipboard} + /> + + )} + + {!isPublic && !localMetadata.isPublic && ( + + ) : ( + + ) + } + variant='outlined' + label={updateExecutionApi.loading ? 'Updating...' : 'Share'} + className={'button'} + onClick={() => onSharePublicly()} + disabled={updateExecutionApi.loading} + /> + )} + + {!isPublic && localMetadata.isPublic && ( + + ) : ( + + ) + } + variant='outlined' + label={updateExecutionApi.loading ? 'Updating...' : 'Public'} + className={'button'} + onClick={() => setShowShareDialog(true)} + disabled={updateExecutionApi.loading} + /> + )} + + + + {metadata?.updatedDate ? moment(metadata.updatedDate).format('MMM D, YYYY h:mm A') : 'N/A'} + + onRefresh(localMetadata?.id)} + size='small' + sx={{ + color: theme.palette.text.primary, + '&:hover': { + backgroundColor: (theme) => theme.palette.primary.main + '20' + } + }} + title='Refresh execution data' + > + + + + + + + + + {selectedItem && selectedItem.data ? 
( + + ) : ( + No data available for this item + )} + + + ) + + // Resize handle component (shared between modes) + const resizeHandle = ( + + ) + + // Render as full page component if isPublic is true + if (isPublic) { + return ( + theme.palette.background.paper + }} + > + + {contentComponent} + + + ) + } + + // Render as drawer component (original behavior) + return ( + <> + + {resizeHandle} + {contentComponent} + + setShowShareDialog(false)} + onUnshare={() => { + updateExecutionApi.request(localMetadata.id, { isPublic: false }).then(() => { + // Update local metadata to reflect the change + setLocalMetadata((prev) => ({ + ...prev, + isPublic: false + })) + setShowShareDialog(false) + + // Notify parent component to refresh data + if (onUpdateSharing) { + onUpdateSharing() + } + }) + }} + /> + + ) +} + +ExecutionDetails.propTypes = { + open: PropTypes.bool, + isPublic: PropTypes.bool, + execution: PropTypes.array, + metadata: PropTypes.object, + onClose: PropTypes.func, + onProceedSuccess: PropTypes.func, + onUpdateSharing: PropTypes.func, + onRefresh: PropTypes.func +} + +ExecutionDetails.displayName = 'ExecutionDetails' diff --git a/packages/ui/src/views/agentexecutions/NodeExecutionDetails.jsx b/packages/ui/src/views/agentexecutions/NodeExecutionDetails.jsx new file mode 100644 index 000000000..5a5296912 --- /dev/null +++ b/packages/ui/src/views/agentexecutions/NodeExecutionDetails.jsx @@ -0,0 +1,1035 @@ +import { useState } from 'react' +import { useSelector } from 'react-redux' +import PropTypes from 'prop-types' + +// MUI +import { + Typography, + Box, + ToggleButton, + ToggleButtonGroup, + Chip, + Button, + Dialog, + DialogTitle, + DialogContent, + DialogActions, + TextField, + CircularProgress, + Accordion, + AccordionSummary, + AccordionDetails, + Card, + CardMedia +} from '@mui/material' +import { useTheme, darken } from '@mui/material/styles' +import { useSnackbar } from 'notistack' +import { IconCoins, IconClock, IconChevronDown } from 
'@tabler/icons-react' +import toolSVG from '@/assets/images/tool.svg' + +// Project imports +import { MemoizedReactMarkdown } from '@/ui-component/markdown/MemoizedReactMarkdown' +import { AGENTFLOW_ICONS, baseURL } from '@/store/constant' +import { JSONViewer } from '@/ui-component/json/JsonViewer' +import ReactJson from 'flowise-react-json-view' +import { CodeEditor } from '@/ui-component/editor/CodeEditor' + +import predictionApi from '@/api/prediction' + +export const NodeExecutionDetails = ({ data, label, status, metadata, isPublic, onProceedSuccess }) => { + const [dataView, setDataView] = useState('rendered') + const [openFeedbackDialog, setOpenFeedbackDialog] = useState(false) + const [feedback, setFeedback] = useState('') + const [feedbackType, setFeedbackType] = useState('') + const [isLoading, setIsLoading] = useState(false) + const [loadingMessage, setLoadingMessage] = useState('') + const customization = useSelector((state) => state.customization) + const theme = useTheme() + const { enqueueSnackbar } = useSnackbar() + + // Function to get role-based colors + const getRoleColors = (role) => { + const isDarkMode = customization.isDarkMode + + switch (role) { + case 'assistant': + case 'ai': + return { + bg: isDarkMode ? darken(theme.palette.success.dark, 0.5) : theme.palette.success.light, + color: isDarkMode ? 'white' : theme.palette.success.dark, + border: theme.palette.success.main + } + case 'system': + return { + bg: isDarkMode ? darken(theme.palette.warning.dark, 0.5) : theme.palette.warning.light, + color: isDarkMode ? 'white' : theme.palette.warning.dark, + border: theme.palette.warning.main + } + case 'developer': + return { + bg: isDarkMode ? darken(theme.palette.info.dark, 0.5) : theme.palette.info.light, + color: isDarkMode ? 'white' : theme.palette.info.dark, + border: theme.palette.info.main + } + case 'user': + case 'human': + return { + bg: isDarkMode ? 
darken(theme.palette.primary.main, 0.5) : theme.palette.primary.light, + color: isDarkMode ? 'white' : theme.palette.primary.dark, + border: theme.palette.primary.main + } + case 'tool': + case 'function': + return { + bg: isDarkMode ? darken(theme.palette.secondary.main, 0.5) : theme.palette.secondary.light, + color: isDarkMode ? 'white' : theme.palette.secondary.dark, + border: theme.palette.secondary.main + } + default: + return { + bg: isDarkMode ? darken(theme.palette.grey[700], 0.5) : theme.palette.grey[300], + color: isDarkMode ? 'white' : theme.palette.grey[800], + border: isDarkMode ? theme.palette.grey[600] : theme.palette.grey[500] + } + } + } + + const handleDataViewChange = (event, nextView) => { + event.stopPropagation() + if (nextView === null) return + setDataView(nextView) + } + + const onSubmitResponse = async (type, feedback = '') => { + setIsLoading(true) + setLoadingMessage(`Submitting feedback...`) + const params = { + question: feedback ? feedback : type.charAt(0).toUpperCase() + type.slice(1), + chatId: metadata?.sessionId, + humanInput: { + type: type, + startNodeId: data.id, + feedback + } + } + try { + let response + if (isPublic) { + response = await predictionApi.sendMessageAndGetPredictionPublic(metadata?.agentflowId, params) + } else { + response = await predictionApi.sendMessageAndGetPrediction(metadata?.agentflowId, params) + } + if (response && response.data) { + enqueueSnackbar('Successfully submitted response', { variant: 'success' }) + if (onProceedSuccess) onProceedSuccess(response.data) + } + } catch (error) { + console.error(error) + enqueueSnackbar(error?.message || 'Failed to submit response', { variant: 'error' }) + } finally { + setIsLoading(false) + setLoadingMessage('') + } + } + + const handleProceed = () => { + if (data.input && data.input.humanInputEnableFeedback) { + setFeedbackType('proceed') + setOpenFeedbackDialog(true) + } else { + onSubmitResponse('proceed') + } + } + + const handleReject = () => { + if 
(data.input && data.input.humanInputEnableFeedback) { + setFeedbackType('reject') + setOpenFeedbackDialog(true) + } else { + onSubmitResponse('reject') + } + } + + const onClipboardCopy = (e) => { + const src = e.src + if (Array.isArray(src) || typeof src === 'object') { + navigator.clipboard.writeText(JSON.stringify(src, null, ' ')) + } else { + navigator.clipboard.writeText(src) + } + } + + const handleSubmitFeedback = () => { + onSubmitResponse(feedbackType, feedback) + setOpenFeedbackDialog(false) + setFeedback('') + setFeedbackType('') + } + + const renderFullfilledConditions = (conditions) => { + const fullfilledConditions = conditions.filter((condition) => condition.isFulfilled) + return fullfilledConditions.map((condition, index) => { + if (condition.type === 'string' && condition.operation === 'equal' && condition.value1 === '' && condition.value2 === '') { + return ( + + + Else condition fulfilled + + + + ) + } + return ( + + + Condition {index} + + + + + ) + }) + } + + return ( + + + + {(() => { + const nodeName = data?.name || data?.id?.split('_')[0] + const foundIcon = AGENTFLOW_ICONS.find((icon) => icon.name === nodeName) + + if (foundIcon) { + return ( +
+ +
+ ) + } else { + return ( +
+ {nodeName} +
+ ) + } + })()} +
+ + {label} + +
+ {data.output && data.output.timeMetadata && data.output.timeMetadata.delta && ( + } + label={`${(data.output.timeMetadata.delta / 1000).toFixed(2)} seconds`} + variant='contained' + color='secondary' + size='small' + sx={{ ml: 1, '& .MuiChip-icon': { mr: 0.2, ml: 1 } }} + /> + )} + {data.output && data.output.usageMetadata && data.output.usageMetadata.total_tokens && ( + } + label={`${data.output.usageMetadata.total_tokens} tokens`} + variant='contained' + color='primary' + size='small' + sx={{ ml: 1, '& .MuiChip-icon': { mr: 0.2, ml: 1 } }} + /> + )} +
+ + + + Rendered + + + Raw + + + + + {dataView === 'rendered' && ( + + {data.output && data.output.availableTools && data.output.availableTools.length > 0 && ( + + + Tools + + {data.output.availableTools.map((tool, index) => { + // Check if this tool is in the usedTools array + const isToolUsed = + data.output.usedTools && + Array.isArray(data.output.usedTools) && + data.output.usedTools.some((usedTool) => usedTool.tool === tool.name) + + return ( + + } + aria-controls={`tool-${index}-content`} + id={`tool-${index}-header`} + > + +
+ { + // Find matching tool from availableTools + if ( + data.output && + data.output.availableTools && + Array.isArray(data.output.availableTools) + ) { + const matchingTool = data.output.availableTools.find( + (t) => t.name === tool.name + ) + if (matchingTool && matchingTool.toolNode && matchingTool.toolNode.name) { + return `${baseURL}/api/v1/node-icon/${matchingTool.toolNode.name}` + } + } + return `${baseURL}/api/v1/node-icon/${tool.name}` + })()} + alt={tool.name} + onError={(e) => { + e.target.onerror = null + e.target.style.padding = '5px' + e.target.src = toolSVG + }} + /> +
+ + {(() => { + // Find matching tool from availableTools if they exist + if ( + data.output && + data.output.availableTools && + Array.isArray(data.output.availableTools) + ) { + const matchingTool = data.output.availableTools.find( + (t) => t.name === tool.name + ) + if (matchingTool && matchingTool.toolNode) { + return matchingTool.toolNode.label || tool.name + } + } + return tool.name || 'Tool Call' + })()} + + {isToolUsed && ( + + )} +
+
+ + + +
+ ) + })} +
+ )} + + Input + + {data && data.input && data.input.messages && Array.isArray(data.input.messages) && data.input.messages.length > 0 ? ( + data.input.messages.map((message, index) => ( + + + {message.name && ( + + )} + {message.tool_calls && + (Array.isArray(message.tool_calls) ? ( + message.tool_calls.map((toolCall, idx) => ( + + + } + aria-controls={`tool-call-${idx}-content`} + id={`tool-call-${idx}-header`} + > + +
+ { + // Find matching tool from availableTools + if ( + data.output && + data.output.availableTools && + Array.isArray(data.output.availableTools) + ) { + const matchingTool = data.output.availableTools.find( + (t) => t.name === toolCall.name + ) + if ( + matchingTool && + matchingTool.toolNode && + matchingTool.toolNode.name + ) { + return `${baseURL}/api/v1/node-icon/${matchingTool.toolNode.name}` + } + } + return `${baseURL}/api/v1/node-icon/${toolCall.name}` + })()} + alt={toolCall.name} + onError={(e) => { + e.target.onerror = null + e.target.style.padding = '5px' + e.target.src = toolSVG + }} + /> +
+ + {(() => { + // Find matching tool from availableTools if they exist + if ( + data.output && + data.output.availableTools && + Array.isArray(data.output.availableTools) + ) { + const matchingTool = data.output.availableTools.find( + (t) => t.name === toolCall.name + ) + if (matchingTool && matchingTool.toolNode) { + return matchingTool.toolNode.label || toolCall.name + } + } + return toolCall.name || 'Tool Call' + })()} + + +
+
+ + + +
+ )) + ) : ( + + ))} + {message.role === 'tool' && message.name && ( + +
+ { + // Find matching tool from availableTools + if ( + data.output && + data.output.availableTools && + Array.isArray(data.output.availableTools) + ) { + const matchingTool = data.output.availableTools.find((t) => t.name === message.name) + if (matchingTool && matchingTool.toolNode && matchingTool.toolNode.name) { + return `${baseURL}/api/v1/node-icon/${matchingTool.toolNode.name}` + } + } + return `${baseURL}/api/v1/node-icon/${message.name}` + })()} + alt={message.name} + onError={(e) => { + e.target.onerror = null + e.target.style.padding = '5px' + e.target.src = toolSVG + }} + /> +
+ + {(() => { + // Find matching tool from availableTools + if ( + data.output && + data.output.availableTools && + Array.isArray(data.output.availableTools) + ) { + const matchingTool = data.output.availableTools.find((t) => t.name === message.name) + if (matchingTool && matchingTool.toolNode) { + return matchingTool.toolNode.label || message.name + } + } + return message.name + })()} + {message.tool_call_id && ( + + )} + +
+ )} + {message.additional_kwargs?.artifacts && message.additional_kwargs.artifacts.length > 0 && ( + + + {message.additional_kwargs.artifacts.map((artifact, artifactIndex) => { + if (artifact.type === 'png' || artifact.type === 'jpeg') { + return ( + + + + ) + } else if (artifact.type === 'html') { + return ( + +
+
+ ) + } else { + return ( + + {artifact.data} + + ) + } + })} +
+
+ )} + {message.role === 'user' && + Array.isArray(message.content) && + message.content.length > 0 && + message.content.map((content, index) => { + return ( + + { + e.target.onerror = null + e.target.style.padding = '5px' + e.target.src = toolSVG + }} + sx={{ + height: 'auto', + maxHeight: '500px', + width: '100%', + objectFit: 'contain', + display: 'block' + }} + alt={`file-uploads-${index}`} + /> + + ) + })} + {(() => { + // Check if the content is a stringified JSON or array + if (message.content) { + try { + // Try to parse as JSON + const parsedContent = JSON.parse(message.content) + // If it parses successfully, it's JSON - use JSONViewer + return ( +
+ +
+ ) + } catch (e) { + // Not valid JSON, render as markdown + return {message.content} + } + } else { + return {`*No data*`} + } + })()} +
+ )) + ) : data?.input?.form || data?.input?.http || data?.input?.conditions ? ( + + ) : data?.input?.code ? ( + + + + ) : ( + + {data?.input?.question || `*No data*`} + + )} + + Output + + {data?.output?.form || data?.output?.http ? ( + + ) : data?.output?.conditions ? ( + renderFullfilledConditions(data.output.conditions) + ) : ( + + {(() => { + // Check if the content is a stringified JSON or array + if (data?.output?.content) { + try { + // Try to parse as JSON + const parsedContent = JSON.parse(data.output.content) + // If it parses successfully, it's JSON - use JSONViewer + return ( +
+ +
+ ) + } catch (e) { + // Not valid JSON, render as markdown + return {data?.output?.content || `*No data*`} + } + } else { + return {`*No data*`} + } + })()} +
+ )} + {data.error && ( + <> + + Error + + + + {typeof data?.error === 'object' + ? JSON.stringify(data.error, null, 2) + : data?.error || `*No error details*`} + + + + )} + {data.state && Object.keys(data.state).length > 0 && ( + <> + + State + + + + )} +
+ )} + {dataView === 'raw' && ( + e.stopPropagation()} + > + onClipboardCopy(e)} + displayDataTypes={false} + collapsed={1} + /> + + )} + {status === 'STOPPED' && ( + <> + + + + + + !isLoading && setOpenFeedbackDialog(false)}> + Provide Feedback + + setFeedback(e.target.value)} + disabled={isLoading} + /> + + + + + + + + {/* Loading Dialog */} + + + + + + {loadingMessage} + + + + + + )} +
+ ) +} + +NodeExecutionDetails.propTypes = { + data: PropTypes.object.isRequired, + label: PropTypes.string, + status: PropTypes.string, + metadata: PropTypes.object, + isPublic: PropTypes.bool, + onProceedSuccess: PropTypes.func +} + +NodeExecutionDetails.displayName = 'NodeExecutionDetails' diff --git a/packages/ui/src/views/agentexecutions/PublicExecutionDetails.jsx b/packages/ui/src/views/agentexecutions/PublicExecutionDetails.jsx new file mode 100644 index 000000000..a0c9fa1eb --- /dev/null +++ b/packages/ui/src/views/agentexecutions/PublicExecutionDetails.jsx @@ -0,0 +1,96 @@ +import { useEffect, useState } from 'react' +import { useParams } from 'react-router-dom' +import { ExecutionDetails } from './ExecutionDetails' +import { omit } from 'lodash' + +// API +import executionsApi from '@/api/executions' + +// Hooks +import useApi from '@/hooks/useApi' + +// MUI +import { Box, Card, Stack, Typography, useTheme } from '@mui/material' +import { IconCircleXFilled } from '@tabler/icons-react' +import { alpha } from '@mui/material/styles' + +// ==============================|| PublicExecutionDetails ||============================== // + +const PublicExecutionDetails = () => { + const { id: executionId } = useParams() + const theme = useTheme() + + const [execution, setExecution] = useState(null) + const [selectedMetadata, setSelectedMetadata] = useState({}) + const [isLoading, setLoading] = useState(true) + + const getExecutionByIdPublicApi = useApi(executionsApi.getExecutionByIdPublic) + + useEffect(() => { + getExecutionByIdPublicApi.request(executionId) + + // eslint-disable-next-line react-hooks/exhaustive-deps + }, []) + + useEffect(() => { + if (getExecutionByIdPublicApi.data) { + const execution = getExecutionByIdPublicApi.data + const executionDetails = + typeof execution.executionData === 'string' ? 
JSON.parse(execution.executionData) : execution.executionData + setExecution(executionDetails) + setSelectedMetadata(omit(execution, ['executionData'])) + } + }, [getExecutionByIdPublicApi.data]) + + useEffect(() => { + setLoading(getExecutionByIdPublicApi.loading) + }, [getExecutionByIdPublicApi.loading]) + + return ( + <> + {!isLoading ? ( + <> + {!execution || getExecutionByIdPublicApi.error ? ( + + + + + + + Invalid Execution + + + {`The execution you're looking for doesn't exist or you don't have permission to view it.`} + + + + + + ) : ( + { + getExecutionByIdPublicApi.request(executionId) + }} + onRefresh={(executionId) => { + getExecutionByIdPublicApi.request(executionId) + }} + /> + )} + + ) : null} + + ) +} + +export default PublicExecutionDetails diff --git a/packages/ui/src/views/agentexecutions/ShareExecutionDialog.jsx b/packages/ui/src/views/agentexecutions/ShareExecutionDialog.jsx new file mode 100644 index 000000000..b35635f7c --- /dev/null +++ b/packages/ui/src/views/agentexecutions/ShareExecutionDialog.jsx @@ -0,0 +1,126 @@ +import { createPortal } from 'react-dom' +import PropTypes from 'prop-types' +import { useState } from 'react' +import { useSelector, useDispatch } from 'react-redux' + +// Material +import { Typography, Box, Dialog, DialogContent, DialogTitle, Button, Tooltip } from '@mui/material' +import { useTheme } from '@mui/material/styles' +import { IconCopy, IconX, IconLink } from '@tabler/icons-react' + +// Constants +import { enqueueSnackbar as enqueueSnackbarAction, closeSnackbar as closeSnackbarAction } from '@/store/actions' + +// API +import executionsApi from '@/api/executions' +import useApi from '@/hooks/useApi' + +const ShareExecutionDialog = ({ show, executionId, onClose, onUnshare }) => { + const portalElement = document.getElementById('portal') + const theme = useTheme() + const dispatch = useDispatch() + const customization = useSelector((state) => state.customization) + const [copied, setCopied] = useState(false) + + 
const updateExecutionApi = useApi(executionsApi.updateExecution) + + // Create shareable link + const origin = window.location.origin + const shareableLink = `${origin}/execution/${executionId}` + + const copyToClipboard = () => { + navigator.clipboard.writeText(shareableLink) + setCopied(true) + + // Show success message + dispatch( + enqueueSnackbarAction({ + message: 'Link copied to clipboard', + options: { + key: new Date().getTime() + Math.random(), + variant: 'success', + action: (key) => ( + + ) + } + }) + ) + + // Reset copied state after 2 seconds + setTimeout(() => { + setCopied(false) + }, 2000) + } + + const handleUnshare = () => { + updateExecutionApi.request(executionId, { isPublic: false }) + if (onUnshare) onUnshare() + onClose() + } + + const component = show ? ( + + + Public Trace Link + + + + Anyone with the link below can view this execution trace. + + + {/* Link Display Box */} + + + + {shareableLink} + + + + + + + {/* Actions */} + + + + + + + ) : null + + return createPortal(component, portalElement) +} + +ShareExecutionDialog.propTypes = { + show: PropTypes.bool, + executionId: PropTypes.string, + onClose: PropTypes.func, + onUnshare: PropTypes.func +} + +export default ShareExecutionDialog diff --git a/packages/ui/src/views/agentexecutions/index.jsx b/packages/ui/src/views/agentexecutions/index.jsx new file mode 100644 index 000000000..90d8600c2 --- /dev/null +++ b/packages/ui/src/views/agentexecutions/index.jsx @@ -0,0 +1,464 @@ +import { useEffect, useState } from 'react' +import DatePicker from 'react-datepicker' +import 'react-datepicker/dist/react-datepicker.css' + +// material-ui +import { + Pagination, + Box, + Stack, + TextField, + MenuItem, + Button, + Grid, + FormControl, + InputLabel, + Select, + Dialog, + DialogActions, + DialogContent, + DialogContentText, + DialogTitle, + IconButton, + Tooltip, + Typography, + useTheme +} from '@mui/material' + +// project imports +import MainCard from '@/ui-component/cards/MainCard' +import 
ErrorBoundary from '@/ErrorBoundary' +import ViewHeader from '@/layout/MainLayout/ViewHeader' + +// API +import useApi from '@/hooks/useApi' +import executionsApi from '@/api/executions' +import { useSelector } from 'react-redux' + +// icons +import execution_empty from '@/assets/images/executions_empty.svg' +import { IconTrash } from '@tabler/icons-react' + +// const +import { ExecutionsListTable } from '@/ui-component/table/ExecutionsListTable' +import { ExecutionDetails } from './ExecutionDetails' +import { omit } from 'lodash' + +// ==============================|| AGENT EXECUTIONS ||============================== // + +const AgentExecutions = () => { + const theme = useTheme() + const customization = useSelector((state) => state.customization) + const borderColor = theme.palette.grey[900] + 25 + + const getAllExecutions = useApi(executionsApi.getAllExecutions) + const deleteExecutionsApi = useApi(executionsApi.deleteExecutions) + const getExecutionByIdApi = useApi(executionsApi.getExecutionById) + + const [error, setError] = useState(null) + const [isLoading, setLoading] = useState(true) + const [executions, setExecutions] = useState([]) + const [openDrawer, setOpenDrawer] = useState(false) + const [selectedExecutionData, setSelectedExecutionData] = useState([]) + const [selectedMetadata, setSelectedMetadata] = useState({}) + const [selectedExecutionIds, setSelectedExecutionIds] = useState([]) + const [openDeleteDialog, setOpenDeleteDialog] = useState(false) + const [filters, setFilters] = useState({ + state: '', + startDate: null, + endDate: null, + agentflowId: '', + sessionId: '' + }) + const [pagination, setPagination] = useState({ + page: 1, + limit: 10, + total: 0 + }) + + const handleFilterChange = (field, value) => { + setFilters({ + ...filters, + [field]: value + }) + } + + const onDateChange = (field, date) => { + const updatedDate = new Date(date) + updatedDate.setHours(0, 0, 0, 0) + + setFilters({ + ...filters, + [field]: updatedDate + }) + } + + 
const handlePageChange = (event, newPage) => { + setPagination({ + ...pagination, + page: newPage + }) + } + + const handleLimitChange = (event) => { + setPagination({ + ...pagination, + page: 1, // Reset to first page when changing items per page + limit: parseInt(event.target.value, 10) + }) + } + + const applyFilters = () => { + setLoading(true) + const params = { + page: pagination.page, + limit: pagination.limit + } + + if (filters.state) params.state = filters.state + + // Create date strings that preserve the exact date values + if (filters.startDate) { + const date = new Date(filters.startDate) + // Format date as YYYY-MM-DD and set to start of day in UTC + // This ensures the server sees the same date we've selected regardless of timezone + const year = date.getFullYear() + const month = String(date.getMonth() + 1).padStart(2, '0') + const day = String(date.getDate()).padStart(2, '0') + params.startDate = `${year}-${month}-${day}T00:00:00.000Z` + } + + if (filters.endDate) { + const date = new Date(filters.endDate) + // Format date as YYYY-MM-DD and set to end of day in UTC + const year = date.getFullYear() + const month = String(date.getMonth() + 1).padStart(2, '0') + const day = String(date.getDate()).padStart(2, '0') + params.endDate = `${year}-${month}-${day}T23:59:59.999Z` + } + + if (filters.agentflowId) params.agentflowId = filters.agentflowId + if (filters.sessionId) params.sessionId = filters.sessionId + + getAllExecutions.request(params) + } + + const resetFilters = () => { + setFilters({ + state: '', + startDate: null, + endDate: null, + agentflowId: '', + sessionId: '' + }) + getAllExecutions.request() + } + + const handleExecutionSelectionChange = (selectedIds) => { + setSelectedExecutionIds(selectedIds) + } + + const handleDeleteDialogOpen = () => { + if (selectedExecutionIds.length > 0) { + setOpenDeleteDialog(true) + } + } + + const handleDeleteDialogClose = () => { + setOpenDeleteDialog(false) + } + + const handleDeleteExecutions = () => { 
+ deleteExecutionsApi.request(selectedExecutionIds) + setOpenDeleteDialog(false) + } + + useEffect(() => { + getAllExecutions.request() + + // eslint-disable-next-line react-hooks/exhaustive-deps + }, []) + + useEffect(() => { + if (getAllExecutions.data) { + try { + const { data, total } = getAllExecutions.data + if (!Array.isArray(data)) return + setExecutions(data) + setPagination((prev) => ({ ...prev, total })) + } catch (e) { + console.error(e) + } + } + }, [getAllExecutions.data]) + + useEffect(() => { + setLoading(getAllExecutions.loading) + }, [getAllExecutions.loading]) + + useEffect(() => { + setError(getAllExecutions.error) + }, [getAllExecutions.error]) + + useEffect(() => { + applyFilters() + // eslint-disable-next-line react-hooks/exhaustive-deps + }, [pagination.page, pagination.limit]) + + useEffect(() => { + if (deleteExecutionsApi.data) { + // Refresh the executions list + getAllExecutions.request({ + page: pagination.page, + limit: pagination.limit + }) + setSelectedExecutionIds([]) + } + + // eslint-disable-next-line react-hooks/exhaustive-deps + }, [deleteExecutionsApi.data]) + + useEffect(() => { + if (getExecutionByIdApi.data) { + const execution = getExecutionByIdApi.data + const executionDetails = + typeof execution.executionData === 'string' ? JSON.parse(execution.executionData) : execution.executionData + setSelectedExecutionData(executionDetails) + setSelectedMetadata(omit(execution, ['executionData'])) + } + }, [getExecutionByIdApi.data]) + + return ( + + {error ? 
( + + ) : ( + + + + {/* Filter Section */} + + + + + State + + + + + onDateChange('startDate', date)} + selectsStart + startDate={filters.startDate} + className='form-control' + wrapperClassName='datePicker' + maxDate={new Date()} + customInput={ + + } + /> + + + onDateChange('endDate', date)} + selectsEnd + endDate={filters.endDate} + className='form-control' + wrapperClassName='datePicker' + minDate={filters.startDate} + maxDate={new Date()} + customInput={ + + } + /> + + + handleFilterChange('sessionId', e.target.value)} + size='small' + sx={{ + '& .MuiOutlinedInput-notchedOutline': { + borderColor: borderColor + } + }} + /> + + + + + + + + + + + + + + + + + + { + setOpenDrawer(true) + const executionDetails = + typeof execution.executionData === 'string' ? JSON.parse(execution.executionData) : execution.executionData + setSelectedExecutionData(executionDetails) + setSelectedMetadata(omit(execution, ['executionData'])) + }} + /> + + {/* Pagination and Page Size Controls */} + + + Items per page: + + + + + + + + setOpenDrawer(false)} + onProceedSuccess={() => { + setOpenDrawer(false) + getAllExecutions.request() + }} + onUpdateSharing={() => { + getAllExecutions.request() + }} + onRefresh={(executionId) => { + getAllExecutions.request() + getExecutionByIdApi.request(executionId) + }} + /> + + {/* Delete Confirmation Dialog */} + + Confirm Deletion + + + Are you sure you want to delete {selectedExecutionIds.length} execution + {selectedExecutionIds.length !== 1 ? 's' : ''}? This action cannot be undone. + + + + + + + + + {!isLoading && (!executions || executions.length === 0) && ( + + + execution_empty + +
No Executions Yet
+
+ )} +
+ )} +
+ ) +} + +export default AgentExecutions diff --git a/packages/ui/src/views/agentflows/index.jsx b/packages/ui/src/views/agentflows/index.jsx index fe7b08045..6e8d8cde0 100644 --- a/packages/ui/src/views/agentflows/index.jsx +++ b/packages/ui/src/views/agentflows/index.jsx @@ -2,7 +2,7 @@ import { useEffect, useState } from 'react' import { useNavigate } from 'react-router-dom' // material-ui -import { Box, Skeleton, Stack, ToggleButton, ToggleButtonGroup } from '@mui/material' +import { Chip, Box, Skeleton, Stack, ToggleButton, ToggleButtonGroup } from '@mui/material' import { useTheme } from '@mui/material/styles' // project imports @@ -24,7 +24,7 @@ import chatflowsApi from '@/api/chatflows' import useApi from '@/hooks/useApi' // const -import { baseURL } from '@/store/constant' +import { baseURL, AGENTFLOW_ICONS } from '@/store/constant' // icons import { IconPlus, IconLayoutGrid, IconList } from '@tabler/icons-react' @@ -38,12 +38,14 @@ const Agentflows = () => { const [isLoading, setLoading] = useState(true) const [error, setError] = useState(null) const [images, setImages] = useState({}) + const [icons, setIcons] = useState({}) const [search, setSearch] = useState('') const [loginDialogOpen, setLoginDialogOpen] = useState(false) const [loginDialogProps, setLoginDialogProps] = useState({}) const getAllAgentflows = useApi(chatflowsApi.getAllAgentflows) const [view, setView] = useState(localStorage.getItem('flowDisplayStyle') || 'card') + const [agentflowVersion, setAgentflowVersion] = useState(localStorage.getItem('agentFlowVersion') || 'v2') const handleChange = (event, nextView) => { if (nextView === null) return @@ -51,6 +53,13 @@ const Agentflows = () => { setView(nextView) } + const handleVersionChange = (event, nextView) => { + if (nextView === null) return + localStorage.setItem('agentFlowVersion', nextView) + setAgentflowVersion(nextView) + getAllAgentflows.request(nextView === 'v2' ? 
'AGENTFLOW' : 'MULTIAGENT') + } + const onSearchChange = (event) => { setSearch(event.target.value) } @@ -70,15 +79,23 @@ const Agentflows = () => { } const addNew = () => { - navigate('/agentcanvas') + if (agentflowVersion === 'v2') { + navigate('/v2/agentcanvas') + } else { + navigate('/agentcanvas') + } } const goToCanvas = (selectedAgentflow) => { - navigate(`/agentcanvas/${selectedAgentflow.id}`) + if (selectedAgentflow.type === 'AGENTFLOW') { + navigate(`/v2/agentcanvas/${selectedAgentflow.id}`) + } else { + navigate(`/agentcanvas/${selectedAgentflow.id}`) + } } useEffect(() => { - getAllAgentflows.request() + getAllAgentflows.request(agentflowVersion === 'v2' ? 'AGENTFLOW' : 'MULTIAGENT') // eslint-disable-next-line react-hooks/exhaustive-deps }, []) @@ -106,19 +123,27 @@ const Agentflows = () => { try { const agentflows = getAllAgentflows.data const images = {} + const icons = {} for (let i = 0; i < agentflows.length; i += 1) { const flowDataStr = agentflows[i].flowData const flowData = JSON.parse(flowDataStr) const nodes = flowData.nodes || [] images[agentflows[i].id] = [] + icons[agentflows[i].id] = [] for (let j = 0; j < nodes.length; j += 1) { - const imageSrc = `${baseURL}/api/v1/node-icon/${nodes[j].data.name}` - if (!images[agentflows[i].id].includes(imageSrc)) { - images[agentflows[i].id].push(imageSrc) + const foundIcon = AGENTFLOW_ICONS.find((icon) => icon.name === nodes[j].data.name) + if (foundIcon) { + icons[agentflows[i].id].push(foundIcon) + } else { + const imageSrc = `${baseURL}/api/v1/node-icon/${nodes[j].data.name}` + if (!images[agentflows[i].id].includes(imageSrc)) { + images[agentflows[i].id].push(imageSrc) + } } } } setImages(images) + setIcons(icons) } catch (e) { console.error(e) } @@ -131,7 +156,46 @@ const Agentflows = () => { ) : ( - + + + + + V2 + + + V1 + + { ) : ( {getAllAgentflows.data?.filter(filterFlows).map((data, index) => ( - goToCanvas(data)} data={data} images={images[data.id]} /> + goToCanvas(data)} + data={data} + 
images={images[data.id]} + icons={icons[data.id]} + /> ))} )} @@ -189,6 +259,7 @@ const Agentflows = () => { isAgentCanvas={true} data={getAllAgentflows.data} images={images} + icons={icons} isLoading={isLoading} filterFunction={filterFlows} updateFlowsApi={getAllAgentflows} diff --git a/packages/ui/src/views/agentflowsv2/AgentFlowEdge.jsx b/packages/ui/src/views/agentflowsv2/AgentFlowEdge.jsx new file mode 100644 index 000000000..ad34ccf60 --- /dev/null +++ b/packages/ui/src/views/agentflowsv2/AgentFlowEdge.jsx @@ -0,0 +1,193 @@ +import { EdgeLabelRenderer, getBezierPath } from 'reactflow' +import { memo, useState, useContext } from 'react' +import PropTypes from 'prop-types' +import { useDispatch } from 'react-redux' +import { SET_DIRTY } from '@/store/actions' +import { flowContext } from '@/store/context/ReactFlowContext' +import { IconX } from '@tabler/icons-react' + +function EdgeLabel({ transform, isHumanInput, label, color }) { + return ( +
+ {label} +
+ ) +} + +EdgeLabel.propTypes = { + transform: PropTypes.string, + isHumanInput: PropTypes.bool, + label: PropTypes.string, + color: PropTypes.string +} + +const foreignObjectSize = 40 + +const AgentFlowEdge = ({ id, sourceX, sourceY, targetX, targetY, sourcePosition, targetPosition, data, markerEnd, selected }) => { + const [isHovered, setIsHovered] = useState(false) + const { deleteEdge } = useContext(flowContext) + const dispatch = useDispatch() + + const onEdgeClick = (evt, id) => { + evt.stopPropagation() + deleteEdge(id) + dispatch({ type: SET_DIRTY }) + } + + const xEqual = sourceX === targetX + const yEqual = sourceY === targetY + + const [edgePath, edgeCenterX, edgeCenterY] = getBezierPath({ + // we need this little hack in order to display the gradient for a straight line + sourceX: xEqual ? sourceX + 0.0001 : sourceX, + sourceY: yEqual ? sourceY + 0.0001 : sourceY, + sourcePosition, + targetX, + targetY, + targetPosition + }) + + const gradientId = `edge-gradient-${id}` + return ( + <> + + + + + + + setIsHovered(true)} + onMouseLeave={() => setIsHovered(false)} + /> + setIsHovered(true)} + onMouseLeave={() => setIsHovered(false)} + /> + {data?.edgeLabel && ( + + + + )} + {isHovered && ( + setIsHovered(true)} + onMouseLeave={() => setIsHovered(false)} + > +
+ +
+
+ )} + + ) +} + +AgentFlowEdge.propTypes = { + id: PropTypes.string, + sourceX: PropTypes.number, + sourceY: PropTypes.number, + targetX: PropTypes.number, + targetY: PropTypes.number, + sourcePosition: PropTypes.any, + targetPosition: PropTypes.any, + style: PropTypes.object, + data: PropTypes.object, + markerEnd: PropTypes.any, + selected: PropTypes.bool +} + +export default memo(AgentFlowEdge) diff --git a/packages/ui/src/views/agentflowsv2/AgentFlowNode.jsx b/packages/ui/src/views/agentflowsv2/AgentFlowNode.jsx new file mode 100644 index 000000000..c5a71913a --- /dev/null +++ b/packages/ui/src/views/agentflowsv2/AgentFlowNode.jsx @@ -0,0 +1,484 @@ +import PropTypes from 'prop-types' +import { useContext, memo, useRef, useState, useEffect } from 'react' +import { useSelector } from 'react-redux' +import { Handle, Position, useUpdateNodeInternals, NodeToolbar } from 'reactflow' + +// material-ui +import { styled, useTheme, alpha, darken, lighten } from '@mui/material/styles' +import { ButtonGroup, Avatar, Box, Typography, IconButton, Tooltip } from '@mui/material' + +// project imports +import MainCard from '@/ui-component/cards/MainCard' +import { flowContext } from '@/store/context/ReactFlowContext' +import NodeInfoDialog from '@/ui-component/dialog/NodeInfoDialog' + +// icons +import { + IconCheck, + IconExclamationMark, + IconCircleChevronRightFilled, + IconCopy, + IconTrash, + IconInfoCircle, + IconLoader +} from '@tabler/icons-react' +import StopCircleIcon from '@mui/icons-material/StopCircle' +import CancelIcon from '@mui/icons-material/Cancel' + +// const +import { baseURL, AGENTFLOW_ICONS } from '@/store/constant' + +const CardWrapper = styled(MainCard)(({ theme }) => ({ + background: theme.palette.card.main, + color: theme.darkTextPrimary, + border: 'solid 1px', + width: 'max-content', + height: 'auto', + padding: '10px', + boxShadow: 'none' +})) + +const StyledNodeToolbar = styled(NodeToolbar)(({ theme }) => ({ + backgroundColor: 
theme.palette.card.main, + color: theme.darkTextPrimary, + padding: '5px', + borderRadius: '10px', + boxShadow: '0 2px 14px 0 rgb(32 40 45 / 8%)' +})) + +// ===========================|| CANVAS NODE ||=========================== // + +const AgentFlowNode = ({ data }) => { + const theme = useTheme() + const customization = useSelector((state) => state.customization) + const ref = useRef(null) + const updateNodeInternals = useUpdateNodeInternals() + // eslint-disable-next-line + const [position, setPosition] = useState(0) + const [isHovered, setIsHovered] = useState(false) + const { deleteNode, duplicateNode } = useContext(flowContext) + const [showInfoDialog, setShowInfoDialog] = useState(false) + const [infoDialogProps, setInfoDialogProps] = useState({}) + + const defaultColor = '#666666' // fallback color if data.color is not present + const nodeColor = data.color || defaultColor + + // Get different shades of the color based on state + const getStateColor = () => { + if (data.selected) return nodeColor + if (isHovered) return alpha(nodeColor, 0.8) + return alpha(nodeColor, 0.5) + } + + const getOutputAnchors = () => { + return data.outputAnchors ?? [] + } + + const getAnchorPosition = (index) => { + const currentHeight = ref.current?.clientHeight || 0 + const spacing = currentHeight / (getOutputAnchors().length + 1) + const position = spacing * (index + 1) + + // Update node internals when we get a non-zero position + if (position > 0) { + updateNodeInternals(data.id) + } + + return position + } + + const getMinimumHeight = () => { + const outputCount = getOutputAnchors().length + // Use exactly 60px as minimum height + return Math.max(60, outputCount * 20 + 40) + } + + const getBackgroundColor = () => { + if (customization.isDarkMode) { + return isHovered ? darken(nodeColor, 0.7) : darken(nodeColor, 0.8) + } + return isHovered ? 
lighten(nodeColor, 0.8) : lighten(nodeColor, 0.9) + } + + const getStatusBackgroundColor = (status) => { + switch (status) { + case 'ERROR': + return theme.palette.error.dark + case 'INPROGRESS': + return theme.palette.warning.dark + case 'STOPPED': + case 'TERMINATED': + return theme.palette.error.main + case 'FINISHED': + return theme.palette.success.dark + default: + return theme.palette.primary.dark + } + } + + const renderIcon = (node) => { + const foundIcon = AGENTFLOW_ICONS.find((icon) => icon.name === node.name) + + if (!foundIcon) return null + return + } + + useEffect(() => { + if (ref.current) { + setTimeout(() => { + setPosition(ref.current?.offsetTop + ref.current?.clientHeight / 2) + updateNodeInternals(data.id) + }, 10) + } + }, [data, ref, updateNodeInternals]) + + return ( +
setIsHovered(true)} onMouseLeave={() => setIsHovered(false)}> + + + {data.name !== 'startAgentflow' && ( + { + duplicateNode(data.id) + }} + sx={{ + color: customization.isDarkMode ? 'white' : 'inherit', + '&:hover': { + color: theme.palette.primary.main + } + }} + > + + + )} + { + deleteNode(data.id) + }} + sx={{ + color: customization.isDarkMode ? 'white' : 'inherit', + '&:hover': { + color: theme.palette.error.main + } + }} + > + + + { + setInfoDialogProps({ data }) + setShowInfoDialog(true) + }} + sx={{ + color: customization.isDarkMode ? 'white' : 'inherit', + '&:hover': { + color: theme.palette.info.main + } + }} + > + + + + + + {data && data.status && ( + + + {data.status === 'INPROGRESS' ? ( + + ) : data.status === 'ERROR' ? ( + + ) : data.status === 'TERMINATED' ? ( + + ) : data.status === 'STOPPED' ? ( + + ) : ( + + )} + + + )} + + + {!data.hideInput && ( + +
+ + )} + +
+ + {data.color && !data.icon ? ( +
+ {renderIcon(data)} +
+ ) : ( +
+ {data.name} +
+ )} +
+ + + {data.label} + + + {(() => { + // Array of model configs to check and render + const modelConfigs = [ + { model: data.inputs?.llmModel, config: data.inputs?.llmModelConfig }, + { model: data.inputs?.agentModel, config: data.inputs?.agentModelConfig }, + { model: data.inputs?.conditionAgentModel, config: data.inputs?.conditionAgentModelConfig } + ] + + // Filter out undefined models and render each valid one + return modelConfigs + .filter((item) => item.model && item.config) + .map((item, index) => ( + + + {item.model} + + {item.config.modelName || item.config.model} + + + + )) + })()} + + {(() => { + // Array of tool configurations to check and render + const toolConfigs = [ + { tools: data.inputs?.llmTools, toolProperty: 'llmSelectedTool' }, + { tools: data.inputs?.agentTools, toolProperty: 'agentSelectedTool' }, + { + tools: data.inputs?.selectedTool ? [{ selectedTool: data.inputs?.selectedTool }] : [], + toolProperty: 'selectedTool' + }, + { tools: data.inputs?.agentKnowledgeVSEmbeddings, toolProperty: ['vectorStore', 'embeddingModel'] } + ] + + // Filter out undefined tools and render each valid collection + return toolConfigs + .filter((config) => config.tools && config.tools.length > 0) + .map((config, configIndex) => ( + + {config.tools.flatMap((tool, toolIndex) => { + if (Array.isArray(config.toolProperty)) { + return config.toolProperty + .filter((prop) => tool[prop]) + .map((prop, propIndex) => { + const toolName = tool[prop] + return ( + + {toolName} + + ) + }) + } else { + const toolName = tool[config.toolProperty] + if (!toolName) return [] + + return [ + + {toolName} + + ] + } + })} + + )) + })()} + +
+ {getOutputAnchors().map((outputAnchor, index) => { + return ( + +
+ + + ) + })} + + + setShowInfoDialog(false)}> +
+ ) +} + +AgentFlowNode.propTypes = { + data: PropTypes.object +} + +export default memo(AgentFlowNode) diff --git a/packages/ui/src/views/agentflowsv2/Canvas.jsx b/packages/ui/src/views/agentflowsv2/Canvas.jsx new file mode 100644 index 000000000..d0b3c6ec7 --- /dev/null +++ b/packages/ui/src/views/agentflowsv2/Canvas.jsx @@ -0,0 +1,786 @@ +import { useEffect, useRef, useState, useCallback, useContext } from 'react' +import ReactFlow, { addEdge, Controls, MiniMap, Background, useNodesState, useEdgesState } from 'reactflow' +import 'reactflow/dist/style.css' +import './index.css' +import { useReward } from 'react-rewards' + +import { useDispatch, useSelector } from 'react-redux' +import { useNavigate, useLocation } from 'react-router-dom' +import { + REMOVE_DIRTY, + SET_DIRTY, + SET_CHATFLOW, + enqueueSnackbar as enqueueSnackbarAction, + closeSnackbar as closeSnackbarAction +} from '@/store/actions' +import { omit, cloneDeep } from 'lodash' + +// material-ui +import { Toolbar, Box, AppBar, Button, Fab } from '@mui/material' +import { useTheme } from '@mui/material/styles' + +// project imports +import CanvasNode from './AgentFlowNode' +import IterationNode from './IterationNode' +import AgentFlowEdge from './AgentFlowEdge' +import ConnectionLine from './ConnectionLine' +import StickyNote from './StickyNote' +import CanvasHeader from '@/views/canvas/CanvasHeader' +import AddNodes from '@/views/canvas/AddNodes' +import ConfirmDialog from '@/ui-component/dialog/ConfirmDialog' +import EditNodeDialog from '@/views/agentflowsv2/EditNodeDialog' +import ChatPopUp from '@/views/chatmessage/ChatPopUp' +import ValidationPopUp from '@/views/chatmessage/ValidationPopUp' +import { flowContext } from '@/store/context/ReactFlowContext' + +// API +import nodesApi from '@/api/nodes' +import chatflowsApi from '@/api/chatflows' + +// Hooks +import useApi from '@/hooks/useApi' +import useConfirm from '@/hooks/useConfirm' + +// icons +import { IconX, IconRefreshAlert } from 
'@tabler/icons-react' + +// utils +import { + getUniqueNodeLabel, + getUniqueNodeId, + initNode, + updateOutdatedNodeData, + updateOutdatedNodeEdge, + isValidConnectionAgentflowV2 +} from '@/utils/genericHelper' +import useNotifier from '@/utils/useNotifier' +import { usePrompt } from '@/utils/usePrompt' + +// const +import { FLOWISE_CREDENTIAL_ID, AGENTFLOW_ICONS } from '@/store/constant' + +const nodeTypes = { agentFlow: CanvasNode, stickyNote: StickyNote, iteration: IterationNode } +const edgeTypes = { agentFlow: AgentFlowEdge } + +// ==============================|| CANVAS ||============================== // + +const AgentflowCanvas = () => { + const theme = useTheme() + const navigate = useNavigate() + const customization = useSelector((state) => state.customization) + + const { state } = useLocation() + const templateFlowData = state ? state.templateFlowData : '' + + const URLpath = document.location.pathname.toString().split('/') + const chatflowId = + URLpath[URLpath.length - 1] === 'canvas' || URLpath[URLpath.length - 1] === 'agentcanvas' ? '' : URLpath[URLpath.length - 1] + const canvasTitle = URLpath.includes('agentcanvas') ? 
'Agent' : 'Chatflow' + + const { confirm } = useConfirm() + + const dispatch = useDispatch() + const canvas = useSelector((state) => state.canvas) + const [canvasDataStore, setCanvasDataStore] = useState(canvas) + const [chatflow, setChatflow] = useState(null) + const { reactFlowInstance, setReactFlowInstance } = useContext(flowContext) + + // ==============================|| Snackbar ||============================== // + + useNotifier() + const enqueueSnackbar = (...args) => dispatch(enqueueSnackbarAction(...args)) + const closeSnackbar = (...args) => dispatch(closeSnackbarAction(...args)) + + // ==============================|| ReactFlow ||============================== // + + const [nodes, setNodes, onNodesChange] = useNodesState() + const [edges, setEdges, onEdgesChange] = useEdgesState() + + const [selectedNode, setSelectedNode] = useState(null) + const [isSyncNodesButtonEnabled, setIsSyncNodesButtonEnabled] = useState(false) + const [editNodeDialogOpen, setEditNodeDialogOpen] = useState(false) + const [editNodeDialogProps, setEditNodeDialogProps] = useState({}) + + const reactFlowWrapper = useRef(null) + + // ==============================|| Chatflow API ||============================== // + + const getNodesApi = useApi(nodesApi.getAllNodes) + const createNewChatflowApi = useApi(chatflowsApi.createNewChatflow) + const updateChatflowApi = useApi(chatflowsApi.updateChatflow) + const getSpecificChatflowApi = useApi(chatflowsApi.getSpecificChatflow) + + // ==============================|| Events & Actions ||============================== // + + const onConnect = (params) => { + if (!isValidConnectionAgentflowV2(params, reactFlowInstance)) { + return + } + + const nodeName = params.sourceHandle.split('_')[0] + const targetNodeName = params.targetHandle.split('_')[0] + + const targetColor = AGENTFLOW_ICONS.find((icon) => icon.name === targetNodeName)?.color ?? 
theme.palette.primary.main + const sourceColor = AGENTFLOW_ICONS.find((icon) => icon.name === nodeName)?.color ?? theme.palette.primary.main + + let edgeLabel = undefined + if (nodeName === 'conditionAgentflow' || nodeName === 'conditionAgentAgentflow') { + const _edgeLabel = params.sourceHandle.split('-').pop() + edgeLabel = (isNaN(_edgeLabel) ? 0 : _edgeLabel).toString() + } + + if (nodeName === 'humanInputAgentflow') { + edgeLabel = params.sourceHandle.split('-').pop() + edgeLabel = edgeLabel === '0' ? 'proceed' : 'reject' + } + + // Check if both source and target nodes are within the same iteration node + const sourceNode = reactFlowInstance.getNodes().find((node) => node.id === params.source) + const targetNode = reactFlowInstance.getNodes().find((node) => node.id === params.target) + const isWithinIterationNode = sourceNode?.parentNode && targetNode?.parentNode && sourceNode.parentNode === targetNode.parentNode + + const newEdge = { + ...params, + data: { + ...params.data, + sourceColor, + targetColor, + edgeLabel, + isHumanInput: nodeName === 'humanInputAgentflow' + }, + ...(isWithinIterationNode && { zIndex: 9999 }), + type: 'agentFlow', + id: `${params.source}-${params.sourceHandle}-${params.target}-${params.targetHandle}` + } + setEdges((eds) => addEdge(newEdge, eds)) + } + + const handleLoadFlow = (file) => { + try { + const flowData = JSON.parse(file) + const nodes = flowData.nodes || [] + + setNodes(nodes) + setEdges(flowData.edges || []) + setTimeout(() => setDirty(), 0) + } catch (e) { + console.error(e) + } + } + + const handleDeleteFlow = async () => { + const confirmPayload = { + title: `Delete`, + description: `Delete ${canvasTitle} ${chatflow.name}?`, + confirmButtonName: 'Delete', + cancelButtonName: 'Cancel' + } + const isConfirmed = await confirm(confirmPayload) + + if (isConfirmed) { + try { + await chatflowsApi.deleteChatflow(chatflow.id) + localStorage.removeItem(`${chatflow.id}_INTERNAL`) + navigate('/agentflows') + } catch (error) { + 
enqueueSnackbar({ + message: typeof error.response.data === 'object' ? error.response.data.message : error.response.data, + options: { + key: new Date().getTime() + Math.random(), + variant: 'error', + persist: true, + action: (key) => ( + + ) + } + }) + } + } + } + + const handleSaveFlow = (chatflowName) => { + if (reactFlowInstance) { + const nodes = reactFlowInstance.getNodes().map((node) => { + const nodeData = cloneDeep(node.data) + if (Object.prototype.hasOwnProperty.call(nodeData.inputs, FLOWISE_CREDENTIAL_ID)) { + nodeData.credential = nodeData.inputs[FLOWISE_CREDENTIAL_ID] + nodeData.inputs = omit(nodeData.inputs, [FLOWISE_CREDENTIAL_ID]) + } + node.data = { + ...nodeData, + selected: false, + status: undefined + } + return node + }) + + const rfInstanceObject = reactFlowInstance.toObject() + rfInstanceObject.nodes = nodes + const flowData = JSON.stringify(rfInstanceObject) + + if (!chatflow.id) { + const newChatflowBody = { + name: chatflowName, + deployed: false, + isPublic: false, + flowData, + type: 'AGENTFLOW' + } + createNewChatflowApi.request(newChatflowBody) + } else { + const updateBody = { + name: chatflowName, + flowData + } + updateChatflowApi.request(chatflow.id, updateBody) + } + } + } + + // eslint-disable-next-line + const onNodeClick = useCallback((event, clickedNode) => { + setSelectedNode(clickedNode) + setNodes((nds) => + nds.map((node) => { + if (node.id === clickedNode.id) { + node.data = { + ...node.data, + selected: true + } + } else { + node.data = { + ...node.data, + selected: false + } + } + + return node + }) + ) + }) + + // eslint-disable-next-line + const onNodeDoubleClick = useCallback((event, node) => { + if (!node || !node.data) return + if (node.data.name === 'stickyNoteAgentflow') { + // dont show dialog + } else { + const dialogProps = { + data: node.data, + inputParams: node.data.inputParams.filter((inputParam) => !inputParam.hidden) + } + + setEditNodeDialogProps(dialogProps) + setEditNodeDialogOpen(true) + } + }) + + 
const onDragOver = useCallback((event) => { + event.preventDefault() + event.dataTransfer.dropEffect = 'move' + }, []) + + const onDrop = useCallback( + (event) => { + event.preventDefault() + const reactFlowBounds = reactFlowWrapper.current.getBoundingClientRect() + let nodeData = event.dataTransfer.getData('application/reactflow') + + // check if the dropped element is valid + if (typeof nodeData === 'undefined' || !nodeData) { + return + } + + nodeData = JSON.parse(nodeData) + + const position = reactFlowInstance.project({ + x: event.clientX - reactFlowBounds.left - 100, + y: event.clientY - reactFlowBounds.top - 50 + }) + const nodes = reactFlowInstance.getNodes() + + if (nodeData.name === 'startAgentflow' && nodes.find((node) => node.data.name === 'startAgentflow')) { + enqueueSnackbar({ + message: 'Only one start node is allowed', + options: { + key: new Date().getTime() + Math.random(), + variant: 'error', + persist: true, + action: (key) => ( + + ) + } + }) + return + } + + const newNodeId = getUniqueNodeId(nodeData, reactFlowInstance.getNodes()) + const newNodeLabel = getUniqueNodeLabel(nodeData, nodes) + + const newNode = { + id: newNodeId, + position, + data: { ...initNode(nodeData, newNodeId, true), label: newNodeLabel } + } + + if (nodeData.type === 'Iteration') { + newNode.type = 'iteration' + } else if (nodeData.type === 'StickyNote') { + newNode.type = 'stickyNote' + } else { + newNode.type = 'agentFlow' + } + + // Check if the dropped node is within any Iteration node's flowContainerSize + const iterationNodes = nodes.filter((node) => node.type === 'iteration') + let parentNode = null + + for (const iterationNode of iterationNodes) { + // Get the iteration node's position and dimensions + const nodeWidth = iterationNode.width || 300 + const nodeHeight = iterationNode.height || 250 + + // Calculate the boundaries of the iteration node + const nodeLeft = iterationNode.position.x + const nodeRight = nodeLeft + nodeWidth + const nodeTop = 
iterationNode.position.y + const nodeBottom = nodeTop + nodeHeight + + // Check if the dropped position is within these boundaries + if (position.x >= nodeLeft && position.x <= nodeRight && position.y >= nodeTop && position.y <= nodeBottom) { + parentNode = iterationNode + + // We can't have nested iteration nodes + if (nodeData.name === 'iterationAgentflow') { + enqueueSnackbar({ + message: 'Nested iteration node is not supported yet', + options: { + key: new Date().getTime() + Math.random(), + variant: 'error', + persist: true, + action: (key) => ( + + ) + } + }) + return + } + + // We can't have human input node inside iteration node + if (nodeData.name === 'humanInputAgentflow') { + enqueueSnackbar({ + message: 'Human input node is not supported inside Iteration node', + options: { + key: new Date().getTime() + Math.random(), + variant: 'error', + persist: true, + action: (key) => ( + + ) + } + }) + return + } + break + } + } + + // If the node is dropped inside an iteration node, set its parent + if (parentNode) { + newNode.parentNode = parentNode.id + newNode.extent = 'parent' + // Adjust position to be relative to the parent + newNode.position = { + x: position.x - parentNode.position.x, + y: position.y - parentNode.position.y + } + } + + setSelectedNode(newNode) + setNodes((nds) => { + return (nds ?? 
[]).concat(newNode).map((node) => { + if (node.id === newNode.id) { + node.data = { + ...node.data, + selected: true + } + } else { + node.data = { + ...node.data, + selected: false + } + } + + return node + }) + }) + setTimeout(() => setDirty(), 0) + }, + + // eslint-disable-next-line + [reactFlowInstance] + ) + + const syncNodes = () => { + const componentNodes = canvas.componentNodes + + const cloneNodes = cloneDeep(nodes) + const cloneEdges = cloneDeep(edges) + let toBeRemovedEdges = [] + + for (let i = 0; i < cloneNodes.length; i++) { + const node = cloneNodes[i] + const componentNode = componentNodes.find((cn) => cn.name === node.data.name) + if (componentNode && componentNode.version > node.data.version) { + const clonedComponentNode = cloneDeep(componentNode) + cloneNodes[i].data = updateOutdatedNodeData(clonedComponentNode, node.data, true) + toBeRemovedEdges.push(...updateOutdatedNodeEdge(cloneNodes[i].data, cloneEdges)) + } + } + + setNodes(cloneNodes) + setEdges(cloneEdges.filter((edge) => !toBeRemovedEdges.includes(edge))) + setDirty() + setIsSyncNodesButtonEnabled(false) + } + + const { reward: confettiReward } = useReward('canvasConfetti', 'confetti', { + elementCount: 150, + spread: 80, + lifetime: 300, + startVelocity: 40, + zIndex: 10000, + decay: 0.92, + position: 'fixed' + }) + + const triggerConfetti = () => { + setTimeout(() => { + confettiReward() + }, 50) + } + + const saveChatflowSuccess = () => { + dispatch({ type: REMOVE_DIRTY }) + enqueueSnackbar({ + message: `${canvasTitle} saved`, + options: { + key: new Date().getTime() + Math.random(), + variant: 'success', + action: (key) => ( + + ) + } + }) + } + + const errorFailed = (message) => { + enqueueSnackbar({ + message, + options: { + key: new Date().getTime() + Math.random(), + variant: 'error', + persist: true, + action: (key) => ( + + ) + } + }) + } + + const setDirty = () => { + dispatch({ type: SET_DIRTY }) + } + + const checkIfSyncNodesAvailable = (nodes) => { + const componentNodes 
= canvas.componentNodes + + for (let i = 0; i < nodes.length; i++) { + const node = nodes[i] + const componentNode = componentNodes.find((cn) => cn.name === node.data.name) + if (componentNode && componentNode.version > node.data.version) { + setIsSyncNodesButtonEnabled(true) + return + } + } + + setIsSyncNodesButtonEnabled(false) + } + + // ==============================|| useEffect ||============================== // + + // Get specific chatflow successful + useEffect(() => { + if (getSpecificChatflowApi.data) { + const chatflow = getSpecificChatflowApi.data + const initialFlow = chatflow.flowData ? JSON.parse(chatflow.flowData) : [] + setNodes(initialFlow.nodes || []) + setEdges(initialFlow.edges || []) + dispatch({ type: SET_CHATFLOW, chatflow }) + } else if (getSpecificChatflowApi.error) { + errorFailed(`Failed to retrieve ${canvasTitle}: ${getSpecificChatflowApi.error.response.data.message}`) + } + + // eslint-disable-next-line react-hooks/exhaustive-deps + }, [getSpecificChatflowApi.data, getSpecificChatflowApi.error]) + + // Create new chatflow successful + useEffect(() => { + if (createNewChatflowApi.data) { + const chatflow = createNewChatflowApi.data + dispatch({ type: SET_CHATFLOW, chatflow }) + saveChatflowSuccess() + window.history.replaceState(state, null, `/v2/agentcanvas/${chatflow.id}`) + } else if (createNewChatflowApi.error) { + errorFailed(`Failed to save ${canvasTitle}: ${createNewChatflowApi.error.response.data.message}`) + } + + // eslint-disable-next-line react-hooks/exhaustive-deps + }, [createNewChatflowApi.data, createNewChatflowApi.error]) + + // Update chatflow successful + useEffect(() => { + if (updateChatflowApi.data) { + dispatch({ type: SET_CHATFLOW, chatflow: updateChatflowApi.data }) + saveChatflowSuccess() + } else if (updateChatflowApi.error) { + errorFailed(`Failed to save ${canvasTitle}: ${updateChatflowApi.error.response.data.message}`) + } + + // eslint-disable-next-line react-hooks/exhaustive-deps + }, 
[updateChatflowApi.data, updateChatflowApi.error]) + + useEffect(() => { + setChatflow(canvasDataStore.chatflow) + if (canvasDataStore.chatflow) { + const flowData = canvasDataStore.chatflow.flowData ? JSON.parse(canvasDataStore.chatflow.flowData) : [] + checkIfSyncNodesAvailable(flowData.nodes || []) + } + + // eslint-disable-next-line react-hooks/exhaustive-deps + }, [canvasDataStore.chatflow]) + + // Initialization + useEffect(() => { + setIsSyncNodesButtonEnabled(false) + if (chatflowId) { + getSpecificChatflowApi.request(chatflowId) + } else { + if (localStorage.getItem('duplicatedFlowData')) { + handleLoadFlow(localStorage.getItem('duplicatedFlowData')) + setTimeout(() => localStorage.removeItem('duplicatedFlowData'), 0) + } else { + setNodes([]) + setEdges([]) + } + dispatch({ + type: SET_CHATFLOW, + chatflow: { + name: `Untitled ${canvasTitle}` + } + }) + } + + getNodesApi.request() + + // Clear dirty state before leaving and remove any ongoing test triggers and webhooks + return () => { + setTimeout(() => dispatch({ type: REMOVE_DIRTY }), 0) + } + + // eslint-disable-next-line react-hooks/exhaustive-deps + }, []) + + useEffect(() => { + setCanvasDataStore(canvas) + }, [canvas]) + + useEffect(() => { + function handlePaste(e) { + const pasteData = e.clipboardData.getData('text') + //TODO: prevent paste event when input focused, temporary fix: catch chatflow syntax + if (pasteData.includes('{"nodes":[') && pasteData.includes('],"edges":[')) { + handleLoadFlow(pasteData) + } + } + + window.addEventListener('paste', handlePaste) + + return () => { + window.removeEventListener('paste', handlePaste) + } + + // eslint-disable-next-line react-hooks/exhaustive-deps + }, []) + + useEffect(() => { + if (templateFlowData && templateFlowData.includes('"nodes":[') && templateFlowData.includes('],"edges":[')) { + handleLoadFlow(templateFlowData) + } + + // eslint-disable-next-line react-hooks/exhaustive-deps + }, [templateFlowData]) + + usePrompt('You have unsaved 
changes! Do you want to navigate away?', canvasDataStore.isDirty) + + const [chatPopupOpen, setChatPopupOpen] = useState(false) + + useEffect(() => { + if (!chatflowId && !localStorage.getItem('duplicatedFlowData') && getNodesApi.data && nodes.length === 0) { + const startNodeData = getNodesApi.data.find((node) => node.name === 'startAgentflow') + if (startNodeData) { + const clonedStartNodeData = cloneDeep(startNodeData) + clonedStartNodeData.position = { x: 100, y: 100 } + const startNode = { + id: 'startAgentflow_0', + type: 'agentFlow', + position: { x: 100, y: 100 }, + data: { + ...initNode(clonedStartNodeData, 'startAgentflow_0', true), + label: 'Start' + } + } + setNodes([startNode]) + setEdges([]) + } + } + + // eslint-disable-next-line react-hooks/exhaustive-deps + }, [getNodesApi.data, chatflowId]) + + return ( + <> + + + + + + + + + +
+
+ + + + + + setEditNodeDialogOpen(false)} + /> + {isSyncNodesButtonEnabled && ( + syncNodes()} + > + + + )} + + {!chatPopupOpen && } + +
+
+
+ +
+ + ) +} + +export default AgentflowCanvas diff --git a/packages/ui/src/views/agentflowsv2/ConfigInput.jsx b/packages/ui/src/views/agentflowsv2/ConfigInput.jsx new file mode 100644 index 000000000..2215528c4 --- /dev/null +++ b/packages/ui/src/views/agentflowsv2/ConfigInput.jsx @@ -0,0 +1,219 @@ +import { useContext, useState, useEffect } from 'react' +import PropTypes from 'prop-types' +import { cloneDeep } from 'lodash' + +// Material +import { Accordion, AccordionSummary, AccordionDetails, Box, Typography } from '@mui/material' +import { useTheme } from '@mui/material/styles' +import ExpandMoreIcon from '@mui/icons-material/ExpandMore' +import { IconSettings } from '@tabler/icons-react' + +// Project imports +import NodeInputHandler from '../canvas/NodeInputHandler' + +// API +import nodesApi from '@/api/nodes' + +// const +import { initNode, showHideInputParams, initializeDefaultNodeData } from '@/utils/genericHelper' +import { flowContext } from '@/store/context/ReactFlowContext' +import { FLOWISE_CREDENTIAL_ID } from '@/store/constant' + +export const ConfigInput = ({ data, inputParam, disabled = false, arrayIndex = null, parentParamForArray = null }) => { + const theme = useTheme() + const { reactFlowInstance } = useContext(flowContext) + + const [expanded, setExpanded] = useState(false) + const [selectedComponentNodeData, setSelectedComponentNodeData] = useState({}) + + const handleAccordionChange = (event, isExpanded) => { + setExpanded(isExpanded) + } + + const onCustomDataChange = ({ inputParam, newValue }) => { + let nodeData = cloneDeep(selectedComponentNodeData) + + const updatedInputs = { ...nodeData.inputs } + updatedInputs[inputParam.name] = newValue + + const updatedInputParams = showHideInputParams({ + ...nodeData, + inputs: updatedInputs + }) + + // Remove inputs with display set to false + Object.keys(updatedInputs).forEach((key) => { + const input = updatedInputParams.find((param) => param.name === key) + if (input && input.display === false) 
{ + delete updatedInputs[key] + } + }) + + const credential = updatedInputs.credential || updatedInputs[FLOWISE_CREDENTIAL_ID] + + nodeData = { + ...nodeData, + inputParams: updatedInputParams, + inputs: updatedInputs, + credential: credential ? credential : undefined + } + + setSelectedComponentNodeData(nodeData) + } + + // Load initial component data when the component mounts + useEffect(() => { + const loadComponentData = async () => { + // Get the node name from inputs + const nodeName = data.inputs[inputParam.name] + const node = await nodesApi.getSpecificNode(nodeName) + + if (!node.data) return + + // Initialize component node with basic data + const componentNodeData = cloneDeep(initNode(node.data, `${node.data.nodeName}_0`)) + + // Helper function to check if array-based configuration exists + const isArray = () => { + return parentParamForArray && data.inputs[parentParamForArray.name] + } + + const hasArrayConfig = () => { + return ( + parentParamForArray && + data.inputs[parentParamForArray.name] && + Array.isArray(data.inputs[parentParamForArray.name]) && + data.inputs[parentParamForArray.name][arrayIndex] && + data.inputs[parentParamForArray.name][arrayIndex][`${inputParam.name}Config`] + ) + } + + // Helper function to get current input value + const getCurrentInputValue = () => { + return hasArrayConfig() ? data.inputs[parentParamForArray.name][arrayIndex][inputParam.name] : data.inputs[inputParam.name] + } + + // Helper function to get config data + const getConfigData = () => { + return hasArrayConfig() + ? 
data.inputs[parentParamForArray.name][arrayIndex][`${inputParam.name}Config`] + : data.inputs[`${inputParam.name}Config`] + } + + // Update component inputs based on configuration + if (hasArrayConfig() || data.inputs[`${inputParam.name}Config`]) { + const configData = getConfigData() + const currentValue = getCurrentInputValue() + + // If stored config value doesn't match current input, reset to defaults + if (configData[inputParam.name] !== currentValue) { + const defaultInput = initializeDefaultNodeData(componentNodeData.inputParams) + componentNodeData.inputs = { ...defaultInput, [inputParam.name]: currentValue } + } else { + // Use existing config with current input value + componentNodeData.inputs = { ...configData, [inputParam.name]: currentValue } + } + } else { + const currentValue = isArray() + ? data.inputs[parentParamForArray.name][arrayIndex][inputParam.name] + : data.inputs[inputParam.name] + componentNodeData.inputs = { + ...componentNodeData.inputs, + [inputParam.name]: currentValue + } + } + + // Update input parameters visibility based on current inputs + componentNodeData.inputParams = showHideInputParams({ + ...componentNodeData, + inputs: componentNodeData.inputs + }) + + const credential = componentNodeData.inputs.credential || componentNodeData.inputs[FLOWISE_CREDENTIAL_ID] + componentNodeData.credential = credential ? 
credential : undefined + + setSelectedComponentNodeData(componentNodeData) + } + + loadComponentData() + + // eslint-disable-next-line react-hooks/exhaustive-deps + }, []) + + // Update node configuration when selected component data changes + useEffect(() => { + if (!selectedComponentNodeData.inputs) return + + reactFlowInstance.setNodes((nds) => + nds.map((node) => { + if (node.id !== data.id) return node + + // Handle array-based configuration + if (arrayIndex !== null && parentParamForArray) { + // Initialize array if it doesn't exist + if (!node.data.inputs[parentParamForArray.name]) { + node.data.inputs[parentParamForArray.name] = [] + } + // Initialize array element if it doesn't exist + if (!node.data.inputs[parentParamForArray.name][arrayIndex]) { + node.data.inputs[parentParamForArray.name][arrayIndex] = {} + } + // Store config in array + node.data.inputs[parentParamForArray.name][arrayIndex][`${inputParam.name}Config`] = selectedComponentNodeData.inputs + } else { + // Store config directly + node.data.inputs[`${inputParam.name}Config`] = selectedComponentNodeData.inputs + } + return node + }) + ) + + // eslint-disable-next-line react-hooks/exhaustive-deps + }, [data.inputs, arrayIndex, parentParamForArray, selectedComponentNodeData]) + + return ( + <> + + + } sx={{ background: 'transparent' }}> + + {selectedComponentNodeData?.label} Parameters + + + {(selectedComponentNodeData.inputParams ?? 
[]) + .filter((inputParam) => !inputParam.hidden) + .filter((inputParam) => inputParam.display !== false) + .map((inputParam, index) => ( + + ))} + + + + + ) +} + +ConfigInput.propTypes = { + name: PropTypes.string, + inputParam: PropTypes.object, + data: PropTypes.object, + disabled: PropTypes.bool, + arrayIndex: PropTypes.number, + parentParamForArray: PropTypes.object +} diff --git a/packages/ui/src/views/agentflowsv2/ConnectionLine.jsx b/packages/ui/src/views/agentflowsv2/ConnectionLine.jsx new file mode 100644 index 000000000..b56d0ebb4 --- /dev/null +++ b/packages/ui/src/views/agentflowsv2/ConnectionLine.jsx @@ -0,0 +1,102 @@ +import { memo } from 'react' +import { EdgeLabelRenderer, useStore, getBezierPath } from 'reactflow' +import PropTypes from 'prop-types' +import { AGENTFLOW_ICONS } from '@/store/constant' +import { useTheme } from '@mui/material/styles' + +function EdgeLabel({ transform, isHumanInput, label, color }) { + return ( +
+ {label} +
+ ) +} + +EdgeLabel.propTypes = { + transform: PropTypes.string, + isHumanInput: PropTypes.bool, + label: PropTypes.string, + color: PropTypes.string +} + +const ConnectionLine = ({ fromX, fromY, toX, toY, fromPosition, toPosition }) => { + const [edgePath] = getBezierPath({ + // we need this little hack in order to display the gradient for a straight line + sourceX: fromX, + sourceY: fromY, + sourcePosition: fromPosition, + targetX: toX, + targetY: toY, + targetPosition: toPosition + }) + + const { connectionHandleId } = useStore() + const theme = useTheme() + const nodeName = (connectionHandleId || '').split('_')[0] || '' + + const isLabelVisible = nodeName === 'humanInputAgentflow' || nodeName === 'conditionAgentflow' || nodeName === 'conditionAgentAgentflow' + + const getEdgeLabel = () => { + let edgeLabel = undefined + if (nodeName === 'conditionAgentflow' || nodeName === 'conditionAgentAgentflow') { + const _edgeLabel = connectionHandleId.split('-').pop() + edgeLabel = (isNaN(_edgeLabel) ? 0 : _edgeLabel).toString() + } + if (nodeName === 'humanInputAgentflow') { + const _edgeLabel = connectionHandleId.split('-').pop() + edgeLabel = (isNaN(_edgeLabel) ? 0 : _edgeLabel).toString() + edgeLabel = edgeLabel === '0' ? 'proceed' : 'reject' + } + return edgeLabel + } + + const color = + AGENTFLOW_ICONS.find((icon) => icon.name === (connectionHandleId || '').split('_')[0] || '')?.color ?? 
theme.palette.primary.main + + return ( + + + + + + + {isLabelVisible && ( + + + + )} + + ) +} + +ConnectionLine.propTypes = { + fromX: PropTypes.number, + fromY: PropTypes.number, + toX: PropTypes.number, + toY: PropTypes.number, + fromPosition: PropTypes.any, + toPosition: PropTypes.any +} + +export default memo(ConnectionLine) diff --git a/packages/ui/src/views/agentflowsv2/EditNodeDialog.jsx b/packages/ui/src/views/agentflowsv2/EditNodeDialog.jsx new file mode 100644 index 000000000..ce8469d95 --- /dev/null +++ b/packages/ui/src/views/agentflowsv2/EditNodeDialog.jsx @@ -0,0 +1,279 @@ +import { createPortal } from 'react-dom' +import { useDispatch, useSelector } from 'react-redux' +import { useState, useEffect, useRef, useContext, memo } from 'react' +import { useUpdateNodeInternals } from 'reactflow' +import PropTypes from 'prop-types' +import { Stack, Box, Typography, TextField, Dialog, DialogContent, ButtonBase, Avatar } from '@mui/material' +import NodeInputHandler from '@/views/canvas/NodeInputHandler' +import { HIDE_CANVAS_DIALOG, SHOW_CANVAS_DIALOG } from '@/store/actions' +import { IconPencil, IconX, IconCheck, IconInfoCircle } from '@tabler/icons-react' +import { useTheme } from '@mui/material/styles' +import { flowContext } from '@/store/context/ReactFlowContext' +import { showHideInputParams } from '@/utils/genericHelper' + +const EditNodeDialog = ({ show, dialogProps, onCancel }) => { + const portalElement = document.getElementById('portal') + const dispatch = useDispatch() + const theme = useTheme() + const customization = useSelector((state) => state.customization) + const nodeNameRef = useRef() + const { reactFlowInstance } = useContext(flowContext) + const updateNodeInternals = useUpdateNodeInternals() + + const [inputParams, setInputParams] = useState([]) + const [data, setData] = useState({}) + const [isEditingNodeName, setEditingNodeName] = useState(null) + const [nodeName, setNodeName] = useState('') + + const onNodeLabelChange = () => { + 
reactFlowInstance.setNodes((nds) => + nds.map((node) => { + if (node.id === data.id) { + node.data = { + ...node.data, + label: nodeNameRef.current.value + } + setData(node.data) + } + return node + }) + ) + updateNodeInternals(data.id) + } + + const onCustomDataChange = ({ nodeId, inputParam, newValue }) => { + reactFlowInstance.setNodes((nds) => + nds.map((node) => { + if (node.id === nodeId) { + const updatedInputs = { + ...node.data.inputs, + [inputParam.name]: newValue + } + + const updatedInputParams = showHideInputParams({ + ...node.data, + inputs: updatedInputs + }) + + // Remove inputs with display set to false + Object.keys(updatedInputs).forEach((key) => { + const input = updatedInputParams.find((param) => param.name === key) + if (input && input.display === false) { + delete updatedInputs[key] + } + }) + + node.data = { + ...node.data, + inputParams: updatedInputParams, + inputs: updatedInputs + } + + setInputParams(updatedInputParams) + setData(node.data) + } + return node + }) + ) + } + + useEffect(() => { + if (dialogProps.inputParams) { + setInputParams(dialogProps.inputParams) + } + if (dialogProps.data) { + setData(dialogProps.data) + if (dialogProps.data.label) setNodeName(dialogProps.data.label) + } + + return () => { + setInputParams([]) + setData({}) + } + }, [dialogProps]) + + useEffect(() => { + if (show) dispatch({ type: SHOW_CANVAS_DIALOG }) + else dispatch({ type: HIDE_CANVAS_DIALOG }) + return () => dispatch({ type: HIDE_CANVAS_DIALOG }) + }, [show, dispatch]) + + const component = show ? ( + + + {data && data.name && ( + + {!isEditingNodeName ? 
( + + + {nodeName} + + + {data?.id && ( + + setEditingNodeName(true)} + > + + + + )} + + ) : ( + + { + if (e.key === 'Enter') { + data.label = nodeNameRef.current.value + setNodeName(nodeNameRef.current.value) + onNodeLabelChange() + setEditingNodeName(false) + } else if (e.key === 'Escape') { + setEditingNodeName(false) + } + }} + /> + + { + data.label = nodeNameRef.current.value + setNodeName(nodeNameRef.current.value) + onNodeLabelChange() + setEditingNodeName(false) + }} + > + + + + + setEditingNodeName(false)} + > + + + + + )} + + )} + {data?.hint && ( + + + + {data.hint} + + + )} + {inputParams + .filter((inputParam) => inputParam.display !== false) + .map((inputParam, index) => ( + + ))} + + + ) : null + + return createPortal(component, portalElement) +} + +EditNodeDialog.propTypes = { + show: PropTypes.bool, + dialogProps: PropTypes.object, + onCancel: PropTypes.func +} + +export default memo(EditNodeDialog) diff --git a/packages/ui/src/views/agentflowsv2/IterationNode.jsx b/packages/ui/src/views/agentflowsv2/IterationNode.jsx new file mode 100644 index 000000000..8455e4e13 --- /dev/null +++ b/packages/ui/src/views/agentflowsv2/IterationNode.jsx @@ -0,0 +1,425 @@ +import PropTypes from 'prop-types' +import { useContext, memo, useRef, useState, useEffect, useCallback } from 'react' +import { useSelector } from 'react-redux' +import { Background, Handle, Position, useUpdateNodeInternals, NodeToolbar, NodeResizer } from 'reactflow' + +// material-ui +import { styled, useTheme, alpha, darken, lighten } from '@mui/material/styles' +import { ButtonGroup, Avatar, Box, Typography, IconButton, Tooltip } from '@mui/material' + +// project imports +import MainCard from '@/ui-component/cards/MainCard' +import { flowContext } from '@/store/context/ReactFlowContext' +import NodeInfoDialog from '@/ui-component/dialog/NodeInfoDialog' + +// icons +import { + IconCheck, + IconExclamationMark, + IconCircleChevronRightFilled, + IconCopy, + IconTrash, + IconInfoCircle, + 
IconLoader +} from '@tabler/icons-react' +import StopCircleIcon from '@mui/icons-material/StopCircle' +import CancelIcon from '@mui/icons-material/Cancel' + +// const +import { baseURL, AGENTFLOW_ICONS } from '@/store/constant' + +const CardWrapper = styled(MainCard)(({ theme }) => ({ + background: theme.palette.card.main, + color: theme.darkTextPrimary, + border: 'solid 1px', + width: 'max-content', + height: 'auto', + padding: '10px', + boxShadow: 'none' +})) + +const StyledNodeToolbar = styled(NodeToolbar)(({ theme }) => ({ + backgroundColor: theme.palette.card.main, + color: theme.darkTextPrimary, + padding: '5px', + borderRadius: '10px', + boxShadow: '0 2px 14px 0 rgb(32 40 45 / 8%)' +})) + +// ===========================|| ITERATION NODE ||=========================== // + +const IterationNode = ({ data }) => { + const theme = useTheme() + const customization = useSelector((state) => state.customization) + const ref = useRef(null) + const reactFlowWrapper = useRef(null) + + const updateNodeInternals = useUpdateNodeInternals() + // eslint-disable-next-line + const [position, setPosition] = useState(0) + const [isHovered, setIsHovered] = useState(false) + const { deleteNode, duplicateNode, reactFlowInstance } = useContext(flowContext) + const [showInfoDialog, setShowInfoDialog] = useState(false) + const [infoDialogProps, setInfoDialogProps] = useState({}) + + const [cardDimensions, setCardDimensions] = useState({ + width: '300px', + height: '250px' + }) + + // Add useEffect to update dimensions when reactFlowInstance becomes available + useEffect(() => { + if (reactFlowInstance) { + const node = reactFlowInstance.getNodes().find((node) => node.id === data.id) + if (node && node.width && node.height) { + setCardDimensions({ + width: `${node.width}px`, + height: `${node.height}px` + }) + } + } + }, [reactFlowInstance, data.id]) + + const defaultColor = '#666666' // fallback color if data.color is not present + const nodeColor = data.color || defaultColor + + // 
Get different shades of the color based on state + const getStateColor = () => { + if (data.selected) return nodeColor + if (isHovered) return alpha(nodeColor, 0.8) + return alpha(nodeColor, 0.5) + } + + const getOutputAnchors = () => { + return data.outputAnchors ?? [] + } + + const getAnchorPosition = (index) => { + const currentHeight = ref.current?.clientHeight || 0 + const spacing = currentHeight / (getOutputAnchors().length + 1) + const position = spacing * (index + 1) + + // Update node internals when we get a non-zero position + if (position > 0) { + updateNodeInternals(data.id) + } + + return position + } + + const getMinimumHeight = () => { + const outputCount = getOutputAnchors().length + // Use exactly 60px as minimum height + return Math.max(60, outputCount * 20 + 40) + } + + const getBackgroundColor = () => { + if (customization.isDarkMode) { + return isHovered ? darken(nodeColor, 0.7) : darken(nodeColor, 0.8) + } + return isHovered ? lighten(nodeColor, 0.8) : lighten(nodeColor, 0.9) + } + + const getStatusBackgroundColor = (status) => { + switch (status) { + case 'ERROR': + return theme.palette.error.dark + case 'INPROGRESS': + return theme.palette.warning.dark + case 'STOPPED': + case 'TERMINATED': + return theme.palette.error.main + case 'FINISHED': + return theme.palette.success.dark + default: + return theme.palette.primary.dark + } + } + + const renderIcon = (node) => { + const foundIcon = AGENTFLOW_ICONS.find((icon) => icon.name === node.name) + + if (!foundIcon) return null + return + } + + useEffect(() => { + if (ref.current) { + setTimeout(() => { + setPosition(ref.current?.offsetTop + ref.current?.clientHeight / 2) + updateNodeInternals(data.id) + }, 10) + } + }, [data, ref, updateNodeInternals]) + + const onResizeEnd = useCallback( + (e, params) => { + if (!ref.current) return + + // Set the card dimensions directly from resize params + setCardDimensions({ + width: `${params.width}px`, + height: `${params.height}px` + }) + }, + [ref, 
setCardDimensions] + ) + + return ( +
setIsHovered(true)} onMouseLeave={() => setIsHovered(false)}> + + + {data.color && !data.icon ? ( +
+ {renderIcon(data)} +
+ ) : ( +
+ {data.name} +
+ )} + + {data.label} + +
+
+ + + { + duplicateNode(data.id) + }} + sx={{ + color: customization.isDarkMode ? 'white' : 'inherit', + '&:hover': { + color: theme.palette.primary.main + } + }} + > + + + { + deleteNode(data.id) + }} + sx={{ + color: customization.isDarkMode ? 'white' : 'inherit', + '&:hover': { + color: theme.palette.error.main + } + }} + > + + + { + setInfoDialogProps({ data }) + setShowInfoDialog(true) + }} + sx={{ + color: customization.isDarkMode ? 'white' : 'inherit', + '&:hover': { + color: theme.palette.info.main + } + }} + > + + + + + + + {data && data.status && ( + + + {data.status === 'INPROGRESS' ? ( + + ) : data.status === 'ERROR' ? ( + + ) : data.status === 'TERMINATED' ? ( + + ) : data.status === 'STOPPED' ? ( + + ) : ( + + )} + + + )} + + + {!data.hideInput && ( + +
+ + )} +
+ +
+ +
+
+
+ {getOutputAnchors().map((outputAnchor, index) => { + return ( + +
+ + + ) + })} + + + setShowInfoDialog(false)}> +
+ ) +} + +IterationNode.propTypes = { + data: PropTypes.object +} + +export default memo(IterationNode) diff --git a/packages/ui/src/views/agentflowsv2/MarketplaceCanvas.jsx b/packages/ui/src/views/agentflowsv2/MarketplaceCanvas.jsx new file mode 100644 index 000000000..3e4ef254e --- /dev/null +++ b/packages/ui/src/views/agentflowsv2/MarketplaceCanvas.jsx @@ -0,0 +1,135 @@ +import { useEffect, useState, useCallback, useRef, useContext } from 'react' +import ReactFlow, { Controls, Background, useNodesState, useEdgesState } from 'reactflow' +import 'reactflow/dist/style.css' +import '@/views/canvas/index.css' + +import { useLocation, useNavigate } from 'react-router-dom' + +// material-ui +import { Toolbar, Box, AppBar } from '@mui/material' +import { useTheme } from '@mui/material/styles' + +// project imports +import AgentFlowNode from './AgentFlowNode' +import AgentFlowEdge from './AgentFlowEdge' +import IterationNode from './IterationNode' +import MarketplaceCanvasHeader from '@/views/marketplaces/MarketplaceCanvasHeader' +import StickyNote from './StickyNote' +import EditNodeDialog from '@/views/agentflowsv2/EditNodeDialog' +import { flowContext } from '@/store/context/ReactFlowContext' + +const nodeTypes = { agentFlow: AgentFlowNode, stickyNote: StickyNote, iteration: IterationNode } +const edgeTypes = { agentFlow: AgentFlowEdge } + +// ==============================|| CANVAS ||============================== // + +const MarketplaceCanvasV2 = () => { + const theme = useTheme() + const navigate = useNavigate() + + const { state } = useLocation() + const { flowData, name } = state + + // ==============================|| ReactFlow ||============================== // + + const [nodes, setNodes, onNodesChange] = useNodesState() + const [edges, setEdges, onEdgesChange] = useEdgesState() + const [editNodeDialogOpen, setEditNodeDialogOpen] = useState(false) + const [editNodeDialogProps, setEditNodeDialogProps] = useState({}) + + const reactFlowWrapper = useRef(null) + 
const { setReactFlowInstance } = useContext(flowContext) + + // ==============================|| useEffect ||============================== // + + useEffect(() => { + if (flowData) { + const initialFlow = JSON.parse(flowData) + setNodes(initialFlow.nodes || []) + setEdges(initialFlow.edges || []) + } + + // eslint-disable-next-line react-hooks/exhaustive-deps + }, [flowData]) + + const onChatflowCopy = (flowData) => { + const templateFlowData = JSON.stringify(flowData) + navigate('/v2/agentcanvas', { state: { templateFlowData } }) + } + + // eslint-disable-next-line + const onNodeDoubleClick = useCallback((event, node) => { + if (!node || !node.data) return + if (node.data.name === 'stickyNoteAgentflow') { + // dont show dialog + } else { + const dialogProps = { + data: node.data, + inputParams: node.data.inputParams.filter((inputParam) => !inputParam.hidden), + disabled: true + } + + setEditNodeDialogProps(dialogProps) + setEditNodeDialogOpen(true) + } + }) + + return ( + <> + + + + onChatflowCopy(flowData)} + /> + + + +
+
+ + + + setEditNodeDialogOpen(false)} + /> + +
+
+
+
+ + ) +} + +export default MarketplaceCanvasV2 diff --git a/packages/ui/src/views/agentflowsv2/StickyNote.jsx b/packages/ui/src/views/agentflowsv2/StickyNote.jsx new file mode 100644 index 000000000..c154adfa6 --- /dev/null +++ b/packages/ui/src/views/agentflowsv2/StickyNote.jsx @@ -0,0 +1,136 @@ +import PropTypes from 'prop-types' +import { useRef, useContext, useState } from 'react' +import { useSelector } from 'react-redux' +import { NodeToolbar } from 'reactflow' + +// material-ui +import { styled, useTheme, alpha, darken, lighten } from '@mui/material/styles' + +// project imports +import { ButtonGroup, IconButton, Box } from '@mui/material' +import { IconCopy, IconTrash } from '@tabler/icons-react' +import { Input } from '@/ui-component/input/Input' +import MainCard from '@/ui-component/cards/MainCard' + +// const +import { flowContext } from '@/store/context/ReactFlowContext' + +const CardWrapper = styled(MainCard)(({ theme }) => ({ + background: theme.palette.card.main, + color: theme.darkTextPrimary, + border: 'solid 1px', + width: 'max-content', + height: 'auto', + padding: '10px', + boxShadow: 'none' +})) + +const StyledNodeToolbar = styled(NodeToolbar)(({ theme }) => ({ + backgroundColor: theme.palette.card.main, + color: theme.darkTextPrimary, + padding: '5px', + borderRadius: '10px', + boxShadow: '0 2px 14px 0 rgb(32 40 45 / 8%)' +})) + +const StickyNote = ({ data }) => { + const theme = useTheme() + const customization = useSelector((state) => state.customization) + const ref = useRef(null) + + const { reactFlowInstance, deleteNode, duplicateNode } = useContext(flowContext) + const [inputParam] = data.inputParams + const [isHovered, setIsHovered] = useState(false) + + const defaultColor = '#666666' // fallback color if data.color is not present + const nodeColor = data.color || defaultColor + + // Get different shades of the color based on state + const getStateColor = () => { + if (data.selected) return nodeColor + if (isHovered) return 
alpha(nodeColor, 0.8) + return alpha(nodeColor, 0.5) + } + + const getBackgroundColor = () => { + if (customization.isDarkMode) { + return isHovered ? darken(nodeColor, 0.7) : darken(nodeColor, 0.8) + } + return isHovered ? lighten(nodeColor, 0.8) : lighten(nodeColor, 0.9) + } + + return ( +
setIsHovered(true)} onMouseLeave={() => setIsHovered(false)}> + + + { + duplicateNode(data.id) + }} + sx={{ + color: customization.isDarkMode ? 'white' : 'inherit', + '&:hover': { + color: theme.palette.primary.main + } + }} + > + + + { + deleteNode(data.id) + }} + sx={{ + color: customization.isDarkMode ? 'white' : 'inherit', + '&:hover': { + color: theme.palette.error.main + } + }} + > + + + + + + + (data.inputs[inputParam.name] = newValue)} + value={data.inputs[inputParam.name] ?? inputParam.default ?? ''} + nodes={reactFlowInstance ? reactFlowInstance.getNodes() : []} + edges={reactFlowInstance ? reactFlowInstance.getEdges() : []} + nodeId={data.id} + /> + + +
+ ) +} + +StickyNote.propTypes = { + data: PropTypes.object +} + +export default StickyNote diff --git a/packages/ui/src/views/agentflowsv2/index.css b/packages/ui/src/views/agentflowsv2/index.css new file mode 100644 index 000000000..4e790158c --- /dev/null +++ b/packages/ui/src/views/agentflowsv2/index.css @@ -0,0 +1,56 @@ +.edgebutton { + width: 20px; + height: 20px; + background: #eee; + border: 1px solid #fff; + cursor: pointer; + border-radius: 50%; + font-size: 12px; + line-height: 1; +} + +.edgebutton:hover { + background: #5e35b1; + color: #eee; + box-shadow: 0 0 6px 2px rgba(0, 0, 0, 0.08); +} + +.edgebutton-foreignobject div { + background: transparent; + width: 40px; + height: 40px; + display: flex; + justify-content: center; + align-items: center; + min-height: 40px; +} + +.reactflow-parent-wrapper { + display: flex; + flex-grow: 1; + height: 100%; +} + +.reactflow-parent-wrapper .reactflow-wrapper { + flex-grow: 1; + height: 100%; +} + +.chatflow-canvas .react-flow__handle-connecting { + cursor: not-allowed; + background: #db4e4e !important; +} + +.chatflow-canvas .react-flow__handle-valid { + cursor: crosshair; + background: #5dba62 !important; +} + +.agent-flow-edge-selector:hover { + cursor: pointer; +} + +.agent-flow-edge-selector:hover + .agent-flow-edge { + stroke-width: 3 !important; + opacity: 1; +} diff --git a/packages/ui/src/views/apikey/index.jsx b/packages/ui/src/views/apikey/index.jsx index 56bc3ecfe..afa04133d 100644 --- a/packages/ui/src/views/apikey/index.jsx +++ b/packages/ui/src/views/apikey/index.jsx @@ -367,7 +367,13 @@ const APIKey = () => { ) : ( - + + ) + } + }) + } + + const generateDocStoreToolDesc = async (storeId) => { + if (!storeId) { + enqueueSnackbar({ + message: 'Please select a knowledge base', + options: { + key: new Date().getTime() + Math.random(), + variant: 'error', + action: (key) => ( + + ) + } + }) + return + } + storeId = storeId.split(':')[0] + const isValid = checkInputParamsMandatory() + if (!isValid) { + 
displayWarning() + return + } + + // Check if model is already selected in the node + const currentNode = reactFlowInstance?.getNodes().find((node) => node.id === data.id) + const currentNodeInputs = currentNode?.data?.inputs + + const existingModel = currentNodeInputs?.llmModel || currentNodeInputs?.agentModel || currentNodeInputs?.humanInputModel + if (existingModel) { + try { + setLoading(true) + const selectedChatModelObj = { + name: existingModel, + inputs: + currentNodeInputs?.llmModelConfig || currentNodeInputs?.agentModelConfig || currentNodeInputs?.humanInputModelConfig + } + const resp = await documentstoreApi.generateDocStoreToolDesc(storeId, { selectedChatModel: selectedChatModelObj }) + if (resp.data) { + setLoading(false) + const content = resp.data?.content || resp.data.kwargs?.content + // Update the input value directly + data.inputs[inputParam.name] = content + enqueueSnackbar({ + message: 'Document Store Tool Description generated successfully', + options: { + key: new Date().getTime() + Math.random(), + variant: 'success', + action: (key) => ( + + ) + } + }) + } + } catch (error) { + console.error('Error generating doc store tool desc', error) + setLoading(false) + enqueueSnackbar({ + message: typeof error.response.data === 'object' ? 
error.response.data.message : error.response.data, + options: { + key: new Date().getTime() + Math.random(), + variant: 'error', + persist: true, + action: (key) => ( + + ) + } + }) + } + return + } + + // If no model selected, load chat models and open model selection dialog + await loadChatModels() + setModelSelectionCallback(() => async (selectedModel) => { + try { + setLoading(true) + const selectedChatModelObj = { + name: selectedModel.name, + inputs: selectedModel.inputs + } + const resp = await documentstoreApi.generateDocStoreToolDesc(storeId, { selectedChatModel: selectedChatModelObj }) + if (resp.data) { + setLoading(false) + const content = resp.data?.content || resp.data.kwargs?.content + // Update the input value directly + data.inputs[inputParam.name] = content + enqueueSnackbar({ + message: 'Document Store Tool Description generated successfully', + options: { + key: new Date().getTime() + Math.random(), + variant: 'success', + action: (key) => ( + + ) + } + }) + } + } catch (error) { + console.error('Error generating doc store tool desc', error) + setLoading(false) + enqueueSnackbar({ + message: typeof error.response.data === 'object' ? 
error.response.data.message : error.response.data, + options: { + key: new Date().getTime() + Math.random(), + variant: 'error', + persist: true, + action: (key) => ( + + ) + } + }) + } + }) + setModelSelectionDialogOpen(true) + } + + const generateInstruction = async () => { + const isValid = checkInputParamsMandatory() + if (!isValid) { + displayWarning() + return + } + + const currentNode = reactFlowInstance?.getNodes().find((node) => node.id === data.id) + const currentNodeInputs = currentNode?.data?.inputs + + // Check if model is already selected in the node + const existingModel = currentNodeInputs?.llmModel || currentNodeInputs?.agentModel || currentNodeInputs?.humanInputModel + if (existingModel) { + // Open prompt generator dialog directly with existing model + setPromptGeneratorDialogProps({ + title: 'Generate Instructions', + description: 'You can generate a prompt template by sharing basic details about your task.', + data: { + selectedChatModel: { + name: existingModel, + inputs: + currentNodeInputs?.llmModelConfig || + currentNodeInputs?.agentModelConfig || + currentNodeInputs?.humanInputModelConfig + } + } + }) + setPromptGeneratorDialogOpen(true) + return + } + + // If no model selected, load chat models and open model selection dialog + await loadChatModels() + setModelSelectionCallback(() => async (selectedModel) => { + // After model selection, open prompt generator dialog + setPromptGeneratorDialogProps({ + title: 'Generate Instructions', + description: 'You can generate a prompt template by sharing basic details about your task.', + data: { selectedChatModel: selectedModel } + }) + setPromptGeneratorDialogOpen(true) + }) + setModelSelectionDialogOpen(true) + } + useEffect(() => { if (ref.current && ref.current.offsetTop && ref.current.clientHeight) { setPosition(ref.current.offsetTop + ref.current.clientHeight / 2) @@ -528,7 +842,23 @@ const NodeInputHandler = ({ > )} -
+ {data.name === 'chatNvidiaNIM' && inputParam.name === 'modelName' && ( + <> + + + )} +
{inputParam.label} {!inputParam.optional &&  *} @@ -562,12 +892,47 @@ const NodeInputHandler = ({ {inputParam.hint.label} )} + {inputParam.acceptVariable && inputParam.type === 'string' && ( + + + + )} + {inputParam.generateDocStoreDescription && ( + generateDocStoreToolDesc(data.inputs['documentStore'])} + > + + + )} + {inputParam.generateInstruction && ( + generateInstruction()} + > + + + )} {((inputParam.type === 'string' && inputParam.rows) || inputParam.type === 'code') && ( - {inputParam.tabs.map((inputChildParam, index) => ( - - - - ))} + {inputParam.tabs + .filter((inputParam) => inputParam.display !== false) + .map((inputChildParam, index) => ( + + + + ))} )} {inputParam.type === 'file' && ( @@ -649,7 +1016,7 @@ const NodeInputHandler = ({ {inputParam.type === 'boolean' && ( (data.inputs[inputParam.name] = newValue)} + onChange={(newValue) => handleDataChange({ inputParam, newValue })} value={data.inputs[inputParam.name] ?? inputParam.default ?? false} /> )} @@ -698,18 +1065,33 @@ const NodeInputHandler = ({
)} - {(inputParam.type === 'string' || inputParam.type === 'password' || inputParam.type === 'number') && ( - (data.inputs[inputParam.name] = newValue)} - value={data.inputs[inputParam.name] ?? inputParam.default ?? ''} - nodes={inputParam?.acceptVariable && reactFlowInstance ? reactFlowInstance.getNodes() : []} - edges={inputParam?.acceptVariable && reactFlowInstance ? reactFlowInstance.getEdges() : []} - nodeId={data.id} - /> - )} + + {(inputParam.type === 'string' || inputParam.type === 'password' || inputParam.type === 'number') && + (inputParam?.acceptVariable ? ( + (data.inputs[inputParam.name] = newValue)} + value={data.inputs[inputParam.name] ?? inputParam.default ?? ''} + nodes={reactFlowInstance ? reactFlowInstance.getNodes() : []} + edges={reactFlowInstance ? reactFlowInstance.getEdges() : []} + nodeId={data.id} + /> + ) : ( + (data.inputs[inputParam.name] = newValue)} + value={data.inputs[inputParam.name] ?? inputParam.default ?? ''} + nodes={[]} + edges={[]} + nodeId={data.id} + /> + ))} {inputParam.type === 'json' && ( <> {!inputParam?.acceptVariable && ( @@ -752,36 +1134,47 @@ const NodeInputHandler = ({ )} {inputParam.type === 'options' && ( - (data.inputs[inputParam.name] = newValue)} - value={data.inputs[inputParam.name] ?? inputParam.default ?? 'choose an option'} - /> +
+ handleDataChange({ inputParam, newValue })} + value={data.inputs[inputParam.name] ?? inputParam.default ?? 'choose an option'} + /> +
)} {inputParam.type === 'multiOptions' && ( - (data.inputs[inputParam.name] = newValue)} - value={data.inputs[inputParam.name] ?? inputParam.default ?? 'choose an option'} - /> +
+ handleDataChange({ inputParam, newValue })} + value={data.inputs[inputParam.name] ?? inputParam.default ?? 'choose an option'} + /> +
)} - {inputParam.type === 'asyncOptions' && ( + {(inputParam.type === 'asyncOptions' || inputParam.type === 'asyncMultiOptions') && ( <> {data.inputParams.length === 1 &&
} -
+
(data.inputs[inputParam.name] = newValue)} + onSelect={(newValue) => { + if (inputParam.loadConfig) setReloadTimestamp(Date.now().toString()) + handleDataChange({ inputParam, newValue }) + }} onCreateNew={() => addAsyncOption(inputParam.name)} /> {EDITABLE_OPTIONS.includes(inputParam.name) && data.inputs[inputParam.name] && ( @@ -807,6 +1200,7 @@ const NodeInputHandler = ({
)} + {inputParam.type === 'array' && } {/* CUSTOM INPUT LOGIC */} {inputParam.type.includes('conditionFunction') && ( <> @@ -857,6 +1251,20 @@ const NodeInputHandler = ({ /> )} + {inputParam.loadConfig && data && data.inputs && data.inputs[inputParam.name] && ( + <> + + + )} )} @@ -879,6 +1287,13 @@ const NodeInputHandler = ({ onConfirm={(newValue, inputParamName) => onExpandDialogSave(newValue, inputParamName)} onInputHintDialogClicked={onInputHintDialogClicked} > + setShowExpandRichDialog(false)} + onConfirm={(newValue, inputParamName) => onExpandRichDialogSave(newValue, inputParamName)} + onInputHintDialogClicked={onInputHintDialogClicked} + > setShowInputHintDialog(false)} > + setIsNvidiaNIMDialogOpen(false)} + onComplete={handleNvidiaNIMDialogComplete} + > + { + setModelSelectionDialogOpen(false) + setSelectedTempChatModel({}) + }} + aria-labelledby='model-selection-dialog-title' + maxWidth='sm' + fullWidth + > + Select Model + + + + { + if (!newValue) { + setSelectedTempChatModel({}) + } else { + const foundChatComponent = availableChatModels.find((chatModel) => chatModel.name === newValue) + if (foundChatComponent) { + const chatModelId = `${foundChatComponent.name}_0` + const clonedComponent = cloneDeep(foundChatComponent) + const initChatModelData = initNode(clonedComponent, chatModelId) + setSelectedTempChatModel(initChatModelData) + } + } + }} + value={selectedTempChatModel?.name ?? 'choose an option'} + /> + + {selectedTempChatModel && Object.keys(selectedTempChatModel).length > 0 && ( + + {(selectedTempChatModel.inputParams ?? 
[]) + .filter((inputParam) => !inputParam.hidden) + .map((inputParam, index) => ( + + ))} + + )} + + + + + + + + setPromptGeneratorDialogOpen(false)} + onConfirm={(generatedInstruction) => { + try { + data.inputs[inputParam.name] = generatedInstruction + setPromptGeneratorDialogOpen(false) + } catch (error) { + enqueueSnackbar({ + message: 'Error setting generated instruction', + options: { + key: new Date().getTime() + Math.random(), + variant: 'error', + persist: true, + action: (key) => ( + + ) + } + }) + } + }} + /> + {loading && }
) } @@ -904,6 +1418,9 @@ NodeInputHandler.propTypes = { disabled: PropTypes.bool, isAdditionalParams: PropTypes.bool, disablePadding: PropTypes.bool, + parentParamForArray: PropTypes.object, + arrayIndex: PropTypes.number, + onCustomDataChange: PropTypes.func, onHideNodeInfoDialog: PropTypes.func } diff --git a/packages/ui/src/views/canvas/StickyNote.jsx b/packages/ui/src/views/canvas/StickyNote.jsx index 7f5378a52..accf1d487 100644 --- a/packages/ui/src/views/canvas/StickyNote.jsx +++ b/packages/ui/src/views/canvas/StickyNote.jsx @@ -1,5 +1,5 @@ import PropTypes from 'prop-types' -import { useContext, useState } from 'react' +import { useContext, useState, memo } from 'react' import { useSelector } from 'react-redux' // material-ui @@ -31,13 +31,19 @@ const StickyNote = ({ data }) => { setOpen(true) } + const getBorderColor = () => { + if (data.selected) return theme.palette.primary.main + else if (theme?.customization?.isDarkMode) return theme.palette.grey[900] + 25 + else return theme.palette.grey[900] + 50 + } + return ( <> { const URLpath = document.location.pathname.toString().split('/') const chatflowId = URLpath[URLpath.length - 1] === 'chatbot' ? '' : URLpath[URLpath.length - 1] const navigate = useNavigate() + const theme = useTheme() const [chatflow, setChatflow] = useState(null) const [chatbotTheme, setChatbotTheme] = useState({}) @@ -80,7 +86,7 @@ const ChatbotFull = () => { const chatflowType = chatflowData.type if (chatflowData.chatbotConfig) { let parsedConfig = {} - if (chatflowType === 'MULTIAGENT') { + if (chatflowType === 'MULTIAGENT' || chatflowType === 'AGENTFLOW') { parsedConfig.showAgentMessages = true } @@ -99,7 +105,7 @@ const ChatbotFull = () => { setChatbotTheme(parsedConfig) setChatbotOverrideConfig({}) } - } else if (chatflowType === 'MULTIAGENT') { + } else if (chatflowType === 'MULTIAGENT' || chatflowType === 'AGENTFLOW') { setChatbotTheme({ showAgentMessages: true }) } } @@ -114,7 +120,29 @@ const ChatbotFull = () => { {!isLoading ? 
( <> {!chatflow || chatflow.apikeyid ? ( -

Invalid Chatbot

+ + + + + + + Invalid Chatbot + + + {`The chatbot you're looking for doesn't exist or requires API key authentication.`} + + + + + ) : ( { ) : ( - + { + switch (status) { + case 'FINISHED': + return 'success.dark' + case 'ERROR': + case 'TIMEOUT': + return 'error.main' + case 'TERMINATED': + case 'STOPPED': + return 'error.main' + case 'INPROGRESS': + return 'warning.dark' + } +} + +const StyledTreeItemRoot = styled(TreeItem2Root)(({ theme }) => ({ + color: theme.palette.grey[400] +})) + +const CustomTreeItemContent = styled(TreeItem2Content)(({ theme }) => ({ + flexDirection: 'row-reverse', + borderRadius: theme.spacing(0.7), + marginBottom: theme.spacing(0.5), + marginTop: theme.spacing(0.5), + padding: theme.spacing(0.5), + paddingRight: theme.spacing(1), + fontWeight: 500, + [`&.Mui-expanded `]: { + '&:not(.Mui-focused, .Mui-selected, .Mui-selected.Mui-focused) .labelIcon': { + color: theme.palette.primary.dark, + ...theme.applyStyles('light', { + color: theme.palette.primary.main + }) + }, + '&::before': { + content: '""', + display: 'block', + position: 'absolute', + left: '16px', + top: '44px', + height: 'calc(100% - 48px)', + width: '1.5px', + backgroundColor: theme.palette.grey[700], + ...theme.applyStyles('light', { + backgroundColor: theme.palette.grey[300] + }) + } + }, + '&:hover': { + backgroundColor: alpha(theme.palette.primary.main, 0.1), + color: 'white', + ...theme.applyStyles('light', { + color: theme.palette.primary.main + }) + }, + [`&.Mui-focused, &.Mui-selected, &.Mui-selected.Mui-focused`]: { + backgroundColor: theme.palette.primary.dark, + color: theme.palette.primary.contrastText, + ...theme.applyStyles('light', { + backgroundColor: theme.palette.primary.main + }) + } +})) + +const StyledTreeItemLabelText = styled(Typography)(({ theme }) => ({ + color: theme.palette.text.primary +})) + +function CustomLabel({ icon: Icon, itemStatus, children, name, label, data, metadata, ...other }) { + const [openDialog, setOpenDialog] = useState(false) + 
+ const handleOpenDialog = (event) => { + // Stop propagation to prevent parent elements from capturing the click + event.stopPropagation() + setOpenDialog(true) + } + + const handleCloseDialog = () => setOpenDialog(false) + + // Check if this is an iteration node + const isIterationNode = name === 'iterationAgentflow' + + return ( + + + {(() => { + // Display iteration icon for iteration nodes + if (isIterationNode) { + return ( + + + + ) + } + + // Otherwise display the node icon + const foundIcon = AGENTFLOW_ICONS.find((icon) => icon.name === name) + if (foundIcon) { + return ( + + + + ) + } + return null + })()} + + {children} + + + + {Icon && } + + + e.stopPropagation()}> + {data ? ( + + ) : ( + No data available for this item + )} + + + + + + + ) +} + +CustomLabel.propTypes = { + icon: PropTypes.elementType, + itemStatus: PropTypes.string, + expandable: PropTypes.bool, + children: PropTypes.node, + name: PropTypes.string, + label: PropTypes.string, + status: PropTypes.object, + data: PropTypes.object, + metadata: PropTypes.object +} + +CustomLabel.displayName = 'CustomLabel' + +const isExpandable = (reactChildren) => { + if (Array.isArray(reactChildren)) { + return reactChildren.length > 0 && reactChildren.some(isExpandable) + } + return Boolean(reactChildren) +} + +const getIconFromStatus = (status, theme) => { + switch (status) { + case 'FINISHED': + return CheckCircleIcon + case 'ERROR': + case 'TIMEOUT': + return ErrorIcon + case 'TERMINATED': + // eslint-disable-next-line react/display-name + return (props) => + case 'STOPPED': + return StopCircleIcon + case 'INPROGRESS': + // eslint-disable-next-line react/display-name + return (props) => ( + // eslint-disable-next-line + + ) + } +} + +const CustomTreeItem = forwardRef(function CustomTreeItem(props, ref) { + const { id, itemId, label, disabled, children, agentflowId, sessionId, ...other } = props + const theme = useTheme() + + const { + getRootProps, + getContentProps, + getIconContainerProps, + 
getCheckboxProps, + getLabelProps, + getGroupTransitionProps, + getDragAndDropOverlayProps, + status, + publicAPI + } = useTreeItem2({ id, itemId, children, label, disabled, rootRef: ref }) + + const item = publicAPI.getItem(itemId) + const expandable = isExpandable(children) + let icon + if (item.status) { + icon = getIconFromStatus(item.status, theme) + } + + return ( + + + + + + + + + + + {children && ( + { + const nodeName = item.name || item.id?.split('_')[0] + const foundIcon = AGENTFLOW_ICONS.find((icon) => icon.name === nodeName) + return foundIcon ? foundIcon.color : theme.palette.primary.main + })()}`, + marginLeft: '13px', + paddingLeft: '8px' + }} + /> + )} + + + ) +}) + +CustomTreeItem.propTypes = { + id: PropTypes.string, + itemId: PropTypes.string, + label: PropTypes.string, + disabled: PropTypes.bool, + children: PropTypes.node, + agentflowId: PropTypes.string, + sessionId: PropTypes.string, + className: PropTypes.string +} + +CustomTreeItem.displayName = 'CustomTreeItem' + +const AgentExecutedDataCard = ({ status, execution, agentflowId, sessionId }) => { + const [executionTree, setExecution] = useState([]) + const [expandedItems, setExpandedItems] = useState([]) + const [selectedItem, setSelectedItem] = useState(null) + const theme = useTheme() + + const getAllNodeIds = (nodes) => { + let ids = [] + nodes.forEach((node) => { + ids.push(node.id) + if (node.children && node.children.length > 0) { + ids = [...ids, ...getAllNodeIds(node.children)] + } + }) + return ids + } + + // Transform the execution data into a tree structure + const buildTreeData = (nodes) => { + // for each node, loop through each and every nested key of node.data, and remove the key if it is equal to FLOWISE_CREDENTIAL_ID + nodes.forEach((node) => { + const removeFlowiseCredentialId = (data) => { + for (const key in data) { + if (key === FLOWISE_CREDENTIAL_ID) { + delete data[key] + } + if (typeof data[key] === 'object') { + removeFlowiseCredentialId(data[key]) + } + } + } + 
removeFlowiseCredentialId(node.data) + }) + + // Create a map for quick node lookup + // Use execution index to make each node instance unique + const nodeMap = new Map() + nodes.forEach((node, index) => { + const uniqueNodeId = `${node.nodeId}_${index}` + nodeMap.set(uniqueNodeId, { ...node, uniqueNodeId, children: [], executionIndex: index }) + }) + + // Identify iteration nodes and their children + const iterationGroups = new Map() // parentId -> Map of iterationIndex -> nodes + + // Group iteration child nodes by their parent and iteration index + nodes.forEach((node, index) => { + if (node.data?.parentNodeId && node.data?.iterationIndex !== undefined) { + const parentId = node.data.parentNodeId + const iterationIndex = node.data.iterationIndex + + if (!iterationGroups.has(parentId)) { + iterationGroups.set(parentId, new Map()) + } + + const iterationMap = iterationGroups.get(parentId) + if (!iterationMap.has(iterationIndex)) { + iterationMap.set(iterationIndex, []) + } + + iterationMap.get(iterationIndex).push(`${node.nodeId}_${index}`) + } + }) + + // Create virtual iteration container nodes + iterationGroups.forEach((iterationMap, parentId) => { + iterationMap.forEach((nodeIds, iterationIndex) => { + // Find the parent iteration node + let parentNode = null + for (let i = 0; i < nodes.length; i++) { + if (nodes[i].nodeId === parentId) { + parentNode = nodes[i] + break + } + } + + if (!parentNode) return + + // Get iteration context from first child node + const firstChildId = nodeIds[0] + const firstChild = nodeMap.get(firstChildId) + const iterationContext = firstChild?.data?.iterationContext || { index: iterationIndex } + + // Create a virtual node for this iteration + const iterationNodeId = `${parentId}_${iterationIndex}` + const iterationLabel = `Iteration #${iterationIndex}` + + // Determine status based on child nodes + const childNodes = nodeIds.map((id) => nodeMap.get(id)) + const iterationStatus = childNodes.some((n) => n.status === 'ERROR') + ? 
'ERROR' + : childNodes.some((n) => n.status === 'INPROGRESS') + ? 'INPROGRESS' + : childNodes.every((n) => n.status === 'FINISHED') + ? 'FINISHED' + : 'UNKNOWN' + + // Create the virtual node and add to nodeMap + const virtualNode = { + nodeId: iterationNodeId, + nodeLabel: iterationLabel, + data: { + name: 'iterationAgentflow', + iterationIndex, + iterationContext, + isVirtualNode: true, + parentIterationId: parentId + }, + previousNodeIds: [], // Will be handled in the main tree building + status: iterationStatus, + uniqueNodeId: iterationNodeId, + children: [], + executionIndex: -1 // Flag as a virtual node + } + + nodeMap.set(iterationNodeId, virtualNode) + + // Set this virtual node as the parent for all nodes in this iteration + nodeIds.forEach((childId) => { + const childNode = nodeMap.get(childId) + if (childNode) { + childNode.virtualParentId = iterationNodeId + } + }) + }) + }) + + // Root nodes have no previous nodes + const rootNodes = [] + const processedNodes = new Set() + + // First pass: Build the main tree structure (excluding iteration children) + nodes.forEach((node, index) => { + const uniqueNodeId = `${node.nodeId}_${index}` + const treeNode = nodeMap.get(uniqueNodeId) + + // Skip nodes that belong to an iteration (they'll be added to their virtual parent) + if (node.data?.parentNodeId && node.data?.iterationIndex !== undefined) { + return + } + + if (node.previousNodeIds.length === 0) { + rootNodes.push(treeNode) + } else { + // Find the most recent (latest) parent node among all previous nodes + let mostRecentParentIndex = -1 + let mostRecentParentId = null + + node.previousNodeIds.forEach((parentId) => { + // Find the most recent instance of this parent node + for (let i = 0; i < index; i++) { + if (nodes[i].nodeId === parentId && i > mostRecentParentIndex) { + mostRecentParentIndex = i + mostRecentParentId = parentId + } + } + }) + + // Only add to the most recent parent + if (mostRecentParentIndex !== -1) { + const parentUniqueId = 
`${mostRecentParentId}_${mostRecentParentIndex}` + const parentNode = nodeMap.get(parentUniqueId) + if (parentNode) { + parentNode.children.push(treeNode) + processedNodes.add(uniqueNodeId) + } + } + } + }) + + // Second pass: Build the iteration sub-trees + iterationGroups.forEach((iterationMap, parentId) => { + // Find all instances of the parent node + const parentInstances = [] + nodes.forEach((node, index) => { + if (node.nodeId === parentId) { + parentInstances.push(`${node.nodeId}_${index}`) + } + }) + + // Find the latest instance of the parent node that exists in the tree + let latestParent = null + for (let i = parentInstances.length - 1; i >= 0; i--) { + const parentId = parentInstances[i] + const parent = nodeMap.get(parentId) + if (parent) { + latestParent = parent + break + } + } + + if (!latestParent) return + + // Add all virtual iteration nodes to the parent + iterationMap.forEach((nodeIds, iterationIndex) => { + const iterationNodeId = `${parentId}_${iterationIndex}` + const virtualNode = nodeMap.get(iterationNodeId) + if (virtualNode) { + latestParent.children.push(virtualNode) + } + }) + }) + + // Third pass: Build the structure inside each virtual iteration node + nodeMap.forEach((node) => { + if (node.virtualParentId) { + const virtualParent = nodeMap.get(node.virtualParentId) + if (virtualParent) { + if (node.previousNodeIds.length === 0) { + // This is a root node within the iteration + virtualParent.children.push(node) + } else { + // Find its parent within the same iteration + let parentFound = false + for (const prevNodeId of node.previousNodeIds) { + // Look for nodes with the same previous node ID in the same iteration + nodeMap.forEach((potentialParent) => { + if ( + potentialParent.nodeId === prevNodeId && + potentialParent.data?.iterationIndex === node.data?.iterationIndex && + potentialParent.data?.parentNodeId === node.data?.parentNodeId && + !parentFound + ) { + potentialParent.children.push(node) + parentFound = true + } + }) + } 
+ + // If no parent was found within the iteration, add directly to virtual parent + if (!parentFound) { + virtualParent.children.push(node) + } + } + } + } + }) + + // Final pass: Sort all children arrays to ensure iteration nodes appear first + const sortChildrenNodes = (node) => { + if (node.children && node.children.length > 0) { + // Sort children: iteration nodes first, then others by their original execution order + node.children.sort((a, b) => { + // Check if a is an iteration node + const aIsIteration = a.data?.name === 'iterationAgentflow' || a.data?.isVirtualNode + // Check if b is an iteration node + const bIsIteration = b.data?.name === 'iterationAgentflow' || b.data?.isVirtualNode + + // If both are iterations or both are not iterations, preserve original order + if (aIsIteration === bIsIteration) { + return a.executionIndex - b.executionIndex + } + + // Otherwise, put iterations first + return aIsIteration ? -1 : 1 + }) + + // Recursively sort children's children + node.children.forEach(sortChildrenNodes) + } + } + + // Apply sorting to all root nodes and their children + rootNodes.forEach(sortChildrenNodes) + + // Transform to the required format + const transformNode = (node) => ({ + id: node.uniqueNodeId, + label: node.nodeLabel, + name: node.data?.name, + status: node.status, + data: node.data, + children: node.children.map(transformNode) + }) + + return rootNodes.map(transformNode) + } + + const handleExpandedItemsChange = (event, itemIds) => { + setExpandedItems(itemIds) + } + + useEffect(() => { + if (execution) { + const newTree = buildTreeData(execution) + + setExecution(newTree) + setExpandedItems(getAllNodeIds(newTree)) + // Set the first item as default selected item + if (newTree.length > 0) { + setSelectedItem(newTree[0]) + } + } + // eslint-disable-next-line react-hooks/exhaustive-deps + }, [execution]) + + const handleNodeSelect = (event, itemId) => { + const findNode = (nodes, id) => { + for (const node of nodes) { + if (node.id === 
id) return node + if (node.children) { + const found = findNode(node.children, id) + if (found) return found + } + } + return null + } + + const selectedNode = findNode(executionTree, itemId) + setSelectedItem(selectedNode) + } + + const getExecutionStatus = useCallback((executionTree) => { + const getAllStatuses = (nodes) => { + let statuses = [] + nodes.forEach((node) => { + if (node.status) statuses.push(node.status) + if (node.children && node.children.length > 0) { + statuses = [...statuses, ...getAllStatuses(node.children)] + } + }) + return statuses + } + + const statuses = getAllStatuses(executionTree) + if (statuses.includes('ERROR')) return 'ERROR' + if (statuses.includes('INPROGRESS')) return 'INPROGRESS' + if (statuses.includes('STOPPED')) return 'STOPPED' + if (statuses.every((status) => status === 'FINISHED')) return 'FINISHED' + return null + }, []) + + return ( + + + } + sx={{ + '& .MuiAccordionSummary-content': { + alignItems: 'center' + } + }} + > + {executionTree.length > 0 && + (() => { + const execStatus = status ?? 
getExecutionStatus(executionTree) + return ( + + + + ) + })()} + Process Flow + + + + + }} + sx={{ width: '100%' }} + /> + + + + ) +} + +AgentExecutedDataCard.propTypes = { + status: PropTypes.string, + execution: PropTypes.array, + agentflowId: PropTypes.string, + sessionId: PropTypes.string +} + +export default memo(AgentExecutedDataCard) diff --git a/packages/ui/src/views/chatmessage/AgentReasoningCard.jsx b/packages/ui/src/views/chatmessage/AgentReasoningCard.jsx new file mode 100644 index 000000000..ef69b49b5 --- /dev/null +++ b/packages/ui/src/views/chatmessage/AgentReasoningCard.jsx @@ -0,0 +1,183 @@ +import { Box, Card, CardContent, Chip, Stack } from '@mui/material' +import { IconTool, IconDeviceSdCard } from '@tabler/icons-react' +import { MemoizedReactMarkdown } from '@/ui-component/markdown/MemoizedReactMarkdown' +import nextAgentGIF from '@/assets/images/next-agent.gif' +import PropTypes from 'prop-types' + +const AgentReasoningCard = ({ + agent, + index, + customization, + chatflowid, + isDialog, + onSourceDialogClick, + renderArtifacts, + agentReasoningArtifacts, + getAgentIcon, + removeDuplicateURL, + isValidURL, + onURLClick, + getLabel +}) => { + if (agent.nextAgent) { + return ( + + + + + agentPNG + +
{agent.nextAgent}
+
+
+
+ ) + } + + return ( + + + + + agentPNG + +
{agent.agentName}
+
+ {agent.usedTools && agent.usedTools.length > 0 && ( +
+ {agent.usedTools.map((tool, index) => { + return tool !== null ? ( + } + onClick={() => onSourceDialogClick(tool, 'Used Tools')} + /> + ) : null + })} +
+ )} + {agent.state && Object.keys(agent.state).length > 0 && ( +
+ } + onClick={() => onSourceDialogClick(agent.state, 'State')} + /> +
+ )} + {agent.artifacts && ( +
+ {agentReasoningArtifacts(agent.artifacts).map((item, index) => { + return item !== null ? <>{renderArtifacts(item, index, true)} : null + })} +
+ )} + {agent.messages.length > 0 && ( + + {agent.messages.length > 1 ? agent.messages.join('\\n') : agent.messages[0]} + + )} + {agent.instructions &&

{agent.instructions}

} + {agent.messages.length === 0 && !agent.instructions &&

Finished

} + {agent.sourceDocuments && agent.sourceDocuments.length > 0 && ( +
+ {removeDuplicateURL(agent).map((source, index) => { + const URL = source && source.metadata && source.metadata.source ? isValidURL(source.metadata.source) : undefined + return ( + (URL ? onURLClick(source.metadata.source) : onSourceDialogClick(source))} + /> + ) + })} +
+ )} +
+
+ ) +} + +AgentReasoningCard.propTypes = { + agent: PropTypes.object.isRequired, + index: PropTypes.number.isRequired, + customization: PropTypes.object.isRequired, + chatflowid: PropTypes.string, + isDialog: PropTypes.bool, + onSourceDialogClick: PropTypes.func.isRequired, + renderArtifacts: PropTypes.func.isRequired, + agentReasoningArtifacts: PropTypes.func.isRequired, + getAgentIcon: PropTypes.func.isRequired, + removeDuplicateURL: PropTypes.func.isRequired, + isValidURL: PropTypes.func.isRequired, + onURLClick: PropTypes.func.isRequired, + getLabel: PropTypes.func.isRequired +} + +AgentReasoningCard.displayName = 'AgentReasoningCard' + +export default AgentReasoningCard diff --git a/packages/ui/src/views/chatmessage/ChatExpandDialog.jsx b/packages/ui/src/views/chatmessage/ChatExpandDialog.jsx index 88b8e90c3..702d778de 100644 --- a/packages/ui/src/views/chatmessage/ChatExpandDialog.jsx +++ b/packages/ui/src/views/chatmessage/ChatExpandDialog.jsx @@ -3,7 +3,7 @@ import PropTypes from 'prop-types' import { useSelector } from 'react-redux' import { Dialog, DialogContent, DialogTitle, Button } from '@mui/material' -import { ChatMessage } from './ChatMessage' +import ChatMessage from './ChatMessage' import { StyledButton } from '@/ui-component/button/StyledButton' import { IconEraser } from '@tabler/icons-react' diff --git a/packages/ui/src/views/chatmessage/ChatMessage.jsx b/packages/ui/src/views/chatmessage/ChatMessage.jsx index d5a67de7a..d8c4a66ea 100644 --- a/packages/ui/src/views/chatmessage/ChatMessage.jsx +++ b/packages/ui/src/views/chatmessage/ChatMessage.jsx @@ -1,11 +1,7 @@ -import { useState, useRef, useEffect, useCallback, Fragment } from 'react' +import { useState, useRef, useEffect, useCallback, Fragment, useContext, memo } from 'react' import { useSelector, useDispatch } from 'react-redux' import PropTypes from 'prop-types' import { cloneDeep } from 'lodash' -import rehypeMathjax from 'rehype-mathjax' -import rehypeRaw from 'rehype-raw' -import 
remarkGfm from 'remark-gfm' -import remarkMath from 'remark-math' import axios from 'axios' import { v4 as uuidv4 } from 'uuid' import { EventStreamContentType, fetchEventSource } from '@microsoft/fetch-event-source' @@ -22,10 +18,14 @@ import { InputAdornment, OutlinedInput, Typography, - CardContent, - Stack + Stack, + Dialog, + DialogTitle, + DialogContent, + DialogActions, + TextField } from '@mui/material' -import { useTheme } from '@mui/material/styles' +import { darken, useTheme } from '@mui/material/styles' import { IconCircleDot, IconDownload, @@ -36,7 +36,6 @@ import { IconX, IconTool, IconSquareFilled, - IconDeviceSdCard, IconCheck, IconPaperclip, IconSparkles @@ -46,14 +45,15 @@ import userPNG from '@/assets/images/account.png' import multiagent_supervisorPNG from '@/assets/images/multiagent_supervisor.png' import multiagent_workerPNG from '@/assets/images/multiagent_worker.png' import audioUploadSVG from '@/assets/images/wave-sound.jpg' -import nextAgentGIF from '@/assets/images/next-agent.gif' // project import -import { CodeBlock } from '@/ui-component/markdown/CodeBlock' +import NodeInputHandler from '@/views/canvas/NodeInputHandler' import { MemoizedReactMarkdown } from '@/ui-component/markdown/MemoizedReactMarkdown' import SourceDocDialog from '@/ui-component/dialog/SourceDocDialog' import ChatFeedbackContentDialog from '@/ui-component/dialog/ChatFeedbackContentDialog' import StarterPromptsCard from '@/ui-component/cards/StarterPromptsCard' +import AgentReasoningCard from './AgentReasoningCard' +import AgentExecutedDataCard from './AgentExecutedDataCard' import { ImageButton, ImageSrc, ImageBackdrop, ImageMarked } from '@/ui-component/button/ImageButton' import CopyToClipboardButton from '@/ui-component/button/CopyToClipboardButton' import ThumbsUpButton from '@/ui-component/button/ThumbsUpButton' @@ -70,9 +70,11 @@ import vectorstoreApi from '@/api/vectorstore' import attachmentsApi from '@/api/attachments' import chatmessagefeedbackApi from 
'@/api/chatmessagefeedback' import leadsApi from '@/api/lead' +import executionsApi from '@/api/executions' // Hooks import useApi from '@/hooks/useApi' +import { flowContext } from '@/store/context/ReactFlowContext' // Const import { baseURL, maxScroll } from '@/store/constant' @@ -159,13 +161,14 @@ CardWithDeleteOverlay.propTypes = { onDelete: PropTypes.func } -export const ChatMessage = ({ open, chatflowid, isAgentCanvas, isDialog, previews, setPreviews }) => { +const ChatMessage = ({ open, chatflowid, isAgentCanvas, isDialog, previews, setPreviews }) => { const theme = useTheme() const customization = useSelector((state) => state.customization) const ps = useRef() const dispatch = useDispatch() + const { onAgentflowNodeStatusUpdate, clearAgentflowNodeStatus } = useContext(flowContext) useNotifier() const enqueueSnackbar = (...args) => dispatch(enqueueSnackbarAction(...args)) @@ -192,6 +195,7 @@ export const ChatMessage = ({ open, chatflowid, isAgentCanvas, isDialog, preview const inputRef = useRef(null) const getChatmessageApi = useApi(chatmessageApi.getInternalChatmessageFromChatflow) + const getAllExecutionsApi = useApi(executionsApi.getAllExecutions) const getIsChatflowStreamingApi = useApi(chatflowsApi.getIsChatflowStreaming) const getAllowChatFlowUploads = useApi(chatflowsApi.getAllowChatflowUploads) const getChatflowConfig = useApi(chatflowsApi.getSpecificChatflow) @@ -200,6 +204,7 @@ export const ChatMessage = ({ open, chatflowid, isAgentCanvas, isDialog, preview // full file upload const [fullFileUpload, setFullFileUpload] = useState(false) + const [fullFileUploadAllowedTypes, setFullFileUploadAllowedTypes] = useState('*') // feedback const [chatFeedbackStatus, setChatFeedbackStatus] = useState(false) @@ -231,31 +236,54 @@ export const ChatMessage = ({ open, chatflowid, isAgentCanvas, isDialog, preview const [recordingNotSupported, setRecordingNotSupported] = useState(false) const [isLoadingRecording, setIsLoadingRecording] = useState(false) + const 
[openFeedbackDialog, setOpenFeedbackDialog] = useState(false) + const [feedback, setFeedback] = useState('') + const [pendingActionData, setPendingActionData] = useState(null) + const [feedbackType, setFeedbackType] = useState('') + + // start input type + const [startInputType, setStartInputType] = useState('') + const [formTitle, setFormTitle] = useState('') + const [formDescription, setFormDescription] = useState('') + const [formInputsData, setFormInputsData] = useState({}) + const [formInputParams, setFormInputParams] = useState([]) + + const [isConfigLoading, setIsConfigLoading] = useState(true) + const isFileAllowedForUpload = (file) => { const constraints = getAllowChatFlowUploads.data /** * {isImageUploadAllowed: boolean, imgUploadSizeAndTypes: Array<{ fileTypes: string[], maxUploadSize: number }>} */ let acceptFile = false + + // Early return if constraints are not available yet + if (!constraints) { + console.warn('Upload constraints not loaded yet') + return false + } + if (constraints.isImageUploadAllowed) { const fileType = file.type const sizeInMB = file.size / 1024 / 1024 - constraints.imgUploadSizeAndTypes.map((allowed) => { - if (allowed.fileTypes.includes(fileType) && sizeInMB <= allowed.maxUploadSize) { - acceptFile = true - } - }) + if (constraints.imgUploadSizeAndTypes && Array.isArray(constraints.imgUploadSizeAndTypes)) { + constraints.imgUploadSizeAndTypes.forEach((allowed) => { + if (allowed.fileTypes && allowed.fileTypes.includes(fileType) && sizeInMB <= allowed.maxUploadSize) { + acceptFile = true + } + }) + } } if (fullFileUpload) { return true } else if (constraints.isRAGFileUploadAllowed) { const fileExt = file.name.split('.').pop() - if (fileExt) { - constraints.fileUploadSizeAndTypes.map((allowed) => { - if (allowed.fileTypes.length === 1 && allowed.fileTypes[0] === '*') { + if (fileExt && constraints.fileUploadSizeAndTypes && Array.isArray(constraints.fileUploadSizeAndTypes)) { + constraints.fileUploadSizeAndTypes.forEach((allowed) 
=> { + if (allowed.fileTypes && allowed.fileTypes.length === 1 && allowed.fileTypes[0] === '*') { acceptFile = true - } else if (allowed.fileTypes.includes(`.${fileExt}`)) { + } else if (allowed.fileTypes && allowed.fileTypes.includes(`.${fileExt}`)) { acceptFile = true } }) @@ -545,6 +573,28 @@ export const ChatMessage = ({ open, chatflowid, isAgentCanvas, isDialog, preview }) } + const updateAgentFlowEvent = (event) => { + if (event === 'INPROGRESS') { + setMessages((prevMessages) => [...prevMessages, { message: '', type: 'apiMessage', agentFlowEventStatus: event }]) + } else { + setMessages((prevMessages) => { + let allMessages = [...cloneDeep(prevMessages)] + if (allMessages[allMessages.length - 1].type === 'userMessage') return allMessages + allMessages[allMessages.length - 1].agentFlowEventStatus = event + return allMessages + }) + } + } + + const updateAgentFlowExecutedData = (agentFlowExecutedData) => { + setMessages((prevMessages) => { + let allMessages = [...cloneDeep(prevMessages)] + if (allMessages[allMessages.length - 1].type === 'userMessage') return allMessages + allMessages[allMessages.length - 1].agentFlowExecutedData = agentFlowExecutedData + return allMessages + }) + } + const updateLastMessageAction = (action) => { setMessages((prevMessages) => { let allMessages = [...cloneDeep(prevMessages)] @@ -584,6 +634,28 @@ export const ChatMessage = ({ open, chatflowid, isAgentCanvas, isDialog, preview }) } + const updateLastMessageNextAgentFlow = (nextAgentFlow) => { + onAgentflowNodeStatusUpdate(nextAgentFlow) + } + + const updateLastMessageUsedTools = (usedTools) => { + setMessages((prevMessages) => { + let allMessages = [...cloneDeep(prevMessages)] + if (allMessages[allMessages.length - 1].type === 'userMessage') return allMessages + allMessages[allMessages.length - 1].usedTools = usedTools + return allMessages + }) + } + + const updateLastMessageFileAnnotations = (fileAnnotations) => { + setMessages((prevMessages) => { + let allMessages = 
[...cloneDeep(prevMessages)] + if (allMessages[allMessages.length - 1].type === 'userMessage') return allMessages + allMessages[allMessages.length - 1].fileAnnotations = fileAnnotations + return allMessages + }) + } + const abortMessage = () => { setIsMessageStopping(false) setMessages((prevMessages) => { @@ -612,25 +684,6 @@ export const ChatMessage = ({ open, chatflowid, isAgentCanvas, isDialog, preview }) } - const updateLastMessageUsedTools = (usedTools) => { - setMessages((prevMessages) => { - let allMessages = [...cloneDeep(prevMessages)] - if (allMessages[allMessages.length - 1].type === 'userMessage') return allMessages - allMessages[allMessages.length - 1].usedTools = usedTools - return allMessages - }) - } - - const updateLastMessageFileAnnotations = (fileAnnotations) => { - setMessages((prevMessages) => { - let allMessages = [...cloneDeep(prevMessages)] - if (allMessages[allMessages.length - 1].type === 'userMessage') return allMessages - allMessages[allMessages.length - 1].fileAnnotations = fileAnnotations - return allMessages - }) - } - - // Handle errors const handleError = (message = 'Oops! There seems to be an error. Please try again.') => { message = message.replace(`Unable to parse JSON response from chat agent.\n\n`, '') setMessages((prevMessages) => [...prevMessages, { message, type: 'apiMessage' }]) @@ -653,6 +706,29 @@ export const ChatMessage = ({ open, chatflowid, isAgentCanvas, isDialog, preview handleSubmit(undefined, promptStarterInput) } + const onSubmitResponse = (actionData, feedback = '', type = '') => { + let fbType = feedbackType + if (type) { + fbType = type + } + const question = feedback ? 
feedback : fbType.charAt(0).toUpperCase() + fbType.slice(1) + handleSubmit(undefined, question, undefined, { + type: fbType, + startNodeId: actionData?.nodeId, + feedback + }) + } + + const handleSubmitFeedback = () => { + if (pendingActionData) { + onSubmitResponse(pendingActionData, feedback) + setOpenFeedbackDialog(false) + setFeedback('') + setPendingActionData(null) + setFeedbackType('') + } + } + const handleActionClick = async (elem, action) => { setUserInput(elem.label) setMessages((prevMessages) => { @@ -661,7 +737,19 @@ export const ChatMessage = ({ open, chatflowid, isAgentCanvas, isDialog, preview allMessages[allMessages.length - 1].action = null return allMessages }) - handleSubmit(undefined, elem.label, action) + if (elem.type.includes('agentflowv2')) { + const type = elem.type.includes('approve') ? 'proceed' : 'reject' + setFeedbackType(type) + + if (action.data && action.data.input && action.data.input.humanInputEnableFeedback) { + setPendingActionData(action.data) + setOpenFeedbackDialog(true) + } else { + onSubmitResponse(action.data, '', type) + } + } else { + handleSubmit(undefined, elem.label, action) + } } const updateMetadata = (data, input) => { @@ -693,7 +781,11 @@ export const ChatMessage = ({ open, chatflowid, isAgentCanvas, isDialog, preview if (data.followUpPrompts) { const followUpPrompts = JSON.parse(data.followUpPrompts) - setFollowUpPrompts(followUpPrompts) + if (typeof followUpPrompts === 'string') { + setFollowUpPrompts(JSON.parse(followUpPrompts)) + } else { + setFollowUpPrompts(followUpPrompts) + } } } @@ -759,7 +851,7 @@ export const ChatMessage = ({ open, chatflowid, isAgentCanvas, isDialog, preview } // Handle form submission - const handleSubmit = async (e, selectedInput, action) => { + const handleSubmit = async (e, selectedInput, action, humanInput) => { if (e) e.preventDefault() if (!selectedInput && userInput.trim() === '') { @@ -771,13 +863,21 @@ export const ChatMessage = ({ open, chatflowid, isAgentCanvas, isDialog, 
preview let input = userInput - if (selectedInput !== undefined && selectedInput.trim() !== '') input = selectedInput + if (typeof selectedInput === 'string') { + if (selectedInput !== undefined && selectedInput.trim() !== '') input = selectedInput - if (input.trim()) { - inputHistory.addToHistory(input) + if (input.trim()) { + inputHistory.addToHistory(input) + } + } else if (typeof selectedInput === 'object') { + input = Object.entries(selectedInput) + .map(([key, value]) => `${key}: ${value}`) + .join('\n') } setLoading(true) + clearAgentflowNodeStatus() + let uploads = previews.map((item) => { return { data: item.data, @@ -803,9 +903,14 @@ export const ChatMessage = ({ open, chatflowid, isAgentCanvas, isDialog, preview question: input, chatId } + if (typeof selectedInput === 'object') { + params.form = selectedInput + delete params.question + } if (uploads && uploads.length > 0) params.uploads = uploads if (leadEmail) params.leadEmail = leadEmail if (action) params.action = action + if (humanInput) params.humanInput = humanInput if (isChatFlowAvailableToStream) { fetchResponseFromEventStream(chatflowid, params) @@ -828,8 +933,10 @@ export const ChatMessage = ({ open, chatflowid, isAgentCanvas, isDialog, preview id: data?.chatMessageId, sourceDocuments: data?.sourceDocuments, usedTools: data?.usedTools, + calledTools: data?.calledTools, fileAnnotations: data?.fileAnnotations, agentReasoning: data?.agentReasoning, + agentFlowExecutedData: data?.agentFlowExecutedData, action: data?.action, artifacts: data?.artifacts, type: 'apiMessage', @@ -894,6 +1001,12 @@ export const ChatMessage = ({ open, chatflowid, isAgentCanvas, isDialog, preview case 'agentReasoning': updateLastMessageAgentReasoning(payload.data) break + case 'agentFlowEvent': + updateAgentFlowEvent(payload.data) + break + case 'agentFlowExecutedData': + updateAgentFlowExecutedData(payload.data) + break case 'artifacts': updateLastMessageArtifacts(payload.data) break @@ -903,6 +1016,9 @@ export const 
ChatMessage = ({ open, chatflowid, isAgentCanvas, isDialog, preview case 'nextAgent': updateLastMessageNextAgent(payload.data) break + case 'nextAgentFlow': + updateLastMessageNextAgentFlow(payload.data) + break case 'metadata': updateMetadata(payload.data, input) break @@ -981,7 +1097,9 @@ export const ChatMessage = ({ open, chatflowid, isAgentCanvas, isDialog, preview } const getFileUploadAllowedTypes = () => { - if (fullFileUpload) return '*' + if (fullFileUpload) { + return fullFileUploadAllowedTypes === '' ? '*' : fullFileUploadAllowedTypes + } return fileUploadAllowedTypes.includes('*') ? '*' : fileUploadAllowedTypes || '*' } @@ -1052,6 +1170,8 @@ export const ChatMessage = ({ open, chatflowid, isAgentCanvas, isDialog, preview }) } if (message.followUpPrompts) obj.followUpPrompts = JSON.parse(message.followUpPrompts) + if (message.role === 'apiMessage' && message.execution && message.execution.executionData) + obj.agentFlowExecutedData = JSON.parse(message.execution.executionData) return obj }) setMessages((prevMessages) => [...prevMessages, ...loadedMessages]) @@ -1061,6 +1181,25 @@ export const ChatMessage = ({ open, chatflowid, isAgentCanvas, isDialog, preview // eslint-disable-next-line react-hooks/exhaustive-deps }, [getChatmessageApi.data]) + useEffect(() => { + if (getAllExecutionsApi.data?.length) { + const chatId = getAllExecutionsApi.data[0]?.sessionId + setChatId(chatId) + const loadedMessages = getAllExecutionsApi.data.map((execution) => { + const executionData = + typeof execution.executionData === 'string' ? 
JSON.parse(execution.executionData) : execution.executionData + const obj = { + id: execution.id, + agentFlow: executionData + } + return obj + }) + setMessages((prevMessages) => [...prevMessages, ...loadedMessages]) + setLocalStorageChatflow(chatflowid, chatId) + } + // eslint-disable-next-line react-hooks/exhaustive-deps + }, [getAllExecutionsApi.data]) + // Get chatflow streaming capability useEffect(() => { if (getIsChatflowStreamingApi.data) { @@ -1083,6 +1222,38 @@ export const ChatMessage = ({ open, chatflowid, isAgentCanvas, isDialog, preview useEffect(() => { if (getChatflowConfig.data) { + setIsConfigLoading(false) + if (getChatflowConfig.data?.flowData) { + let nodes = JSON.parse(getChatflowConfig.data?.flowData).nodes ?? [] + const startNode = nodes.find((node) => node.data.name === 'startAgentflow') + if (startNode) { + const startInputType = startNode.data.inputs?.startInputType + setStartInputType(startInputType) + + const formInputTypes = startNode.data.inputs?.formInputTypes + if (startInputType === 'formInput' && formInputTypes && formInputTypes.length > 0) { + for (const formInputType of formInputTypes) { + if (formInputType.type === 'options') { + formInputType.options = formInputType.addOptions.map((option) => ({ + label: option.option, + name: option.option + })) + } + } + setFormInputParams(formInputTypes) + setFormInputsData({ + id: 'formInput', + inputs: {}, + inputParams: formInputTypes + }) + setFormTitle(startNode.data.inputs?.formTitle) + setFormDescription(startNode.data.inputs?.formDescription) + } + + getAllExecutionsApi.request({ agentflowId: chatflowid }) + } + } + if (getChatflowConfig.data?.chatbotConfig && JSON.parse(getChatflowConfig.data?.chatbotConfig)) { let config = JSON.parse(getChatflowConfig.data?.chatbotConfig) if (config.starterPrompts) { @@ -1118,12 +1289,22 @@ export const ChatMessage = ({ open, chatflowid, isAgentCanvas, isDialog, preview if (config.fullFileUpload) { setFullFileUpload(config.fullFileUpload.status) + 
if (config.fullFileUpload?.allowedUploadFileTypes) { + setFullFileUploadAllowedTypes(config.fullFileUpload?.allowedUploadFileTypes) + } } } } // eslint-disable-next-line react-hooks/exhaustive-deps }, [getChatflowConfig.data]) + useEffect(() => { + if (getChatflowConfig.error) { + setIsConfigLoading(false) + } + // eslint-disable-next-line react-hooks/exhaustive-deps + }, [getChatflowConfig.error]) + useEffect(() => { if (fullFileUpload) { setIsChatFlowAvailableForFileUploads(true) @@ -1155,10 +1336,13 @@ export const ChatMessage = ({ open, chatflowid, isAgentCanvas, isDialog, preview getAllowChatFlowUploads.request(chatflowid) getChatflowConfig.request(chatflowid) - // Scroll to bottom - scrollToBottom() + // Add a small delay to ensure content is rendered before scrolling + setTimeout(() => { + scrollToBottom() + }, 100) setIsRecording(false) + setIsConfigLoading(true) // leads const savedLead = getLocalStorageChatflow(chatflowid)?.lead @@ -1198,7 +1382,13 @@ export const ChatMessage = ({ open, chatflowid, isAgentCanvas, isDialog, preview if (followUpPromptsStatus && messages.length > 0) { const lastMessage = messages[messages.length - 1] if (lastMessage.type === 'apiMessage' && lastMessage.followUpPrompts) { - setFollowUpPrompts(lastMessage.followUpPrompts) + if (Array.isArray(lastMessage.followUpPrompts)) { + setFollowUpPrompts(lastMessage.followUpPrompts) + } + if (typeof lastMessage.followUpPrompts === 'string') { + const followUpPrompts = JSON.parse(lastMessage.followUpPrompts) + setFollowUpPrompts(followUpPrompts) + } } else if (lastMessage.type === 'userMessage') { setFollowUpPrompts([]) } @@ -1477,35 +1667,124 @@ export const ChatMessage = ({ open, chatflowid, isAgentCanvas, isDialog, preview ) } else { return ( - - ) : ( - - {children} - - ) - } - }} - > + {item.data} ) } } + if (isConfigLoading) { + return ( + + + + + + ) + } + + if (startInputType === 'formInput' && messages.length === 1) { + return ( + + + + + {formTitle || 'Please Fill Out The Form'} 
+ + + {formDescription || 'Complete all fields below to continue'} + + + {/* Form inputs */} + + {formInputParams && + formInputParams.map((inputParam, index) => ( + + { + setFormInputsData((prev) => ({ + ...prev, + inputs: { + ...prev.inputs, + [inputParam.name]: newValue + } + })) + }} + /> + + ))} + + + + + + + ) + } + return (
{isDragActive && ( @@ -1589,210 +1868,37 @@ export const ChatMessage = ({ open, chatflowid, isAgentCanvas, isDialog, preview })}
)} - {message.agentReasoning && ( + {message.agentReasoning && message.agentReasoning.length > 0 && (
- {message.agentReasoning.map((agent, index) => { - return agent.nextAgent ? ( - - - - - agentPNG - -
{agent.nextAgent}
-
-
-
- ) : ( - - - - - agentPNG - -
{agent.agentName}
-
- {agent.usedTools && agent.usedTools.length > 0 && ( -
- {agent.usedTools.map((tool, index) => { - return tool !== null ? ( - } - onClick={() => onSourceDialogClick(tool, 'Used Tools')} - /> - ) : null - })} -
- )} - {agent.state && Object.keys(agent.state).length > 0 && ( -
- } - onClick={() => onSourceDialogClick(agent.state, 'State')} - /> -
- )} - {agent.artifacts && ( -
- {agentReasoningArtifacts(agent.artifacts).map((item, index) => { - return item !== null ? ( - <>{renderArtifacts(item, index, true)} - ) : null - })} -
- )} - {agent.messages.length > 0 && ( - - ) : ( - - {children} - - ) - } - }} - > - {agent.messages.length > 1 - ? agent.messages.join('\\n') - : agent.messages[0]} - - )} - {agent.instructions &&

{agent.instructions}

} - {agent.messages.length === 0 && !agent.instructions &&

Finished

} - {agent.sourceDocuments && agent.sourceDocuments.length > 0 && ( -
- {removeDuplicateURL(agent).map((source, index) => { - const URL = - source && source.metadata && source.metadata.source - ? isValidURL(source.metadata.source) - : undefined - return ( - - URL - ? onURLClick(source.metadata.source) - : onSourceDialogClick(source) - } - /> - ) - })} -
- )} -
-
- ) - })} + {message.agentReasoning.map((agent, index) => ( + + ))}
)} + {message.agentFlowExecutedData && + Array.isArray(message.agentFlowExecutedData) && + message.agentFlowExecutedData.length > 0 && ( + + )} {message.usedTools && (
} + icon={ + + } onClick={() => onSourceDialogClick(tool, 'Used Tools')} /> ) : null @@ -1910,30 +2026,7 @@ export const ChatMessage = ({ open, chatflowid, isAgentCanvas, isDialog, preview ) : ( <> - {/* Messages are being rendered in Markdown format */} - - ) : ( - - {children} - - ) - } - }} - > + {message.message} @@ -2012,7 +2105,8 @@ export const ChatMessage = ({ open, chatflowid, isAgentCanvas, isDialog, preview {(message.action.elements || []).map((elem, index) => { return ( <> - {elem.type === 'approve-button' && elem.label === 'Yes' ? ( + {(elem.type === 'approve-button' && elem.label === 'Yes') || + elem.type === 'agentflowv2-approve-button' ? ( - ) : elem.type === 'reject-button' && elem.label === 'No' ? ( + ) : (elem.type === 'reject-button' && elem.label === 'No') || + elem.type === 'agentflowv2-reject-button' ? ( + + +
) } @@ -2384,3 +2510,5 @@ ChatMessage.propTypes = { previews: PropTypes.array, setPreviews: PropTypes.func } + +export default memo(ChatMessage) diff --git a/packages/ui/src/views/chatmessage/ChatPopUp.jsx b/packages/ui/src/views/chatmessage/ChatPopUp.jsx index 05deb38da..1731feb7e 100644 --- a/packages/ui/src/views/chatmessage/ChatPopUp.jsx +++ b/packages/ui/src/views/chatmessage/ChatPopUp.jsx @@ -1,4 +1,4 @@ -import { useState, useRef, useEffect } from 'react' +import { memo, useState, useRef, useEffect, useContext } from 'react' import { useDispatch } from 'react-redux' import PropTypes from 'prop-types' @@ -10,7 +10,7 @@ import { IconMessage, IconX, IconEraser, IconArrowsMaximize } from '@tabler/icon import { StyledFab } from '@/ui-component/button/StyledFab' import MainCard from '@/ui-component/cards/MainCard' import Transitions from '@/ui-component/extended/Transitions' -import { ChatMessage } from './ChatMessage' +import ChatMessage from './ChatMessage' import ChatExpandDialog from './ChatExpandDialog' // api @@ -19,6 +19,7 @@ import chatmessageApi from '@/api/chatmessage' // Hooks import useConfirm from '@/hooks/useConfirm' import useNotifier from '@/utils/useNotifier' +import { flowContext } from '@/store/context/ReactFlowContext' // Const import { enqueueSnackbar as enqueueSnackbarAction, closeSnackbar as closeSnackbarAction } from '@/store/actions' @@ -26,10 +27,11 @@ import { enqueueSnackbar as enqueueSnackbarAction, closeSnackbar as closeSnackba // Utils import { getLocalStorageChatflow, removeLocalStorageChatHistory } from '@/utils/genericHelper' -export const ChatPopUp = ({ chatflowid, isAgentCanvas }) => { +const ChatPopUp = ({ chatflowid, isAgentCanvas, onOpenChange }) => { const theme = useTheme() const { confirm } = useConfirm() const dispatch = useDispatch() + const { clearAgentflowNodeStatus } = useContext(flowContext) useNotifier() const enqueueSnackbar = (...args) => dispatch(enqueueSnackbarAction(...args)) @@ -48,10 +50,13 @@ export const 
ChatPopUp = ({ chatflowid, isAgentCanvas }) => { return } setOpen(false) + if (onOpenChange) onOpenChange(false) } const handleToggle = () => { - setOpen((prevOpen) => !prevOpen) + const newOpenState = !open + setOpen(newOpenState) + if (onOpenChange) onOpenChange(newOpenState) } const expandChat = () => { @@ -69,6 +74,7 @@ export const ChatPopUp = ({ chatflowid, isAgentCanvas }) => { open: false } setExpandDialogProps(props) + clearAgentflowNodeStatus() setTimeout(() => { const resetProps = { ...expandDialogProps, @@ -127,6 +133,7 @@ export const ChatPopUp = ({ chatflowid, isAgentCanvas }) => { useEffect(() => { if (prevOpen.current === true && open === false) { anchorRef.current.focus() + if (onOpenChange) onOpenChange(false) } prevOpen.current = open @@ -146,6 +153,7 @@ export const ChatPopUp = ({ chatflowid, isAgentCanvas }) => { > {open ? : } + {open && ( { ) } -ChatPopUp.propTypes = { chatflowid: PropTypes.string, isAgentCanvas: PropTypes.bool } +ChatPopUp.propTypes = { + chatflowid: PropTypes.string, + isAgentCanvas: PropTypes.bool, + onOpenChange: PropTypes.func +} + +export default memo(ChatPopUp) diff --git a/packages/ui/src/views/chatmessage/ValidationPopUp.jsx b/packages/ui/src/views/chatmessage/ValidationPopUp.jsx new file mode 100644 index 000000000..1cd9b97c2 --- /dev/null +++ b/packages/ui/src/views/chatmessage/ValidationPopUp.jsx @@ -0,0 +1,301 @@ +import { useState, useRef, useEffect, memo } from 'react' +import { useDispatch, useSelector } from 'react-redux' +import PropTypes from 'prop-types' + +import { Typography, Box, ClickAwayListener, Paper, Popper, Button } from '@mui/material' +import { useTheme, alpha, lighten, darken } from '@mui/material/styles' +import { IconCheckbox, IconMessage, IconX, IconExclamationCircle, IconChecklist } from '@tabler/icons-react' + +// project import +import { StyledFab } from '@/ui-component/button/StyledFab' +import MainCard from '@/ui-component/cards/MainCard' +import Transitions from 
'@/ui-component/extended/Transitions' +import validate_empty from '@/assets/images/validate_empty.svg' + +// api +import validationApi from '@/api/validation' + +// Hooks +import useNotifier from '@/utils/useNotifier' + +// Const +import { enqueueSnackbar as enqueueSnackbarAction } from '@/store/actions' +import { AGENTFLOW_ICONS } from '@/store/constant' + +// Utils + +const ValidationPopUp = ({ chatflowid, hidden }) => { + const theme = useTheme() + const dispatch = useDispatch() + const customization = useSelector((state) => state.customization) + + useNotifier() + const enqueueSnackbar = (...args) => dispatch(enqueueSnackbarAction(...args)) + + const [open, setOpen] = useState(false) + const [previews, setPreviews] = useState([]) + const [loading, setLoading] = useState(false) + + const anchorRef = useRef(null) + const prevOpen = useRef(open) + + const handleClose = (event) => { + if (anchorRef.current && anchorRef.current.contains(event.target)) { + return + } + setOpen(false) + } + + const handleToggle = () => { + setOpen((prevOpen) => !prevOpen) + } + + const validateFlow = async () => { + if (!chatflowid) return + + try { + setLoading(true) + const response = await validationApi.checkValidation(chatflowid) + setPreviews(response.data) + + if (response.data.length === 0) { + enqueueSnackbar({ + message: 'No issues found in your flow!', + options: { + key: new Date().getTime() + Math.random(), + variant: 'success', + autoHideDuration: 3000 + } + }) + } + } catch (error) { + console.error(error) + enqueueSnackbar({ + message: error.message || 'Failed to validate flow', + options: { + key: new Date().getTime() + Math.random(), + variant: 'error', + autoHideDuration: 3000 + } + }) + } finally { + setLoading(false) + } + } + + useEffect(() => { + if (prevOpen.current === true && open === false) { + anchorRef.current.focus() + } + prevOpen.current = open + + // eslint-disable-next-line react-hooks/exhaustive-deps + }, [open, chatflowid]) + + const getNodeIcon = 
(item) => { + // Extract node name from the item + const nodeName = item.name + + // Find matching icon from AGENTFLOW_ICONS + const foundIcon = AGENTFLOW_ICONS.find((icon) => icon.name === nodeName) + + if (foundIcon) { + return ( + + + + ) + } + + // Default icon if no match found + return ( + + {item.type === 'LLM' ? : } + + ) + } + + return ( + <> + {!hidden && ( + + {open ? : } + + )} + + + {({ TransitionProps }) => ( + + + + + + Checklist ({previews.length}) + + + + {previews.length > 0 ? ( + previews.map((item, index) => ( + +
+ {getNodeIcon(item)} +
{item.label || item.name}
+
+ + + + {item.issues.map((issue, issueIndex) => ( + + + {issue} + + ))} +
+ )) + ) : ( + + validate_empty + + )} +
+ + + + +
+
+
+
+ )} +
+ + ) +} + +ValidationPopUp.propTypes = { + chatflowid: PropTypes.string, + hidden: PropTypes.bool +} + +export default memo(ValidationPopUp) diff --git a/packages/ui/src/views/credentials/AddEditCredentialDialog.jsx b/packages/ui/src/views/credentials/AddEditCredentialDialog.jsx index d1ca3d3cd..13cddd162 100644 --- a/packages/ui/src/views/credentials/AddEditCredentialDialog.jsx +++ b/packages/ui/src/views/credentials/AddEditCredentialDialog.jsx @@ -29,6 +29,7 @@ import { initializeDefaultNodeData } from '@/utils/genericHelper' // const import { baseURL, REDACTED_CREDENTIAL_VALUE } from '@/store/constant' import { HIDE_CANVAS_DIALOG, SHOW_CANVAS_DIALOG } from '@/store/actions' +import keySVG from '@/assets/images/key.svg' const AddEditCredentialDialog = ({ show, dialogProps, onCancel, onConfirm, setError }) => { const portalElement = document.getElementById('portal') @@ -237,6 +238,11 @@ const AddEditCredentialDialog = ({ show, dialogProps, onCancel, onConfirm, setEr }} alt={componentCredential.name} src={`${baseURL}/api/v1/components-credentials-icon/${componentCredential.name}`} + onError={(e) => { + e.target.onerror = null + e.target.style.padding = '5px' + e.target.src = keySVG + }} />
{componentCredential.label} diff --git a/packages/ui/src/views/credentials/CredentialListDialog.jsx b/packages/ui/src/views/credentials/CredentialListDialog.jsx index a6b19cb5c..d5d1058d2 100644 --- a/packages/ui/src/views/credentials/CredentialListDialog.jsx +++ b/packages/ui/src/views/credentials/CredentialListDialog.jsx @@ -9,6 +9,7 @@ import { IconSearch, IconX } from '@tabler/icons-react' // const import { baseURL } from '@/store/constant' import { HIDE_CANVAS_DIALOG, SHOW_CANVAS_DIALOG } from '@/store/actions' +import keySVG from '@/assets/images/key.svg' const CredentialListDialog = ({ show, dialogProps, onCancel, onCredentialSelected }) => { const portalElement = document.getElementById('portal') @@ -152,6 +153,11 @@ const CredentialListDialog = ({ show, dialogProps, onCancel, onCredentialSelecte }} alt={componentCredential.name} src={`${baseURL}/api/v1/components-credentials-icon/${componentCredential.name}`} + onError={(e) => { + e.target.onerror = null + e.target.style.padding = '5px' + e.target.src = keySVG + }} />
{componentCredential.label} diff --git a/packages/ui/src/views/credentials/index.jsx b/packages/ui/src/views/credentials/index.jsx index 4509ea1dd..266bbd47a 100644 --- a/packages/ui/src/views/credentials/index.jsx +++ b/packages/ui/src/views/credentials/index.jsx @@ -42,6 +42,7 @@ import useNotifier from '@/utils/useNotifier' // Icons import { IconTrash, IconEdit, IconX, IconPlus } from '@tabler/icons-react' import CredentialEmptySVG from '@/assets/images/credential_empty.svg' +import keySVG from '@/assets/images/key.svg' // const import { baseURL } from '@/store/constant' @@ -233,6 +234,7 @@ const Credentials = () => { search={true} searchPlaceholder='Search Credentials' title='Credentials' + description='API keys, tokens, and secrets for 3rd party integrations' > { }} alt={credential.credentialName} src={`${baseURL}/api/v1/components-credentials-icon/${credential.credentialName}`} + onError={(e) => { + e.target.onerror = null + e.target.style.padding = '5px' + e.target.src = keySVG + }} /> {credential.name} - {moment(credential.updatedDate).format('MMMM Do, YYYY')} + {moment(credential.updatedDate).format('MMMM Do, YYYY HH:mm:ss')} - {moment(credential.createdDate).format('MMMM Do, YYYY')} + {moment(credential.createdDate).format('MMMM Do, YYYY HH:mm:ss')} edit(credential)}> diff --git a/packages/ui/src/views/docstore/DocStoreAPIDialog.jsx b/packages/ui/src/views/docstore/DocStoreAPIDialog.jsx index 8cf06e751..0807838a0 100644 --- a/packages/ui/src/views/docstore/DocStoreAPIDialog.jsx +++ b/packages/ui/src/views/docstore/DocStoreAPIDialog.jsx @@ -1,12 +1,7 @@ import { createPortal } from 'react-dom' import { useState, useEffect } from 'react' import PropTypes from 'prop-types' -import rehypeMathjax from 'rehype-mathjax' -import rehypeRaw from 'rehype-raw' -import remarkGfm from 'remark-gfm' -import remarkMath from 'remark-math' import { MemoizedReactMarkdown } from '@/ui-component/markdown/MemoizedReactMarkdown' -import { CodeBlock } from 
'@/ui-component/markdown/CodeBlock' import { Typography, Stack, Card, Accordion, AccordionSummary, AccordionDetails, Dialog, DialogContent, DialogTitle } from '@mui/material' import { TableViewOnly } from '@/ui-component/table/Table' import documentstoreApi from '@/api/documentstore' @@ -308,29 +303,7 @@ curl -X POST http://localhost:3000/api/v1/document-store/upsert/${dialogProps.st {dialogProps.title} - - ) : ( - - {children} - - ) - } - }} - > - {values} - + {values} You can override existing configurations: diff --git a/packages/ui/src/views/docstore/DocStoreInputHandler.jsx b/packages/ui/src/views/docstore/DocStoreInputHandler.jsx index 97c307ff6..7c6b3e521 100644 --- a/packages/ui/src/views/docstore/DocStoreInputHandler.jsx +++ b/packages/ui/src/views/docstore/DocStoreInputHandler.jsx @@ -4,7 +4,7 @@ import { useSelector } from 'react-redux' // material-ui import { Box, Typography, IconButton, Button } from '@mui/material' -import { IconArrowsMaximize, IconAlertTriangle } from '@tabler/icons-react' +import { IconRefresh, IconArrowsMaximize, IconAlertTriangle } from '@tabler/icons-react' // project import import { Dropdown } from '@/ui-component/dropdown/Dropdown' @@ -33,6 +33,7 @@ const DocStoreInputHandler = ({ inputParam, data, disabled = false }) => { const [expandDialogProps, setExpandDialogProps] = useState({}) const [showManageScrapedLinksDialog, setShowManageScrapedLinksDialog] = useState(false) const [manageScrapedLinksDialogProps, setManageScrapedLinksDialogProps] = useState({}) + const [reloadTimestamp, setReloadTimestamp] = useState(Date.now().toString()) const onExpandDialogClicked = (value, inputParam) => { const dialogProps = { @@ -216,19 +217,33 @@ const DocStoreInputHandler = ({ inputParam, data, disabled = false }) => { value={data.inputs[inputParam.name] ?? inputParam.default ?? 
'choose an option'} /> )} - {inputParam.type === 'asyncOptions' && ( + {(inputParam.type === 'asyncOptions' || inputParam.type === 'asyncMultiOptions') && ( <> {data.inputParams?.length === 1 &&
}
- (data.inputs[inputParam.name] = newValue)} - onCreateNew={() => addAsyncOption(inputParam.name)} - /> +
+ (data.inputs[inputParam.name] = newValue)} + onCreateNew={() => addAsyncOption(inputParam.name)} + /> +
+ {inputParam.refresh && ( + setReloadTimestamp(Date.now().toString())} + > + + + )}
)} diff --git a/packages/ui/src/views/docstore/ExpandedChunkDialog.jsx b/packages/ui/src/views/docstore/ExpandedChunkDialog.jsx index e1adc1f93..a0b85b10f 100644 --- a/packages/ui/src/views/docstore/ExpandedChunkDialog.jsx +++ b/packages/ui/src/views/docstore/ExpandedChunkDialog.jsx @@ -181,7 +181,16 @@ const ExpandedChunkDialog = ({ show, dialogProps, onCancel, onChunkEdit, onDelet onValueChange={(text) => setContentValue(text)} /> )} -
+
e.stopPropagation()} + onKeyDown={(e) => { + if (e.key === 'Enter' || e.key === ' ') { + e.stopPropagation() + } + }} + role='presentation' + style={{ marginTop: '20px', marginBottom: '15px' }} + > {!isEdit && ( { ) : ( - + { {data.inputAnchors.map((inputAnchor, index) => ( ))} - {data.inputParams.map((inputParam, index) => ( - - ))} + {data.inputParams + .filter((inputParam) => inputParam.display !== false) + .map((inputParam, index) => ( + + ))} {data.inputParams.find((param) => param.additionalParams) && (
{ @@ -77,6 +73,7 @@ const Marketplace = () => { const [isLoading, setLoading] = useState(true) const [error, setError] = useState(null) const [images, setImages] = useState({}) + const [icons, setIcons] = useState({}) const [usecases, setUsecases] = useState([]) const [eligibleUsecases, setEligibleUsecases] = useState([]) const [selectedUsecases, setSelectedUsecases] = useState([]) @@ -95,6 +92,7 @@ const Marketplace = () => { const getAllCustomTemplatesApi = useApi(marketplacesApi.getAllCustomTemplates) const [activeTabValue, setActiveTabValue] = useState(0) const [templateImages, setTemplateImages] = useState({}) + const [templateIcons, setTemplateIcons] = useState({}) const [templateUsecases, setTemplateUsecases] = useState([]) const [eligibleTemplateUsecases, setEligibleTemplateUsecases] = useState([]) const [selectedTemplateUsecases, setSelectedTemplateUsecases] = useState([]) @@ -102,6 +100,16 @@ const Marketplace = () => { const closeSnackbar = (...args) => dispatch(closeSnackbarAction(...args)) const { confirm } = useConfirm() + const getSelectStyles = (borderColor, isDarkMode) => ({ + '& .MuiOutlinedInput-notchedOutline': { + borderRadius: 2, + borderColor: borderColor + }, + '& .MuiSvgIcon-root': { + color: isDarkMode ? 
'#fff' : 'inherit' + } + }) + const handleTabChange = (event, newValue) => { if (newValue === 1 && !getAllCustomTemplatesApi.data) { getAllCustomTemplatesApi.request() @@ -304,7 +312,11 @@ const Marketplace = () => { } const goToCanvas = (selectedChatflow) => { - navigate(`/marketplace/${selectedChatflow.id}`, { state: selectedChatflow }) + if (selectedChatflow.type === 'AgentflowV2') { + navigate(`/v2/marketplace/${selectedChatflow.id}`, { state: selectedChatflow }) + } else { + navigate(`/marketplace/${selectedChatflow.id}`, { state: selectedChatflow }) + } } useEffect(() => { @@ -323,6 +335,7 @@ const Marketplace = () => { const flows = getAllTemplatesMarketplacesApi.data const usecases = [] const images = {} + const icons = {} for (let i = 0; i < flows.length; i += 1) { if (flows[i].flowData) { const flowDataStr = flows[i].flowData @@ -330,15 +343,22 @@ const Marketplace = () => { usecases.push(...flows[i].usecases) const nodes = flowData.nodes || [] images[flows[i].id] = [] + icons[flows[i].id] = [] for (let j = 0; j < nodes.length; j += 1) { - const imageSrc = `${baseURL}/api/v1/node-icon/${nodes[j].data.name}` - if (!images[flows[i].id].includes(imageSrc)) { - images[flows[i].id].push(imageSrc) + const foundIcon = AGENTFLOW_ICONS.find((icon) => icon.name === nodes[j].data.name) + if (foundIcon) { + icons[flows[i].id].push(foundIcon) + } else { + const imageSrc = `${baseURL}/api/v1/node-icon/${nodes[j].data.name}` + if (!images[flows[i].id].includes(imageSrc)) { + images[flows[i].id].push(imageSrc) + } } } } } setImages(images) + setIcons(icons) setUsecases(Array.from(new Set(usecases)).sort()) setEligibleUsecases(Array.from(new Set(usecases)).sort()) } catch (e) { @@ -363,6 +383,7 @@ const Marketplace = () => { const flows = getAllCustomTemplatesApi.data const usecases = [] const tImages = {} + const tIcons = {} for (let i = 0; i < flows.length; i += 1) { if (flows[i].flowData) { const flowDataStr = flows[i].flowData @@ -373,15 +394,22 @@ const Marketplace = 
() => { } const nodes = flowData.nodes || [] tImages[flows[i].id] = [] + tIcons[flows[i].id] = [] for (let j = 0; j < nodes.length; j += 1) { - const imageSrc = `${baseURL}/api/v1/node-icon/${nodes[j].data.name}` - if (!tImages[flows[i].id].includes(imageSrc)) { - tImages[flows[i].id].push(imageSrc) + const foundIcon = AGENTFLOW_ICONS.find((icon) => icon.name === nodes[j].data.name) + if (foundIcon) { + tIcons[flows[i].id].push(foundIcon) + } else { + const imageSrc = `${baseURL}/api/v1/node-icon/${nodes[j].data.name}` + if (!tImages[flows[i].id].includes(imageSrc)) { + tImages[flows[i].id].push(imageSrc) + } } } } } setTemplateImages(tImages) + setTemplateIcons(tIcons) setTemplateUsecases(Array.from(new Set(usecases)).sort()) setEligibleTemplateUsecases(Array.from(new Set(usecases)).sort()) } catch (e) { @@ -426,10 +454,10 @@ const Marketplace = () => { multiple value={badgeFilter} onChange={handleBadgeFilterChange} - input={} + input={} renderValue={(selected) => selected.join(', ')} MenuProps={MenuProps} - sx={SelectStyles} + sx={getSelectStyles(theme.palette.grey[900] + 25, theme?.customization?.isDarkMode)} > {badges.map((name) => ( { multiple value={typeFilter} onChange={handleTypeFilterChange} - input={} + input={} renderValue={(selected) => selected.join(', ')} MenuProps={MenuProps} - sx={SelectStyles} + sx={getSelectStyles(theme.palette.grey[900] + 25, theme?.customization?.isDarkMode)} > {types.map((name) => ( { multiple value={frameworkFilter} onChange={handleFrameworkFilterChange} - input={} + input={} renderValue={(selected) => selected.join(', ')} MenuProps={MenuProps} - sx={SelectStyles} + sx={getSelectStyles(theme.palette.grey[900] + 25, theme?.customization?.isDarkMode)} > {framework.map((name) => ( { search={true} searchPlaceholder='Search Name/Description/Node' title='Marketplace' + description='Explore and use pre-built templates' > { badgeContent={data.badge} color={data.badge === 'POPULAR' ? 
'primary' : 'error'} > - {(data.type === 'Chatflow' || data.type === 'Agentflow') && ( + {(data.type === 'Chatflow' || + data.type === 'Agentflow' || + data.type === 'AgentflowV2') && ( goToCanvas(data)} data={data} images={images[data.id]} + icons={icons[data.id]} /> )} {data.type === 'Tool' && ( @@ -636,13 +668,17 @@ const Marketplace = () => { )} )} - {!data.badge && (data.type === 'Chatflow' || data.type === 'Agentflow') && ( - goToCanvas(data)} - data={data} - images={images[data.id]} - /> - )} + {!data.badge && + (data.type === 'Chatflow' || + data.type === 'Agentflow' || + data.type === 'AgentflowV2') && ( + goToCanvas(data)} + data={data} + images={images[data.id]} + icons={icons[data.id]} + /> + )} {!data.badge && data.type === 'Tool' && ( goToTool(data)} /> )} @@ -747,11 +783,14 @@ const Marketplace = () => { badgeContent={data.badge} color={data.badge === 'POPULAR' ? 'primary' : 'error'} > - {(data.type === 'Chatflow' || data.type === 'Agentflow') && ( + {(data.type === 'Chatflow' || + data.type === 'Agentflow' || + data.type === 'AgentflowV2') && ( goToCanvas(data)} data={data} images={templateImages[data.id]} + icons={templateIcons[data.id]} /> )} {data.type === 'Tool' && ( @@ -759,13 +798,17 @@ const Marketplace = () => { )} )} - {!data.badge && (data.type === 'Chatflow' || data.type === 'Agentflow') && ( - goToCanvas(data)} - data={data} - images={templateImages[data.id]} - /> - )} + {!data.badge && + (data.type === 'Chatflow' || + data.type === 'Agentflow' || + data.type === 'AgentflowV2') && ( + goToCanvas(data)} + data={data} + images={templateImages[data.id]} + icons={templateIcons[data.id]} + /> + )} {!data.badge && data.type === 'Tool' && ( goToTool(data)} /> )} diff --git a/packages/ui/src/views/tools/index.jsx b/packages/ui/src/views/tools/index.jsx index f858f0ade..1a2e5d803 100644 --- a/packages/ui/src/views/tools/index.jsx +++ b/packages/ui/src/views/tools/index.jsx @@ -138,7 +138,13 @@ const Tools = () => { ) : ( - + ({ borderColor: 
theme.palette.grey[900] + 25, @@ -170,6 +169,7 @@ const Variables = () => { const onConfirm = () => { setShowVariableDialog(false) getAllVariables.request() + refreshVariablesCache() } useEffect(() => { @@ -200,7 +200,13 @@ const Variables = () => { ) : ( - + @@ -340,10 +346,10 @@ const Variables = () => { /> - {moment(variable.updatedDate).format('MMMM Do, YYYY')} + {moment(variable.updatedDate).format('MMMM Do, YYYY HH:mm:ss')} - {moment(variable.createdDate).format('MMMM Do, YYYY')} + {moment(variable.createdDate).format('MMMM Do, YYYY HH:mm:ss')} edit(variable)}> diff --git a/packages/ui/src/views/vectorstore/VectorStorePopUp.jsx b/packages/ui/src/views/vectorstore/VectorStorePopUp.jsx index 899faf24a..e7fe929fa 100644 --- a/packages/ui/src/views/vectorstore/VectorStorePopUp.jsx +++ b/packages/ui/src/views/vectorstore/VectorStorePopUp.jsx @@ -1,4 +1,4 @@ -import { useState, useRef, useEffect } from 'react' +import { useState, useRef, useEffect, memo } from 'react' import PropTypes from 'prop-types' import { IconDatabaseImport, IconX } from '@tabler/icons-react' @@ -8,7 +8,7 @@ import { StyledFab } from '@/ui-component/button/StyledFab' import VectorStoreDialog from './VectorStoreDialog' import UpsertResultDialog from './UpsertResultDialog' -export const VectorStorePopUp = ({ chatflowid }) => { +const VectorStorePopUp = ({ chatflowid }) => { const [open, setOpen] = useState(false) const [showExpandDialog, setShowExpandDialog] = useState(false) const [expandDialogProps, setExpandDialogProps] = useState({}) @@ -77,3 +77,5 @@ export const VectorStorePopUp = ({ chatflowid }) => { } VectorStorePopUp.propTypes = { chatflowid: PropTypes.string } + +export default memo(VectorStorePopUp) diff --git a/packages/ui/vite.config.js b/packages/ui/vite.config.js index 34aeec774..c336cd4de 100644 --- a/packages/ui/vite.config.js +++ b/packages/ui/vite.config.js @@ -18,12 +18,23 @@ export default defineConfig(async ({ mode }) => { } } } + dotenv.config() return { plugins: 
[react()], resolve: { alias: { - '@': resolve(__dirname, 'src') + '@': resolve(__dirname, 'src'), + '@codemirror/state': resolve(__dirname, '../../node_modules/@codemirror/state'), + '@codemirror/view': resolve(__dirname, '../../node_modules/@codemirror/view'), + '@codemirror/language': resolve(__dirname, '../../node_modules/@codemirror/language'), + '@codemirror/lang-javascript': resolve(__dirname, '../../node_modules/@codemirror/lang-javascript'), + '@codemirror/lang-json': resolve(__dirname, '../../node_modules/@codemirror/lang-json'), + '@uiw/react-codemirror': resolve(__dirname, '../../node_modules/@uiw/react-codemirror'), + '@uiw/codemirror-theme-vscode': resolve(__dirname, '../../node_modules/@uiw/codemirror-theme-vscode'), + '@uiw/codemirror-theme-sublime': resolve(__dirname, '../../node_modules/@uiw/codemirror-theme-sublime'), + '@lezer/common': resolve(__dirname, '../../node_modules/@lezer/common'), + '@lezer/highlight': resolve(__dirname, '../../node_modules/@lezer/highlight') } }, root: resolve(__dirname),