Merge branch 'main' into chore/Models-Update

# Conflicts:
#	packages/components/models.json
This commit is contained in:
Henry 2025-05-23 13:17:46 +08:00
commit e18c034807
351 changed files with 42612 additions and 5885 deletions

View File

@ -120,45 +120,48 @@ Flowise has 3 different modules in a single mono repository.
Flowise support different environment variables to configure your instance. You can specify the following variables in the `.env` file inside `packages/server` folder. Read [more](https://docs.flowiseai.com/environment-variables)
| Variable | Description | Type | Default |
| ---------------------------- | -------------------------------------------------------------------------------- | ------------------------------------------------ | ----------------------------------- |
| PORT | The HTTP port Flowise runs on | Number | 3000 |
| CORS_ORIGINS | The allowed origins for all cross-origin HTTP calls | String | |
| IFRAME_ORIGINS | The allowed origins for iframe src embedding | String | |
| FLOWISE_USERNAME | Username to login | String | |
| FLOWISE_PASSWORD | Password to login | String | |
| FLOWISE_FILE_SIZE_LIMIT | Upload File Size Limit | String | 50mb |
| DEBUG | Print logs from components | Boolean | |
| LOG_PATH | Location where log files are stored | String | `your-path/Flowise/logs` |
| LOG_LEVEL | Different levels of logs | Enum String: `error`, `info`, `verbose`, `debug` | `info` |
| LOG_JSON_SPACES | Spaces to beautify JSON logs | | 2 |
| APIKEY_STORAGE_TYPE | To store api keys on a JSON file or database. Default is `json` | Enum String: `json`, `db` | `json` |
| APIKEY_PATH | Location where api keys are saved when `APIKEY_STORAGE_TYPE` is `json` | String | `your-path/Flowise/packages/server` |
| TOOL_FUNCTION_BUILTIN_DEP | NodeJS built-in modules to be used for Tool Function | String | |
| TOOL_FUNCTION_EXTERNAL_DEP | External modules to be used for Tool Function | String | |
| DATABASE_TYPE | Type of database to store the flowise data | Enum String: `sqlite`, `mysql`, `postgres` | `sqlite` |
| DATABASE_PATH | Location where database is saved (When DATABASE_TYPE is sqlite) | String | `your-home-dir/.flowise` |
| DATABASE_HOST | Host URL or IP address (When DATABASE_TYPE is not sqlite) | String | |
| DATABASE_PORT | Database port (When DATABASE_TYPE is not sqlite) | String | |
| DATABASE_USER | Database username (When DATABASE_TYPE is not sqlite) | String | |
| DATABASE_PASSWORD | Database password (When DATABASE_TYPE is not sqlite) | String | |
| DATABASE_NAME | Database name (When DATABASE_TYPE is not sqlite) | String | |
| DATABASE_SSL_KEY_BASE64 | Database SSL client cert in base64 (takes priority over DATABASE_SSL) | Boolean | false |
| DATABASE_SSL                 | Database connection over SSL (When DATABASE_TYPE is postgres)                    | Boolean                                          | false                               |
| SECRETKEY_PATH | Location where encryption key (used to encrypt/decrypt credentials) is saved | String | `your-path/Flowise/packages/server` |
| FLOWISE_SECRETKEY_OVERWRITE | Encryption key to be used instead of the key stored in SECRETKEY_PATH | String | |
| DISABLE_FLOWISE_TELEMETRY | Turn off telemetry | Boolean | |
| MODEL_LIST_CONFIG_JSON | File path to load list of models from your local config file | String | `/your_model_list_config_file_path` |
| STORAGE_TYPE | Type of storage for uploaded files. default is `local` | Enum String: `s3`, `local` | `local` |
| BLOB_STORAGE_PATH | Local folder path where uploaded files are stored when `STORAGE_TYPE` is `local` | String | `your-home-dir/.flowise/storage` |
| S3_STORAGE_BUCKET_NAME | Bucket name to hold the uploaded files when `STORAGE_TYPE` is `s3` | String | |
| S3_STORAGE_ACCESS_KEY_ID | AWS Access Key | String | |
| S3_STORAGE_SECRET_ACCESS_KEY | AWS Secret Key | String | |
| S3_STORAGE_REGION | Region for S3 bucket | String | |
| S3_ENDPOINT_URL | Custom Endpoint for S3 | String | |
| S3_FORCE_PATH_STYLE | Set this to true to force the request to use path-style addressing | Boolean | false |
| SHOW_COMMUNITY_NODES | Show nodes created by community | Boolean | |
| DISABLED_NODES | Hide nodes from UI (comma separated list of node names) | String | |
| Variable | Description | Type | Default |
| ---------------------------------- | -------------------------------------------------------------------------------- | ------------------------------------------------ | ----------------------------------- |
| PORT | The HTTP port Flowise runs on | Number | 3000 |
| CORS_ORIGINS | The allowed origins for all cross-origin HTTP calls | String | |
| IFRAME_ORIGINS | The allowed origins for iframe src embedding | String | |
| FLOWISE_USERNAME | Username to login | String | |
| FLOWISE_PASSWORD | Password to login | String | |
| FLOWISE_FILE_SIZE_LIMIT | Upload File Size Limit | String | 50mb |
| DEBUG | Print logs from components | Boolean | |
| LOG_PATH | Location where log files are stored | String | `your-path/Flowise/logs` |
| LOG_LEVEL | Different levels of logs | Enum String: `error`, `info`, `verbose`, `debug` | `info` |
| LOG_JSON_SPACES | Spaces to beautify JSON logs | | 2 |
| APIKEY_STORAGE_TYPE | To store api keys on a JSON file or database. Default is `json` | Enum String: `json`, `db` | `json` |
| APIKEY_PATH | Location where api keys are saved when `APIKEY_STORAGE_TYPE` is `json` | String | `your-path/Flowise/packages/server` |
| TOOL_FUNCTION_BUILTIN_DEP | NodeJS built-in modules to be used for Tool Function | String | |
| TOOL_FUNCTION_EXTERNAL_DEP | External modules to be used for Tool Function | String | |
| DATABASE_TYPE | Type of database to store the flowise data | Enum String: `sqlite`, `mysql`, `postgres` | `sqlite` |
| DATABASE_PATH | Location where database is saved (When DATABASE_TYPE is sqlite) | String | `your-home-dir/.flowise` |
| DATABASE_HOST | Host URL or IP address (When DATABASE_TYPE is not sqlite) | String | |
| DATABASE_PORT | Database port (When DATABASE_TYPE is not sqlite) | String | |
| DATABASE_USER | Database username (When DATABASE_TYPE is not sqlite) | String | |
| DATABASE_PASSWORD | Database password (When DATABASE_TYPE is not sqlite) | String | |
| DATABASE_NAME | Database name (When DATABASE_TYPE is not sqlite) | String | |
| DATABASE_SSL_KEY_BASE64 | Database SSL client cert in base64 (takes priority over DATABASE_SSL) | Boolean | false |
| DATABASE_SSL                       | Database connection over SSL (When DATABASE_TYPE is postgres)                    | Boolean                                          | false                               |
| SECRETKEY_PATH | Location where encryption key (used to encrypt/decrypt credentials) is saved | String | `your-path/Flowise/packages/server` |
| FLOWISE_SECRETKEY_OVERWRITE | Encryption key to be used instead of the key stored in SECRETKEY_PATH | String | |
| MODEL_LIST_CONFIG_JSON | File path to load list of models from your local config file | String | `/your_model_list_config_file_path` |
| STORAGE_TYPE | Type of storage for uploaded files. default is `local` | Enum String: `s3`, `local`, `gcs` | `local` |
| BLOB_STORAGE_PATH | Local folder path where uploaded files are stored when `STORAGE_TYPE` is `local` | String | `your-home-dir/.flowise/storage` |
| S3_STORAGE_BUCKET_NAME | Bucket name to hold the uploaded files when `STORAGE_TYPE` is `s3` | String | |
| S3_STORAGE_ACCESS_KEY_ID | AWS Access Key | String | |
| S3_STORAGE_SECRET_ACCESS_KEY | AWS Secret Key | String | |
| S3_STORAGE_REGION | Region for S3 bucket | String | |
| S3_ENDPOINT_URL | Custom Endpoint for S3 | String | |
| S3_FORCE_PATH_STYLE | Set this to true to force the request to use path-style addressing | Boolean | false |
| GOOGLE_CLOUD_STORAGE_PROJ_ID | The GCP project id for cloud storage & logging when `STORAGE_TYPE` is `gcs` | String | |
| GOOGLE_CLOUD_STORAGE_CREDENTIAL | The credential key file path when `STORAGE_TYPE` is `gcs` | String | |
| GOOGLE_CLOUD_STORAGE_BUCKET_NAME | Bucket name to hold the uploaded files when `STORAGE_TYPE` is `gcs` | String | |
| GOOGLE_CLOUD_UNIFORM_BUCKET_ACCESS | Enable uniform bucket level access when `STORAGE_TYPE` is `gcs` | Boolean | true |
| SHOW_COMMUNITY_NODES | Show nodes created by community | Boolean | |
| DISABLED_NODES | Hide nodes from UI (comma separated list of node names) | String | |
You can also specify the env variables when using `npx`. For example:

View File

@ -12,6 +12,10 @@ RUN apk add --no-cache build-base cairo-dev pango-dev
# Install Chromium
RUN apk add --no-cache chromium
# Install curl for container-level health checks
# Fixes: https://github.com/FlowiseAI/Flowise/issues/4126
RUN apk add --no-cache curl
# Install PNPM globally
RUN npm install -g pnpm

View File

@ -1,8 +1,9 @@
<!-- markdownlint-disable MD030 -->
<img width="100%" src="https://github.com/FlowiseAI/Flowise/blob/main/images/flowise.png?raw=true"></a>
# Flowise - Build LLM Apps Easily
<p align="center">
<img src="https://github.com/FlowiseAI/Flowise/blob/main/images/flowise_white.svg#gh-light-mode-only">
<img src="https://github.com/FlowiseAI/Flowise/blob/main/images/flowise_dark.svg#gh-dark-mode-only">
</p>
[![Release Notes](https://img.shields.io/github/release/FlowiseAI/Flowise)](https://github.com/FlowiseAI/Flowise/releases)
[![Discord](https://img.shields.io/discord/1087698854775881778?label=Discord&logo=discord)](https://discord.gg/jbaHfsRVBW)
@ -10,11 +11,11 @@
[![GitHub star chart](https://img.shields.io/github/stars/FlowiseAI/Flowise?style=social)](https://star-history.com/#FlowiseAI/Flowise)
[![GitHub fork](https://img.shields.io/github/forks/FlowiseAI/Flowise?style=social)](https://github.com/FlowiseAI/Flowise/fork)
English | [中文](./i18n/README-ZH.md) | [日本語](./i18n/README-JA.md) | [한국어](./i18n/README-KR.md)
English | [繁體中文](./i18n/README-TW.md) | [简体中文](./i18n/README-ZH.md) | [日本語](./i18n/README-JA.md) | [한국어](./i18n/README-KR.md)
<h3>Drag & drop UI to build your customized LLM flow</h3>
<h3>Build AI Agents, Visually</h3>
<a href="https://github.com/FlowiseAI/Flowise">
<img width="100%" src="https://github.com/FlowiseAI/Flowise/blob/main/images/flowise.gif?raw=true"></a>
<img width="100%" src="https://github.com/FlowiseAI/Flowise/blob/main/images/flowise_agentflow.gif?raw=true"></a>
## ⚡Quick Start
@ -182,9 +183,9 @@ Deploy Flowise self-hosted in your existing infrastructure, we support various [
[![Deploy on Elestio](https://elest.io/images/logos/deploy-to-elestio-btn.png)](https://elest.io/open-source/flowiseai)
- [Sealos](https://cloud.sealos.io/?openapp=system-template%3FtemplateName%3Dflowise)
- [Sealos](https://template.sealos.io/deploy?templateName=flowise)
[![](https://raw.githubusercontent.com/labring-actions/templates/main/Deploy-on-Sealos.svg)](https://cloud.sealos.io/?openapp=system-template%3FtemplateName%3Dflowise)
[![Deploy on Sealos](https://sealos.io/Deploy-on-Sealos.svg)](https://template.sealos.io/deploy?templateName=flowise)
- [RepoCloud](https://repocloud.io/details/?app_id=29)

View File

@ -42,13 +42,11 @@ BLOB_STORAGE_PATH=/root/.flowise/storage
# LANGCHAIN_API_KEY=your_api_key
# LANGCHAIN_PROJECT=your_project
# DISABLE_FLOWISE_TELEMETRY=true
# Uncomment the following line to enable model list config, load the list of models from your local config file
# see https://raw.githubusercontent.com/FlowiseAI/Flowise/main/packages/components/models.json for the format
# MODEL_LIST_CONFIG_JSON=/your_model_list_config_file_path
# STORAGE_TYPE=local (local | s3)
# STORAGE_TYPE=local (local | s3 | gcs)
# BLOB_STORAGE_PATH=/your_storage_path/.flowise/storage
# S3_STORAGE_BUCKET_NAME=flowise
# S3_STORAGE_ACCESS_KEY_ID=<your-access-key>
@ -56,6 +54,10 @@ BLOB_STORAGE_PATH=/root/.flowise/storage
# S3_STORAGE_REGION=us-west-2
# S3_ENDPOINT_URL=<custom-s3-endpoint-url>
# S3_FORCE_PATH_STYLE=false
# GOOGLE_CLOUD_STORAGE_CREDENTIAL=/the/keyfilename/path
# GOOGLE_CLOUD_STORAGE_PROJ_ID=<your-gcp-project-id>
# GOOGLE_CLOUD_STORAGE_BUCKET_NAME=<the-bucket-name>
# GOOGLE_CLOUD_UNIFORM_BUCKET_ACCESS=true
# SHOW_COMMUNITY_NODES=true
# DISABLED_NODES=bufferMemory,chatOpenAI (comma separated list of node names to disable)
@ -86,6 +88,8 @@ BLOB_STORAGE_PATH=/root/.flowise/storage
# QUEUE_NAME=flowise-queue
# QUEUE_REDIS_EVENT_STREAM_MAX_LEN=100000
# WORKER_CONCURRENCY=100000
# REMOVE_ON_AGE=86400
# REMOVE_ON_COUNT=10000
# REDIS_URL=
# REDIS_HOST=localhost
# REDIS_PORT=6379
@ -94,4 +98,6 @@ BLOB_STORAGE_PATH=/root/.flowise/storage
# REDIS_TLS=
# REDIS_CERT=
# REDIS_KEY=
# REDIS_CA=
# REDIS_CA=
# REDIS_KEEP_ALIVE=
# ENABLE_BULLMQ_DASHBOARD=

View File

@ -13,7 +13,7 @@ RUN npm install -g flowise
FROM node:20-alpine
# Install runtime dependencies
RUN apk add --no-cache chromium git python3 py3-pip make g++ build-base cairo-dev pango-dev
RUN apk add --no-cache chromium git python3 py3-pip make g++ build-base cairo-dev pango-dev curl
# Set the environment variable for Puppeteer to find Chromium
ENV PUPPETEER_EXECUTABLE_PATH=/usr/bin/chromium-browser

View File

@ -28,7 +28,6 @@ services:
- LOG_LEVEL=${LOG_LEVEL}
- LOG_PATH=${LOG_PATH}
- BLOB_STORAGE_PATH=${BLOB_STORAGE_PATH}
- DISABLE_FLOWISE_TELEMETRY=${DISABLE_FLOWISE_TELEMETRY}
- MODEL_LIST_CONFIG_JSON=${MODEL_LIST_CONFIG_JSON}
- GLOBAL_AGENT_HTTP_PROXY=${GLOBAL_AGENT_HTTP_PROXY}
- GLOBAL_AGENT_HTTPS_PROXY=${GLOBAL_AGENT_HTTPS_PROXY}
@ -38,6 +37,8 @@ services:
- WORKER_CONCURRENCY=${WORKER_CONCURRENCY}
- QUEUE_NAME=${QUEUE_NAME}
- QUEUE_REDIS_EVENT_STREAM_MAX_LEN=${QUEUE_REDIS_EVENT_STREAM_MAX_LEN}
- REMOVE_ON_AGE=${REMOVE_ON_AGE}
- REMOVE_ON_COUNT=${REMOVE_ON_COUNT}
- REDIS_URL=${REDIS_URL}
- REDIS_HOST=${REDIS_HOST}
- REDIS_PORT=${REDIS_PORT}
@ -47,8 +48,16 @@ services:
- REDIS_CERT=${REDIS_CERT}
- REDIS_KEY=${REDIS_KEY}
- REDIS_CA=${REDIS_CA}
- REDIS_KEEP_ALIVE=${REDIS_KEEP_ALIVE}
- ENABLE_BULLMQ_DASHBOARD=${ENABLE_BULLMQ_DASHBOARD}
ports:
- '${PORT}:${PORT}'
healthcheck:
test: ['CMD', 'curl', '-f', 'http://localhost:${PORT}/api/v1/ping']
interval: 10s
timeout: 5s
retries: 5
start_period: 30s
volumes:
- ~/.flowise:/root/.flowise
entrypoint: /bin/sh -c "sleep 3; flowise start"

View File

@ -28,7 +28,6 @@ services:
- LOG_LEVEL=${LOG_LEVEL}
- LOG_PATH=${LOG_PATH}
- BLOB_STORAGE_PATH=${BLOB_STORAGE_PATH}
- DISABLE_FLOWISE_TELEMETRY=${DISABLE_FLOWISE_TELEMETRY}
- MODEL_LIST_CONFIG_JSON=${MODEL_LIST_CONFIG_JSON}
- GLOBAL_AGENT_HTTP_PROXY=${GLOBAL_AGENT_HTTP_PROXY}
- GLOBAL_AGENT_HTTPS_PROXY=${GLOBAL_AGENT_HTTPS_PROXY}
@ -38,6 +37,8 @@ services:
- WORKER_CONCURRENCY=${WORKER_CONCURRENCY}
- QUEUE_NAME=${QUEUE_NAME}
- QUEUE_REDIS_EVENT_STREAM_MAX_LEN=${QUEUE_REDIS_EVENT_STREAM_MAX_LEN}
- REMOVE_ON_AGE=${REMOVE_ON_AGE}
- REMOVE_ON_COUNT=${REMOVE_ON_COUNT}
- REDIS_URL=${REDIS_URL}
- REDIS_HOST=${REDIS_HOST}
- REDIS_PORT=${REDIS_PORT}
@ -47,6 +48,8 @@ services:
- REDIS_CERT=${REDIS_CERT}
- REDIS_KEY=${REDIS_KEY}
- REDIS_CA=${REDIS_CA}
- REDIS_KEEP_ALIVE=${REDIS_KEEP_ALIVE}
- ENABLE_BULLMQ_DASHBOARD=${ENABLE_BULLMQ_DASHBOARD}
ports:
- '${PORT}:${PORT}'
volumes:

View File

@ -140,7 +140,6 @@ Flowise 支持不同的环境变量来配置您的实例。您可以在 `package
| DATABASE_NAME | 数据库名称(当 DATABASE_TYPE 不是 sqlite 时) | 字符串 | |
| SECRETKEY_PATH | 保存加密密钥(用于加密/解密凭据)的位置 | 字符串 | `your-path/Flowise/packages/server` |
| FLOWISE_SECRETKEY_OVERWRITE | 加密密钥用于替代存储在 SECRETKEY_PATH 中的密钥 | 字符串 |
| DISABLE_FLOWISE_TELEMETRY | 关闭遥测 | 字符串 |
| MODEL_LIST_CONFIG_JSON | 加载模型的位置 | 字符 | `/your_model_list_config_file_path` |
| STORAGE_TYPE | 上传文件的存储类型 | 枚举字符串: `local`, `s3` | `local` |
| BLOB_STORAGE_PATH | 上传文件存储的本地文件夹路径, 当`STORAGE_TYPE`是`local` | 字符串 | `your-home-dir/.flowise/storage` |

View File

@ -1,8 +1,9 @@
<!-- markdownlint-disable MD030 -->
<img width="100%" src="https://github.com/FlowiseAI/Flowise/blob/main/images/flowise.png?raw=true"></a>
# Flowise - LLM アプリを簡単に構築
<p align="center">
<img src="https://github.com/FlowiseAI/Flowise/blob/main/images/flowise_white.svg#gh-light-mode-only">
<img src="https://github.com/FlowiseAI/Flowise/blob/main/images/flowise_dark.svg#gh-dark-mode-only">
</p>
[![Release Notes](https://img.shields.io/github/release/FlowiseAI/Flowise)](https://github.com/FlowiseAI/Flowise/releases)
[![Discord](https://img.shields.io/discord/1087698854775881778?label=Discord&logo=discord)](https://discord.gg/jbaHfsRVBW)
@ -10,11 +11,11 @@
[![GitHub star chart](https://img.shields.io/github/stars/FlowiseAI/Flowise?style=social)](https://star-history.com/#FlowiseAI/Flowise)
[![GitHub fork](https://img.shields.io/github/forks/FlowiseAI/Flowise?style=social)](https://github.com/FlowiseAI/Flowise/fork)
[English](../README.md) | [中文](./README-ZH.md) | 日本語 | [한국어](./README-KR.md)
[English](../README.md) | [繁體中文](./README-TW.md) | [简体中文](./README-ZH.md) | 日本語 | [한국어](./README-KR.md)
<h3>ドラッグ&ドロップでカスタマイズした LLM フローを構築できる UI</h3>
<h3>AIエージェントをビジュアルに構築</h3>
<a href="https://github.com/FlowiseAI/Flowise">
<img width="100%" src="https://github.com/FlowiseAI/Flowise/blob/main/images/flowise.gif?raw=true"></a>
<img width="100%" src="https://github.com/FlowiseAI/Flowise/blob/main/images/flowise_agentflow.gif?raw=true"></a>
## ⚡ クイックスタート

View File

@ -1,8 +1,9 @@
<!-- markdownlint-disable MD030 -->
<img width="100%" src="https://github.com/FlowiseAI/Flowise/blob/main/images/flowise.png?raw=true"></a>
# Flowise - 간편한 LLM 애플리케이션 제작
<p align="center">
<img src="https://github.com/FlowiseAI/Flowise/blob/main/images/flowise_white.svg#gh-light-mode-only">
<img src="https://github.com/FlowiseAI/Flowise/blob/main/images/flowise_dark.svg#gh-dark-mode-only">
</p>
[![Release Notes](https://img.shields.io/github/release/FlowiseAI/Flowise)](https://github.com/FlowiseAI/Flowise/releases)
[![Discord](https://img.shields.io/discord/1087698854775881778?label=Discord&logo=discord)](https://discord.gg/jbaHfsRVBW)
@ -10,11 +11,11 @@
[![GitHub star chart](https://img.shields.io/github/stars/FlowiseAI/Flowise?style=social)](https://star-history.com/#FlowiseAI/Flowise)
[![GitHub fork](https://img.shields.io/github/forks/FlowiseAI/Flowise?style=social)](https://github.com/FlowiseAI/Flowise/fork)
[English](../README.md) | [中文](./README-ZH.md) | [日本語](./README-JA.md) | 한국어
[English](../README.md) | [繁體中文](./README-TW.md) | [简体中文](./README-ZH.md) | [日本語](./README-JA.md) | 한국어
<h3>드래그 앤 드롭 UI로 맞춤형 LLM 플로우 구축하기</h3>
<h3>AI 에이전트를 시각적으로 구축하세요</h3>
<a href="https://github.com/FlowiseAI/Flowise">
<img width="100%" src="https://github.com/FlowiseAI/Flowise/blob/main/images/flowise.gif?raw=true"></a>
<img width="100%" src="https://github.com/FlowiseAI/Flowise/blob/main/images/flowise_agentflow.gif?raw=true"></a>
## ⚡빠른 시작 가이드

217
i18n/README-TW.md Normal file
View File

@ -0,0 +1,217 @@
<!-- markdownlint-disable MD030 -->
<p align="center">
<img src="https://github.com/FlowiseAI/Flowise/blob/main/images/flowise_white.svg#gh-light-mode-only">
<img src="https://github.com/FlowiseAI/Flowise/blob/main/images/flowise_dark.svg#gh-dark-mode-only">
</p>
[![Release Notes](https://img.shields.io/github/release/FlowiseAI/Flowise)](https://github.com/FlowiseAI/Flowise/releases)
[![Discord](https://img.shields.io/discord/1087698854775881778?label=Discord&logo=discord)](https://discord.gg/jbaHfsRVBW)
[![Twitter Follow](https://img.shields.io/twitter/follow/FlowiseAI?style=social)](https://twitter.com/FlowiseAI)
[![GitHub star chart](https://img.shields.io/github/stars/FlowiseAI/Flowise?style=social)](https://star-history.com/#FlowiseAI/Flowise)
[![GitHub fork](https://img.shields.io/github/forks/FlowiseAI/Flowise?style=social)](https://github.com/FlowiseAI/Flowise/fork)
[English](../README.md) | 繁體中文 | [简体中文](./README-ZH.md) | [日本語](./README-JA.md) | [한국어](./README-KR.md)
<h3>可視化建構 AI/LLM 流程</h3>
<a href="https://github.com/FlowiseAI/Flowise">
<img width="100%" src="https://github.com/FlowiseAI/Flowise/blob/main/images/flowise_agentflow.gif?raw=true"></a>
## ⚡ 快速開始
下載並安裝 [NodeJS](https://nodejs.org/en/download) >= 18.15.0
1. 安裝 Flowise
```bash
npm install -g flowise
```
2. 啟動 Flowise
```bash
npx flowise start
```
使用用戶名和密碼
```bash
npx flowise start --FLOWISE_USERNAME=user --FLOWISE_PASSWORD=1234
```
3. 打開 [http://localhost:3000](http://localhost:3000)
## 🐳 Docker
### Docker Compose
1. 克隆 Flowise 項目
2. 進入項目根目錄的 `docker` 文件夾
3. 複製 `.env.example` 文件,粘貼到相同位置,並重命名為 `.env` 文件
4. `docker compose up -d`
5. 打開 [http://localhost:3000](http://localhost:3000)
6. 您可以通過 `docker compose stop` 停止容器
### Docker 映像
1. 本地構建映像:
```bash
docker build --no-cache -t flowise .
```
2. 運行映像:
```bash
docker run -d --name flowise -p 3000:3000 flowise
```
3. 停止映像:
```bash
docker stop flowise
```
## 👨‍💻 開發者
Flowise 在單個 mono 存儲庫中有 3 個不同的模塊。
- `server`: 提供 API 邏輯的 Node 後端
- `ui`: React 前端
- `components`: 第三方節點集成
- `api-documentation`: 從 express 自動生成的 swagger-ui API 文檔
### 先決條件
- 安裝 [PNPM](https://pnpm.io/installation)
```bash
npm i -g pnpm
```
### 設置
1. 克隆存儲庫
```bash
git clone https://github.com/FlowiseAI/Flowise.git
```
2. 進入存儲庫文件夾
```bash
cd Flowise
```
3. 安裝所有模塊的所有依賴項:
```bash
pnpm install
```
4. 構建所有代碼:
```bash
pnpm build
```
<details>
<summary>退出代碼 134(JavaScript 堆內存不足)</summary>
如果在運行上述 `build` 腳本時遇到此錯誤,請嘗試增加 Node.js 堆大小並重新運行腳本:
export NODE_OPTIONS="--max-old-space-size=4096"
pnpm build
</details>
5. 啟動應用:
```bash
pnpm start
```
您現在可以訪問 [http://localhost:3000](http://localhost:3000)
6. 對於開發構建:
- 在 `packages/ui` 中創建 `.env` 文件並指定 `VITE_PORT`(參考 `.env.example`)
- 在 `packages/server` 中創建 `.env` 文件並指定 `PORT`(參考 `.env.example`)
- 運行
```bash
pnpm dev
```
任何代碼更改都會自動重新加載應用程序 [http://localhost:8080](http://localhost:8080)
## 🔒 認證
要啟用應用級別的身份驗證,請在 `packages/server` 中的 `.env` 文件中添加 `FLOWISE_USERNAME``FLOWISE_PASSWORD`
```
FLOWISE_USERNAME=user
FLOWISE_PASSWORD=1234
```
## 🌱 環境變量
Flowise 支持不同的環境變量來配置您的實例。您可以在 `packages/server` 文件夾中的 `.env` 文件中指定以下變量。閱讀 [更多](https://github.com/FlowiseAI/Flowise/blob/main/CONTRIBUTING.md#-env-variables)
## 📖 文檔
[Flowise 文檔](https://docs.flowiseai.com/)
## 🌐 自我托管
在您現有的基礎設施中部署 Flowise 自我托管,我們支持各種 [部署](https://docs.flowiseai.com/configuration/deployment)
- [AWS](https://docs.flowiseai.com/configuration/deployment/aws)
- [Azure](https://docs.flowiseai.com/configuration/deployment/azure)
- [Digital Ocean](https://docs.flowiseai.com/configuration/deployment/digital-ocean)
- [GCP](https://docs.flowiseai.com/configuration/deployment/gcp)
- [阿里雲](https://computenest.console.aliyun.com/service/instance/create/default?type=user&ServiceName=Flowise社区版)
- <details>
<summary>其他</summary>
- [Railway](https://docs.flowiseai.com/configuration/deployment/railway)
[![Deploy on Railway](https://railway.app/button.svg)](https://railway.app/template/pn4G8S?referralCode=WVNPD9)
- [Render](https://docs.flowiseai.com/configuration/deployment/render)
[![Deploy to Render](https://render.com/images/deploy-to-render-button.svg)](https://docs.flowiseai.com/configuration/deployment/render)
- [HuggingFace Spaces](https://docs.flowiseai.com/deployment/hugging-face)
<a href="https://huggingface.co/spaces/FlowiseAI/Flowise"><img src="https://huggingface.co/datasets/huggingface/badges/raw/main/open-in-hf-spaces-sm.svg" alt="HuggingFace Spaces"></a>
- [Elestio](https://elest.io/open-source/flowiseai)
[![Deploy on Elestio](https://elest.io/images/logos/deploy-to-elestio-btn.png)](https://elest.io/open-source/flowiseai)
- [Sealos](https://cloud.sealos.io/?openapp=system-template%3FtemplateName%3Dflowise)
[![](https://raw.githubusercontent.com/labring-actions/templates/main/Deploy-on-Sealos.svg)](https://cloud.sealos.io/?openapp=system-template%3FtemplateName%3Dflowise)
- [RepoCloud](https://repocloud.io/details/?app_id=29)
[![Deploy on RepoCloud](https://d16t0pc4846x52.cloudfront.net/deploy.png)](https://repocloud.io/details/?app_id=29)
</details>
## ☁️ Flowise 雲
[開始使用 Flowise 雲](https://flowiseai.com/)
## 🙋 支持
隨時在 [討論](https://github.com/FlowiseAI/Flowise/discussions) 中提出任何問題、提出問題和請求新功能
## 🙌 貢獻
感謝這些出色的貢獻者
<a href="https://github.com/FlowiseAI/Flowise/graphs/contributors">
<img src="https://contrib.rocks/image?repo=FlowiseAI/Flowise" />
</a>
請參閱 [貢獻指南](CONTRIBUTING.md)。如果您有任何問題或問題,請通過 [Discord](https://discord.gg/jbaHfsRVBW) 與我們聯繫。
[![Star History Chart](https://api.star-history.com/svg?repos=FlowiseAI/Flowise&type=Timeline)](https://star-history.com/#FlowiseAI/Flowise&Date)
## 📄 許可證
此存儲庫中的源代碼根據 [Apache 許可證版本 2.0](LICENSE.md) 提供。

View File

@ -1,8 +1,9 @@
<!-- markdownlint-disable MD030 -->
<img width="100%" src="https://github.com/FlowiseAI/Flowise/blob/main/images/flowise.png?raw=true"></a>
# Flowise - 轻松构建 LLM 应用程序
<p align="center">
<img src="https://github.com/FlowiseAI/Flowise/blob/main/images/flowise_white.svg#gh-light-mode-only">
<img src="https://github.com/FlowiseAI/Flowise/blob/main/images/flowise_dark.svg#gh-dark-mode-only">
</p>
[![发布说明](https://img.shields.io/github/release/FlowiseAI/Flowise)](https://github.com/FlowiseAI/Flowise/releases)
[![Discord](https://img.shields.io/discord/1087698854775881778?label=Discord&logo=discord)](https://discord.gg/jbaHfsRVBW)
@ -10,11 +11,11 @@
[![GitHub星图](https://img.shields.io/github/stars/FlowiseAI/Flowise?style=social)](https://star-history.com/#FlowiseAI/Flowise)
[![GitHub分支](https://img.shields.io/github/forks/FlowiseAI/Flowise?style=social)](https://github.com/FlowiseAI/Flowise/fork)
[English](../README.md) | 中文 | [日本語](./README-JA.md) | [한국어](./README-KR.md)
[English](../README.md) | [繁體中文](./README-TW.md) | 简体中文 | [日本語](./README-JA.md) | [한국어](./README-KR.md)
<h3>拖放界面构建定制化的LLM流程</h3>
<h3>可视化构建 AI/LLM 流程</h3>
<a href="https://github.com/FlowiseAI/Flowise">
<img width="100%" src="https://github.com/FlowiseAI/Flowise/blob/main/images/flowise.gif?raw=true"></a>
<img width="100%" src="https://github.com/FlowiseAI/Flowise/blob/main/images/flowise_agentflow.gif?raw=true"></a>
## ⚡ 快速入门
@ -170,9 +171,9 @@ Flowise 支持不同的环境变量来配置您的实例。您可以在 `package
[![Deploy](https://pub-da36157c854648669813f3f76c526c2b.r2.dev/deploy-on-elestio-black.png)](https://elest.io/open-source/flowiseai)
- [Sealos](https://cloud.sealos.io/?openapp=system-template%3FtemplateName%3Dflowise)
- [Sealos](https://template.sealos.io/deploy?templateName=flowise)
[![部署到 Sealos](https://raw.githubusercontent.com/labring-actions/templates/main/Deploy-on-Sealos.svg)](https://cloud.sealos.io/?openapp=system-template%3FtemplateName%3Dflowise)
[![部署到 Sealos](https://sealos.io/Deploy-on-Sealos.svg)](https://template.sealos.io/deploy?templateName=flowise)
- [RepoCloud](https://repocloud.io/details/?app_id=29)

Binary file not shown.

After

Width:  |  Height:  |  Size: 14 MiB

1
images/flowise_dark.svg Normal file

File diff suppressed because one or more lines are too long

After

Width:  |  Height:  |  Size: 40 KiB

1
images/flowise_white.svg Normal file

File diff suppressed because one or more lines are too long

After

Width:  |  Height:  |  Size: 40 KiB

View File

@ -1,6 +1,6 @@
{
"name": "flowise",
"version": "2.2.5",
"version": "3.0.0",
"private": true,
"homepage": "https://flowiseai.com",
"workspaces": [
@ -13,7 +13,7 @@
"scripts": {
"build": "turbo run build",
"build-force": "pnpm clean && turbo run build --force",
"dev": "turbo run dev --parallel",
"dev": "turbo run dev --parallel --no-cache",
"start": "run-script-os",
"start:windows": "cd packages/server/bin && run start",
"start:default": "cd packages/server/bin && ./run start",
@ -62,7 +62,20 @@
"sqlite3"
],
"overrides": {
"set-value": "^3.0.3"
"axios": "1.7.9",
"body-parser": "2.0.2",
"braces": "3.0.3",
"cross-spawn": "7.0.6",
"glob-parent": "6.0.2",
"http-proxy-middleware": "3.0.3",
"json5": "2.2.3",
"nth-check": "2.1.1",
"path-to-regexp": "0.1.12",
"prismjs": "1.29.0",
"semver": "7.7.1",
"set-value": "4.1.0",
"unset-value": "2.0.1",
"webpack-dev-middleware": "7.4.2"
}
},
"engines": {
@ -70,11 +83,11 @@
"pnpm": ">=9"
},
"resolutions": {
"@google/generative-ai": "^0.15.0",
"@google/generative-ai": "^0.24.0",
"@grpc/grpc-js": "^1.10.10",
"@langchain/core": "0.3.37",
"@qdrant/openapi-typescript-fetch": "1.2.6",
"openai": "4.82.0",
"openai": "4.96.0",
"protobufjs": "7.4.0"
},
"eslintIgnore": [

View File

@ -5,7 +5,6 @@
"scripts": {
"build": "tsc",
"start": "node dist/index.js",
"dev": "concurrently \"tsc-watch --noClear -p ./tsconfig.json\" \"nodemon\"",
"lint": "eslint . --ext ts,tsx --report-unused-disable-directives --max-warnings 0"
},
"license": "SEE LICENSE IN LICENSE.md",

View File

@ -6,7 +6,7 @@
Flowise 的应用集成。包含节点和凭据。
![Flowise](https://github.com/FlowiseAI/Flowise/blob/main/images/flowise.gif?raw=true)
![Flowise](https://github.com/FlowiseAI/Flowise/blob/main/images/flowise_agentflow.gif?raw=true)
安装:

View File

@ -6,7 +6,7 @@ English | [中文](./README-ZH.md)
Apps integration for Flowise. Contain Nodes and Credentials.
![Flowise](https://github.com/FlowiseAI/Flowise/blob/main/images/flowise.gif?raw=true)
![Flowise](https://github.com/FlowiseAI/Flowise/blob/main/images/flowise_agentflow.gif?raw=true)
Install:

View File

@ -0,0 +1,28 @@
import { INodeParams, INodeCredential } from '../src/Interface'
// Credential definition exposing a key/value pair; the value is entered as a
// masked password field, the key as plain text.
class HTTPApiKeyCredential implements INodeCredential {
    label: string
    name: string
    version: number
    inputs: INodeParams[]

    constructor() {
        const fields: INodeParams[] = [
            { label: 'Key', name: 'key', type: 'string' },
            { label: 'Value', name: 'value', type: 'password' }
        ]
        this.label = 'HTTP Api Key'
        this.name = 'httpApiKey'
        this.version = 1.0
        this.inputs = fields
    }
}

module.exports = { credClass: HTTPApiKeyCredential }

View File

@ -0,0 +1,28 @@
import { INodeParams, INodeCredential } from '../src/Interface'
// Credential definition holding a username/password pair for HTTP Basic Auth;
// the password field is masked in the UI.
class HttpBasicAuthCredential implements INodeCredential {
    label = 'HTTP Basic Auth'
    name = 'httpBasicAuth'
    version = 1.0
    inputs: INodeParams[]

    constructor() {
        this.inputs = [
            {
                label: 'Basic Auth Username',
                name: 'basicAuthUsername',
                type: 'string'
            },
            {
                label: 'Basic Auth Password',
                name: 'basicAuthPassword',
                type: 'password'
            }
        ]
    }
}

module.exports = { credClass: HttpBasicAuthCredential }

View File

@ -0,0 +1,23 @@
import { INodeParams, INodeCredential } from '../src/Interface'
// Credential definition with a single masked token field.
class HTTPBearerTokenCredential implements INodeCredential {
    label: string
    name: string
    version: number
    inputs: INodeParams[]

    constructor() {
        this.version = 1.0
        this.name = 'httpBearerToken'
        this.label = 'HTTP Bearer Token'
        // Single input; token is stored as a password-type field.
        this.inputs = [{ label: 'Token', name: 'token', type: 'password' }]
    }
}

module.exports = { credClass: HTTPBearerTokenCredential }

View File

@ -0,0 +1,33 @@
import { INodeParams, INodeCredential } from '../src/Interface'
// Credential definition for the Jira API: an Atlassian account username plus
// an access token, with the token stored as a masked password field.
class JiraApi implements INodeCredential {
    label: string
    name: string
    version: number
    description: string
    inputs: INodeParams[]

    constructor() {
        this.label = 'Jira API'
        this.name = 'jiraApi'
        this.version = 1.0
        // Fix: the linked guide is Atlassian's token page, not Github
        // (description previously said "on Github" — a copy-paste error).
        this.description =
            'Refer to <a target="_blank" href="https://support.atlassian.com/atlassian-account/docs/manage-api-tokens-for-your-atlassian-account/">official guide</a> on how to get accessToken on Atlassian'
        this.inputs = [
            {
                label: 'User Name',
                name: 'username',
                type: 'string',
                placeholder: 'username@example.com'
            },
            {
                label: 'Access Token',
                name: 'accessToken',
                type: 'password',
                placeholder: '<JIRA_ACCESS_TOKEN>'
            }
        ]
    }
}

module.exports = { credClass: JiraApi }

View File

@ -0,0 +1,23 @@
import { INodeParams, INodeCredential } from '../src/Interface'
// Credential definition with a single masked API-key field for LiteLLM.
class LitellmApi implements INodeCredential {
    label: string
    name: string
    version: number
    inputs: INodeParams[]

    constructor() {
        const apiKeyField: INodeParams = {
            label: 'API Key',
            name: 'litellmApiKey',
            type: 'password'
        }
        this.label = 'Litellm API'
        this.name = 'litellmApi'
        this.version = 1.0
        this.inputs = [apiKeyField]
    }
}

module.exports = { credClass: LitellmApi }

View File

@ -0,0 +1,27 @@
import { INodeParams, INodeCredential } from '../src/Interface'
// Credential definition for the Mem0 Memory API: a single masked API key,
// with a description pointing users at the Mem0 platform settings page.
class Mem0MemoryApi implements INodeCredential {
    label = 'Mem0 Memory API'
    name = 'mem0MemoryApi'
    version = 1.0
    description: string
    inputs: INodeParams[]

    constructor() {
        this.description =
            'Visit <a target="_blank" href="https://app.mem0.ai/settings/api-keys">Mem0 Platform</a> to get your API credentials'
        this.inputs = [
            {
                label: 'API Key',
                name: 'apiKey',
                type: 'password',
                description: 'API Key from Mem0 dashboard'
            }
        ]
    }
}

module.exports = { credClass: Mem0MemoryApi }

View File

@ -1,6 +1,6 @@
import { INodeParams, INodeCredential } from '../src/Interface'
import { INodeCredential, INodeParams } from '../src/Interface'
class NvdiaNIMApi implements INodeCredential {
class NvidiaNIMApi implements INodeCredential {
label: string
name: string
version: number
@ -8,17 +8,17 @@ class NvdiaNIMApi implements INodeCredential {
inputs: INodeParams[]
constructor() {
this.label = 'Nvdia NIM API Key'
this.name = 'nvdiaNIMApi'
this.label = 'NVIDIA NGC API Key'
this.name = 'nvidiaNIMApi'
this.version = 1.0
this.inputs = [
{
label: 'Nvdia NIM API Key',
name: 'nvdiaNIMApiKey',
label: 'NVIDIA NGC API Key',
name: 'nvidiaNIMApiKey',
type: 'password'
}
]
}
}
module.exports = { credClass: NvdiaNIMApi }
module.exports = { credClass: NvidiaNIMApi }

View File

@ -0,0 +1,39 @@
import { INodeParams, INodeCredential } from '../src/Interface'

/**
 * Credential definition for the Opik observability platform.
 * Collects the API key plus the server URL and workspace traces are sent to.
 */
class OpikApi implements INodeCredential {
    label = 'Opik API'
    name = 'opikApi'
    version = 1.0
    description =
        'Refer to <a target="_blank" href="https://www.comet.com/docs/opik/tracing/sdk_configuration">Opik documentation</a> on how to configure Opik credentials'
    inputs: INodeParams[] = [
        {
            label: 'API Key',
            name: 'opikApiKey',
            type: 'password',
            placeholder: '<OPIK_API_KEY>'
        },
        {
            label: 'URL',
            name: 'opikUrl',
            type: 'string',
            placeholder: 'https://www.comet.com/opik/api'
        },
        {
            label: 'Workspace',
            name: 'opikWorkspace',
            type: 'string',
            placeholder: 'default'
        }
    ]
}

module.exports = { credClass: OpikApi }

View File

@ -0,0 +1,27 @@
import { INodeParams, INodeCredential } from '../src/Interface'

/**
 * Credential definition for the Perplexity API.
 * A single masked field holds the API key.
 */
class PerplexityApi implements INodeCredential {
    label: string
    name: string
    version: number
    description: string
    inputs: INodeParams[]

    constructor() {
        this.label = 'Perplexity API'
        this.name = 'perplexityApi'
        this.version = 1.0
        this.description =
            'Refer to <a target="_blank" href="https://docs.perplexity.ai/docs/getting-started">official guide</a> on how to get API key'

        const apiKeyField: INodeParams = {
            label: 'Perplexity API Key',
            name: 'perplexityApiKey',
            type: 'password',
            placeholder: '<PERPLEXITY_API_KEY>'
        }
        this.inputs = [apiKeyField]
    }
}

module.exports = { credClass: PerplexityApi }

View File

@ -0,0 +1,25 @@
import { INodeParams, INodeCredential } from '../src/Interface'

/**
 * Credential definition for connecting to Postgres via a single connection URL.
 */
class PostgresUrl implements INodeCredential {
    label: string
    name: string
    version: number
    description: string
    inputs: INodeParams[]

    constructor() {
        this.label = 'Postgres URL'
        // NOTE(review): identifier is PascalCase unlike sibling credentials
        // (e.g. 'slackApi'); kept as-is since saved credentials reference it.
        this.name = 'PostgresUrl'
        this.version = 1.0

        const urlField: INodeParams = {
            label: 'Postgres URL',
            name: 'postgresUrl',
            type: 'string',
            placeholder: 'postgresql://localhost/mydb'
        }
        this.inputs = [urlField]
    }
}

module.exports = { credClass: PostgresUrl }

View File

@ -0,0 +1,32 @@
import { INodeParams, INodeCredential } from '../src/Interface'

/**
 * Credential definition for the Slack API (MCP server usage).
 * Requires the bot token and the workspace team ID.
 */
class SlackApi implements INodeCredential {
    label: string
    name: string
    version: number
    description: string
    inputs: INodeParams[]

    constructor() {
        this.label = 'Slack API'
        this.name = 'slackApi'
        this.version = 1.0
        this.description =
            'Refer to <a target="_blank" href="https://github.com/modelcontextprotocol/servers/tree/main/src/slack">official guide</a> on how to get botToken and teamId on Slack'

        const botTokenField: INodeParams = {
            label: 'Bot Token',
            name: 'botToken',
            type: 'password'
        }
        const teamIdField: INodeParams = {
            label: 'Team ID',
            name: 'teamId',
            type: 'string',
            placeholder: '<SLACK_TEAM_ID>'
        }
        this.inputs = [botTokenField, teamIdField]
    }
}

module.exports = { credClass: SlackApi }

View File

@ -10,8 +10,8 @@ class TavilyApi implements INodeCredential {
constructor() {
this.label = 'Tavily API'
this.name = 'tavilyApi'
this.version = 1.0
this.description = 'Tavily API is a real-time API to access Google search results'
this.version = 1.1
this.description = 'Tavily API is a search engine designed for LLMs and AI agents'
this.inputs = [
{
label: 'Tavily Api Key',

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,350 @@
import { CommonType, ICommonObject, ICondition, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'

/**
 * Agentflow node that routes execution into one of several output branches
 * based on a list of If/Else comparisons over string, number or boolean values.
 * The first fulfilled condition selects the branch; otherwise the ELSE branch fires.
 */
class Condition_Agentflow implements INode {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    color: string
    tags: string[]
    baseClasses: string[]
    inputs: INodeParams[]
    outputs: INodeOutputsValue[]

    constructor() {
        this.label = 'Condition'
        this.name = 'conditionAgentflow'
        this.version = 1.0
        this.type = 'Condition'
        this.category = 'Agent Flows'
        this.description = `Split flows based on If Else conditions`
        this.baseClasses = [this.type]
        this.color = '#FFB938'
        this.inputs = [
            {
                label: 'Conditions',
                name: 'conditions',
                type: 'array',
                description: 'Values to compare',
                acceptVariable: true,
                default: [
                    {
                        type: 'string',
                        value1: '',
                        operation: 'equal',
                        value2: ''
                    }
                ],
                array: [
                    {
                        label: 'Type',
                        name: 'type',
                        type: 'options',
                        options: [
                            {
                                label: 'String',
                                name: 'string'
                            },
                            {
                                label: 'Number',
                                name: 'number'
                            },
                            {
                                label: 'Boolean',
                                name: 'boolean'
                            }
                        ],
                        default: 'string'
                    },
                    /////////////////////////////////////// STRING ////////////////////////////////////////
                    {
                        label: 'Value 1',
                        name: 'value1',
                        type: 'string',
                        default: '',
                        description: 'First value to be compared with',
                        acceptVariable: true,
                        show: {
                            'conditions[$index].type': 'string'
                        }
                    },
                    {
                        label: 'Operation',
                        name: 'operation',
                        type: 'options',
                        options: [
                            {
                                label: 'Contains',
                                name: 'contains'
                            },
                            {
                                label: 'Ends With',
                                name: 'endsWith'
                            },
                            {
                                label: 'Equal',
                                name: 'equal'
                            },
                            {
                                label: 'Not Contains',
                                name: 'notContains'
                            },
                            {
                                label: 'Not Equal',
                                name: 'notEqual'
                            },
                            {
                                label: 'Regex',
                                name: 'regex'
                            },
                            {
                                label: 'Starts With',
                                name: 'startsWith'
                            },
                            {
                                label: 'Is Empty',
                                name: 'isEmpty'
                            },
                            {
                                label: 'Not Empty',
                                name: 'notEmpty'
                            }
                        ],
                        default: 'equal',
                        description: 'Type of operation',
                        show: {
                            'conditions[$index].type': 'string'
                        }
                    },
                    {
                        label: 'Value 2',
                        name: 'value2',
                        type: 'string',
                        default: '',
                        description: 'Second value to be compared with',
                        acceptVariable: true,
                        show: {
                            'conditions[$index].type': 'string'
                        },
                        hide: {
                            'conditions[$index].operation': ['isEmpty', 'notEmpty']
                        }
                    },
                    /////////////////////////////////////// NUMBER ////////////////////////////////////////
                    {
                        label: 'Value 1',
                        name: 'value1',
                        type: 'number',
                        default: '',
                        description: 'First value to be compared with',
                        acceptVariable: true,
                        show: {
                            'conditions[$index].type': 'number'
                        }
                    },
                    {
                        label: 'Operation',
                        name: 'operation',
                        type: 'options',
                        options: [
                            {
                                label: 'Smaller',
                                name: 'smaller'
                            },
                            {
                                label: 'Smaller Equal',
                                name: 'smallerEqual'
                            },
                            {
                                label: 'Equal',
                                name: 'equal'
                            },
                            {
                                label: 'Not Equal',
                                name: 'notEqual'
                            },
                            {
                                label: 'Larger',
                                name: 'larger'
                            },
                            {
                                label: 'Larger Equal',
                                name: 'largerEqual'
                            },
                            {
                                label: 'Is Empty',
                                name: 'isEmpty'
                            },
                            {
                                label: 'Not Empty',
                                name: 'notEmpty'
                            }
                        ],
                        default: 'equal',
                        description: 'Type of operation',
                        show: {
                            'conditions[$index].type': 'number'
                        }
                    },
                    {
                        label: 'Value 2',
                        name: 'value2',
                        type: 'number',
                        default: 0,
                        description: 'Second value to be compared with',
                        acceptVariable: true,
                        show: {
                            'conditions[$index].type': 'number'
                        }
                    },
                    /////////////////////////////////////// BOOLEAN ////////////////////////////////////////
                    {
                        label: 'Value 1',
                        name: 'value1',
                        type: 'boolean',
                        default: false,
                        description: 'First value to be compared with',
                        show: {
                            'conditions[$index].type': 'boolean'
                        }
                    },
                    {
                        label: 'Operation',
                        name: 'operation',
                        type: 'options',
                        options: [
                            {
                                label: 'Equal',
                                name: 'equal'
                            },
                            {
                                label: 'Not Equal',
                                name: 'notEqual'
                            }
                        ],
                        default: 'equal',
                        description: 'Type of operation',
                        show: {
                            'conditions[$index].type': 'boolean'
                        }
                    },
                    {
                        label: 'Value 2',
                        name: 'value2',
                        type: 'boolean',
                        default: false,
                        description: 'Second value to be compared with',
                        show: {
                            'conditions[$index].type': 'boolean'
                        }
                    }
                ]
            }
        ]
        this.outputs = [
            {
                label: '0',
                name: '0',
                description: 'Condition 0'
            },
            {
                label: '1',
                name: '1',
                description: 'Else'
            }
        ]
    }

    /**
     * Evaluates the configured conditions in order, marks the first fulfilled
     * one, and appends a synthetic ELSE condition that is fulfilled only when
     * no configured condition matched.
     *
     * @param nodeData - node configuration; `inputs.conditions` may arrive as a JSON string or an array
     * @param _ - unused question/input string
     * @param options - runtime context; only `agentflowRuntime.state` is read
     * @returns object with the node id/name, the original conditions as input,
     *          the evaluated conditions (with `isFulfilled` flags) as output, and the flow state
     */
    async run(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
        const state = options.agentflowRuntime?.state as ICommonObject

        // Comparison primitives, keyed by the operation name configured on each condition
        const compareOperationFunctions: {
            [key: string]: (value1: CommonType, value2: CommonType) => boolean
        } = {
            contains: (value1: CommonType, value2: CommonType) => (value1 || '').toString().includes((value2 || '').toString()),
            notContains: (value1: CommonType, value2: CommonType) => !(value1 || '').toString().includes((value2 || '').toString()),
            endsWith: (value1: CommonType, value2: CommonType) => (value1 as string).endsWith(value2 as string),
            equal: (value1: CommonType, value2: CommonType) => value1 === value2,
            notEqual: (value1: CommonType, value2: CommonType) => value1 !== value2,
            larger: (value1: CommonType, value2: CommonType) => (Number(value1) || 0) > (Number(value2) || 0),
            largerEqual: (value1: CommonType, value2: CommonType) => (Number(value1) || 0) >= (Number(value2) || 0),
            smaller: (value1: CommonType, value2: CommonType) => (Number(value1) || 0) < (Number(value2) || 0),
            smallerEqual: (value1: CommonType, value2: CommonType) => (Number(value1) || 0) <= (Number(value2) || 0),
            startsWith: (value1: CommonType, value2: CommonType) => (value1 as string).startsWith(value2 as string),
            isEmpty: (value1: CommonType) => [undefined, null, ''].includes(value1 as string),
            notEmpty: (value1: CommonType) => ![undefined, null, ''].includes(value1 as string)
        }

        const _conditions = nodeData.inputs?.conditions
        const conditions: ICondition[] = typeof _conditions === 'string' ? JSON.parse(_conditions) : _conditions

        // Untouched snapshot for the "input" section of the result.
        // Fixed: previously `{ ...conditions }` object-spread the array into an
        // index-keyed object; array spread keeps it an array.
        const initialConditions = [...conditions]

        for (const [index, condition] of conditions.entries()) {
            const _value1 = condition.value1
            const _value2 = condition.value2
            const operation = condition.operation

            // Coerce both operands according to the condition's declared type
            let value1: CommonType
            let value2: CommonType
            switch (condition.type) {
                case 'boolean':
                    value1 = _value1
                    value2 = _value2
                    break
                case 'number':
                    value1 = parseFloat(_value1 as string) || 0
                    value2 = parseFloat(_value2 as string) || 0
                    break
                default: // string
                    value1 = _value1 as string
                    value2 = _value2 as string
            }

            const compareFn = compareOperationFunctions[operation]
            if (!compareFn) {
                // Fail with a descriptive message instead of an opaque TypeError
                throw new Error(`Unknown condition operation: ${operation}`)
            }

            if (compareFn(value1, value2)) {
                // Mark the first fulfilled condition and stop evaluating
                conditions[index] = { ...condition, isFulfilled: true }
                break
            }
        }

        // Append the ELSE branch; it is fulfilled only when no configured condition matched
        conditions.push({
            type: 'string',
            value1: '',
            operation: 'equal',
            value2: '',
            isFulfilled: !conditions.some((c) => c.isFulfilled)
        })

        const returnOutput = {
            id: nodeData.id,
            name: this.name,
            input: { conditions: initialConditions },
            output: { conditions },
            state
        }

        return returnOutput
    }
}

module.exports = { nodeClass: Condition_Agentflow }

View File

@ -0,0 +1,600 @@
import { AnalyticHandler } from '../../../src/handler'
import { ICommonObject, INode, INodeData, INodeOptionsValue, INodeOutputsValue, INodeParams } from '../../../src/Interface'
import { AIMessageChunk, BaseMessageLike } from '@langchain/core/messages'
import {
    getPastChatHistoryImageMessages,
    getUniqueImageMessages,
    processMessagesWithImages,
    replaceBase64ImagesWithFileReferences
} from '../utils'
import { CONDITION_AGENT_SYSTEM_PROMPT, DEFAULT_SUMMARIZER_TEMPLATE } from '../prompt'
import { BaseChatModel } from '@langchain/core/language_models/chat_models'

/**
 * Agentflow node that asks an LLM to classify the input against a list of
 * user-defined scenarios and routes the flow to the branch of the first
 * matching scenario (or the ELSE branch when nothing matches).
 * Few-shot prompting plus JSON-markdown parsing is used to coerce the model
 * into answering with `{"output": "<scenario>"}`.
 */
class ConditionAgent_Agentflow implements INode {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    color: string
    tags: string[]
    baseClasses: string[]
    inputs: INodeParams[]
    outputs: INodeOutputsValue[]

    constructor() {
        this.label = 'Condition Agent'
        this.name = 'conditionAgentAgentflow'
        this.version = 1.0
        this.type = 'ConditionAgent'
        this.category = 'Agent Flows'
        this.description = `Utilize an agent to split flows based on dynamic conditions`
        this.baseClasses = [this.type]
        this.color = '#ff8fab'
        this.inputs = [
            {
                label: 'Model',
                name: 'conditionAgentModel',
                type: 'asyncOptions',
                loadMethod: 'listModels',
                loadConfig: true
            },
            {
                label: 'Instructions',
                name: 'conditionAgentInstructions',
                type: 'string',
                description: 'A general instructions of what the condition agent should do',
                rows: 4,
                acceptVariable: true,
                placeholder: 'Determine if the user is interested in learning about AI'
            },
            {
                label: 'Input',
                name: 'conditionAgentInput',
                type: 'string',
                description: 'Input to be used for the condition agent',
                rows: 4,
                acceptVariable: true,
                default: '<p><span class="variable" data-type="mention" data-id="question" data-label="question">{{ question }}</span> </p>'
            },
            {
                label: 'Scenarios',
                name: 'conditionAgentScenarios',
                description: 'Define the scenarios that will be used as the conditions to split the flow',
                type: 'array',
                array: [
                    {
                        label: 'Scenario',
                        name: 'scenario',
                        type: 'string',
                        placeholder: 'User is asking for a pizza'
                    }
                ],
                default: [
                    {
                        scenario: ''
                    },
                    {
                        scenario: ''
                    }
                ]
            }
            // Memory-related inputs are intentionally disabled (see commented block below);
            // the run() method still contains the handling code behind `enableMemory`.
            /*{
                label: 'Enable Memory',
                name: 'conditionAgentEnableMemory',
                type: 'boolean',
                description: 'Enable memory for the conversation thread',
                default: true,
                optional: true
            },
            {
                label: 'Memory Type',
                name: 'conditionAgentMemoryType',
                type: 'options',
                options: [
                    {
                        label: 'All Messages',
                        name: 'allMessages',
                        description: 'Retrieve all messages from the conversation'
                    },
                    {
                        label: 'Window Size',
                        name: 'windowSize',
                        description: 'Uses a fixed window size to surface the last N messages'
                    },
                    {
                        label: 'Conversation Summary',
                        name: 'conversationSummary',
                        description: 'Summarizes the whole conversation'
                    },
                    {
                        label: 'Conversation Summary Buffer',
                        name: 'conversationSummaryBuffer',
                        description: 'Summarize conversations once token limit is reached. Default to 2000'
                    }
                ],
                optional: true,
                default: 'allMessages',
                show: {
                    conditionAgentEnableMemory: true
                }
            },
            {
                label: 'Window Size',
                name: 'conditionAgentMemoryWindowSize',
                type: 'number',
                default: '20',
                description: 'Uses a fixed window size to surface the last N messages',
                show: {
                    conditionAgentMemoryType: 'windowSize'
                }
            },
            {
                label: 'Max Token Limit',
                name: 'conditionAgentMemoryMaxTokenLimit',
                type: 'number',
                default: '2000',
                description: 'Summarize conversations once token limit is reached. Default to 2000',
                show: {
                    conditionAgentMemoryType: 'conversationSummaryBuffer'
                }
            }*/
        ]
        this.outputs = [
            {
                label: '0',
                name: '0',
                description: 'Condition 0'
            },
            {
                label: '1',
                name: '1',
                description: 'Else'
            }
        ]
    }

    //@ts-ignore
    loadMethods = {
        // Lists every registered Chat Model component (LlamaIndex-tagged ones excluded)
        // as a selectable option for the Model input.
        async listModels(_: INodeData, options: ICommonObject): Promise<INodeOptionsValue[]> {
            const componentNodes = options.componentNodes as {
                [key: string]: INode
            }

            const returnOptions: INodeOptionsValue[] = []
            for (const nodeName in componentNodes) {
                const componentNode = componentNodes[nodeName]
                if (componentNode.category === 'Chat Models') {
                    if (componentNode.tags?.includes('LlamaIndex')) {
                        continue
                    }
                    returnOptions.push({
                        label: componentNode.label,
                        name: nodeName,
                        imageSrc: componentNode.icon
                    })
                }
            }
            return returnOptions
        }
    }

    /**
     * Extracts and parses a JSON object from an LLM response that may be
     * wrapped in markdown code fences (```json ... ```), backticks, or given
     * as a bare object.
     *
     * @throws Error when no JSON block is found or the extracted text is invalid JSON
     */
    private parseJsonMarkdown(jsonString: string): any {
        // Strip whitespace
        jsonString = jsonString.trim()

        // Candidate opening/closing delimiters, tried in order of specificity
        const starts = ['```json', '```', '``', '`', '{']
        const ends = ['```', '``', '`', '}']

        let startIndex = -1
        let endIndex = -1

        // Find start of JSON
        for (const s of starts) {
            startIndex = jsonString.indexOf(s)
            if (startIndex !== -1) {
                // Skip past the fence itself unless we landed directly on '{'
                if (jsonString[startIndex] !== '{') {
                    startIndex += s.length
                }
                break
            }
        }

        // Find end of JSON
        if (startIndex !== -1) {
            for (const e of ends) {
                endIndex = jsonString.lastIndexOf(e, jsonString.length)
                if (endIndex !== -1) {
                    // Include the closing brace when the delimiter is '}'
                    if (jsonString[endIndex] === '}') {
                        endIndex += 1
                    }
                    break
                }
            }
        }

        if (startIndex !== -1 && endIndex !== -1 && startIndex < endIndex) {
            const extractedContent = jsonString.slice(startIndex, endIndex).trim()
            try {
                return JSON.parse(extractedContent)
            } catch (error) {
                throw new Error(`Invalid JSON object. Error: ${error}`)
            }
        }

        throw new Error('Could not find JSON block in the output.')
    }

    /**
     * Runs the condition agent: builds a few-shot prompt from the configured
     * scenarios/instructions, invokes the selected chat model, parses its JSON
     * answer, and returns per-scenario `isFulfilled` flags for branch routing.
     * Falls back to 'default' (no scenario matched) when the response cannot
     * be parsed.
     */
    async run(nodeData: INodeData, question: string, options: ICommonObject): Promise<any> {
        let llmIds: ICommonObject | undefined
        let analyticHandlers = options.analyticHandlers as AnalyticHandler

        try {
            const abortController = options.abortController as AbortController

            // Extract input parameters
            const model = nodeData.inputs?.conditionAgentModel as string
            const modelConfig = nodeData.inputs?.conditionAgentModelConfig as ICommonObject
            if (!model) {
                throw new Error('Model is required')
            }
            const conditionAgentInput = nodeData.inputs?.conditionAgentInput as string
            let input = conditionAgentInput || question
            const conditionAgentInstructions = nodeData.inputs?.conditionAgentInstructions as string

            // Extract memory and configuration options
            const enableMemory = nodeData.inputs?.conditionAgentEnableMemory as boolean
            const memoryType = nodeData.inputs?.conditionAgentMemoryType as string
            const _conditionAgentScenarios = nodeData.inputs?.conditionAgentScenarios as { scenario: string }[]

            // Extract runtime state and history
            const state = options.agentflowRuntime?.state as ICommonObject
            const pastChatHistory = (options.pastChatHistory as BaseMessageLike[]) ?? []
            const runtimeChatHistory = (options.agentflowRuntime?.chatHistory as BaseMessageLike[]) ?? []

            // Initialize the LLM model instance by dynamically importing the
            // selected chat-model component and re-using this node's credential
            const nodeInstanceFilePath = options.componentNodes[model].filePath as string
            const nodeModule = await import(nodeInstanceFilePath)
            const newLLMNodeInstance = new nodeModule.nodeClass()
            const newNodeData = {
                ...nodeData,
                credential: modelConfig['FLOWISE_CREDENTIAL_ID'],
                inputs: {
                    ...nodeData.inputs,
                    ...modelConfig
                }
            }
            let llmNodeInstance = (await newLLMNodeInstance.init(newNodeData, '', options)) as BaseChatModel

            const isStructuredOutput =
                _conditionAgentScenarios && Array.isArray(_conditionAgentScenarios) && _conditionAgentScenarios.length > 0
            if (!isStructuredOutput) {
                throw new Error('Scenarios are required')
            }

            // Prepare messages array: system prompt plus three few-shot
            // user/assistant exemplar pairs showing the expected JSON answer
            const messages: BaseMessageLike[] = [
                {
                    role: 'system',
                    content: CONDITION_AGENT_SYSTEM_PROMPT
                },
                {
                    role: 'user',
                    content: `{"input": "Hello", "scenarios": ["user is asking about AI", "default"], "instruction": "Your task is to check and see if user is asking topic about AI"}`
                },
                {
                    role: 'assistant',
                    content: `\`\`\`json\n{"output": "default"}\n\`\`\``
                },
                {
                    role: 'user',
                    content: `{"input": "What is AIGC?", "scenarios": ["user is asking about AI", "default"], "instruction": "Your task is to check and see if user is asking topic about AI"}`
                },
                {
                    role: 'assistant',
                    content: `\`\`\`json\n{"output": "user is asking about AI"}\n\`\`\``
                },
                {
                    role: 'user',
                    content: `{"input": "Can you explain deep learning?", "scenarios": ["user is interested in AI topics", "default"], "instruction": "Determine if the user is interested in learning about AI"}`
                },
                {
                    role: 'assistant',
                    content: `\`\`\`json\n{"output": "user is interested in AI topics"}\n\`\`\``
                }
            ]

            // Use to store messages with image file references as we do not want to store the base64 data into database
            let runtimeImageMessagesWithFileRef: BaseMessageLike[] = []
            // Use to keep track of past messages with image file references
            let pastImageMessagesWithFileRef: BaseMessageLike[] = []

            // NOTE(review): input and instructions are interpolated without
            // JSON.stringify, so quotes/newlines in either produce a string
            // that is not valid JSON — presumably tolerated by the LLM, but
            // confirm whether escaping was intended.
            input = `{"input": ${input}, "scenarios": ${JSON.stringify(
                _conditionAgentScenarios.map((scenario) => scenario.scenario)
            )}, "instruction": ${conditionAgentInstructions}}`

            // Handle memory management if enabled
            if (enableMemory) {
                await this.handleMemory({
                    messages,
                    memoryType,
                    pastChatHistory,
                    runtimeChatHistory,
                    llmNodeInstance,
                    nodeData,
                    input,
                    abortController,
                    options,
                    modelConfig,
                    runtimeImageMessagesWithFileRef,
                    pastImageMessagesWithFileRef
                })
            } else {
                /*
                 * If this is the first node:
                 * - Add images to messages if exist
                 */
                if (!runtimeChatHistory.length && options.uploads) {
                    const imageContents = await getUniqueImageMessages(options, messages, modelConfig)
                    if (imageContents) {
                        const { imageMessageWithBase64, imageMessageWithFileRef } = imageContents
                        messages.push(imageMessageWithBase64)
                        runtimeImageMessagesWithFileRef.push(imageMessageWithFileRef)
                    }
                }
                messages.push({
                    role: 'user',
                    content: input
                })
            }

            // Initialize response and determine if streaming is possible
            let response: AIMessageChunk = new AIMessageChunk('')

            // Start analytics
            if (analyticHandlers && options.parentTraceIds) {
                const llmLabel = options?.componentNodes?.[model]?.label || model
                llmIds = await analyticHandlers.onLLMStart(llmLabel, messages, options.parentTraceIds)
            }

            // Track execution time
            const startTime = Date.now()

            response = await llmNodeInstance.invoke(messages, { signal: abortController?.signal })

            // Calculate execution time
            const endTime = Date.now()
            const timeDelta = endTime - startTime

            // End analytics tracking
            if (analyticHandlers && llmIds) {
                await analyticHandlers.onLLMEnd(
                    llmIds,
                    typeof response.content === 'string' ? response.content : JSON.stringify(response.content)
                )
            }

            // Parse the model's JSON answer; fall back to 'default' on failure
            let calledOutputName = 'default'
            try {
                const parsedResponse = this.parseJsonMarkdown(response.content as string)
                if (!parsedResponse.output) {
                    throw new Error('Missing "output" key in response')
                }
                calledOutputName = parsedResponse.output
            } catch (error) {
                console.warn(`Failed to parse LLM response: ${error}. Using default output.`)
            }

            // Clean up empty inputs
            for (const key in nodeData.inputs) {
                if (nodeData.inputs[key] === '') {
                    delete nodeData.inputs[key]
                }
            }

            // Find the first exact match (case-insensitive) among the scenarios
            const matchedScenarioIndex = _conditionAgentScenarios.findIndex(
                (scenario) => calledOutputName.toLowerCase() === scenario.scenario.toLowerCase()
            )

            const conditions = _conditionAgentScenarios.map((scenario, index) => {
                return {
                    output: scenario.scenario,
                    isFulfilled: index === matchedScenarioIndex
                }
            })

            // Replace the actual messages array with one that includes the file references for images instead of base64 data
            const messagesWithFileReferences = replaceBase64ImagesWithFileReferences(
                messages,
                runtimeImageMessagesWithFileRef,
                pastImageMessagesWithFileRef
            )

            // Only add to runtime chat history if this is the first node
            // NOTE(review): the stored user message is the raw `question`, not
            // the JSON-templated `input` — presumably intentional so history
            // holds what the user actually typed; confirm.
            const inputMessages = []
            if (!runtimeChatHistory.length) {
                if (runtimeImageMessagesWithFileRef.length) {
                    inputMessages.push(...runtimeImageMessagesWithFileRef)
                }
                if (input && typeof input === 'string') {
                    inputMessages.push({ role: 'user', content: question })
                }
            }

            const returnOutput = {
                id: nodeData.id,
                name: this.name,
                input: { messages: messagesWithFileReferences },
                output: {
                    conditions,
                    content: typeof response.content === 'string' ? response.content : JSON.stringify(response.content),
                    timeMetadata: {
                        start: startTime,
                        end: endTime,
                        delta: timeDelta
                    }
                },
                state,
                chatHistory: [...inputMessages]
            }

            return returnOutput
        } catch (error) {
            if (options.analyticHandlers && llmIds) {
                await options.analyticHandlers.onLLMError(llmIds, error instanceof Error ? error.message : String(error))
            }

            if (error instanceof Error && error.message === 'Aborted') {
                throw error
            }
            throw new Error(`Error in Condition Agent node: ${error instanceof Error ? error.message : String(error)}`)
        }
    }

    /**
     * Handles memory management based on the specified memory type.
     * Mutates `messages` in place: appends (possibly windowed/summarized) past
     * messages and finally the current user input. Image messages are swapped
     * between base64 and file-reference forms via the provided accumulators.
     */
    private async handleMemory({
        messages,
        memoryType,
        pastChatHistory,
        runtimeChatHistory,
        llmNodeInstance,
        nodeData,
        input,
        abortController,
        options,
        modelConfig,
        runtimeImageMessagesWithFileRef,
        pastImageMessagesWithFileRef
    }: {
        messages: BaseMessageLike[]
        memoryType: string
        pastChatHistory: BaseMessageLike[]
        runtimeChatHistory: BaseMessageLike[]
        llmNodeInstance: BaseChatModel
        nodeData: INodeData
        input: string
        abortController: AbortController
        options: ICommonObject
        modelConfig: ICommonObject
        runtimeImageMessagesWithFileRef: BaseMessageLike[]
        pastImageMessagesWithFileRef: BaseMessageLike[]
    }): Promise<void> {
        const { updatedPastMessages, transformedPastMessages } = await getPastChatHistoryImageMessages(pastChatHistory, options)
        pastChatHistory = updatedPastMessages
        pastImageMessagesWithFileRef.push(...transformedPastMessages)

        let pastMessages = [...pastChatHistory, ...runtimeChatHistory]
        if (!runtimeChatHistory.length) {
            /*
             * If this is the first node:
             * - Add images to messages if exist
             */
            if (options.uploads) {
                const imageContents = await getUniqueImageMessages(options, messages, modelConfig)
                if (imageContents) {
                    const { imageMessageWithBase64, imageMessageWithFileRef } = imageContents
                    pastMessages.push(imageMessageWithBase64)
                    runtimeImageMessagesWithFileRef.push(imageMessageWithFileRef)
                }
            }
        }
        const { updatedMessages, transformedMessages } = await processMessagesWithImages(pastMessages, options)
        pastMessages = updatedMessages
        pastImageMessagesWithFileRef.push(...transformedMessages)

        if (pastMessages.length > 0) {
            if (memoryType === 'windowSize') {
                // Window memory: Keep the last N messages
                // (slice(-N*2) assumes alternating user/assistant pairs)
                const windowSize = nodeData.inputs?.conditionAgentMemoryWindowSize as number
                const windowedMessages = pastMessages.slice(-windowSize * 2)
                messages.push(...windowedMessages)
            } else if (memoryType === 'conversationSummary') {
                // Summary memory: Summarize all past messages
                const summary = await llmNodeInstance.invoke(
                    [
                        {
                            role: 'user',
                            content: DEFAULT_SUMMARIZER_TEMPLATE.replace(
                                '{conversation}',
                                pastMessages.map((msg: any) => `${msg.role}: ${msg.content}`).join('\n')
                            )
                        }
                    ],
                    { signal: abortController?.signal }
                )
                messages.push({ role: 'assistant', content: summary.content as string })
            } else if (memoryType === 'conversationSummaryBuffer') {
                // Summary buffer: Summarize messages that exceed token limit
                await this.handleSummaryBuffer(messages, pastMessages, llmNodeInstance, nodeData, abortController)
            } else {
                // Default: Use all messages
                messages.push(...pastMessages)
            }
        }

        messages.push({
            role: 'user',
            content: input
        })
    }

    /**
     * Handles conversation summary buffer memory type: when past messages
     * exceed the configured token limit, the oldest messages are summarized
     * into a single system message and only the remainder is kept verbatim.
     */
    private async handleSummaryBuffer(
        messages: BaseMessageLike[],
        pastMessages: BaseMessageLike[],
        llmNodeInstance: BaseChatModel,
        nodeData: INodeData,
        abortController: AbortController
    ): Promise<void> {
        const maxTokenLimit = (nodeData.inputs?.conditionAgentMemoryMaxTokenLimit as number) || 2000

        // Convert past messages to a format suitable for token counting
        const messagesString = pastMessages.map((msg: any) => `${msg.role}: ${msg.content}`).join('\n')
        const tokenCount = await llmNodeInstance.getNumTokens(messagesString)

        if (tokenCount > maxTokenLimit) {
            // Calculate how many messages to summarize (messages that exceed the token limit)
            let currBufferLength = tokenCount
            const messagesToSummarize = []
            const remainingMessages = [...pastMessages]

            // Remove messages from the beginning until we're under the token limit
            while (currBufferLength > maxTokenLimit && remainingMessages.length > 0) {
                const poppedMessage = remainingMessages.shift()
                if (poppedMessage) {
                    messagesToSummarize.push(poppedMessage)
                    // Recalculate token count for remaining messages
                    const remainingMessagesString = remainingMessages.map((msg: any) => `${msg.role}: ${msg.content}`).join('\n')
                    currBufferLength = await llmNodeInstance.getNumTokens(remainingMessagesString)
                }
            }

            // Summarize the messages that were removed
            const messagesToSummarizeString = messagesToSummarize.map((msg: any) => `${msg.role}: ${msg.content}`).join('\n')

            const summary = await llmNodeInstance.invoke(
                [
                    {
                        role: 'user',
                        content: DEFAULT_SUMMARIZER_TEMPLATE.replace('{conversation}', messagesToSummarizeString)
                    }
                ],
                { signal: abortController?.signal }
            )

            // Add summary as a system message at the beginning, then add remaining messages
            messages.push({ role: 'system', content: `Previous conversation summary: ${summary.content}` })
            messages.push(...remainingMessages)
        } else {
            // If under token limit, use all messages
            messages.push(...pastMessages)
        }
    }
}

module.exports = { nodeClass: ConditionAgent_Agentflow }

View File

@ -0,0 +1,241 @@
import { DataSource } from 'typeorm'
import {
ICommonObject,
IDatabaseEntity,
INode,
INodeData,
INodeOptionsValue,
INodeParams,
IServerSideEventStreamer
} from '../../../src/Interface'
import { availableDependencies, defaultAllowBuiltInDep, getVars, prepareSandboxVars } from '../../../src/utils'
import { NodeVM } from '@flowiseai/nodevm'
import { updateFlowState } from '../utils'
// Shape of one row of the "Input Variables" array input: a name/value pair
// exposed to the sandboxed function as $<variableName>.
interface ICustomFunctionInputVariables {
    variableName: string
    variableValue: string
}

// Default code shown in the editor for the Javascript Function input.
// It is a runtime string (never executed here) documenting the sandbox
// conventions: $<var> inputs, $flow config, $vars custom variables, and the
// requirement to return a string.
const exampleFunc = `/*
* You can use any libraries imported in Flowise
* You can use properties specified in Input Schema as variables. Ex: Property = userid, Variable = $userid
* You can get default flow config: $flow.sessionId, $flow.chatId, $flow.chatflowId, $flow.input, $flow.state
* You can get custom variables: $vars.<variable-name>
* Must return a string value at the end of function
*/

const fetch = require('node-fetch');
const url = 'https://api.open-meteo.com/v1/forecast?latitude=52.52&longitude=13.41&current_weather=true';
const options = {
    method: 'GET',
    headers: {
        'Content-Type': 'application/json'
    }
};

try {
    const response = await fetch(url, options);
    const text = await response.text();
    return text;
} catch (error) {
    console.error(error);
    return '';
}`
/**
 * Agentflow node that executes a user-supplied Javascript snippet inside a
 * sandboxed NodeVM and returns its (stringified) result as the node output.
 * Input variables, flow metadata ($flow) and workspace variables ($vars) are
 * injected into the sandbox; the result may be streamed when this is the last node.
 */
class CustomFunction_Agentflow implements INode {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    color: string
    hideOutput: boolean
    hint: string
    baseClasses: string[]
    documentation?: string
    credential: INodeParams
    inputs: INodeParams[]

    constructor() {
        this.label = 'Custom Function'
        this.name = 'customFunctionAgentflow'
        this.version = 1.0
        this.type = 'CustomFunction'
        this.category = 'Agent Flows'
        this.description = 'Execute custom function'
        this.baseClasses = [this.type]
        this.color = '#E4B7FF'
        this.inputs = [
            {
                label: 'Input Variables',
                name: 'customFunctionInputVariables',
                description: 'Input variables can be used in the function with prefix $. For example: $foo',
                type: 'array',
                optional: true,
                acceptVariable: true,
                array: [
                    {
                        label: 'Variable Name',
                        name: 'variableName',
                        type: 'string'
                    },
                    {
                        label: 'Variable Value',
                        name: 'variableValue',
                        type: 'string',
                        acceptVariable: true
                    }
                ]
            },
            {
                label: 'Javascript Function',
                name: 'customFunctionJavascriptFunction',
                type: 'code',
                codeExample: exampleFunc,
                description: 'The function to execute. Must return a string or an object that can be converted to a string.'
            },
            {
                label: 'Update Flow State',
                name: 'customFunctionUpdateState',
                description: 'Update runtime state during the execution of the workflow',
                type: 'array',
                optional: true,
                acceptVariable: true,
                array: [
                    {
                        label: 'Key',
                        name: 'key',
                        type: 'asyncOptions',
                        loadMethod: 'listRuntimeStateKeys',
                        freeSolo: true
                    },
                    {
                        label: 'Value',
                        name: 'value',
                        type: 'string',
                        acceptVariable: true,
                        acceptNodeOutputAsVariable: true
                    }
                ]
            }
        ]
    }

    //@ts-ignore
    loadMethods = {
        // Surfaces the keys declared on the Start node's state so they can be
        // picked in the "Update Flow State" input.
        async listRuntimeStateKeys(_: INodeData, options: ICommonObject): Promise<INodeOptionsValue[]> {
            const previousNodes = options.previousNodes as ICommonObject[]
            const startAgentflowNode = previousNodes.find((node) => node.name === 'startAgentflow')
            // Guard: the Start node may be absent or have no declared state
            const state = (startAgentflowNode?.inputs?.startState as ICommonObject[]) ?? []
            return state.map((item) => ({ label: item.key, name: item.key }))
        }
    }

    /**
     * Executes the configured Javascript function inside a NodeVM sandbox.
     *
     * @param nodeData - node configuration (function source, input variables, state updates)
     * @param input - the current flow input, exposed to the sandbox as $input
     * @param options - runtime context (state, streaming, data source, chat ids)
     * @returns node result with the function output (stringified if an object) and updated state
     * @throws Error when the sandboxed function throws; the original message is preserved
     */
    async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<any> {
        const javascriptFunction = nodeData.inputs?.customFunctionJavascriptFunction as string
        // "Input Variables" is optional — default to an empty list when omitted
        const functionInputVariables = (nodeData.inputs?.customFunctionInputVariables as ICustomFunctionInputVariables[]) ?? []
        const _customFunctionUpdateState = nodeData.inputs?.customFunctionUpdateState

        const state = options.agentflowRuntime?.state as ICommonObject
        const chatId = options.chatId as string
        const isLastNode = options.isLastNode as boolean
        // Only the last node of a flow streams its output to the client
        const isStreamable = isLastNode && options.sseStreamer !== undefined

        const appDataSource = options.appDataSource as DataSource
        const databaseEntities = options.databaseEntities as IDatabaseEntity

        // Update flow state if needed
        let newState = { ...state }
        if (_customFunctionUpdateState && Array.isArray(_customFunctionUpdateState) && _customFunctionUpdateState.length > 0) {
            newState = updateFlowState(state, _customFunctionUpdateState)
        }

        const variables = await getVars(appDataSource, databaseEntities, nodeData)
        const flow = {
            chatflowId: options.chatflowid,
            sessionId: options.sessionId,
            chatId: options.chatId,
            input
        }

        // Sandbox globals: dangerous Node built-ins are explicitly shadowed
        // with undefined so the user code cannot reach them
        let sandbox: any = {
            $input: input,
            util: undefined,
            Symbol: undefined,
            child_process: undefined,
            fs: undefined,
            process: undefined
        }
        sandbox['$vars'] = prepareSandboxVars(variables)
        sandbox['$flow'] = flow

        // Expose each configured input variable as $<name>
        for (const item of functionInputVariables) {
            sandbox[`$${item.variableName}`] = item.variableValue
        }

        // Allowed dependencies: defaults plus env-configured extensions
        const builtinDeps = process.env.TOOL_FUNCTION_BUILTIN_DEP
            ? defaultAllowBuiltInDep.concat(process.env.TOOL_FUNCTION_BUILTIN_DEP.split(','))
            : defaultAllowBuiltInDep
        const externalDeps = process.env.TOOL_FUNCTION_EXTERNAL_DEP ? process.env.TOOL_FUNCTION_EXTERNAL_DEP.split(',') : []
        const deps = availableDependencies.concat(externalDeps)

        const nodeVMOptions = {
            console: 'inherit',
            sandbox,
            require: {
                external: { modules: deps },
                builtin: builtinDeps
            },
            eval: false,
            wasm: false,
            timeout: 10000
        } as any

        const vm = new NodeVM(nodeVMOptions)
        try {
            // Wrap the snippet in an async IIFE so top-level await works
            const response = await vm.run(`module.exports = async function() {${javascriptFunction}}()`, __dirname)

            let finalOutput = response
            if (typeof response === 'object') {
                finalOutput = JSON.stringify(response, null, 2)
            }

            if (isStreamable) {
                const sseStreamer: IServerSideEventStreamer = options.sseStreamer
                sseStreamer.streamTokenEvent(chatId, finalOutput)
            }

            // Process template variables in state: replace "{{ output }}" with the result.
            // Guard against null/undefined state values before string inspection.
            if (newState && Object.keys(newState).length > 0) {
                for (const key in newState) {
                    const current = newState[key]
                    if (current != null && String(current).includes('{{ output }}')) {
                        newState[key] = finalOutput
                    }
                }
            }

            const returnOutput = {
                id: nodeData.id,
                name: this.name,
                input: {
                    inputVariables: functionInputVariables,
                    code: javascriptFunction
                },
                output: {
                    content: finalOutput
                },
                state: newState
            }

            return returnOutput
        } catch (e) {
            // Preserve the sandbox error message instead of coercing the Error
            // object to "[object Object]" via the Error constructor
            throw new Error(e instanceof Error ? e.message : String(e))
        }
    }
}

module.exports = { nodeClass: CustomFunction_Agentflow }

View File

@ -0,0 +1,67 @@
import { ICommonObject, INode, INodeData, INodeParams, IServerSideEventStreamer } from '../../../src/Interface'
/**
 * Agentflow node that sends a fixed message straight back to the user,
 * streaming it when it is the terminal node of the flow.
 */
class DirectReply_Agentflow implements INode {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    color: string
    hideOutput: boolean
    hint: string
    baseClasses: string[]
    documentation?: string
    credential: INodeParams
    inputs: INodeParams[]

    constructor() {
        this.label = 'Direct Reply'
        this.name = 'directReplyAgentflow'
        this.version = 1.0
        this.type = 'DirectReply'
        this.category = 'Agent Flows'
        this.description = 'Directly reply to the user with a message'
        this.baseClasses = [this.type]
        this.color = '#4DDBBB'
        // This node produces no downstream output anchor
        this.hideOutput = true
        this.inputs = [
            {
                label: 'Message',
                name: 'directReplyMessage',
                type: 'string',
                rows: 4,
                acceptVariable: true
            }
        ]
    }

    async run(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
        const message = nodeData.inputs?.directReplyMessage as string
        const state = options.agentflowRuntime?.state as ICommonObject
        const chatId = options.chatId as string
        const isLastNode = options.isLastNode as boolean

        // Stream the reply only when this node terminates the flow and a streamer exists
        if (isLastNode && options.sseStreamer !== undefined) {
            const sseStreamer: IServerSideEventStreamer = options.sseStreamer
            sseStreamer.streamTokenEvent(chatId, message)
        }

        return {
            id: nodeData.id,
            name: this.name,
            input: {},
            output: {
                content: message
            },
            state
        }
    }
}
module.exports = { nodeClass: DirectReply_Agentflow }

View File

@ -0,0 +1,297 @@
import {
ICommonObject,
IDatabaseEntity,
INode,
INodeData,
INodeOptionsValue,
INodeParams,
IServerSideEventStreamer
} from '../../../src/Interface'
import axios, { AxiosRequestConfig } from 'axios'
import { getCredentialData, getCredentialParam } from '../../../src/utils'
import { DataSource } from 'typeorm'
import { BaseMessageLike } from '@langchain/core/messages'
import { updateFlowState } from '../utils'
/**
 * Agentflow node that calls another Flowise flow over the prediction REST API
 * and feeds its answer back into the current conversation.
 */
class ExecuteFlow_Agentflow implements INode {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    color: string
    baseClasses: string[]
    documentation?: string
    credential: INodeParams
    inputs: INodeParams[]

    constructor() {
        this.label = 'Execute Flow'
        this.name = 'executeFlowAgentflow'
        this.version = 1.0
        this.type = 'ExecuteFlow'
        this.category = 'Agent Flows'
        this.description = 'Execute another flow'
        this.baseClasses = [this.type]
        this.color = '#a3b18a'
        this.credential = {
            label: 'Connect Credential',
            name: 'credential',
            type: 'credential',
            credentialNames: ['chatflowApi'],
            optional: true
        }
        this.inputs = [
            {
                label: 'Select Flow',
                name: 'executeFlowSelectedFlow',
                type: 'asyncOptions',
                loadMethod: 'listFlows'
            },
            {
                label: 'Input',
                name: 'executeFlowInput',
                type: 'string',
                rows: 4,
                acceptVariable: true
            },
            {
                label: 'Override Config',
                name: 'executeFlowOverrideConfig',
                description: 'Override the config passed to the flow',
                type: 'json',
                optional: true
            },
            {
                label: 'Base URL',
                name: 'executeFlowBaseURL',
                type: 'string',
                description:
                    'Base URL to Flowise. By default, it is the URL of the incoming request. Useful when you need to execute flow through an alternative route.',
                placeholder: 'http://localhost:3000',
                optional: true
            },
            {
                label: 'Return Response As',
                name: 'executeFlowReturnResponseAs',
                type: 'options',
                options: [
                    {
                        label: 'User Message',
                        name: 'userMessage'
                    },
                    {
                        label: 'Assistant Message',
                        name: 'assistantMessage'
                    }
                ],
                default: 'userMessage'
            },
            {
                label: 'Update Flow State',
                name: 'executeFlowUpdateState',
                description: 'Update runtime state during the execution of the workflow',
                type: 'array',
                optional: true,
                acceptVariable: true,
                array: [
                    {
                        label: 'Key',
                        name: 'key',
                        type: 'asyncOptions',
                        loadMethod: 'listRuntimeStateKeys',
                        freeSolo: true
                    },
                    {
                        label: 'Value',
                        name: 'value',
                        type: 'string',
                        acceptVariable: true,
                        acceptNodeOutputAsVariable: true
                    }
                ]
            }
        ]
    }

    //@ts-ignore
    loadMethods = {
        // Lists all flows stored in the database for the "Select Flow" dropdown
        async listFlows(_: INodeData, options: ICommonObject): Promise<INodeOptionsValue[]> {
            const returnData: INodeOptionsValue[] = []

            const appDataSource = options.appDataSource as DataSource
            const databaseEntities = options.databaseEntities as IDatabaseEntity

            if (appDataSource === undefined || !appDataSource) {
                return returnData
            }

            const chatflows = await appDataSource.getRepository(databaseEntities['ChatFlow']).find()

            for (let i = 0; i < chatflows.length; i += 1) {
                let cfType = 'Chatflow'
                if (chatflows[i].type === 'AGENTFLOW') {
                    cfType = 'Agentflow V2'
                } else if (chatflows[i].type === 'MULTIAGENT') {
                    cfType = 'Agentflow V1'
                }
                const data = {
                    label: chatflows[i].name,
                    name: chatflows[i].id,
                    description: cfType
                } as INodeOptionsValue
                returnData.push(data)
            }

            // order by label
            return returnData.sort((a, b) => a.label.localeCompare(b.label))
        },
        // Offers the state keys declared on the Start node as dropdown options
        async listRuntimeStateKeys(_: INodeData, options: ICommonObject): Promise<INodeOptionsValue[]> {
            const previousNodes = (options.previousNodes as ICommonObject[]) ?? []
            const startAgentflowNode = previousNodes.find((node) => node.name === 'startAgentflow')
            // Guard against a missing Start node / empty state (previously crashed on .map)
            const state = (startAgentflowNode?.inputs?.startState as ICommonObject[]) ?? []
            return state.map((item) => ({ label: item.key, name: item.key }))
        }
    }

    /**
     * Calls the selected flow's prediction endpoint and returns its answer as
     * this node's output, updating flow state and chat history as configured.
     */
    async run(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
        const baseURL = (nodeData.inputs?.executeFlowBaseURL as string) || (options.baseURL as string)
        const selectedFlowId = nodeData.inputs?.executeFlowSelectedFlow as string
        const flowInput = nodeData.inputs?.executeFlowInput as string
        const returnResponseAs = nodeData.inputs?.executeFlowReturnResponseAs as string
        const _executeFlowUpdateState = nodeData.inputs?.executeFlowUpdateState
        // Accept the override config either as an already-parsed object or as a JSON string
        const overrideConfig =
            typeof nodeData.inputs?.executeFlowOverrideConfig === 'string' &&
            nodeData.inputs.executeFlowOverrideConfig.startsWith('{') &&
            nodeData.inputs.executeFlowOverrideConfig.endsWith('}')
                ? JSON.parse(nodeData.inputs.executeFlowOverrideConfig)
                : nodeData.inputs?.executeFlowOverrideConfig

        const state = options.agentflowRuntime?.state as ICommonObject
        const runtimeChatHistory = (options.agentflowRuntime?.chatHistory as BaseMessageLike[]) ?? []
        const isLastNode = options.isLastNode as boolean
        const sseStreamer: IServerSideEventStreamer | undefined = options.sseStreamer

        try {
            const credentialData = await getCredentialData(nodeData.credential ?? '', options)
            const chatflowApiKey = getCredentialParam('chatflowApiKey', credentialData, nodeData)

            // Calling the flow that is currently executing would recurse forever
            if (selectedFlowId === options.chatflowid) throw new Error('Cannot call the same agentflow!')

            let headers: Record<string, string> = {
                'Content-Type': 'application/json'
            }
            if (chatflowApiKey) headers = { ...headers, Authorization: `Bearer ${chatflowApiKey}` }

            const finalUrl = `${baseURL}/api/v1/prediction/${selectedFlowId}`

            const requestConfig: AxiosRequestConfig = {
                method: 'POST',
                url: finalUrl,
                headers,
                data: {
                    question: flowInput,
                    chatId: options.chatId,
                    overrideConfig
                }
            }

            const response = await axios(requestConfig)

            let resultText = ''
            if (response.data.text) resultText = response.data.text
            else if (response.data.json) resultText = '```json\n' + JSON.stringify(response.data.json, null, 2)
            else resultText = JSON.stringify(response.data, null, 2)

            if (isLastNode && sseStreamer) {
                sseStreamer.streamTokenEvent(options.chatId, resultText)
            }

            // Update flow state if needed
            let newState = { ...state }
            if (_executeFlowUpdateState && Array.isArray(_executeFlowUpdateState) && _executeFlowUpdateState.length > 0) {
                newState = updateFlowState(state, _executeFlowUpdateState)
            }

            // Process template variables in state; tolerate null/undefined values
            // (previously newState[key].toString() crashed on nullish entries)
            if (newState && Object.keys(newState).length > 0) {
                for (const key in newState) {
                    if (String(newState[key] ?? '').includes('{{ output }}')) {
                        newState[key] = resultText
                    }
                }
            }

            // Only add to runtime chat history if this is the first node
            const inputMessages = []
            if (!runtimeChatHistory.length) {
                inputMessages.push({ role: 'user', content: flowInput })
            }

            let returnRole = 'user'
            if (returnResponseAs === 'assistantMessage') {
                returnRole = 'assistant'
            }

            const returnOutput = {
                id: nodeData.id,
                name: this.name,
                input: {
                    messages: [
                        {
                            role: 'user',
                            content: flowInput
                        }
                    ]
                },
                output: {
                    content: resultText
                },
                state: newState,
                chatHistory: [
                    ...inputMessages,
                    {
                        role: returnRole,
                        content: resultText,
                        name: nodeData?.label ? nodeData?.label.toLowerCase().replace(/\s/g, '_').trim() : nodeData?.id
                    }
                ]
            }

            return returnOutput
        } catch (error) {
            console.error('ExecuteFlow Error:', error)

            // Format error response
            const errorResponse: any = {
                id: nodeData.id,
                name: this.name,
                input: {
                    messages: [
                        {
                            role: 'user',
                            content: flowInput
                        }
                    ]
                },
                error: {
                    name: error.name || 'Error',
                    message: error.message || 'An error occurred during the execution of the flow'
                },
                state
            }

            // Add more error details if available
            if (error.response) {
                errorResponse.error.status = error.response.status
                errorResponse.error.statusText = error.response.statusText
                errorResponse.error.data = error.response.data
                errorResponse.error.headers = error.response.headers
            }

            // Previously the structured errorResponse was built and then discarded
            // by `throw new Error(error)` (which also garbles the message).
            // Keep a readable message and attach the details for upstream handlers.
            const err: any = new Error(error instanceof Error ? error.message : String(error))
            err.response = errorResponse
            throw err
        }
    }
}
module.exports = { nodeClass: ExecuteFlow_Agentflow }

View File

@ -0,0 +1,368 @@
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import axios, { AxiosRequestConfig, Method, ResponseType } from 'axios'
import FormData from 'form-data'
import * as querystring from 'querystring'
import { getCredentialData, getCredentialParam } from '../../../src/utils'
class HTTP_Agentflow implements INode {
label: string
name: string
version: number
description: string
type: string
icon: string
category: string
color: string
baseClasses: string[]
documentation?: string
credential: INodeParams
inputs: INodeParams[]
constructor() {
this.label = 'HTTP'
this.name = 'httpAgentflow'
this.version = 1.0
this.type = 'HTTP'
this.category = 'Agent Flows'
this.description = 'Send a HTTP request'
this.baseClasses = [this.type]
this.color = '#FF7F7F'
this.credential = {
label: 'HTTP Credential',
name: 'credential',
type: 'credential',
credentialNames: ['httpBasicAuth', 'httpBearerToken', 'httpApiKey'],
optional: true
}
this.inputs = [
{
label: 'Method',
name: 'method',
type: 'options',
options: [
{
label: 'GET',
name: 'GET'
},
{
label: 'POST',
name: 'POST'
},
{
label: 'PUT',
name: 'PUT'
},
{
label: 'DELETE',
name: 'DELETE'
},
{
label: 'PATCH',
name: 'PATCH'
}
],
default: 'GET'
},
{
label: 'URL',
name: 'url',
type: 'string'
},
{
label: 'Headers',
name: 'headers',
type: 'array',
array: [
{
label: 'Key',
name: 'key',
type: 'string',
default: ''
},
{
label: 'Value',
name: 'value',
type: 'string',
default: ''
}
],
optional: true
},
{
label: 'Query Params',
name: 'queryParams',
type: 'array',
array: [
{
label: 'Key',
name: 'key',
type: 'string',
default: ''
},
{
label: 'Value',
name: 'value',
type: 'string',
default: ''
}
],
optional: true
},
{
label: 'Body Type',
name: 'bodyType',
type: 'options',
options: [
{
label: 'JSON',
name: 'json'
},
{
label: 'Raw',
name: 'raw'
},
{
label: 'Form Data',
name: 'formData'
},
{
label: 'x-www-form-urlencoded',
name: 'xWwwFormUrlencoded'
}
],
optional: true
},
{
label: 'Body',
name: 'body',
type: 'string',
acceptVariable: true,
rows: 4,
show: {
bodyType: ['raw', 'json']
},
optional: true
},
{
label: 'Body',
name: 'body',
type: 'array',
show: {
bodyType: ['xWwwFormUrlencoded', 'formData']
},
array: [
{
label: 'Key',
name: 'key',
type: 'string',
default: ''
},
{
label: 'Value',
name: 'value',
type: 'string',
default: ''
}
],
optional: true
},
{
label: 'Response Type',
name: 'responseType',
type: 'options',
options: [
{
label: 'JSON',
name: 'json'
},
{
label: 'Text',
name: 'text'
},
{
label: 'Array Buffer',
name: 'arraybuffer'
},
{
label: 'Raw (Base64)',
name: 'base64'
}
],
optional: true
}
]
}
async run(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
const method = nodeData.inputs?.method as 'GET' | 'POST' | 'PUT' | 'DELETE' | 'PATCH'
const url = nodeData.inputs?.url as string
const headers = nodeData.inputs?.headers as ICommonObject
const queryParams = nodeData.inputs?.queryParams as ICommonObject
const bodyType = nodeData.inputs?.bodyType as 'json' | 'raw' | 'formData' | 'xWwwFormUrlencoded'
const body = nodeData.inputs?.body as ICommonObject | string | ICommonObject[]
const responseType = nodeData.inputs?.responseType as 'json' | 'text' | 'arraybuffer' | 'base64'
const state = options.agentflowRuntime?.state as ICommonObject
try {
// Prepare headers
const requestHeaders: Record<string, string> = {}
// Add headers from inputs
if (headers && Array.isArray(headers)) {
for (const header of headers) {
if (header.key && header.value) {
requestHeaders[header.key] = header.value
}
}
}
// Add credentials if provided
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
if (credentialData && Object.keys(credentialData).length !== 0) {
const basicAuthUsername = getCredentialParam('username', credentialData, nodeData)
const basicAuthPassword = getCredentialParam('password', credentialData, nodeData)
const bearerToken = getCredentialParam('token', credentialData, nodeData)
const apiKeyName = getCredentialParam('key', credentialData, nodeData)
const apiKeyValue = getCredentialParam('value', credentialData, nodeData)
// Determine which type of auth to use based on available credentials
if (basicAuthUsername && basicAuthPassword) {
// Basic Auth
const auth = Buffer.from(`${basicAuthUsername}:${basicAuthPassword}`).toString('base64')
requestHeaders['Authorization'] = `Basic ${auth}`
} else if (bearerToken) {
// Bearer Token
requestHeaders['Authorization'] = `Bearer ${bearerToken}`
} else if (apiKeyName && apiKeyValue) {
// API Key in header
requestHeaders[apiKeyName] = apiKeyValue
}
}
// Prepare query parameters
let queryString = ''
if (queryParams && Array.isArray(queryParams)) {
const params = new URLSearchParams()
for (const param of queryParams) {
if (param.key && param.value) {
params.append(param.key, param.value)
}
}
queryString = params.toString()
}
// Build final URL with query parameters
const finalUrl = queryString ? `${url}${url.includes('?') ? '&' : '?'}${queryString}` : url
// Prepare request config
const requestConfig: AxiosRequestConfig = {
method: method as Method,
url: finalUrl,
headers: requestHeaders,
responseType: (responseType || 'json') as ResponseType
}
// Handle request body based on body type
if (method !== 'GET' && body) {
switch (bodyType) {
case 'json':
requestConfig.data = typeof body === 'string' ? JSON.parse(body) : body
requestHeaders['Content-Type'] = 'application/json'
break
case 'raw':
requestConfig.data = body
break
case 'formData': {
const formData = new FormData()
if (Array.isArray(body) && body.length > 0) {
for (const item of body) {
formData.append(item.key, item.value)
}
}
requestConfig.data = formData
break
}
case 'xWwwFormUrlencoded':
requestConfig.data = querystring.stringify(typeof body === 'string' ? JSON.parse(body) : body)
requestHeaders['Content-Type'] = 'application/x-www-form-urlencoded'
break
}
}
// Make the HTTP request
const response = await axios(requestConfig)
// Process response based on response type
let responseData
if (responseType === 'base64' && response.data) {
responseData = Buffer.from(response.data, 'binary').toString('base64')
} else {
responseData = response.data
}
const returnOutput = {
id: nodeData.id,
name: this.name,
input: {
http: {
method,
url,
headers,
queryParams,
bodyType,
body,
responseType
}
},
output: {
http: {
data: responseData,
status: response.status,
statusText: response.statusText,
headers: response.headers
}
},
state
}
return returnOutput
} catch (error) {
console.error('HTTP Request Error:', error)
// Format error response
const errorResponse: any = {
id: nodeData.id,
name: this.name,
input: {
http: {
method,
url,
headers,
queryParams,
bodyType,
body,
responseType
}
},
error: {
name: error.name || 'Error',
message: error.message || 'An error occurred during the HTTP request'
},
state
}
// Add more error details if available
if (error.response) {
errorResponse.error.status = error.response.status
errorResponse.error.statusText = error.response.statusText
errorResponse.error.data = error.response.data
errorResponse.error.headers = error.response.headers
}
throw new Error(error)
}
}
}
module.exports = { nodeClass: HTTP_Agentflow }

View File

@ -0,0 +1,271 @@
import { BaseChatModel } from '@langchain/core/language_models/chat_models'
import {
ICommonObject,
ICondition,
IHumanInput,
INode,
INodeData,
INodeOptionsValue,
INodeOutputsValue,
INodeParams,
IServerSideEventStreamer
} from '../../../src/Interface'
import { AIMessageChunk, BaseMessageLike } from '@langchain/core/messages'
import { DEFAULT_HUMAN_INPUT_DESCRIPTION, DEFAULT_HUMAN_INPUT_DESCRIPTION_HTML } from '../prompt'
/**
 * Agentflow node that pauses the flow and asks the human to proceed or reject,
 * optionally generating the question text with an LLM.
 */
class HumanInput_Agentflow implements INode {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    color: string
    baseClasses: string[]
    documentation?: string
    credential: INodeParams
    inputs: INodeParams[]
    outputs: INodeOutputsValue[]

    constructor() {
        this.label = 'Human Input'
        this.name = 'humanInputAgentflow'
        this.version = 1.0
        this.type = 'HumanInput'
        this.category = 'Agent Flows'
        this.description = 'Request human input, approval or rejection during execution'
        this.color = '#6E6EFD'
        this.baseClasses = [this.type]
        this.inputs = [
            {
                label: 'Description Type',
                name: 'humanInputDescriptionType',
                type: 'options',
                options: [
                    {
                        label: 'Fixed',
                        name: 'fixed',
                        description: 'Specify a fixed description'
                    },
                    {
                        label: 'Dynamic',
                        name: 'dynamic',
                        description: 'Use LLM to generate a description'
                    }
                ]
            },
            {
                label: 'Description',
                name: 'humanInputDescription',
                type: 'string',
                placeholder: 'Are you sure you want to proceed?',
                acceptVariable: true,
                rows: 4,
                show: {
                    humanInputDescriptionType: 'fixed'
                }
            },
            {
                label: 'Model',
                name: 'humanInputModel',
                type: 'asyncOptions',
                loadMethod: 'listModels',
                loadConfig: true,
                show: {
                    humanInputDescriptionType: 'dynamic'
                }
            },
            {
                label: 'Prompt',
                name: 'humanInputModelPrompt',
                type: 'string',
                default: DEFAULT_HUMAN_INPUT_DESCRIPTION_HTML,
                acceptVariable: true,
                generateInstruction: true,
                rows: 4,
                show: {
                    humanInputDescriptionType: 'dynamic'
                }
            },
            {
                label: 'Enable Feedback',
                name: 'humanInputEnableFeedback',
                type: 'boolean',
                default: true
            }
        ]
        this.outputs = [
            {
                label: 'Proceed',
                name: 'proceed'
            },
            {
                label: 'Reject',
                name: 'reject'
            }
        ]
    }

    //@ts-ignore
    loadMethods = {
        // Lists all Chat Model nodes (excluding LlamaIndex variants) for the dropdown
        async listModels(_: INodeData, options: ICommonObject): Promise<INodeOptionsValue[]> {
            const componentNodes = options.componentNodes as {
                [key: string]: INode
            }

            const returnOptions: INodeOptionsValue[] = []
            for (const nodeName in componentNodes) {
                const componentNode = componentNodes[nodeName]
                if (componentNode.category === 'Chat Models') {
                    if (componentNode.tags?.includes('LlamaIndex')) {
                        continue
                    }
                    returnOptions.push({
                        label: componentNode.label,
                        name: nodeName,
                        imageSrc: componentNode.icon
                    })
                }
            }
            return returnOptions
        }
    }

    /**
     * Two-phase execution: on resume (humanInput present) it resolves the
     * proceed/reject condition; on first pass it produces (and streams) the
     * question shown to the human.
     */
    async run(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
        const _humanInput = nodeData.inputs?.humanInput
        const humanInput: IHumanInput = typeof _humanInput === 'string' ? JSON.parse(_humanInput) : _humanInput
        const humanInputEnableFeedback = nodeData.inputs?.humanInputEnableFeedback as boolean
        const humanInputDescriptionType = nodeData.inputs?.humanInputDescriptionType as string
        const model = nodeData.inputs?.humanInputModel as string
        const modelConfig = nodeData.inputs?.humanInputModelConfig as ICommonObject
        const _humanInputModelPrompt = nodeData.inputs?.humanInputModelPrompt as string
        const humanInputModelPrompt = _humanInputModelPrompt ? _humanInputModelPrompt : DEFAULT_HUMAN_INPUT_DESCRIPTION

        // Extract runtime state and history
        const state = options.agentflowRuntime?.state as ICommonObject
        const pastChatHistory = (options.pastChatHistory as BaseMessageLike[]) ?? []
        const runtimeChatHistory = (options.agentflowRuntime?.chatHistory as BaseMessageLike[]) ?? []
        const chatId = options.chatId as string
        const isStreamable = options.sseStreamer !== undefined

        if (humanInput) {
            // Resume phase: the human has answered; mark the matching outcome
            const outcomes: Partial<ICondition>[] & Partial<IHumanInput>[] = [
                {
                    type: 'proceed',
                    startNodeId: humanInput?.startNodeId,
                    feedback: humanInputEnableFeedback && humanInput?.feedback ? humanInput.feedback : undefined,
                    isFulfilled: false
                },
                {
                    type: 'reject',
                    startNodeId: humanInput?.startNodeId,
                    feedback: humanInputEnableFeedback && humanInput?.feedback ? humanInput.feedback : undefined,
                    isFulfilled: false
                }
            ]
            // Only one outcome can be fulfilled at a time
            switch (humanInput?.type) {
                case 'proceed':
                    outcomes[0].isFulfilled = true
                    break
                case 'reject':
                    outcomes[1].isFulfilled = true
                    break
            }

            const messages = [
                ...pastChatHistory,
                ...runtimeChatHistory,
                {
                    role: 'user',
                    content: humanInput.feedback || humanInput.type
                }
            ]

            const input = { ...humanInput, messages }
            const output = { conditions: outcomes }

            const nodeOutput = {
                id: nodeData.id,
                name: this.name,
                input,
                output,
                state
            }

            if (humanInput.feedback) {
                ;(nodeOutput as any).chatHistory = [{ role: 'user', content: humanInput.feedback }]
            }

            return nodeOutput
        } else {
            // First pass: produce the question shown to the human
            let humanInputDescription = ''

            if (humanInputDescriptionType === 'fixed') {
                humanInputDescription = (nodeData.inputs?.humanInputDescription as string) || 'Do you want to proceed?'
                const messages = [...pastChatHistory, ...runtimeChatHistory]
                // Prefix with the last message of the conversation; guard against an
                // empty history (previously crashed reading .content of undefined)
                const lastMessage = messages.length ? (messages[messages.length - 1] as any).content || '' : ''
                humanInputDescription = `${lastMessage}\n\n${humanInputDescription}`
                if (isStreamable) {
                    const sseStreamer: IServerSideEventStreamer = options.sseStreamer as IServerSideEventStreamer
                    sseStreamer.streamTokenEvent(chatId, humanInputDescription)
                }
            } else {
                // Dynamic: ask the selected LLM to phrase the question.
                // If no model is configured, the description stays empty.
                if (model && modelConfig) {
                    const nodeInstanceFilePath = options.componentNodes[model].filePath as string
                    const nodeModule = await import(nodeInstanceFilePath)
                    const newNodeInstance = new nodeModule.nodeClass()
                    const newNodeData = {
                        ...nodeData,
                        credential: modelConfig['FLOWISE_CREDENTIAL_ID'],
                        inputs: {
                            ...nodeData.inputs,
                            ...modelConfig
                        }
                    }
                    const llmNodeInstance = (await newNodeInstance.init(newNodeData, '', options)) as BaseChatModel

                    const messages = [
                        ...pastChatHistory,
                        ...runtimeChatHistory,
                        {
                            role: 'user',
                            content: humanInputModelPrompt || DEFAULT_HUMAN_INPUT_DESCRIPTION
                        }
                    ]

                    let response: AIMessageChunk = new AIMessageChunk('')
                    if (isStreamable) {
                        const sseStreamer: IServerSideEventStreamer = options.sseStreamer as IServerSideEventStreamer
                        for await (const chunk of await llmNodeInstance.stream(messages)) {
                            sseStreamer.streamTokenEvent(chatId, chunk.content.toString())
                            response = response.concat(chunk)
                        }
                        humanInputDescription = response.content as string
                    } else {
                        const response = await llmNodeInstance.invoke(messages)
                        humanInputDescription = response.content as string
                    }
                }
            }

            const input = { messages: [...pastChatHistory, ...runtimeChatHistory], humanInputEnableFeedback }
            const output = { content: humanInputDescription }

            const nodeOutput = {
                id: nodeData.id,
                name: this.name,
                input,
                output,
                state,
                chatHistory: [{ role: 'assistant', content: humanInputDescription }]
            }

            return nodeOutput
        }
    }
}
module.exports = { nodeClass: HumanInput_Agentflow }

View File

@ -0,0 +1,17 @@
/** A single chat message passed to an LLM-based Agentflow node. */
export interface ILLMMessage {
    role: 'system' | 'assistant' | 'user' | 'tool' | 'developer'
    content: string
}

/** One key of the JSON structured output an LLM is instructed to produce. */
export interface IStructuredOutput {
    key: string
    type: 'string' | 'stringArray' | 'number' | 'boolean' | 'enum' | 'jsonArray'
    // Comma-separated list of allowed values; only shown/used when type is 'enum'
    enumValues?: string
    description?: string
    // JSON schema for array items; only shown/used when type is 'jsonArray'
    jsonSchema?: string
}

/** A key/value pair of the flow's runtime state. */
export interface IFlowState {
    key: string
    value: string
}

View File

@ -0,0 +1,69 @@
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
/**
 * Agentflow node that marks an iteration block: its input array is iterated
 * over by the flow engine, running the nested nodes once per element.
 */
class Iteration_Agentflow implements INode {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    color: string
    baseClasses: string[]
    documentation?: string
    credential: INodeParams
    inputs: INodeParams[]

    constructor() {
        this.label = 'Iteration'
        this.name = 'iterationAgentflow'
        this.version = 1.0
        this.type = 'Iteration'
        this.category = 'Agent Flows'
        this.description = 'Execute the nodes within the iteration block through N iterations'
        this.baseClasses = [this.type]
        this.color = '#9C89B8'
        this.inputs = [
            {
                label: 'Array Input',
                name: 'iterationInput',
                type: 'string',
                description: 'The input array to iterate over',
                acceptVariable: true,
                rows: 4
            }
        ]
    }

    async run(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
        const rawInput = nodeData.inputs?.iterationInput

        // Strip redundant backslashes (e.g. \" or \[) that variable interpolation
        // sometimes leaves behind before attempting to parse the JSON
        const stripRedundantEscapes = (value: string): string => value.replace(/\\(["'[\]{}])/g, '$1')

        // A non-empty string is parsed as JSON; anything else is passed through
        let items = rawInput
        if (typeof rawInput === 'string' && rawInput !== '') {
            items = JSON.parse(stripRedundantEscapes(rawInput))
        }

        if (!Array.isArray(items)) {
            throw new Error('Invalid input array')
        }

        const state = options.agentflowRuntime?.state as ICommonObject

        return {
            id: nodeData.id,
            name: this.name,
            input: {
                iterationInput: items
            },
            output: {},
            state
        }
    }
}
module.exports = { nodeClass: Iteration_Agentflow }

View File

@ -0,0 +1,985 @@
import { BaseChatModel } from '@langchain/core/language_models/chat_models'
import { ICommonObject, INode, INodeData, INodeOptionsValue, INodeParams, IServerSideEventStreamer } from '../../../src/Interface'
import { AIMessageChunk, BaseMessageLike, MessageContentText } from '@langchain/core/messages'
import { DEFAULT_SUMMARIZER_TEMPLATE } from '../prompt'
import { z } from 'zod'
import { AnalyticHandler } from '../../../src/handler'
import { ILLMMessage, IStructuredOutput } from '../Interface.Agentflow'
import {
getPastChatHistoryImageMessages,
getUniqueImageMessages,
processMessagesWithImages,
replaceBase64ImagesWithFileReferences,
updateFlowState
} from '../utils'
import { get } from 'lodash'
class LLM_Agentflow implements INode {
label: string
name: string
version: number
description: string
type: string
icon: string
category: string
color: string
baseClasses: string[]
documentation?: string
credential: INodeParams
inputs: INodeParams[]
// Declares the LLM node's identity and its full input form: model selection,
// message list, memory options, structured output schema and state updates.
constructor() {
    this.label = 'LLM'
    this.name = 'llmAgentflow'
    this.version = 1.0
    this.type = 'LLM'
    this.category = 'Agent Flows'
    this.description = 'Large language models to analyze user-provided inputs and generate responses'
    this.color = '#64B5F6'
    this.baseClasses = [this.type]
    this.inputs = [
        {
            label: 'Model',
            name: 'llmModel',
            type: 'asyncOptions',
            loadMethod: 'listModels',
            loadConfig: true
        },
        // Optional list of role/content messages prepended to the conversation
        {
            label: 'Messages',
            name: 'llmMessages',
            type: 'array',
            optional: true,
            acceptVariable: true,
            array: [
                {
                    label: 'Role',
                    name: 'role',
                    type: 'options',
                    options: [
                        {
                            label: 'System',
                            name: 'system'
                        },
                        {
                            label: 'Assistant',
                            name: 'assistant'
                        },
                        {
                            label: 'Developer',
                            name: 'developer'
                        },
                        {
                            label: 'User',
                            name: 'user'
                        }
                    ]
                },
                {
                    label: 'Content',
                    name: 'content',
                    type: 'string',
                    acceptVariable: true,
                    generateInstruction: true,
                    rows: 4
                }
            ]
        },
        // Memory configuration: the sub-options below only show when enabled
        {
            label: 'Enable Memory',
            name: 'llmEnableMemory',
            type: 'boolean',
            description: 'Enable memory for the conversation thread',
            default: true,
            optional: true
        },
        {
            label: 'Memory Type',
            name: 'llmMemoryType',
            type: 'options',
            options: [
                {
                    label: 'All Messages',
                    name: 'allMessages',
                    description: 'Retrieve all messages from the conversation'
                },
                {
                    label: 'Window Size',
                    name: 'windowSize',
                    description: 'Uses a fixed window size to surface the last N messages'
                },
                {
                    label: 'Conversation Summary',
                    name: 'conversationSummary',
                    description: 'Summarizes the whole conversation'
                },
                {
                    label: 'Conversation Summary Buffer',
                    name: 'conversationSummaryBuffer',
                    description: 'Summarize conversations once token limit is reached. Default to 2000'
                }
            ],
            optional: true,
            default: 'allMessages',
            show: {
                llmEnableMemory: true
            }
        },
        {
            label: 'Window Size',
            name: 'llmMemoryWindowSize',
            type: 'number',
            default: '20',
            description: 'Uses a fixed window size to surface the last N messages',
            show: {
                llmMemoryType: 'windowSize'
            }
        },
        {
            label: 'Max Token Limit',
            name: 'llmMemoryMaxTokenLimit',
            type: 'number',
            default: '2000',
            description: 'Summarize conversations once token limit is reached. Default to 2000',
            show: {
                llmMemoryType: 'conversationSummaryBuffer'
            }
        },
        {
            label: 'Input Message',
            name: 'llmUserMessage',
            type: 'string',
            description: 'Add an input message as user message at the end of the conversation',
            rows: 4,
            optional: true,
            acceptVariable: true,
            show: {
                llmEnableMemory: true
            }
        },
        {
            label: 'Return Response As',
            name: 'llmReturnResponseAs',
            type: 'options',
            options: [
                {
                    label: 'User Message',
                    name: 'userMessage'
                },
                {
                    label: 'Assistant Message',
                    name: 'assistantMessage'
                }
            ],
            default: 'userMessage'
        },
        // Per-key schema the LLM is instructed to fill; enumValues/jsonSchema
        // only show for the matching key type
        {
            label: 'JSON Structured Output',
            name: 'llmStructuredOutput',
            description: 'Instruct the LLM to give output in a JSON structured schema',
            type: 'array',
            optional: true,
            acceptVariable: true,
            array: [
                {
                    label: 'Key',
                    name: 'key',
                    type: 'string'
                },
                {
                    label: 'Type',
                    name: 'type',
                    type: 'options',
                    options: [
                        {
                            label: 'String',
                            name: 'string'
                        },
                        {
                            label: 'String Array',
                            name: 'stringArray'
                        },
                        {
                            label: 'Number',
                            name: 'number'
                        },
                        {
                            label: 'Boolean',
                            name: 'boolean'
                        },
                        {
                            label: 'Enum',
                            name: 'enum'
                        },
                        {
                            label: 'JSON Array',
                            name: 'jsonArray'
                        }
                    ]
                },
                {
                    label: 'Enum Values',
                    name: 'enumValues',
                    type: 'string',
                    placeholder: 'value1, value2, value3',
                    description: 'Enum values. Separated by comma',
                    optional: true,
                    show: {
                        'llmStructuredOutput[$index].type': 'enum'
                    }
                },
                {
                    label: 'JSON Schema',
                    name: 'jsonSchema',
                    type: 'code',
                    placeholder: `{
    "answer": {
        "type": "string",
        "description": "Value of the answer"
    },
    "reason": {
        "type": "string",
        "description": "Reason for the answer"
    },
    "optional": {
        "type": "boolean"
    },
    "count": {
        "type": "number"
    },
    "children": {
        "type": "array",
        "items": {
            "type": "object",
            "properties": {
                "value": {
                    "type": "string",
                    "description": "Value of the children's answer"
                }
            }
        }
    }
}`,
                    description: 'JSON schema for the structured output',
                    optional: true,
                    show: {
                        'llmStructuredOutput[$index].type': 'jsonArray'
                    }
                },
                {
                    label: 'Description',
                    name: 'description',
                    type: 'string',
                    placeholder: 'Description of the key'
                }
            ]
        },
        {
            label: 'Update Flow State',
            name: 'llmUpdateState',
            description: 'Update runtime state during the execution of the workflow',
            type: 'array',
            optional: true,
            acceptVariable: true,
            array: [
                {
                    label: 'Key',
                    name: 'key',
                    type: 'asyncOptions',
                    loadMethod: 'listRuntimeStateKeys',
                    freeSolo: true
                },
                {
                    label: 'Value',
                    name: 'value',
                    type: 'string',
                    acceptVariable: true,
                    acceptNodeOutputAsVariable: true
                }
            ]
        }
    ]
}
//@ts-ignore
loadMethods = {
    /**
     * Lists all available Chat Model components for the model dropdown,
     * excluding LlamaIndex-tagged models (not compatible with this node).
     */
    async listModels(_: INodeData, options: ICommonObject): Promise<INodeOptionsValue[]> {
        const componentNodes = options.componentNodes as {
            [key: string]: INode
        }

        const returnOptions: INodeOptionsValue[] = []
        for (const nodeName in componentNodes) {
            const componentNode = componentNodes[nodeName]
            if (componentNode.category === 'Chat Models') {
                if (componentNode.tags?.includes('LlamaIndex')) {
                    continue
                }
                returnOptions.push({
                    label: componentNode.label,
                    name: nodeName,
                    imageSrc: componentNode.icon
                })
            }
        }

        return returnOptions
    },
    /**
     * Lists the runtime state keys declared on the Start node so they can be
     * offered as options for "Update Flow State".
     */
    async listRuntimeStateKeys(_: INodeData, options: ICommonObject): Promise<INodeOptionsValue[]> {
        const previousNodes = (options.previousNodes as ICommonObject[]) ?? []
        const startAgentflowNode = previousNodes.find((node) => node.name === 'startAgentflow')
        const state = startAgentflowNode?.inputs?.startState as ICommonObject[]
        // Guard: flows without a Start node or without a configured start state
        // previously crashed here with "Cannot read properties of undefined (reading 'map')"
        if (!Array.isArray(state)) return []
        return state.map((item) => ({ label: item.key, name: item.key }))
    }
}
/**
 * Executes the LLM node.
 *
 * Resolves the selected chat model component via dynamic import, assembles the
 * message list (configured messages, optional memory, optional image uploads,
 * user input), optionally wraps the model with structured JSON output, then
 * invokes or streams the model. Returns the response together with the updated
 * flow state (including `{{ output }}` / `{{ output.path }}` template
 * substitution) and the chat-history delta for this node.
 *
 * @param nodeData - Node configuration (model, memory, structured output, state updates)
 * @param input - Incoming input: the user question (string) or a form object
 * @param options - Runtime context (chat history, SSE streamer, analytics, abort controller)
 * @returns `{ id, name, input, output, state, chatHistory }` for the executor
 * @throws Error when no model is selected or the model invocation fails;
 *         an 'Aborted' error is rethrown as-is so cancellation is distinguishable
 */
async run(nodeData: INodeData, input: string | Record<string, any>, options: ICommonObject): Promise<any> {
    let llmIds: ICommonObject | undefined
    let analyticHandlers = options.analyticHandlers as AnalyticHandler

    try {
        const abortController = options.abortController as AbortController

        // Extract input parameters
        const model = nodeData.inputs?.llmModel as string
        const modelConfig = nodeData.inputs?.llmModelConfig as ICommonObject
        if (!model) {
            throw new Error('Model is required')
        }

        // Extract memory and configuration options
        const enableMemory = nodeData.inputs?.llmEnableMemory as boolean
        const memoryType = nodeData.inputs?.llmMemoryType as string
        const userMessage = nodeData.inputs?.llmUserMessage as string
        const _llmUpdateState = nodeData.inputs?.llmUpdateState
        const _llmStructuredOutput = nodeData.inputs?.llmStructuredOutput
        const llmMessages = (nodeData.inputs?.llmMessages as unknown as ILLMMessage[]) ?? []

        // Extract runtime state and history
        const state = options.agentflowRuntime?.state as ICommonObject
        const pastChatHistory = (options.pastChatHistory as BaseMessageLike[]) ?? []
        const runtimeChatHistory = (options.agentflowRuntime?.chatHistory as BaseMessageLike[]) ?? []
        const chatId = options.chatId as string

        // Initialize the LLM model instance by dynamically importing the
        // selected chat-model component and init-ing it with the merged config
        const nodeInstanceFilePath = options.componentNodes[model].filePath as string
        const nodeModule = await import(nodeInstanceFilePath)
        const newLLMNodeInstance = new nodeModule.nodeClass()
        const newNodeData = {
            ...nodeData,
            credential: modelConfig['FLOWISE_CREDENTIAL_ID'],
            inputs: {
                ...nodeData.inputs,
                ...modelConfig
            }
        }
        let llmNodeInstance = (await newLLMNodeInstance.init(newNodeData, '', options)) as BaseChatModel

        // Prepare messages array
        const messages: BaseMessageLike[] = []
        // Use to store messages with image file references as we do not want to store the base64 data into database
        let runtimeImageMessagesWithFileRef: BaseMessageLike[] = []
        // Use to keep track of past messages with image file references
        let pastImageMessagesWithFileRef: BaseMessageLike[] = []

        // Configured messages: only entries with both role and content are kept
        for (const msg of llmMessages) {
            const role = msg.role
            const content = msg.content
            if (role && content) {
                messages.push({ role, content })
            }
        }

        // Handle memory management if enabled (mutates `messages` and the two
        // image-reference arrays in place)
        if (enableMemory) {
            await this.handleMemory({
                messages,
                memoryType,
                pastChatHistory,
                runtimeChatHistory,
                llmNodeInstance,
                nodeData,
                userMessage,
                input,
                abortController,
                options,
                modelConfig,
                runtimeImageMessagesWithFileRef,
                pastImageMessagesWithFileRef
            })
        } else if (!runtimeChatHistory.length) {
            /*
             * If this is the first node:
             * - Add images to messages if exist
             * - Add user message
             */
            if (options.uploads) {
                const imageContents = await getUniqueImageMessages(options, messages, modelConfig)
                if (imageContents) {
                    const { imageMessageWithBase64, imageMessageWithFileRef } = imageContents
                    messages.push(imageMessageWithBase64)
                    runtimeImageMessagesWithFileRef.push(imageMessageWithFileRef)
                }
            }

            if (input && typeof input === 'string') {
                messages.push({
                    role: 'user',
                    content: input
                })
            }
        }
        // Drop the raw messages config so it is not echoed back in the node input
        delete nodeData.inputs?.llmMessages

        // Configure structured output if specified
        const isStructuredOutput = _llmStructuredOutput && Array.isArray(_llmStructuredOutput) && _llmStructuredOutput.length > 0
        if (isStructuredOutput) {
            llmNodeInstance = this.configureStructuredOutput(llmNodeInstance, _llmStructuredOutput)
        }

        // Initialize response and determine if streaming is possible.
        // Streaming requires: last node, a streamer, streaming not disabled,
        // and no structured output (structured results are not token streams).
        let response: AIMessageChunk = new AIMessageChunk('')
        const isLastNode = options.isLastNode as boolean
        const isStreamable = isLastNode && options.sseStreamer !== undefined && modelConfig?.streaming !== false && !isStructuredOutput

        // Start analytics
        if (analyticHandlers && options.parentTraceIds) {
            const llmLabel = options?.componentNodes?.[model]?.label || model
            llmIds = await analyticHandlers.onLLMStart(llmLabel, messages, options.parentTraceIds)
        }

        // Track execution time
        const startTime = Date.now()

        const sseStreamer: IServerSideEventStreamer | undefined = options.sseStreamer
        if (isStreamable) {
            response = await this.handleStreamingResponse(sseStreamer, llmNodeInstance, messages, chatId, abortController)
        } else {
            response = await llmNodeInstance.invoke(messages, { signal: abortController?.signal })

            // Stream whole response back to UI if this is the last node
            if (isLastNode && options.sseStreamer) {
                const sseStreamer: IServerSideEventStreamer = options.sseStreamer as IServerSideEventStreamer
                let responseContent = JSON.stringify(response, null, 2)
                if (typeof response.content === 'string') {
                    responseContent = response.content
                }
                sseStreamer.streamTokenEvent(chatId, responseContent)
            }
        }

        // Calculate execution time
        const endTime = Date.now()
        const timeDelta = endTime - startTime

        // Update flow state if needed
        let newState = { ...state }
        if (_llmUpdateState && Array.isArray(_llmUpdateState) && _llmUpdateState.length > 0) {
            newState = updateFlowState(state, _llmUpdateState)
        }

        // Clean up empty inputs
        for (const key in nodeData.inputs) {
            if (nodeData.inputs[key] === '') {
                delete nodeData.inputs[key]
            }
        }

        // Prepare final response and output object
        const finalResponse = (response.content as string) ?? JSON.stringify(response, null, 2)
        const output = this.prepareOutputObject(response, finalResponse, startTime, endTime, timeDelta)

        // End analytics tracking
        if (analyticHandlers && llmIds) {
            await analyticHandlers.onLLMEnd(llmIds, finalResponse)
        }

        // Send additional streaming events if needed
        if (isStreamable) {
            this.sendStreamingEvents(options, chatId, response)
        }

        // Process template variables in state
        if (newState && Object.keys(newState).length > 0) {
            for (const key in newState) {
                // NOTE(review): assumes state values are never null/undefined — confirm upstream
                const stateValue = newState[key].toString()
                if (stateValue.includes('{{ output')) {
                    // Handle simple output replacement
                    if (stateValue === '{{ output }}') {
                        newState[key] = finalResponse
                        continue
                    }

                    // Handle JSON path expressions like {{ output.item1 }}
                    // eslint-disable-next-line
                    const match = stateValue.match(/{{[\s]*output\.([\w\.]+)[\s]*}}/)
                    if (match) {
                        try {
                            // Parse the response if it's JSON
                            const jsonResponse = typeof finalResponse === 'string' ? JSON.parse(finalResponse) : finalResponse
                            // Get the value using lodash get
                            const path = match[1]
                            const value = get(jsonResponse, path)
                            newState[key] = value ?? stateValue // Fall back to original if path not found
                        } catch (e) {
                            // If JSON parsing fails, keep original template
                            console.warn(`Failed to parse JSON or find path in output: ${e}`)
                            newState[key] = stateValue
                        }
                    }
                }
            }
        }

        // Replace the actual messages array with one that includes the file references for images instead of base64 data
        const messagesWithFileReferences = replaceBase64ImagesWithFileReferences(
            messages,
            runtimeImageMessagesWithFileRef,
            pastImageMessagesWithFileRef
        )

        // Only add to runtime chat history if this is the first node
        const inputMessages = []
        if (!runtimeChatHistory.length) {
            if (runtimeImageMessagesWithFileRef.length) {
                inputMessages.push(...runtimeImageMessagesWithFileRef)
            }
            if (input && typeof input === 'string') {
                inputMessages.push({ role: 'user', content: input })
            }
        }

        // The node's answer can be recorded in history as either role
        const returnResponseAs = nodeData.inputs?.llmReturnResponseAs as string
        let returnRole = 'user'
        if (returnResponseAs === 'assistantMessage') {
            returnRole = 'assistant'
        }

        // Prepare and return the final output
        return {
            id: nodeData.id,
            name: this.name,
            input: {
                messages: messagesWithFileReferences,
                ...nodeData.inputs
            },
            output,
            state: newState,
            chatHistory: [
                ...inputMessages,
                // LLM response
                {
                    role: returnRole,
                    content: finalResponse,
                    name: nodeData?.label ? nodeData?.label.toLowerCase().replace(/\s/g, '_').trim() : nodeData?.id
                }
            ]
        }
    } catch (error) {
        if (options.analyticHandlers && llmIds) {
            await options.analyticHandlers.onLLMError(llmIds, error instanceof Error ? error.message : String(error))
        }

        // Propagate user-initiated aborts unchanged so callers can detect them
        if (error instanceof Error && error.message === 'Aborted') {
            throw error
        }
        throw new Error(`Error in LLM node: ${error instanceof Error ? error.message : String(error)}`)
    }
}
/**
 * Handles memory management based on the specified memory type.
 *
 * Assembles the past conversation (persisted history + current-execution
 * history) into `messages` according to `memoryType`:
 * - 'windowSize': keep only the last N message pairs
 * - 'conversationSummary': replace history with one LLM-generated summary
 * - 'conversationSummaryBuffer': summarize only when over the token limit
 * - anything else: append the full history
 *
 * Mutates its array arguments in place (`messages`,
 * `runtimeImageMessagesWithFileRef`, `pastImageMessagesWithFileRef`) —
 * the caller relies on these side effects. Finally appends the configured
 * user message, if any.
 */
private async handleMemory({
    messages,
    memoryType,
    pastChatHistory,
    runtimeChatHistory,
    llmNodeInstance,
    nodeData,
    userMessage,
    input,
    abortController,
    options,
    modelConfig,
    runtimeImageMessagesWithFileRef,
    pastImageMessagesWithFileRef
}: {
    messages: BaseMessageLike[]
    memoryType: string
    pastChatHistory: BaseMessageLike[]
    runtimeChatHistory: BaseMessageLike[]
    llmNodeInstance: BaseChatModel
    nodeData: INodeData
    userMessage: string
    input: string | Record<string, any>
    abortController: AbortController
    options: ICommonObject
    modelConfig: ICommonObject
    runtimeImageMessagesWithFileRef: BaseMessageLike[]
    pastImageMessagesWithFileRef: BaseMessageLike[]
}): Promise<void> {
    // Swap stored image file references in past history for inline payloads;
    // keep track of the transformed originals so they can be restored later
    const { updatedPastMessages, transformedPastMessages } = await getPastChatHistoryImageMessages(pastChatHistory, options)
    pastChatHistory = updatedPastMessages
    pastImageMessagesWithFileRef.push(...transformedPastMessages)

    let pastMessages = [...pastChatHistory, ...runtimeChatHistory]
    if (!runtimeChatHistory.length && input && typeof input === 'string') {
        /*
         * If this is the first node:
         * - Add images to messages if exist
         * - Add user message
         */
        if (options.uploads) {
            const imageContents = await getUniqueImageMessages(options, messages, modelConfig)
            if (imageContents) {
                const { imageMessageWithBase64, imageMessageWithFileRef } = imageContents
                pastMessages.push(imageMessageWithBase64)
                runtimeImageMessagesWithFileRef.push(imageMessageWithFileRef)
            }
        }
        pastMessages.push({
            role: 'user',
            content: input
        })
    }

    const { updatedMessages, transformedMessages } = await processMessagesWithImages(pastMessages, options)
    pastMessages = updatedMessages
    pastImageMessagesWithFileRef.push(...transformedMessages)

    if (pastMessages.length > 0) {
        if (memoryType === 'windowSize') {
            // Window memory: Keep the last N messages
            // (*2 because one exchange is a user + assistant message pair)
            const windowSize = nodeData.inputs?.llmMemoryWindowSize as number
            const windowedMessages = pastMessages.slice(-windowSize * 2)
            messages.push(...windowedMessages)
        } else if (memoryType === 'conversationSummary') {
            // Summary memory: Summarize all past messages
            const summary = await llmNodeInstance.invoke(
                [
                    {
                        role: 'user',
                        content: DEFAULT_SUMMARIZER_TEMPLATE.replace(
                            '{conversation}',
                            pastMessages.map((msg: any) => `${msg.role}: ${msg.content}`).join('\n')
                        )
                    }
                ],
                { signal: abortController?.signal }
            )
            messages.push({ role: 'assistant', content: summary.content as string })
        } else if (memoryType === 'conversationSummaryBuffer') {
            // Summary buffer: Summarize messages that exceed token limit
            await this.handleSummaryBuffer(messages, pastMessages, llmNodeInstance, nodeData, abortController)
        } else {
            // Default: Use all messages
            messages.push(...pastMessages)
        }
    }

    // Add user message
    if (userMessage) {
        messages.push({
            role: 'user',
            content: userMessage
        })
    }
}
/**
 * Handles conversation summary buffer memory type.
 *
 * If the serialized history exceeds `llmMemoryMaxTokenLimit` (default 2000
 * tokens), the oldest messages are peeled off until the remainder fits, the
 * removed messages are summarized by the LLM, and the summary is appended as
 * a system message followed by the remaining messages. Otherwise the full
 * history is appended unchanged. Mutates `messages` in place.
 */
private async handleSummaryBuffer(
    messages: BaseMessageLike[],
    pastMessages: BaseMessageLike[],
    llmNodeInstance: BaseChatModel,
    nodeData: INodeData,
    abortController: AbortController
): Promise<void> {
    const maxTokenLimit = (nodeData.inputs?.llmMemoryMaxTokenLimit as number) || 2000

    // Convert past messages to a format suitable for token counting
    const messagesString = pastMessages.map((msg: any) => `${msg.role}: ${msg.content}`).join('\n')
    const tokenCount = await llmNodeInstance.getNumTokens(messagesString)

    if (tokenCount > maxTokenLimit) {
        // Calculate how many messages to summarize (messages that exceed the token limit)
        let currBufferLength = tokenCount
        const messagesToSummarize = []
        const remainingMessages = [...pastMessages]

        // Remove messages from the beginning until we're under the token limit.
        // NOTE: this re-tokenizes the remainder once per removed message.
        while (currBufferLength > maxTokenLimit && remainingMessages.length > 0) {
            const poppedMessage = remainingMessages.shift()
            if (poppedMessage) {
                messagesToSummarize.push(poppedMessage)
                // Recalculate token count for remaining messages
                const remainingMessagesString = remainingMessages.map((msg: any) => `${msg.role}: ${msg.content}`).join('\n')
                currBufferLength = await llmNodeInstance.getNumTokens(remainingMessagesString)
            }
        }

        // Summarize the messages that were removed
        const messagesToSummarizeString = messagesToSummarize.map((msg: any) => `${msg.role}: ${msg.content}`).join('\n')
        const summary = await llmNodeInstance.invoke(
            [
                {
                    role: 'user',
                    content: DEFAULT_SUMMARIZER_TEMPLATE.replace('{conversation}', messagesToSummarizeString)
                }
            ],
            { signal: abortController?.signal }
        )

        // Add summary as a system message at the beginning, then add remaining messages
        messages.push({ role: 'system', content: `Previous conversation summary: ${summary.content}` })
        messages.push(...remainingMessages)
    } else {
        // If under token limit, use all messages
        messages.push(...pastMessages)
    }
}
/**
 * Wraps the LLM instance with structured (JSON) output based on the
 * user-defined schema entries. On any failure while building the schema,
 * logs the error and returns the original, unwrapped instance.
 */
private configureStructuredOutput(llmNodeInstance: BaseChatModel, llmStructuredOutput: IStructuredOutput[]): BaseChatModel {
    try {
        const zodObj: ICommonObject = {}

        for (const sch of llmStructuredOutput) {
            const fieldDescription = sch.description || ''

            switch (sch.type) {
                case 'string':
                    zodObj[sch.key] = z.string().describe(fieldDescription)
                    break
                case 'stringArray':
                    zodObj[sch.key] = z.array(z.string()).describe(fieldDescription)
                    break
                case 'number':
                    zodObj[sch.key] = z.number().describe(fieldDescription)
                    break
                case 'boolean':
                    zodObj[sch.key] = z.boolean().describe(fieldDescription)
                    break
                case 'enum': {
                    // Comma-separated values; fall back to a single 'default' member
                    // because z.enum requires a non-empty tuple
                    const enumValues = sch.enumValues?.split(',').map((item: string) => item.trim()) || []
                    const members = enumValues.length ? (enumValues as [string, ...string[]]) : (['default'] as [string, ...string[]])
                    zodObj[sch.key] = z.enum(members).describe(fieldDescription)
                    break
                }
                case 'jsonArray': {
                    const jsonSchema = sch.jsonSchema
                    if (!jsonSchema) {
                        // No schema provided: accept an array of arbitrary records
                        zodObj[sch.key] = z.array(z.record(z.any())).describe(fieldDescription)
                        break
                    }
                    try {
                        // Parse the user-supplied JSON schema and build a matching
                        // Zod item schema, then wrap it in an array
                        const schemaObj = JSON.parse(jsonSchema)
                        const itemSchema = this.createZodSchemaFromJSON(schemaObj)
                        zodObj[sch.key] = z.array(itemSchema).describe(fieldDescription)
                    } catch (err) {
                        console.error(`Error parsing JSON schema for ${sch.key}:`, err)
                        // Fallback to generic array of records
                        zodObj[sch.key] = z.array(z.record(z.any())).describe(fieldDescription)
                    }
                    break
                }
            }
        }

        const structuredOutput = z.object(zodObj)

        // @ts-ignore
        return llmNodeInstance.withStructuredOutput(structuredOutput)
    } catch (exception) {
        console.error(exception)
        return llmNodeInstance
    }
}
/**
 * Streams tokens from the LLM to the client as they arrive and accumulates
 * all chunks into a single AIMessageChunk response. Multi-part message
 * content is flattened into plain text before being emitted or returned.
 */
private async handleStreamingResponse(
    sseStreamer: IServerSideEventStreamer | undefined,
    llmNodeInstance: BaseChatModel,
    messages: BaseMessageLike[],
    chatId: string,
    abortController: AbortController
): Promise<AIMessageChunk> {
    // Flattens message content: multi-part contents are joined, everything else stringified
    const toText = (content: AIMessageChunk['content']): string =>
        Array.isArray(content) && content.length > 0
            ? (content as MessageContentText[]).map((item) => item.text).join('')
            : content.toString()

    let response = new AIMessageChunk('')

    try {
        const stream = await llmNodeInstance.stream(messages, { signal: abortController?.signal })
        for await (const chunk of stream) {
            // Emit each token chunk to the client as soon as it arrives
            sseStreamer?.streamTokenEvent(chatId, toText(chunk.content))
            response = response.concat(chunk)
        }
    } catch (error) {
        console.error('Error during streaming:', error)
        throw error
    }

    // Normalize multi-part content on the accumulated response into one string
    if (Array.isArray(response.content) && response.content.length > 0) {
        response.content = (response.content as MessageContentText[]).map((item) => item.text).join('')
    }

    return response
}
/**
 * Builds the node output payload: the response content, timing metadata and,
 * when the model produced them, the tool calls and token usage metadata.
 */
private prepareOutputObject(
    response: AIMessageChunk,
    finalResponse: string,
    startTime: number,
    endTime: number,
    timeDelta: number
): any {
    return {
        content: finalResponse,
        timeMetadata: {
            start: startTime,
            end: endTime,
            delta: timeDelta
        },
        // Optional fields are only attached when present on the response
        ...(response.tool_calls ? { calledTools: response.tool_calls } : {}),
        ...(response.usage_metadata ? { usageMetadata: response.usage_metadata } : {})
    }
}
/**
 * Emits the post-response SSE events in order: called tools (if any),
 * usage metadata (if any), then the end-of-stream marker.
 */
private sendStreamingEvents(options: ICommonObject, chatId: string, response: AIMessageChunk): void {
    const sseStreamer = options.sseStreamer as IServerSideEventStreamer
    const { tool_calls: calledTools, usage_metadata: usageMetadata } = response

    if (calledTools) {
        sseStreamer.streamCalledToolsEvent(chatId, calledTools)
    }
    if (usageMetadata) {
        sseStreamer.streamUsageMetadataEvent(chatId, usageMetadata)
    }
    sseStreamer.streamEndEvent(chatId)
}
/**
 * Creates a Zod schema from a JSON schema object.
 *
 * Each property may be described by a `{ type, description?, items?,
 * properties?, optional? }` definition ('string' | 'number' | 'boolean' |
 * 'array' | 'object'), by an array literal (item schema inferred from its
 * first element), or by a nested plain object (recursed into). Unknown
 * types fall back to `z.any()`.
 *
 * @param jsonSchema The JSON schema object
 * @returns A Zod schema
 */
private createZodSchemaFromJSON(jsonSchema: any): z.ZodTypeAny {
    // If the schema is an object with properties, create an object schema
    if (typeof jsonSchema === 'object' && jsonSchema !== null) {
        const schemaObj: Record<string, z.ZodTypeAny> = {}

        // Process each property in the schema
        for (const [key, value] of Object.entries(jsonSchema)) {
            if (value === null) {
                // Handle null values
                schemaObj[key] = z.null()
            } else if (typeof value === 'object' && !Array.isArray(value)) {
                // Check if the property has a type definition
                if ('type' in value) {
                    const type = value.type as string
                    const description = ('description' in value ? (value.description as string) : '') || ''

                    // Create the appropriate Zod type based on the type property
                    if (type === 'string') {
                        schemaObj[key] = z.string().describe(description)
                    } else if (type === 'number') {
                        schemaObj[key] = z.number().describe(description)
                    } else if (type === 'boolean') {
                        schemaObj[key] = z.boolean().describe(description)
                    } else if (type === 'array') {
                        // If it's an array type, check if items is defined
                        if ('items' in value && value.items) {
                            const itemSchema = this.createZodSchemaFromJSON(value.items)
                            schemaObj[key] = z.array(itemSchema).describe(description)
                        } else {
                            // Default to array of any if items not specified
                            schemaObj[key] = z.array(z.any()).describe(description)
                        }
                    } else if (type === 'object') {
                        // If it's an object type, check if properties is defined
                        if ('properties' in value && value.properties) {
                            const nestedSchema = this.createZodSchemaFromJSON(value.properties)
                            schemaObj[key] = nestedSchema.describe(description)
                        } else {
                            // Default to record of any if properties not specified
                            schemaObj[key] = z.record(z.any()).describe(description)
                        }
                    } else {
                        // Default to any for unknown types
                        schemaObj[key] = z.any().describe(description)
                    }

                    // Check if the property is optional
                    if ('optional' in value && value.optional === true) {
                        schemaObj[key] = schemaObj[key].optional()
                    }
                } else {
                    // Nested object without a type property — recurse.
                    // (A former `else if (Array.isArray(value))` branch here was
                    // unreachable dead code: this enclosing branch already
                    // requires !Array.isArray(value). It has been removed.)
                    schemaObj[key] = this.createZodSchemaFromJSON(value)
                }
            } else if (Array.isArray(value)) {
                // Array values
                if (value.length > 0) {
                    // If the array has items, recursively create a schema for the first item
                    const itemSchema = this.createZodSchemaFromJSON(value[0])
                    schemaObj[key] = z.array(itemSchema)
                } else {
                    // Empty array, allow any array
                    schemaObj[key] = z.array(z.any())
                }
            } else {
                // For primitive values (which shouldn't be in the schema directly)
                // Use the corresponding Zod type
                if (typeof value === 'string') {
                    schemaObj[key] = z.string()
                } else if (typeof value === 'number') {
                    schemaObj[key] = z.number()
                } else if (typeof value === 'boolean') {
                    schemaObj[key] = z.boolean()
                } else {
                    schemaObj[key] = z.any()
                }
            }
        }

        return z.object(schemaObj)
    }

    // Fallback to any for unknown types
    return z.any()
}
}
module.exports = { nodeClass: LLM_Agentflow }

View File

@ -0,0 +1,94 @@
import { ICommonObject, INode, INodeData, INodeOptionsValue, INodeParams } from '../../../src/Interface'
/**
 * Agent Flows node that jumps execution back to a previously executed node,
 * up to a configurable maximum number of iterations.
 */
class Loop_Agentflow implements INode {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    color: string
    hideOutput: boolean
    hint: string
    baseClasses: string[]
    documentation?: string
    credential: INodeParams
    inputs: INodeParams[]

    constructor() {
        this.label = 'Loop'
        this.name = 'loopAgentflow'
        this.version = 1.0
        this.type = 'Loop'
        this.category = 'Agent Flows'
        this.description = 'Loop back to a previous node'
        this.baseClasses = [this.type]
        this.color = '#FFA07A'
        this.hint = 'Make sure to have memory enabled in the LLM/Agent node to retain the chat history'
        this.hideOutput = true
        this.inputs = [
            {
                label: 'Loop Back To',
                name: 'loopBackToNode',
                type: 'asyncOptions',
                loadMethod: 'listPreviousNodes',
                freeSolo: true
            },
            {
                label: 'Max Loop Count',
                name: 'maxLoopCount',
                type: 'number',
                default: 5
            }
        ]
    }

    //@ts-ignore
    loadMethods = {
        /**
         * Lists nodes preceding this one as loop targets. The option value is
         * encoded as "<nodeId>-<label>"; run() splits on the FIRST dash only.
         */
        async listPreviousNodes(_: INodeData, options: ICommonObject): Promise<INodeOptionsValue[]> {
            const previousNodes = options.previousNodes as ICommonObject[]
            const returnOptions: INodeOptionsValue[] = []
            for (const node of previousNodes) {
                returnOptions.push({
                    label: node.label,
                    name: `${node.id}-${node.label}`,
                    description: node.id
                })
            }
            return returnOptions
        }
    }

    /**
     * Resolves the target node id/label and returns the loop directive
     * (target node id + max iteration count) along with the current state.
     */
    async run(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
        const loopBackToNode = nodeData.inputs?.loopBackToNode as string
        const _maxLoopCount = nodeData.inputs?.maxLoopCount as string

        const state = options.agentflowRuntime?.state as ICommonObject

        // Option value is "<nodeId>-<label>". Split on the FIRST dash only so
        // labels that themselves contain dashes are not truncated (the previous
        // split('-')[1] kept only the first label segment, and yielded
        // "undefined" when a bare node id without a dash was typed in).
        const separatorIndex = loopBackToNode.indexOf('-')
        const loopBackToNodeId = separatorIndex === -1 ? loopBackToNode : loopBackToNode.substring(0, separatorIndex)
        const loopBackToNodeLabel = separatorIndex === -1 ? loopBackToNode : loopBackToNode.substring(separatorIndex + 1)

        // Compute once; default to 5 iterations when not configured
        const maxLoopCount = _maxLoopCount ? parseInt(_maxLoopCount, 10) : 5

        const data = {
            nodeID: loopBackToNodeId,
            maxLoopCount
        }

        const returnOutput = {
            id: nodeData.id,
            name: this.name,
            input: data,
            output: {
                content: 'Loop back to ' + `${loopBackToNodeLabel} (${loopBackToNodeId})`,
                nodeID: loopBackToNodeId,
                maxLoopCount
            },
            state
        }

        return returnOutput
    }
}
module.exports = { nodeClass: Loop_Agentflow }

View File

@ -0,0 +1,227 @@
import {
ICommonObject,
IDatabaseEntity,
INode,
INodeData,
INodeOptionsValue,
INodeParams,
IServerSideEventStreamer
} from '../../../src/Interface'
import { updateFlowState } from '../utils'
import { DataSource } from 'typeorm'
import { BaseRetriever } from '@langchain/core/retrievers'
import { Document } from '@langchain/core/documents'
interface IKnowledgeBase {
documentStore: string
}
/**
 * Agent Flows node that queries one or more upserted document stores and
 * returns the retrieved documents as text (optionally with metadata).
 */
class Retriever_Agentflow implements INode {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    color: string
    hideOutput: boolean
    hint: string
    baseClasses: string[]
    documentation?: string
    credential: INodeParams
    inputs: INodeParams[]

    constructor() {
        this.label = 'Retriever'
        this.name = 'retrieverAgentflow'
        this.version = 1.0
        this.type = 'Retriever'
        this.category = 'Agent Flows'
        this.description = 'Retrieve information from vector database'
        this.baseClasses = [this.type]
        this.color = '#b8bedd'
        this.inputs = [
            {
                label: 'Knowledge (Document Stores)',
                name: 'retrieverKnowledgeDocumentStores',
                type: 'array',
                description: 'Document stores to retrieve information from. Document stores must be upserted in advance.',
                array: [
                    {
                        label: 'Document Store',
                        name: 'documentStore',
                        type: 'asyncOptions',
                        loadMethod: 'listStores'
                    }
                ]
            },
            {
                label: 'Retriever Query',
                name: 'retrieverQuery',
                type: 'string',
                placeholder: 'Enter your query here',
                rows: 4,
                acceptVariable: true
            },
            {
                label: 'Output Format',
                name: 'outputFormat',
                type: 'options',
                options: [
                    { label: 'Text', name: 'text' },
                    { label: 'Text with Metadata', name: 'textWithMetadata' }
                ],
                default: 'text'
            },
            {
                label: 'Update Flow State',
                name: 'retrieverUpdateState',
                description: 'Update runtime state during the execution of the workflow',
                type: 'array',
                optional: true,
                acceptVariable: true,
                array: [
                    {
                        label: 'Key',
                        name: 'key',
                        type: 'asyncOptions',
                        loadMethod: 'listRuntimeStateKeys',
                        freeSolo: true
                    },
                    {
                        label: 'Value',
                        name: 'value',
                        type: 'string',
                        acceptVariable: true,
                        acceptNodeOutputAsVariable: true
                    }
                ]
            }
        ]
    }

    //@ts-ignore
    loadMethods = {
        /**
         * Lists the runtime state keys declared on the Start node.
         */
        async listRuntimeStateKeys(_: INodeData, options: ICommonObject): Promise<INodeOptionsValue[]> {
            const previousNodes = (options.previousNodes as ICommonObject[]) ?? []
            const startAgentflowNode = previousNodes.find((node) => node.name === 'startAgentflow')
            const state = startAgentflowNode?.inputs?.startState as ICommonObject[]
            // Guard: flows without a Start node / start state previously crashed here
            if (!Array.isArray(state)) return []
            return state.map((item) => ({ label: item.key, name: item.key }))
        },
        /**
         * Lists document stores that have been fully upserted and can be queried.
         */
        async listStores(_: INodeData, options: ICommonObject): Promise<INodeOptionsValue[]> {
            const returnData: INodeOptionsValue[] = []

            const appDataSource = options.appDataSource as DataSource
            const databaseEntities = options.databaseEntities as IDatabaseEntity

            if (appDataSource === undefined || !appDataSource) {
                return returnData
            }

            const stores = await appDataSource.getRepository(databaseEntities['DocumentStore']).find()
            for (const store of stores) {
                if (store.status === 'UPSERTED') {
                    const obj = {
                        name: `${store.id}:${store.name}`,
                        label: store.name,
                        description: store.description
                    }
                    returnData.push(obj)
                }
            }
            return returnData
        }
    }

    /**
     * Executes the retriever node: queries every configured document store with
     * the retriever query (falling back to the chat input), formats the combined
     * documents per the selected output format, streams the result when this is
     * the last node, and applies any flow-state updates.
     *
     * @throws Error (with the underlying message preserved) on retrieval failure
     */
    async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<any> {
        const retrieverQuery = nodeData.inputs?.retrieverQuery as string
        const outputFormat = nodeData.inputs?.outputFormat as string
        const _retrieverUpdateState = nodeData.inputs?.retrieverUpdateState

        const state = options.agentflowRuntime?.state as ICommonObject

        const chatId = options.chatId as string
        const isLastNode = options.isLastNode as boolean
        const isStreamable = isLastNode && options.sseStreamer !== undefined

        const abortController = options.abortController as AbortController

        // Extract knowledge
        let docs: Document[] = []
        const knowledgeBases = nodeData.inputs?.retrieverKnowledgeDocumentStores as IKnowledgeBase[]
        if (knowledgeBases && knowledgeBases.length > 0) {
            for (const knowledgeBase of knowledgeBases) {
                // Option value is "<storeId>:<storeName>"; only the id is needed
                const [storeId] = knowledgeBase.documentStore.split(':')

                const docStoreVectorInstanceFilePath = options.componentNodes['documentStoreVS'].filePath as string
                const docStoreVectorModule = await import(docStoreVectorInstanceFilePath)
                const newDocStoreVectorInstance = new docStoreVectorModule.nodeClass()
                const docStoreVectorInstance = (await newDocStoreVectorInstance.init(
                    {
                        ...nodeData,
                        inputs: {
                            ...nodeData.inputs,
                            selectedStore: storeId
                        },
                        outputs: {
                            output: 'retriever'
                        }
                    },
                    '',
                    options
                )) as BaseRetriever

                // Accumulate documents across stores. Previously this was a plain
                // assignment, so each iteration overwrote the earlier results and
                // only the LAST document store was ever used.
                const retrievedDocs = await docStoreVectorInstance.invoke(retrieverQuery || input, {
                    signal: abortController?.signal
                })
                docs = [...docs, ...retrievedDocs]
            }
        }

        const docsText = docs.map((doc) => doc.pageContent).join('\n')

        // Update flow state if needed
        let newState = { ...state }
        if (_retrieverUpdateState && Array.isArray(_retrieverUpdateState) && _retrieverUpdateState.length > 0) {
            newState = updateFlowState(state, _retrieverUpdateState)
        }

        try {
            let finalOutput = ''
            if (outputFormat === 'text') {
                finalOutput = docsText
            } else if (outputFormat === 'textWithMetadata') {
                finalOutput = JSON.stringify(docs, null, 2)
            }

            if (isStreamable) {
                const sseStreamer: IServerSideEventStreamer = options.sseStreamer
                sseStreamer.streamTokenEvent(chatId, finalOutput)
            }

            // Process template variables in state
            if (newState && Object.keys(newState).length > 0) {
                for (const key in newState) {
                    // NOTE(review): assumes state values are never null/undefined — confirm upstream
                    if (newState[key].toString().includes('{{ output }}')) {
                        newState[key] = finalOutput
                    }
                }
            }

            const returnOutput = {
                id: nodeData.id,
                name: this.name,
                input: {
                    question: retrieverQuery || input
                },
                output: {
                    content: finalOutput
                },
                state: newState
            }

            return returnOutput
        } catch (e) {
            // Preserve the underlying message; `new Error(e)` on a non-string
            // argument produced the useless "[object Object]"
            throw new Error(e instanceof Error ? e.message : String(e))
        }
    }
}
module.exports = { nodeClass: Retriever_Agentflow }

View File

@ -0,0 +1,217 @@
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
/**
 * Entry node of an agentflow. Declares how the flow is started (chat input or
 * a generated form), the optional ephemeral-memory flag, and the initial
 * runtime flow state.
 */
class Start_Agentflow implements INode {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    color: string
    hideInput: boolean
    baseClasses: string[]
    documentation?: string
    credential: INodeParams
    inputs: INodeParams[]

    constructor() {
        this.label = 'Start'
        this.name = 'startAgentflow'
        this.version = 1.0
        this.type = 'Start'
        this.category = 'Agent Flows'
        this.description = 'Starting point of the agentflow'
        this.baseClasses = [this.type]
        this.color = '#7EE787'
        this.hideInput = true
        this.inputs = [
            {
                label: 'Input Type',
                name: 'startInputType',
                type: 'options',
                options: [
                    {
                        label: 'Chat Input',
                        name: 'chatInput',
                        description: 'Start the conversation with chat input'
                    },
                    {
                        label: 'Form Input',
                        name: 'formInput',
                        description: 'Start the workflow with form inputs'
                    }
                ],
                default: 'chatInput'
            },
            {
                label: 'Form Title',
                name: 'formTitle',
                type: 'string',
                placeholder: 'Please Fill Out The Form',
                show: {
                    startInputType: 'formInput'
                }
            },
            {
                label: 'Form Description',
                name: 'formDescription',
                type: 'string',
                placeholder: 'Complete all fields below to continue',
                show: {
                    startInputType: 'formInput'
                }
            },
            {
                label: 'Form Input Types',
                name: 'formInputTypes',
                description: 'Specify the type of form input',
                type: 'array',
                show: {
                    startInputType: 'formInput'
                },
                array: [
                    {
                        label: 'Type',
                        name: 'type',
                        type: 'options',
                        options: [
                            {
                                label: 'String',
                                name: 'string'
                            },
                            {
                                label: 'Number',
                                name: 'number'
                            },
                            {
                                label: 'Boolean',
                                name: 'boolean'
                            },
                            {
                                label: 'Options',
                                name: 'options'
                            }
                        ],
                        default: 'string'
                    },
                    {
                        label: 'Label',
                        name: 'label',
                        type: 'string',
                        placeholder: 'Label for the input'
                    },
                    {
                        label: 'Variable Name',
                        name: 'name',
                        type: 'string',
                        placeholder: 'Variable name for the input (must be camel case)',
                        description: 'Variable name must be camel case. For example: firstName, lastName, etc.'
                    },
                    {
                        label: 'Add Options',
                        name: 'addOptions',
                        type: 'array',
                        show: {
                            'formInputTypes[$index].type': 'options'
                        },
                        array: [
                            {
                                label: 'Option',
                                name: 'option',
                                type: 'string'
                            }
                        ]
                    }
                ]
            },
            {
                label: 'Ephemeral Memory',
                name: 'startEphemeralMemory',
                type: 'boolean',
                description: 'Start fresh for every execution without past chat history',
                optional: true
            },
            {
                label: 'Flow State',
                name: 'startState',
                description: 'Runtime state during the execution of the workflow',
                type: 'array',
                optional: true,
                array: [
                    {
                        label: 'Key',
                        name: 'key',
                        type: 'string',
                        placeholder: 'Foo'
                    },
                    {
                        label: 'Value',
                        name: 'value',
                        type: 'string',
                        placeholder: 'Bar',
                        optional: true
                    }
                ]
            }
        ]
    }

    /**
     * Builds the initial execution record: echoes the chat question or the
     * form definition/values, flags ephemeral memory, and materializes the
     * configured key/value pairs into the initial flow state.
     *
     * @throws Error('Invalid Flow State') when the configured state is not valid JSON
     */
    async run(nodeData: INodeData, input: string | Record<string, any>, options: ICommonObject): Promise<any> {
        const rawFlowState = nodeData.inputs?.startState as string
        const startInputType = nodeData.inputs?.startInputType as string
        const startEphemeralMemory = nodeData.inputs?.startEphemeralMemory as boolean

        // The state may arrive as a JSON string or already-parsed array
        let parsedState: any[] = []
        if (rawFlowState) {
            try {
                parsedState = typeof rawFlowState === 'string' ? JSON.parse(rawFlowState) : rawFlowState
            } catch (error) {
                throw new Error('Invalid Flow State')
            }
        }

        // Materialize [{ key, value }] pairs into a flat record (last key wins)
        const flowState: Record<string, any> = Object.fromEntries(parsedState.map((item: any) => [item.key, item.value]))

        const inputData: ICommonObject = {}
        const outputData: ICommonObject = {}

        switch (startInputType) {
            case 'chatInput': {
                inputData.question = input
                outputData.question = input
                break
            }
            case 'formInput': {
                inputData.form = {
                    title: nodeData.inputs?.formTitle,
                    description: nodeData.inputs?.formDescription,
                    inputs: nodeData.inputs?.formInputTypes
                }
                // Prefer form values captured by the runtime over the raw input
                const runtimeForm = options.agentflowRuntime?.form
                outputData.form = runtimeForm && Object.keys(runtimeForm).length ? runtimeForm : input
                break
            }
        }

        if (startEphemeralMemory) {
            outputData.ephemeralMemory = true
        }

        return {
            id: nodeData.id,
            name: this.name,
            input: inputData,
            output: outputData,
            state: flowState
        }
    }
}
module.exports = { nodeClass: Start_Agentflow }

View File

@ -0,0 +1,42 @@
import { INode, INodeParams } from '../../../src/Interface'
/**
 * Canvas-only annotation node. Holds a free-text note for documenting an
 * agent flow; it performs no work at execution time.
 */
class StickyNote_Agentflow implements INode {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    color: string
    tags: string[]
    baseClasses: string[]
    inputs: INodeParams[]

    constructor() {
        // Static metadata shown in the canvas palette
        this.label = 'Sticky Note'
        this.name = 'stickyNoteAgentflow'
        this.version = 1.0
        this.type = 'StickyNote'
        this.color = '#fee440'
        this.category = 'Agent Flows'
        this.description = 'Add notes to the agent flow'
        this.baseClasses = [this.type]

        // Single optional free-text field holding the note content
        this.inputs = [
            {
                label: '',
                name: 'note',
                type: 'string',
                rows: 1,
                placeholder: 'Type something here',
                optional: true
            }
        ]
    }

    /** Sticky notes are purely cosmetic: execution is a no-op. */
    async run(): Promise<any> {
        return undefined
    }
}
module.exports = { nodeClass: StickyNote_Agentflow }

View File

@ -0,0 +1,304 @@
import { ICommonObject, INode, INodeData, INodeOptionsValue, INodeParams, IServerSideEventStreamer } from '../../../src/Interface'
import { updateFlowState } from '../utils'
import { Tool } from '@langchain/core/tools'
import { ARTIFACTS_PREFIX } from '../../../src/agents'
import zodToJsonSchema from 'zod-to-json-schema'
/** One row of the "Tool Input Arguments" array input: maps a tool schema argument name to its (possibly templated) value. */
interface IToolInputArgs {
    inputArgName: string
    inputArgValue: string
}
/**
 * Agentflow node that executes a single Tool component with configured
 * arguments, outside of an LLM tool-calling loop.
 */
class Tool_Agentflow implements INode {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    color: string
    hideOutput: boolean
    hint: string
    baseClasses: string[]
    documentation?: string
    credential: INodeParams
    inputs: INodeParams[]

    constructor() {
        this.label = 'Tool'
        this.name = 'toolAgentflow'
        this.version = 1.0
        this.type = 'Tool'
        this.category = 'Agent Flows'
        this.description = 'Tools allow LLM to interact with external systems'
        this.baseClasses = [this.type]
        this.color = '#d4a373'
        this.inputs = [
            {
                label: 'Tool',
                name: 'selectedTool',
                type: 'asyncOptions',
                loadMethod: 'listTools',
                loadConfig: true
            },
            {
                label: 'Tool Input Arguments',
                name: 'toolInputArgs',
                type: 'array',
                acceptVariable: true,
                refresh: true,
                array: [
                    {
                        label: 'Input Argument Name',
                        name: 'inputArgName',
                        type: 'asyncOptions',
                        loadMethod: 'listToolInputArgs',
                        refresh: true
                    },
                    {
                        label: 'Input Argument Value',
                        name: 'inputArgValue',
                        type: 'string',
                        acceptVariable: true
                    }
                ],
                show: {
                    selectedTool: '.+'
                }
            },
            {
                label: 'Update Flow State',
                name: 'toolUpdateState',
                description: 'Update runtime state during the execution of the workflow',
                type: 'array',
                optional: true,
                acceptVariable: true,
                array: [
                    {
                        label: 'Key',
                        name: 'key',
                        type: 'asyncOptions',
                        loadMethod: 'listRuntimeStateKeys',
                        freeSolo: true
                    },
                    {
                        label: 'Value',
                        name: 'value',
                        type: 'string',
                        acceptVariable: true,
                        acceptNodeOutputAsVariable: true
                    }
                ]
            }
        ]
    }

    //@ts-ignore
    loadMethods = {
        /** List all usable Tool component nodes (LlamaIndex-tagged and legacy wrapper tools are excluded). */
        async listTools(_: INodeData, options: ICommonObject): Promise<INodeOptionsValue[]> {
            const componentNodes = options.componentNodes as {
                [key: string]: INode
            }

            const removeTools = ['chainTool', 'retrieverTool', 'webBrowser']

            const returnOptions: INodeOptionsValue[] = []
            for (const nodeName in componentNodes) {
                const componentNode = componentNodes[nodeName]
                if (componentNode.category === 'Tools' || componentNode.category === 'Tools (MCP)') {
                    if (componentNode.tags?.includes('LlamaIndex')) {
                        continue
                    }
                    if (removeTools.includes(nodeName)) {
                        continue
                    }
                    returnOptions.push({
                        label: componentNode.label,
                        name: nodeName,
                        imageSrc: componentNode.icon
                    })
                }
            }
            return returnOptions
        },

        /**
         * Instantiate the selected tool and expose its input schema property
         * names as options. Returns [] when no tool is selected or the schema
         * cannot be derived.
         */
        async listToolInputArgs(nodeData: INodeData, options: ICommonObject): Promise<INodeOptionsValue[]> {
            const currentNode = options.currentNode as ICommonObject
            const selectedTool = currentNode?.inputs?.selectedTool as string
            // Guard: nothing selected yet (or unknown component) — no arguments to list.
            // Without this, `options.componentNodes[selectedTool].filePath` throws outside the try.
            if (!selectedTool || !options.componentNodes?.[selectedTool]) {
                return []
            }
            // Guard: config may be absent before the user has configured the tool
            const selectedToolConfig = (currentNode?.inputs?.selectedToolConfig as ICommonObject) ?? {}

            const nodeInstanceFilePath = options.componentNodes[selectedTool].filePath as string
            const nodeModule = await import(nodeInstanceFilePath)
            const newToolNodeInstance = new nodeModule.nodeClass()

            const newNodeData = {
                ...nodeData,
                credential: selectedToolConfig['FLOWISE_CREDENTIAL_ID'],
                inputs: {
                    ...nodeData.inputs,
                    ...selectedToolConfig
                }
            }

            try {
                const toolInstance = (await newToolNodeInstance.init(newNodeData, '', options)) as Tool

                let toolInputArgs: ICommonObject = {}

                if (Array.isArray(toolInstance)) {
                    // Combine schemas from all tools in the array
                    const allProperties = toolInstance.reduce((acc, tool) => {
                        if (tool?.schema) {
                            const schema: Record<string, any> = zodToJsonSchema(tool.schema)
                            return { ...acc, ...(schema.properties || {}) }
                        }
                        return acc
                    }, {})
                    toolInputArgs = { properties: allProperties }
                } else {
                    // Handle single tool instance
                    toolInputArgs = toolInstance.schema ? zodToJsonSchema(toolInstance.schema) : {}
                }

                if (toolInputArgs && Object.keys(toolInputArgs).length > 0) {
                    delete toolInputArgs.$schema
                }

                return Object.keys(toolInputArgs.properties || {}).map((item) => ({
                    label: item,
                    name: item,
                    description: toolInputArgs.properties[item].description
                }))
            } catch (e) {
                return []
            }
        },

        /** Offer the Start node's declared state keys as options for the "Update Flow State" rows. */
        async listRuntimeStateKeys(_: INodeData, options: ICommonObject): Promise<INodeOptionsValue[]> {
            // Guard: previousNodes / startState may be absent while the flow is still being drafted
            const previousNodes = (options.previousNodes as ICommonObject[]) ?? []
            const startAgentflowNode = previousNodes.find((node) => node.name === 'startAgentflow')
            const state = (startAgentflowNode?.inputs?.startState as ICommonObject[]) ?? []
            return state.map((item) => ({ label: item.key, name: item.key }))
        }
    }

    /**
     * Execute the selected tool with the configured arguments.
     * Streams the output to the client when this is the last node, splits off
     * any artifacts appended by the tool, and applies "Update Flow State" rows
     * (supporting the `{{ output }}` template) before returning.
     */
    async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<any> {
        const selectedTool = nodeData.inputs?.selectedTool as string
        // Guards: both config and argument rows may be absent
        const selectedToolConfig = (nodeData.inputs?.selectedToolConfig as ICommonObject) ?? {}
        const toolInputArgs = (nodeData.inputs?.toolInputArgs as IToolInputArgs[]) ?? []
        const _toolUpdateState = nodeData.inputs?.toolUpdateState

        const state = options.agentflowRuntime?.state as ICommonObject
        const chatId = options.chatId as string
        const isLastNode = options.isLastNode as boolean
        // Only the last node of the flow streams its output to the client
        const isStreamable = isLastNode && options.sseStreamer !== undefined
        const abortController = options.abortController as AbortController

        // Update flow state if needed
        let newState = { ...state }
        if (_toolUpdateState && Array.isArray(_toolUpdateState) && _toolUpdateState.length > 0) {
            newState = updateFlowState(state, _toolUpdateState)
        }

        if (!selectedTool) {
            throw new Error('Tool not selected')
        }

        // Dynamically load and instantiate the selected tool component
        const nodeInstanceFilePath = options.componentNodes[selectedTool].filePath as string
        const nodeModule = await import(nodeInstanceFilePath)
        const newToolNodeInstance = new nodeModule.nodeClass()
        const newNodeData = {
            ...nodeData,
            credential: selectedToolConfig['FLOWISE_CREDENTIAL_ID'],
            inputs: {
                ...nodeData.inputs,
                ...selectedToolConfig
            }
        }
        const toolInstance = (await newToolNodeInstance.init(newNodeData, '', options)) as Tool | Tool[]

        // Map configured argument rows onto the tool call payload
        const toolCallArgs: Record<string, any> = {}
        for (const item of toolInputArgs) {
            toolCallArgs[item.inputArgName] = item.inputArgValue
        }

        const flowConfig = {
            sessionId: options.sessionId,
            chatId: options.chatId,
            input: input,
            state: options.agentflowRuntime?.state
        }

        try {
            let toolOutput: string
            if (Array.isArray(toolInstance)) {
                // Execute all tools and combine their outputs
                const outputs = await Promise.all(
                    toolInstance.map((tool) =>
                        //@ts-ignore
                        tool.call(toolCallArgs, { signal: abortController?.signal }, undefined, flowConfig)
                    )
                )
                toolOutput = outputs.join('\n')
            } else {
                //@ts-ignore
                toolOutput = await toolInstance.call(toolCallArgs, { signal: abortController?.signal }, undefined, flowConfig)
            }

            let parsedArtifacts

            // Extract artifacts if present
            if (typeof toolOutput === 'string' && toolOutput.includes(ARTIFACTS_PREFIX)) {
                const [output, artifact] = toolOutput.split(ARTIFACTS_PREFIX)
                toolOutput = output
                try {
                    parsedArtifacts = JSON.parse(artifact)
                } catch (e) {
                    console.error('Error parsing artifacts from tool:', e)
                }
            }

            if (typeof toolOutput === 'object') {
                toolOutput = JSON.stringify(toolOutput, null, 2)
            }

            if (isStreamable) {
                const sseStreamer: IServerSideEventStreamer = options.sseStreamer
                sseStreamer.streamTokenEvent(chatId, toolOutput)
            }

            // Resolve `{{ output }}` templates in state values with the tool output.
            // Only string values are checked: non-strings cannot contain the template,
            // and the previous `.toString()` call threw on null/undefined values.
            if (newState && Object.keys(newState).length > 0) {
                for (const key in newState) {
                    if (typeof newState[key] === 'string' && newState[key].includes('{{ output }}')) {
                        newState[key] = toolOutput
                    }
                }
            }

            return {
                id: nodeData.id,
                name: this.name,
                input: {
                    toolInputArgs: toolInputArgs,
                    selectedTool: selectedTool
                },
                output: {
                    content: toolOutput,
                    artifacts: parsedArtifacts
                },
                state: newState
            }
        } catch (e) {
            // Rethrow the original error: `new Error(e)` stringified non-Error values
            // to "[object Object]" and discarded the stack of Error values
            throw e instanceof Error ? e : new Error(String(e))
        }
    }
}
module.exports = { nodeClass: Tool_Agentflow }

View File

@ -0,0 +1,75 @@
/** Prompt for summary-buffer style memory: folds a conversation chunk into a rolling summary ({conversation} is interpolated at runtime). */
export const DEFAULT_SUMMARIZER_TEMPLATE = `Progressively summarize the conversation provided and return a new summary.
EXAMPLE:
Human: Why do you think artificial intelligence is a force for good?
AI: Because artificial intelligence will help humans reach their full potential.
New summary:
The human asks what the AI thinks of artificial intelligence. The AI thinks artificial intelligence is a force for good because it will help humans reach their full potential.
END OF EXAMPLE
Conversation:
{conversation}
New summary:`

/** Default instruction for the Human Input node: recap the conversation, restate the last assistant message, then ask whether to proceed. */
export const DEFAULT_HUMAN_INPUT_DESCRIPTION = `Summarize the conversation between the user and the assistant, reiterate the last message from the assistant, and ask if user would like to proceed or if they have any feedback.
- Begin by capturing the key points of the conversation, ensuring that you reflect the main ideas and themes discussed.
- Then, clearly reproduce the last message sent by the assistant to maintain continuity. Make sure the whole message is reproduced.
- Finally, ask the user if they would like to proceed, or provide any feedback on the last assistant message
## Output Format The output should be structured in three parts in text:
- A summary of the conversation (1-3 sentences).
- The last assistant message (exactly as it appeared).
- Ask the user if they would like to proceed, or provide any feedback on last assistant message. No other explanation and elaboration is needed.
`

/** HTML rendering of DEFAULT_HUMAN_INPUT_DESCRIPTION for display in the UI. */
export const DEFAULT_HUMAN_INPUT_DESCRIPTION_HTML = `<p>Summarize the conversation between the user and the assistant, reiterate the last message from the assistant, and ask if user would like to proceed or if they have any feedback. </p>
<ul>
<li>Begin by capturing the key points of the conversation, ensuring that you reflect the main ideas and themes discussed.</li>
<li>Then, clearly reproduce the last message sent by the assistant to maintain continuity. Make sure the whole message is reproduced.</li>
<li>Finally, ask the user if they would like to proceed, or provide any feedback on the last assistant message</li>
</ul>
<h2 id="output-format-the-output-should-be-structured-in-three-parts-">Output Format The output should be structured in three parts in text:</h2>
<ul>
<li>A summary of the conversation (1-3 sentences).</li>
<li>The last assistant message (exactly as it appeared).</li>
<li>Ask the user if they would like to proceed, or provide any feedback on last assistant message. No other explanation and elaboration is needed.</li>
</ul>
`

/** System prompt for the Condition Agent node: pick exactly one matching scenario (or "default") and reply as JSON only. */
export const CONDITION_AGENT_SYSTEM_PROMPT = `You are part of a multi-agent system designed to make agent coordination and execution easy. Your task is to analyze the given input and select one matching scenario from a provided set of scenarios. If none of the scenarios match the input, you should return "default."
- **Input**: A string representing the user's query or message.
- **Scenarios**: A list of predefined scenarios that relate to the input.
- **Instruction**: Determine if the input fits any of the scenarios.
## Steps
1. **Read the input string** and the list of scenarios.
2. **Analyze the content of the input** to identify its main topic or intention.
3. **Compare the input with each scenario**:
   - If a scenario matches the main topic of the input, select that scenario.
   - If no scenarios match, prepare to output "\`\`\`json\n{"output": "default"}\`\`\`"
4. **Output the result**: If a match is found, return the corresponding scenario in JSON; otherwise, return "\`\`\`json\n{"output": "default"}\`\`\`"
## Output Format
Output should be a JSON object that either names the matching scenario or returns "\`\`\`json\n{"output": "default"}\`\`\`" if no scenarios match. No explanation is needed.
## Examples
1. **Input**: {"input": "Hello", "scenarios": ["user is asking about AI", "default"], "instruction": "Your task is to check and see if user is asking topic about AI"}
   **Output**: "\`\`\`json\n{"output": "default"}\`\`\`"
2. **Input**: {"input": "What is AIGC?", "scenarios": ["user is asking about AI", "default"], "instruction": "Your task is to check and see if user is asking topic about AI"}
   **Output**: "\`\`\`json\n{"output": "user is asking about AI"}\`\`\`"
3. **Input**: {"input": "Can you explain deep learning?", "scenarios": ["user is interested in AI topics", "default"], "instruction": "Determine if the user is interested in learning about AI"}
   **Output**: "\`\`\`json\n{"output": "user is interested in AI topics"}\`\`\`"
## Note
- Ensure that the input scenarios align well with potential user queries for accurate matching
- DO NOT include anything other than the JSON in your response.
`

View File

@ -0,0 +1,407 @@
import { BaseMessage, MessageContentImageUrl } from '@langchain/core/messages'
import { getImageUploads } from '../../src/multiModalUtils'
import { getFileFromStorage } from '../../src/storageUtils'
import { ICommonObject, IFileUpload } from '../../src/Interface'
import { BaseMessageLike } from '@langchain/core/messages'
import { IFlowState } from './Interface.Agentflow'
import { mapMimeTypeToInputField } from '../../src/utils'
export const addImagesToMessages = async (
options: ICommonObject,
allowImageUploads: boolean,
imageResolution?: 'auto' | 'low' | 'high'
): Promise<MessageContentImageUrl[]> => {
const imageContent: MessageContentImageUrl[] = []
if (allowImageUploads && options?.uploads && options?.uploads.length > 0) {
const imageUploads = getImageUploads(options.uploads)
for (const upload of imageUploads) {
let bf = upload.data
if (upload.type == 'stored-file') {
const contents = await getFileFromStorage(upload.name, options.chatflowid, options.chatId)
// as the image is stored in the server, read the file and convert it to base64
bf = 'data:' + upload.mime + ';base64,' + contents.toString('base64')
imageContent.push({
type: 'image_url',
image_url: {
url: bf,
detail: imageResolution ?? 'low'
}
})
} else if (upload.type == 'url' && bf) {
imageContent.push({
type: 'image_url',
image_url: {
url: bf,
detail: imageResolution ?? 'low'
}
})
}
}
}
return imageContent
}
/**
 * Process message array to replace stored file references with base64 image data.
 * @param messages Array of messages that may contain image references
 * @param options Common options object containing chatflowid and chatId
 * @returns Object containing updated messages array and the original (pre-rewrite)
 *          copies of every message that was transformed
 */
export const processMessagesWithImages = async (
    messages: BaseMessageLike[],
    options: ICommonObject
): Promise<{
    updatedMessages: BaseMessageLike[]
    transformedMessages: BaseMessageLike[]
}> => {
    // Nothing to resolve without messages or the storage coordinates
    if (!messages || !options.chatflowid || !options.chatId) {
        return {
            updatedMessages: messages,
            transformedMessages: []
        }
    }

    // Create a deep copy of the messages to avoid mutating the original
    const updatedMessages = JSON.parse(JSON.stringify(messages))
    // Track which messages were transformed
    const transformedMessages: BaseMessageLike[] = []

    // Scan through all messages looking for stored-file references
    for (let i = 0; i < updatedMessages.length; i++) {
        const message = updatedMessages[i]

        // Skip non-user messages or messages without content
        if (message.role !== 'user' || !message.content) {
            continue
        }

        // Handle array content (typically containing file references)
        if (Array.isArray(message.content)) {
            const imageContents: MessageContentImageUrl[] = []
            let hasImageReferences = false

            // Process each content item
            for (const item of message.content) {
                // Look for stored-file type items; `mime` is guarded with optional
                // chaining because items may omit it entirely (previously a TypeError)
                if (item.type === 'stored-file' && item.name && item.mime?.startsWith('image/')) {
                    hasImageReferences = true
                    try {
                        // Get file contents from storage
                        const contents = await getFileFromStorage(item.name, options.chatflowid, options.chatId)
                        // Create base64 data URL
                        const base64Data = 'data:' + item.mime + ';base64,' + contents.toString('base64')
                        // Add to image content array
                        imageContents.push({
                            type: 'image_url',
                            image_url: {
                                url: base64Data,
                                detail: item.imageResolution ?? 'low'
                            }
                        })
                    } catch (error) {
                        console.error(`Failed to load image ${item.name}:`, error)
                    }
                }
            }

            // Replace the content with the image content array
            if (imageContents.length > 0) {
                // Store the untouched original before modifying
                if (hasImageReferences) {
                    transformedMessages.push(JSON.parse(JSON.stringify(messages[i])))
                }
                updatedMessages[i].content = imageContents
            }
        }
    }

    return {
        updatedMessages,
        transformedMessages
    }
}
/**
 * Replace base64 image data in messages with file references.
 *
 * Replacements are consumed positionally: scanning messages in order, each
 * `image_url` content item takes the next available content item, first from
 * `pastImageMessages`, then from `uniqueImageMessages`. The four index
 * variables below track the current message/content position in each pool.
 *
 * @param messages Array of messages that may contain base64 image data
 * @param uniqueImageMessages Array of messages with file references for new images
 * @param pastImageMessages Array of messages with file references for previous images
 * @returns Updated messages array with file references instead of base64 data
 */
export const replaceBase64ImagesWithFileReferences = (
    messages: BaseMessageLike[],
    uniqueImageMessages: BaseMessageLike[] = [],
    pastImageMessages: BaseMessageLike[] = []
): BaseMessageLike[] => {
    // Create a deep copy to avoid mutating the original
    const updatedMessages = JSON.parse(JSON.stringify(messages))
    // Track positions in replacement arrays
    let pastMessageIndex = 0
    let pastContentIndex = 0
    let uniqueMessageIndex = 0
    let uniqueContentIndex = 0
    for (let i = 0; i < updatedMessages.length; i++) {
        const message = updatedMessages[i]
        if (message.content && Array.isArray(message.content)) {
            for (let j = 0; j < message.content.length; j++) {
                const item = message.content[j]
                if (item.type === 'image_url') {
                    // Try past images first
                    let replacement = null
                    if (pastMessageIndex < pastImageMessages.length) {
                        const pastMessage = pastImageMessages[pastMessageIndex] as BaseMessage | undefined
                        if (pastMessage && Array.isArray(pastMessage.content)) {
                            if (pastContentIndex < pastMessage.content.length) {
                                replacement = pastMessage.content[pastContentIndex]
                                pastContentIndex++
                                // Move to next message if we've used all content in current one
                                if (pastContentIndex >= pastMessage.content.length) {
                                    pastMessageIndex++
                                    pastContentIndex = 0
                                }
                            } else {
                                // Current message has no more content, move to next
                                pastMessageIndex++
                                pastContentIndex = 0
                                // Try again with the next message
                                if (pastMessageIndex < pastImageMessages.length) {
                                    const nextPastMessage = pastImageMessages[pastMessageIndex] as BaseMessage | undefined
                                    if (nextPastMessage && Array.isArray(nextPastMessage.content) && nextPastMessage.content.length > 0) {
                                        replacement = nextPastMessage.content[0]
                                        pastContentIndex = 1
                                    }
                                }
                            }
                        }
                    }
                    // Try unique images if no past image replacement found
                    if (!replacement && uniqueMessageIndex < uniqueImageMessages.length) {
                        const uniqueMessage = uniqueImageMessages[uniqueMessageIndex] as BaseMessage | undefined
                        if (uniqueMessage && Array.isArray(uniqueMessage.content)) {
                            if (uniqueContentIndex < uniqueMessage.content.length) {
                                replacement = uniqueMessage.content[uniqueContentIndex]
                                uniqueContentIndex++
                                // Move to next message if we've used all content in current one
                                if (uniqueContentIndex >= uniqueMessage.content.length) {
                                    uniqueMessageIndex++
                                    uniqueContentIndex = 0
                                }
                            } else {
                                // Current message has no more content, move to next
                                uniqueMessageIndex++
                                uniqueContentIndex = 0
                                // Try again with the next message
                                if (uniqueMessageIndex < uniqueImageMessages.length) {
                                    const nextUniqueMessage = uniqueImageMessages[uniqueMessageIndex] as BaseMessage | undefined
                                    if (
                                        nextUniqueMessage &&
                                        Array.isArray(nextUniqueMessage.content) &&
                                        nextUniqueMessage.content.length > 0
                                    ) {
                                        replacement = nextUniqueMessage.content[0]
                                        uniqueContentIndex = 1
                                    }
                                }
                            }
                        }
                    }
                    // Apply replacement if found. NOTE: an image_url item with no
                    // remaining replacement is intentionally left as-is (base64 stays).
                    if (replacement) {
                        message.content[j] = {
                            ...replacement
                        }
                    }
                }
            }
        }
    }
    return updatedMessages
}
/**
* Get unique image messages from uploads
* @param options Common options object containing uploads
* @param messages Array of messages to check for existing images
* @param modelConfig Model configuration object containing allowImageUploads and imageResolution
* @returns Object containing imageMessageWithFileRef and imageMessageWithBase64
*/
export const getUniqueImageMessages = async (
options: ICommonObject,
messages: BaseMessageLike[],
modelConfig?: ICommonObject
): Promise<{ imageMessageWithFileRef: BaseMessageLike; imageMessageWithBase64: BaseMessageLike } | undefined> => {
if (!options.uploads) return undefined
// Get images from uploads
const images = await addImagesToMessages(options, modelConfig?.allowImageUploads, modelConfig?.imageResolution)
// Filter out images that are already in previous messages
const uniqueImages = images.filter((image) => {
// Check if this image is already in any existing message
return !messages.some((msg: any) => {
// For multimodal content (arrays with image objects)
if (Array.isArray(msg.content)) {
return msg.content.some(
(item: any) =>
// Compare by image URL/content for image objects
item.type === 'image_url' && image.type === 'image_url' && JSON.stringify(item) === JSON.stringify(image)
)
}
// For direct comparison of simple content
return JSON.stringify(msg.content) === JSON.stringify(image)
})
})
if (uniqueImages.length === 0) {
return undefined
}
// Create messages with the original file references for storage/display
const imageMessageWithFileRef = {
role: 'user',
content: options.uploads.map((upload: IFileUpload) => ({
type: upload.type,
name: upload.name,
mime: upload.mime,
imageResolution: modelConfig?.imageResolution
}))
}
// Create messages with base64 data for the LLM
const imageMessageWithBase64 = {
role: 'user',
content: uniqueImages
}
return {
imageMessageWithFileRef,
imageMessageWithBase64
}
}
/**
 * Rebuild past chat history, resolving stored file uploads.
 *
 * Image uploads become `image_url` content (stored files inlined as base64);
 * `stored-file:full` uploads are re-loaded via the File document loader and
 * prepended to the message text as <doc> blocks. When images are found, an
 * extra image-only message is emitted ahead of the text message and the
 * original file references are captured in `transformedPastMessages`.
 *
 * @param pastChatHistory Array of past chat history messages
 * @param options Common options object (chatflowid / chatId used for storage reads)
 * @returns Object containing updatedPastMessages and transformedPastMessages
 */
export const getPastChatHistoryImageMessages = async (
    pastChatHistory: BaseMessageLike[],
    options: ICommonObject
): Promise<{ updatedPastMessages: BaseMessageLike[]; transformedPastMessages: BaseMessageLike[] }> => {
    const chatHistory = []
    const transformedPastMessages = []
    for (let i = 0; i < pastChatHistory.length; i++) {
        const message = pastChatHistory[i] as BaseMessage & { role: string }
        const messageRole = message.role || 'user'
        if (message.additional_kwargs && message.additional_kwargs.fileUploads) {
            // example: [{"type":"stored-file","name":"0_DiXc4ZklSTo3M8J4.jpg","mime":"image/jpeg"}]
            const fileUploads = message.additional_kwargs.fileUploads
            try {
                let messageWithFileUploads = ''
                // fileUploads may be a JSON string or an already-parsed array
                const uploads: IFileUpload[] = typeof fileUploads === 'string' ? JSON.parse(fileUploads) : fileUploads
                const imageContents: MessageContentImageUrl[] = []
                for (const upload of uploads) {
                    // `mime` is optional on some uploads — optional chaining avoids a TypeError
                    if (upload.type === 'stored-file' && upload.mime?.startsWith('image/')) {
                        const fileData = await getFileFromStorage(upload.name, options.chatflowid, options.chatId)
                        // as the image is stored in the server, read the file and convert it to base64
                        const bf = 'data:' + upload.mime + ';base64,' + fileData.toString('base64')
                        imageContents.push({
                            type: 'image_url',
                            image_url: {
                                url: bf
                            }
                        })
                    } else if (upload.type === 'url' && upload.mime?.startsWith('image') && upload.data) {
                        imageContents.push({
                            type: 'image_url',
                            image_url: {
                                url: upload.data
                            }
                        })
                    } else if (upload.type === 'stored-file:full') {
                        const fileLoaderNodeModule = await import('../../nodes/documentloaders/File/File')
                        // @ts-ignore
                        const fileLoaderNodeInstance = new fileLoaderNodeModule.nodeClass()
                        const nodeOptions = {
                            retrieveAttachmentChatId: true,
                            chatflowid: options.chatflowid,
                            chatId: options.chatId
                        }
                        let fileInputFieldFromMimeType = 'txtFile'
                        fileInputFieldFromMimeType = mapMimeTypeToInputField(upload.mime)
                        const nodeData = {
                            inputs: {
                                [fileInputFieldFromMimeType]: `FILE-STORAGE::${JSON.stringify([upload.name])}`
                            }
                        }
                        const documents: string = await fileLoaderNodeInstance.init(nodeData, '', nodeOptions)
                        messageWithFileUploads += `<doc name='${upload.name}'>${documents}</doc>\n\n`
                    }
                }
                const messageContent = messageWithFileUploads ? `${messageWithFileUploads}\n\n${message.content}` : message.content
                if (imageContents.length > 0) {
                    chatHistory.push({
                        role: messageRole,
                        content: imageContents
                    })
                    // Reuse the already-parsed uploads: re-running JSON.parse on
                    // additional_kwargs.fileUploads threw when it was already an array,
                    // which dropped the text message into the catch fallback below
                    transformedPastMessages.push({
                        role: messageRole,
                        content: [...uploads]
                    })
                }
                chatHistory.push({
                    role: messageRole,
                    content: messageContent
                })
            } catch (e) {
                // failed to parse fileUploads, continue with text only
                chatHistory.push({
                    role: messageRole,
                    content: message.content
                })
            }
        } else {
            chatHistory.push({
                role: messageRole,
                content: message.content
            })
        }
    }
    return {
        updatedPastMessages: chatHistory,
        transformedPastMessages
    }
}
/**
 * Updates the flow state with new values.
 * Returns a fresh object: entries from `llmUpdateState` overwrite any
 * matching keys in `state`; neither argument is mutated.
 */
export const updateFlowState = (state: ICommonObject, llmUpdateState: IFlowState[]): ICommonObject => {
    const updates = Object.fromEntries(llmUpdateState.map(({ key, value }) => [key, value]))
    return { ...state, ...updates }
}

View File

@ -18,6 +18,7 @@ import { AnalyticHandler } from '../../../src/handler'
import { Moderation, checkInputs, streamResponse } from '../../moderation/Moderation'
import { formatResponse } from '../../outputparsers/OutputParserHelpers'
import { addSingleFileToStorage } from '../../../src/storageUtils'
import { DynamicStructuredTool } from '../../tools/OpenAPIToolkit/core'
const lenticularBracketRegex = /【[^】]*】/g
const imageRegex = /<img[^>]*\/>/g
@ -223,7 +224,7 @@ class OpenAIAssistant_Agents implements INode {
const openai = new OpenAI({ apiKey: openAIApiKey })
// Start analytics
const analyticHandlers = new AnalyticHandler(nodeData, options)
const analyticHandlers = AnalyticHandler.getInstance(nodeData, options)
await analyticHandlers.init()
const parentIds = await analyticHandlers.onChainStart('OpenAIAssistant', input)
@ -504,7 +505,6 @@ class OpenAIAssistant_Agents implements INode {
toolCallId: item.id
})
})
const submitToolOutputs = []
for (let i = 0; i < actions.length; i += 1) {
const tool = tools.find((tool: any) => tool.name === actions[i].tool)
@ -539,30 +539,23 @@ class OpenAIAssistant_Agents implements INode {
}
try {
const stream = openai.beta.threads.runs.submitToolOutputsStream(threadId, runThreadId, {
tool_outputs: submitToolOutputs
await handleToolSubmission({
openai,
threadId,
runThreadId,
submitToolOutputs,
tools,
analyticHandlers,
parentIds,
llmIds,
sseStreamer,
chatId,
options,
input,
usedTools,
text,
isStreamingStarted
})
for await (const event of stream) {
if (event.event === 'thread.message.delta') {
const chunk = event.data.delta.content?.[0]
if (chunk && 'text' in chunk && chunk.text?.value) {
text += chunk.text.value
if (!isStreamingStarted) {
isStreamingStarted = true
if (sseStreamer) {
sseStreamer.streamStartEvent(chatId, chunk.text.value)
}
}
if (sseStreamer) {
sseStreamer.streamTokenEvent(chatId, chunk.text.value)
}
}
}
}
if (sseStreamer) {
sseStreamer.streamUsedToolsEvent(chatId, usedTools)
}
} catch (error) {
console.error('Error submitting tool outputs:', error)
await openai.beta.threads.runs.cancel(threadId, runThreadId)
@ -634,7 +627,6 @@ class OpenAIAssistant_Agents implements INode {
toolCallId: item.id
})
})
const submitToolOutputs = []
for (let i = 0; i < actions.length; i += 1) {
const tool = tools.find((tool: any) => tool.name === actions[i].tool)
@ -751,7 +743,7 @@ class OpenAIAssistant_Agents implements INode {
state = await promise(threadId, newRunThread.id)
} else {
const errMsg = `Error processing thread: ${state}, Thread ID: ${threadId}`
await analyticHandlers.onChainError(parentIds, errMsg)
await analyticHandlers.onChainError(parentIds, errMsg, true)
throw new Error(errMsg)
}
}
@ -895,15 +887,212 @@ const downloadFile = async (openAIApiKey: string, fileObj: any, fileName: string
}
}
/** Everything handleToolSubmission needs to stream tool outputs back into an OpenAI Assistant run. */
interface ToolSubmissionParams {
    openai: OpenAI
    threadId: string
    runThreadId: string
    submitToolOutputs: any[]
    tools: any[]
    analyticHandlers: AnalyticHandler
    parentIds: ICommonObject
    llmIds: ICommonObject
    sseStreamer: IServerSideEventStreamer
    chatId: string
    options: ICommonObject
    input: string
    usedTools: IUsedTool[]
    text: string
    isStreamingStarted: boolean
}

/** Streaming accumulators returned by handleToolSubmission (fed back in on recursive calls). */
interface ToolSubmissionResult {
    text: string
    isStreamingStarted: boolean
}
/**
 * Submit tool outputs to an in-progress Assistant run and stream the resulting
 * message deltas. If the run requires further action (nested tool calls), the
 * requested tools are executed and this function recurses with their outputs,
 * threading the accumulated `text` / `isStreamingStarted` through each level.
 * On failure the run is cancelled and the analytic handlers are notified.
 */
async function handleToolSubmission(params: ToolSubmissionParams): Promise<ToolSubmissionResult> {
    const {
        openai,
        threadId,
        runThreadId,
        submitToolOutputs,
        tools,
        analyticHandlers,
        parentIds,
        llmIds,
        sseStreamer,
        chatId,
        options,
        input,
        usedTools
    } = params

    // Local copies of the streaming accumulators; returned to the caller
    let updatedText = params.text
    let updatedIsStreamingStarted = params.isStreamingStarted

    const stream = openai.beta.threads.runs.submitToolOutputsStream(threadId, runThreadId, {
        tool_outputs: submitToolOutputs
    })

    try {
        for await (const event of stream) {
            if (event.event === 'thread.message.delta') {
                // Text delta: append and forward to the SSE stream
                const chunk = event.data.delta.content?.[0]
                if (chunk && 'text' in chunk && chunk.text?.value) {
                    updatedText += chunk.text.value
                    if (!updatedIsStreamingStarted) {
                        updatedIsStreamingStarted = true
                        if (sseStreamer) {
                            sseStreamer.streamStartEvent(chatId, chunk.text.value)
                        }
                    }
                    if (sseStreamer) {
                        sseStreamer.streamTokenEvent(chatId, chunk.text.value)
                    }
                }
            } else if (event.event === 'thread.run.requires_action') {
                if (event.data.required_action?.submit_tool_outputs.tool_calls) {
                    // Collect the nested tool calls requested by the run
                    const actions: ICommonObject[] = []
                    event.data.required_action.submit_tool_outputs.tool_calls.forEach((item) => {
                        const functionCall = item.function
                        let args = {}
                        try {
                            args = JSON.parse(functionCall.arguments)
                        } catch (e) {
                            console.error('Error parsing arguments, default to empty object')
                        }
                        actions.push({
                            tool: functionCall.name,
                            toolInput: args,
                            toolCallId: item.id
                        })
                    })

                    // Execute each requested tool, recording analytics and used tools
                    const nestedToolOutputs = []
                    for (let i = 0; i < actions.length; i += 1) {
                        const tool = tools.find((tool: any) => tool.name === actions[i].tool)
                        if (!tool) continue
                        const toolIds = await analyticHandlers.onToolStart(tool.name, actions[i].toolInput, parentIds)
                        try {
                            const toolOutput = await tool.call(actions[i].toolInput, undefined, undefined, {
                                sessionId: threadId,
                                chatId: options.chatId,
                                input
                            })
                            await analyticHandlers.onToolEnd(toolIds, toolOutput)
                            nestedToolOutputs.push({
                                tool_call_id: actions[i].toolCallId,
                                output: toolOutput
                            })
                            usedTools.push({
                                tool: tool.name,
                                toolInput: actions[i].toolInput,
                                toolOutput
                            })
                        } catch (e) {
                            await analyticHandlers.onToolEnd(toolIds, e)
                            console.error('Error executing tool', e)
                            throw new Error(`Error executing tool. Tool: ${tool.name}. Thread ID: ${threadId}. Run ID: ${runThreadId}`)
                        }
                    }

                    // Recursively handle nested tool submissions
                    const result = await handleToolSubmission({
                        openai,
                        threadId,
                        runThreadId,
                        submitToolOutputs: nestedToolOutputs,
                        tools,
                        analyticHandlers,
                        parentIds,
                        llmIds,
                        sseStreamer,
                        chatId,
                        options,
                        input,
                        usedTools,
                        text: updatedText,
                        isStreamingStarted: updatedIsStreamingStarted
                    })
                    updatedText = result.text
                    updatedIsStreamingStarted = result.isStreamingStarted
                }
            }
        }

        if (sseStreamer) {
            sseStreamer.streamUsedToolsEvent(chatId, usedTools)
        }

        return {
            text: updatedText,
            isStreamingStarted: updatedIsStreamingStarted
        }
    } catch (error) {
        // Cancel the run so the thread is not left blocked, then surface the failure
        console.error('Error submitting tool outputs:', error)
        await openai.beta.threads.runs.cancel(threadId, runThreadId)
        const errMsg = `Error submitting tool outputs. Thread ID: ${threadId}. Run ID: ${runThreadId}`
        await analyticHandlers.onLLMError(llmIds, errMsg)
        await analyticHandlers.onChainError(parentIds, errMsg, true)
        throw new Error(errMsg)
    }
}
// Minimal JSON Schema shape as produced by zodToJsonSchema, with an open
// index signature so other schema keywords (enum, items, format, ...) pass through.
interface JSONSchema {
    // JSON Schema "type" keyword, e.g. 'object' | 'string' | 'number'
    type?: string
    // Nested property schemas when type === 'object'
    properties?: Record<string, JSONSchema>
    // Whether keys not listed in `properties` are permitted
    additionalProperties?: boolean
    // Names of properties that must be present
    required?: string[]
    // Any other JSON Schema keyword is allowed and preserved as-is
    [key: string]: any
}
const formatToOpenAIAssistantTool = (tool: any): OpenAI.Beta.FunctionTool => {
return {
const parameters = zodToJsonSchema(tool.schema) as JSONSchema
// For strict tools, we need to:
// 1. Set additionalProperties to false
// 2. Make all parameters required
// 3. Set the strict flag
if (tool instanceof DynamicStructuredTool && tool.isStrict()) {
// Get all property names from the schema
const properties = parameters.properties || {}
const allPropertyNames = Object.keys(properties)
parameters.additionalProperties = false
parameters.required = allPropertyNames
// Handle nested objects
for (const [_, prop] of Object.entries(properties)) {
if (prop.type === 'object') {
prop.additionalProperties = false
if (prop.properties) {
prop.required = Object.keys(prop.properties)
}
}
}
}
const functionTool: OpenAI.Beta.FunctionTool = {
type: 'function',
function: {
name: tool.name,
description: tool.description,
parameters: zodToJsonSchema(tool.schema)
parameters
}
}
// Add strict property if the tool is marked as strict
if (tool instanceof DynamicStructuredTool && tool.isStrict()) {
;(functionTool.function as any).strict = true
}
return functionTool
}
module.exports = { nodeClass: OpenAIAssistant_Agents }

View File

@ -24,7 +24,7 @@ import {
IUsedTool,
IVisionChatModal
} from '../../../src/Interface'
import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
import { ConsoleCallbackHandler, CustomChainHandler, CustomStreamingHandler, additionalCallbacks } from '../../../src/handler'
import { AgentExecutor, ToolCallingAgentOutputParser } from '../../../src/agents'
import { Moderation, checkInputs, streamResponse } from '../../moderation/Moderation'
import { formatResponse } from '../../outputparsers/OutputParserHelpers'
@ -101,6 +101,15 @@ class ToolAgent_Agents implements INode {
type: 'number',
optional: true,
additionalParams: true
},
{
label: 'Enable Detailed Streaming',
name: 'enableDetailedStreaming',
type: 'boolean',
default: false,
description: 'Stream detailed intermediate steps during agent execution',
optional: true,
additionalParams: true
}
]
this.sessionId = fields?.sessionId
@ -113,6 +122,7 @@ class ToolAgent_Agents implements INode {
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | ICommonObject> {
const memory = nodeData.inputs?.memory as FlowiseMemory
const moderations = nodeData.inputs?.inputModeration as Moderation[]
const enableDetailedStreaming = nodeData.inputs?.enableDetailedStreaming as boolean
const shouldStreamResponse = options.shouldStreamResponse
const sseStreamer: IServerSideEventStreamer = options.sseStreamer as IServerSideEventStreamer
@ -136,6 +146,13 @@ class ToolAgent_Agents implements INode {
const loggerHandler = new ConsoleCallbackHandler(options.logger)
const callbacks = await additionalCallbacks(nodeData, options)
// Add custom streaming handler if detailed streaming is enabled
let customStreamingHandler = null
if (enableDetailedStreaming && shouldStreamResponse) {
customStreamingHandler = new CustomStreamingHandler(sseStreamer, chatId)
}
let res: ChainValues = {}
let sourceDocuments: ICommonObject[] = []
let usedTools: IUsedTool[] = []
@ -143,7 +160,14 @@ class ToolAgent_Agents implements INode {
if (shouldStreamResponse) {
const handler = new CustomChainHandler(sseStreamer, chatId)
res = await executor.invoke({ input }, { callbacks: [loggerHandler, handler, ...callbacks] })
const allCallbacks = [loggerHandler, handler, ...callbacks]
// Add detailed streaming handler if enabled
if (enableDetailedStreaming && customStreamingHandler) {
allCallbacks.push(customStreamingHandler)
}
res = await executor.invoke({ input }, { callbacks: allCallbacks })
if (res.sourceDocuments) {
if (sseStreamer) {
sseStreamer.streamSourceDocumentsEvent(chatId, flatten(res.sourceDocuments))
@ -174,7 +198,14 @@ class ToolAgent_Agents implements INode {
}
}
} else {
res = await executor.invoke({ input }, { callbacks: [loggerHandler, ...callbacks] })
const allCallbacks = [loggerHandler, ...callbacks]
// Add detailed streaming handler if enabled
if (enableDetailedStreaming && customStreamingHandler) {
allCallbacks.push(customStreamingHandler)
}
res = await executor.invoke({ input }, { callbacks: allCallbacks })
if (res.sourceDocuments) {
sourceDocuments = res.sourceDocuments
}

View File

@ -0,0 +1,33 @@
import { INode, INodeParams } from '../../../src/Interface'
// Analytics-provider node registering Opik as an observability integration.
// The node itself carries no runtime logic: it only declares metadata and the
// credential required to connect to Opik.
class Opik_Analytic implements INode {
    label: string
    name: string
    version: number
    // NOTE(review): declared but never assigned in the constructor — confirm
    // whether a description is intentionally omitted for analytic nodes.
    description: string
    type: string
    icon: string
    category: string
    baseClasses: string[]
    inputs?: INodeParams[]
    credential: INodeParams

    constructor() {
        // Node identity
        this.label = 'Opik'
        this.name = 'opik'
        this.version = 1.0
        this.type = 'Opik'
        // UI presentation and grouping
        this.icon = 'opik.png'
        this.category = 'Analytic'
        this.baseClasses = [this.type]
        // No configurable inputs — only a credential is required
        this.inputs = []
        this.credential = {
            label: 'Connect Credential',
            name: 'credential',
            type: 'credential',
            credentialNames: ['opikApi']
        }
    }
}
module.exports = { nodeClass: Opik_Analytic }

Binary file not shown.

After

Width:  |  Height:  |  Size: 10 KiB

View File

@ -0,0 +1,44 @@
import type { CachedContentBase, CachedContent, Content } from '@google/generative-ai'
import { GoogleAICacheManager as GoogleAICacheManagerBase } from '@google/generative-ai/server'
import hash from 'object-hash'
type CacheContentOptions = Omit<CachedContentBase, 'contents'> & { contents?: Content[] }
/**
 * Cache manager that layers memoization on top of the Google Generative AI
 * cache API. Entries are keyed by a hash of (model, tools, contents); lookup
 * consults an in-memory map first, then the remote cache list, and finally
 * creates a new cached-content entry with the configured TTL.
 */
export class GoogleAICacheManager extends GoogleAICacheManagerBase {
    // TTL (seconds) applied to newly created cached-content entries
    private ttlSeconds: number

    // In-memory memoization, keyed by the request hash
    private cachedContents: Map<string, CachedContent> = new Map()

    setTtlSeconds(ttlSeconds: number) {
        this.ttlSeconds = ttlSeconds
    }

    /**
     * Return the cached content for this request, creating one remotely if
     * neither the local map nor the remote cache already holds it.
     * Returns undefined when there is nothing to cache.
     */
    async lookup(options: CacheContentOptions): Promise<CachedContent | undefined> {
        const { model, tools, contents } = options

        // Nothing to cache without contents
        if (!contents?.length) {
            return undefined
        }

        const key = hash({ model, tools, contents })

        // 1. Local memoization
        const memoized = this.cachedContents.get(key)
        if (memoized) {
            return memoized
        }

        // 2. Remote cache, matched via the display name we set on creation
        const { cachedContents } = await this.list()
        const remote = (cachedContents ?? []).find((cache) => cache.displayName === key)
        if (remote) {
            this.cachedContents.set(key, remote)
            return remote
        }

        // 3. Create a fresh cached-content entry and memoize it
        const created = await this.create({
            ...(options as CachedContentBase),
            displayName: key,
            ttlSeconds: this.ttlSeconds
        })
        this.cachedContents.set(key, created)
        return created
    }
}
export default GoogleAICacheManager

View File

@ -0,0 +1,34 @@
<svg width="32" height="32" viewBox="0 0 32 32" fill="none" xmlns="http://www.w3.org/2000/svg">
<mask id="mask0_42_15021" style="mask-type:alpha" maskUnits="userSpaceOnUse" x="4" y="4" width="24" height="24">
<path d="M16.9976 4.93059C16.9611 4.40651 16.5253 4 16 4C15.4747 4 15.0389 4.40651 15.0024 4.93059L14.951 5.66926C14.6048 10.645 10.645 14.6048 5.66926 14.951L4.93059 15.0024C4.40651 15.0389 4 15.4747 4 16C4 16.5253 4.40651 16.9611 4.93059 16.9976L5.66926 17.049C10.645 17.3952 14.6048 21.355 14.951 26.3307L15.0024 27.0694C15.0389 27.5935 15.4747 28 16 28C16.5253 28 16.9611 27.5935 16.9976 27.0694L17.049 26.3307C17.3952 21.355 21.355 17.3952 26.3307 17.049L27.0694 16.9976C27.5935 16.9611 28 16.5253 28 16C28 15.4747 27.5935 15.0389 27.0694 15.0024L26.3307 14.951C21.355 14.6048 17.3952 10.645 17.049 5.66926L16.9976 4.93059Z" fill="black"/>
</mask>
<g mask="url(#mask0_42_15021)">
<path d="M16.9976 4.93059C16.9611 4.40651 16.5253 4 16 4C15.4747 4 15.0389 4.40651 15.0024 4.93059L14.951 5.66926C14.6048 10.645 10.645 14.6048 5.66926 14.951L4.93059 15.0024C4.40651 15.0389 4 15.4747 4 16C4 16.5253 4.40651 16.9611 4.93059 16.9976L5.66926 17.049C10.645 17.3952 14.6048 21.355 14.951 26.3307L15.0024 27.0694C15.0389 27.5935 15.4747 28 16 28C16.5253 28 16.9611 27.5935 16.9976 27.0694L17.049 26.3307C17.3952 21.355 21.355 17.3952 26.3307 17.049L27.0694 16.9976C27.5935 16.9611 28 16.5253 28 16C28 15.4747 27.5935 15.0389 27.0694 15.0024L26.3307 14.951C21.355 14.6048 17.3952 10.645 17.049 5.66926L16.9976 4.93059Z" fill="white"/>
<g filter="url(#filter0_f_42_15021)">
<circle cx="10.4616" cy="13.2307" r="8.30769" fill="#77B6E5"/>
</g>
<g filter="url(#filter1_f_42_15021)">
<circle cx="16" cy="22.4615" r="8.30769" fill="#1E90C9"/>
</g>
<g filter="url(#filter2_f_42_15021)">
<ellipse cx="21.5385" cy="10.4615" rx="10.1538" ry="8.30769" fill="#E9E5DF"/>
</g>
</g>
<defs>
<filter id="filter0_f_42_15021" x="-7.84613" y="-5.07697" width="36.6154" height="36.6154" filterUnits="userSpaceOnUse" color-interpolation-filters="sRGB">
<feFlood flood-opacity="0" result="BackgroundImageFix"/>
<feBlend mode="normal" in="SourceGraphic" in2="BackgroundImageFix" result="shape"/>
<feGaussianBlur stdDeviation="5" result="effect1_foregroundBlur_42_15021"/>
</filter>
<filter id="filter1_f_42_15021" x="-0.307678" y="6.15381" width="32.6154" height="32.6154" filterUnits="userSpaceOnUse" color-interpolation-filters="sRGB">
<feFlood flood-opacity="0" result="BackgroundImageFix"/>
<feBlend mode="normal" in="SourceGraphic" in2="BackgroundImageFix" result="shape"/>
<feGaussianBlur stdDeviation="4" result="effect1_foregroundBlur_42_15021"/>
</filter>
<filter id="filter2_f_42_15021" x="3.38464" y="-5.84619" width="36.3077" height="32.6154" filterUnits="userSpaceOnUse" color-interpolation-filters="sRGB">
<feFlood flood-opacity="0" result="BackgroundImageFix"/>
<feBlend mode="normal" in="SourceGraphic" in2="BackgroundImageFix" result="shape"/>
<feGaussianBlur stdDeviation="4" result="effect1_foregroundBlur_42_15021"/>
</filter>
</defs>
</svg>

After

Width:  |  Height:  |  Size: 3.0 KiB

View File

@ -0,0 +1,53 @@
import { getBaseClasses, getCredentialData, getCredentialParam, ICommonObject, INode, INodeData, INodeParams } from '../../../src'
import FlowiseGoogleAICacheManager from './FlowiseGoogleAICacheManager'
// Node exposing Google Gemini context caching: builds a FlowiseGoogleAICacheManager
// configured with a TTL and the user's Google Generative AI credential.
class GoogleGenerativeAIContextCache implements INode {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    baseClasses: string[]
    inputs: INodeParams[]
    credential: INodeParams

    constructor() {
        // Node identity
        this.label = 'Google GenAI Context Cache'
        this.name = 'googleGenerativeAIContextCache'
        this.version = 1.0
        this.type = 'GoogleAICacheManager'
        this.description = 'Large context cache for Google Gemini large language models'
        // UI presentation and grouping
        this.icon = 'GoogleGemini.svg'
        this.category = 'Cache'
        this.baseClasses = [this.type, ...getBaseClasses(FlowiseGoogleAICacheManager)]
        this.inputs = [
            {
                label: 'TTL',
                name: 'ttl',
                type: 'number',
                // 30 days expressed in seconds
                default: 60 * 60 * 24 * 30
            }
        ]
        this.credential = {
            label: 'Connect Credential',
            name: 'credential',
            type: 'credential',
            credentialNames: ['googleGenerativeAI'],
            optional: false,
            description: 'Google Generative AI credential.'
        }
    }

    /**
     * Build the cache manager from the linked credential and apply the TTL.
     * @returns a configured FlowiseGoogleAICacheManager instance
     */
    async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
        // Resolve the API key from the stored credential
        const credentialData = await getCredentialData(nodeData.credential ?? '', options)
        const apiKey = getCredentialParam('googleGenerativeAPIKey', credentialData, nodeData)

        const cacheManager = new FlowiseGoogleAICacheManager(apiKey)
        cacheManager.setTtlSeconds(nodeData.inputs?.ttl as number)
        return cacheManager
    }
}
module.exports = { nodeClass: GoogleGenerativeAIContextCache }

View File

@ -126,10 +126,19 @@ const getRedisClient = async (nodeData: INodeData, options: ICommonObject) => {
host,
username,
password,
keepAlive:
process.env.REDIS_KEEP_ALIVE && !isNaN(parseInt(process.env.REDIS_KEEP_ALIVE, 10))
? parseInt(process.env.REDIS_KEEP_ALIVE, 10)
: undefined,
...tlsOptions
})
} else {
client = new Redis(redisUrl)
client = new Redis(redisUrl, {
keepAlive:
process.env.REDIS_KEEP_ALIVE && !isNaN(parseInt(process.env.REDIS_KEEP_ALIVE, 10))
? parseInt(process.env.REDIS_KEEP_ALIVE, 10)
: undefined
})
}
return client

View File

@ -83,10 +83,19 @@ class RedisEmbeddingsCache implements INode {
host,
username,
password,
keepAlive:
process.env.REDIS_KEEP_ALIVE && !isNaN(parseInt(process.env.REDIS_KEEP_ALIVE, 10))
? parseInt(process.env.REDIS_KEEP_ALIVE, 10)
: undefined,
...tlsOptions
})
} else {
client = new Redis(redisUrl)
client = new Redis(redisUrl, {
keepAlive:
process.env.REDIS_KEEP_ALIVE && !isNaN(parseInt(process.env.REDIS_KEEP_ALIVE, 10))
? parseInt(process.env.REDIS_KEEP_ALIVE, 10)
: undefined
})
}
ttl ??= '3600'

View File

@ -92,6 +92,21 @@ export class APIChain extends BaseChain implements APIChainInput {
const { url, data } = JSON.parse(api_url_body)
// Validate request is not to internal/private networks
const urlObj = new URL(url)
const hostname = urlObj.hostname
if (
hostname === 'localhost' ||
hostname === '127.0.0.1' ||
hostname.startsWith('192.168.') ||
hostname.startsWith('10.') ||
hostname.startsWith('172.16.') ||
hostname.includes('internal')
) {
throw new Error('Access to internal networks is not allowed')
}
const res = await fetch(url, {
method: 'POST',
headers: this.headers,

View File

@ -27,7 +27,7 @@ export class BedrockChat extends LCBedrockChat implements IVisionChatModal {
}
setVisionModel(): void {
if (!this.model.startsWith('claude-3')) {
if (!this.model.includes('claude-3')) {
this.model = DEFAULT_IMAGE_MODEL
this.maxTokens = this.configuredMaxToken ? this.configuredMaxToken : DEFAULT_IMAGE_MAX_TOKEN
}

View File

@ -237,7 +237,7 @@ class AzureChatOpenAI_ChatModels implements INode {
console.error('Error parsing base options', exception)
}
}
if (modelName === 'o3-mini') {
if (modelName === 'o3-mini' || modelName.includes('o1')) {
delete obj.temperature
}
if ((modelName.includes('o1') || modelName.includes('o3')) && reasoningEffort) {

View File

@ -21,7 +21,7 @@ class ChatAnthropic_ChatModels implements INode {
constructor() {
this.label = 'ChatAnthropic'
this.name = 'chatAnthropic'
this.version = 7.0
this.version = 8.0
this.type = 'ChatAnthropic'
this.icon = 'Anthropic.svg'
this.category = 'Chat Models'
@ -87,6 +87,24 @@ class ChatAnthropic_ChatModels implements INode {
optional: true,
additionalParams: true
},
{
label: 'Extended Thinking',
name: 'extendedThinking',
type: 'boolean',
description: 'Enable extended thinking for reasoning model such as Claude Sonnet 3.7',
optional: true,
additionalParams: true
},
{
label: 'Budget Tokens',
name: 'budgetTokens',
type: 'number',
step: 1,
default: 1024,
description: 'Maximum number of tokens Claude is allowed use for its internal reasoning process',
optional: true,
additionalParams: true
},
{
label: 'Allow Image Uploads',
name: 'allowImageUploads',
@ -114,6 +132,8 @@ class ChatAnthropic_ChatModels implements INode {
const topK = nodeData.inputs?.topK as string
const streaming = nodeData.inputs?.streaming as boolean
const cache = nodeData.inputs?.cache as BaseCache
const extendedThinking = nodeData.inputs?.extendedThinking as boolean
const budgetTokens = nodeData.inputs?.budgetTokens as string
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const anthropicApiKey = getCredentialParam('anthropicApiKey', credentialData, nodeData)
@ -131,6 +151,13 @@ class ChatAnthropic_ChatModels implements INode {
if (topP) obj.topP = parseFloat(topP)
if (topK) obj.topK = parseFloat(topK)
if (cache) obj.cache = cache
if (extendedThinking) {
obj.thinking = {
type: 'enabled',
budget_tokens: parseInt(budgetTokens, 10)
}
delete obj.temperature
}
const multiModalOption: IMultiModalOption = {
image: {

View File

@ -5,6 +5,7 @@ import { ICommonObject, IMultiModalOption, INode, INodeData, INodeOptionsValue,
import { convertMultiOptionsToStringArray, getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { getModels, MODEL_TYPE } from '../../../src/modelLoader'
import { ChatGoogleGenerativeAI, GoogleGenerativeAIChatInput } from './FlowiseChatGoogleGenerativeAI'
import type FlowiseGoogleAICacheManager from '../../cache/GoogleGenerativeAIContextCache/FlowiseGoogleAICacheManager'
class GoogleGenerativeAI_ChatModels implements INode {
label: string
@ -42,6 +43,12 @@ class GoogleGenerativeAI_ChatModels implements INode {
type: 'BaseCache',
optional: true
},
{
label: 'Context Cache',
name: 'contextCache',
type: 'GoogleAICacheManager',
optional: true
},
{
label: 'Model Name',
name: 'modelName',
@ -55,7 +62,8 @@ class GoogleGenerativeAI_ChatModels implements INode {
type: 'string',
placeholder: 'gemini-1.5-pro-exp-0801',
description: 'Custom model name to use. If provided, it will override the model selected',
additionalParams: true
additionalParams: true,
optional: true
},
{
label: 'Temperature',
@ -156,6 +164,14 @@ class GoogleGenerativeAI_ChatModels implements INode {
optional: true,
additionalParams: true
},
{
label: 'Base URL',
name: 'baseUrl',
type: 'string',
description: 'Base URL for the API. Leave empty to use the default.',
optional: true,
additionalParams: true
},
{
label: 'Allow Image Uploads',
name: 'allowImageUploads',
@ -188,7 +204,9 @@ class GoogleGenerativeAI_ChatModels implements INode {
const harmCategory = nodeData.inputs?.harmCategory as string
const harmBlockThreshold = nodeData.inputs?.harmBlockThreshold as string
const cache = nodeData.inputs?.cache as BaseCache
const contextCache = nodeData.inputs?.contextCache as FlowiseGoogleAICacheManager
const streaming = nodeData.inputs?.streaming as boolean
const baseUrl = nodeData.inputs?.baseUrl as string | undefined
const allowImageUploads = nodeData.inputs?.allowImageUploads as boolean
@ -203,6 +221,7 @@ class GoogleGenerativeAI_ChatModels implements INode {
if (topK) obj.topK = parseFloat(topK)
if (cache) obj.cache = cache
if (temperature) obj.temperature = parseFloat(temperature)
if (baseUrl) obj.baseUrl = baseUrl
// Safety Settings
let harmCategories: string[] = convertMultiOptionsToStringArray(harmCategory)
@ -225,6 +244,7 @@ class GoogleGenerativeAI_ChatModels implements INode {
const model = new ChatGoogleGenerativeAI(nodeData.id, obj)
model.setMultiModalOption(multiModalOption)
if (contextCache) model.setContextCache(contextCache)
return model
}

View File

@ -25,6 +25,7 @@ import { StructuredToolInterface } from '@langchain/core/tools'
import { isStructuredTool } from '@langchain/core/utils/function_calling'
import { zodToJsonSchema } from 'zod-to-json-schema'
import { BaseLanguageModelCallOptions } from '@langchain/core/language_models/base'
import type FlowiseGoogleAICacheManager from '../../cache/GoogleGenerativeAIContextCache/FlowiseGoogleAICacheManager'
const DEFAULT_IMAGE_MAX_TOKEN = 8192
const DEFAULT_IMAGE_MODEL = 'gemini-1.5-flash-latest'
@ -80,12 +81,16 @@ class LangchainChatGoogleGenerativeAI
apiKey?: string
baseUrl?: string
streaming = false
streamUsage = true
private client: GenerativeModel
private contextCache?: FlowiseGoogleAICacheManager
get _isMultimodalModel() {
return this.modelName.includes('vision') || this.modelName.startsWith('gemini-1.5')
}
@ -147,20 +152,33 @@ class LangchainChatGoogleGenerativeAI
this.getClient()
}
getClient(tools?: Tool[]) {
this.client = new GenerativeAI(this.apiKey ?? '').getGenerativeModel({
model: this.modelName,
tools,
safetySettings: this.safetySettings as SafetySetting[],
generationConfig: {
candidateCount: 1,
stopSequences: this.stopSequences,
maxOutputTokens: this.maxOutputTokens,
temperature: this.temperature,
topP: this.topP,
topK: this.topK
async getClient(prompt?: Content[], tools?: Tool[]) {
this.client = new GenerativeAI(this.apiKey ?? '').getGenerativeModel(
{
model: this.modelName,
tools,
safetySettings: this.safetySettings as SafetySetting[],
generationConfig: {
candidateCount: 1,
stopSequences: this.stopSequences,
maxOutputTokens: this.maxOutputTokens,
temperature: this.temperature,
topP: this.topP,
topK: this.topK
}
},
{
baseUrl: this.baseUrl
}
})
)
if (this.contextCache) {
const cachedContent = await this.contextCache.lookup({
contents: prompt ? [{ ...prompt[0], parts: prompt[0].parts.slice(0, 1) }] : [],
model: this.modelName,
tools
})
this.client.cachedContent = cachedContent as any
}
}
_combineLLMOutput() {
@ -209,6 +227,16 @@ class LangchainChatGoogleGenerativeAI
}
}
setContextCache(contextCache: FlowiseGoogleAICacheManager): void {
this.contextCache = contextCache
}
async getNumTokens(prompt: BaseMessage[]) {
const contents = convertBaseMessagesToContent(prompt, this._isMultimodalModel)
const { totalTokens } = await this.client.countTokens({ contents })
return totalTokens
}
async _generateNonStreaming(
prompt: Content[],
options: this['ParsedCallOptions'],
@ -220,9 +248,9 @@ class LangchainChatGoogleGenerativeAI
this.convertFunctionResponse(prompt)
if (tools.length > 0) {
this.getClient(tools as Tool[])
await this.getClient(prompt, tools as Tool[])
} else {
this.getClient()
await this.getClient(prompt)
}
const res = await this.caller.callWithOptions({ signal: options?.signal }, async () => {
let output
@ -290,9 +318,9 @@ class LangchainChatGoogleGenerativeAI
const tools = options.tools ?? []
if (tools.length > 0) {
this.getClient(tools as Tool[])
await this.getClient(prompt, tools as Tool[])
} else {
this.getClient()
await this.getClient(prompt)
}
const stream = await this.caller.callWithOptions({ signal: options?.signal }, async () => {
@ -394,24 +422,18 @@ function getMessageAuthor(message: BaseMessage) {
}
function convertAuthorToRole(author: string) {
switch (author) {
/**
* Note: Gemini currently is not supporting system messages
* we will convert them to human messages and merge with following
* */
switch (author.toLowerCase()) {
case 'ai':
case 'model': // getMessageAuthor returns message.name. code ex.: return message.name ?? type;
case 'assistant':
case 'model':
return 'model'
case 'system':
case 'human':
return 'user'
case 'function':
case 'tool':
return 'function'
case 'system':
case 'human':
default:
// Instead of throwing, we return model (Needed for Multi Agent)
// throw new Error(`Unknown / unsupported author: ${author}`)
return 'model'
return 'user'
}
}
@ -499,17 +521,29 @@ function convertMessageContentToParts(message: BaseMessage, isMultimodalModel: b
function checkIfEmptyContentAndSameRole(contents: Content[]) {
let prevRole = ''
const removedContents: Content[] = []
const validContents: Content[] = []
for (const content of contents) {
const role = content.role
if (content.parts.length && content.parts[0].text === '' && role === prevRole) {
removedContents.push(content)
// Skip only if completely empty
if (!content.parts || !content.parts.length) {
continue
}
prevRole = role
// Ensure role is always either 'user' or 'model'
content.role = content.role === 'model' ? 'model' : 'user'
// Handle consecutive messages
if (content.role === prevRole && validContents.length > 0) {
// Merge with previous content if same role
validContents[validContents.length - 1].parts.push(...content.parts)
continue
}
validContents.push(content)
prevRole = content.role
}
return contents.filter((content) => !removedContents.includes(content))
return validContents
}
function convertBaseMessagesToContent(messages: BaseMessage[], isMultimodalModel: boolean) {
@ -547,7 +581,7 @@ function convertBaseMessagesToContent(messages: BaseMessage[], isMultimodalModel
}
}
let actualRole = role
if (actualRole === 'function') {
if (actualRole === 'function' || actualRole === 'tool') {
// GenerativeAI API will throw an error if the role is not "user" or "model."
actualRole = 'user'
}
@ -649,13 +683,39 @@ function zodToGeminiParameters(zodObj: any) {
const jsonSchema: any = zodToJsonSchema(zodObj)
// eslint-disable-next-line unused-imports/no-unused-vars
const { $schema, additionalProperties, ...rest } = jsonSchema
// Ensure all properties have type specified
if (rest.properties) {
Object.keys(rest.properties).forEach((key) => {
if (rest.properties[key].enum?.length) {
rest.properties[key] = { type: 'string', format: 'enum', enum: rest.properties[key].enum }
const prop = rest.properties[key]
// Handle enum types
if (prop.enum?.length) {
rest.properties[key] = {
type: 'string',
format: 'enum',
enum: prop.enum
}
}
// Handle missing type
else if (!prop.type && !prop.oneOf && !prop.anyOf && !prop.allOf) {
// Infer type from other properties
if (prop.minimum !== undefined || prop.maximum !== undefined) {
prop.type = 'number'
} else if (prop.format === 'date-time') {
prop.type = 'string'
} else if (prop.items) {
prop.type = 'array'
} else if (prop.properties) {
prop.type = 'object'
} else {
// Default to string if type can't be inferred
prop.type = 'string'
}
}
})
}
return rest
}

View File

@ -99,7 +99,8 @@ class GoogleVertexAI_ChatModels implements INode {
type: 'string',
placeholder: 'gemini-1.5-pro-exp-0801',
description: 'Custom model name to use. If provided, it will override the model selected',
additionalParams: true
additionalParams: true,
optional: true
},
{
label: 'Temperature',

View File

@ -0,0 +1,135 @@
import { OpenAIChatInput, ChatOpenAI } from '@langchain/openai'
import { BaseCache } from '@langchain/core/caches'
import { BaseLLMParams } from '@langchain/core/language_models/llms'
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
// Chat-model node for a LiteLLM proxy server, reached through its
// OpenAI-compatible API via the LangChain ChatOpenAI client.
class ChatLitellm_ChatModels implements INode {
    label: string
    name: string
    version: number
    type: string
    icon: string
    category: string
    description: string
    baseClasses: string[]
    credential: INodeParams
    inputs: INodeParams[]

    constructor() {
        // Node identity and presentation
        this.label = 'ChatLitellm'
        this.name = 'chatLitellm'
        this.version = 1.0
        this.type = 'ChatLitellm'
        this.icon = 'litellm.jpg'
        this.category = 'Chat Models'
        this.description = 'Connect to a Litellm server using OpenAI-compatible API'
        this.baseClasses = [this.type, 'BaseChatModel', ...getBaseClasses(ChatOpenAI)]
        // API key is optional — a LiteLLM server may run without auth
        this.credential = {
            label: 'Connect Credential',
            name: 'credential',
            type: 'credential',
            credentialNames: ['litellmApi'],
            optional: true
        }
        this.inputs = [
            {
                label: 'Cache',
                name: 'cache',
                type: 'BaseCache',
                optional: true
            },
            {
                label: 'Base URL',
                name: 'basePath',
                type: 'string',
                placeholder: 'http://localhost:8000'
            },
            {
                label: 'Model Name',
                name: 'modelName',
                type: 'string',
                placeholder: 'model_name'
            },
            {
                label: 'Temperature',
                name: 'temperature',
                type: 'number',
                step: 0.1,
                default: 0.9,
                optional: true
            },
            {
                label: 'Streaming',
                name: 'streaming',
                type: 'boolean',
                default: true,
                optional: true,
                additionalParams: true
            },
            {
                label: 'Max Tokens',
                name: 'maxTokens',
                type: 'number',
                step: 1,
                optional: true,
                additionalParams: true
            },
            {
                label: 'Top P',
                name: 'topP',
                type: 'number',
                step: 0.1,
                optional: true,
                additionalParams: true
            },
            {
                label: 'Timeout',
                name: 'timeout',
                type: 'number',
                step: 1,
                optional: true,
                additionalParams: true
            }
        ]
    }

    /**
     * Build a ChatOpenAI client pointed at the LiteLLM server.
     * Optional settings are only included when the user supplied them.
     */
    async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
        const inputs = nodeData.inputs ?? {}
        const cache = inputs.cache as BaseCache
        const basePath = inputs.basePath as string
        const modelName = inputs.modelName as string
        const temperature = inputs.temperature as string
        const streaming = inputs.streaming as boolean
        const maxTokens = inputs.maxTokens as string
        const topP = inputs.topP as string
        const timeout = inputs.timeout as string

        // API key is optional; LiteLLM servers may not require one
        const credentialData = await getCredentialData(nodeData.credential ?? '', options)
        const apiKey = getCredentialParam('litellmApiKey', credentialData, nodeData)

        const obj: Partial<OpenAIChatInput> &
            BaseLLMParams & { openAIApiKey?: string } & { configuration?: { baseURL?: string; defaultHeaders?: ICommonObject } } = {
            temperature: parseFloat(temperature),
            modelName,
            streaming: streaming ?? true,
            // Optional settings are spread in only when present
            ...(basePath ? { configuration: { baseURL: basePath } } : {}),
            ...(maxTokens ? { maxTokens: parseInt(maxTokens, 10) } : {}),
            ...(topP ? { topP: parseFloat(topP) } : {}),
            ...(timeout ? { timeout: parseInt(timeout, 10) } : {}),
            ...(cache ? { cache } : {}),
            ...(apiKey ? { openAIApiKey: apiKey } : {})
        }

        return new ChatOpenAI(obj)
    }
}
module.exports = { nodeClass: ChatLitellm_ChatModels }

Binary file not shown.

After

Width:  |  Height:  |  Size: 12 KiB

View File

@ -1,5 +1,5 @@
import { ChatOpenAI, ChatOpenAIFields } from '@langchain/openai'
import { BaseCache } from '@langchain/core/caches'
import { ChatOpenAI, ChatOpenAIFields } from '@langchain/openai'
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
@ -16,19 +16,19 @@ class ChatNvdiaNIM_ChatModels implements INode {
inputs: INodeParams[]
constructor() {
this.label = 'Chat Nvdia NIM'
this.name = 'chatNvdiaNIM'
this.version = 1.0
this.type = 'ChatNvdiaNIM'
this.label = 'Chat NVIDIA NIM'
this.name = 'Chat NVIDIA NIM'
this.version = 1.1
this.type = 'Chat NVIDIA NIM'
this.icon = 'nvdia.svg'
this.category = 'Chat Models'
this.description = 'Wrapper around Nvdia NIM Inference API'
this.description = 'Wrapper around NVIDIA NIM Inference API'
this.baseClasses = [this.type, ...getBaseClasses(ChatOpenAI)]
this.credential = {
label: 'Connect Credential',
name: 'credential',
type: 'credential',
credentialNames: ['nvdiaNIMApi'],
credentialNames: ['nvidiaNIMApi'],
optional: true
}
this.inputs = [
@ -44,6 +44,13 @@ class ChatNvdiaNIM_ChatModels implements INode {
type: 'string',
placeholder: 'microsoft/phi-3-mini-4k-instruct'
},
{
label: 'Base Path',
name: 'basePath',
type: 'string',
description: 'Specify the URL of the deployed NIM Inference API',
placeholder: 'https://integrate.api.nvidia.com/v1'
},
{
label: 'Temperature',
name: 'temperature',
@ -52,13 +59,6 @@ class ChatNvdiaNIM_ChatModels implements INode {
default: 0.9,
optional: true
},
{
label: 'Base Path',
name: 'basePath',
type: 'string',
description: 'Specify the URL of the deployed NIM Inference API',
placeholder: 'https://integrate.api.nvidia.com/v1'
},
{
label: 'Streaming',
name: 'streaming',
@ -131,12 +131,12 @@ class ChatNvdiaNIM_ChatModels implements INode {
const cache = nodeData.inputs?.cache as BaseCache
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const nvdiaNIMApiKey = getCredentialParam('nvdiaNIMApiKey', credentialData, nodeData)
const nvidiaNIMApiKey = getCredentialParam('nvidiaNIMApiKey', credentialData, nodeData)
const obj: ChatOpenAIFields & { nvdiaNIMApiKey?: string } = {
temperature: parseFloat(temperature),
modelName,
openAIApiKey: nvdiaNIMApiKey,
openAIApiKey: nvidiaNIMApiKey ?? 'sk-',
streaming: streaming ?? true
}
@ -153,7 +153,7 @@ class ChatNvdiaNIM_ChatModels implements INode {
try {
parsedBaseOptions = typeof baseOptions === 'object' ? baseOptions : JSON.parse(baseOptions)
} catch (exception) {
throw new Error("Invalid JSON in the ChatNvidiaNIM's baseOptions: " + exception)
throw new Error("Invalid JSON in the Chat NVIDIA NIM's baseOptions: " + exception)
}
}

View File

@ -21,7 +21,7 @@ class ChatOpenAI_ChatModels implements INode {
constructor() {
this.label = 'ChatOpenAI'
this.name = 'chatOpenAI'
this.version = 8.1
this.version = 8.2
this.type = 'ChatOpenAI'
this.icon = 'openai.svg'
this.category = 'Chat Models'
@ -172,7 +172,9 @@ class ChatOpenAI_ChatModels implements INode {
],
default: 'low',
optional: false,
additionalParams: true
show: {
allowImageUploads: true
}
},
{
label: 'Reasoning Effort',
@ -241,7 +243,7 @@ class ChatOpenAI_ChatModels implements INode {
streaming: streaming ?? true
}
if (modelName.includes('o3')) {
if (modelName.includes('o3') || modelName.includes('o1')) {
delete obj.temperature
}
if ((modelName.includes('o1') || modelName.includes('o3')) && reasoningEffort) {

View File

@ -0,0 +1,237 @@
import { ChatPerplexity as LangchainChatPerplexity, PerplexityChatInput } from '@langchain/community/chat_models/perplexity'
import { BaseCache } from '@langchain/core/caches'
import { ICommonObject, INode, INodeData, INodeOptionsValue, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { ChatPerplexity } from './FlowiseChatPerplexity'
import { getModels, MODEL_TYPE } from '../../../src/modelLoader'
/**
 * Flowise chat-model node that wraps the Perplexity chat-completion API via the
 * Langchain community `ChatPerplexity` integration. The constructor only declares
 * UI metadata and input fields; `init` builds the runtime model instance.
 */
class ChatPerplexity_ChatModels implements INode {
    label: string
    name: string
    version: number
    type: string
    icon: string
    category: string
    description: string
    baseClasses: string[]
    credential: INodeParams
    inputs: INodeParams[]
    constructor() {
        this.label = 'ChatPerplexity'
        this.name = 'chatPerplexity'
        this.version = 0.1
        this.type = 'ChatPerplexity'
        this.icon = 'perplexity.svg'
        this.category = 'Chat Models'
        this.description = 'Wrapper around Perplexity large language models that use the Chat endpoint'
        this.baseClasses = [this.type, ...getBaseClasses(LangchainChatPerplexity)]
        // API key is resolved from this credential in init() (param 'perplexityApiKey').
        this.credential = {
            label: 'Connect Credential',
            name: 'credential',
            type: 'credential',
            credentialNames: ['perplexityApi']
        }
        this.inputs = [
            {
                label: 'Cache',
                name: 'cache',
                type: 'BaseCache',
                optional: true
            },
            {
                label: 'Model Name',
                name: 'model',
                type: 'asyncOptions',
                loadMethod: 'listModels',
                default: 'sonar'
            },
            {
                label: 'Temperature',
                name: 'temperature',
                type: 'number',
                step: 0.1,
                default: 1,
                optional: true
            },
            {
                label: 'Max Tokens',
                name: 'maxTokens',
                type: 'number',
                step: 1,
                optional: true,
                additionalParams: true
            },
            {
                label: 'Top P',
                name: 'topP',
                type: 'number',
                step: 0.1,
                optional: true,
                additionalParams: true
            },
            {
                label: 'Top K',
                name: 'topK',
                type: 'number',
                step: 1,
                optional: true,
                additionalParams: true
            },
            {
                label: 'Presence Penalty',
                name: 'presencePenalty',
                type: 'number',
                step: 0.1,
                optional: true,
                additionalParams: true
            },
            {
                label: 'Frequency Penalty',
                name: 'frequencyPenalty',
                type: 'number',
                step: 0.1,
                optional: true,
                additionalParams: true
            },
            {
                label: 'Streaming',
                name: 'streaming',
                type: 'boolean',
                default: true,
                optional: true,
                additionalParams: true
            },
            {
                label: 'Timeout',
                name: 'timeout',
                type: 'number',
                step: 1,
                optional: true,
                additionalParams: true
            },
            // NOTE(review): the Search Domain Filter UI input is disabled below, but
            // init() still reads nodeData.inputs?.searchDomainFilter — confirm whether
            // this is intended (e.g. set programmatically) before removing either side.
            // {
            //     label: 'Search Domain Filter',
            //     name: 'searchDomainFilter',
            //     type: 'json',
            //     optional: true,
            //     additionalParams: true,
            //     description: 'Limit citations to URLs from specified domains (e.g., ["example.com", "anotherexample.org"])'
            // },
            // Currently disabled as output is stored as additional_kwargs
            // {
            //     label: 'Return Images',
            //     name: 'returnImages',
            //     type: 'boolean',
            //     optional: true,
            //     additionalParams: true,
            //     description: 'Whether the model should return images (if supported by the model)'
            // },
            // Currently disabled as output is stored as additional_kwargs
            // {
            //     label: 'Return Related Questions',
            //     name: 'returnRelatedQuestions',
            //     type: 'boolean',
            //     optional: true,
            //     additionalParams: true,
            //     description: 'Whether the online model should return related questions'
            // },
            // {
            //     label: 'Search Recency Filter',
            //     name: 'searchRecencyFilter',
            //     type: 'options',
            //     options: [
            //         { label: 'Not Set', name: '' },
            //         { label: 'Month', name: 'month' },
            //         { label: 'Week', name: 'week' },
            //         { label: 'Day', name: 'day' },
            //         { label: 'Hour', name: 'hour' }
            //     ],
            //     default: '',
            //     optional: true,
            //     additionalParams: true,
            //     description: 'Filter search results by time interval (does not apply to images)'
            // },
            {
                label: 'Proxy Url',
                name: 'proxyUrl',
                type: 'string',
                optional: true,
                additionalParams: true
            }
            // LangchainJS currently does not have a web_search_options, search_after_date_filter or search_before_date_filter parameter.
            // To add web_search_options (user_location, search_context_size) and search_after_date_filter, search_before_date_filter as a modelKwargs parameter.
        ]
    }
    //@ts-ignore
    loadMethods = {
        // Populates the Model Name dropdown from the shared models catalogue.
        async listModels(): Promise<INodeOptionsValue[]> {
            return await getModels(MODEL_TYPE.CHAT, 'chatPerplexity')
        }
    }
    /**
     * Builds the ChatPerplexity model from the node's inputs and credential.
     * Numeric inputs arrive as strings from the UI and are parsed here; falsy
     * values (empty string, 0) are simply not forwarded to the model config.
     *
     * @throws Error when the credential does not contain a Perplexity API key,
     *         or when Search Domain Filter contains invalid JSON.
     */
    async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
        const model = nodeData.inputs?.model as string
        const temperature = nodeData.inputs?.temperature as string
        const maxTokens = nodeData.inputs?.maxTokens as string
        const topP = nodeData.inputs?.topP as string
        const topK = nodeData.inputs?.topK as string
        const presencePenalty = nodeData.inputs?.presencePenalty as string
        const frequencyPenalty = nodeData.inputs?.frequencyPenalty as string
        const streaming = nodeData.inputs?.streaming as boolean
        const timeout = nodeData.inputs?.timeout as string
        // NOTE(review): this input (and returnImages / returnRelatedQuestions /
        // searchRecencyFilter below) is commented out in the inputs list above,
        // so it is normally undefined here — confirm if still needed.
        const searchDomainFilterRaw = nodeData.inputs?.searchDomainFilter
        const returnImages = nodeData.inputs?.returnImages as boolean
        const returnRelatedQuestions = nodeData.inputs?.returnRelatedQuestions as boolean
        const searchRecencyFilter = nodeData.inputs?.searchRecencyFilter as string
        const proxyUrl = nodeData.inputs?.proxyUrl as string
        const cache = nodeData.inputs?.cache as BaseCache
        // Allow a credential id passed directly via inputs to override the node credential.
        if (nodeData.inputs?.credentialId) {
            nodeData.credential = nodeData.inputs?.credentialId
        }
        const credentialData = await getCredentialData(nodeData.credential ?? '', options)
        const apiKey = getCredentialParam('perplexityApiKey', credentialData, nodeData)
        if (!apiKey) {
            throw new Error('Perplexity API Key missing from credential')
        }
        // Streaming defaults to true when the toggle was never set.
        const obj: PerplexityChatInput = {
            model,
            apiKey,
            streaming: streaming ?? true
        }
        if (temperature) obj.temperature = parseFloat(temperature)
        if (maxTokens) obj.maxTokens = parseInt(maxTokens, 10)
        if (topP) obj.topP = parseFloat(topP)
        if (topK) obj.topK = parseInt(topK, 10)
        if (presencePenalty) obj.presencePenalty = parseFloat(presencePenalty)
        if (frequencyPenalty) obj.frequencyPenalty = parseFloat(frequencyPenalty)
        if (timeout) obj.timeout = parseInt(timeout, 10)
        if (returnImages) obj.returnImages = returnImages
        if (returnRelatedQuestions) obj.returnRelatedQuestions = returnRelatedQuestions
        if (searchRecencyFilter && searchRecencyFilter !== '') obj.searchRecencyFilter = searchRecencyFilter
        if (cache) obj.cache = cache
        // Accepts either an already-parsed array/object or a JSON string from the UI.
        if (searchDomainFilterRaw) {
            try {
                obj.searchDomainFilter =
                    typeof searchDomainFilterRaw === 'object' ? searchDomainFilterRaw : JSON.parse(searchDomainFilterRaw)
            } catch (exception) {
                throw new Error('Invalid JSON in Search Domain Filter: ' + exception)
            }
        }
        // Proxy support is not wired into the wrapper yet; only warn so flows keep working.
        if (proxyUrl) {
            console.warn('Proxy configuration for ChatPerplexity might require adjustments to FlowiseChatPerplexity wrapper.')
        }
        const perplexityModel = new ChatPerplexity(nodeData.id, obj)
        return perplexityModel
    }
}
module.exports = { nodeClass: ChatPerplexity_ChatModels }

View File

@ -0,0 +1,32 @@
import { ChatPerplexity as LangchainChatPerplexity, type PerplexityChatInput } from '@langchain/community/chat_models/perplexity'
import { IMultiModalOption, IVisionChatModal } from '../../../src'
// Flowise wrapper around the Langchain ChatPerplexity model. It records the node id,
// keeps the originally configured model/max-token values so they can be restored,
// and satisfies the IVisionChatModal contract expected by Flowise.
export class ChatPerplexity extends LangchainChatPerplexity implements IVisionChatModal {
    id: string
    multiModalOption: IMultiModalOption
    configuredModel: string
    configuredMaxToken?: number

    constructor(id: string, fields: PerplexityChatInput) {
        super(fields)
        this.id = id
        // Capture the construction-time configuration for revertToOriginalModel().
        this.configuredModel = fields?.model ?? ''
        this.configuredMaxToken = fields?.maxTokens
    }

    /** Restore the model name and max-token budget captured at construction time. */
    revertToOriginalModel(): void {
        this.model = this.configuredModel
        this.maxTokens = this.configuredMaxToken
    }

    /** Store the multimodal (e.g. image upload) options supplied by the flow. */
    setMultiModalOption(options: IMultiModalOption): void {
        this.multiModalOption = options
    }

    /** No dedicated vision variant to switch to — intentionally a no-op. */
    setVisionModel(): void {
        // intentionally empty
    }
}

View File

@ -0,0 +1,8 @@
<svg version="1.1" id="Layer_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px"
viewBox="0 0 48 48" style="enable-background:new 0 0 48 48;" xml:space="preserve">
<path style="fill-rule:evenodd;clip-rule:evenodd;fill:black;" d="M11.469,4l11.39,10.494v-0.002V4.024h2.217v10.517
L36.518,4v11.965h4.697v17.258h-4.683v10.654L25.077,33.813v10.18h-2.217V33.979L11.482,44V33.224H6.785V15.965h4.685V4z
M21.188,18.155H9.002v12.878h2.477v-4.062L21.188,18.155z M13.699,27.943v11.17l9.16-8.068V19.623L13.699,27.943z M25.141,30.938
V19.612l9.163,8.321v5.291h0.012v5.775L25.141,30.938z M36.532,31.033h2.466V18.155H26.903l9.629,8.725V31.033z M34.301,15.965
V9.038l-7.519,6.927H34.301z M21.205,15.965h-7.519V9.038L21.205,15.965z"/>
</svg>

After

Width:  |  Height:  |  Size: 775 B

View File

@ -48,6 +48,14 @@ class ChatGroq_LlamaIndex_ChatModels implements INode {
step: 0.1,
default: 0.9,
optional: true
},
{
label: 'Max Tokens',
name: 'maxTokens',
type: 'number',
step: 1,
optional: true,
additionalParams: true
}
]
}
@ -62,7 +70,7 @@ class ChatGroq_LlamaIndex_ChatModels implements INode {
async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
const temperature = nodeData.inputs?.temperature as string
const modelName = nodeData.inputs?.modelName as string
const maxTokens = nodeData.inputs?.maxTokens as string
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const groqApiKey = getCredentialParam('groqApiKey', credentialData, nodeData)
@ -71,7 +79,7 @@ class ChatGroq_LlamaIndex_ChatModels implements INode {
model: modelName,
apiKey: groqApiKey
}
if (maxTokens) obj.maxTokens = parseInt(maxTokens, 10)
const model = new Groq(obj)
return model
}

View File

@ -54,6 +54,14 @@ class Groq_ChatModels implements INode {
default: 0.9,
optional: true
},
{
label: 'Max Tokens',
name: 'maxTokens',
type: 'number',
step: 1,
optional: true,
additionalParams: true
},
{
label: 'Streaming',
name: 'streaming',
@ -73,6 +81,7 @@ class Groq_ChatModels implements INode {
async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
const modelName = nodeData.inputs?.modelName as string
const maxTokens = nodeData.inputs?.maxTokens as string
const cache = nodeData.inputs?.cache as BaseCache
const temperature = nodeData.inputs?.temperature as string
const streaming = nodeData.inputs?.streaming as boolean
@ -86,6 +95,7 @@ class Groq_ChatModels implements INode {
apiKey: groqApiKey,
streaming: streaming ?? true
}
if (maxTokens) obj.maxTokens = parseInt(maxTokens, 10)
if (cache) obj.cache = cache
const model = new ChatGroq(obj)

View File

@ -47,7 +47,7 @@ class File_DocumentLoaders implements INode {
},
{
label: 'Pdf Usage',
name: 'pdfUsage',
name: 'usage',
type: 'options',
description: 'Only when loading PDF files',
options: [
@ -64,6 +64,14 @@ class File_DocumentLoaders implements INode {
optional: true,
additionalParams: true
},
{
label: 'Use Legacy Build',
name: 'legacyBuild',
type: 'boolean',
description: 'Use legacy build for PDF compatibility issues',
optional: true,
additionalParams: true
},
{
label: 'JSONL Pointer Extraction',
name: 'pointerName',
@ -113,7 +121,8 @@ class File_DocumentLoaders implements INode {
const textSplitter = nodeData.inputs?.textSplitter as TextSplitter
const fileBase64 = nodeData.inputs?.file as string
const metadata = nodeData.inputs?.metadata
const pdfUsage = nodeData.inputs?.pdfUsage
const pdfUsage = nodeData.inputs?.pdfUsage || nodeData.inputs?.usage
const legacyBuild = nodeData.inputs?.legacyBuild as boolean
const pointerName = nodeData.inputs?.pointerName as string
const _omitMetadataKeys = nodeData.inputs?.omitMetadataKeys as string
const output = nodeData.outputs?.output as string
@ -173,10 +182,21 @@ class File_DocumentLoaders implements INode {
const match = file.match(/^data:([A-Za-z-+\/]+);base64,/)
if (!match) {
fileBlobs.push({
blob,
ext: extension
})
// Fallback: check if there's a filename pattern at the end
const filenameMatch = file.match(/,filename:(.+\.\w+)$/)
if (filenameMatch && filenameMatch[1]) {
const filename = filenameMatch[1]
const fileExt = filename.split('.').pop() || ''
fileBlobs.push({
blob,
ext: fileExt
})
} else {
fileBlobs.push({
blob,
ext: extension
})
}
} else {
const mimeType = match[1]
fileBlobs.push({
@ -199,9 +219,18 @@ class File_DocumentLoaders implements INode {
pdf: (blob) =>
pdfUsage === 'perFile'
? // @ts-ignore
new PDFLoader(blob, { splitPages: false, pdfjs: () => import('pdf-parse/lib/pdf.js/v1.10.100/build/pdf.js') })
new PDFLoader(blob, {
splitPages: false,
pdfjs: () =>
// @ts-ignore
legacyBuild ? import('pdfjs-dist/legacy/build/pdf.js') : import('pdf-parse/lib/pdf.js/v1.10.100/build/pdf.js')
})
: // @ts-ignore
new PDFLoader(blob, { pdfjs: () => import('pdf-parse/lib/pdf.js/v1.10.100/build/pdf.js') }),
new PDFLoader(blob, {
pdfjs: () =>
// @ts-ignore
legacyBuild ? import('pdfjs-dist/legacy/build/pdf.js') : import('pdf-parse/lib/pdf.js/v1.10.100/build/pdf.js')
}),
'': (blob) => new TextLoader(blob)
})
let docs = []

View File

@ -61,6 +61,24 @@ class Github_DocumentLoaders implements INode {
optional: true,
additionalParams: true
},
{
label: 'Github Base URL',
name: 'githubBaseUrl',
type: 'string',
placeholder: `https://git.example.com`,
description: 'Custom Github Base Url (e.g. Enterprise)',
optional: true,
additionalParams: true
},
{
label: 'Github Instance API',
name: 'githubInstanceApi',
type: 'string',
placeholder: `https://api.github.com`,
description: 'Custom Github API Url (e.g. Enterprise)',
optional: true,
additionalParams: true
},
{
label: 'Ignore Paths',
name: 'ignorePath',
@ -134,6 +152,8 @@ class Github_DocumentLoaders implements INode {
const ignorePath = nodeData.inputs?.ignorePath as string
const _omitMetadataKeys = nodeData.inputs?.omitMetadataKeys as string
const output = nodeData.outputs?.output as string
const githubInstanceApi = nodeData.inputs?.githubInstanceApi as string
const githubBaseUrl = nodeData.inputs?.githubBaseUrl as string
let omitMetadataKeys: string[] = []
if (_omitMetadataKeys) {
@ -153,6 +173,12 @@ class Github_DocumentLoaders implements INode {
if (maxConcurrency) githubOptions.maxConcurrency = parseInt(maxConcurrency, 10)
if (maxRetries) githubOptions.maxRetries = parseInt(maxRetries, 10)
if (ignorePath) githubOptions.ignorePaths = JSON.parse(ignorePath)
if (githubInstanceApi) {
githubOptions.apiUrl = githubInstanceApi.endsWith('/') ? githubInstanceApi.slice(0, -1) : githubInstanceApi
}
if (githubBaseUrl) {
githubOptions.baseUrl = githubBaseUrl.endsWith('/') ? githubBaseUrl.slice(0, -1) : githubBaseUrl
}
const loader = new GithubRepoLoader(repoLink, githubOptions)

View File

@ -0,0 +1,194 @@
import { omit } from 'lodash'
import { ICommonObject, IDocument, INode, INodeData, INodeParams } from '../../../src/Interface'
import { TextSplitter } from 'langchain/text_splitter'
import { JiraProjectLoaderParams, JiraProjectLoader } from '@langchain/community/document_loaders/web/jira'
import { getCredentialData, getCredentialParam, handleEscapeCharacters, INodeOutputsValue } from '../../../src'
/**
 * Flowise document-loader node that fetches issues from a Jira project via the
 * Langchain community `JiraProjectLoader` and returns them either as Document
 * objects or as one concatenated text string.
 */
class Jira_DocumentLoaders implements INode {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    baseClasses: string[]
    credential: INodeParams
    inputs: INodeParams[]
    outputs: INodeOutputsValue[]
    constructor() {
        this.label = 'Jira'
        this.name = 'jira'
        this.version = 1.0
        this.type = 'Document'
        this.icon = 'jira.svg'
        this.category = 'Document Loaders'
        this.description = `Load issues from Jira`
        this.baseClasses = [this.type]
        // Username + access token are read from this credential in init().
        this.credential = {
            label: 'Connect Credential',
            name: 'credential',
            type: 'credential',
            description: 'Jira API Credential',
            credentialNames: ['jiraApi']
        }
        this.inputs = [
            {
                label: 'Host',
                name: 'host',
                type: 'string',
                placeholder: 'https://jira.example.com'
            },
            {
                label: 'Project Key',
                name: 'projectKey',
                type: 'string',
                default: 'main'
            },
            {
                label: 'Limit per request',
                name: 'limitPerRequest',
                type: 'number',
                step: 1,
                optional: true,
                placeholder: '100'
            },
            {
                label: 'Created after',
                name: 'createdAfter',
                type: 'string',
                optional: true,
                placeholder: '2024-01-01'
            },
            {
                label: 'Text Splitter',
                name: 'textSplitter',
                type: 'TextSplitter',
                optional: true
            },
            {
                label: 'Additional Metadata',
                name: 'metadata',
                type: 'json',
                description: 'Additional metadata to be added to the extracted documents',
                optional: true,
                additionalParams: true
            },
            {
                label: 'Omit Metadata Keys',
                name: 'omitMetadataKeys',
                type: 'string',
                rows: 4,
                description:
                    'Each document loader comes with a default set of metadata keys that are extracted from the document. You can use this field to omit some of the default metadata keys. The value should be a list of keys, separated by comma. Use * to omit all metadata keys except the ones you specify in the Additional Metadata field',
                placeholder: 'key1, key2, key3.nestedKey1',
                optional: true,
                additionalParams: true
            }
        ]
        this.outputs = [
            {
                label: 'Document',
                name: 'document',
                description: 'Array of document objects containing metadata and pageContent',
                baseClasses: [...this.baseClasses, 'json']
            },
            {
                label: 'Text',
                name: 'text',
                description: 'Concatenated string from pageContent of documents',
                baseClasses: ['string', 'json']
            }
        ]
    }
    /**
     * Loads Jira issues, optionally splits them, then applies metadata
     * additions/omissions before returning the selected output shape.
     *
     * @param nodeData node configuration (inputs, outputs, credential reference)
     * @param options  runtime options used to resolve the credential
     * @returns Document[] when the 'document' output is selected, otherwise a
     *          single escaped string of all pageContent
     */
    async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
        const host = nodeData.inputs?.host as string
        const projectKey = nodeData.inputs?.projectKey as string
        const limitPerRequest = nodeData.inputs?.limitPerRequest as string
        const createdAfter = nodeData.inputs?.createdAfter as string
        const textSplitter = nodeData.inputs?.textSplitter as TextSplitter
        const metadata = nodeData.inputs?.metadata
        const _omitMetadataKeys = nodeData.inputs?.omitMetadataKeys as string
        const output = nodeData.outputs?.output as string
        // '*' means "drop every default key"; otherwise a comma-separated list.
        let omitMetadataKeys: string[] = []
        if (_omitMetadataKeys) {
            omitMetadataKeys = _omitMetadataKeys.split(',').map((key) => key.trim())
        }
        const credentialData = await getCredentialData(nodeData.credential ?? '', options)
        const username = getCredentialParam('username', credentialData, nodeData)
        const accessToken = getCredentialParam('accessToken', credentialData, nodeData)
        const jiraOptions: JiraProjectLoaderParams = {
            projectKey,
            host,
            username,
            accessToken
        }
        if (limitPerRequest) {
            // UI delivers numbers as strings; parse with explicit radix.
            jiraOptions.limitPerRequest = parseInt(limitPerRequest, 10)
        }
        if (createdAfter) {
            jiraOptions.createdAfter = new Date(createdAfter)
        }
        const loader = new JiraProjectLoader(jiraOptions)
        // Load once; split afterwards only when a splitter is connected.
        let docs: IDocument[] = await loader.load()
        if (textSplitter) {
            docs = await textSplitter.splitDocuments(docs)
        }
        if (metadata) {
            const parsedMetadata = typeof metadata === 'object' ? metadata : JSON.parse(metadata)
            docs = docs.map((doc) => ({
                ...doc,
                metadata:
                    _omitMetadataKeys === '*'
                        ? {
                              ...parsedMetadata
                          }
                        : omit(
                              {
                                  ...doc.metadata,
                                  ...parsedMetadata
                              },
                              omitMetadataKeys
                          )
            }))
        } else {
            docs = docs.map((doc) => ({
                ...doc,
                metadata:
                    _omitMetadataKeys === '*'
                        ? {}
                        : omit(
                              {
                                  ...doc.metadata
                              },
                              omitMetadataKeys
                          )
            }))
        }
        if (output === 'document') {
            return docs
        } else {
            // Concatenate pageContent and normalise escape characters for text output.
            let finaltext = ''
            for (const doc of docs) {
                finaltext += `${doc.pageContent}\n`
            }
            return handleEscapeCharacters(finaltext, false)
        }
    }
}
module.exports = { nodeClass: Jira_DocumentLoaders }

View File

@ -0,0 +1,2 @@
<?xml version="1.0" encoding="utf-8"?><!-- Uploaded to: SVG Repo, www.svgrepo.com, Generator: SVG Repo Mixer Tools -->
<svg width="800px" height="800px" viewBox="0 0 48 48" xmlns="http://www.w3.org/2000/svg"><defs><style>.a{fill:none;stroke:#000000;stroke-linecap:round;stroke-linejoin:round;}</style></defs><path class="a" d="M5.5,22.9722h0a8.7361,8.7361,0,0,0,8.7361,8.7361h2.0556v2.0556A8.7361,8.7361,0,0,0,25.0278,42.5h0V22.9722Z"/><path class="a" d="M14.2361,14.2361h0a8.7361,8.7361,0,0,0,8.7361,8.7361h2.0556v2.0556a8.7361,8.7361,0,0,0,8.7361,8.7361h0V14.2361Z"/><path class="a" d="M22.9722,5.5h0a8.7361,8.7361,0,0,0,8.7361,8.7361h2.0556v2.0556A8.7361,8.7361,0,0,0,42.5,25.0278h0V5.5Z"/></svg>

After

Width:  |  Height:  |  Size: 699 B

View File

@ -0,0 +1,375 @@
import { Mem0Memory as BaseMem0Memory, Mem0MemoryInput, ClientOptions } from '@mem0/community'
import { MemoryOptions, SearchOptions } from 'mem0ai'
import { BaseMessage } from '@langchain/core/messages'
import { InputValues, MemoryVariables, OutputValues } from '@langchain/core/memory'
import { ICommonObject, IDatabaseEntity } from '../../../src'
import { IMessage, INode, INodeData, INodeParams, MemoryMethods, MessageType } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam, mapChatMessageToBaseMessage } from '../../../src/utils'
import { DataSource } from 'typeorm'
import { v4 as uuidv4 } from 'uuid'
// Extra constructor fields that let the memory read/write Flowise's own
// chat-message table in addition to the Mem0 service.
interface BufferMemoryExtendedInput {
    sessionId: string
    appDataSource: DataSource
    databaseEntities: IDatabaseEntity
    chatflowid: string
}
/**
 * Flowise memory node backed by the Mem0 service. The constructor declares the
 * UI inputs (user/run/agent/app/project/org identifiers plus memory keys);
 * init() delegates to initializeMem0 to build the runtime memory instance.
 */
class Mem0_Memory implements INode {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    baseClasses: string[]
    credential: INodeParams
    inputs: INodeParams[]
    constructor() {
        this.label = 'Mem0'
        this.name = 'mem0'
        this.version = 1.1
        this.type = 'Mem0'
        this.icon = 'mem0.svg'
        this.category = 'Memory'
        this.description = 'Stores and manages chat memory using Mem0 service'
        this.baseClasses = [this.type, ...getBaseClasses(BaseMem0Memory)]
        // Credential is mandatory: the Mem0 API key is read from it in initializeMem0.
        this.credential = {
            label: 'Connect Credential',
            name: 'credential',
            type: 'credential',
            optional: false,
            description: 'Configure API Key for Mem0 service',
            credentialNames: ['mem0MemoryApi']
        }
        this.inputs = [
            {
                label: 'User ID',
                name: 'user_id',
                type: 'string',
                description: 'Unique identifier for the user. Required only if "Use Flowise Chat ID" is OFF.',
                default: 'flowise-default-user',
                optional: true
            },
            // Added toggle to use Flowise chat ID
            {
                label: 'Use Flowise Chat ID',
                name: 'useFlowiseChatId',
                type: 'boolean',
                description: 'Use the Flowise internal Chat ID as the Mem0 User ID, overriding the "User ID" field above.',
                default: false,
                optional: true
            },
            {
                label: 'Search Only',
                name: 'searchOnly',
                type: 'boolean',
                description: 'Search only mode',
                default: false,
                optional: true,
                additionalParams: true
            },
            {
                label: 'Run ID',
                name: 'run_id',
                type: 'string',
                description: 'Unique identifier for the run session',
                default: '',
                optional: true,
                additionalParams: true
            },
            {
                label: 'Agent ID',
                name: 'agent_id',
                type: 'string',
                description: 'Identifier for the agent',
                default: '',
                optional: true,
                additionalParams: true
            },
            {
                label: 'App ID',
                name: 'app_id',
                type: 'string',
                description: 'Identifier for the application',
                default: '',
                optional: true,
                additionalParams: true
            },
            {
                label: 'Project ID',
                name: 'project_id',
                type: 'string',
                description: 'Identifier for the project',
                default: '',
                optional: true,
                additionalParams: true
            },
            {
                label: 'Organization ID',
                name: 'org_id',
                type: 'string',
                description: 'Identifier for the organization',
                default: '',
                optional: true,
                additionalParams: true
            },
            {
                label: 'Memory Key',
                name: 'memoryKey',
                type: 'string',
                default: 'history',
                optional: true,
                additionalParams: true
            },
            {
                label: 'Input Key',
                name: 'inputKey',
                type: 'string',
                default: 'input',
                optional: true,
                additionalParams: true
            },
            {
                label: 'Output Key',
                name: 'outputKey',
                type: 'string',
                default: 'text',
                optional: true,
                additionalParams: true
            }
        ]
    }
    // Builds the Mem0MemoryExtended instance from the node's inputs and credential.
    async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
        return await initializeMem0(nodeData, options)
    }
}
/**
 * Assembles the Mem0 client options, per-request memory options and the
 * Flowise-specific extensions, then constructs Mem0MemoryExtended.
 *
 * @throws Error when "Use Flowise Chat ID" is OFF and the User ID field is empty,
 *         since Mem0 then has no user identifier to scope memories by.
 */
const initializeMem0 = async (nodeData: INodeData, options: ICommonObject): Promise<BaseMem0Memory> => {
    const initialUserId = nodeData.inputs?.user_id as string
    const useFlowiseChatId = nodeData.inputs?.useFlowiseChatId as boolean
    if (!useFlowiseChatId && !initialUserId) {
        throw new Error('User ID field cannot be empty when "Use Flowise Chat ID" is OFF.')
    }
    const credentialData = await getCredentialData(nodeData.credential ?? '', options)
    const apiKey = getCredentialParam('apiKey', credentialData, nodeData)
    // NOTE(review): 'host' is read here but is not declared in the node's inputs
    // list above, so it is presumably always undefined — confirm intent.
    const mem0Options: ClientOptions = {
        apiKey: apiKey,
        host: nodeData.inputs?.host as string,
        organizationId: nodeData.inputs?.org_id as string,
        projectId: nodeData.inputs?.project_id as string
    }
    const memOptionsUserId = initialUserId
    // Placeholder session id; the effective user id is resolved per call in
    // Mem0MemoryExtended.getEffectiveUserId when the Flowise chat ID toggle is ON.
    const constructorSessionId = initialUserId || (useFlowiseChatId ? 'flowise-chat-id-placeholder' : '')
    // NOTE(review): api_version, enable_graph, metadata and filters are read from
    // inputs that the node does not declare — likely always their fallbacks here.
    const memoryOptions: MemoryOptions & SearchOptions = {
        user_id: memOptionsUserId,
        run_id: (nodeData.inputs?.run_id as string) || undefined,
        agent_id: (nodeData.inputs?.agent_id as string) || undefined,
        app_id: (nodeData.inputs?.app_id as string) || undefined,
        project_id: (nodeData.inputs?.project_id as string) || undefined,
        org_id: (nodeData.inputs?.org_id as string) || undefined,
        api_version: (nodeData.inputs?.api_version as string) || undefined,
        enable_graph: (nodeData.inputs?.enable_graph as boolean) || false,
        metadata: (nodeData.inputs?.metadata as Record<string, any>) || {},
        filters: (nodeData.inputs?.filters as Record<string, any>) || {}
    }
    const obj: Mem0MemoryInput & Mem0MemoryExtendedInput & BufferMemoryExtendedInput & { searchOnly: boolean; useFlowiseChatId: boolean } =
        {
            apiKey: apiKey,
            humanPrefix: nodeData.inputs?.humanPrefix as string,
            aiPrefix: nodeData.inputs?.aiPrefix as string,
            inputKey: nodeData.inputs?.inputKey as string,
            sessionId: constructorSessionId,
            mem0Options: mem0Options,
            memoryOptions: memoryOptions,
            separateMessages: false,
            returnMessages: false,
            appDataSource: options.appDataSource as DataSource,
            databaseEntities: options.databaseEntities as IDatabaseEntity,
            chatflowid: options.chatflowid as string,
            searchOnly: (nodeData.inputs?.searchOnly as boolean) || false,
            useFlowiseChatId: useFlowiseChatId
        }
    return new Mem0MemoryExtended(obj)
}
// Mem0 memory input extended with the per-request Mem0 options and the toggle
// that makes the Flowise chat ID act as the Mem0 user id.
interface Mem0MemoryExtendedInput extends Mem0MemoryInput {
    memoryOptions?: MemoryOptions | SearchOptions
    useFlowiseChatId: boolean
}
/**
 * Mem0 memory that also persists/reads messages from Flowise's own ChatMessage
 * table. Every public method first resolves the effective Mem0 user id — either
 * the Flowise chat ID passed in at runtime (toggle ON) or the configured User ID
 * field (toggle OFF) — and stamps it onto memoryOptions before delegating to the
 * base class.
 */
class Mem0MemoryExtended extends BaseMem0Memory implements MemoryMethods {
    initialUserId: string
    userId: string
    memoryKey: string
    inputKey: string
    appDataSource: DataSource
    databaseEntities: IDatabaseEntity
    chatflowid: string
    searchOnly: boolean
    useFlowiseChatId: boolean
    constructor(
        fields: Mem0MemoryInput & Mem0MemoryExtendedInput & BufferMemoryExtendedInput & { searchOnly: boolean; useFlowiseChatId: boolean }
    ) {
        super(fields)
        this.initialUserId = fields.memoryOptions?.user_id ?? ''
        this.userId = this.initialUserId
        this.memoryKey = 'history'
        this.inputKey = fields.inputKey ?? 'input'
        this.appDataSource = fields.appDataSource
        this.databaseEntities = fields.databaseEntities
        this.chatflowid = fields.chatflowid
        this.searchOnly = fields.searchOnly
        this.useFlowiseChatId = fields.useFlowiseChatId
    }
    // Selects Mem0 user_id based on toggle state (Flowise chat ID or input field).
    // Throws rather than silently falling back, so memories never land under the
    // wrong user.
    private getEffectiveUserId(overrideUserId?: string): string {
        let effectiveUserId: string | undefined
        if (this.useFlowiseChatId) {
            if (overrideUserId) {
                effectiveUserId = overrideUserId
            } else {
                throw new Error('Mem0: "Use Flowise Chat ID" is ON, but no runtime chat ID (overrideUserId) was provided.')
            }
        } else {
            // If toggle is OFF, ALWAYS use the ID from the input field.
            effectiveUserId = this.initialUserId
        }
        // This check is now primarily for the case where the toggle is OFF and the initialUserId was somehow empty (should be caught by init validation).
        if (!effectiveUserId) {
            throw new Error('Mem0: Could not determine a valid User ID for the operation. Check User ID input field.')
        }
        return effectiveUserId
    }
    // Loads Mem0 memory variables scoped to the effective user id.
    async loadMemoryVariables(values: InputValues, overrideUserId = ''): Promise<MemoryVariables> {
        const effectiveUserId = this.getEffectiveUserId(overrideUserId)
        this.userId = effectiveUserId
        if (this.memoryOptions) {
            this.memoryOptions.user_id = effectiveUserId
        }
        return super.loadMemoryVariables(values)
    }
    // Saves an input/output exchange to Mem0 unless searchOnly mode is on.
    async saveContext(inputValues: InputValues, outputValues: OutputValues, overrideUserId = ''): Promise<void> {
        if (this.searchOnly) {
            return
        }
        const effectiveUserId = this.getEffectiveUserId(overrideUserId)
        this.userId = effectiveUserId
        if (this.memoryOptions) {
            this.memoryOptions.user_id = effectiveUserId
        }
        return super.saveContext(inputValues, outputValues)
    }
    // Clears the Mem0 memory for the effective user id (not the Flowise DB rows;
    // see clearChatMessages for that).
    async clear(overrideUserId = ''): Promise<void> {
        const effectiveUserId = this.getEffectiveUserId(overrideUserId)
        this.userId = effectiveUserId
        if (this.memoryOptions) {
            this.memoryOptions.user_id = effectiveUserId
        }
        return super.clear()
    }
    /**
     * Returns recent chat messages from the Flowise DB (last 10, oldest first).
     * When returnBaseMessages is true, the Mem0 long-term history string is
     * prepended as a system-style message before mapping to BaseMessages.
     * NOTE(review): overrideUserId doubles as the Flowise session id here —
     * confirm callers always pass the session id in this position.
     */
    async getChatMessages(
        overrideUserId = '',
        returnBaseMessages = false,
        prependMessages?: IMessage[]
    ): Promise<IMessage[] | BaseMessage[]> {
        const flowiseSessionId = overrideUserId
        if (!flowiseSessionId) {
            console.warn('Mem0: getChatMessages called without overrideUserId (Flowise Session ID). Cannot fetch DB messages.')
            return []
        }
        let chatMessage = await this.appDataSource.getRepository(this.databaseEntities['ChatMessage']).find({
            where: {
                sessionId: flowiseSessionId,
                chatflowid: this.chatflowid
            },
            order: {
                createdDate: 'DESC'
            },
            take: 10
        })
        // Query was newest-first for the LIMIT; reverse to chronological order.
        chatMessage = chatMessage.reverse()
        let returnIMessages: IMessage[] = chatMessage.map((m) => ({
            message: m.content as string,
            type: m.role as MessageType
        }))
        if (prependMessages?.length) {
            returnIMessages.unshift(...prependMessages)
            // Reverted to original simpler unshift
            chatMessage.unshift(...(prependMessages as any)) // Cast as any
        }
        if (returnBaseMessages) {
            const memoryVariables = await this.loadMemoryVariables({}, overrideUserId)
            const mem0History = memoryVariables[this.memoryKey]
            if (mem0History && typeof mem0History === 'string') {
                const systemMessage = {
                    role: 'apiMessage' as MessageType,
                    content: mem0History,
                    id: uuidv4()
                }
                // Ensure Mem0 history message also conforms structurally if mapChatMessageToBaseMessage is strict
                chatMessage.unshift(systemMessage as any) // Cast needed if mixing structures
            } else if (mem0History) {
                console.warn('Mem0 history is not a string, cannot prepend directly.')
            }
            return await mapChatMessageToBaseMessage(chatMessage)
        }
        return returnIMessages
    }
    // Persists one user/api message pair to Mem0 via saveContext. Only the first
    // message of each type in msgArray is used; anything else is ignored.
    async addChatMessages(msgArray: { text: string; type: MessageType }[], overrideUserId = ''): Promise<void> {
        const effectiveUserId = this.getEffectiveUserId(overrideUserId)
        const input = msgArray.find((msg) => msg.type === 'userMessage')
        const output = msgArray.find((msg) => msg.type === 'apiMessage')
        if (input && output) {
            const inputValues = { [this.inputKey ?? 'input']: input.text }
            const outputValues = { output: output.text }
            await this.saveContext(inputValues, outputValues, effectiveUserId)
        } else {
            console.warn('Mem0: Could not find both input and output messages to save context.')
        }
    }
    // Clears both the Mem0 memory for the user and the Flowise DB rows for the
    // session (overrideUserId is also used as the Flowise session id).
    async clearChatMessages(overrideUserId = ''): Promise<void> {
        const effectiveUserId = this.getEffectiveUserId(overrideUserId)
        await this.clear(effectiveUserId)
        const flowiseSessionId = overrideUserId
        if (flowiseSessionId) {
            await this.appDataSource
                .getRepository(this.databaseEntities['ChatMessage'])
                .delete({ sessionId: flowiseSessionId, chatflowid: this.chatflowid })
        } else {
            console.warn('Mem0: clearChatMessages called without overrideUserId (Flowise Session ID). Cannot clear DB messages.')
        }
    }
}
module.exports = { nodeClass: Mem0_Memory }

File diff suppressed because one or more lines are too long

After

Width:  |  Height:  |  Size: 6.6 KiB

View File

@ -155,7 +155,10 @@ class BufferMemoryExtended extends FlowiseMemory implements MemoryMethods {
if (input) {
const newInputMessage = new HumanMessage(input.text)
const messageToAdd = [newInputMessage].map((msg) => msg.toDict())
const messageToAdd = [newInputMessage].map((msg) => ({
...msg.toDict(),
timestamp: new Date() // Add timestamp to the message
}))
await collection.updateOne(
{ sessionId: id },
{
@ -167,7 +170,10 @@ class BufferMemoryExtended extends FlowiseMemory implements MemoryMethods {
if (output) {
const newOutputMessage = new AIMessage(output.text)
const messageToAdd = [newOutputMessage].map((msg) => msg.toDict())
const messageToAdd = [newOutputMessage].map((msg) => ({
...msg.toDict(),
timestamp: new Date() // Add timestamp to the message
}))
await collection.updateOne(
{ sessionId: id },
{

View File

@ -132,7 +132,21 @@ class BufferMemoryExtended extends FlowiseMemory implements MemoryMethods {
}
private async withRedisClient<T>(fn: (client: Redis) => Promise<T>): Promise<T> {
const client = typeof this.redisOptions === 'string' ? new Redis(this.redisOptions) : new Redis(this.redisOptions)
const client =
typeof this.redisOptions === 'string'
? new Redis(this.redisOptions, {
keepAlive:
process.env.REDIS_KEEP_ALIVE && !isNaN(parseInt(process.env.REDIS_KEEP_ALIVE, 10))
? parseInt(process.env.REDIS_KEEP_ALIVE, 10)
: undefined
})
: new Redis({
...this.redisOptions,
keepAlive:
process.env.REDIS_KEEP_ALIVE && !isNaN(parseInt(process.env.REDIS_KEEP_ALIVE, 10))
? parseInt(process.env.REDIS_KEEP_ALIVE, 10)
: undefined
})
try {
return await fn(client)
} finally {

View File

@ -2,7 +2,7 @@ import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Inter
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { ListKeyOptions, RecordManagerInterface, UpdateOptions } from '@langchain/community/indexes/base'
import { DataSource } from 'typeorm'
import { getHost } from '../../vectorstores/Postgres/utils'
import { getHost, getSSL } from '../../vectorstores/Postgres/utils'
import { getDatabase, getPort, getTableName } from './utils'
const serverCredentialsExists = !!process.env.POSTGRES_RECORDMANAGER_USER && !!process.env.POSTGRES_RECORDMANAGER_PASSWORD
@ -51,6 +51,14 @@ class PostgresRecordManager_RecordManager implements INode {
placeholder: getPort(),
optional: true
},
{
label: 'SSL',
name: 'ssl',
description: 'Use SSL to connect to Postgres',
type: 'boolean',
additionalParams: true,
optional: true
},
{
label: 'Additional Connection Configuration',
name: 'additionalConfig',
@ -149,6 +157,7 @@ class PostgresRecordManager_RecordManager implements INode {
type: 'postgres',
host: getHost(nodeData),
port: getPort(nodeData),
ssl: getSSL(nodeData),
username: user,
password: password,
database: getDatabase(nodeData)
@ -218,6 +227,8 @@ class PostgresRecordManager implements RecordManagerInterface {
const queryRunner = dataSource.createQueryRunner()
const tableName = this.sanitizeTableName(this.tableName)
await queryRunner.query('CREATE EXTENSION IF NOT EXISTS pgcrypto;')
await queryRunner.manager.query(`
CREATE TABLE IF NOT EXISTS "${tableName}" (
uuid UUID PRIMARY KEY DEFAULT gen_random_uuid(),
@ -249,9 +260,9 @@ class PostgresRecordManager implements RecordManagerInterface {
const dataSource = await this.getDataSource()
try {
const queryRunner = dataSource.createQueryRunner()
const res = await queryRunner.manager.query('SELECT EXTRACT(EPOCH FROM CURRENT_TIMESTAMP)')
const res = await queryRunner.manager.query('SELECT EXTRACT(EPOCH FROM CURRENT_TIMESTAMP) AS now')
await queryRunner.release()
return Number.parseFloat(res[0].extract)
return Number.parseFloat(res[0].now)
} catch (error) {
console.error('Error getting time in PostgresRecordManager:')
throw error

View File

@ -4,14 +4,15 @@ Postgres Record Manager integration for Flowise
## 🌱 Env Variables
| Variable | Description | Type | Default |
| --------------------------------- | ----------------------------------------------- | ------ | ----------------- |
| POSTGRES_RECORDMANAGER_HOST | Default `host` for Postgres Record Manager | String | |
| POSTGRES_RECORDMANAGER_PORT | Default `port` for Postgres Record Manager | Number | 5432 |
| POSTGRES_RECORDMANAGER_USER | Default `user` for Postgres Record Manager | String | |
| POSTGRES_RECORDMANAGER_PASSWORD | Default `password` for Postgres Record Manager | String | |
| POSTGRES_RECORDMANAGER_DATABASE | Default `database` for Postgres Record Manager | String | |
| POSTGRES_RECORDMANAGER_TABLE_NAME | Default `tableName` for Postgres Record Manager | String | upsertion_records |
| Variable | Description | Type | Default |
| --------------------------------- | ----------------------------------------------- | ------- | ----------------- |
| POSTGRES_RECORDMANAGER_HOST | Default `host` for Postgres Record Manager | String | |
| POSTGRES_RECORDMANAGER_PORT | Default `port` for Postgres Record Manager | Number | 5432 |
| POSTGRES_RECORDMANAGER_USER | Default `user` for Postgres Record Manager | String | |
| POSTGRES_RECORDMANAGER_PASSWORD | Default `password` for Postgres Record Manager | String | |
| POSTGRES_RECORDMANAGER_DATABASE | Default `database` for Postgres Record Manager | String | |
| POSTGRES_RECORDMANAGER_TABLE_NAME | Default `tableName` for Postgres Record Manager | String | upsertion_records |
| POSTGRES_RECORDMANAGER_SSL        | Default `ssl` for Postgres Record Manager       | Boolean | false             |
## License

View File

@ -12,6 +12,10 @@ export function getPort(nodeData?: INodeData) {
return defaultChain(nodeData?.inputs?.port, process.env.POSTGRES_RECORDMANAGER_PORT, '5432')
}
export function getSSL(nodeData?: INodeData) {
return defaultChain(nodeData?.inputs?.ssl, process.env.POSTGRES_RECORDMANAGER_SSL, false)
}
export function getTableName(nodeData?: INodeData) {
return defaultChain(nodeData?.inputs?.tableName, process.env.POSTGRES_RECORDMANAGER_TABLE_NAME, 'upsertion_records')
}

View File

@ -150,6 +150,7 @@ class ExtractMetadataRetriever_Retrievers implements INode {
prompt: dynamicMetadataFilterRetrieverPrompt,
topK: topK ? parseInt(topK, 10) : (vectorStore as any)?.k ?? 4
})
retriever.filter = vectorStore?.lc_kwargs?.filter ?? (vectorStore as any).filter
if (output === 'retriever') return retriever
else if (output === 'document') return await retriever.getRelevantDocuments(finalInputQuery)

View File

@ -172,6 +172,7 @@ Passage:`
else if (promptKey) obj.promptTemplate = promptKey
const retriever = new HydeRetriever(obj)
retriever.filter = vectorStore?.lc_kwargs?.filter ?? (vectorStore as any).filter
if (output === 'retriever') return retriever
else if (output === 'document') return await retriever.getRelevantDocuments(query ? query : input)

View File

@ -71,7 +71,7 @@ class MultiQueryRetriever_Retrievers implements INode {
const retriever = MultiQueryRetriever.fromLLM({
llm: model,
retriever: vectorStore.asRetriever(),
retriever: vectorStore.asRetriever({ filter: vectorStore?.lc_kwargs?.filter ?? vectorStore?.filter }),
verbose: process.env.DEBUG === 'true',
// @ts-ignore
prompt: PromptTemplate.fromTemplate(prompt)

View File

@ -50,7 +50,7 @@ export class ReciprocalRankFusion extends BaseDocumentCompressor {
})
const docList: Document<Record<string, any>>[][] = []
for (let i = 0; i < queries.length; i++) {
const resultOne = await this.baseRetriever.vectorStore.similaritySearch(queries[i], 5)
const resultOne = await this.baseRetriever.vectorStore.similaritySearch(queries[i], 5, this.baseRetriever.filter)
const docs: any[] = []
resultOne.forEach((doc) => {
docs.push(doc)

View File

@ -100,6 +100,7 @@ class SimilarityThresholdRetriever_Retrievers implements INode {
maxK: maxK ? parseInt(maxK, 10) : 100,
kIncrement: kIncrement ? parseInt(kIncrement, 10) : 2
})
retriever.filter = vectorStore?.lc_kwargs?.filter ?? (vectorStore as any).filter
if (output === 'retriever') return retriever
else if (output === 'document') return await retriever.getRelevantDocuments(query ? query : input)

View File

@ -1,4 +1,4 @@
import { flatten, uniq } from 'lodash'
import { difference, flatten, uniq } from 'lodash'
import { DataSource } from 'typeorm'
import { z } from 'zod'
import { RunnableSequence, RunnablePassthrough, RunnableConfig } from '@langchain/core/runnables'
@ -430,8 +430,15 @@ class LLMNode_SeqAgents implements INode {
const abortControllerSignal = options.signal as AbortController
const llmNodeInputVariables = uniq([...getInputVariables(systemPrompt), ...getInputVariables(humanPrompt)])
if (!llmNodeInputVariables.every((element) => Object.keys(llmNodeInputVariablesValues).includes(element))) {
throw new Error('LLM Node input variables values are not provided!')
const missingInputVars = difference(llmNodeInputVariables, Object.keys(llmNodeInputVariablesValues)).join(' ')
const allVariablesSatisfied = missingInputVars.length === 0
if (!allVariablesSatisfied) {
const nodeInputVars = llmNodeInputVariables.join(' ')
const providedInputVars = Object.keys(llmNodeInputVariablesValues).join(' ')
throw new Error(
`LLM Node input variables values are not provided! Required: ${nodeInputVars}, Provided: ${providedInputVars}. Missing: ${missingInputVars}`
)
}
const workerNode = async (state: ISeqAgentsState, config: RunnableConfig) => {

View File

@ -313,6 +313,7 @@ class ChatflowTool extends StructuredTool {
method: 'POST',
headers: {
'Content-Type': 'application/json',
'flowise-tool': 'true',
...this.headers
},
body: JSON.stringify(body)

View File

@ -41,7 +41,7 @@ class Composio_Tools implements INode {
constructor() {
this.label = 'Composio'
this.name = 'composio'
this.version = 1.0
this.version = 2.0
this.type = 'Composio'
this.icon = 'composio.svg'
this.category = 'Tools'
@ -73,7 +73,7 @@ class Composio_Tools implements INode {
{
label: 'Actions to Use',
name: 'actions',
type: 'asyncOptions',
type: 'asyncMultiOptions',
loadMethod: 'listActions',
description: 'Select the actions you want to use',
refresh: true
@ -216,8 +216,18 @@ class Composio_Tools implements INode {
throw new Error('API Key Required')
}
const _actions = nodeData.inputs?.actions
let actions = []
if (_actions) {
try {
actions = typeof _actions === 'string' ? JSON.parse(_actions) : _actions
} catch (error) {
console.error('Error parsing actions:', error)
}
}
const toolset = new LangchainToolSet({ apiKey: composioApiKey })
const tools = await toolset.getTools({ actions: [nodeData.inputs?.actions as string] })
const tools = await toolset.getTools({ actions })
return tools
}
}

View File

@ -0,0 +1,74 @@
import { z } from 'zod'
import { INode } from '../../../src/Interface'
import { DynamicStructuredTool } from '../CustomTool/core'
const code = `
const now = new Date();
// Format date as YYYY-MM-DD
const date = now.toISOString().split('T')[0];
// Get time in HH:MM:SS format
const time = now.toTimeString().split(' ')[0];
// Get day of week
const days = ['Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday'];
const day = days[now.getDay()];
// Get timezone information
const timezone = Intl.DateTimeFormat().resolvedOptions().timeZone;
const timezoneOffset = now.getTimezoneOffset();
const timezoneOffsetHours = Math.abs(Math.floor(timezoneOffset / 60));
const timezoneOffsetMinutes = Math.abs(timezoneOffset % 60);
const timezoneOffsetFormatted =
(timezoneOffset <= 0 ? '+' : '-') +
timezoneOffsetHours.toString().padStart(2, '0') + ':' +
timezoneOffsetMinutes.toString().padStart(2, '0');
return {
date,
time,
day,
timezone,
timezoneOffset: timezoneOffsetFormatted,
iso8601: now.toISOString(),
unix_timestamp: Math.floor(now.getTime() / 1000)
};
`
class CurrentDateTime_Tools implements INode {
label: string
name: string
version: number
description: string
type: string
icon: string
category: string
baseClasses: string[]
constructor() {
this.label = 'CurrentDateTime'
this.name = 'currentDateTime'
this.version = 1.0
this.type = 'CurrentDateTime'
this.icon = 'currentDateTime.svg'
this.category = 'Tools'
this.description = 'Get todays day, date and time.'
this.baseClasses = [this.type, 'Tool']
}
async init(): Promise<any> {
const obj = {
name: 'current_date_time',
description: 'Useful to get current day, date and time.',
schema: z.object({}),
code: code
}
let dynamicStructuredTool = new DynamicStructuredTool(obj)
return dynamicStructuredTool
}
}
module.exports = { nodeClass: CurrentDateTime_Tools }

View File

@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round" class="icon icon-tabler icons-tabler-outline icon-tabler-timezone"><path stroke="none" d="M0 0h24v24H0z" fill="none"/><path d="M20.884 10.554a9 9 0 1 0 -10.337 10.328" /><path d="M3.6 9h16.8" /><path d="M3.6 15h6.9" /><path d="M11.5 3a17 17 0 0 0 -1.502 14.954" /><path d="M12.5 3a17 17 0 0 1 2.52 7.603" /><path d="M18 18m-4 0a4 4 0 1 0 8 0a4 4 0 1 0 -8 0" /><path d="M18 16.5v1.5l.5 .5" /></svg>

After

Width:  |  Height:  |  Size: 588 B

View File

@ -0,0 +1,108 @@
import { Tool } from '@langchain/core/tools'
import { ICommonObject, INode, INodeData, INodeOptionsValue, INodeParams } from '../../../../src/Interface'
import { getCredentialData, getCredentialParam, getNodeModulesPackagePath } from '../../../../src/utils'
import { MCPToolkit } from '../core'
/**
 * Tool node exposing the Brave Search MCP server. Spawns the server over
 * stdio with the user's Brave API key and surfaces its tools, optionally
 * filtered down to the actions selected in the UI.
 */
class BraveSearch_MCP implements INode {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    baseClasses: string[]
    documentation: string
    credential: INodeParams
    inputs: INodeParams[]

    constructor() {
        this.label = 'Brave Search MCP'
        this.name = 'braveSearchMCP'
        this.version = 1.0
        this.type = 'BraveSearch MCP Tool'
        this.icon = 'brave.svg'
        this.category = 'Tools (MCP)'
        this.description = 'MCP server that integrates the Brave Search API - a real-time API to access web search capabilities'
        this.documentation = 'https://github.com/modelcontextprotocol/servers/tree/main/src/brave-search'
        this.credential = {
            label: 'Connect Credential',
            name: 'credential',
            type: 'credential',
            credentialNames: ['braveSearchApi']
        }
        this.inputs = [
            {
                label: 'Available Actions',
                name: 'mcpActions',
                type: 'asyncMultiOptions',
                loadMethod: 'listActions',
                refresh: true
            }
        ]
        this.baseClasses = ['Tool']
    }

    //@ts-ignore
    loadMethods = {
        // Populates the "Available Actions" dropdown by starting the server
        // and listing its tools; falls back to a single error entry on failure.
        listActions: async (nodeData: INodeData, options: ICommonObject): Promise<INodeOptionsValue[]> => {
            try {
                const toolset = await this.getTools(nodeData, options)
                toolset.sort((a: any, b: any) => a.name.localeCompare(b.name))
                const choices: INodeOptionsValue[] = []
                for (const { name, ...rest } of toolset as any[]) {
                    choices.push({
                        label: name.toUpperCase(),
                        name: name,
                        description: rest.description || name
                    })
                }
                return choices
            } catch (error) {
                return [
                    {
                        label: 'No Available Actions',
                        name: 'error',
                        description: 'No available actions, please check your API key and refresh'
                    }
                ]
            }
        }
    }

    /**
     * Returns only the tools whose names were selected in the UI.
     * The selection may arrive as a JSON-encoded string or as an array.
     */
    async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
        const allTools = await this.getTools(nodeData, options)
        const rawActions = nodeData.inputs?.mcpActions
        let selectedActions: string[] = []
        if (rawActions) {
            try {
                selectedActions = typeof rawActions === 'string' ? JSON.parse(rawActions) : rawActions
            } catch (error) {
                console.error('Error parsing mcp actions:', error)
            }
        }
        return allTools.filter((tool: any) => selectedActions.includes(tool.name))
    }

    /**
     * Launches the bundled Brave Search MCP server (stdio transport) with the
     * credential's API key and returns every tool it advertises.
     */
    async getTools(nodeData: INodeData, options: ICommonObject): Promise<Tool[]> {
        const credentialData = await getCredentialData(nodeData.credential ?? '', options)
        const braveApiKey = getCredentialParam('braveApiKey', credentialData, nodeData)
        const packagePath = getNodeModulesPackagePath('@modelcontextprotocol/server-brave-search/dist/index.js')

        const toolkit = new MCPToolkit(
            {
                command: 'node',
                args: [packagePath],
                env: {
                    BRAVE_API_KEY: braveApiKey
                }
            },
            'stdio'
        )
        await toolkit.initialize()

        return (toolkit.tools ?? []) as Tool[]
    }
}

module.exports = { nodeClass: BraveSearch_MCP }

View File

@ -0,0 +1,8 @@
<svg width="32" height="32" viewBox="0 0 32 32" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M26.2866 9.22667L26.8999 7.73333C26.8999 7.73333 26.1333 6.9 25.1866 5.96C24.2399 5.02 22.2466 5.56 22.2466 5.56L19.9999 3H11.9999L9.73325 5.58C9.73325 5.58 7.73325 5.03333 6.79325 5.97333C5.85325 6.91333 5.07992 7.75333 5.07992 7.75333L5.69326 9.24667L4.91992 11.4533C4.91992 11.4533 7.20659 20.12 7.46659 21.1533C7.99992 23.2267 8.35326 24.0333 9.84659 25.1C11.3399 26.1667 14.0466 27.9733 14.5133 28.2533C14.9415 28.6183 15.4515 28.8745 15.9999 29C16.4933 29 17.0466 28.5267 17.4999 28.2533C17.9533 27.98 20.6533 26.14 22.1666 25.1C23.6799 24.06 23.9999 23.2467 24.5333 21.1533L27.0799 11.4533L26.2866 9.22667Z" fill="#FF6520" stroke="#FF6520" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/>
<path d="M12.4735 21.0133C11.7428 20.2529 11.1071 19.4067 10.5801 18.4933C10.4452 18.1438 10.3942 17.7674 10.4312 17.3946C10.4683 17.0217 10.5924 16.6628 10.7935 16.3466C10.8686 16.1363 10.8707 15.9067 10.7993 15.695C10.728 15.4833 10.5873 15.3019 10.4001 15.18C10.2068 14.9666 8.5868 13.2533 8.21346 12.8533C7.84013 12.4533 7.4668 12.2866 7.4668 11.52C7.4668 10.7533 10.3801 7.22665 10.3801 7.22665C10.3801 7.22665 12.8468 7.69998 13.1801 7.69998C13.7659 7.59152 14.3397 7.42629 14.8935 7.20665C15.2496 7.07842 15.6223 7.00208 16.0001 6.97998L14.8935 7.19998C15.2499 7.07402 15.6226 6.99993 16.0001 6.97998C16.3777 6.99993 16.7503 7.07402 17.1068 7.19998C17.6605 7.41963 18.2344 7.58485 18.8201 7.69331C19.1535 7.69331 21.6201 7.21998 21.6201 7.21998C21.6201 7.21998 24.5335 10.7466 24.5335 11.5133C24.5335 12.28 24.1601 12.46 23.7868 12.8466C23.4135 13.2333 21.7868 14.96 21.6001 15.1733C21.4129 15.2952 21.2723 15.4767 21.2009 15.6884C21.1295 15.9 21.1316 16.1296 21.2068 16.34C21.4079 16.6561 21.532 17.0151 21.569 17.3879C21.6061 17.7608 21.5551 18.1371 21.4201 18.4866C20.8932 19.4001 20.2574 20.2463 19.5268 21.0066" fill="white"/>
<path d="M21.46 9.87996C20.6375 9.74822 19.8011 9.72803 18.9733 9.81996C18.3639 9.94895 17.7696 10.1411 17.2 10.3933C17.1266 10.5266 17.0466 10.5266 17.1266 11.02C17.2066 11.5133 17.6733 13.82 17.7133 14.2333C17.7533 14.6466 17.8533 14.9 17.4 15.0266C16.926 15.1471 16.4451 15.2384 15.96 15.3C15.4749 15.2378 14.9941 15.1465 14.52 15.0266C14.0866 14.9266 14.1666 14.6466 14.2066 14.2333C14.2466 13.82 14.7 11.5133 14.8 11.02C14.9 10.5266 14.8 10.5266 14.72 10.3933C14.1516 10.1379 13.5569 9.94565 12.9466 9.81996C12.1193 9.71801 11.2814 9.73823 10.46 9.87996M16 19.1666V15.3333V19.1666ZM19.4266 20.72C19.6066 20.84 19.5066 21.0533 19.3333 21.1533C19.16 21.2533 16.9466 22.9866 16.7466 23.1866C16.5466 23.3866 16.22 23.68 16 23.68C15.78 23.68 15.4666 23.36 15.2533 23.1866C15.04 23.0133 12.8266 21.2733 12.6666 21.1533C12.5066 21.0333 12.3933 20.82 12.5733 20.72C12.7533 20.62 13.32 20.3266 14.0866 19.9133C14.6912 19.5865 15.3338 19.3357 16 19.1666C16.6661 19.3357 17.3087 19.5865 17.9133 19.9133L19.4266 20.72Z" fill="white"/>
<path d="M21.46 9.87996C20.6375 9.74822 19.8011 9.72803 18.9733 9.81996C18.3639 9.94895 17.7696 10.1411 17.2 10.3933C17.1266 10.5266 17.0466 10.5266 17.1266 11.02C17.2066 11.5133 17.6733 13.82 17.7133 14.2333C17.7533 14.6466 17.8533 14.9 17.4 15.0266C16.926 15.1471 16.4451 15.2384 15.96 15.3C15.4749 15.2378 14.9941 15.1465 14.52 15.0266C14.0866 14.9266 14.1666 14.6466 14.2066 14.2333C14.2466 13.82 14.7 11.5133 14.8 11.02C14.9 10.5266 14.8 10.5266 14.72 10.3933C14.1516 10.1379 13.5569 9.94565 12.9466 9.81996C12.1193 9.71801 11.2814 9.73823 10.46 9.87996M16 19.1666V15.3333M16 19.1666C15.3338 19.3357 14.6912 19.5865 14.0866 19.9133C13.32 20.3266 12.7533 20.62 12.5733 20.72C12.3933 20.82 12.5066 21.0333 12.6666 21.1533C12.8266 21.2733 15.04 23.0133 15.2533 23.1866C15.4666 23.36 15.78 23.68 16 23.68C16.22 23.68 16.5466 23.3866 16.7466 23.1866C16.9466 22.9866 19.16 21.2533 19.3333 21.1533C19.5066 21.0533 19.6066 20.84 19.4266 20.72L17.9133 19.9133C17.3087 19.5865 16.6661 19.3357 16 19.1666Z" stroke="#FF6520" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round"/>
<path d="M18.839 21.7606C18.9867 21.672 19.1161 21.4053 18.9627 21.299L17.6582 20.6046C17.143 20.3149 16.5426 20.0811 15.9749 19.9313C15.4073 20.0811 14.8069 20.336 14.2917 20.6256C13.6383 20.9919 13.1932 21.2524 13.0398 21.341C12.8864 21.4297 13.0212 21.6543 13.1575 21.7606C13.2939 21.867 15.1801 23.4091 15.3619 23.5627C15.5438 23.7163 15.8108 23.9999 15.9983 23.9999C16.1858 23.9999 16.4641 23.74 16.6346 23.5627C16.805 23.3855 18.6913 21.8493 18.839 21.7606Z" fill="white"/>
<path d="M18.839 21.7606C18.9867 21.672 19.1161 21.4053 18.9627 21.299L17.6582 20.6046C17.143 20.3149 16.5426 20.0811 15.9749 19.9313C15.4073 20.0811 14.8069 20.336 14.2917 20.6256C13.6383 20.9919 13.1932 21.2524 13.0398 21.341C12.8864 21.4297 13.0212 21.6543 13.1575 21.7606C13.2939 21.867 15.1801 23.4091 15.3619 23.5627C15.5438 23.7163 15.8108 23.9999 15.9983 23.9999C16.1858 23.9999 16.4641 23.74 16.6346 23.5627C16.805 23.3855 18.6913 21.8493 18.839 21.7606Z" fill="white"/>
</svg>

After

Width:  |  Height:  |  Size: 4.9 KiB

View File

@ -0,0 +1,136 @@
import { Tool } from '@langchain/core/tools'
import { INode, INodeData, INodeOptionsValue, INodeParams } from '../../../../src/Interface'
import { MCPToolkit } from '../core'
// Placeholder shown in the config editor: a typical stdio server definition.
const mcpServerConfig = `{
    "command": "npx",
    "args": ["-y", "@modelcontextprotocol/server-filesystem", "/path/to/allowed/files"]
}`

/**
 * Tool node for arbitrary user-supplied MCP server configurations.
 * Accepts either a stdio definition (with "command"/"args") or an SSE
 * definition, starts the server, and exposes the selected actions as tools.
 */
class Custom_MCP implements INode {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    baseClasses: string[]
    documentation: string
    credential: INodeParams
    inputs: INodeParams[]

    constructor() {
        this.label = 'Custom MCP'
        this.name = 'customMCP'
        this.version = 1.0
        this.type = 'Custom MCP Tool'
        this.icon = 'customMCP.png'
        this.category = 'Tools (MCP)'
        this.description = 'Custom MCP Config'
        // fix: documentation previously pointed at the Brave Search server page
        // (copy-paste error); link to the MCP servers repository instead
        this.documentation = 'https://github.com/modelcontextprotocol/servers'
        this.inputs = [
            {
                label: 'MCP Server Config',
                name: 'mcpServerConfig',
                type: 'code',
                hideCodeExecute: true,
                placeholder: mcpServerConfig
            },
            {
                label: 'Available Actions',
                name: 'mcpActions',
                type: 'asyncMultiOptions',
                loadMethod: 'listActions',
                refresh: true
            }
        ]
        this.baseClasses = ['Tool']
    }

    //@ts-ignore
    loadMethods = {
        // Populates the "Available Actions" dropdown from the configured server;
        // returns a single error entry when the server cannot be started.
        listActions: async (nodeData: INodeData): Promise<INodeOptionsValue[]> => {
            try {
                const toolset = await this.getTools(nodeData)
                toolset.sort((a: any, b: any) => a.name.localeCompare(b.name))
                return toolset.map(({ name, ...rest }) => ({
                    label: name.toUpperCase(),
                    name: name,
                    description: rest.description || name
                }))
            } catch (error) {
                return [
                    {
                        label: 'No Available Actions',
                        name: 'error',
                        description: 'No available actions, please check your API key and refresh'
                    }
                ]
            }
        }
    }

    /**
     * Returns only the tools whose names were selected in the UI.
     * The selection may arrive as a JSON-encoded string or as an array.
     */
    async init(nodeData: INodeData): Promise<any> {
        const tools = await this.getTools(nodeData)

        const _mcpActions = nodeData.inputs?.mcpActions
        let mcpActions = []
        if (_mcpActions) {
            try {
                mcpActions = typeof _mcpActions === 'string' ? JSON.parse(_mcpActions) : _mcpActions
            } catch (error) {
                console.error('Error parsing mcp actions:', error)
            }
        }

        return tools.filter((tool: any) => mcpActions.includes(tool.name))
    }

    /**
     * Parses the user's server config (JSON or loose JS-object syntax) and
     * starts the server, choosing SSE when no "command" is present and stdio
     * otherwise.
     *
     * @throws when the config is missing or cannot be parsed/started.
     */
    async getTools(nodeData: INodeData): Promise<Tool[]> {
        const mcpServerConfig = nodeData.inputs?.mcpServerConfig as string
        if (!mcpServerConfig) {
            throw new Error('MCP Server Config is required')
        }

        try {
            let serverParams
            // The input is typed as a string, but upstream callers may hand us
            // an already-parsed object — accept both.
            if (typeof mcpServerConfig === 'object') {
                serverParams = mcpServerConfig
            } else if (typeof mcpServerConfig === 'string') {
                const serverParamsString = convertToValidJSONString(mcpServerConfig)
                serverParams = JSON.parse(serverParamsString)
            }

            // Compatible with stdio and SSE
            let toolkit: MCPToolkit
            if (serverParams?.command === undefined) {
                toolkit = new MCPToolkit(serverParams, 'sse')
            } else {
                toolkit = new MCPToolkit(serverParams, 'stdio')
            }
            await toolkit.initialize()

            const tools = toolkit.tools ?? []
            return tools as Tool[]
        } catch (error) {
            throw new Error(`Invalid MCP Server Config: ${error}`)
        }
    }
}
/**
 * Converts a loosely written JS object literal (e.g. unquoted keys, trailing
 * commas) into a pretty-printed JSON string.
 *
 * NOTE(security): this evaluates the input via the Function constructor, so it
 * must only ever receive operator-supplied configuration, never end-user input.
 *
 * @param inputString - text that should evaluate to a JS value
 * @returns the value serialized as 2-space-indented JSON, or '' on failure
 */
function convertToValidJSONString(inputString: string) {
    try {
        const evaluated = new Function(`return ${inputString}`)()
        return JSON.stringify(evaluated, null, 2)
    } catch (error) {
        console.error('Error converting to JSON:', error)
        return ''
    }
}
module.exports = { nodeClass: Custom_MCP }

Binary file not shown.

After

Width:  |  Height:  |  Size: 4.0 KiB

Some files were not shown because too many files have changed in this diff Show More