Compare commits
1 commit · main...fix/predic

| Author | SHA1 | Date |
| --- | --- | --- |
|  | f61a1ad248 |  |
@@ -1,72 +0,0 @@
name: Docker Image CI - Docker Hub

on:
    workflow_dispatch:
        inputs:
            node_version:
                description: 'Node.js version to build this image with.'
                type: choice
                required: true
                default: '20'
                options:
                    - '20'
            tag_version:
                description: 'Tag version of the image to be pushed.'
                type: string
                required: true
                default: 'latest'

jobs:
    docker:
        runs-on: ubuntu-latest
        steps:
            - name: Set default values
              id: defaults
              run: |
                  echo "node_version=${{ github.event.inputs.node_version || '20' }}" >> $GITHUB_OUTPUT
                  echo "tag_version=${{ github.event.inputs.tag_version || 'latest' }}" >> $GITHUB_OUTPUT

            - name: Checkout
              uses: actions/checkout@v4.1.1

            - name: Set up QEMU
              uses: docker/setup-qemu-action@v3.0.0

            - name: Set up Docker Buildx
              uses: docker/setup-buildx-action@v3.0.0

            - name: Login to Docker Hub
              uses: docker/login-action@v3
              with:
                  username: ${{ secrets.DOCKERHUB_USERNAME }}
                  password: ${{ secrets.DOCKERHUB_TOKEN }}

            # -------------------------
            # Build and push main image
            # -------------------------
            - name: Build and push main image
              uses: docker/build-push-action@v5.3.0
              with:
                  context: .
                  file: ./docker/Dockerfile
                  build-args: |
                      NODE_VERSION=${{ steps.defaults.outputs.node_version }}
                  platforms: linux/amd64,linux/arm64
                  push: true
                  tags: |
                      flowiseai/flowise:${{ steps.defaults.outputs.tag_version }}

            # -------------------------
            # Build and push worker image
            # -------------------------
            - name: Build and push worker image
              uses: docker/build-push-action@v5.3.0
              with:
                  context: .
                  file: docker/worker/Dockerfile
                  build-args: |
                      NODE_VERSION=${{ steps.defaults.outputs.node_version }}
                  platforms: linux/amd64,linux/arm64
                  push: true
                  tags: |
                      flowiseai/flowise-worker:${{ steps.defaults.outputs.tag_version }}
@@ -1,73 +0,0 @@
name: Docker Image CI - AWS ECR

on:
    workflow_dispatch:
        inputs:
            environment:
                description: 'Environment to push the image to.'
                required: true
                default: 'dev'
                type: choice
                options:
                    - dev
                    - prod
            node_version:
                description: 'Node.js version to build this image with.'
                type: choice
                required: true
                default: '20'
                options:
                    - '20'
            tag_version:
                description: 'Tag version of the image to be pushed.'
                type: string
                required: true
                default: 'latest'

jobs:
    docker:
        runs-on: ubuntu-latest
        environment: ${{ github.event.inputs.environment }}
        steps:
            - name: Set default values
              id: defaults
              run: |
                  echo "node_version=${{ github.event.inputs.node_version || '20' }}" >> $GITHUB_OUTPUT
                  echo "tag_version=${{ github.event.inputs.tag_version || 'latest' }}" >> $GITHUB_OUTPUT

            - name: Checkout
              uses: actions/checkout@v4.1.1

            - name: Set up QEMU
              uses: docker/setup-qemu-action@v3.0.0

            - name: Set up Docker Buildx
              uses: docker/setup-buildx-action@v3.0.0

            - name: Configure AWS Credentials
              uses: aws-actions/configure-aws-credentials@v3
              with:
                  aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
                  aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
                  aws-region: ${{ secrets.AWS_REGION }}

            - name: Login to Amazon ECR
              uses: aws-actions/amazon-ecr-login@v1

            # -------------------------
            # Build and push main image
            # -------------------------
            - name: Build and push main image
              uses: docker/build-push-action@v5.3.0
              with:
                  context: .
                  file: Dockerfile
                  build-args: |
                      NODE_VERSION=${{ steps.defaults.outputs.node_version }}
                  platforms: linux/amd64,linux/arm64
                  push: true
                  tags: |
                      ${{ format('{0}.dkr.ecr.{1}.amazonaws.com/flowise:{2}',
                          secrets.AWS_ACCOUNT_ID,
                          secrets.AWS_REGION,
                          steps.defaults.outputs.tag_version) }}
@@ -0,0 +1,114 @@
name: Docker Image CI

on:
    workflow_dispatch:
        inputs:
            registry:
                description: 'Container Registry to push the image to.'
                type: choice
                required: true
                default: 'aws_ecr'
                options:
                    - 'docker_hub'
                    - 'aws_ecr'
            environment:
                description: 'Environment to push the image to.'
                required: true
                default: 'dev'
                type: choice
                options:
                    - dev
                    - prod
            image_type:
                description: 'Type of image to build and push.'
                type: choice
                required: true
                default: 'main'
                options:
                    - 'main'
                    - 'worker'
            node_version:
                description: 'Node.js version to build this image with.'
                type: choice
                required: true
                default: '20'
                options:
                    - '20'
            tag_version:
                description: 'Tag version of the image to be pushed.'
                type: string
                required: true
                default: 'latest'

jobs:
    docker:
        runs-on: ubuntu-latest
        environment: ${{ github.event.inputs.environment }}
        steps:
            - name: Set default values
              id: defaults
              run: |
                  echo "registry=${{ github.event.inputs.registry || 'aws_ecr' }}" >> $GITHUB_OUTPUT
                  echo "image_type=${{ github.event.inputs.image_type || 'main' }}" >> $GITHUB_OUTPUT
                  echo "node_version=${{ github.event.inputs.node_version || '20' }}" >> $GITHUB_OUTPUT
                  echo "tag_version=${{ github.event.inputs.tag_version || 'latest' }}" >> $GITHUB_OUTPUT

            - name: Checkout
              uses: actions/checkout@v4.1.1

            - name: Set up QEMU
              uses: docker/setup-qemu-action@v3.0.0

            - name: Set up Docker Buildx
              uses: docker/setup-buildx-action@v3.0.0

            # ------------------------
            # Login Steps (conditional)
            # ------------------------
            - name: Login to Docker Hub
              if: steps.defaults.outputs.registry == 'docker_hub'
              uses: docker/login-action@v3
              with:
                  username: ${{ secrets.DOCKERHUB_USERNAME }}
                  password: ${{ secrets.DOCKERHUB_TOKEN }}

            - name: Configure AWS Credentials
              if: steps.defaults.outputs.registry == 'aws_ecr'
              uses: aws-actions/configure-aws-credentials@v3
              with:
                  aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
                  aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
                  aws-region: ${{ secrets.AWS_REGION }}

            - name: Login to Amazon ECR
              if: steps.defaults.outputs.registry == 'aws_ecr'
              uses: aws-actions/amazon-ecr-login@v1

            # -------------------------
            # Build and push (conditional tags)
            # -------------------------
            - name: Build and push
              uses: docker/build-push-action@v5.3.0
              with:
                  context: .
                  file: |
                      ${{
                          steps.defaults.outputs.image_type == 'worker' && 'docker/worker/Dockerfile' ||
                          (steps.defaults.outputs.registry == 'docker_hub' && './docker/Dockerfile' || 'Dockerfile')
                      }}
                  build-args: |
                      NODE_VERSION=${{ steps.defaults.outputs.node_version }}
                  platforms: linux/amd64,linux/arm64
                  push: true
                  tags: |
                      ${{
                          steps.defaults.outputs.registry == 'docker_hub' &&
                          format('flowiseai/flowise{0}:{1}',
                              steps.defaults.outputs.image_type == 'worker' && '-worker' || '',
                              steps.defaults.outputs.tag_version) ||
                          format('{0}.dkr.ecr.{1}.amazonaws.com/flowise{2}:{3}',
                              secrets.AWS_ACCOUNT_ID,
                              secrets.AWS_REGION,
                              steps.defaults.outputs.image_type == 'worker' && '-worker' || '',
                              steps.defaults.outputs.tag_version)
                      }}
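The `file:` and `tags:` expressions above rely on the GitHub Actions idiom `condition && a || b`, which emulates a ternary operator (and, like its JavaScript counterpart, falls back to `b` whenever `a` is falsy, such as an empty string). Since every build parameter is a `workflow_dispatch` input, the combined workflow can also be dispatched from the command line; a minimal sketch, assuming the file is saved as `.github/workflows/docker-image.yml` (the actual path is not shown in this diff):

```bash
# Dispatch the combined workflow; each -f flag sets one workflow_dispatch input.
gh workflow run docker-image.yml \
    -f registry=docker_hub \
    -f environment=dev \
    -f image_type=worker \
    -f node_version=20 \
    -f tag_version=latest
```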
@@ -114,52 +114,50 @@ Flowise has 3 different modules in a single mono repository.

to make sure everything works fine in production.

11. Commit code and submit Pull Request from forked branch pointing to [Flowise main](https://github.com/FlowiseAI/Flowise/tree/main).
11. Commit code and submit Pull Request from forked branch pointing to [Flowise master](https://github.com/FlowiseAI/Flowise/tree/master).

## 🌱 Env Variables

Flowise supports different environment variables to configure your instance. You can specify the following variables in the `.env` file inside the `packages/server` folder. Read [more](https://docs.flowiseai.com/environment-variables)

| Variable | Description | Type | Default |
| --- | --- | --- | --- |
| PORT | The HTTP port Flowise runs on | Number | 3000 |
| CORS_ORIGINS | The allowed origins for all cross-origin HTTP calls | String | |
| IFRAME_ORIGINS | The allowed origins for iframe src embedding | String | |
| FLOWISE_FILE_SIZE_LIMIT | Upload File Size Limit | String | 50mb |
| DEBUG | Print logs from components | Boolean | |
| LOG_PATH | Location where log files are stored | String | `your-path/Flowise/logs` |
| LOG_LEVEL | Different levels of logs | Enum String: `error`, `info`, `verbose`, `debug` | `info` |
| LOG_JSON_SPACES | Spaces to beautify JSON logs | | 2 |
| TOOL_FUNCTION_BUILTIN_DEP | NodeJS built-in modules to be used for Custom Tool or Function | String | |
| TOOL_FUNCTION_EXTERNAL_DEP | External modules to be used for Custom Tool or Function | String | |
| ALLOW_BUILTIN_DEP | Allow project dependencies to be used for Custom Tool or Function | Boolean | false |
| DATABASE_TYPE | Type of database to store the flowise data | Enum String: `sqlite`, `mysql`, `postgres` | `sqlite` |
| DATABASE_PATH | Location where database is saved (When DATABASE_TYPE is sqlite) | String | `your-home-dir/.flowise` |
| DATABASE_HOST | Host URL or IP address (When DATABASE_TYPE is not sqlite) | String | |
| DATABASE_PORT | Database port (When DATABASE_TYPE is not sqlite) | String | |
| DATABASE_USER | Database username (When DATABASE_TYPE is not sqlite) | String | |
| DATABASE_PASSWORD | Database password (When DATABASE_TYPE is not sqlite) | String | |
| DATABASE_NAME | Database name (When DATABASE_TYPE is not sqlite) | String | |
| DATABASE_SSL_KEY_BASE64 | Database SSL client cert in base64 (takes priority over DATABASE_SSL) | Boolean | false |
| DATABASE_SSL | Database connection over SSL (When DATABASE_TYPE is postgres) | Boolean | false |
| SECRETKEY_PATH | Location where encryption key (used to encrypt/decrypt credentials) is saved | String | `your-path/Flowise/packages/server` |
| FLOWISE_SECRETKEY_OVERWRITE | Encryption key to be used instead of the key stored in SECRETKEY_PATH | String | |
| MODEL_LIST_CONFIG_JSON | File path to load list of models from your local config file | String | `/your_model_list_config_file_path` |
| STORAGE_TYPE | Type of storage for uploaded files. default is `local` | Enum String: `s3`, `local`, `gcs` | `local` |
| BLOB_STORAGE_PATH | Local folder path where uploaded files are stored when `STORAGE_TYPE` is `local` | String | `your-home-dir/.flowise/storage` |
| S3_STORAGE_BUCKET_NAME | Bucket name to hold the uploaded files when `STORAGE_TYPE` is `s3` | String | |
| S3_STORAGE_ACCESS_KEY_ID | AWS Access Key | String | |
| S3_STORAGE_SECRET_ACCESS_KEY | AWS Secret Key | String | |
| S3_STORAGE_REGION | Region for S3 bucket | String | |
| S3_ENDPOINT_URL | Custom Endpoint for S3 | String | |
| S3_FORCE_PATH_STYLE | Set this to true to force the request to use path-style addressing | Boolean | false |
| GOOGLE_CLOUD_STORAGE_PROJ_ID | The GCP project id for cloud storage & logging when `STORAGE_TYPE` is `gcs` | String | |
| GOOGLE_CLOUD_STORAGE_CREDENTIAL | The credential key file path when `STORAGE_TYPE` is `gcs` | String | |
| GOOGLE_CLOUD_STORAGE_BUCKET_NAME | Bucket name to hold the uploaded files when `STORAGE_TYPE` is `gcs` | String | |
| GOOGLE_CLOUD_UNIFORM_BUCKET_ACCESS | Enable uniform bucket level access when `STORAGE_TYPE` is `gcs` | Boolean | true |
| SHOW_COMMUNITY_NODES | Show nodes created by community | Boolean | |
| DISABLED_NODES | Hide nodes from UI (comma separated list of node names) | String | |
| TRUST_PROXY | Configure proxy trust settings for proper IP detection. Values: 'true' (trust all), 'false' (disable), number (hop count), or Express proxy values (e.g., 'loopback', 'linklocal', 'uniquelocal', IP addresses). [Learn More](https://expressjs.com/en/guide/behind-proxies.html) | Boolean/String/Number | true |

The other side of this hunk carries the same table except that it omits the ALLOW_BUILTIN_DEP and TRUST_PROXY rows and describes TOOL_FUNCTION_BUILTIN_DEP and TOOL_FUNCTION_EXTERNAL_DEP as "NodeJS built-in modules to be used for Tool Function" and "External modules to be used for Tool Function".

You can also specify the env variables when using `npx`. For example:
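A minimal sketch, assuming the standard `npx flowise start` entry point (flag names mirror the variables in the table above):

```bash
# Each --VARIABLE flag sets the corresponding environment variable for this run.
npx flowise start --PORT=3000 --DEBUG=true
```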
Dockerfile — 39 changes

@@ -5,41 +5,34 @@
# docker run -d -p 3000:3000 flowise

FROM node:20-alpine
RUN apk add --update libc6-compat python3 make g++
# needed for pdfjs-dist
RUN apk add --no-cache build-base cairo-dev pango-dev

# Install system dependencies and build tools
RUN apk update && \
    apk add --no-cache \
        libc6-compat \
        python3 \
        make \
        g++ \
        build-base \
        cairo-dev \
        pango-dev \
        chromium \
        curl && \
    npm install -g pnpm
# Install Chromium
RUN apk add --no-cache chromium

# Install curl for container-level health checks
# Fixes: https://github.com/FlowiseAI/Flowise/issues/4126
RUN apk add --no-cache curl

#install PNPM globaly
RUN npm install -g pnpm

ENV PUPPETEER_SKIP_DOWNLOAD=true
ENV PUPPETEER_EXECUTABLE_PATH=/usr/bin/chromium-browser

ENV NODE_OPTIONS=--max-old-space-size=8192

WORKDIR /usr/src/flowise
WORKDIR /usr/src

# Copy app source
COPY . .

# Install dependencies and build
RUN pnpm install && \
    pnpm build
RUN pnpm install

# Give the node user ownership of the application files
RUN chown -R node:node .

# Switch to non-root user (node user already exists in node:20-alpine)
USER node
RUN pnpm build

EXPOSE 3000

CMD [ "pnpm", "start" ]
CMD [ "pnpm", "start" ]
README.md — 51 changes

@@ -23,16 +23,16 @@ English | [繁體中文](./i18n/README-TW.md) | [简体中文](./i18n/README-ZH.md)

## 📚 Table of Contents

-   [⚡ Quick Start](#-quick-start)
-   [🐳 Docker](#-docker)
-   [👨‍💻 Developers](#-developers)
-   [🌱 Env Variables](#-env-variables)
-   [📖 Documentation](#-documentation)
-   [🌐 Self Host](#-self-host)
-   [☁️ Flowise Cloud](#️-flowise-cloud)
-   [🙋 Support](#-support)
-   [🙌 Contributing](#-contributing)
-   [📄 License](#-license)

## ⚡Quick Start

@@ -64,19 +64,18 @@ Download and Install [NodeJS](https://nodejs.org/en/download) >= 18.15.0

### Docker Image

1. Build the image locally:

    ```bash
    docker build --no-cache -t flowise .
    ```

2. Run image:

    ```bash
    docker run -d --name flowise -p 3000:3000 flowise
    ```

3. Stop image:

    ```bash
    docker stop flowise
    ```

@@ -125,24 +124,10 @@ Flowise has 3 different modules in a single mono repository.

<details>
<summary>Exit code 134 (JavaScript heap out of memory)</summary>
If you get this error when running the above `build` script, try increasing the Node.js heap size and run the script again:

```bash
# macOS / Linux / Git Bash
export NODE_OPTIONS="--max-old-space-size=4096"

# Windows PowerShell
$env:NODE_OPTIONS="--max-old-space-size=4096"

# Windows CMD
set NODE_OPTIONS=--max-old-space-size=4096
```

Then run:

```bash
pnpm build
```

export NODE_OPTIONS="--max-old-space-size=4096"
pnpm build

</details>

@@ -190,10 +175,6 @@ Deploy Flowise self-hosted in your existing infrastructure, we support various [

[](https://railway.app/template/pn4G8S?referralCode=WVNPD9)

-   [Northflank](https://northflank.com/stacks/deploy-flowiseai)

[](https://northflank.com/stacks/deploy-flowiseai)

-   [Render](https://docs.flowiseai.com/configuration/deployment/render)

[](https://docs.flowiseai.com/configuration/deployment/render)
SECURITY.md — 58 changes

@@ -1,38 +1,40 @@
### Responsible Disclosure Policy

At Flowise, we prioritize security and continuously work to safeguard our systems. However, vulnerabilities can still exist. If you identify a security issue, please report it to us so we can address it promptly. Your cooperation helps us better protect our platform and users.

### Out of scope vulnerabilities
### Vulnerabilities

The following types of issues are some of the most common vulnerabilities:

-   Clickjacking on pages without sensitive actions
-   CSRF on unauthenticated/logout/login pages
-   Attacks requiring MITM (Man-in-the-Middle) or physical device access
-   Social engineering attacks
-   Activities that cause service disruption (DoS)
-   Content spoofing and text injection without a valid attack vector
-   Email spoofing
-   Absence of DNSSEC, CAA, CSP headers
-   Missing Secure or HTTP-only flag on non-sensitive cookies
-   Dead links
-   User enumeration

### Reporting Guidelines

-   Submit your findings to https://github.com/FlowiseAI/Flowise/security
-   Provide clear details to help us reproduce and fix the issue quickly.

### Disclosure Guidelines

-   Do not publicly disclose vulnerabilities until we have assessed, resolved, and notified affected users.
-   If you plan to present your research (e.g., at a conference or in a blog), share a draft with us at least **30 days in advance** for review.
-   Avoid including:
    -   Data from any Flowise customer projects
    -   Flowise user/customer information
    -   Details about Flowise employees, contractors, or partners

### Response to Reports

-   We will acknowledge your report within **5 business days** and provide an estimated resolution timeline.
-   Your report will be kept **confidential**, and your details will not be shared without your consent.

We appreciate your efforts in helping us maintain a secure platform and look forward to working together to resolve any issues responsibly.
@@ -14,7 +14,6 @@ DATABASE_PATH=/root/.flowise
# DATABASE_USER=root
# DATABASE_PASSWORD=mypassword
# DATABASE_SSL=true
# DATABASE_REJECT_UNAUTHORIZED=true
# DATABASE_SSL_KEY_BASE64=<Self signed certificate in BASE64>

@@ -38,11 +37,8 @@ SECRETKEY_PATH=/root/.flowise
# DEBUG=true
LOG_PATH=/root/.flowise/logs
# LOG_LEVEL=info #(error | warn | info | verbose | debug)
# LOG_SANITIZE_BODY_FIELDS=password,pwd,pass,secret,token,apikey,api_key,accesstoken,access_token,refreshtoken,refresh_token,clientsecret,client_secret,privatekey,private_key,secretkey,secret_key,auth,authorization,credential,credentials
# LOG_SANITIZE_HEADER_FIELDS=authorization,x-api-key,x-auth-token,cookie
# TOOL_FUNCTION_BUILTIN_DEP=crypto,fs
# TOOL_FUNCTION_EXTERNAL_DEP=moment,lodash
# ALLOW_BUILTIN_DEP=false

############################################################################################################

@@ -101,7 +97,6 @@ JWT_TOKEN_EXPIRY_IN_MINUTES=360
JWT_REFRESH_TOKEN_EXPIRY_IN_MINUTES=43200
# EXPIRE_AUTH_TOKENS_ON_RESTART=true # (if you need to expire all tokens on app restart)
# EXPRESS_SESSION_SECRET=flowise
# SECURE_COOKIES=

# INVITE_TOKEN_EXPIRY_IN_HOURS=24
# PASSWORD_RESET_TOKEN_EXPIRY_IN_MINS=15

@@ -167,14 +162,4 @@ JWT_REFRESH_TOKEN_EXPIRY_IN_MINUTES=43200
# REDIS_KEY=
# REDIS_CA=
# REDIS_KEEP_ALIVE=
# ENABLE_BULLMQ_DASHBOARD=

############################################################################################################
############################################## SECURITY ####################################################
############################################################################################################

# HTTP_DENY_LIST=
# CUSTOM_MCP_SECURITY_CHECK=true
# CUSTOM_MCP_PROTOCOL=sse #(stdio | sse)
# TRUST_PROXY=true #(true | false | 1 | loopback | linklocal | uniquelocal | IP addresses | loopback, IP addresses)
# ENABLE_BULLMQ_DASHBOARD=
@@ -18,7 +18,7 @@ If you like to persist your data (flows, logs, credentials, storage), set these
-   SECRETKEY_PATH=/root/.flowise
-   BLOB_STORAGE_PATH=/root/.flowise/storage

Flowise also support different environment variables to configure your instance. Read [more](https://docs.flowiseai.com/configuration/environment-variables)
Flowise also support different environment variables to configure your instance. Read [more](https://docs.flowiseai.com/environment-variables)

## Queue Mode:
@@ -46,13 +46,10 @@ services:
            - DEBUG=${DEBUG}
            - LOG_PATH=${LOG_PATH}
            - LOG_LEVEL=${LOG_LEVEL}
            - LOG_SANITIZE_BODY_FIELDS=${LOG_SANITIZE_BODY_FIELDS}
            - LOG_SANITIZE_HEADER_FIELDS=${LOG_SANITIZE_HEADER_FIELDS}

            # CUSTOM TOOL/FUNCTION DEPENDENCIES
            # CUSTOM TOOL DEPENDENCIES
            - TOOL_FUNCTION_BUILTIN_DEP=${TOOL_FUNCTION_BUILTIN_DEP}
            - TOOL_FUNCTION_EXTERNAL_DEP=${TOOL_FUNCTION_EXTERNAL_DEP}
            - ALLOW_BUILTIN_DEP=${ALLOW_BUILTIN_DEP}

            # STORAGE
            - STORAGE_TYPE=${STORAGE_TYPE}

@@ -91,7 +88,6 @@ services:
            - PASSWORD_RESET_TOKEN_EXPIRY_IN_MINS=${PASSWORD_RESET_TOKEN_EXPIRY_IN_MINS}
            - PASSWORD_SALT_HASH_ROUNDS=${PASSWORD_SALT_HASH_ROUNDS}
            - TOKEN_HASH_SECRET=${TOKEN_HASH_SECRET}
            - SECURE_COOKIES=${SECURE_COOKIES}

            # EMAIL
            - SMTP_HOST=${SMTP_HOST}

@@ -142,12 +138,6 @@ services:
            - REDIS_CA=${REDIS_CA}
            - REDIS_KEEP_ALIVE=${REDIS_KEEP_ALIVE}
            - ENABLE_BULLMQ_DASHBOARD=${ENABLE_BULLMQ_DASHBOARD}

            # SECURITY
            - CUSTOM_MCP_SECURITY_CHECK=${CUSTOM_MCP_SECURITY_CHECK}
            - CUSTOM_MCP_PROTOCOL=${CUSTOM_MCP_PROTOCOL}
            - HTTP_DENY_LIST=${HTTP_DENY_LIST}
            - TRUST_PROXY=${TRUST_PROXY}
        healthcheck:
            test: ['CMD', 'curl', '-f', 'http://localhost:${PORT:-3000}/api/v1/ping']
            interval: 10s

@@ -192,13 +182,10 @@ services:
            - DEBUG=${DEBUG}
            - LOG_PATH=${LOG_PATH}
            - LOG_LEVEL=${LOG_LEVEL}
            - LOG_SANITIZE_BODY_FIELDS=${LOG_SANITIZE_BODY_FIELDS}
            - LOG_SANITIZE_HEADER_FIELDS=${LOG_SANITIZE_HEADER_FIELDS}

            # CUSTOM TOOL/FUNCTION DEPENDENCIES
            # CUSTOM TOOL DEPENDENCIES
            - TOOL_FUNCTION_BUILTIN_DEP=${TOOL_FUNCTION_BUILTIN_DEP}
            - TOOL_FUNCTION_EXTERNAL_DEP=${TOOL_FUNCTION_EXTERNAL_DEP}
            - ALLOW_BUILTIN_DEP=${ALLOW_BUILTIN_DEP}

            # STORAGE
            - STORAGE_TYPE=${STORAGE_TYPE}

@@ -237,7 +224,6 @@ services:
            - PASSWORD_RESET_TOKEN_EXPIRY_IN_MINS=${PASSWORD_RESET_TOKEN_EXPIRY_IN_MINS}
            - PASSWORD_SALT_HASH_ROUNDS=${PASSWORD_SALT_HASH_ROUNDS}
            - TOKEN_HASH_SECRET=${TOKEN_HASH_SECRET}
            - SECURE_COOKIES=${SECURE_COOKIES}

            # EMAIL
            - SMTP_HOST=${SMTP_HOST}

@@ -288,12 +274,6 @@ services:
            - REDIS_CA=${REDIS_CA}
            - REDIS_KEEP_ALIVE=${REDIS_KEEP_ALIVE}
            - ENABLE_BULLMQ_DASHBOARD=${ENABLE_BULLMQ_DASHBOARD}

            # SECURITY
            - CUSTOM_MCP_SECURITY_CHECK=${CUSTOM_MCP_SECURITY_CHECK}
            - CUSTOM_MCP_PROTOCOL=${CUSTOM_MCP_PROTOCOL}
            - HTTP_DENY_LIST=${HTTP_DENY_LIST}
            - TRUST_PROXY=${TRUST_PROXY}
        healthcheck:
            test: ['CMD', 'curl', '-f', 'http://localhost:${WORKER_PORT:-5566}/healthz']
            interval: 10s
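One way to confirm which of these `${VAR}` substitutions actually resolve from the `.env` file is to render the effective configuration; a quick check, assuming it is run from the folder containing the compose file:

```bash
# Print the fully-resolved compose file; unset variables come through as empty strings.
docker compose config | grep -A 2 TOOL_FUNCTION
```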
@@ -31,13 +31,10 @@ services:
            - DEBUG=${DEBUG}
            - LOG_PATH=${LOG_PATH}
            - LOG_LEVEL=${LOG_LEVEL}
            - LOG_SANITIZE_BODY_FIELDS=${LOG_SANITIZE_BODY_FIELDS}
            - LOG_SANITIZE_HEADER_FIELDS=${LOG_SANITIZE_HEADER_FIELDS}

            # CUSTOM TOOL/FUNCTION DEPENDENCIES
            # CUSTOM TOOL DEPENDENCIES
            - TOOL_FUNCTION_BUILTIN_DEP=${TOOL_FUNCTION_BUILTIN_DEP}
            - TOOL_FUNCTION_EXTERNAL_DEP=${TOOL_FUNCTION_EXTERNAL_DEP}
            - ALLOW_BUILTIN_DEP=${ALLOW_BUILTIN_DEP}

            # STORAGE
            - STORAGE_TYPE=${STORAGE_TYPE}

@@ -76,7 +73,6 @@ services:
            - PASSWORD_RESET_TOKEN_EXPIRY_IN_MINS=${PASSWORD_RESET_TOKEN_EXPIRY_IN_MINS}
            - PASSWORD_SALT_HASH_ROUNDS=${PASSWORD_SALT_HASH_ROUNDS}
            - TOKEN_HASH_SECRET=${TOKEN_HASH_SECRET}
            - SECURE_COOKIES=${SECURE_COOKIES}

            # EMAIL
            - SMTP_HOST=${SMTP_HOST}

@@ -127,12 +123,6 @@ services:
            - REDIS_CA=${REDIS_CA}
            - REDIS_KEEP_ALIVE=${REDIS_KEEP_ALIVE}
            - ENABLE_BULLMQ_DASHBOARD=${ENABLE_BULLMQ_DASHBOARD}

            # SECURITY
            - CUSTOM_MCP_SECURITY_CHECK=${CUSTOM_MCP_SECURITY_CHECK}
            - CUSTOM_MCP_PROTOCOL=${CUSTOM_MCP_PROTOCOL}
            - HTTP_DENY_LIST=${HTTP_DENY_LIST}
            - TRUST_PROXY=${TRUST_PROXY}
        ports:
            - '${PORT}:${PORT}'
        healthcheck:
@@ -14,7 +14,6 @@ DATABASE_PATH=/root/.flowise
# DATABASE_USER=root
# DATABASE_PASSWORD=mypassword
# DATABASE_SSL=true
# DATABASE_REJECT_UNAUTHORIZED=true
# DATABASE_SSL_KEY_BASE64=<Self signed certificate in BASE64>

@@ -38,11 +37,8 @@ SECRETKEY_PATH=/root/.flowise
# DEBUG=true
LOG_PATH=/root/.flowise/logs
# LOG_LEVEL=info #(error | warn | info | verbose | debug)
# LOG_SANITIZE_BODY_FIELDS=password,pwd,pass,secret,token,apikey,api_key,accesstoken,access_token,refreshtoken,refresh_token,clientsecret,client_secret,privatekey,private_key,secretkey,secret_key,auth,authorization,credential,credentials
# LOG_SANITIZE_HEADER_FIELDS=authorization,x-api-key,x-auth-token,cookie
# TOOL_FUNCTION_BUILTIN_DEP=crypto,fs
# TOOL_FUNCTION_EXTERNAL_DEP=moment,lodash
# ALLOW_BUILTIN_DEP=false

############################################################################################################

@@ -101,7 +97,6 @@ JWT_TOKEN_EXPIRY_IN_MINUTES=360
JWT_REFRESH_TOKEN_EXPIRY_IN_MINUTES=43200
# EXPIRE_AUTH_TOKENS_ON_RESTART=true # (if you need to expire all tokens on app restart)
# EXPRESS_SESSION_SECRET=flowise
# SECURE_COOKIES=

# INVITE_TOKEN_EXPIRY_IN_HOURS=24
# PASSWORD_RESET_TOKEN_EXPIRY_IN_MINS=15

@@ -167,14 +162,4 @@ JWT_REFRESH_TOKEN_EXPIRY_IN_MINUTES=43200
# REDIS_KEY=
# REDIS_CA=
# REDIS_KEEP_ALIVE=
# ENABLE_BULLMQ_DASHBOARD=

############################################################################################################
############################################## SECURITY ####################################################
############################################################################################################

# HTTP_DENY_LIST=
# CUSTOM_MCP_SECURITY_CHECK=true
# CUSTOM_MCP_PROTOCOL=sse #(stdio | sse)
# TRUST_PROXY=true #(true | false | 1 | loopback | linklocal | uniquelocal | IP addresses | loopback, IP addresses)
# ENABLE_BULLMQ_DASHBOARD=
@@ -7,7 +7,7 @@ RUN apk add --no-cache build-base cairo-dev pango-dev
# Install Chromium and curl for container-level health checks
RUN apk add --no-cache chromium curl

#install PNPM globally
#install PNPM globaly
RUN npm install -g pnpm

ENV PUPPETEER_SKIP_DOWNLOAD=true
@@ -31,13 +31,10 @@ services:
            - DEBUG=${DEBUG}
            - LOG_PATH=${LOG_PATH}
            - LOG_LEVEL=${LOG_LEVEL}
            - LOG_SANITIZE_BODY_FIELDS=${LOG_SANITIZE_BODY_FIELDS}
            - LOG_SANITIZE_HEADER_FIELDS=${LOG_SANITIZE_HEADER_FIELDS}

            # CUSTOM TOOL/FUNCTION DEPENDENCIES
            # CUSTOM TOOL DEPENDENCIES
            - TOOL_FUNCTION_BUILTIN_DEP=${TOOL_FUNCTION_BUILTIN_DEP}
            - TOOL_FUNCTION_EXTERNAL_DEP=${TOOL_FUNCTION_EXTERNAL_DEP}
            - ALLOW_BUILTIN_DEP=${ALLOW_BUILTIN_DEP}

            # STORAGE
            - STORAGE_TYPE=${STORAGE_TYPE}

@@ -76,7 +73,6 @@ services:
            - PASSWORD_RESET_TOKEN_EXPIRY_IN_MINS=${PASSWORD_RESET_TOKEN_EXPIRY_IN_MINS}
            - PASSWORD_SALT_HASH_ROUNDS=${PASSWORD_SALT_HASH_ROUNDS}
            - TOKEN_HASH_SECRET=${TOKEN_HASH_SECRET}
            - SECURE_COOKIES=${SECURE_COOKIES}

            # EMAIL
            - SMTP_HOST=${SMTP_HOST}

@@ -127,12 +123,6 @@ services:
            - REDIS_CA=${REDIS_CA}
            - REDIS_KEEP_ALIVE=${REDIS_KEEP_ALIVE}
            - ENABLE_BULLMQ_DASHBOARD=${ENABLE_BULLMQ_DASHBOARD}

            # SECURITY
            - CUSTOM_MCP_SECURITY_CHECK=${CUSTOM_MCP_SECURITY_CHECK}
            - CUSTOM_MCP_PROTOCOL=${CUSTOM_MCP_PROTOCOL}
            - HTTP_DENY_LIST=${HTTP_DENY_LIST}
            - TRUST_PROXY=${TRUST_PROXY}
        ports:
            - '${WORKER_PORT}:${WORKER_PORT}'
        healthcheck:
@@ -112,7 +112,7 @@ Flowise has 3 different modules in a single mono repository.
pnpm start
```

11. Commit code and submit a Pull Request from your forked branch pointing to [Flowise main](https://github.com/FlowiseAI/Flowise/tree/main).
11. Commit code and submit a Pull Request from your forked branch pointing to [Flowise master](https://github.com/FlowiseAI/Flowise/tree/master).

## 🌱 Env Variables
@@ -13,7 +13,7 @@

[English](../README.md) | 繁體中文 | [简体中文](./README-ZH.md) | [日本語](./README-JA.md) | [한국어](./README-KR.md)

<h3>Build AI/LLM flows visually</h3>
<a href="https://github.com/FlowiseAI/Flowise">
<img width="100%" src="https://github.com/FlowiseAI/Flowise/blob/main/images/flowise_agentflow.gif?raw=true"></a>

@@ -37,16 +37,16 @@

### Docker Compose

1. Clone the Flowise project
2. Go to the `docker` folder at the project root
3. Copy the `.env.example` file, paste it into the same location, and rename it to `.env`
4. `docker compose up -d`
5. Open [http://localhost:3000](http://localhost:3000)
6. You can stop the containers with `docker compose stop`

### Docker Image

1. Build the image locally:
```bash
docker build --no-cache -t flowise .
```

@@ -63,7 +63,7 @@

## 👨‍💻 Developers

Flowise has 3 different modules in a single mono repository.

-   `server`: Node backend to serve API logics
-   `ui`: React frontend

@@ -79,33 +79,33 @@

### Setup

1. Clone the repository

    ```bash
    git clone https://github.com/FlowiseAI/Flowise.git
    ```

2. Go into the repository folder

    ```bash
    cd Flowise
    ```

3. Install all dependencies of all modules:

    ```bash
    pnpm install
    ```

4. Build all the code:

    ```bash
    pnpm build
    ```

<details>
<summary>Exit code 134 (JavaScript heap out of memory)</summary>
If you get this error when running the above `build` script, try increasing the Node.js heap size and run the script again:

export NODE_OPTIONS="--max-old-space-size=4096"
pnpm build

@@ -118,9 +118,9 @@

pnpm start
```

You can now open [http://localhost:3000](http://localhost:3000)

6. For development build:

-   Create a `.env` file in `packages/ui` and specify `VITE_PORT` (refer to `.env.example`)
-   Create a `.env` file in `packages/server` and specify `PORT` (refer to `.env.example`)

@@ -130,19 +130,19 @@

pnpm dev
```

Any code change will reload the app automatically at [http://localhost:8080](http://localhost:8080)

## 🌱 Env Variables

Flowise supports different environment variables to configure your instance. You can specify the following variables in the `.env` file inside the `packages/server` folder. Read [more](https://github.com/FlowiseAI/Flowise/blob/main/CONTRIBUTING.md#-env-variables)

## 📖 Documentation

[Flowise Docs](https://docs.flowiseai.com/)

## 🌐 Self Host

Deploy Flowise self-hosted in your existing infrastructure; we support various [deployments](https://docs.flowiseai.com/configuration/deployment)

-   [AWS](https://docs.flowiseai.com/configuration/deployment/aws)
-   [Azure](https://docs.flowiseai.com/configuration/deployment/azure)

@@ -178,9 +178,9 @@

</details>

## ☁️ Flowise Cloud

[Get Started with Flowise Cloud](https://flowiseai.com/)

## 🙋 Support

@@ -194,9 +194,9 @@

<img src="https://contrib.rocks/image?repo=FlowiseAI/Flowise" />
</a>

See the [Contributing Guide](../CONTRIBUTING.md). Reach out to us on [Discord](https://discord.gg/jbaHfsRVBW) if you have any questions or issues.
[![Star History Chart](https://api.star-history.com/svg?repos=FlowiseAI/Flowise&type=Date)](https://star-history.com/#FlowiseAI/Flowise&Date)

## 📄 License

Source code in this repository is made available under the [Apache License Version 2.0](../LICENSE.md).
@@ -1,7 +1,6 @@
version: "2"
services:
    otel-collector:
        read_only: true
        image: otel/opentelemetry-collector-contrib
        command: ["--config=/etc/otelcol-contrib/config.yaml", "--feature-gates=-exporter.datadogexporter.DisableAPMStats", "${OTELCOL_ARGS}"]
        volumes:
package.json hunks:

@@ -1,6 +1,6 @@
{
    "name": "flowise",
    "version": "3.0.11",
    "version": "3.0.4",
    "private": true,
    "homepage": "https://flowiseai.com",
    "workspaces": [

@@ -51,7 +51,7 @@
    "eslint-plugin-react-hooks": "^4.6.0",
    "eslint-plugin-unused-imports": "^2.0.0",
    "husky": "^8.0.1",
    "kill-port": "2.0.1",
    "kill-port": "^2.0.1",
    "lint-staged": "^13.0.3",
    "prettier": "^2.7.1",
    "pretty-quick": "^3.1.3",

@@ -66,11 +66,10 @@
        "sqlite3"
    ],
    "overrides": {
        "axios": "1.12.0",
        "axios": "1.10.0",
        "body-parser": "2.0.2",
        "braces": "3.0.3",
        "cross-spawn": "7.0.6",
        "form-data": "4.0.4",
        "glob-parent": "6.0.2",
        "http-proxy-middleware": "3.0.3",
        "json5": "2.2.3",

@@ -80,7 +79,6 @@
        "rollup": "4.45.0",
        "semver": "7.7.1",
        "set-value": "4.1.0",
        "solid-js": "1.9.7",
        "tar-fs": "3.1.0",
        "unset-value": "2.0.1",
        "webpack-dev-middleware": "7.4.2",
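These `overrides` pin transitive dependency versions across the whole workspace, which is why security bumps like the `axios` change above land here rather than in a package's own `dependencies`. A quick way to see which packages pull in an overridden dependency (assuming pnpm is installed):

```bash
# Show every dependency path that resolves the overridden package.
pnpm why axios
```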
@ -1,23 +0,0 @@
|
|||
import { INodeCredential, INodeParams } from '../src/Interface'
|
||||
|
||||
class CometApi implements INodeCredential {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
inputs: INodeParams[]
|
||||
|
||||
constructor() {
|
||||
this.label = 'Comet API'
|
||||
this.name = 'cometApi'
|
||||
this.version = 1.0
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'Comet API Key',
|
||||
name: 'cometApiKey',
|
||||
type: 'password'
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { credClass: CometApi }
|
||||
|
|
@ -1,26 +0,0 @@
|
|||
import { INodeParams, INodeCredential } from '../src/Interface'
|
||||
|
||||
class ElevenLabsApi implements INodeCredential {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
description: string
|
||||
inputs: INodeParams[]
|
||||
|
||||
constructor() {
|
||||
this.label = 'Eleven Labs API'
|
||||
this.name = 'elevenLabsApi'
|
||||
this.version = 1.0
|
||||
this.description =
|
||||
'Sign up for a Eleven Labs account and <a target="_blank" href="https://elevenlabs.io/app/settings/api-keys">create an API Key</a>.'
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'Eleven Labs API Key',
|
||||
name: 'elevenLabsApiKey',
|
||||
type: 'password'
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { credClass: ElevenLabsApi }
|
||||
|
|
@ -1,23 +0,0 @@
|
|||
import { INodeParams, INodeCredential } from '../src/Interface'
|
||||
|
||||
class SambanovaApi implements INodeCredential {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
inputs: INodeParams[]
|
||||
|
||||
constructor() {
|
||||
this.label = 'Sambanova API'
|
||||
this.name = 'sambanovaApi'
|
||||
this.version = 1.0
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'Sambanova Api Key',
|
||||
name: 'sambanovaApiKey',
|
||||
type: 'password'
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { credClass: SambanovaApi }
|
||||
|
|
@ -1,26 +0,0 @@
|
|||
import { INodeParams, INodeCredential } from '../src/Interface'
|
||||
|
||||
class TeradataBearerTokenCredential implements INodeCredential {
|
||||
label: string
|
||||
name: string
|
||||
description: string
|
||||
version: number
|
||||
inputs: INodeParams[]
|
||||
|
||||
constructor() {
|
||||
this.label = 'Teradata Bearer Token'
|
||||
this.name = 'teradataBearerToken'
|
||||
this.version = 1.0
|
||||
this.description =
|
||||
'Refer to <a target="_blank" href="https://docs.teradata.com/r/Enterprise_IntelliFlex_VMware/Teradata-Vector-Store-User-Guide/Setting-up-Vector-Store/Importing-Modules-Required-for-Vector-Store">official guide</a> on how to get Teradata Bearer Token'
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'Token',
|
||||
name: 'token',
|
||||
type: 'password'
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { credClass: TeradataBearerTokenCredential }
|
||||
|
|
@ -1,28 +0,0 @@
|
|||
import { INodeParams, INodeCredential } from '../src/Interface'
|
||||
|
||||
class TeradataTD2Credential implements INodeCredential {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
inputs: INodeParams[]
|
||||
|
||||
constructor() {
|
||||
this.label = 'Teradata TD2 Auth'
|
||||
this.name = 'teradataTD2Auth'
|
||||
this.version = 1.0
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'Teradata TD2 Auth Username',
|
||||
name: 'tdUsername',
|
||||
type: 'string'
|
||||
},
|
||||
{
|
||||
label: 'Teradata TD2 Auth Password',
|
||||
name: 'tdPassword',
|
||||
type: 'password'
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { credClass: TeradataTD2Credential }
|
||||
|
|
@ -1,47 +0,0 @@
|
|||
import { INodeParams, INodeCredential } from '../src/Interface'
|
||||
|
||||
class TeradataVectorStoreApiCredentials implements INodeCredential {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
inputs: INodeParams[]
|
||||
|
||||
constructor() {
|
||||
this.label = 'Teradata Vector Store API Credentials'
|
||||
this.name = 'teradataVectorStoreApiCredentials'
|
||||
this.version = 1.0
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'Teradata Host IP',
|
||||
name: 'tdHostIp',
|
||||
type: 'string'
|
||||
},
|
||||
{
|
||||
label: 'Username',
|
||||
name: 'tdUsername',
|
||||
type: 'string'
|
||||
},
|
||||
{
|
||||
label: 'Password',
|
||||
name: 'tdPassword',
|
||||
type: 'password'
|
||||
},
|
||||
{
|
||||
label: 'Vector_Store_Base_URL',
|
||||
name: 'baseURL',
|
||||
description: 'Teradata Vector Store Base URL',
|
||||
placeholder: `Base_URL`,
|
||||
type: 'string'
|
||||
},
|
||||
{
|
||||
label: 'JWT Token',
|
||||
name: 'jwtToken',
|
||||
type: 'password',
|
||||
description: 'Bearer token for JWT authentication',
|
||||
optional: true
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { credClass: TeradataVectorStoreApiCredentials }
|
||||
|
|
@ -1,15 +0,0 @@
|
|||
module.exports = {
|
||||
preset: 'ts-jest',
|
||||
testEnvironment: 'node',
|
||||
roots: ['<rootDir>/nodes'],
|
||||
transform: {
|
||||
'^.+\\.tsx?$': 'ts-jest'
|
||||
},
|
||||
testRegex: '(/__tests__/.*|(\\.|/)(test|spec))\\.tsx?$',
|
||||
moduleFileExtensions: ['ts', 'tsx', 'js', 'jsx', 'json', 'node'],
|
||||
verbose: true,
|
||||
testPathIgnorePatterns: ['/node_modules/', '/dist/'],
|
||||
moduleNameMapper: {
|
||||
'^../../../src/(.*)$': '<rootDir>/src/$1'
|
||||
}
|
||||
}
|
||||
|
|
@ -3,48 +3,6 @@
|
|||
{
|
||||
"name": "awsChatBedrock",
|
||||
"models": [
|
||||
{
|
||||
"label": "anthropic.claude-opus-4-5-20251101-v1:0",
|
||||
"name": "anthropic.claude-opus-4-5-20251101-v1:0",
|
||||
"description": "Claude 4.5 Opus",
|
||||
"input_cost": 0.000005,
|
||||
"output_cost": 0.000025
|
||||
},
|
||||
{
|
||||
"label": "anthropic.claude-sonnet-4-5-20250929-v1:0",
|
||||
"name": "anthropic.claude-sonnet-4-5-20250929-v1:0",
|
||||
"description": "Claude 4.5 Sonnet",
|
||||
"input_cost": 0.000003,
|
||||
"output_cost": 0.000015
|
||||
},
|
||||
{
|
||||
"label": "anthropic.claude-haiku-4-5-20251001-v1:0",
|
||||
"name": "anthropic.claude-haiku-4-5-20251001-v1:0",
|
||||
"description": "Claude 4.5 Haiku",
|
||||
"input_cost": 0.000001,
|
||||
"output_cost": 0.000005
|
||||
},
|
||||
{
|
||||
"label": "openai.gpt-oss-20b-1:0",
|
||||
"name": "openai.gpt-oss-20b-1:0",
|
||||
"description": "21B parameters model optimized for lower latency, local, and specialized use cases",
|
||||
"input_cost": 0.00007,
|
||||
"output_cost": 0.0003
|
||||
},
|
||||
{
|
||||
"label": "openai.gpt-oss-120b-1:0",
|
||||
"name": "openai.gpt-oss-120b-1:0",
|
||||
"description": "120B parameters model optimized for production, general purpose, and high-reasoning use cases",
|
||||
"input_cost": 0.00015,
|
||||
"output_cost": 0.0006
|
||||
},
|
||||
{
|
||||
"label": "anthropic.claude-opus-4-1-20250805-v1:0",
|
||||
"name": "anthropic.claude-opus-4-1-20250805-v1:0",
|
||||
"description": "Claude 4.1 Opus",
|
||||
"input_cost": 0.000015,
|
||||
"output_cost": 0.000075
|
||||
},
|
||||
{
|
||||
"label": "anthropic.claude-sonnet-4-20250514-v1:0",
|
||||
"name": "anthropic.claude-sonnet-4-20250514-v1:0",
|
||||
|
|
@ -322,30 +280,6 @@
|
|||
{
|
||||
"name": "azureChatOpenAI",
|
||||
"models": [
|
||||
{
|
||||
"label": "gpt-5.1",
|
||||
"name": "gpt-5.1",
|
||||
"input_cost": 0.00000125,
|
||||
"output_cost": 0.00001
|
||||
},
|
||||
{
|
||||
"label": "gpt-5",
|
||||
"name": "gpt-5",
|
||||
"input_cost": 0.00000125,
|
||||
"output_cost": 0.00001
|
||||
},
|
||||
{
|
||||
"label": "gpt-5-mini",
|
||||
"name": "gpt-5-mini",
|
||||
"input_cost": 0.00000025,
|
||||
"output_cost": 0.000002
|
||||
},
|
||||
{
|
||||
"label": "gpt-5-nano",
|
||||
"name": "gpt-5-nano",
|
||||
"input_cost": 0.00000005,
|
||||
"output_cost": 0.0000004
|
||||
},
|
||||
{
|
||||
"label": "gpt-4.1",
|
||||
"name": "gpt-4.1",
|
||||
|
|
@ -423,18 +357,6 @@
|
|||
"name": "gpt-4.5-preview",
|
||||
"input_cost": 0.000075,
|
||||
"output_cost": 0.00015
|
||||
},
|
||||
{
|
||||
"label": "gpt-4.1-mini",
|
||||
"name": "gpt-4.1-mini",
|
||||
"input_cost": 0.0000004,
|
||||
"output_cost": 0.0000016
|
||||
},
|
||||
{
|
||||
"label": "gpt-5-chat-latest",
|
||||
"name": "gpt-5-chat-latest",
|
||||
"input_cost": 0.00000125,
|
||||
"output_cost": 0.00001
|
||||
}
|
||||
]
|
||||
},
|
||||
|
|
@ -494,45 +416,12 @@
|
|||
"name": "gpt-4-1106-preview",
|
||||
"input_cost": 0.00001,
|
||||
"output_cost": 0.00003
|
||||
},
|
||||
{
|
||||
"label": "gpt-4.1-mini",
|
||||
"name": "gpt-4.1-mini",
|
||||
"input_cost": 0.0000004,
|
||||
"output_cost": 0.0000016
|
||||
},
|
||||
{
|
||||
"label": "gpt-5-chat-latest",
|
||||
"name": "gpt-5-chat-latest",
|
||||
"input_cost": 0.00000125,
|
||||
"output_cost": 0.00001
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "chatAnthropic",
|
||||
"models": [
|
||||
{
|
||||
"label": "claude-opus-4-5",
|
||||
"name": "claude-opus-4-5",
|
||||
"description": "Claude 4.5 Opus",
|
||||
"input_cost": 0.000005,
|
||||
"output_cost": 0.000025
|
||||
},
|
||||
{
|
||||
"label": "claude-sonnet-4-5",
|
||||
"name": "claude-sonnet-4-5",
|
||||
"description": "Claude 4.5 Sonnet",
|
||||
"input_cost": 0.000003,
|
||||
"output_cost": 0.000015
|
||||
},
|
||||
{
|
||||
"label": "claude-haiku-4-5",
|
||||
"name": "claude-haiku-4-5",
|
||||
"description": "Claude 4.5 Haiku",
|
||||
"input_cost": 0.000001,
|
||||
"output_cost": 0.000005
|
||||
},
|
||||
{
|
||||
"label": "claude-sonnet-4-0",
|
||||
"name": "claude-sonnet-4-0",
|
||||
|
|
@ -540,13 +429,6 @@
|
|||
"input_cost": 0.000003,
|
||||
"output_cost": 0.000015
|
||||
},
|
||||
{
|
||||
"label": "claude-opus-4-1",
|
||||
"name": "claude-opus-4-1",
|
||||
"description": "Claude 4.1 Opus",
|
||||
"input_cost": 0.000015,
|
||||
"output_cost": 0.000075
|
||||
},
|
||||
{
|
||||
"label": "claude-opus-4-0",
|
||||
"name": "claude-opus-4-0",
|
||||
|
|
@ -642,41 +524,17 @@
|
|||
"name": "chatGoogleGenerativeAI",
|
||||
"models": [
|
||||
{
|
||||
"label": "gemini-3-pro-preview",
|
||||
"name": "gemini-3-pro-preview",
|
||||
"input_cost": 0.00002,
|
||||
"output_cost": 0.00012
|
||||
"label": "gemini-2.5-flash-preview-05-20",
|
||||
"name": "gemini-2.5-flash-preview-05-20",
|
||||
"input_cost": 0.15e-6,
|
||||
"output_cost": 6e-7
|
||||
},
|
||||
{
|
||||
"label": "gemini-3-pro-image-preview",
|
||||
"name": "gemini-3-pro-image-preview",
|
||||
"input_cost": 0.00002,
|
||||
"output_cost": 0.00012
|
||||
},
|
||||
{
|
||||
"label": "gemini-2.5-pro",
|
||||
"name": "gemini-2.5-pro",
|
||||
"input_cost": 0.3e-6,
|
||||
"output_cost": 0.000025
|
||||
},
|
||||
{
|
||||
"label": "gemini-2.5-flash",
|
||||
"name": "gemini-2.5-flash",
|
||||
"label": "gemini-2.5-pro-preview-03-25",
|
||||
"name": "gemini-2.5-pro-preview-03-25",
|
||||
"input_cost": 1.25e-6,
|
||||
"output_cost": 0.00001
|
||||
},
|
||||
{
|
||||
"label": "gemini-2.5-flash-image",
|
||||
"name": "gemini-2.5-flash-image",
|
||||
"input_cost": 1.25e-6,
|
||||
"output_cost": 0.00001
|
||||
},
|
||||
{
|
||||
"label": "gemini-2.5-flash-lite",
|
||||
"name": "gemini-2.5-flash-lite",
|
||||
"input_cost": 1e-7,
|
||||
"output_cost": 4e-7
|
||||
},
|
||||
{
|
||||
"label": "gemini-2.0-flash",
|
||||
"name": "gemini-2.0-flash",
|
||||
|
|
@ -724,29 +582,17 @@
|
|||
"name": "chatGoogleVertexAI",
|
||||
"models": [
|
||||
{
|
||||
"label": "gemini-3-pro-preview",
|
||||
"name": "gemini-3-pro-preview",
|
||||
"input_cost": 0.00002,
|
||||
"output_cost": 0.00012
|
||||
"label": "gemini-2.5-flash",
|
||||
"name": "gemini-2.5-flash",
|
||||
"input_cost": 0.15e-6,
|
||||
"output_cost": 6e-7
|
||||
},
|
||||
{
|
||||
"label": "gemini-2.5-pro",
|
||||
"name": "gemini-2.5-pro",
|
||||
"input_cost": 0.3e-6,
|
||||
"output_cost": 0.000025
|
||||
},
|
||||
{
|
||||
"label": "gemini-2.5-flash",
|
||||
"name": "gemini-2.5-flash",
|
||||
"input_cost": 1.25e-6,
|
||||
"output_cost": 0.00001
|
||||
},
|
||||
{
|
||||
"label": "gemini-2.5-flash-lite",
|
||||
"name": "gemini-2.5-flash-lite",
|
||||
"input_cost": 1e-7,
|
||||
"output_cost": 4e-7
|
||||
},
|
||||
{
|
||||
"label": "gemini-2.0-flash",
|
||||
"name": "gemini-2.0-flash-001",
|
||||
|
|
@ -795,34 +641,6 @@
|
|||
"input_cost": 1.25e-7,
|
||||
"output_cost": 3.75e-7
|
||||
},
|
||||
{
|
||||
"label": "claude-opus-4-5@20251101",
|
||||
"name": "claude-opus-4-5@20251101",
|
||||
"description": "Claude 4.5 Opus",
|
||||
"input_cost": 0.000005,
|
||||
"output_cost": 0.000025
|
||||
},
|
||||
{
|
||||
"label": "claude-sonnet-4-5@20250929",
|
||||
"name": "claude-sonnet-4-5@20250929",
|
||||
"description": "Claude 4.5 Sonnet",
|
||||
"input_cost": 0.000003,
|
||||
"output_cost": 0.000015
|
||||
},
|
||||
{
|
||||
"label": "claude-haiku-4-5@20251001",
|
||||
"name": "claude-haiku-4-5@20251001",
|
||||
"description": "Claude 4.5 Haiku",
|
||||
"input_cost": 0.000001,
|
||||
"output_cost": 0.000005
|
||||
},
|
||||
{
|
||||
"label": "claude-opus-4-1@20250805",
|
||||
"name": "claude-opus-4-1@20250805",
|
||||
"description": "Claude 4.1 Opus",
|
||||
"input_cost": 0.000015,
|
||||
"output_cost": 0.000075
|
||||
},
|
||||
{
|
||||
"label": "claude-sonnet-4@20250514",
|
||||
"name": "claude-sonnet-4@20250514",
|
||||
|
|
@@ -928,14 +746,6 @@
{
"name": "groqChat",
"models": [
{
"label": "openai/gpt-oss-20b",
"name": "openai/gpt-oss-20b"
},
{
"label": "openai/gpt-oss-120b",
"name": "openai/gpt-oss-120b"
},
{
"label": "meta-llama/llama-4-maverick-17b-128e-instruct",
"name": "meta-llama/llama-4-maverick-17b-128e-instruct"
@@ -1047,30 +857,6 @@
{
"name": "chatOpenAI",
"models": [
{
"label": "gpt-5.1",
"name": "gpt-5.1",
"input_cost": 0.00000125,
"output_cost": 0.00001
},
{
"label": "gpt-5",
"name": "gpt-5",
"input_cost": 0.00000125,
"output_cost": 0.00001
},
{
"label": "gpt-5-mini",
"name": "gpt-5-mini",
"input_cost": 0.00000025,
"output_cost": 0.000002
},
{
"label": "gpt-5-nano",
"name": "gpt-5-nano",
"input_cost": 0.00000005,
"output_cost": 0.0000004
},
{
"label": "gpt-4.1",
"name": "gpt-4.1",
@@ -1805,18 +1591,6 @@
"name": "gpt-4-32k",
"input_cost": 0.00006,
"output_cost": 0.00012
},
{
"label": "gpt-4.1-mini",
"name": "gpt-4.1-mini",
"input_cost": 0.0000004,
"output_cost": 0.0000016
},
{
"label": "gpt-5-chat-latest",
"name": "gpt-5-chat-latest",
"input_cost": 0.00000125,
"output_cost": 0.00001
}
]
},
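Editorial note (illustrative, not part of the diff): the `input_cost` and `output_cost` values in this file appear to be USD per single token, so an entry such as `"input_cost": 1.25e-6` reads as $1.25 per million tokens. A minimal sketch of how one of these entries could be used, with a hypothetical `estimateCost` helper:

```typescript
// Illustrative only: assumes the costs above are USD per token.
const model = { name: 'gpt-5', input_cost: 0.00000125, output_cost: 0.00001 }

// Hypothetical helper: estimated cost for 1,200 prompt tokens and 300 completion tokens.
const estimateCost = (inputTokens: number, outputTokens: number): number =>
    inputTokens * model.input_cost + outputTokens * model.output_cost

console.log(estimateCost(1200, 300)) // 0.0045, i.e. $0.0045
```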

@@ -317,7 +317,7 @@ class Condition_Agentflow implements INode {
}
}

// If no condition is fulfilled, add isFulfilled to the ELSE condition
// If no condition is fullfilled, add isFulfilled to the ELSE condition
const dummyElseConditionData = {
type: 'string',
value1: '',
@@ -8,7 +8,7 @@ import {
INodeParams,
IServerSideEventStreamer
} from '../../../src/Interface'
import { getVars, executeJavaScriptCode, createCodeExecutionSandbox, processTemplateVariables } from '../../../src/utils'
import { getVars, executeJavaScriptCode, createCodeExecutionSandbox } from '../../../src/utils'
import { updateFlowState } from '../utils'

interface ICustomFunctionInputVariables {
@@ -60,7 +60,7 @@ class CustomFunction_Agentflow implements INode {
constructor() {
this.label = 'Custom Function'
this.name = 'customFunctionAgentflow'
this.version = 1.1
this.version = 1.0
this.type = 'CustomFunction'
this.category = 'Agent Flows'
this.description = 'Execute custom function'
@@ -107,7 +107,8 @@ class CustomFunction_Agentflow implements INode {
label: 'Key',
name: 'key',
type: 'asyncOptions',
loadMethod: 'listRuntimeStateKeys'
loadMethod: 'listRuntimeStateKeys',
freeSolo: true
},
{
label: 'Value',
@@ -133,7 +134,7 @@ class CustomFunction_Agentflow implements INode {

async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<any> {
const javascriptFunction = nodeData.inputs?.customFunctionJavascriptFunction as string
const functionInputVariables = (nodeData.inputs?.customFunctionInputVariables as ICustomFunctionInputVariables[]) ?? []
const functionInputVariables = nodeData.inputs?.customFunctionInputVariables as ICustomFunctionInputVariables[]
const _customFunctionUpdateState = nodeData.inputs?.customFunctionUpdateState

const state = options.agentflowRuntime?.state as ICommonObject
@@ -144,19 +145,19 @@ class CustomFunction_Agentflow implements INode {
const appDataSource = options.appDataSource as DataSource
const databaseEntities = options.databaseEntities as IDatabaseEntity

// Update flow state if needed
let newState = { ...state }
if (_customFunctionUpdateState && Array.isArray(_customFunctionUpdateState) && _customFunctionUpdateState.length > 0) {
newState = updateFlowState(state, _customFunctionUpdateState)
}

const variables = await getVars(appDataSource, databaseEntities, nodeData, options)
const flow = {
input,
state,
chatflowId: options.chatflowid,
sessionId: options.sessionId,
chatId: options.chatId,
rawOutput: options.postProcessing?.rawOutput || '',
chatHistory: options.postProcessing?.chatHistory || [],
sourceDocuments: options.postProcessing?.sourceDocuments,
usedTools: options.postProcessing?.usedTools,
artifacts: options.postProcessing?.artifacts,
fileAnnotations: options.postProcessing?.fileAnnotations
input,
state: newState
}

// Create additional sandbox variables for custom function inputs
@@ -180,7 +181,8 @@ class CustomFunction_Agentflow implements INode {
try {
const response = await executeJavaScriptCode(javascriptFunction, sandbox, {
libraries: ['axios'],
streamOutput
streamOutput,
timeout: 10000
})

let finalOutput = response
@@ -188,14 +190,15 @@ class CustomFunction_Agentflow implements INode {
finalOutput = JSON.stringify(response, null, 2)
}

// Update flow state if needed
let newState = { ...state }
if (_customFunctionUpdateState && Array.isArray(_customFunctionUpdateState) && _customFunctionUpdateState.length > 0) {
newState = updateFlowState(state, _customFunctionUpdateState)
// Process template variables in state
if (newState && Object.keys(newState).length > 0) {
for (const key in newState) {
if (newState[key].toString().includes('{{ output }}')) {
newState[key] = finalOutput
}
}
}

newState = processTemplateVariables(newState, finalOutput)

const returnOutput = {
id: nodeData.id,
name: this.name,

@@ -8,7 +8,7 @@ import {
IServerSideEventStreamer
} from '../../../src/Interface'
import axios, { AxiosRequestConfig } from 'axios'
import { getCredentialData, getCredentialParam, processTemplateVariables, parseJsonBody } from '../../../src/utils'
import { getCredentialData, getCredentialParam } from '../../../src/utils'
import { DataSource } from 'typeorm'
import { BaseMessageLike } from '@langchain/core/messages'
import { updateFlowState } from '../utils'
@@ -30,7 +30,7 @@ class ExecuteFlow_Agentflow implements INode {
constructor() {
this.label = 'Execute Flow'
this.name = 'executeFlowAgentflow'
this.version = 1.2
this.version = 1.1
this.type = 'ExecuteFlow'
this.category = 'Agent Flows'
this.description = 'Execute another flow'
@@ -102,7 +102,8 @@ class ExecuteFlow_Agentflow implements INode {
label: 'Key',
name: 'key',
type: 'asyncOptions',
loadMethod: 'listRuntimeStateKeys'
loadMethod: 'listRuntimeStateKeys',
freeSolo: true
},
{
label: 'Value',
@@ -166,7 +167,9 @@ class ExecuteFlow_Agentflow implements INode {
let overrideConfig = nodeData.inputs?.executeFlowOverrideConfig
if (typeof overrideConfig === 'string' && overrideConfig.startsWith('{') && overrideConfig.endsWith('}')) {
try {
overrideConfig = parseJsonBody(overrideConfig)
// Handle escaped square brackets and other common escape sequences
const unescapedConfig = overrideConfig.replace(/\\(\[|\])/g, '$1')
overrideConfig = JSON.parse(unescapedConfig)
} catch (parseError) {
throw new Error(`Invalid JSON in executeFlowOverrideConfig: ${parseError.message}`)
}
@@ -219,7 +222,13 @@ class ExecuteFlow_Agentflow implements INode {
}

// Process template variables in state
newState = processTemplateVariables(newState, resultText)
if (newState && Object.keys(newState).length > 0) {
for (const key in newState) {
if (newState[key].toString().includes('{{ output }}')) {
newState[key] = resultText
}
}
}

// Only add to runtime chat history if this is the first node
const inputMessages = []

@@ -1,9 +1,10 @@
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { AxiosRequestConfig, Method, ResponseType } from 'axios'
import axios, { AxiosRequestConfig, Method, ResponseType } from 'axios'
import FormData from 'form-data'
import * as querystring from 'querystring'
import { getCredentialData, getCredentialParam, parseJsonBody } from '../../../src/utils'
import { secureAxiosRequest } from '../../../src/httpSecurity'
import { getCredentialData, getCredentialParam } from '../../../src/utils'
import * as ipaddr from 'ipaddr.js'
import dns from 'dns/promises'

class HTTP_Agentflow implements INode {
label: string
@@ -19,6 +20,37 @@ class HTTP_Agentflow implements INode {
credential: INodeParams
inputs: INodeParams[]

private sanitizeJsonString(jsonString: string): string {
// Remove common problematic escape sequences that are not valid JSON
let sanitized = jsonString
// Remove escaped square brackets (not valid JSON)
.replace(/\\(\[|\])/g, '$1')
// Fix unquoted string values in JSON (simple case)
.replace(/:\s*([a-zA-Z][a-zA-Z0-9]*)\s*([,}])/g, ': "$1"$2')
// Fix trailing commas
.replace(/,(\s*[}\]])/g, '$1')

return sanitized
}

private parseJsonBody(body: string): any {
try {
// First try to parse as-is
return JSON.parse(body)
} catch (error) {
try {
// If that fails, try to sanitize and parse
const sanitized = this.sanitizeJsonString(body)
return JSON.parse(sanitized)
} catch (sanitizeError) {
// If sanitization also fails, throw the original error with helpful message
throw new Error(
`Invalid JSON format in body. Original error: ${error.message}. Please ensure your JSON is properly formatted with quoted strings and valid escape sequences.`
)
}
}
}

constructor() {
this.label = 'HTTP'
this.name = 'httpAgentflow'
@@ -67,8 +99,7 @@ class HTTP_Agentflow implements INode {
{
label: 'URL',
name: 'url',
type: 'string',
acceptVariable: true
type: 'string'
},
{
label: 'Headers',
@@ -201,6 +232,44 @@ class HTTP_Agentflow implements INode {
]
}

private isDeniedIP(ip: string, denyList: string[]): void {
const parsedIp = ipaddr.parse(ip)
for (const entry of denyList) {
if (entry.includes('/')) {
try {
const [range, _] = entry.split('/')
const parsedRange = ipaddr.parse(range)
if (parsedIp.kind() === parsedRange.kind()) {
if (parsedIp.match(ipaddr.parseCIDR(entry))) {
throw new Error('Access to this host is denied by policy.')
}
}
} catch (error) {
throw new Error(`isDeniedIP: ${error}`)
}
} else if (ip === entry) throw new Error('Access to this host is denied by policy.')
}
}

private async checkDenyList(url: string) {
const httpDenyListString: string | undefined = process.env.HTTP_DENY_LIST
if (!httpDenyListString) return url
const httpDenyList = httpDenyListString.split(',').map((ip) => ip.trim())

const urlObj = new URL(url)

const hostname = urlObj.hostname

if (ipaddr.isValid(hostname)) {
this.isDeniedIP(hostname, httpDenyList)
} else {
const addresses = await dns.lookup(hostname, { all: true })
for (const address of addresses) {
this.isDeniedIP(address.address, httpDenyList)
}
}
}

async run(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
const method = nodeData.inputs?.method as 'GET' | 'POST' | 'PUT' | 'DELETE' | 'PATCH'
const url = nodeData.inputs?.url as string
@@ -263,6 +332,8 @@ class HTTP_Agentflow implements INode {
// Build final URL with query parameters
const finalUrl = queryString ? `${url}${url.includes('?') ? '&' : '?'}${queryString}` : url

await this.checkDenyList(finalUrl)

// Prepare request config
const requestConfig: AxiosRequestConfig = {
method: method as Method,
@@ -275,7 +346,7 @@ class HTTP_Agentflow implements INode {
if (method !== 'GET' && body) {
switch (bodyType) {
case 'json': {
requestConfig.data = typeof body === 'string' ? parseJsonBody(body) : body
requestConfig.data = typeof body === 'string' ? this.parseJsonBody(body) : body
requestHeaders['Content-Type'] = 'application/json'
break
}
@@ -293,14 +364,14 @@ class HTTP_Agentflow implements INode {
break
}
case 'xWwwFormUrlencoded':
requestConfig.data = querystring.stringify(typeof body === 'string' ? parseJsonBody(body) : body)
requestConfig.data = querystring.stringify(typeof body === 'string' ? this.parseJsonBody(body) : body)
requestHeaders['Content-Type'] = 'application/x-www-form-urlencoded'
break
}
}

// Make the secure HTTP request that validates all URLs in redirect chains
const response = await secureAxiosRequest(requestConfig)
// Make the HTTP request
const response = await axios(requestConfig)

// Process response based on response type
let responseData
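Editorial note (illustrative, not part of the diff): the `isDeniedIP`/`checkDenyList` pair shown above resolves the request's hostname and rejects it when any resolved address matches an entry in the comma-separated `HTTP_DENY_LIST` environment variable, where entries may be plain IPs or CIDR ranges. A minimal sketch of that check in isolation, using the same `ipaddr.js` calls as the diff:

```typescript
import * as ipaddr from 'ipaddr.js'

// Sample deny list, e.g. HTTP_DENY_LIST="169.254.169.254,10.0.0.0/8"
const denyList = ['169.254.169.254', '10.0.0.0/8']

const isDenied = (ip: string): boolean =>
    denyList.some((entry) =>
        entry.includes('/')
            ? // CIDR entry: only compare addresses of the same kind (IPv4 vs IPv6)
              ipaddr.parse(ip).kind() === ipaddr.parseCIDR(entry)[0].kind() &&
              ipaddr.parse(ip).match(ipaddr.parseCIDR(entry))
            : ip === entry
    )

console.log(isDenied('10.1.2.3')) // true, falls inside 10.0.0.0/8
console.log(isDenied('8.8.8.8')) // false
```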

@@ -208,7 +208,7 @@ class HumanInput_Agentflow implements INode {
humanInputDescription = (nodeData.inputs?.humanInputDescription as string) || 'Do you want to proceed?'
const messages = [...pastChatHistory, ...runtimeChatHistory]
// Find the last message in the messages array
const lastMessage = messages.length > 0 ? (messages[messages.length - 1] as any).content || '' : ''
const lastMessage = (messages[messages.length - 1] as any).content || ''
humanInputDescription = `${lastMessage}\n\n${humanInputDescription}`
if (isStreamable) {
const sseStreamer: IServerSideEventStreamer = options.sseStreamer as IServerSideEventStreamer
@@ -241,11 +241,8 @@ class HumanInput_Agentflow implements INode {
if (isStreamable) {
const sseStreamer: IServerSideEventStreamer = options.sseStreamer as IServerSideEventStreamer
for await (const chunk of await llmNodeInstance.stream(messages)) {
const content = typeof chunk === 'string' ? chunk : chunk.content.toString()
sseStreamer.streamTokenEvent(chatId, content)

const messageChunk = typeof chunk === 'string' ? new AIMessageChunk(chunk) : chunk
response = response.concat(messageChunk)
sseStreamer.streamTokenEvent(chatId, chunk.content.toString())
response = response.concat(chunk)
}
humanInputDescription = response.content as string
} else {

@@ -1,5 +1,4 @@
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { parseJsonBody } from '../../../src/utils'

class Iteration_Agentflow implements INode {
label: string
@@ -40,17 +39,12 @@ class Iteration_Agentflow implements INode {
const iterationInput = nodeData.inputs?.iterationInput

// Helper function to clean JSON strings with redundant backslashes
const safeParseJson = (str: string): string => {
try {
return parseJsonBody(str)
} catch {
// Try parsing after cleaning
return parseJsonBody(str.replace(/\\(["'[\]{}])/g, '$1'))
}
const cleanJsonString = (str: string): string => {
return str.replace(/\\(["'[\]{}])/g, '$1')
}

const iterationInputArray =
typeof iterationInput === 'string' && iterationInput !== '' ? safeParseJson(iterationInput) : iterationInput
typeof iterationInput === 'string' && iterationInput !== '' ? JSON.parse(cleanJsonString(iterationInput)) : iterationInput

if (!iterationInputArray || !Array.isArray(iterationInputArray)) {
throw new Error('Invalid input array')
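Editorial note (illustrative, not part of the diff): both variants in the hunk above deal with over-escaped JSON produced by template substitution; the regex strips a backslash that precedes a quote, bracket, or brace before parsing. A small illustration of that cleanup step on a sample input:

```typescript
// The cleanup regex from the hunk above, applied to a sample over-escaped payload.
const cleanJsonString = (str: string): string => str.replace(/\\(["'[\]{}])/g, '$1')

const raw = '\\[\\{\\"item\\": 1\\}, \\{\\"item\\": 2\\}\\]' // i.e. \[\{\"item\": 1\}, \{\"item\": 2\}\]
console.log(JSON.parse(cleanJsonString(raw))) // [ { item: 1 }, { item: 2 } ]
```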

@@ -2,20 +2,17 @@ import { BaseChatModel } from '@langchain/core/language_models/chat_models'
import { ICommonObject, IMessage, INode, INodeData, INodeOptionsValue, INodeParams, IServerSideEventStreamer } from '../../../src/Interface'
import { AIMessageChunk, BaseMessageLike, MessageContentText } from '@langchain/core/messages'
import { DEFAULT_SUMMARIZER_TEMPLATE } from '../prompt'
import { z } from 'zod'
import { AnalyticHandler } from '../../../src/handler'
import { ILLMMessage } from '../Interface.Agentflow'
import { ILLMMessage, IStructuredOutput } from '../Interface.Agentflow'
import {
addImageArtifactsToMessages,
extractArtifactsFromResponse,
getPastChatHistoryImageMessages,
getUniqueImageMessages,
processMessagesWithImages,
replaceBase64ImagesWithFileReferences,
replaceInlineDataWithFileReferences,
updateFlowState
} from '../utils'
import { processTemplateVariables, configureStructuredOutput } from '../../../src/utils'
import { flatten } from 'lodash'
import { get } from 'lodash'

class LLM_Agentflow implements INode {
label: string
@@ -34,7 +31,7 @@ class LLM_Agentflow implements INode {
constructor() {
this.label = 'LLM'
this.name = 'llmAgentflow'
this.version = 1.1
this.version = 1.0
this.type = 'LLM'
this.category = 'Agent Flows'
this.description = 'Large language models to analyze user-provided inputs and generate responses'
@@ -290,7 +287,8 @@ class LLM_Agentflow implements INode {
label: 'Key',
name: 'key',
type: 'asyncOptions',
loadMethod: 'listRuntimeStateKeys'
loadMethod: 'listRuntimeStateKeys',
freeSolo: true
},
{
label: 'Value',
@@ -401,11 +399,7 @@ class LLM_Agentflow implements INode {
const role = msg.role
const content = msg.content
if (role && content) {
if (role === 'system') {
messages.unshift({ role, content })
} else {
messages.push({ role, content })
}
messages.push({ role, content })
}
}

@@ -450,16 +444,10 @@ class LLM_Agentflow implements INode {
}
delete nodeData.inputs?.llmMessages

/**
* Add image artifacts from previous assistant responses as user messages
* Images are converted from FILE-STORAGE::<image_path> to base 64 image_url format
*/
await addImageArtifactsToMessages(messages, options)

// Configure structured output if specified
const isStructuredOutput = _llmStructuredOutput && Array.isArray(_llmStructuredOutput) && _llmStructuredOutput.length > 0
if (isStructuredOutput) {
llmNodeInstance = configureStructuredOutput(llmNodeInstance, _llmStructuredOutput)
llmNodeInstance = this.configureStructuredOutput(llmNodeInstance, _llmStructuredOutput)
}

// Initialize response and determine if streaming is possible
@@ -475,11 +463,9 @@ class LLM_Agentflow implements INode {

// Track execution time
const startTime = Date.now()

const sseStreamer: IServerSideEventStreamer | undefined = options.sseStreamer

/*
* Invoke LLM
*/
if (isStreamable) {
response = await this.handleStreamingResponse(sseStreamer, llmNodeInstance, messages, chatId, abortController)
} else {
@@ -488,15 +474,11 @@ class LLM_Agentflow implements INode {
// Stream whole response back to UI if this is the last node
if (isLastNode && options.sseStreamer) {
const sseStreamer: IServerSideEventStreamer = options.sseStreamer as IServerSideEventStreamer
let finalResponse = ''
if (response.content && Array.isArray(response.content)) {
finalResponse = response.content.map((item: any) => item.text).join('\n')
} else if (response.content && typeof response.content === 'string') {
finalResponse = response.content
} else {
finalResponse = JSON.stringify(response, null, 2)
let responseContent = JSON.stringify(response, null, 2)
if (typeof response.content === 'string') {
responseContent = response.content
}
sseStreamer.streamTokenEvent(chatId, finalResponse)
sseStreamer.streamTokenEvent(chatId, responseContent)
}
}

@@ -504,40 +486,6 @@ class LLM_Agentflow implements INode {
const endTime = Date.now()
const timeDelta = endTime - startTime

// Extract artifacts and file annotations from response metadata
let artifacts: any[] = []
let fileAnnotations: any[] = []
if (response.response_metadata) {
const {
artifacts: extractedArtifacts,
fileAnnotations: extractedFileAnnotations,
savedInlineImages
} = await extractArtifactsFromResponse(response.response_metadata, newNodeData, options)

if (extractedArtifacts.length > 0) {
artifacts = extractedArtifacts

// Stream artifacts if this is the last node
if (isLastNode && sseStreamer) {
sseStreamer.streamArtifactsEvent(chatId, artifacts)
}
}

if (extractedFileAnnotations.length > 0) {
fileAnnotations = extractedFileAnnotations

// Stream file annotations if this is the last node
if (isLastNode && sseStreamer) {
sseStreamer.streamFileAnnotationsEvent(chatId, fileAnnotations)
}
}

// Replace inlineData base64 with file references in the response
if (savedInlineImages && savedInlineImages.length > 0) {
replaceInlineDataWithFileReferences(response, savedInlineImages)
}
}

// Update flow state if needed
let newState = { ...state }
if (_llmUpdateState && Array.isArray(_llmUpdateState) && _llmUpdateState.length > 0) {
@@ -557,22 +505,10 @@ class LLM_Agentflow implements INode {
finalResponse = response.content.map((item: any) => item.text).join('\n')
} else if (response.content && typeof response.content === 'string') {
finalResponse = response.content
} else if (response.content === '') {
// Empty response content, this could happen when there is only image data
finalResponse = ''
} else {
finalResponse = JSON.stringify(response, null, 2)
}
const output = this.prepareOutputObject(
response,
finalResponse,
startTime,
endTime,
timeDelta,
isStructuredOutput,
artifacts,
fileAnnotations
)
const output = this.prepareOutputObject(response, finalResponse, startTime, endTime, timeDelta, isStructuredOutput)

// End analytics tracking
if (analyticHandlers && llmIds) {
@@ -584,23 +520,41 @@ class LLM_Agentflow implements INode {
this.sendStreamingEvents(options, chatId, response)
}

// Stream file annotations if any were extracted
if (fileAnnotations.length > 0 && isLastNode && sseStreamer) {
sseStreamer.streamFileAnnotationsEvent(chatId, fileAnnotations)
}

// Process template variables in state
newState = processTemplateVariables(newState, finalResponse)
if (newState && Object.keys(newState).length > 0) {
for (const key in newState) {
const stateValue = newState[key].toString()
if (stateValue.includes('{{ output')) {
// Handle simple output replacement
if (stateValue === '{{ output }}') {
newState[key] = finalResponse
continue
}

/**
* Remove the temporarily added image artifact messages before storing
* This is to avoid storing the actual base64 data into database
*/
const messagesToStore = messages.filter((msg: any) => !msg._isTemporaryImageMessage)
// Handle JSON path expressions like {{ output.item1 }}
// eslint-disable-next-line
const match = stateValue.match(/{{[\s]*output\.([\w\.]+)[\s]*}}/)
if (match) {
try {
// Parse the response if it's JSON
const jsonResponse = typeof finalResponse === 'string' ? JSON.parse(finalResponse) : finalResponse
// Get the value using lodash get
const path = match[1]
const value = get(jsonResponse, path)
newState[key] = value ?? stateValue // Fall back to original if path not found
} catch (e) {
// If JSON parsing fails, keep original template
console.warn(`Failed to parse JSON or find path in output: ${e}`)
newState[key] = stateValue
}
}
}
}
}

// Replace the actual messages array with one that includes the file references for images instead of base64 data
const messagesWithFileReferences = replaceBase64ImagesWithFileReferences(
messagesToStore,
messages,
runtimeImageMessagesWithFileRef,
pastImageMessagesWithFileRef
)
@@ -651,13 +605,7 @@ class LLM_Agentflow implements INode {
{
role: returnRole,
content: finalResponse,
name: nodeData?.label ? nodeData?.label.toLowerCase().replace(/\s/g, '_').trim() : nodeData?.id,
...(((artifacts && artifacts.length > 0) || (fileAnnotations && fileAnnotations.length > 0)) && {
additional_kwargs: {
...(artifacts && artifacts.length > 0 && { artifacts }),
...(fileAnnotations && fileAnnotations.length > 0 && { fileAnnotations })
}
})
name: nodeData?.label ? nodeData?.label.toLowerCase().replace(/\s/g, '_').trim() : nodeData?.id
}
]
}
@@ -827,6 +775,59 @@ class LLM_Agentflow implements INode {
}
}

/**
* Configures structured output for the LLM
*/
private configureStructuredOutput(llmNodeInstance: BaseChatModel, llmStructuredOutput: IStructuredOutput[]): BaseChatModel {
try {
const zodObj: ICommonObject = {}
for (const sch of llmStructuredOutput) {
if (sch.type === 'string') {
zodObj[sch.key] = z.string().describe(sch.description || '')
} else if (sch.type === 'stringArray') {
zodObj[sch.key] = z.array(z.string()).describe(sch.description || '')
} else if (sch.type === 'number') {
zodObj[sch.key] = z.number().describe(sch.description || '')
} else if (sch.type === 'boolean') {
zodObj[sch.key] = z.boolean().describe(sch.description || '')
} else if (sch.type === 'enum') {
const enumValues = sch.enumValues?.split(',').map((item: string) => item.trim()) || []
zodObj[sch.key] = z
.enum(enumValues.length ? (enumValues as [string, ...string[]]) : ['default'])
.describe(sch.description || '')
} else if (sch.type === 'jsonArray') {
const jsonSchema = sch.jsonSchema
if (jsonSchema) {
try {
// Parse the JSON schema
const schemaObj = JSON.parse(jsonSchema)

// Create a Zod schema from the JSON schema
const itemSchema = this.createZodSchemaFromJSON(schemaObj)

// Create an array schema of the item schema
zodObj[sch.key] = z.array(itemSchema).describe(sch.description || '')
} catch (err) {
console.error(`Error parsing JSON schema for ${sch.key}:`, err)
// Fallback to generic array of records
zodObj[sch.key] = z.array(z.record(z.any())).describe(sch.description || '')
}
} else {
// If no schema provided, use generic array of records
zodObj[sch.key] = z.array(z.record(z.any())).describe(sch.description || '')
}
}
}
const structuredOutput = z.object(zodObj)

// @ts-ignore
return llmNodeInstance.withStructuredOutput(structuredOutput)
} catch (exception) {
console.error(exception)
return llmNodeInstance
}
}

/**
* Handles streaming response from the LLM
*/
@@ -843,20 +844,16 @@ class LLM_Agentflow implements INode {
for await (const chunk of await llmNodeInstance.stream(messages, { signal: abortController?.signal })) {
if (sseStreamer) {
let content = ''

if (typeof chunk === 'string') {
content = chunk
} else if (Array.isArray(chunk.content) && chunk.content.length > 0) {
if (Array.isArray(chunk.content) && chunk.content.length > 0) {
const contents = chunk.content as MessageContentText[]
content = contents.map((item) => item.text).join('')
} else if (chunk.content) {
} else {
content = chunk.content.toString()
}
sseStreamer.streamTokenEvent(chatId, content)
}

const messageChunk = typeof chunk === 'string' ? new AIMessageChunk(chunk) : chunk
response = response.concat(messageChunk)
response = response.concat(chunk)
}
} catch (error) {
console.error('Error during streaming:', error)
@@ -878,9 +875,7 @@ class LLM_Agentflow implements INode {
startTime: number,
endTime: number,
timeDelta: number,
isStructuredOutput: boolean,
artifacts: any[] = [],
fileAnnotations: any[] = []
isStructuredOutput: boolean
): any {
const output: any = {
content: finalResponse,
@@ -899,27 +894,15 @@ class LLM_Agentflow implements INode {
output.usageMetadata = response.usage_metadata
}

if (response.response_metadata) {
output.responseMetadata = response.response_metadata
}

if (isStructuredOutput && typeof response === 'object') {
const structuredOutput = response as Record<string, any>
for (const key in structuredOutput) {
if (structuredOutput[key] !== undefined && structuredOutput[key] !== null) {
if (structuredOutput[key]) {
output[key] = structuredOutput[key]
}
}
}

if (artifacts && artifacts.length > 0) {
output.artifacts = flatten(artifacts)
}

if (fileAnnotations && fileAnnotations.length > 0) {
output.fileAnnotations = fileAnnotations
}

return output
}

@@ -930,12 +913,7 @@ class LLM_Agentflow implements INode {
const sseStreamer: IServerSideEventStreamer = options.sseStreamer as IServerSideEventStreamer

if (response.tool_calls) {
const formattedToolCalls = response.tool_calls.map((toolCall: any) => ({
tool: toolCall.name || 'tool',
toolInput: toolCall.args,
toolOutput: ''
}))
sseStreamer.streamCalledToolsEvent(chatId, flatten(formattedToolCalls))
sseStreamer.streamCalledToolsEvent(chatId, response.tool_calls)
}

if (response.usage_metadata) {
@@ -944,6 +922,107 @@ class LLM_Agentflow implements INode {

sseStreamer.streamEndEvent(chatId)
}

/**
* Creates a Zod schema from a JSON schema object
* @param jsonSchema The JSON schema object
* @returns A Zod schema
*/
private createZodSchemaFromJSON(jsonSchema: any): z.ZodTypeAny {
// If the schema is an object with properties, create an object schema
if (typeof jsonSchema === 'object' && jsonSchema !== null) {
const schemaObj: Record<string, z.ZodTypeAny> = {}

// Process each property in the schema
for (const [key, value] of Object.entries(jsonSchema)) {
if (value === null) {
// Handle null values
schemaObj[key] = z.null()
} else if (typeof value === 'object' && !Array.isArray(value)) {
// Check if the property has a type definition
if ('type' in value) {
const type = value.type as string
const description = ('description' in value ? (value.description as string) : '') || ''

// Create the appropriate Zod type based on the type property
if (type === 'string') {
schemaObj[key] = z.string().describe(description)
} else if (type === 'number') {
schemaObj[key] = z.number().describe(description)
} else if (type === 'boolean') {
schemaObj[key] = z.boolean().describe(description)
} else if (type === 'array') {
// If it's an array type, check if items is defined
if ('items' in value && value.items) {
const itemSchema = this.createZodSchemaFromJSON(value.items)
schemaObj[key] = z.array(itemSchema).describe(description)
} else {
// Default to array of any if items not specified
schemaObj[key] = z.array(z.any()).describe(description)
}
} else if (type === 'object') {
// If it's an object type, check if properties is defined
if ('properties' in value && value.properties) {
const nestedSchema = this.createZodSchemaFromJSON(value.properties)
schemaObj[key] = nestedSchema.describe(description)
} else {
// Default to record of any if properties not specified
schemaObj[key] = z.record(z.any()).describe(description)
}
} else {
// Default to any for unknown types
schemaObj[key] = z.any().describe(description)
}

// Check if the property is optional
if ('optional' in value && value.optional === true) {
schemaObj[key] = schemaObj[key].optional()
}
} else if (Array.isArray(value)) {
// Array values without a type property
if (value.length > 0) {
// If the array has items, recursively create a schema for the first item
const itemSchema = this.createZodSchemaFromJSON(value[0])
schemaObj[key] = z.array(itemSchema)
} else {
// Empty array, allow any array
schemaObj[key] = z.array(z.any())
}
} else {
// It's a nested object without a type property, recursively create schema
schemaObj[key] = this.createZodSchemaFromJSON(value)
}
} else if (Array.isArray(value)) {
// Array values
if (value.length > 0) {
// If the array has items, recursively create a schema for the first item
const itemSchema = this.createZodSchemaFromJSON(value[0])
schemaObj[key] = z.array(itemSchema)
} else {
// Empty array, allow any array
schemaObj[key] = z.array(z.any())
}
} else {
// For primitive values (which shouldn't be in the schema directly)
// Use the corresponding Zod type
if (typeof value === 'string') {
schemaObj[key] = z.string()
} else if (typeof value === 'number') {
schemaObj[key] = z.number()
} else if (typeof value === 'boolean') {
schemaObj[key] = z.boolean()
} else {
schemaObj[key] = z.any()
}
}
}

return z.object(schemaObj)
}

// Fallback to any for unknown types
return z.any()
}
}

module.exports = { nodeClass: LLM_Agentflow }
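Editorial note (illustrative, not part of the diff): the restored `createZodSchemaFromJSON` walks a plain JSON description and emits a matching Zod schema. A hedged sketch of the input shape those branches imply, where each property is a `{ type, description?, optional?, properties?, items? }` descriptor, and roughly the schema the recursion would build for it (empty `describe('')` calls omitted):

```typescript
import { z } from 'zod'

// Hypothetical input in the shape the branches above expect.
const schemaJson = {
    title: { type: 'string', description: 'Item title' },
    meta: { type: 'object', properties: { views: { type: 'number' }, draft: { type: 'boolean', optional: true } } }
}

// Roughly what createZodSchemaFromJSON(schemaJson) would produce:
const equivalent = z.object({
    title: z.string().describe('Item title'),
    meta: z.object({ views: z.number(), draft: z.boolean().optional() })
})
```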

@@ -1,5 +1,4 @@
import { ICommonObject, INode, INodeData, INodeOptionsValue, INodeParams } from '../../../src/Interface'
import { updateFlowState } from '../utils'

class Loop_Agentflow implements INode {
label: string
@@ -20,7 +19,7 @@ class Loop_Agentflow implements INode {
constructor() {
this.label = 'Loop'
this.name = 'loopAgentflow'
this.version = 1.2
this.version = 1.0
this.type = 'Loop'
this.category = 'Agent Flows'
this.description = 'Loop back to a previous node'
@@ -41,39 +40,6 @@ class Loop_Agentflow implements INode {
name: 'maxLoopCount',
type: 'number',
default: 5
},
{
label: 'Fallback Message',
name: 'fallbackMessage',
type: 'string',
description: 'Message to display if the loop count is exceeded',
placeholder: 'Enter your fallback message here',
rows: 4,
acceptVariable: true,
optional: true
},
{
label: 'Update Flow State',
name: 'loopUpdateState',
description: 'Update runtime state during the execution of the workflow',
type: 'array',
optional: true,
acceptVariable: true,
array: [
{
label: 'Key',
name: 'key',
type: 'asyncOptions',
loadMethod: 'listRuntimeStateKeys'
},
{
label: 'Value',
name: 'value',
type: 'string',
acceptVariable: true,
acceptNodeOutputAsVariable: true
}
]
}
]
}
@@ -92,20 +58,12 @@ class Loop_Agentflow implements INode {
})
}
return returnOptions
},
async listRuntimeStateKeys(_: INodeData, options: ICommonObject): Promise<INodeOptionsValue[]> {
const previousNodes = options.previousNodes as ICommonObject[]
const startAgentflowNode = previousNodes.find((node) => node.name === 'startAgentflow')
const state = startAgentflowNode?.inputs?.startState as ICommonObject[]
return state.map((item) => ({ label: item.key, name: item.key }))
}
}

async run(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
const loopBackToNode = nodeData.inputs?.loopBackToNode as string
const _maxLoopCount = nodeData.inputs?.maxLoopCount as string
const fallbackMessage = nodeData.inputs?.fallbackMessage as string
const _loopUpdateState = nodeData.inputs?.loopUpdateState

const state = options.agentflowRuntime?.state as ICommonObject

@@ -117,34 +75,16 @@ class Loop_Agentflow implements INode {
maxLoopCount: _maxLoopCount ? parseInt(_maxLoopCount) : 5
}

const finalOutput = 'Loop back to ' + `${loopBackToNodeLabel} (${loopBackToNodeId})`

// Update flow state if needed
let newState = { ...state }
if (_loopUpdateState && Array.isArray(_loopUpdateState) && _loopUpdateState.length > 0) {
newState = updateFlowState(state, _loopUpdateState)
}

// Process template variables in state
if (newState && Object.keys(newState).length > 0) {
for (const key in newState) {
if (newState[key].toString().includes('{{ output }}')) {
newState[key] = finalOutput
}
}
}

const returnOutput = {
id: nodeData.id,
name: this.name,
input: data,
output: {
content: finalOutput,
content: 'Loop back to ' + `${loopBackToNodeLabel} (${loopBackToNodeId})`,
nodeID: loopBackToNodeId,
maxLoopCount: _maxLoopCount ? parseInt(_maxLoopCount) : 5,
fallbackMessage
maxLoopCount: _maxLoopCount ? parseInt(_maxLoopCount) : 5
},
state: newState
state
}

return returnOutput

@@ -8,7 +8,6 @@ import {
IServerSideEventStreamer
} from '../../../src/Interface'
import { updateFlowState } from '../utils'
import { processTemplateVariables } from '../../../src/utils'
import { DataSource } from 'typeorm'
import { BaseRetriever } from '@langchain/core/retrievers'
import { Document } from '@langchain/core/documents'
@@ -36,7 +35,7 @@ class Retriever_Agentflow implements INode {
constructor() {
this.label = 'Retriever'
this.name = 'retrieverAgentflow'
this.version = 1.1
this.version = 1.0
this.type = 'Retriever'
this.category = 'Agent Flows'
this.description = 'Retrieve information from vector database'
@@ -87,7 +86,8 @@ class Retriever_Agentflow implements INode {
label: 'Key',
name: 'key',
type: 'asyncOptions',
loadMethod: 'listRuntimeStateKeys'
loadMethod: 'listRuntimeStateKeys',
freeSolo: true
},
{
label: 'Value',
@@ -197,7 +197,14 @@ class Retriever_Agentflow implements INode {
sseStreamer.streamTokenEvent(chatId, finalOutput)
}

newState = processTemplateVariables(newState, finalOutput)
// Process template variables in state
if (newState && Object.keys(newState).length > 0) {
for (const key in newState) {
if (newState[key].toString().includes('{{ output }}')) {
newState[key] = finalOutput
}
}
}

const returnOutput = {
id: nodeData.id,

@@ -1,6 +1,5 @@
import { ICommonObject, INode, INodeData, INodeOptionsValue, INodeParams, IServerSideEventStreamer } from '../../../src/Interface'
import { updateFlowState } from '../utils'
import { processTemplateVariables } from '../../../src/utils'
import { Tool } from '@langchain/core/tools'
import { ARTIFACTS_PREFIX, TOOL_ARGS_PREFIX } from '../../../src/agents'
import zodToJsonSchema from 'zod-to-json-schema'
@@ -29,7 +28,7 @@ class Tool_Agentflow implements INode {
constructor() {
this.label = 'Tool'
this.name = 'toolAgentflow'
this.version = 1.2
this.version = 1.1
this.type = 'Tool'
this.category = 'Agent Flows'
this.description = 'Tools allow LLM to interact with external systems'
@@ -80,7 +79,8 @@ class Tool_Agentflow implements INode {
label: 'Key',
name: 'key',
type: 'asyncOptions',
loadMethod: 'listRuntimeStateKeys'
loadMethod: 'listRuntimeStateKeys',
freeSolo: true
},
{
label: 'Value',
@@ -330,7 +330,14 @@ class Tool_Agentflow implements INode {
sseStreamer.streamTokenEvent(chatId, toolOutput)
}

newState = processTemplateVariables(newState, toolOutput)
// Process template variables in state
if (newState && Object.keys(newState).length > 0) {
for (const key in newState) {
if (newState[key].toString().includes('{{ output }}')) {
newState[key] = toolOutput
}
}
}

const returnOutput = {
id: nodeData.id,
@ -1,11 +1,10 @@
|
|||
import { BaseMessage, MessageContentImageUrl, AIMessageChunk } from '@langchain/core/messages'
|
||||
import { BaseMessage, MessageContentImageUrl } from '@langchain/core/messages'
|
||||
import { getImageUploads } from '../../src/multiModalUtils'
|
||||
import { addSingleFileToStorage, getFileFromStorage } from '../../src/storageUtils'
|
||||
import { ICommonObject, IFileUpload, INodeData } from '../../src/Interface'
|
||||
import { getFileFromStorage } from '../../src/storageUtils'
|
||||
import { ICommonObject, IFileUpload } from '../../src/Interface'
|
||||
import { BaseMessageLike } from '@langchain/core/messages'
|
||||
import { IFlowState } from './Interface.Agentflow'
|
||||
import { getCredentialData, getCredentialParam, handleEscapeCharacters, mapMimeTypeToInputField } from '../../src/utils'
|
||||
import fetch from 'node-fetch'
|
||||
import { handleEscapeCharacters, mapMimeTypeToInputField } from '../../src/utils'
|
||||
|
||||
export const addImagesToMessages = async (
|
||||
options: ICommonObject,
|
||||
|
|
@ -19,8 +18,7 @@ export const addImagesToMessages = async (
|
|||
for (const upload of imageUploads) {
|
||||
let bf = upload.data
|
||||
if (upload.type == 'stored-file') {
|
||||
const fileName = upload.name.replace(/^FILE-STORAGE::/, '')
|
||||
const contents = await getFileFromStorage(fileName, options.orgId, options.chatflowid, options.chatId)
|
||||
const contents = await getFileFromStorage(upload.name, options.orgId, options.chatflowid, options.chatId)
|
||||
// as the image is stored in the server, read the file and convert it to base64
|
||||
bf = 'data:' + upload.mime + ';base64,' + contents.toString('base64')
|
||||
|
||||
|
|
@ -91,9 +89,8 @@ export const processMessagesWithImages = async (
|
|||
if (item.type === 'stored-file' && item.name && item.mime.startsWith('image/')) {
|
||||
hasImageReferences = true
|
||||
try {
|
||||
const fileName = item.name.replace(/^FILE-STORAGE::/, '')
|
||||
// Get file contents from storage
|
||||
const contents = await getFileFromStorage(fileName, options.orgId, options.chatflowid, options.chatId)
|
||||
const contents = await getFileFromStorage(item.name, options.orgId, options.chatflowid, options.chatId)
|
||||
|
||||
// Create base64 data URL
|
||||
const base64Data = 'data:' + item.mime + ';base64,' + contents.toString('base64')
|
||||
|
|
@ -316,17 +313,13 @@ export const getPastChatHistoryImageMessages = async (
|
|||
if (message.additional_kwargs && message.additional_kwargs.fileUploads) {
|
||||
// example: [{"type":"stored-file","name":"0_DiXc4ZklSTo3M8J4.jpg","mime":"image/jpeg"}]
|
||||
const fileUploads = message.additional_kwargs.fileUploads
|
||||
const artifacts = message.additional_kwargs.artifacts
|
||||
const fileAnnotations = message.additional_kwargs.fileAnnotations
|
||||
const usedTools = message.additional_kwargs.usedTools
|
||||
try {
|
||||
let messageWithFileUploads = ''
|
||||
const uploads: IFileUpload[] = typeof fileUploads === 'string' ? JSON.parse(fileUploads) : fileUploads
|
||||
const imageContents: MessageContentImageUrl[] = []
|
||||
for (const upload of uploads) {
|
||||
if (upload.type === 'stored-file' && upload.mime.startsWith('image/')) {
|
||||
const fileName = upload.name.replace(/^FILE-STORAGE::/, '')
|
||||
const fileData = await getFileFromStorage(fileName, options.orgId, options.chatflowid, options.chatId)
|
||||
const fileData = await getFileFromStorage(upload.name, options.orgId, options.chatflowid, options.chatId)
|
||||
// as the image is stored in the server, read the file and convert it to base64
|
||||
const bf = 'data:' + upload.mime + ';base64,' + fileData.toString('base64')
|
||||
|
||||
|
|
@ -365,83 +358,22 @@ export const getPastChatHistoryImageMessages = async (
|
|||
}
|
||||
}
|
||||
const messageContent = messageWithFileUploads ? `${messageWithFileUploads}\n\n${message.content}` : message.content
|
||||
const hasArtifacts = artifacts && Array.isArray(artifacts) && artifacts.length > 0
|
||||
const hasFileAnnotations = fileAnnotations && Array.isArray(fileAnnotations) && fileAnnotations.length > 0
|
||||
const hasUsedTools = usedTools && Array.isArray(usedTools) && usedTools.length > 0
|
||||
|
||||
if (imageContents.length > 0) {
|
||||
const imageMessage: any = {
|
||||
chatHistory.push({
|
||||
role: messageRole,
|
||||
content: imageContents
|
||||
}
|
||||
if (hasArtifacts || hasFileAnnotations || hasUsedTools) {
|
||||
imageMessage.additional_kwargs = {}
|
||||
if (hasArtifacts) imageMessage.additional_kwargs.artifacts = artifacts
|
||||
if (hasFileAnnotations) imageMessage.additional_kwargs.fileAnnotations = fileAnnotations
|
||||
if (hasUsedTools) imageMessage.additional_kwargs.usedTools = usedTools
|
||||
}
|
||||
chatHistory.push(imageMessage)
|
||||
})
|
||||
transformedPastMessages.push({
|
||||
role: messageRole,
|
||||
content: [...JSON.parse((pastChatHistory[i] as any).additional_kwargs.fileUploads)]
|
||||
})
|
||||
}
|
||||
|
||||
const contentMessage: any = {
|
||||
role: messageRole,
|
||||
content: messageContent
|
||||
}
|
||||
if (hasArtifacts || hasFileAnnotations || hasUsedTools) {
|
||||
contentMessage.additional_kwargs = {}
|
||||
if (hasArtifacts) contentMessage.additional_kwargs.artifacts = artifacts
|
||||
if (hasFileAnnotations) contentMessage.additional_kwargs.fileAnnotations = fileAnnotations
|
||||
if (hasUsedTools) contentMessage.additional_kwargs.usedTools = usedTools
|
||||
}
|
||||
chatHistory.push(contentMessage)
|
||||
} catch (e) {
|
||||
// failed to parse fileUploads, continue with text only
|
||||
const hasArtifacts = artifacts && Array.isArray(artifacts) && artifacts.length > 0
|
||||
const hasFileAnnotations = fileAnnotations && Array.isArray(fileAnnotations) && fileAnnotations.length > 0
|
||||
const hasUsedTools = usedTools && Array.isArray(usedTools) && usedTools.length > 0
|
||||
|
||||
const errorMessage: any = {
|
||||
role: messageRole,
|
||||
content: message.content
|
||||
}
|
||||
if (hasArtifacts || hasFileAnnotations || hasUsedTools) {
|
||||
errorMessage.additional_kwargs = {}
|
||||
if (hasArtifacts) errorMessage.additional_kwargs.artifacts = artifacts
|
||||
if (hasFileAnnotations) errorMessage.additional_kwargs.fileAnnotations = fileAnnotations
|
||||
if (hasUsedTools) errorMessage.additional_kwargs.usedTools = usedTools
|
||||
}
|
||||
chatHistory.push(errorMessage)
|
||||
}
|
||||
} else if (message.additional_kwargs) {
|
||||
const hasArtifacts =
|
||||
message.additional_kwargs.artifacts &&
|
||||
Array.isArray(message.additional_kwargs.artifacts) &&
|
||||
message.additional_kwargs.artifacts.length > 0
|
||||
const hasFileAnnotations =
|
||||
message.additional_kwargs.fileAnnotations &&
|
||||
Array.isArray(message.additional_kwargs.fileAnnotations) &&
|
||||
message.additional_kwargs.fileAnnotations.length > 0
|
||||
const hasUsedTools =
|
||||
message.additional_kwargs.usedTools &&
|
||||
Array.isArray(message.additional_kwargs.usedTools) &&
|
||||
message.additional_kwargs.usedTools.length > 0
|
||||
|
||||
if (hasArtifacts || hasFileAnnotations || hasUsedTools) {
|
||||
const messageAdditionalKwargs: any = {}
|
||||
if (hasArtifacts) messageAdditionalKwargs.artifacts = message.additional_kwargs.artifacts
|
||||
if (hasFileAnnotations) messageAdditionalKwargs.fileAnnotations = message.additional_kwargs.fileAnnotations
|
||||
if (hasUsedTools) messageAdditionalKwargs.usedTools = message.additional_kwargs.usedTools
|
||||
|
||||
chatHistory.push({
|
||||
role: messageRole,
|
||||
content: message.content,
|
||||
additional_kwargs: messageAdditionalKwargs
|
||||
content: messageContent
|
||||
})
|
||||
} else {
|
||||
} catch (e) {
|
||||
// failed to parse fileUploads, continue with text only
|
||||
chatHistory.push({
|
||||
role: messageRole,
|
||||
content: message.content
|
||||
|
|
@ -460,443 +392,12 @@ export const getPastChatHistoryImageMessages = async (
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets MIME type from filename extension
|
||||
*/
|
||||
export const getMimeTypeFromFilename = (filename: string): string => {
|
||||
const extension = filename.toLowerCase().split('.').pop()
|
||||
const mimeTypes: { [key: string]: string } = {
|
||||
png: 'image/png',
|
||||
jpg: 'image/jpeg',
|
||||
jpeg: 'image/jpeg',
|
||||
gif: 'image/gif',
|
||||
pdf: 'application/pdf',
|
||||
txt: 'text/plain',
|
||||
csv: 'text/csv',
|
||||
json: 'application/json',
|
||||
html: 'text/html',
|
||||
xml: 'application/xml'
|
||||
}
|
||||
return mimeTypes[extension || ''] || 'application/octet-stream'
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets artifact type from filename extension for UI rendering
|
||||
*/
|
||||
export const getArtifactTypeFromFilename = (filename: string): string => {
|
||||
const extension = filename.toLowerCase().split('.').pop()
|
||||
const artifactTypes: { [key: string]: string } = {
|
||||
png: 'png',
|
||||
jpg: 'jpeg',
|
||||
jpeg: 'jpeg',
|
||||
html: 'html',
|
||||
htm: 'html',
|
||||
md: 'markdown',
|
||||
markdown: 'markdown',
|
||||
json: 'json',
|
||||
js: 'javascript',
|
||||
javascript: 'javascript',
|
||||
tex: 'latex',
|
||||
latex: 'latex',
|
||||
txt: 'text',
|
||||
csv: 'text',
|
||||
pdf: 'text'
|
||||
}
|
||||
return artifactTypes[extension || ''] || 'text'
|
||||
}
|
||||
|
||||
/**
|
||||
* Saves base64 image data to storage and returns file information
|
||||
*/
|
||||
export const saveBase64Image = async (
|
||||
outputItem: any,
|
||||
options: ICommonObject
|
||||
): Promise<{ filePath: string; fileName: string; totalSize: number } | null> => {
|
||||
try {
|
||||
if (!outputItem.result) {
|
||||
return null
|
||||
}
|
||||
|
||||
// Extract base64 data and create buffer
|
||||
const base64Data = outputItem.result
|
||||
const imageBuffer = Buffer.from(base64Data, 'base64')
|
||||
|
||||
// Determine file extension and MIME type
|
||||
const outputFormat = outputItem.output_format || 'png'
|
||||
const fileName = `generated_image_${outputItem.id || Date.now()}.${outputFormat}`
|
||||
const mimeType = outputFormat === 'png' ? 'image/png' : 'image/jpeg'
|
||||
|
||||
// Save the image using the existing storage utility
|
||||
const { path, totalSize } = await addSingleFileToStorage(
|
||||
mimeType,
|
||||
imageBuffer,
|
||||
fileName,
|
||||
options.orgId,
|
||||
options.chatflowid,
|
||||
options.chatId
|
||||
)
|
||||
|
||||
return { filePath: path, fileName, totalSize }
|
||||
} catch (error) {
|
||||
console.error('Error saving base64 image:', error)
|
||||
return null
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Saves Gemini inline image data to storage and returns file information
|
||||
*/
|
||||
export const saveGeminiInlineImage = async (
|
||||
inlineItem: any,
|
||||
options: ICommonObject
|
||||
): Promise<{ filePath: string; fileName: string; totalSize: number } | null> => {
|
||||
try {
|
||||
if (!inlineItem.data || !inlineItem.mimeType) {
|
||||
return null
|
||||
}
|
||||
|
||||
// Extract base64 data and create buffer
|
||||
const base64Data = inlineItem.data
|
||||
const imageBuffer = Buffer.from(base64Data, 'base64')
|
||||
|
||||
// Determine file extension from MIME type
|
||||
const mimeType = inlineItem.mimeType
|
||||
let extension = 'png'
|
||||
if (mimeType.includes('jpeg') || mimeType.includes('jpg')) {
|
||||
extension = 'jpg'
|
||||
} else if (mimeType.includes('png')) {
|
||||
extension = 'png'
|
||||
} else if (mimeType.includes('gif')) {
|
||||
extension = 'gif'
|
||||
} else if (mimeType.includes('webp')) {
|
||||
extension = 'webp'
|
||||
}
|
||||
|
||||
const fileName = `gemini_generated_image_${Date.now()}.${extension}`
|
||||
|
||||
// Save the image using the existing storage utility
|
||||
const { path, totalSize } = await addSingleFileToStorage(
|
||||
mimeType,
|
||||
imageBuffer,
|
||||
fileName,
|
||||
options.orgId,
|
||||
options.chatflowid,
|
||||
options.chatId
|
||||
)
|
||||
|
||||
return { filePath: path, fileName, totalSize }
|
||||
} catch (error) {
|
||||
console.error('Error saving Gemini inline image:', error)
|
||||
return null
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Downloads file content from container file citation
|
||||
*/
|
||||
export const downloadContainerFile = async (
|
||||
containerId: string,
|
||||
fileId: string,
|
||||
filename: string,
|
||||
modelNodeData: INodeData,
|
||||
options: ICommonObject
|
||||
): Promise<{ filePath: string; totalSize: number } | null> => {
|
||||
try {
|
||||
const credentialData = await getCredentialData(modelNodeData.credential ?? '', options)
|
||||
const openAIApiKey = getCredentialParam('openAIApiKey', credentialData, modelNodeData)
|
||||
|
||||
if (!openAIApiKey) {
|
||||
console.warn('No OpenAI API key available for downloading container file')
|
||||
return null
|
||||
}
|
||||
|
||||
// Download the file using OpenAI Container API
|
||||
const response = await fetch(`https://api.openai.com/v1/containers/${containerId}/files/${fileId}/content`, {
|
||||
method: 'GET',
|
||||
headers: {
|
||||
Accept: '*/*',
|
||||
Authorization: `Bearer ${openAIApiKey}`
|
||||
}
|
||||
})
|
||||
|
||||
if (!response.ok) {
|
||||
console.warn(
|
||||
`Failed to download container file ${fileId} from container ${containerId}: ${response.status} ${response.statusText}`
|
||||
)
|
||||
return null
|
||||
}
|
||||
|
||||
// Extract the binary data from the Response object
|
||||
const data = await response.arrayBuffer()
|
||||
const dataBuffer = Buffer.from(data)
|
||||
const mimeType = getMimeTypeFromFilename(filename)
|
||||
|
||||
// Store the file using the same storage utility as OpenAIAssistant
|
||||
const { path, totalSize } = await addSingleFileToStorage(
|
||||
mimeType,
|
||||
dataBuffer,
|
||||
filename,
|
||||
options.orgId,
|
||||
options.chatflowid,
|
||||
options.chatId
|
||||
)
|
||||
|
||||
return { filePath: path, totalSize }
|
||||
} catch (error) {
|
||||
console.error('Error downloading container file:', error)
|
||||
return null
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Replace inlineData base64 with file references in the response content
 */
export const replaceInlineDataWithFileReferences = (
    response: AIMessageChunk,
    savedInlineImages: Array<{ filePath: string; fileName: string; mimeType: string }>
): void => {
    // Check if content is an array
    if (!Array.isArray(response.content)) {
        return
    }

    // Replace base64 data with file references in response content
    let savedImageIndex = 0
    for (let i = 0; i < response.content.length; i++) {
        const contentItem = response.content[i]
        if (
            typeof contentItem === 'object' &&
            contentItem.type === 'inlineData' &&
            contentItem.inlineData &&
            savedImageIndex < savedInlineImages.length
        ) {
            const savedImage = savedInlineImages[savedImageIndex]
            // Replace with file reference
            response.content[i] = {
                type: 'stored-file',
                name: savedImage.fileName,
                mime: savedImage.mimeType,
                path: savedImage.filePath
            }
            savedImageIndex++
        }
    }

    // Clear the inlineData from response_metadata to avoid duplication
    if (response.response_metadata?.inlineData) {
        delete response.response_metadata.inlineData
    }
}
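
// Illustration only (not from the diff): before/after shape of a content item as
// rewritten by the function above (values are illustrative):
//   before: { type: 'inlineData', inlineData: { mimeType: 'image/png', data: '<base64>' } }
//   after:  { type: 'stored-file', name: 'gemini_generated_image_1700000000000.png',
//             mime: 'image/png', path: '<storage path>' }
// Items are matched to savedInlineImages in encounter order, so the two sequences must
// be aligned the same way they were collected.
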
/**
 * Extracts artifacts from response metadata (both annotations and built-in tools)
 */
export const extractArtifactsFromResponse = async (
    responseMetadata: any,
    modelNodeData: INodeData,
    options: ICommonObject
): Promise<{
    artifacts: any[]
    fileAnnotations: any[]
    savedInlineImages?: Array<{ filePath: string; fileName: string; mimeType: string }>
}> => {
    const artifacts: any[] = []
    const fileAnnotations: any[] = []
    const savedInlineImages: Array<{ filePath: string; fileName: string; mimeType: string }> = []

    // Handle Gemini inline data (image generation)
    if (responseMetadata?.inlineData && Array.isArray(responseMetadata.inlineData)) {
        for (const inlineItem of responseMetadata.inlineData) {
            if (inlineItem.type === 'gemini_inline_data' && inlineItem.data && inlineItem.mimeType) {
                try {
                    const savedImageResult = await saveGeminiInlineImage(inlineItem, options)
                    if (savedImageResult) {
                        // Create artifact in the same format as other image artifacts
                        const fileType = getArtifactTypeFromFilename(savedImageResult.fileName)
                        artifacts.push({
                            type: fileType,
                            data: savedImageResult.filePath
                        })

                        // Track saved image for replacing base64 data in content
                        savedInlineImages.push({
                            filePath: savedImageResult.filePath,
                            fileName: savedImageResult.fileName,
                            mimeType: inlineItem.mimeType
                        })
                    }
                } catch (error) {
                    console.error('Error processing Gemini inline image artifact:', error)
                }
            }
        }
    }

    if (!responseMetadata?.output || !Array.isArray(responseMetadata.output)) {
        return { artifacts, fileAnnotations, savedInlineImages: savedInlineImages.length > 0 ? savedInlineImages : undefined }
    }

    for (const outputItem of responseMetadata.output) {
        // Handle container file citations from annotations
        if (outputItem.type === 'message' && outputItem.content && Array.isArray(outputItem.content)) {
            for (const contentItem of outputItem.content) {
                if (contentItem.annotations && Array.isArray(contentItem.annotations)) {
                    for (const annotation of contentItem.annotations) {
                        if (annotation.type === 'container_file_citation' && annotation.file_id && annotation.filename) {
                            try {
                                // Download and store the file content
                                const downloadResult = await downloadContainerFile(
                                    annotation.container_id,
                                    annotation.file_id,
                                    annotation.filename,
                                    modelNodeData,
                                    options
                                )

                                if (downloadResult) {
                                    const fileType = getArtifactTypeFromFilename(annotation.filename)

                                    if (fileType === 'png' || fileType === 'jpeg' || fileType === 'jpg') {
                                        const artifact = {
                                            type: fileType,
                                            data: downloadResult.filePath
                                        }

                                        artifacts.push(artifact)
                                    } else {
                                        fileAnnotations.push({
                                            filePath: downloadResult.filePath,
                                            fileName: annotation.filename
                                        })
                                    }
                                }
                            } catch (error) {
                                console.error('Error processing annotation:', error)
                            }
                        }
                    }
                }
            }
        }

        // Handle built-in tool artifacts (like image generation)
        if (outputItem.type === 'image_generation_call' && outputItem.result) {
            try {
                const savedImageResult = await saveBase64Image(outputItem, options)
                if (savedImageResult) {
                    // Replace the base64 result with the file path in the response metadata
                    outputItem.result = savedImageResult.filePath

                    // Create artifact in the same format as other image artifacts
                    const fileType = getArtifactTypeFromFilename(savedImageResult.fileName)
                    artifacts.push({
                        type: fileType,
                        data: savedImageResult.filePath
                    })
                }
            } catch (error) {
                console.error('Error processing image generation artifact:', error)
            }
        }
    }

    return { artifacts, fileAnnotations, savedInlineImages: savedInlineImages.length > 0 ? savedInlineImages : undefined }
}
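
// Illustration only (not from the diff): sketch of the intended call sequence,
// combining the helpers above after a model response arrives (names are taken from
// this file's own definitions).
const demoProcessResponse = async (response: AIMessageChunk, nodeData: INodeData, options: ICommonObject) => {
    const { artifacts, fileAnnotations, savedInlineImages } = await extractArtifactsFromResponse(
        response.response_metadata,
        nodeData,
        options
    )
    if (savedInlineImages) {
        // swap raw base64 parts for stored-file references so the persisted payload stays small
        replaceInlineDataWithFileReferences(response, savedInlineImages)
    }
    return { artifacts, fileAnnotations }
}
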
/**
 * Add image artifacts from previous assistant messages as user messages
 * This allows the LLM to see and reference the generated images in the conversation
 * Messages are marked with a special flag for later removal
 */
export const addImageArtifactsToMessages = async (messages: BaseMessageLike[], options: ICommonObject): Promise<void> => {
    const imageExtensions = ['png', 'jpg', 'jpeg', 'gif', 'webp']
    const messagesToInsert: Array<{ index: number; message: any }> = []

    // Iterate through messages to find assistant messages with image artifacts
    for (let i = 0; i < messages.length; i++) {
        const message = messages[i] as any

        // Check if this is an assistant message with artifacts
        if (
            (message.role === 'assistant' || message.role === 'ai') &&
            message.additional_kwargs?.artifacts &&
            Array.isArray(message.additional_kwargs.artifacts)
        ) {
            const artifacts = message.additional_kwargs.artifacts
            const imageArtifacts: Array<{ type: string; name: string; mime: string }> = []

            // Extract image artifacts
            for (const artifact of artifacts) {
                if (artifact.type && artifact.data) {
                    // Check if this is an image artifact by file type
                    if (imageExtensions.includes(artifact.type.toLowerCase())) {
                        // Extract filename from the file path
                        const fileName = artifact.data.split('/').pop() || artifact.data
                        const mimeType = `image/${artifact.type.toLowerCase()}`

                        imageArtifacts.push({
                            type: 'stored-file',
                            name: fileName,
                            mime: mimeType
                        })
                    }
                }
            }

            // If we found image artifacts, prepare to insert a user message after this assistant message
            if (imageArtifacts.length > 0) {
                // Check if the next message already contains these image artifacts to avoid duplicates
                const nextMessage = messages[i + 1] as any
                const shouldInsert =
                    !nextMessage ||
                    nextMessage.role !== 'user' ||
                    !Array.isArray(nextMessage.content) ||
                    !nextMessage.content.some(
                        (item: any) =>
                            (item.type === 'stored-file' || item.type === 'image_url') &&
                            imageArtifacts.some((artifact) => {
                                // Compare with and without FILE-STORAGE:: prefix
                                const artifactName = artifact.name.replace('FILE-STORAGE::', '')
                                const itemName = item.name?.replace('FILE-STORAGE::', '') || ''
                                return artifactName === itemName
                            })
                    )

                if (shouldInsert) {
                    messagesToInsert.push({
                        index: i + 1,
                        message: {
                            role: 'user',
                            content: imageArtifacts,
                            _isTemporaryImageMessage: true // Mark for later removal
                        }
                    })
                }
            }
        }
    }

    // Insert messages in reverse order to maintain correct indices
    for (let i = messagesToInsert.length - 1; i >= 0; i--) {
        const { index, message } = messagesToInsert[i]
        messages.splice(index, 0, message)
    }

    // Convert stored-file references to base64 image_url format
    if (messagesToInsert.length > 0) {
        const { updatedMessages } = await processMessagesWithImages(messages, options)
        // Replace the messages array content with the updated messages
        messages.length = 0
        messages.push(...updatedMessages)
    }
}
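
// Illustration only (not from the diff): the temporary message inserted above has this
// shape (filename illustrative); the _isTemporaryImageMessage flag is what lets it be
// stripped back out before the history is persisted:
//   {
//       role: 'user',
//       content: [{ type: 'stored-file', name: 'gemini_generated_image_1700000000000.png', mime: 'image/png' }],
//       _isTemporaryImageMessage: true
//   }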

/**
 * Updates the flow state with new values
 */
export const updateFlowState = (state: ICommonObject, updateState: IFlowState[]): ICommonObject => {
export const updateFlowState = (state: ICommonObject, llmUpdateState: IFlowState[]): ICommonObject => {
    let newFlowState: Record<string, any> = {}
    for (const state of updateState) {
    for (const state of llmUpdateState) {
        newFlowState[state.key] = state.value
    }
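
// Illustration only (not from the diff): the hunk above is cut off before the return
// statement, so this shows the visible mapping only. Given
//   updateState = [{ key: 'answer', value: '42' }, { key: 'source', value: 'retriever' }]
// the loop produces newFlowState = { answer: '42', source: 'retriever' }, which the
// function presumably merges over the incoming `state`.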

@ -183,7 +183,7 @@ json.dumps(my_dict)`
        // TODO: get print console output
        finalResult = await pyodide.runPythonAsync(code)
    } catch (error) {
        throw new Error(`Sorry, I'm unable to find answer for question: "${input}" using following code: "${pythonCode}"`)
        throw new Error(`Sorry, I'm unable to find answer for question: "${input}" using follwoing code: "${pythonCode}"`)
    }
}

@ -5,7 +5,7 @@ import { RunnableSequence } from '@langchain/core/runnables'
import { BaseChatModel } from '@langchain/core/language_models/chat_models'
import { ChatPromptTemplate, MessagesPlaceholder, HumanMessagePromptTemplate, PromptTemplate } from '@langchain/core/prompts'
import { formatToOpenAIToolMessages } from 'langchain/agents/format_scratchpad/openai_tools'
import { getBaseClasses, transformBracesWithColon, convertChatHistoryToText, convertBaseMessagetoIMessage } from '../../../src/utils'
import { getBaseClasses, transformBracesWithColon } from '../../../src/utils'
import { type ToolsAgentStep } from 'langchain/agents/openai/output_parser'
import {
    FlowiseMemory,

@ -23,10 +23,8 @@ import { Moderation, checkInputs, streamResponse } from '../../moderation/Modera
import { formatResponse } from '../../outputparsers/OutputParserHelpers'
import type { Document } from '@langchain/core/documents'
import { BaseRetriever } from '@langchain/core/retrievers'
import { RESPONSE_TEMPLATE, REPHRASE_TEMPLATE } from '../../chains/ConversationalRetrievalQAChain/prompts'
import { RESPONSE_TEMPLATE } from '../../chains/ConversationalRetrievalQAChain/prompts'
import { addImagesToMessages, llmSupportsVision } from '../../../src/multiModalUtils'
import { StringOutputParser } from '@langchain/core/output_parsers'
import { Tool } from '@langchain/core/tools'

class ConversationalRetrievalToolAgent_Agents implements INode {
    label: string

@ -44,7 +42,7 @@ class ConversationalRetrievalToolAgent_Agents implements INode {
    constructor(fields?: { sessionId?: string }) {
        this.label = 'Conversational Retrieval Tool Agent'
        this.name = 'conversationalRetrievalToolAgent'
        this.author = 'niztal(falkor) and nikitas-novatix'
        this.author = 'niztal(falkor)'
        this.version = 1.0
        this.type = 'AgentExecutor'
        this.category = 'Agents'

@ -81,26 +79,6 @@ class ConversationalRetrievalToolAgent_Agents implements INode {
                optional: true,
                default: RESPONSE_TEMPLATE
            },
            {
                label: 'Rephrase Prompt',
                name: 'rephrasePrompt',
                type: 'string',
                description: 'Using previous chat history, rephrase question into a standalone question',
                warning: 'Prompt must include input variables: {chat_history} and {question}',
                rows: 4,
                additionalParams: true,
                optional: true,
                default: REPHRASE_TEMPLATE
            },
            {
                label: 'Rephrase Model',
                name: 'rephraseModel',
                type: 'BaseChatModel',
                description:
                    'Optional: Use a different (faster/cheaper) model for rephrasing. If not specified, uses the main Tool Calling Chat Model.',
                optional: true,
                additionalParams: true
            },
            {
                label: 'Input Moderation',
                description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',

@ -125,9 +103,8 @@ class ConversationalRetrievalToolAgent_Agents implements INode {
        this.sessionId = fields?.sessionId
    }

    // The agent will be prepared in run() with the correct user message - it needs the actual runtime input for rephrasing
    async init(_nodeData: INodeData, _input: string, _options: ICommonObject): Promise<any> {
        return null
    async init(nodeData: INodeData, input: string, options: ICommonObject): Promise<any> {
        return prepareAgent(nodeData, options, { sessionId: this.sessionId, chatId: options.chatId, input })
    }

    async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | ICommonObject> {

@ -171,23 +148,6 @@ class ConversationalRetrievalToolAgent_Agents implements INode {
                    sseStreamer.streamUsedToolsEvent(chatId, res.usedTools)
                    usedTools = res.usedTools
                }

                // If the tool is set to returnDirect, stream the output to the client
                if (res.usedTools && res.usedTools.length) {
                    let inputTools = nodeData.inputs?.tools
                    inputTools = flatten(inputTools)
                    for (const tool of res.usedTools) {
                        const inputTool = inputTools.find((inputTool: Tool) => inputTool.name === tool.tool)
                        if (inputTool && (inputTool as any).returnDirect && shouldStreamResponse) {
                            sseStreamer.streamTokenEvent(chatId, tool.toolOutput)
                            // Prevent CustomChainHandler from streaming the same output again
                            if (res.output === tool.toolOutput) {
                                res.output = ''
                            }
                        }
                    }
                }
                // The CustomChainHandler will send the stream end event
            } else {
                res = await executor.invoke({ input }, { callbacks: [loggerHandler, ...callbacks] })
                if (res.sourceDocuments) {

@ -250,11 +210,9 @@ const prepareAgent = async (
    flowObj: { sessionId?: string; chatId?: string; input?: string }
) => {
    const model = nodeData.inputs?.model as BaseChatModel
    const rephraseModel = (nodeData.inputs?.rephraseModel as BaseChatModel) || model // Use main model if not specified
    const maxIterations = nodeData.inputs?.maxIterations as string
    const memory = nodeData.inputs?.memory as FlowiseMemory
    let systemMessage = nodeData.inputs?.systemMessage as string
    let rephrasePrompt = nodeData.inputs?.rephrasePrompt as string
    let tools = nodeData.inputs?.tools
    tools = flatten(tools)
    const memoryKey = memory.memoryKey ? memory.memoryKey : 'chat_history'

@ -262,9 +220,6 @@ const prepareAgent = async (
    const vectorStoreRetriever = nodeData.inputs?.vectorStoreRetriever as BaseRetriever

    systemMessage = transformBracesWithColon(systemMessage)
    if (rephrasePrompt) {
        rephrasePrompt = transformBracesWithColon(rephrasePrompt)
    }

    const prompt = ChatPromptTemplate.fromMessages([
        ['system', systemMessage ? systemMessage : `You are a helpful AI assistant.`],

@ -308,37 +263,6 @@ const prepareAgent = async (

    const modelWithTools = model.bindTools(tools)

    // Function to get standalone question (either rephrased or original)
    const getStandaloneQuestion = async (input: string): Promise<string> => {
        // If no rephrase prompt, return the original input
        if (!rephrasePrompt) {
            return input
        }

        // Get chat history (use empty string if none)
        const messages = (await memory.getChatMessages(flowObj?.sessionId, true)) as BaseMessage[]
        const iMessages = convertBaseMessagetoIMessage(messages)
        const chatHistoryString = convertChatHistoryToText(iMessages)

        // Always rephrase to normalize/expand user queries for better retrieval
        try {
            const CONDENSE_QUESTION_PROMPT = PromptTemplate.fromTemplate(rephrasePrompt)
            const condenseQuestionChain = RunnableSequence.from([CONDENSE_QUESTION_PROMPT, rephraseModel, new StringOutputParser()])
            const res = await condenseQuestionChain.invoke({
                question: input,
                chat_history: chatHistoryString
            })
            return res
        } catch (error) {
            console.error('Error rephrasing question:', error)
            // On error, fall back to original input
            return input
        }
    }
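
    // Illustration only (not from the diff): the condense-question pattern above as a
    // standalone snippet, with a hypothetical inline template in place of REPHRASE_TEMPLATE.
    const demoRephrase = async (): Promise<string> => {
        const demoPrompt = PromptTemplate.fromTemplate(
            'Given this conversation:\n{chat_history}\nRephrase "{question}" as a standalone question.'
        )
        const demoChain = RunnableSequence.from([demoPrompt, rephraseModel, new StringOutputParser()])
        return demoChain.invoke({ question: 'and the pricing?', chat_history: 'The user asked about Flowise plans.' })
    }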

    // Get standalone question before creating runnable
    const standaloneQuestion = await getStandaloneQuestion(flowObj?.input || '')

    const runnableAgent = RunnableSequence.from([
        {
            [inputKey]: (i: { input: string; steps: ToolsAgentStep[] }) => i.input,

@ -348,9 +272,7 @@ const prepareAgent = async (
                return messages ?? []
            },
            context: async (i: { input: string; chatHistory?: string }) => {
                // Use the standalone question (rephrased or original) for retrieval
                const retrievalQuery = standaloneQuestion || i.input
                const relevantDocs = await vectorStoreRetriever.invoke(retrievalQuery)
                const relevantDocs = await vectorStoreRetriever.invoke(i.input)
                const formattedDocs = formatDocs(relevantDocs)
                return formattedDocs
            }

@ -373,6 +295,4 @@ const prepareAgent = async (
    return executor
}

module.exports = {
    nodeClass: ConversationalRetrievalToolAgent_Agents
}
module.exports = { nodeClass: ConversationalRetrievalToolAgent_Agents }

@ -578,7 +578,7 @@ class OpenAIAssistant_Agents implements INode {
                toolOutput
            })
        } catch (e) {
            await analyticHandlers.onToolError(toolIds, e)
            await analyticHandlers.onToolEnd(toolIds, e)
            console.error('Error executing tool', e)
            throw new Error(
                `Error executing tool. Tool: ${tool.name}. Thread ID: ${threadId}. Run ID: ${runThreadId}`

@ -703,7 +703,7 @@ class OpenAIAssistant_Agents implements INode {
                toolOutput
            })
        } catch (e) {
            await analyticHandlers.onToolError(toolIds, e)
            await analyticHandlers.onToolEnd(toolIds, e)
            console.error('Error executing tool', e)
            clearInterval(timeout)
            reject(

@ -1096,7 +1096,7 @@ async function handleToolSubmission(params: ToolSubmissionParams): Promise<ToolS
            toolOutput
        })
    } catch (e) {
        await analyticHandlers.onToolError(toolIds, e)
        await analyticHandlers.onToolEnd(toolIds, e)
        console.error('Error executing tool', e)
        throw new Error(`Error executing tool. Tool: ${tool.name}. Thread ID: ${threadId}. Run ID: ${runThreadId}`)
    }

@ -273,9 +273,10 @@ class AzureChatOpenAI_ChatModels implements INode {
                console.error('Error parsing base options', exception)
            }
        }
        if (modelName.includes('o1') || modelName.includes('o3') || modelName.includes('gpt-5')) {
        if (modelName === 'o3-mini' || modelName.includes('o1')) {
            delete obj.temperature
            delete obj.stop
        }
        if (modelName.includes('o1') || modelName.includes('o3')) {
            const reasoning: OpenAIClient.Reasoning = {}
            if (reasoningEffort) {
                reasoning.effort = reasoningEffort

@ -284,11 +285,6 @@ class AzureChatOpenAI_ChatModels implements INode {
                reasoning.summary = reasoningSummary
            }
            obj.reasoning = reasoning

            if (maxTokens) {
                delete obj.maxTokens
                obj.maxCompletionTokens = parseInt(maxTokens, 10)
            }
        }
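
        // Illustration only (not from the diff): for modelName = 'o1-mini',
        // reasoningEffort = 'medium', maxTokens = '1024', the branches above yield
        //   obj.reasoning = { effort: 'medium' }
        //   obj.maxCompletionTokens = 1024 (obj.maxTokens removed)
        // with temperature/stop already deleted for the o-series models.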

        const multiModalOption: IMultiModalOption = {

@ -91,7 +91,7 @@ class ChatAnthropic_ChatModels implements INode {
                label: 'Extended Thinking',
                name: 'extendedThinking',
                type: 'boolean',
                description: 'Enable extended thinking for reasoning model such as Claude Sonnet 3.7 and Claude 4',
                description: 'Enable extended thinking for reasoning model such as Claude Sonnet 3.7',
                optional: true,
                additionalParams: true
            },

@ -1,176 +0,0 @@
import { BaseCache } from '@langchain/core/caches'
import { ChatOpenAI, ChatOpenAIFields } from '@langchain/openai'
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'

class ChatCometAPI_ChatModels implements INode {
    readonly baseURL: string = 'https://api.cometapi.com/v1'
    label: string
    name: string
    version: number
    type: string
    icon: string
    category: string
    description: string
    baseClasses: string[]
    credential: INodeParams
    inputs: INodeParams[]

    constructor() {
        this.label = 'ChatCometAPI'
        this.name = 'chatCometAPI'
        this.version = 1.0
        this.type = 'ChatCometAPI'
        this.icon = 'cometapi.svg'
        this.category = 'Chat Models'
        this.description = 'Wrapper around CometAPI large language models that use the Chat endpoint'
        this.baseClasses = [this.type, ...getBaseClasses(ChatOpenAI)]
        this.credential = {
            label: 'Connect Credential',
            name: 'credential',
            type: 'credential',
            credentialNames: ['cometApi']
        }
        this.inputs = [
            {
                label: 'Cache',
                name: 'cache',
                type: 'BaseCache',
                optional: true
            },
            {
                label: 'Model Name',
                name: 'modelName',
                type: 'string',
                default: 'gpt-5-mini',
                description: 'Enter the model name (e.g., gpt-5-mini, claude-sonnet-4-20250514, gemini-2.0-flash)'
            },
            {
                label: 'Temperature',
                name: 'temperature',
                type: 'number',
                step: 0.1,
                default: 0.7,
                optional: true
            },
            {
                label: 'Streaming',
                name: 'streaming',
                type: 'boolean',
                default: true,
                optional: true,
                additionalParams: true
            },
            {
                label: 'Max Tokens',
                name: 'maxTokens',
                type: 'number',
                step: 1,
                optional: true,
                additionalParams: true
            },
            {
                label: 'Top Probability',
                name: 'topP',
                type: 'number',
                step: 0.1,
                optional: true,
                additionalParams: true
            },
            {
                label: 'Frequency Penalty',
                name: 'frequencyPenalty',
                type: 'number',
                step: 0.1,
                optional: true,
                additionalParams: true
            },
            {
                label: 'Presence Penalty',
                name: 'presencePenalty',
                type: 'number',
                step: 0.1,
                optional: true,
                additionalParams: true
            },
            {
                label: 'Base Options',
                name: 'baseOptions',
                type: 'json',
                optional: true,
                additionalParams: true,
                description: 'Additional options to pass to the CometAPI client. This should be a JSON object.'
            }
        ]
    }

    async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
        const temperature = nodeData.inputs?.temperature as string
        const modelName = nodeData.inputs?.modelName as string
        const maxTokens = nodeData.inputs?.maxTokens as string
        const topP = nodeData.inputs?.topP as string
        const frequencyPenalty = nodeData.inputs?.frequencyPenalty as string
        const presencePenalty = nodeData.inputs?.presencePenalty as string
        const streaming = nodeData.inputs?.streaming as boolean
        const baseOptions = nodeData.inputs?.baseOptions

        if (nodeData.inputs?.credentialId) {
            nodeData.credential = nodeData.inputs?.credentialId
        }
        const credentialData = await getCredentialData(nodeData.credential ?? '', options)
        const openAIApiKey = getCredentialParam('cometApiKey', credentialData, nodeData)

        // Custom error handling for missing API key
        if (!openAIApiKey || openAIApiKey.trim() === '') {
            throw new Error(
                'CometAPI API Key is missing or empty. Please provide a valid CometAPI API key in the credential configuration.'
            )
        }

        // Custom error handling for missing model name
        if (!modelName || modelName.trim() === '') {
            throw new Error('Model Name is required. Please enter a valid model name (e.g., gpt-5-mini, claude-sonnet-4-20250514).')
        }

        const cache = nodeData.inputs?.cache as BaseCache

        const obj: ChatOpenAIFields = {
            temperature: parseFloat(temperature),
            modelName,
            openAIApiKey,
            apiKey: openAIApiKey,
            streaming: streaming ?? true
        }

        if (maxTokens) obj.maxTokens = parseInt(maxTokens, 10)
        if (topP) obj.topP = parseFloat(topP)
        if (frequencyPenalty) obj.frequencyPenalty = parseFloat(frequencyPenalty)
        if (presencePenalty) obj.presencePenalty = parseFloat(presencePenalty)
        if (cache) obj.cache = cache

        let parsedBaseOptions: any | undefined = undefined

        if (baseOptions) {
            try {
                parsedBaseOptions = typeof baseOptions === 'object' ? baseOptions : JSON.parse(baseOptions)
                if (parsedBaseOptions.baseURL) {
                    console.warn("The 'baseURL' parameter is not allowed when using the ChatCometAPI node.")
                    parsedBaseOptions.baseURL = undefined
                }
            } catch (exception) {
                throw new Error('Invalid JSON in the BaseOptions: ' + exception)
            }
        }

        const model = new ChatOpenAI({
            ...obj,
            configuration: {
                baseURL: this.baseURL,
                ...parsedBaseOptions
            }
        })
        return model
    }
}

module.exports = { nodeClass: ChatCometAPI_ChatModels }
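
// Illustration only (not from the diff): the node above boils down to an OpenAI-compatible
// client pointed at CometAPI. A minimal equivalent, assuming a valid key in COMETAPI_KEY:
import { ChatOpenAI as DemoChatOpenAI } from '@langchain/openai'

const demoCometModel = new DemoChatOpenAI({
    modelName: 'gpt-5-mini', // illustrative; any CometAPI-routed model name works here
    apiKey: process.env.COMETAPI_KEY,
    temperature: 0.7,
    configuration: { baseURL: 'https://api.cometapi.com/v1' }
})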

@ -1,7 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<svg version="1.1" xmlns="http://www.w3.org/2000/svg" width="207" height="232">
<path d="M0 0 C0.67675781 0.46921875 1.35351562 0.9384375 2.05078125 1.421875 C17.41087449 13.29936953 26.5563465 33.10648567 29 52 C30.88353091 73.63839398 25.57681073 94.24662535 12.078125 111.44921875 C3.52802952 121.42968735 -6.99880017 129.74646365 -17.29296875 137.85546875 C-20.80734684 140.63958646 -24.27616958 143.47806764 -27.75 146.3125 C-33.38311845 150.9007126 -39.05678704 155.43068832 -44.78027344 159.90551758 C-49.04401373 163.24101866 -53.27752081 166.61244975 -57.5 170 C-65.19965911 176.17403006 -72.95956911 182.26929551 -80.72949219 188.35449219 C-82.46597474 189.71488377 -84.201812 191.07609486 -85.9375 192.4375 C-86.79347778 193.10785278 -86.79347778 193.10785278 -87.66674805 193.79174805 C-90.77553239 196.23184554 -93.86474805 198.69340592 -96.93359375 201.18359375 C-97.59174072 201.71605713 -98.2498877 202.24852051 -98.92797852 202.79711914 C-100.18028531 203.81199799 -101.42972104 204.83043537 -102.67553711 205.85327148 C-113.67406316 214.75771491 -113.67406316 214.75771491 -119 215.3125 C-119.66 215.209375 -120.32 215.10625 -121 215 C-120.76709766 206.13030267 -117.19647897 198.99781748 -113.5 191.125 C-112.58712887 189.14895917 -111.67783856 187.17130284 -110.76916504 185.19332886 C-109.94887846 183.41284051 -109.12017404 181.63623744 -108.29101562 179.85986328 C-106.94852395 177.01575524 -106.94852395 177.01575524 -106 174 C-111.81280806 176.66712818 -116.32649031 179.60521955 -121.0625 183.875 C-121.70026367 184.43727295 -122.33802734 184.9995459 -122.99511719 185.57885742 C-127.01702202 189.14802701 -130.94253647 192.81269758 -134.84643555 196.51000977 C-148.71629517 209.64341666 -148.71629517 209.64341666 -155 213 C-155.99 212.67 -156.98 212.34 -158 212 C-157.4887063 206.47279763 -156.48994938 202.10988914 -154.26953125 196.96484375 C-153.99662766 196.32366104 -153.72372406 195.68247833 -153.44255066 195.02186584 C-152.57534585 192.98952075 -151.69455566 190.96343746 -150.8125 188.9375 C-148.52960643 183.63017721 -146.25725257 178.31827892 -144 173 C-143.70816833 172.31351379 -143.41633667 171.62702759 -143.11566162 170.91973877 C-142.25821582 168.88940943 -141.41429529 166.85397475 -140.57421875 164.81640625 C-140.07881592 163.6260376 -139.58341309 162.43566895 -139.07299805 161.20922852 C-138.07156521 158.21404429 -137.75796746 156.11997935 -138 153 C-146.93919642 160.27430311 -154.518973 168.82712219 -161.6875 177.8125 C-165.32819613 182.32251855 -169.13147644 186.77267556 -174 190 C-174.99 190 -175.98 190 -177 190 C-177.4046542 183.89279484 -176.01282428 179.27664925 -173.9140625 173.57421875 C-173.5860817 172.65574158 -173.25810089 171.7372644 -172.92018127 170.79095459 C-171.83227842 167.75258546 -170.72872039 164.72015532 -169.625 161.6875 C-168.84968909 159.53233408 -168.07537462 157.37680947 -167.30200195 155.22094727 C-165.68521472 150.71991256 -164.06188369 146.22131861 -162.43310547 141.72460938 C-160.13107613 135.36796141 -157.84629984 129.00524003 -155.56640625 122.640625 C-152.24887824 113.38342752 -148.91109678 104.13369846 -145.56491089 94.88682556 C-143.03925807 87.89940757 -140.53790092 80.90366027 -138.04859924 73.9032135 C-136.26757654 68.89703093 -134.47331016 63.8956159 -132.67861938 58.89431763 C-131.8214001 56.49792847 -130.9687066 54.09991471 -130.12094116 51.70016479 C-123.98775591 34.3499988 -118.12632414 19.31829963 -105 6 C-104.44957031 5.37738281 -103.89914063 4.75476563 -103.33203125 4.11328125 C-76.27136915 -25.62003884 -30.00866348 -21.14678947 0 0 Z " fill="#00ACE2" transform="translate(177,17)"/>
<path d="M0 0 C3.59370889 2.76840946 6.81090677 5.77847531 10 9 C10.69867188 9.66515625 11.39734375 10.3303125 12.1171875 11.015625 C16.54012757 15.52461449 19.531169 20.41283818 22.375 26 C22.70902832 26.65234619 23.04305664 27.30469238 23.38720703 27.97680664 C29.78966214 41.20843735 30.40448825 59.20573624 26.08984375 73.18359375 C18.48979965 92.82385108 6.27019435 105.41854323 -13 114 C-29.20527458 120.38314632 -45.91187826 119.08787574 -61.9140625 112.8671875 C-78.47633521 105.1244532 -90.6818902 90.79579279 -97.20117188 73.89526367 C-101.70761398 60.18076397 -101.08909063 42.12663774 -95 29 C-94.57589844 28.06671875 -94.15179688 27.1334375 -93.71484375 26.171875 C-85.2846631 9.3584785 -71.84223513 -1.671465 -54.3125 -7.96484375 C-35.99378812 -13.48589997 -16.09003976 -10.05627485 0 0 Z " fill="#0274C3" transform="translate(163,25)"/>
<path d="M0 0 C3.59370889 2.76840946 6.81090677 5.77847531 10 9 C10.69867188 9.66515625 11.39734375 10.3303125 12.1171875 11.015625 C16.54012757 15.52461449 19.531169 20.41283818 22.375 26 C22.70902832 26.65234619 23.04305664 27.30469238 23.38720703 27.97680664 C29.78966214 41.20843735 30.40448825 59.20573624 26.08984375 73.18359375 C18.48979965 92.82385108 6.27019435 105.41854323 -13 114 C-29.20527458 120.38314632 -45.91187826 119.08787574 -61.9140625 112.8671875 C-78.47633521 105.1244532 -90.6818902 90.79579279 -97.20117188 73.89526367 C-101.70761398 60.18076397 -101.08909063 42.12663774 -95 29 C-94.57589844 28.06671875 -94.15179688 27.1334375 -93.71484375 26.171875 C-85.2846631 9.3584785 -71.84223513 -1.671465 -54.3125 -7.96484375 C-35.99378812 -13.48589997 -16.09003976 -10.05627485 0 0 Z M-72.85546875 22.3046875 C-81.52384195 33.1993642 -85.32925872 46.19509438 -84 60 C-81.19770636 74.79342134 -74.05177982 85.87095721 -62 95 C-50.07317504 102.49999729 -36.59178226 103.84984433 -22.875 100.9375 C-9.58998661 97.14684284 0.96143129 88.7625654 7.6796875 76.7578125 C13.61298631 64.36459073 14.80612594 52.14069452 11.02734375 38.90625 C6.83721139 27.05279572 -1.00703398 17.2712335 -11.984375 10.95703125 C-15.54241409 9.26765223 -19.22605928 8.10166317 -23 7 C-23.99 6.67 -24.98 6.34 -26 6 C-44.99521417 4.32054509 -59.38243396 8.38333807 -72.85546875 22.3046875 Z " fill="#FAFDFE" transform="translate(163,25)"/>
<path d="M0 0 C6.24302767 5.06772084 11.11257121 12.4655725 12.15625 20.50390625 C12.39769334 29.34676869 10.95006126 36.08814626 5.75 43.375 C1.38925675 47.21456516 -1.15219336 48.71018589 -7.05664062 48.625 C-10.77603931 48.20106141 -13.73923312 46.63634037 -17 44.875 C-17.68956787 44.5147876 -18.37913574 44.1545752 -19.08959961 43.78344727 C-41.85230667 31.66318165 -41.85230667 31.66318165 -46.25 21.375 C-47.21511912 15.34300547 -45.21326136 11.66919243 -42.0234375 6.7421875 C-37.16499414 0.25712874 -31.52400844 -3.17464768 -23.75 -5.3125 C-15.10762666 -6.39279667 -7.23796009 -4.98215226 0 0 Z " fill="#FCFDFE" transform="translate(149.25,47.625)"/>
</svg>
@ -174,18 +174,6 @@ class GoogleGenerativeAI_ChatModels implements INode {
                optional: true,
                additionalParams: true
            },
            {
                label: 'Thinking Budget',
                name: 'thinkingBudget',
                type: 'number',
                description: 'Guides the number of thinking tokens. -1 for dynamic, 0 to disable, or positive integer (Gemini 2.5 models).',
                step: 1,
                optional: true,
                additionalParams: true,
                show: {
                    modelName: ['gemini-2.5-pro', 'gemini-2.5-flash', 'gemini-2.5-flash-lite']
                }
            },
            {
                label: 'Base URL',
                name: 'baseUrl',

@ -228,7 +216,6 @@ class GoogleGenerativeAI_ChatModels implements INode {
        const cache = nodeData.inputs?.cache as BaseCache
        const streaming = nodeData.inputs?.streaming as boolean
        const baseUrl = nodeData.inputs?.baseUrl as string | undefined
        const thinkingBudget = nodeData.inputs?.thinkingBudget as string

        const allowImageUploads = nodeData.inputs?.allowImageUploads as boolean

@ -248,7 +235,6 @@ class GoogleGenerativeAI_ChatModels implements INode {
        if (cache) obj.cache = cache
        if (temperature) obj.temperature = parseFloat(temperature)
        if (baseUrl) obj.baseUrl = baseUrl
        if (thinkingBudget) obj.thinkingBudget = parseInt(thinkingBudget, 10)

        let safetySettings: SafetySetting[] = []
        if (_safetySettings) {

@ -174,9 +174,6 @@ export interface GoogleGenerativeAIChatInput extends BaseChatModelParams, Pick<G
     * - Gemini 1.0 Pro version gemini-1.0-pro-002
     */
    convertSystemMessageToHumanContent?: boolean | undefined

    /** Thinking budget for Gemini 2.5 thinking models. Supports -1 (dynamic), 0 (off), or positive integers. */
    thinkingBudget?: number
}

/**

@ -602,17 +599,10 @@ export class LangchainChatGoogleGenerativeAI

    convertSystemMessageToHumanContent: boolean | undefined

    thinkingBudget?: number

    private client: GenerativeModel

    get _isMultimodalModel() {
        return (
            this.model.includes('vision') ||
            this.model.startsWith('gemini-1.5') ||
            this.model.startsWith('gemini-2') ||
            this.model.startsWith('gemini-3')
        )
        return this.model.includes('vision') || this.model.startsWith('gemini-1.5') || this.model.startsWith('gemini-2')
    }

    constructor(fields: GoogleGenerativeAIChatInput) {

@ -667,7 +657,6 @@ export class LangchainChatGoogleGenerativeAI

        this.streaming = fields.streaming ?? this.streaming
        this.json = fields.json
        this.thinkingBudget = fields.thinkingBudget

        this.client = new GenerativeAI(this.apiKey).getGenerativeModel(
            {

@ -687,22 +676,12 @@ export class LangchainChatGoogleGenerativeAI
                baseUrl: fields.baseUrl
            }
        )
        if (this.thinkingBudget !== undefined) {
            ;(this.client.generationConfig as any).thinkingConfig = {
                ...(this.thinkingBudget !== undefined ? { thinkingBudget: this.thinkingBudget } : {})
            }
        }
        this.streamUsage = fields.streamUsage ?? this.streamUsage
    }

    useCachedContent(cachedContent: CachedContent, modelParams?: ModelParams, requestOptions?: RequestOptions): void {
        if (!this.apiKey) return
        this.client = new GenerativeAI(this.apiKey).getGenerativeModelFromCachedContent(cachedContent, modelParams, requestOptions)
        if (this.thinkingBudget !== undefined) {
            ;(this.client.generationConfig as any).thinkingConfig = {
                ...(this.thinkingBudget !== undefined ? { thinkingBudget: this.thinkingBudget } : {})
            }
        }
    }
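
    // Illustration only (not from the diff): effect of the thinkingBudget wiring above
    // on generationConfig for a Gemini 2.5 model:
    //   thinkingBudget: -1   -> thinkingConfig: { thinkingBudget: -1 }   (dynamic thinking)
    //   thinkingBudget: 0    -> thinkingConfig: { thinkingBudget: 0 }    (thinking disabled)
    //   thinkingBudget: 1024 -> thinkingConfig: { thinkingBudget: 1024 } (hard token cap)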

    get useSystemInstruction(): boolean {

@ -791,12 +770,6 @@ export class LangchainChatGoogleGenerativeAI
            this.client.systemInstruction = systemInstruction
            actualPrompt = prompt.slice(1)
        }

        // Ensure actualPrompt is never empty
        if (actualPrompt.length === 0) {
            actualPrompt = [{ role: 'user', parts: [{ text: '...' }] }]
        }

        const parameters = this.invocationParams(options)

        // Handle streaming

@ -861,12 +834,6 @@ export class LangchainChatGoogleGenerativeAI
            this.client.systemInstruction = systemInstruction
            actualPrompt = prompt.slice(1)
        }

        // Ensure actualPrompt is never empty
        if (actualPrompt.length === 0) {
            actualPrompt = [{ role: 'user', parts: [{ text: '...' }] }]
        }

        const parameters = this.invocationParams(options)
        const request = {
            ...parameters,

@ -48,8 +48,6 @@ export function getMessageAuthor(message: BaseMessage) {
}

/**
 * !!! IMPORTANT: Must return 'user' as default instead of throwing error
 * https://github.com/FlowiseAI/Flowise/issues/4743
 * Maps a message type to a Google Generative AI chat author.
 * @param message The message to map.
 * @param model The model to use for mapping.

@ -452,7 +450,6 @@ export function mapGenerateContentResultToChatResult(
    const [candidate] = response.candidates
    const { content: candidateContent, ...generationInfo } = candidate
    let content: MessageContent | undefined
    const inlineDataItems: any[] = []

    if (Array.isArray(candidateContent?.parts) && candidateContent.parts.length === 1 && candidateContent.parts[0].text) {
        content = candidateContent.parts[0].text

@ -473,18 +470,6 @@ export function mapGenerateContentResultToChatResult(
                    type: 'codeExecutionResult',
                    codeExecutionResult: p.codeExecutionResult
                }
            } else if ('inlineData' in p && p.inlineData) {
                // Extract inline image data for processing by Agent
                inlineDataItems.push({
                    type: 'gemini_inline_data',
                    mimeType: p.inlineData.mimeType,
                    data: p.inlineData.data
                })
                // Return the inline data as part of the content structure
                return {
                    type: 'inlineData',
                    inlineData: p.inlineData
                }
            }
            return p
        })

@ -501,12 +486,6 @@ export function mapGenerateContentResultToChatResult(
        text = block?.text ?? text
    }

    // Build response_metadata with inline data if present
    const response_metadata: any = {}
    if (inlineDataItems.length > 0) {
        response_metadata.inlineData = inlineDataItems
    }

    const generation: ChatGeneration = {
        text,
        message: new AIMessage({

@ -521,8 +500,7 @@ export function mapGenerateContentResultToChatResult(
            additional_kwargs: {
                ...generationInfo
            },
            usage_metadata: extra?.usageMetadata,
            response_metadata: Object.keys(response_metadata).length > 0 ? response_metadata : undefined
            usage_metadata: extra?.usageMetadata
        }),
        generationInfo
    }

@ -553,8 +531,6 @@ export function convertResponseContentToChatGenerationChunk(
    const [candidate] = response.candidates
    const { content: candidateContent, ...generationInfo } = candidate
    let content: MessageContent | undefined
    const inlineDataItems: any[] = []

    // Checks if some parts do not have text. If false, it means that the content is a string.
    if (Array.isArray(candidateContent?.parts) && candidateContent.parts.every((p) => 'text' in p)) {
        content = candidateContent.parts.map((p) => p.text).join('')

@ -575,18 +551,6 @@ export function convertResponseContentToChatGenerationChunk(
                    type: 'codeExecutionResult',
                    codeExecutionResult: p.codeExecutionResult
                }
            } else if ('inlineData' in p && p.inlineData) {
                // Extract inline image data for processing by Agent
                inlineDataItems.push({
                    type: 'gemini_inline_data',
                    mimeType: p.inlineData.mimeType,
                    data: p.inlineData.data
                })
                // Return the inline data as part of the content structure
                return {
                    type: 'inlineData',
                    inlineData: p.inlineData
                }
            }
            return p
        })

@ -616,12 +580,6 @@ export function convertResponseContentToChatGenerationChunk(
        )
    }

    // Build response_metadata with inline data if present
    const response_metadata: any = {}
    if (inlineDataItems.length > 0) {
        response_metadata.inlineData = inlineDataItems
    }

    return new ChatGenerationChunk({
        text,
        message: new AIMessageChunk({

@ -631,8 +589,7 @@ export function convertResponseContentToChatGenerationChunk(
            // Each chunk can have unique "generationInfo", and merging strategy is unclear,
            // so leave blank for now.
            additional_kwargs: {},
            usage_metadata: extra.usageMetadata,
            response_metadata: Object.keys(response_metadata).length > 0 ? response_metadata : undefined
            usage_metadata: extra.usageMetadata
        }),
        generationInfo
    })

@ -41,17 +41,15 @@ class ChatHuggingFace_ChatModels implements INode {
                label: 'Model',
                name: 'model',
                type: 'string',
                description:
                    'Model name (e.g., deepseek-ai/DeepSeek-V3.2-Exp:novita). If model includes provider (:) or using router endpoint, leave Endpoint blank.',
                placeholder: 'deepseek-ai/DeepSeek-V3.2-Exp:novita'
                description: 'If using own inference endpoint, leave this blank',
                placeholder: 'gpt2'
            },
            {
                label: 'Endpoint',
                name: 'endpoint',
                type: 'string',
                placeholder: 'https://xyz.eu-west-1.aws.endpoints.huggingface.cloud/gpt2',
                description:
                    'Custom inference endpoint (optional). Not needed for models with providers (:) or router endpoints. Leave blank to use Inference Providers.',
                description: 'Using your own inference endpoint',
                optional: true
            },
            {

@ -105,7 +103,7 @@ class ChatHuggingFace_ChatModels implements INode {
                type: 'string',
                rows: 4,
                placeholder: 'AI assistant:',
                description: 'Sets the stop sequences to use. Use comma to separate different sequences.',
                description: 'Sets the stop sequences to use. Use comma to seperate different sequences.',
                optional: true,
                additionalParams: true
            }

@ -126,15 +124,6 @@ class ChatHuggingFace_ChatModels implements INode {
        const credentialData = await getCredentialData(nodeData.credential ?? '', options)
        const huggingFaceApiKey = getCredentialParam('huggingFaceApiKey', credentialData, nodeData)

        if (!huggingFaceApiKey) {
            console.error('[ChatHuggingFace] API key validation failed: No API key found')
            throw new Error('HuggingFace API key is required. Please configure it in the credential settings.')
        }

        if (!huggingFaceApiKey.startsWith('hf_')) {
            console.warn('[ChatHuggingFace] API key format warning: Key does not start with "hf_"')
        }

        const obj: Partial<HFInput> = {
            model,
            apiKey: huggingFaceApiKey

@ -56,9 +56,9 @@ export class HuggingFaceInference extends LLM implements HFInput {
        this.apiKey = fields?.apiKey ?? getEnvironmentVariable('HUGGINGFACEHUB_API_KEY')
        this.endpointUrl = fields?.endpointUrl
        this.includeCredentials = fields?.includeCredentials
        if (!this.apiKey || this.apiKey.trim() === '') {
        if (!this.apiKey) {
            throw new Error(
                'Please set an API key for HuggingFace Hub. Either configure it in the credential settings in the UI, or set the environment variable HUGGINGFACEHUB_API_KEY.'
                'Please set an API key for HuggingFace Hub in the environment variable HUGGINGFACEHUB_API_KEY or in the apiKey field of the HuggingFaceInference constructor.'
            )
        }
    }

@ -68,21 +68,19 @@ export class HuggingFaceInference extends LLM implements HFInput {
    }

    invocationParams(options?: this['ParsedCallOptions']) {
        // Return parameters compatible with chatCompletion API (OpenAI-compatible format)
        const params: any = {
            temperature: this.temperature,
            max_tokens: this.maxTokens,
            stop: options?.stop ?? this.stopSequences,
            top_p: this.topP
        return {
            model: this.model,
            parameters: {
                // make it behave similar to openai, returning only the generated text
                return_full_text: false,
                temperature: this.temperature,
                max_new_tokens: this.maxTokens,
                stop: options?.stop ?? this.stopSequences,
                top_p: this.topP,
                top_k: this.topK,
                repetition_penalty: this.frequencyPenalty
            }
        }
        // Include optional parameters if they are defined
        if (this.topK !== undefined) {
            params.top_k = this.topK
        }
        if (this.frequencyPenalty !== undefined) {
            params.frequency_penalty = this.frequencyPenalty
        }
        return params
    }
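
    // Illustration only (not from the diff): with temperature = 0.5 and maxTokens = 256,
    // the new invocationParams() returns flat OpenAI-style chat parameters:
    //   { temperature: 0.5, max_tokens: 256, stop: undefined, top_p: undefined }
    // whereas the old version nested HF textGeneration parameters:
    //   { model, parameters: { return_full_text: false, max_new_tokens: 256, ... } }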
|
||||
|
||||
async *_streamResponseChunks(
|
||||
|
|
@ -90,109 +88,51 @@ export class HuggingFaceInference extends LLM implements HFInput {
|
|||
options: this['ParsedCallOptions'],
|
||||
runManager?: CallbackManagerForLLMRun
|
||||
): AsyncGenerator<GenerationChunk> {
|
||||
try {
|
||||
const client = await this._prepareHFInference()
|
||||
const stream = await this.caller.call(async () =>
|
||||
client.chatCompletionStream({
|
||||
model: this.model,
|
||||
messages: [{ role: 'user', content: prompt }],
|
||||
...this.invocationParams(options)
|
||||
const hfi = await this._prepareHFInference()
|
||||
const stream = await this.caller.call(async () =>
|
||||
hfi.textGenerationStream({
|
||||
...this.invocationParams(options),
|
||||
inputs: prompt
|
||||
})
|
||||
)
|
||||
for await (const chunk of stream) {
|
||||
const token = chunk.token.text
|
||||
yield new GenerationChunk({ text: token, generationInfo: chunk })
|
||||
await runManager?.handleLLMNewToken(token ?? '')
|
||||
|
||||
// stream is done
|
||||
if (chunk.generated_text)
|
||||
yield new GenerationChunk({
|
||||
text: '',
|
||||
generationInfo: { finished: true }
|
||||
})
|
||||
)
|
||||
for await (const chunk of stream) {
|
||||
const token = chunk.choices[0]?.delta?.content || ''
|
||||
if (token) {
|
||||
yield new GenerationChunk({ text: token, generationInfo: chunk })
|
||||
await runManager?.handleLLMNewToken(token)
|
||||
}
|
||||
// stream is done when finish_reason is set
|
||||
if (chunk.choices[0]?.finish_reason) {
|
||||
yield new GenerationChunk({
|
||||
text: '',
|
||||
generationInfo: { finished: true }
|
||||
})
|
||||
break
|
||||
}
|
||||
}
|
||||
} catch (error: any) {
|
||||
console.error('[ChatHuggingFace] Error in _streamResponseChunks:', error)
|
||||
// Provide more helpful error messages
|
||||
if (error?.message?.includes('endpointUrl') || error?.message?.includes('third-party provider')) {
|
||||
throw new Error(
|
||||
`Cannot use custom endpoint with model "${this.model}" that includes a provider. Please leave the Endpoint field blank in the UI. Original error: ${error.message}`
|
||||
)
|
||||
}
|
||||
throw error
|
||||
}
|
||||
}
|
||||
|
||||
/** @ignore */
|
||||
async _call(prompt: string, options: this['ParsedCallOptions']): Promise<string> {
|
||||
try {
|
||||
const client = await this._prepareHFInference()
|
||||
// Use chatCompletion for chat models (v4 supports conversational models via Inference Providers)
|
||||
const args = {
|
||||
model: this.model,
|
||||
messages: [{ role: 'user', content: prompt }],
|
||||
...this.invocationParams(options)
|
||||
}
|
||||
const res = await this.caller.callWithOptions({ signal: options.signal }, client.chatCompletion.bind(client), args)
|
||||
const content = res.choices[0]?.message?.content || ''
|
||||
if (!content) {
|
||||
console.error('[ChatHuggingFace] No content in response:', JSON.stringify(res))
|
||||
throw new Error(`No content received from HuggingFace API. Response: ${JSON.stringify(res)}`)
|
||||
}
|
||||
return content
|
||||
} catch (error: any) {
|
||||
console.error('[ChatHuggingFace] Error in _call:', error.message)
|
||||
// Provide more helpful error messages
|
||||
if (error?.message?.includes('endpointUrl') || error?.message?.includes('third-party provider')) {
|
||||
throw new Error(
|
||||
`Cannot use custom endpoint with model "${this.model}" that includes a provider. Please leave the Endpoint field blank in the UI. Original error: ${error.message}`
|
||||
)
|
||||
}
|
||||
if (error?.message?.includes('Invalid username or password') || error?.message?.includes('authentication')) {
|
||||
throw new Error(
|
||||
`HuggingFace API authentication failed. Please verify your API key is correct and starts with "hf_". Original error: ${error.message}`
|
||||
)
|
||||
}
|
||||
throw error
|
||||
}
|
||||
const hfi = await this._prepareHFInference()
|
||||
const args = { ...this.invocationParams(options), inputs: prompt }
|
||||
const res = await this.caller.callWithOptions({ signal: options.signal }, hfi.textGeneration.bind(hfi), args)
|
||||
return res.generated_text
|
||||
}
|
||||
|
||||
/** @ignore */
|
||||
private async _prepareHFInference() {
|
||||
if (!this.apiKey || this.apiKey.trim() === '') {
|
||||
console.error('[ChatHuggingFace] API key validation failed: Empty or undefined')
|
||||
throw new Error('HuggingFace API key is required. Please configure it in the credential settings.')
|
||||
}
|
||||
|
||||
const { InferenceClient } = await HuggingFaceInference.imports()
|
||||
// Use InferenceClient for chat models (works better with Inference Providers)
|
||||
const client = new InferenceClient(this.apiKey)
|
||||
|
||||
// Don't override endpoint if model uses a provider (contains ':') or if endpoint is router-based
|
||||
// When using Inference Providers, endpoint should be left blank - InferenceClient handles routing automatically
|
||||
if (
|
||||
this.endpointUrl &&
|
||||
!this.model.includes(':') &&
|
||||
!this.endpointUrl.includes('/v1/chat/completions') &&
|
||||
!this.endpointUrl.includes('router.huggingface.co')
|
||||
) {
|
||||
return client.endpoint(this.endpointUrl)
|
||||
}
|
||||
|
||||
// Return client without endpoint override - InferenceClient will use Inference Providers automatically
|
||||
return client
|
||||
const { HfInference } = await HuggingFaceInference.imports()
|
||||
const hfi = new HfInference(this.apiKey, {
|
||||
includeCredentials: this.includeCredentials
|
||||
})
|
||||
return this.endpointUrl ? hfi.endpoint(this.endpointUrl) : hfi
|
||||
}
|
||||
|
||||
/** @ignore */
|
||||
static async imports(): Promise<{
|
||||
InferenceClient: typeof import('@huggingface/inference').InferenceClient
|
||||
HfInference: typeof import('@huggingface/inference').HfInference
|
||||
}> {
|
||||
try {
|
||||
const { InferenceClient } = await import('@huggingface/inference')
|
||||
return { InferenceClient }
|
||||
const { HfInference } = await import('@huggingface/inference')
|
||||
return { HfInference }
|
||||
} catch (e) {
|
||||
throw new Error('Please install huggingface as a dependency with, e.g. `pnpm install @huggingface/inference`')
|
||||
}
|
||||
|
|
|
|||
|
|
@ -17,9 +17,9 @@ class ChatNvdiaNIM_ChatModels implements INode {
|
|||
|
||||
constructor() {
|
||||
this.label = 'Chat NVIDIA NIM'
|
||||
this.name = 'chatNvidiaNIM'
|
||||
this.name = 'Chat NVIDIA NIM'
|
||||
this.version = 1.1
|
||||
this.type = 'ChatNvidiaNIM'
|
||||
this.type = 'Chat NVIDIA NIM'
|
||||
this.icon = 'nvdia.svg'
|
||||
this.category = 'Chat Models'
|
||||
this.description = 'Wrapper around NVIDIA NIM Inference API'
|
||||
|
|
|
|||
|
|
@ -280,6 +280,19 @@ class ChatOpenAI_ChatModels implements INode {
|
|||
streaming: streaming ?? true
|
||||
}
|
||||
|
||||
if (modelName.includes('o3') || modelName.includes('o1')) {
|
||||
delete obj.temperature
|
||||
}
|
||||
if (modelName.includes('o1') || modelName.includes('o3')) {
|
||||
const reasoning: OpenAIClient.Reasoning = {}
|
||||
if (reasoningEffort) {
|
||||
reasoning.effort = reasoningEffort
|
||||
}
|
||||
if (reasoningSummary) {
|
||||
reasoning.summary = reasoningSummary
|
||||
}
|
||||
obj.reasoning = reasoning
|
||||
}
|
||||
if (maxTokens) obj.maxTokens = parseInt(maxTokens, 10)
|
||||
if (topP) obj.topP = parseFloat(topP)
|
||||
if (frequencyPenalty) obj.frequencyPenalty = parseFloat(frequencyPenalty)
|
||||
|
|
@ -292,19 +305,6 @@ class ChatOpenAI_ChatModels implements INode {
|
|||
}
|
||||
if (strictToolCalling) obj.supportsStrictToolCalling = strictToolCalling
|
||||
|
||||
if (modelName.includes('o1') || modelName.includes('o3') || modelName.includes('gpt-5')) {
|
||||
delete obj.temperature
|
||||
delete obj.stop
|
||||
const reasoning: OpenAIClient.Reasoning = {}
|
||||
if (reasoningEffort) {
|
||||
reasoning.effort = reasoningEffort
|
||||
}
|
||||
if (reasoningSummary) {
|
||||
reasoning.summary = reasoningSummary
|
||||
}
|
||||
obj.reasoning = reasoning
|
||||
}
|
||||
|
||||
let parsedBaseOptions: any | undefined = undefined
|
||||
|
||||
if (baseOptions) {
|
||||
|
|
|
|||
|
|
@@ -1,8 +1,7 @@
-import { ChatOpenAI as LangchainChatOpenAI, ChatOpenAIFields } from '@langchain/openai'
+import { ChatOpenAI, ChatOpenAIFields } from '@langchain/openai'
 import { BaseCache } from '@langchain/core/caches'
-import { ICommonObject, IMultiModalOption, INode, INodeData, INodeParams } from '../../../src/Interface'
+import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
 import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
-import { ChatOpenRouter } from './FlowiseChatOpenRouter'

 class ChatOpenRouter_ChatModels implements INode {
     label: string

@@ -24,7 +23,7 @@ class ChatOpenRouter_ChatModels implements INode {
         this.icon = 'openRouter.svg'
         this.category = 'Chat Models'
         this.description = 'Wrapper around Open Router Inference API'
-        this.baseClasses = [this.type, ...getBaseClasses(LangchainChatOpenAI)]
+        this.baseClasses = [this.type, ...getBaseClasses(ChatOpenAI)]
         this.credential = {
             label: 'Connect Credential',
             name: 'credential',

@@ -115,40 +114,6 @@ class ChatOpenRouter_ChatModels implements INode {
                 type: 'json',
                 optional: true,
                 additionalParams: true
             },
-            {
-                label: 'Allow Image Uploads',
-                name: 'allowImageUploads',
-                type: 'boolean',
-                description:
-                    'Allow image input. Refer to the <a href="https://docs.flowiseai.com/using-flowise/uploads#image" target="_blank">docs</a> for more details.',
-                default: false,
-                optional: true
-            },
-            {
-                label: 'Image Resolution',
-                description: 'This parameter controls the resolution in which the model views the image.',
-                name: 'imageResolution',
-                type: 'options',
-                options: [
-                    {
-                        label: 'Low',
-                        name: 'low'
-                    },
-                    {
-                        label: 'High',
-                        name: 'high'
-                    },
-                    {
-                        label: 'Auto',
-                        name: 'auto'
-                    }
-                ],
-                default: 'low',
-                optional: false,
-                show: {
-                    allowImageUploads: true
-                }
-            }
         ]
     }

@@ -165,8 +130,6 @@ class ChatOpenRouter_ChatModels implements INode {
         const basePath = (nodeData.inputs?.basepath as string) || 'https://openrouter.ai/api/v1'
         const baseOptions = nodeData.inputs?.baseOptions
         const cache = nodeData.inputs?.cache as BaseCache
-        const allowImageUploads = nodeData.inputs?.allowImageUploads as boolean
-        const imageResolution = nodeData.inputs?.imageResolution as string

         const credentialData = await getCredentialData(nodeData.credential ?? '', options)
         const openRouterApiKey = getCredentialParam('openRouterApiKey', credentialData, nodeData)

@@ -192,7 +155,7 @@ class ChatOpenRouter_ChatModels implements INode {
             try {
                 parsedBaseOptions = typeof baseOptions === 'object' ? baseOptions : JSON.parse(baseOptions)
             } catch (exception) {
-                throw new Error("Invalid JSON in the ChatOpenRouter's BaseOptions: " + exception)
+                throw new Error("Invalid JSON in the ChatCerebras's BaseOptions: " + exception)
             }
         }

@@ -203,15 +166,7 @@ class ChatOpenRouter_ChatModels implements INode {
             }
         }

-        const multiModalOption: IMultiModalOption = {
-            image: {
-                allowImageUploads: allowImageUploads ?? false,
-                imageResolution
-            }
-        }
-
-        const model = new ChatOpenRouter(nodeData.id, obj)
-        model.setMultiModalOption(multiModalOption)
+        const model = new ChatOpenAI(obj)
         return model
     }
 }
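The fallback the new side uses is the generic OpenAI-compatible pattern: a plain ChatOpenAI options object pointed at a custom base path. A rough sketch of that shaping (the configuration field name matches the hunks above; the shape is otherwise illustrative):

// Sketch: reuse OpenAI-style options for an OpenAI-compatible provider.
type OpenAIFields = {
    modelName?: string
    openAIApiKey?: string
    configuration?: { baseURL?: string; defaultHeaders?: Record<string, string> }
}

function openRouterFields(apiKey: string, model: string, basePath?: string): OpenAIFields {
    return {
        modelName: model,
        openAIApiKey: apiKey,
        // OpenRouter speaks the OpenAI wire protocol at its own base URL.
        configuration: { baseURL: basePath ?? 'https://openrouter.ai/api/v1' }
    }
}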
@@ -1,29 +0,0 @@
-import { ChatOpenAI as LangchainChatOpenAI, ChatOpenAIFields } from '@langchain/openai'
-import { IMultiModalOption, IVisionChatModal } from '../../../src'
-
-export class ChatOpenRouter extends LangchainChatOpenAI implements IVisionChatModal {
-    configuredModel: string
-    configuredMaxToken?: number
-    multiModalOption: IMultiModalOption
-    id: string
-
-    constructor(id: string, fields?: ChatOpenAIFields) {
-        super(fields)
-        this.id = id
-        this.configuredModel = fields?.modelName ?? ''
-        this.configuredMaxToken = fields?.maxTokens
-    }
-
-    revertToOriginalModel(): void {
-        this.model = this.configuredModel
-        this.maxTokens = this.configuredMaxToken
-    }
-
-    setMultiModalOption(multiModalOption: IMultiModalOption): void {
-        this.multiModalOption = multiModalOption
-    }
-
-    setVisionModel(): void {
-        // pass - OpenRouter models don't need model switching
-    }
-}
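The deleted wrapper follows a save-and-restore pattern: remember the configured model at construction so a vision-capable variant can be swapped in and reverted later. A minimal sketch of that pattern, independent of LangChain (the interface names below are illustrative stand-ins for the project's IVisionChatModal):

// Sketch: the save/restore pattern behind the deleted ChatOpenRouter wrapper.
interface VisionSwitchable {
    setVisionModel(): void
    revertToOriginalModel(): void
}

class ModelHandle implements VisionSwitchable {
    model: string
    private readonly configuredModel: string

    constructor(model: string) {
        this.model = model
        this.configuredModel = model // remembered so revert is always possible
    }

    setVisionModel(): void {
        // OpenRouter routes vision-capable models itself, so no switch is needed.
    }

    revertToOriginalModel(): void {
        this.model = this.configuredModel
    }
}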
@@ -1,123 +0,0 @@
-import { BaseCache } from '@langchain/core/caches'
-import { ChatOpenAI, ChatOpenAIFields } from '@langchain/openai'
-import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
-import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
-
-class ChatSambanova_ChatModels implements INode {
-    label: string
-    name: string
-    version: number
-    type: string
-    icon: string
-    category: string
-    description: string
-    baseClasses: string[]
-    credential: INodeParams
-    inputs: INodeParams[]
-
-    constructor() {
-        this.label = 'ChatSambanova'
-        this.name = 'chatSambanova'
-        this.version = 1.0
-        this.type = 'ChatSambanova'
-        this.icon = 'sambanova.png'
-        this.category = 'Chat Models'
-        this.description = 'Wrapper around Sambanova Chat Endpoints'
-        this.baseClasses = [this.type, ...getBaseClasses(ChatOpenAI)]
-        this.credential = {
-            label: 'Connect Credential',
-            name: 'credential',
-            type: 'credential',
-            credentialNames: ['sambanovaApi']
-        }
-        this.inputs = [
-            {
-                label: 'Cache',
-                name: 'cache',
-                type: 'BaseCache',
-                optional: true
-            },
-            {
-                label: 'Model',
-                name: 'modelName',
-                type: 'string',
-                default: 'Meta-Llama-3.3-70B-Instruct',
-                placeholder: 'Meta-Llama-3.3-70B-Instruct'
-            },
-            {
-                label: 'Temperature',
-                name: 'temperature',
-                type: 'number',
-                step: 0.1,
-                default: 0.9,
-                optional: true
-            },
-            {
-                label: 'Streaming',
-                name: 'streaming',
-                type: 'boolean',
-                default: true,
-                optional: true
-            },
-            {
-                label: 'BasePath',
-                name: 'basepath',
-                type: 'string',
-                optional: true,
-                default: 'htps://api.sambanova.ai/v1',
-                additionalParams: true
-            },
-            {
-                label: 'BaseOptions',
-                name: 'baseOptions',
-                type: 'json',
-                optional: true,
-                additionalParams: true
-            }
-        ]
-    }
-
-    async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
-        const cache = nodeData.inputs?.cache as BaseCache
-        const temperature = nodeData.inputs?.temperature as string
-        const modelName = nodeData.inputs?.modelName as string
-        const streaming = nodeData.inputs?.streaming as boolean
-        const basePath = nodeData.inputs?.basepath as string
-        const baseOptions = nodeData.inputs?.baseOptions
-
-        const credentialData = await getCredentialData(nodeData.credential ?? '', options)
-        const sambanovaApiKey = getCredentialParam('sambanovaApiKey', credentialData, nodeData)
-
-        const obj: ChatOpenAIFields = {
-            temperature: temperature ? parseFloat(temperature) : undefined,
-            model: modelName,
-            apiKey: sambanovaApiKey,
-            openAIApiKey: sambanovaApiKey,
-            streaming: streaming ?? true
-        }
-
-        if (cache) obj.cache = cache
-
-        let parsedBaseOptions: any | undefined = undefined
-
-        if (baseOptions) {
-            try {
-                parsedBaseOptions = typeof baseOptions === 'object' ? baseOptions : JSON.parse(baseOptions)
-            } catch (exception) {
-                throw new Error("Invalid JSON in the ChatSambanova's BaseOptions: " + exception)
-            }
-        }
-
-        if (basePath || parsedBaseOptions) {
-            obj.configuration = {
-                baseURL: basePath,
-                defaultHeaders: parsedBaseOptions
-            }
-        }
-
-        const model = new ChatOpenAI(obj)
-        return model
-    }
-}
-
-module.exports = { nodeClass: ChatSambanova_ChatModels }

[binary icon removed: 12 KiB]
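The deleted node reuses the defensive BaseOptions parse seen throughout these files: accept an object as-is, otherwise parse JSON and rethrow with a node-specific message. As a standalone sketch:

// Sketch: tolerant parsing of a user-supplied JSON options field.
function parseBaseOptions(baseOptions: unknown, nodeLabel: string): Record<string, string> | undefined {
    if (!baseOptions) return undefined
    try {
        // Accept an already-parsed object, otherwise parse the JSON string.
        return typeof baseOptions === 'object'
            ? (baseOptions as Record<string, string>)
            : JSON.parse(String(baseOptions))
    } catch (exception) {
        throw new Error(`Invalid JSON in the ${nodeLabel}'s BaseOptions: ` + exception)
    }
}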
@@ -95,7 +95,7 @@ class API_DocumentLoaders implements INode {
                 type: 'string',
                 rows: 4,
                 description:
-                    'Each document loader comes with a default set of metadata keys that are extracted from the document. You can use this field to omit some of the default metadata keys. The value should be a list of keys, separated by comma. Use * to omit all metadata keys except the ones you specify in the Additional Metadata field',
+                    'Each document loader comes with a default set of metadata keys that are extracted from the document. You can use this field to omit some of the default metadata keys. The value should be a list of keys, seperated by comma. Use * to omit all metadata keys execept the ones you specify in the Additional Metadata field',
                 placeholder: 'key1, key2, key3.nestedKey1',
                 optional: true,
                 additionalParams: true
@@ -1,12 +1,5 @@
-<svg width="200" height="200" viewBox="0 0 200 200" fill="none" xmlns="http://www.w3.org/2000/svg">
-<g clip-path="url(#clip0_267_4154)">
-<path d="M114.695 0H196.97C198.643 0 200 1.35671 200 3.03031V128.766C200 131.778 196.083 132.945 194.434 130.425L112.159 4.68953C110.841 2.67412 112.287 0 114.695 0Z" fill="#246DFF"/>
-<path d="M85.3048 0H3.0303C1.35671 0 0 1.35671 0 3.03031V128.766C0 131.778 3.91698 132.945 5.566 130.425L87.8405 4.68953C89.1593 2.67412 87.7134 0 85.3048 0Z" fill="#20A34E"/>
-<path d="M98.5909 100.668L5.12683 194.835C3.22886 196.747 4.58334 200 7.27759 200H192.8C195.483 200 196.842 196.77 194.967 194.852L102.908 100.685C101.726 99.4749 99.7824 99.4676 98.5909 100.668Z" fill="#F86606"/>
-</g>
-<defs>
-<clipPath id="clip0_267_4154">
-<rect width="200" height="200" fill="white"/>
-</clipPath>
-</defs>
+<svg width="32" height="32" viewBox="0 0 32 32" fill="none" xmlns="http://www.w3.org/2000/svg">
+<path d="M4.72492 9.35559L6.5 24L15 5.5L6.33616 7.15025C5.30261 7.34712 4.59832 8.3111 4.72492 9.35559Z" fill="#97D700" stroke="#97D700" stroke-width="2" stroke-linejoin="round"/>
+<path d="M26.6204 20.5943L26.5699 20.6161L19.5 4.5L24.0986 4.14626C25.163 4.06438 26.1041 4.83296 26.2365 5.8923L27.8137 18.5094C27.9241 19.3925 27.4377 20.2422 26.6204 20.5943Z" fill="#71C5E8" stroke="#71C5E8" stroke-width="2" stroke-linejoin="round"/>
+<path d="M17.5 10L9.5 28L23 22L17.5 10Z" fill="#FF9114" stroke="#FF9114" stroke-width="2" stroke-linejoin="round"/>
 </svg>

[icon image changed: 827 B before, 653 B after]
@@ -2,7 +2,7 @@ import { TextLoader } from 'langchain/document_loaders/fs/text'
 import Papa from 'papaparse'

 type CSVLoaderOptions = {
-    // Return specific column from key (string) or index (integer)
+    // Return specifific column from key (string) or index (integer)
     column?: string | number
     // Force separator (default: auto detect)
     separator?: string
@@ -119,7 +119,8 @@ class CustomDocumentLoader_DocumentLoaders implements INode {

         try {
             const response = await executeJavaScriptCode(javascriptFunction, sandbox, {
-                libraries: ['axios']
+                libraries: ['axios'],
+                timeout: 10000
             })

             if (output === 'document' && Array.isArray(response)) {
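executeJavaScriptCode is a Flowise-internal helper, so its exact contract is not shown in this diff; a generic way to impose the 10-second ceiling the hunk adds is to race the work against a timer, sketched below:

// Sketch: bound any promise-returning call with a timeout (10s, as in the hunk).
async function withTimeout<T>(work: Promise<T>, ms = 10000): Promise<T> {
    let timer: ReturnType<typeof setTimeout> | undefined
    const deadline = new Promise<never>((_, reject) => {
        timer = setTimeout(() => reject(new Error(`Timed out after ${ms}ms`)), ms)
    })
    try {
        // Whichever settles first wins; the loser is abandoned.
        return await Promise.race([work, deadline])
    } finally {
        if (timer) clearTimeout(timer)
    }
}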
@@ -136,10 +136,9 @@ class File_DocumentLoaders implements INode {

         let files: string[] = []
         const fileBlobs: { blob: Blob; ext: string }[] = []
-        const processRaw = options.processRaw

         //FILE-STORAGE::["CONTRIBUTING.md","LICENSE.md","README.md"]
-        const totalFiles = getOverrideFileInputs(nodeData, processRaw) || fileBase64
+        const totalFiles = getOverrideFileInputs(nodeData) || fileBase64
         if (totalFiles.startsWith('FILE-STORAGE::')) {
             const fileName = totalFiles.replace('FILE-STORAGE::', '')
             if (fileName.startsWith('[') && fileName.endsWith(']')) {

@@ -299,7 +298,7 @@ class File_DocumentLoaders implements INode {
     }
 }

-const getOverrideFileInputs = (nodeData: INodeData, processRaw: boolean) => {
+const getOverrideFileInputs = (nodeData: INodeData) => {
     const txtFileBase64 = nodeData.inputs?.txtFile as string
     const pdfFileBase64 = nodeData.inputs?.pdfFile as string
     const jsonFileBase64 = nodeData.inputs?.jsonFile as string

@@ -348,10 +347,6 @@ const getOverrideFileInputs = (nodeData: INodeData, processRaw: boolean) => {
         files.push(...removePrefix(powerpointFileBase64))
     }

-    if (processRaw) {
-        return files.length ? JSON.stringify(files) : ''
-    }
-
    return files.length ? `FILE-STORAGE::${JSON.stringify(files)}` : ''
 }
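The FILE-STORAGE:: prefix marks values that are storage keys rather than inline base64 payloads; the removed processRaw flag controlled whether the prefix was applied at all. A sketch of encode/decode for that convention:

// Sketch: the FILE-STORAGE:: naming convention used by the file loaders.
const PREFIX = 'FILE-STORAGE::'

function encodeStoredFiles(files: string[], processRaw: boolean): string {
    if (!files.length) return ''
    // Raw mode returns the bare JSON list; otherwise tag it as storage-backed.
    return processRaw ? JSON.stringify(files) : `${PREFIX}${JSON.stringify(files)}`
}

function decodeStoredFiles(value: string): string[] | null {
    if (!value.startsWith(PREFIX)) return null
    const body = value.slice(PREFIX.length)
    // A JSON array of names, or a single bare file name.
    return body.startsWith('[') && body.endsWith(']') ? JSON.parse(body) : [body]
}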
@@ -10,7 +10,6 @@ import { DocxLoader } from '@langchain/community/document_loaders/fs/docx'
 import { LoadOfSheet } from '../MicrosoftExcel/ExcelLoader'
 import { PowerpointLoader } from '../MicrosoftPowerpoint/PowerpointLoader'
 import { handleEscapeCharacters } from '../../../src/utils'
-import { isPathTraversal } from '../../../src/validator'

 class Folder_DocumentLoaders implements INode {
     label: string

@@ -126,14 +125,6 @@ class Folder_DocumentLoaders implements INode {
         const _omitMetadataKeys = nodeData.inputs?.omitMetadataKeys as string
         const output = nodeData.outputs?.output as string

-        if (!folderPath) {
-            throw new Error('Folder path is required')
-        }
-
-        if (isPathTraversal(folderPath)) {
-            throw new Error('Invalid folder path: Path traversal detected. Please provide a safe folder path.')
-        }
-
         let omitMetadataKeys: string[] = []
         if (_omitMetadataKeys) {
             omitMetadataKeys = _omitMetadataKeys.split(',').map((key) => key.trim())
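isPathTraversal lives in the project's validator module and its implementation is not part of this diff; a conservative check in the same spirit might look like the following (an assumption about what it guards against, not its actual code):

// Sketch: reject obviously unsafe paths before touching the filesystem.
function looksLikePathTraversal(p: string): boolean {
    // Null bytes are never legitimate in a path.
    if (p.includes('\0')) return true
    // Any '..' segment can escape the intended directory.
    return p.split(/[\\/]/).some((segment) => segment === '..')
}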
@@ -47,7 +47,7 @@ class Json_DocumentLoaders implements INode {
     constructor() {
         this.label = 'Json File'
         this.name = 'jsonFile'
-        this.version = 3.1
+        this.version = 3.0
         this.type = 'Document'
         this.icon = 'json.svg'
         this.category = 'Document Loaders'

@@ -66,14 +66,6 @@ class Json_DocumentLoaders implements INode {
                 type: 'TextSplitter',
                 optional: true
             },
-            {
-                label: 'Separate by JSON Object (JSON Array)',
-                name: 'separateByObject',
-                type: 'boolean',
-                description: 'If enabled and the file is a JSON Array, each JSON object will be extracted as a chunk',
-                optional: true,
-                additionalParams: true
-            },
             {
                 label: 'Pointers Extraction (separated by commas)',
                 name: 'pointersName',

@@ -81,10 +73,7 @@ class Json_DocumentLoaders implements INode {
                 description:
                     'Ex: { "key": "value" }, Pointer Extraction = "key", "value" will be extracted as pageContent of the chunk. Use comma to separate multiple pointers',
                 placeholder: 'key1, key2',
-                optional: true,
-                hide: {
-                    separateByObject: true
-                }
+                optional: true
             },
             {
                 label: 'Additional Metadata',

@@ -133,7 +122,6 @@ class Json_DocumentLoaders implements INode {
         const pointersName = nodeData.inputs?.pointersName as string
         const metadata = nodeData.inputs?.metadata
         const _omitMetadataKeys = nodeData.inputs?.omitMetadataKeys as string
-        const separateByObject = nodeData.inputs?.separateByObject as boolean
         const output = nodeData.outputs?.output as string

         let omitMetadataKeys: string[] = []

@@ -165,7 +153,7 @@ class Json_DocumentLoaders implements INode {
             if (!file) continue
             const fileData = await getFileFromStorage(file, orgId, chatflowid)
             const blob = new Blob([fileData])
-            const loader = new JSONLoader(blob, pointers.length != 0 ? pointers : undefined, metadata, separateByObject)
+            const loader = new JSONLoader(blob, pointers.length != 0 ? pointers : undefined, metadata)

             if (textSplitter) {
                 let splittedDocs = await loader.load()

@@ -188,7 +176,7 @@ class Json_DocumentLoaders implements INode {
             splitDataURI.pop()
             const bf = Buffer.from(splitDataURI.pop() || '', 'base64')
             const blob = new Blob([bf])
-            const loader = new JSONLoader(blob, pointers.length != 0 ? pointers : undefined, metadata, separateByObject)
+            const loader = new JSONLoader(blob, pointers.length != 0 ? pointers : undefined, metadata)

             if (textSplitter) {
                 let splittedDocs = await loader.load()

@@ -318,20 +306,13 @@ class TextLoader extends BaseDocumentLoader {
 class JSONLoader extends TextLoader {
     public pointers: string[]
     private metadataMapping: Record<string, string>
-    private separateByObject: boolean

-    constructor(
-        filePathOrBlob: string | Blob,
-        pointers: string | string[] = [],
-        metadataMapping: Record<string, string> = {},
-        separateByObject: boolean = false
-    ) {
+    constructor(filePathOrBlob: string | Blob, pointers: string | string[] = [], metadataMapping: Record<string, string> = {}) {
         super(filePathOrBlob)
         this.pointers = Array.isArray(pointers) ? pointers : [pointers]
         if (metadataMapping) {
             this.metadataMapping = typeof metadataMapping === 'object' ? metadataMapping : JSON.parse(metadataMapping)
         }
-        this.separateByObject = separateByObject
     }

     protected async parse(raw: string): Promise<Document[]> {

@@ -342,24 +323,14 @@ class JSONLoader extends TextLoader {
         const jsonArray = Array.isArray(json) ? json : [json]

         for (const item of jsonArray) {
-            if (this.separateByObject) {
-                if (typeof item === 'object' && item !== null && !Array.isArray(item)) {
-                    const metadata = this.extractMetadata(item)
-                    const pageContent = this.formatObjectAsKeyValue(item)
-                    documents.push({
-                        pageContent,
-                        metadata
-                    })
-                }
-            } else {
-                const content = this.extractContent(item)
-                const metadata = this.extractMetadata(item)
-                for (const pageContent of content) {
-                    documents.push({
-                        pageContent,
-                        metadata
-                    })
-                }
-            }
+            const content = this.extractContent(item)
+            const metadata = this.extractMetadata(item)
+
+            for (const pageContent of content) {
+                documents.push({
+                    pageContent,
+                    metadata
+                })
+            }
         }

@@ -399,30 +370,6 @@ class JSONLoader extends TextLoader {
         return metadata
     }

-    /**
-     * Formats a JSON object as readable key-value pairs
-     */
-    private formatObjectAsKeyValue(obj: any, prefix: string = ''): string {
-        const lines: string[] = []
-
-        for (const [key, value] of Object.entries(obj)) {
-            const fullKey = prefix ? `${prefix}.${key}` : key
-
-            if (value === null || value === undefined) {
-                lines.push(`${fullKey}: ${value}`)
-            } else if (Array.isArray(value)) {
-                lines.push(`${fullKey}: ${JSON.stringify(value)}`)
-            } else if (typeof value === 'object') {
-                // Recursively format nested objects
-                lines.push(this.formatObjectAsKeyValue(value, fullKey))
-            } else {
-                lines.push(`${fullKey}: ${value}`)
-            }
-        }
-
-        return lines.join('\n')
-    }
-
     /**
      * If JSON pointers are specified, return all strings below any of them
      * and exclude all other nodes expect if they match a JSON pointer.
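The removed separateByObject mode emits one chunk per array element, with nested keys flattened to dotted key-value lines by formatObjectAsKeyValue. A standalone version of that flattening, with a small worked example:

// Sketch: flatten a JSON object into "dotted.key: value" lines,
// mirroring formatObjectAsKeyValue from the removed code.
function formatObjectAsKeyValue(obj: Record<string, unknown>, prefix = ''): string {
    const lines: string[] = []
    for (const [key, value] of Object.entries(obj)) {
        const fullKey = prefix ? `${prefix}.${key}` : key
        if (value !== null && typeof value === 'object' && !Array.isArray(value)) {
            // Recurse into nested objects, extending the dotted prefix.
            lines.push(formatObjectAsKeyValue(value as Record<string, unknown>, fullKey))
        } else if (Array.isArray(value)) {
            lines.push(`${fullKey}: ${JSON.stringify(value)}`)
        } else {
            lines.push(`${fullKey}: ${value}`)
        }
    }
    return lines.join('\n')
}

// Example: formatObjectAsKeyValue({ user: { name: 'Ada' }, tags: ['a'] })
// -> 'user.name: Ada\ntags: ["a"]'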
@@ -95,30 +95,21 @@ export class OxylabsLoader extends BaseDocumentLoader {
     }

     public async load(): Promise<DocumentInterface[]> {
-        let isUrlSource = this.params.source == 'universal'
-
-        const params = {
-            source: this.params.source,
-            geo_location: this.params.geo_location,
-            render: this.params.render ? 'html' : null,
-            parse: this.params.parse,
-            user_agent_type: this.params.user_agent_type,
-            markdown: !this.params.parse,
-            url: isUrlSource ? this.params.query : null,
-            query: !isUrlSource ? this.params.query : null
-        }
-
-        const response = await this.sendAPIRequest<OxylabsResponse>(params)
-
-        const docs: OxylabsDocument[] = response.data.results.map((result, index) => {
-            const content = typeof result.content === 'string' ? result.content : JSON.stringify(result.content)
-            return {
-                id: `${response.data.job.id.toString()}-${index}`,
-                pageContent: content,
-                metadata: {}
-            }
-        })
+        const response = await this.sendAPIRequest<OxylabsResponse>({
+            url: this.params.query,
+            source: this.params.source,
+            geo_location: this.params.geo_location,
+            render: this.params.render,
+            parse: this.params.parse,
+            user_agent_type: this.params.user_agent_type
+        })
+
+        const docs: OxylabsDocument[] = response.data.results.map((result, index) => ({
+            id: `${response.data.job.id.toString()}-${index}`,
+            pageContent: result.content,
+            metadata: {}
+        }))

         return docs
     }
 }
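On the removed side the request body is shaped by source type: the universal source takes a literal url, anything else a search query. A sketch of that branching, with field names as in the hunk (the parameter type is an assumption):

// Sketch: shape an Oxylabs-style request body depending on source type.
type ScrapeParams = { source: string; query: string; render?: boolean; parse?: boolean }

function buildRequestBody(p: ScrapeParams) {
    const isUrlSource = p.source === 'universal'
    return {
        source: p.source,
        render: p.render ? 'html' : null,
        parse: p.parse,
        markdown: !p.parse, // ask for markdown only when not parsing structured data
        url: isUrlSource ? p.query : null,
        query: !isUrlSource ? p.query : null
    }
}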
@@ -1,15 +1,14 @@
-import { omit } from 'lodash'
-import { ICommonObject, IDocument, INode, INodeData, INodeParams } from '../../../src/Interface'
-import { TextSplitter } from 'langchain/text_splitter'
 import {
     Browser,
     Page,
     PlaywrightWebBaseLoader,
     PlaywrightWebBaseLoaderOptions
 } from '@langchain/community/document_loaders/web/playwright'
 import { Document } from '@langchain/core/documents'
+import { TextSplitter } from 'langchain/text_splitter'
 import { test } from 'linkifyjs'
+import { omit } from 'lodash'
 import { handleEscapeCharacters, INodeOutputsValue, webCrawl, xmlScrape } from '../../../src'
+import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'

 class Playwright_DocumentLoaders implements INode {
     label: string

@@ -114,14 +113,6 @@ class Playwright_DocumentLoaders implements INode {
                 additionalParams: true,
                 description: 'CSS selectors like .div or #div'
             },
-            {
-                label: 'CSS Selector (Optional)',
-                name: 'cssSelector',
-                type: 'string',
-                description: 'Only content inside this selector will be extracted. Leave empty to use the entire page body.',
-                optional: true,
-                additionalParams: true
-            },
             {
                 label: 'Additional Metadata',
                 name: 'metadata',

@@ -164,14 +155,8 @@ class Playwright_DocumentLoaders implements INode {
         const relativeLinksMethod = nodeData.inputs?.relativeLinksMethod as string
         const selectedLinks = nodeData.inputs?.selectedLinks as string[]
         let limit = parseInt(nodeData.inputs?.limit as string)
-        const waitUntilGoToOption = nodeData.inputs?.waitUntilGoToOption as
-            | 'load'
-            | 'domcontentloaded'
-            | 'networkidle'
-            | 'commit'
-            | undefined
-        const waitForSelector = nodeData.inputs?.waitForSelector as string
-        const cssSelector = nodeData.inputs?.cssSelector as string
+        let waitUntilGoToOption = nodeData.inputs?.waitUntilGoToOption as 'load' | 'domcontentloaded' | 'networkidle' | 'commit' | undefined
+        let waitForSelector = nodeData.inputs?.waitForSelector as string
         const _omitMetadataKeys = nodeData.inputs?.omitMetadataKeys as string
         const output = nodeData.outputs?.output as string
         const orgId = options.orgId

@@ -187,17 +172,13 @@ class Playwright_DocumentLoaders implements INode {
             throw new Error('Invalid URL')
         }

-        async function playwrightLoader(url: string): Promise<Document[] | undefined> {
+        async function playwrightLoader(url: string): Promise<any> {
             try {
                 let docs = []
-
-                const executablePath = process.env.PLAYWRIGHT_EXECUTABLE_PATH
-
                 const config: PlaywrightWebBaseLoaderOptions = {
                     launchOptions: {
                         args: ['--no-sandbox'],
-                        headless: true,
-                        executablePath: executablePath
+                        headless: true
                     }
                 }
                 if (waitUntilGoToOption) {

@@ -205,22 +186,12 @@ class Playwright_DocumentLoaders implements INode {
                         waitUntil: waitUntilGoToOption
                     }
                 }
-                if (cssSelector || waitForSelector) {
+                if (waitForSelector) {
                     config['evaluate'] = async (page: Page, _: Browser): Promise<string> => {
-                        if (waitForSelector) {
-                            await page.waitForSelector(waitForSelector)
-                        }
-
-                        if (cssSelector) {
-                            const selectorHandle = await page.$(cssSelector)
-                            const result = await page.evaluate(
-                                (htmlSelection) => htmlSelection?.innerHTML ?? document.body.innerHTML,
-                                selectorHandle
-                            )
-                            return result
-                        } else {
-                            return await page.evaluate(() => document.body.innerHTML)
-                        }
+                        await page.waitForSelector(waitForSelector)
+
+                        const result = await page.evaluate(() => document.body.innerHTML)
+                        return result
                     }
                 }
                 const loader = new PlaywrightWebBaseLoader(url, config)

@@ -237,7 +208,7 @@ class Playwright_DocumentLoaders implements INode {
                 }
             }

-            let docs: Document[] = []
+            let docs: IDocument[] = []
             if (relativeLinksMethod) {
                 if (process.env.DEBUG === 'true') options.logger.info(`[${orgId}]: Start PlaywrightWebBaseLoader ${relativeLinksMethod}`)
                 // if limit is 0 we don't want it to default to 10 so we check explicitly for null or undefined

@@ -254,10 +225,7 @@ class Playwright_DocumentLoaders implements INode {
                 options.logger.info(`[${orgId}]: PlaywrightWebBaseLoader pages: ${JSON.stringify(pages)}, length: ${pages.length}`)
             if (!pages || pages.length === 0) throw new Error('No relative links found')
             for (const page of pages) {
-                const result = await playwrightLoader(page)
-                if (result) {
-                    docs.push(...result)
-                }
+                docs.push(...(await playwrightLoader(page)))
             }
             if (process.env.DEBUG === 'true') options.logger.info(`[${orgId}]: Finish PlaywrightWebBaseLoader ${relativeLinksMethod}`)
         } else if (selectedLinks && selectedLinks.length > 0) {

@@ -266,16 +234,10 @@ class Playwright_DocumentLoaders implements INode {
                     `[${orgId}]: PlaywrightWebBaseLoader pages: ${JSON.stringify(selectedLinks)}, length: ${selectedLinks.length}`
                 )
             for (const page of selectedLinks.slice(0, limit)) {
-                const result = await playwrightLoader(page)
-                if (result) {
-                    docs.push(...result)
-                }
+                docs.push(...(await playwrightLoader(page)))
             }
         } else {
-            const result = await playwrightLoader(url)
-            if (result) {
-                docs.push(...result)
-            }
+            docs = await playwrightLoader(url)
         }

         if (metadata) {
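The removed branch scopes extraction to a CSS selector inside the page context; page.$, page.waitForSelector, and page.evaluate are standard Playwright calls. Roughly, as a standalone helper:

import type { Page } from 'playwright'

// Sketch: wait for an optional selector, then return either the matched
// element's innerHTML or the whole body, as in the removed evaluate hook.
async function extractHtml(page: Page, waitForSelector?: string, cssSelector?: string): Promise<string> {
    if (waitForSelector) {
        await page.waitForSelector(waitForSelector)
    }
    if (cssSelector) {
        const handle = await page.$(cssSelector)
        // The handle is passed into the browser context; fall back to body if no match.
        return await page.evaluate((el) => el?.innerHTML ?? document.body.innerHTML, handle)
    }
    return await page.evaluate(() => document.body.innerHTML)
}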
@@ -1,11 +1,10 @@
-import { Browser, Page, PuppeteerWebBaseLoader, PuppeteerWebBaseLoaderOptions } from '@langchain/community/document_loaders/web/puppeteer'
-import { Document } from '@langchain/core/documents'
-import { TextSplitter } from 'langchain/text_splitter'
-import { test } from 'linkifyjs'
 import { omit } from 'lodash'
-import { PuppeteerLifeCycleEvent } from 'puppeteer'
-import { ICommonObject, IDocument, INode, INodeData, INodeParams } from '../../../src/Interface'
+import { TextSplitter } from 'langchain/text_splitter'
+import { Browser, Page, PuppeteerWebBaseLoader, PuppeteerWebBaseLoaderOptions } from '@langchain/community/document_loaders/web/puppeteer'
+import { test } from 'linkifyjs'
 import { handleEscapeCharacters, INodeOutputsValue, webCrawl, xmlScrape } from '../../../src'
+import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
+import { PuppeteerLifeCycleEvent } from 'puppeteer'

 class Puppeteer_DocumentLoaders implements INode {
     label: string

@@ -110,14 +109,6 @@ class Puppeteer_DocumentLoaders implements INode {
                 additionalParams: true,
                 description: 'CSS selectors like .div or #div'
             },
-            {
-                label: 'CSS Selector (Optional)',
-                name: 'cssSelector',
-                type: 'string',
-                description: 'Only content inside this selector will be extracted. Leave empty to use the entire page body.',
-                optional: true,
-                additionalParams: true
-            },
             {
                 label: 'Additional Metadata',
                 name: 'metadata',

@@ -160,9 +151,8 @@ class Puppeteer_DocumentLoaders implements INode {
         const relativeLinksMethod = nodeData.inputs?.relativeLinksMethod as string
         const selectedLinks = nodeData.inputs?.selectedLinks as string[]
         let limit = parseInt(nodeData.inputs?.limit as string)
-        const waitUntilGoToOption = nodeData.inputs?.waitUntilGoToOption as PuppeteerLifeCycleEvent
-        const waitForSelector = nodeData.inputs?.waitForSelector as string
-        const cssSelector = nodeData.inputs?.cssSelector as string
+        let waitUntilGoToOption = nodeData.inputs?.waitUntilGoToOption as PuppeteerLifeCycleEvent
+        let waitForSelector = nodeData.inputs?.waitForSelector as string
         const _omitMetadataKeys = nodeData.inputs?.omitMetadataKeys as string
         const output = nodeData.outputs?.output as string
         const orgId = options.orgId

@@ -178,17 +168,13 @@ class Puppeteer_DocumentLoaders implements INode {
             throw new Error('Invalid URL')
         }

-        async function puppeteerLoader(url: string): Promise<Document[] | undefined> {
+        async function puppeteerLoader(url: string): Promise<any> {
             try {
-                let docs: Document[] = []
-
-                const executablePath = process.env.PUPPETEER_EXECUTABLE_PATH
-
+                let docs = []
                 const config: PuppeteerWebBaseLoaderOptions = {
                     launchOptions: {
                         args: ['--no-sandbox'],
-                        headless: 'new',
-                        executablePath: executablePath
+                        headless: 'new'
                     }
                 }
                 if (waitUntilGoToOption) {

@@ -196,22 +182,12 @@ class Puppeteer_DocumentLoaders implements INode {
                         waitUntil: waitUntilGoToOption
                     }
                 }
-                if (cssSelector || waitForSelector) {
+                if (waitForSelector) {
                     config['evaluate'] = async (page: Page, _: Browser): Promise<string> => {
-                        if (waitForSelector) {
-                            await page.waitForSelector(waitForSelector)
-                        }
-
-                        if (cssSelector) {
-                            const selectorHandle = await page.$(cssSelector)
-                            const result = await page.evaluate(
-                                (htmlSelection) => htmlSelection?.innerHTML ?? document.body.innerHTML,
-                                selectorHandle
-                            )
-                            return result
-                        } else {
-                            return await page.evaluate(() => document.body.innerHTML)
-                        }
+                        await page.waitForSelector(waitForSelector)
+
+                        const result = await page.evaluate(() => document.body.innerHTML)
+                        return result
                     }
                 }
                 const loader = new PuppeteerWebBaseLoader(url, config)

@@ -228,7 +204,7 @@ class Puppeteer_DocumentLoaders implements INode {
                 }
             }

-            let docs: Document[] = []
+            let docs: IDocument[] = []
             if (relativeLinksMethod) {
                 if (process.env.DEBUG === 'true') options.logger.info(`[${orgId}]: Start PuppeteerWebBaseLoader ${relativeLinksMethod}`)
                 // if limit is 0 we don't want it to default to 10 so we check explicitly for null or undefined

@@ -245,10 +221,7 @@ class Puppeteer_DocumentLoaders implements INode {
                 options.logger.info(`[${orgId}]: PuppeteerWebBaseLoader pages: ${JSON.stringify(pages)}, length: ${pages.length}`)
             if (!pages || pages.length === 0) throw new Error('No relative links found')
             for (const page of pages) {
-                const result = await puppeteerLoader(page)
-                if (result) {
-                    docs.push(...result)
-                }
+                docs.push(...(await puppeteerLoader(page)))
             }
             if (process.env.DEBUG === 'true') options.logger.info(`[${orgId}]: Finish PuppeteerWebBaseLoader ${relativeLinksMethod}`)
         } else if (selectedLinks && selectedLinks.length > 0) {

@@ -257,16 +230,10 @@ class Puppeteer_DocumentLoaders implements INode {
                     `[${orgId}]: PuppeteerWebBaseLoader pages: ${JSON.stringify(selectedLinks)}, length: ${selectedLinks.length}`
                 )
             for (const page of selectedLinks.slice(0, limit)) {
-                const result = await puppeteerLoader(page)
-                if (result) {
-                    docs.push(...result)
-                }
+                docs.push(...(await puppeteerLoader(page)))
             }
         } else {
-            const result = await puppeteerLoader(url)
-            if (result) {
-                docs.push(...result)
-            }
+            docs = await puppeteerLoader(url)
         }

         if (metadata) {
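The removed launch config reads the browser binary from an environment variable, which is how containerized deployments typically point Puppeteer at a system Chromium. A sketch of that option shaping:

// Sketch: launch options that honor PUPPETEER_EXECUTABLE_PATH when set.
function launchOptions() {
    const executablePath = process.env.PUPPETEER_EXECUTABLE_PATH
    return {
        args: ['--no-sandbox'],
        headless: 'new' as const,
        // Omitting executablePath falls back to Puppeteer's bundled browser.
        ...(executablePath ? { executablePath } : {})
    }
}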
@@ -27,6 +27,8 @@ type Element = {
 }

 export class UnstructuredLoader extends BaseDocumentLoader {
+    public filePath: string
+
     private apiUrl = process.env.UNSTRUCTURED_API_URL || 'https://api.unstructuredapp.io/general/v0/general'

     private apiKey: string | undefined = process.env.UNSTRUCTURED_API_KEY

@@ -136,7 +138,7 @@ export class UnstructuredLoader extends BaseDocumentLoader {
         })

         if (!response.ok) {
-            throw new Error(`Failed to partition file with error ${response.status} and message ${await response.text()}`)
+            throw new Error(`Failed to partition file ${this.filePath} with error ${response.status} and message ${await response.text()}`)
         }

         const elements = await response.json()
@@ -4,11 +4,15 @@ import {
     UnstructuredLoaderOptions,
     UnstructuredLoaderStrategy,
     SkipInferTableTypes,
-    HiResModelName
+    HiResModelName,
+    UnstructuredLoader as LCUnstructuredLoader
 } from '@langchain/community/document_loaders/fs/unstructured'
 import { getCredentialData, getCredentialParam, handleEscapeCharacters } from '../../../src/utils'
 import { getFileFromStorage, INodeOutputsValue } from '../../../src'
 import { UnstructuredLoader } from './Unstructured'
+import { isPathTraversal } from '../../../src/validator'
+import sanitize from 'sanitize-filename'
+import path from 'path'

 class UnstructuredFile_DocumentLoaders implements INode {
     label: string

@@ -40,6 +44,17 @@ class UnstructuredFile_DocumentLoaders implements INode {
             optional: true
         }
         this.inputs = [
+            /** Deprecated
+            {
+                label: 'File Path',
+                name: 'filePath',
+                type: 'string',
+                placeholder: '',
+                optional: true,
+                warning:
+                    'Use the File Upload instead of File path. If file is uploaded, this path is ignored. Path will be deprecated in future releases.'
+            },
+            */
             {
                 label: 'Files Upload',
                 name: 'fileObject',

@@ -440,6 +455,7 @@ class UnstructuredFile_DocumentLoaders implements INode {
     }

     async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
+        const filePath = nodeData.inputs?.filePath as string
         const unstructuredAPIUrl = nodeData.inputs?.unstructuredAPIUrl as string
         const strategy = nodeData.inputs?.strategy as UnstructuredLoaderStrategy
         const encoding = nodeData.inputs?.encoding as string

@@ -544,8 +560,37 @@ class UnstructuredFile_DocumentLoaders implements INode {
                 docs.push(...loaderDocs)
             }
         }
+        } else if (filePath) {
+            if (!filePath || typeof filePath !== 'string') {
+                throw new Error('Invalid file path format')
+            }
+
+            if (isPathTraversal(filePath)) {
+                throw new Error('Invalid path characters detected in filePath - path traversal not allowed')
+            }
+
+            const parsedPath = path.parse(filePath)
+            const sanitizedFilename = sanitize(parsedPath.base)
+
+            if (!sanitizedFilename || sanitizedFilename.trim() === '') {
+                throw new Error('Invalid filename after sanitization')
+            }
+
+            const sanitizedFilePath = path.join(parsedPath.dir, sanitizedFilename)
+
+            if (!path.isAbsolute(sanitizedFilePath)) {
+                throw new Error('File path must be absolute')
+            }
+
+            if (sanitizedFilePath.includes('..')) {
+                throw new Error('Invalid file path - directory traversal not allowed')
+            }
+
+            const loader = new LCUnstructuredLoader(sanitizedFilePath, obj)
+            const loaderDocs = await loader.load()
+            docs.push(...loaderDocs)
         } else {
-            throw new Error('File upload is required')
+            throw new Error('File path or File upload is required')
         }

         if (metadata) {
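The added branch layers several independent checks before handing the path to the loader: traversal rejection, filename sanitization via the sanitize-filename package, and an absolute-path requirement. Condensed into one helper:

import path from 'path'
import sanitize from 'sanitize-filename'

// Sketch: the validation pipeline from the added filePath branch.
function sanitizeAbsoluteFilePath(filePath: string): string {
    if (!filePath || typeof filePath !== 'string') throw new Error('Invalid file path format')
    const parsed = path.parse(filePath)
    const safeName = sanitize(parsed.base) // strips characters illegal in filenames
    if (!safeName || safeName.trim() === '') throw new Error('Invalid filename after sanitization')
    const safePath = path.join(parsed.dir, safeName)
    if (!path.isAbsolute(safePath)) throw new Error('File path must be absolute')
    if (safePath.includes('..')) throw new Error('Invalid file path - directory traversal not allowed')
    return safePath
}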
@@ -1,6 +1,3 @@
-/*
-* Uncomment this if you want to use the UnstructuredFolder to load a folder from the file system
-
 import { omit } from 'lodash'
 import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
 import {

@@ -519,4 +516,3 @@ class UnstructuredFolder_DocumentLoaders implements INode {
 }

 module.exports = { nodeClass: UnstructuredFolder_DocumentLoaders }
-*/
@@ -96,7 +96,7 @@ class AWSBedrockEmbedding_Embeddings implements INode {
             {
                 label: 'Max AWS API retries',
                 name: 'maxRetries',
-                description: 'This will limit the number of AWS API for Titan model embeddings call retries. Used to avoid throttling.',
+                description: 'This will limit the nubmer of AWS API for Titan model embeddings call retries. Used to avoid throttling.',
                 type: 'number',
                 optional: true,
                 default: 5,
@@ -4,25 +4,6 @@ import { GoogleGenerativeAIEmbeddings, GoogleGenerativeAIEmbeddingsParams } from
 import { TaskType } from '@google/generative-ai'
 import { MODEL_TYPE, getModels } from '../../../src/modelLoader'

-class GoogleGenerativeAIEmbeddingsWithStripNewLines extends GoogleGenerativeAIEmbeddings {
-    stripNewLines: boolean
-
-    constructor(params: GoogleGenerativeAIEmbeddingsParams & { stripNewLines?: boolean }) {
-        super(params)
-        this.stripNewLines = params.stripNewLines ?? false
-    }
-
-    async embedDocuments(texts: string[]): Promise<number[][]> {
-        const processedTexts = this.stripNewLines ? texts.map((text) => text.replace(/\n/g, ' ')) : texts
-        return super.embedDocuments(processedTexts)
-    }
-
-    async embedQuery(text: string): Promise<number[]> {
-        const processedText = this.stripNewLines ? text.replace(/\n/g, ' ') : text
-        return super.embedQuery(processedText)
-    }
-}
-
 class GoogleGenerativeAIEmbedding_Embeddings implements INode {
     label: string
     name: string

@@ -43,7 +24,7 @@ class GoogleGenerativeAIEmbedding_Embeddings implements INode {
         this.icon = 'GoogleGemini.svg'
         this.category = 'Embeddings'
         this.description = 'Google Generative API to generate embeddings for a given text'
-        this.baseClasses = [this.type, ...getBaseClasses(GoogleGenerativeAIEmbeddingsWithStripNewLines)]
+        this.baseClasses = [this.type, ...getBaseClasses(GoogleGenerativeAIEmbeddings)]
         this.credential = {
             label: 'Connect Credential',
             name: 'credential',

@@ -74,14 +55,6 @@ class GoogleGenerativeAIEmbedding_Embeddings implements INode {
                     { label: 'CLUSTERING', name: 'CLUSTERING' }
                 ],
                 default: 'TASK_TYPE_UNSPECIFIED'
-            },
-            {
-                label: 'Strip New Lines',
-                name: 'stripNewLines',
-                type: 'boolean',
-                optional: true,
-                additionalParams: true,
-                description: 'Remove new lines from input text before embedding to reduce token count'
             }
         ]
     }

@@ -98,7 +71,6 @@ class GoogleGenerativeAIEmbedding_Embeddings implements INode {
         const modelName = nodeData.inputs?.modelName as string
         const credentialData = await getCredentialData(nodeData.credential ?? '', options)
         const apiKey = getCredentialParam('googleGenerativeAPIKey', credentialData, nodeData)
-        const stripNewLines = nodeData.inputs?.stripNewLines as boolean

         let taskType: TaskType
         switch (nodeData.inputs?.tasktype as string) {

@@ -121,14 +93,13 @@ class GoogleGenerativeAIEmbedding_Embeddings implements INode {
                 taskType = TaskType.TASK_TYPE_UNSPECIFIED
                 break
         }
-        const obj: GoogleGenerativeAIEmbeddingsParams & { stripNewLines?: boolean } = {
+        const obj: GoogleGenerativeAIEmbeddingsParams = {
             apiKey: apiKey,
             modelName: modelName,
-            taskType: taskType,
-            stripNewLines
+            taskType: taskType
         }

-        const model = new GoogleGenerativeAIEmbeddingsWithStripNewLines(obj)
+        const model = new GoogleGenerativeAIEmbeddings(obj)
         return model
     }
 }
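The removed subclass is a thin preprocessing wrapper: normalize newlines, then defer to the parent. The same shape works for any embeddings class exposing embedDocuments/embedQuery; a generic sketch using composition rather than inheritance (the Embedder interface below is an assumption standing in for the LangChain base class):

// Sketch: generic newline-stripping wrapper around an embeddings interface.
interface Embedder {
    embedDocuments(texts: string[]): Promise<number[][]>
    embedQuery(text: string): Promise<number[]>
}

class StripNewLinesEmbedder implements Embedder {
    constructor(private inner: Embedder, private stripNewLines = false) {}

    async embedDocuments(texts: string[]): Promise<number[][]> {
        const processed = this.stripNewLines ? texts.map((t) => t.replace(/\n/g, ' ')) : texts
        return this.inner.embedDocuments(processed)
    }

    async embedQuery(text: string): Promise<number[]> {
        return this.inner.embedQuery(this.stripNewLines ? text.replace(/\n/g, ' ') : text)
    }
}

Composition keeps the wrapper reusable across providers, which is why the same subclass appears verbatim in both the Gemini and Vertex AI files below.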
@@ -4,25 +4,6 @@ import { ICommonObject, INode, INodeData, INodeOptionsValue, INodeParams } from
 import { MODEL_TYPE, getModels, getRegions } from '../../../src/modelLoader'
 import { getBaseClasses } from '../../../src/utils'

-class VertexAIEmbeddingsWithStripNewLines extends VertexAIEmbeddings {
-    stripNewLines: boolean
-
-    constructor(params: GoogleVertexAIEmbeddingsInput & { stripNewLines?: boolean }) {
-        super(params)
-        this.stripNewLines = params.stripNewLines ?? false
-    }
-
-    async embedDocuments(texts: string[]): Promise<number[][]> {
-        const processedTexts = this.stripNewLines ? texts.map((text) => text.replace(/\n/g, ' ')) : texts
-        return super.embedDocuments(processedTexts)
-    }
-
-    async embedQuery(text: string): Promise<number[]> {
-        const processedText = this.stripNewLines ? text.replace(/\n/g, ' ') : text
-        return super.embedQuery(processedText)
-    }
-}
-
 class GoogleVertexAIEmbedding_Embeddings implements INode {
     label: string
     name: string

@@ -43,7 +24,7 @@ class GoogleVertexAIEmbedding_Embeddings implements INode {
         this.icon = 'GoogleVertex.svg'
         this.category = 'Embeddings'
         this.description = 'Google vertexAI API to generate embeddings for a given text'
-        this.baseClasses = [this.type, ...getBaseClasses(VertexAIEmbeddingsWithStripNewLines)]
+        this.baseClasses = [this.type, ...getBaseClasses(VertexAIEmbeddings)]
         this.credential = {
             label: 'Connect Credential',
             name: 'credential',

@@ -68,14 +49,6 @@ class GoogleVertexAIEmbedding_Embeddings implements INode {
                 type: 'asyncOptions',
                 loadMethod: 'listRegions',
                 optional: true
-            },
-            {
-                label: 'Strip New Lines',
-                name: 'stripNewLines',
-                type: 'boolean',
-                optional: true,
-                additionalParams: true,
-                description: 'Remove new lines from input text before embedding to reduce token count'
             }
         ]
     }

@@ -93,11 +66,9 @@ class GoogleVertexAIEmbedding_Embeddings implements INode {
     async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
         const modelName = nodeData.inputs?.modelName as string
         const region = nodeData.inputs?.region as string
-        const stripNewLines = nodeData.inputs?.stripNewLines as boolean

-        const obj: GoogleVertexAIEmbeddingsInput & { stripNewLines?: boolean } = {
-            model: modelName,
-            stripNewLines
+        const obj: GoogleVertexAIEmbeddingsInput = {
+            model: modelName
         }

         const authOptions = await buildGoogleCredentials(nodeData, options)

@@ -105,7 +76,7 @@ class GoogleVertexAIEmbedding_Embeddings implements INode {

         if (region) obj.location = region

-        const model = new VertexAIEmbeddingsWithStripNewLines(obj)
+        const model = new VertexAIEmbeddings(obj)
         return model
     }
 }
@@ -23,22 +23,24 @@ export class HuggingFaceInferenceEmbeddings extends Embeddings implements Huggin
         this.model = fields?.model ?? 'sentence-transformers/distilbert-base-nli-mean-tokens'
         this.apiKey = fields?.apiKey ?? getEnvironmentVariable('HUGGINGFACEHUB_API_KEY')
         this.endpoint = fields?.endpoint ?? ''
-        const hf = new HfInference(this.apiKey)
-        // v4 uses Inference Providers by default; only override if custom endpoint provided
-        this.client = this.endpoint ? hf.endpoint(this.endpoint) : hf
+        this.client = new HfInference(this.apiKey)
+        if (this.endpoint) this.client.endpoint(this.endpoint)
     }

     async _embed(texts: string[]): Promise<number[][]> {
         // replace newlines, which can negatively affect performance.
         const clean = texts.map((text) => text.replace(/\n/g, ' '))
+        const hf = new HfInference(this.apiKey)
         const obj: any = {
             inputs: clean
         }
-        if (!this.endpoint) {
+        if (this.endpoint) {
+            hf.endpoint(this.endpoint)
+        } else {
             obj.model = this.model
         }

-        const res = await this.caller.callWithOptions({}, this.client.featureExtraction.bind(this.client), obj)
+        const res = await this.caller.callWithOptions({}, hf.featureExtraction.bind(hf), obj)
         return res as number[][]
     }
@@ -39,7 +39,7 @@ class SubQuestionQueryEngine_LlamaIndex implements INode {
         this.icon = 'subQueryEngine.svg'
         this.category = 'Engine'
         this.description =
-            'Breaks complex query into sub questions for each relevant data source, then gather all the intermediate responses and synthesizes a final response'
+            'Breaks complex query into sub questions for each relevant data source, then gather all the intermediate reponses and synthesizes a final response'
         this.baseClasses = [this.type, 'BaseQueryEngine']
         this.tags = ['LlamaIndex']
         this.inputs = [
@@ -78,8 +78,6 @@ export class HuggingFaceInference extends LLM implements HFInput {
     async _call(prompt: string, options: this['ParsedCallOptions']): Promise<string> {
         const { HfInference } = await HuggingFaceInference.imports()
         const hf = new HfInference(this.apiKey)
-        // v4 uses Inference Providers by default; only override if custom endpoint provided
-        const hfClient = this.endpoint ? hf.endpoint(this.endpoint) : hf
         const obj: any = {
             parameters: {
                 // make it behave similar to openai, returning only the generated text

@@ -92,10 +90,12 @@ export class HuggingFaceInference extends LLM implements HFInput {
             },
             inputs: prompt
         }
-        if (!this.endpoint) {
+        if (this.endpoint) {
+            hf.endpoint(this.endpoint)
+        } else {
             obj.model = this.model
         }
-        const res = await this.caller.callWithOptions({ signal: options.signal }, hfClient.textGeneration.bind(hfClient), obj)
+        const res = await this.caller.callWithOptions({ signal: options.signal }, hf.textGeneration.bind(hf), obj)
         return res.generated_text
     }
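Both HuggingFace hunks revolve around the same detail: per the removed comments, newer @huggingface/inference versions return a new endpoint-scoped client from endpoint() rather than mutating in place, so the result has to be captured. Sketched against a minimal client shape (an assumption, not the library's full type):

// Sketch: capture the endpoint-scoped client instead of discarding it.
interface InferenceClient {
    endpoint(url: string): InferenceClient
    textGeneration(args: { model?: string; inputs: string }): Promise<{ generated_text: string }>
}

async function generate(hf: InferenceClient, model: string, prompt: string, endpointUrl?: string) {
    // endpoint() returns a new client; calling it without using the result is a no-op.
    const client = endpointUrl ? hf.endpoint(endpointUrl) : hf
    // Dedicated endpoints already know their model; hosted inference needs it named.
    const args = endpointUrl ? { inputs: prompt } : { model, inputs: prompt }
    const res = await client.textGeneration(args)
    return res.generated_text
}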
@@ -1,71 +0,0 @@
-import { getBaseClasses, getCredentialData, getCredentialParam, ICommonObject, INode, INodeData, INodeParams } from '../../../src'
-import { OpenAI } from '@langchain/openai'
-import { BaseCache } from '@langchain/core/caches'
-
-class Sambanova_LLMs implements INode {
-    label: string
-    name: string
-    version: number
-    type: string
-    icon: string
-    category: string
-    description: string
-    baseClasses: string[]
-    credential: INodeParams
-    inputs: INodeParams[]
-
-    constructor() {
-        this.label = 'Sambanova'
-        this.name = 'sambanova'
-        this.version = 1.0
-        this.type = 'Sambanova'
-        this.icon = 'sambanova.png'
-        this.category = 'LLMs'
-        this.description = 'Wrapper around Sambanova API for large language models'
-        this.baseClasses = [this.type, ...getBaseClasses(OpenAI)]
-        this.credential = {
-            label: 'Connect Credential',
-            name: 'credential',
-            type: 'credential',
-            credentialNames: ['sambanovaApi']
-        }
-        this.inputs = [
-            {
-                label: 'Cache',
-                name: 'cache',
-                type: 'BaseCache',
-                optional: true
-            },
-            {
-                label: 'Model Name',
-                name: 'modelName',
-                type: 'string',
-                default: 'Meta-Llama-3.3-70B-Instruct',
-                description: 'For more details see https://docs.sambanova.ai/cloud/docs/get-started/supported-models',
-                optional: true
-            }
-        ]
-    }
-
-    async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
-        const cache = nodeData.inputs?.cache as BaseCache
-        const modelName = nodeData.inputs?.modelName as string
-
-        const credentialData = await getCredentialData(nodeData.credential ?? '', options)
-        const sambanovaKey = getCredentialParam('sambanovaApiKey', credentialData, nodeData)
-
-        const obj: any = {
-            model: modelName,
-            configuration: {
-                baseURL: 'https://api.sambanova.ai/v1',
-                apiKey: sambanovaKey
-            }
-        }
-        if (cache) obj.cache = cache
-
-        const sambanova = new OpenAI(obj)
-        return sambanova
-    }
-}
-
-module.exports = { nodeClass: Sambanova_LLMs }

[binary icon removed: 12 KiB]
@@ -21,7 +21,6 @@ import { ChatOpenAI } from '../../chatmodels/ChatOpenAI/FlowiseChatOpenAI'
 import { ChatAnthropic } from '../../chatmodels/ChatAnthropic/FlowiseChatAnthropic'
 import { addImagesToMessages, llmSupportsVision } from '../../../src/multiModalUtils'
 import { ChatGoogleGenerativeAI } from '../../chatmodels/ChatGoogleGenerativeAI/FlowiseChatGoogleGenerativeAI'
-import { AzureChatOpenAI } from '../../chatmodels/AzureChatOpenAI/FlowiseAzureChatOpenAI'

 const sysPrompt = `You are a supervisor tasked with managing a conversation between the following workers: {team_members}.
 Given the following user request, respond with the worker to act next.

@@ -243,7 +242,7 @@ class Supervisor_MultiAgents implements INode {
                 }
             }
         })
-    } else if (llm instanceof ChatOpenAI || llm instanceof AzureChatOpenAI) {
+    } else if (llm instanceof ChatOpenAI) {
         let prompt = ChatPromptTemplate.fromMessages([
             ['system', systemPrompt],
             new MessagesPlaceholder('messages'),
@@ -11,7 +11,7 @@ return [
     tool_calls: [
         {
             id: "12345",
-            name: "calculator",
+            name: "calulator",
             args: {
                 number1: 333382,
                 number2: 1932,
@@ -130,7 +130,8 @@ class ChatPromptTemplate_Prompts implements INode {

         try {
             const response = await executeJavaScriptCode(messageHistoryCode, sandbox, {
-                libraries: ['axios', '@langchain/core']
+                libraries: ['axios', '@langchain/core'],
+                timeout: 10000
             })

             const parsedResponse = JSON.parse(response)
@@ -62,6 +62,7 @@ class MySQLRecordManager_RecordManager implements INode {
             label: 'Namespace',
             name: 'namespace',
             type: 'string',
+            description: 'If not specified, chatflowid will be used',
             additionalParams: true,
             optional: true
         },

@@ -204,8 +205,8 @@ class MySQLRecordManager implements RecordManagerInterface {
     }

     async createSchema(): Promise<void> {
-        const dataSource = await this.getDataSource()
         try {
+            const dataSource = await this.getDataSource()
             const queryRunner = dataSource.createQueryRunner()
             const tableName = this.sanitizeTableName(this.tableName)

@@ -218,16 +219,7 @@ class MySQLRecordManager implements RecordManagerInterface {
                 unique key \`unique_key_namespace\` (\`key\`,
                 \`namespace\`));`)

-            // Add doc_id column if it doesn't exist (migration for existing tables)
-            const checkColumn = await queryRunner.manager.query(
-                `SELECT COUNT(1) ColumnExists FROM INFORMATION_SCHEMA.COLUMNS
-                 WHERE table_schema=DATABASE() AND table_name='${tableName}' AND column_name='doc_id';`
-            )
-            if (checkColumn[0].ColumnExists === 0) {
-                await queryRunner.manager.query(`ALTER TABLE \`${tableName}\` ADD COLUMN \`doc_id\` longtext;`)
-            }
-
-            const columns = [`updated_at`, `key`, `namespace`, `group_id`, `doc_id`]
+            const columns = [`updated_at`, `key`, `namespace`, `group_id`]
             for (const column of columns) {
                 // MySQL does not support 'IF NOT EXISTS' function for Index
                 const Check = await queryRunner.manager.query(

@@ -249,8 +241,6 @@ class MySQLRecordManager implements RecordManagerInterface {
                 return
             }
             throw e
-        } finally {
-            await dataSource.destroy()
         }
     }

@@ -269,7 +259,7 @@ class MySQLRecordManager implements RecordManagerInterface {
         }
     }

-    async update(keys: Array<{ uid: string; docId: string }> | string[], updateOptions?: UpdateOptions): Promise<void> {
+    async update(keys: string[], updateOptions?: UpdateOptions): Promise<void> {
         if (keys.length === 0) {
             return
         }

@@ -285,23 +275,23 @@ class MySQLRecordManager implements RecordManagerInterface {
             throw new Error(`Time sync issue with database ${updatedAt} < ${timeAtLeast}`)
         }

-        // Handle both new format (objects with uid and docId) and old format (strings)
-        const isNewFormat = keys.length > 0 && typeof keys[0] === 'object' && 'uid' in keys[0]
-        const keyStrings = isNewFormat ? (keys as Array<{ uid: string; docId: string }>).map((k) => k.uid) : (keys as string[])
-        const docIds = isNewFormat ? (keys as Array<{ uid: string; docId: string }>).map((k) => k.docId) : keys.map(() => null)
-
-        const groupIds = _groupIds ?? keyStrings.map(() => null)
+        const groupIds = _groupIds ?? keys.map(() => null)

-        if (groupIds.length !== keyStrings.length) {
-            throw new Error(`Number of keys (${keyStrings.length}) does not match number of group_ids (${groupIds.length})`)
+        if (groupIds.length !== keys.length) {
+            throw new Error(`Number of keys (${keys.length}) does not match number of group_ids (${groupIds.length})`)
         }

-        const recordsToUpsert = keyStrings.map((key, i) => [key, this.namespace, updatedAt, groupIds[i] ?? null, docIds[i] ?? null])
+        const recordsToUpsert = keys.map((key, i) => [
+            key,
+            this.namespace,
+            updatedAt,
+            groupIds[i] ?? null // Ensure groupIds[i] is null if undefined
+        ])

         const query = `
-            INSERT INTO \`${tableName}\` (\`key\`, \`namespace\`, \`updated_at\`, \`group_id\`, \`doc_id\`)
-            VALUES (?, ?, ?, ?, ?)
-            ON DUPLICATE KEY UPDATE \`updated_at\` = VALUES(\`updated_at\`), \`doc_id\` = VALUES(\`doc_id\`)`
+            INSERT INTO \`${tableName}\` (\`key\`, \`namespace\`, \`updated_at\`, \`group_id\`)
+            VALUES (?, ?, ?, ?)
+            ON DUPLICATE KEY UPDATE \`updated_at\` = VALUES(\`updated_at\`)`

         // To handle multiple files upsert
         try {

@@ -357,13 +347,13 @@ class MySQLRecordManager implements RecordManagerInterface {
         }
     }

-    async listKeys(options?: ListKeyOptions & { docId?: string }): Promise<string[]> {
+    async listKeys(options?: ListKeyOptions): Promise<string[]> {
         const dataSource = await this.getDataSource()
         const queryRunner = dataSource.createQueryRunner()
         const tableName = this.sanitizeTableName(this.tableName)

         try {
-            const { before, after, limit, groupIds, docId } = options ?? {}
+            const { before, after, limit, groupIds } = options ?? {}
             let query = `SELECT \`key\` FROM \`${tableName}\` WHERE \`namespace\` = ?`
             const values: (string | number | string[])[] = [this.namespace]

@@ -390,11 +380,6 @@ class MySQLRecordManager implements RecordManagerInterface {
                 values.push(...groupIds.filter((gid): gid is string => gid !== null))
             }

-            if (docId) {
-                query += ` AND \`doc_id\` = ?`
-                values.push(docId)
-            }
-
             query += ';'

             // Directly using try/catch with async/await for cleaner flow
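The widened update() signature on the removed side accepts either bare key strings or { uid, docId } pairs, with the first element's shape deciding the format. That normalization, extracted as a helper:

// Sketch: normalize record-manager keys to parallel uid/docId arrays.
type KeyInput = Array<{ uid: string; docId: string }> | string[]

function normalizeKeys(keys: KeyInput): { uids: string[]; docIds: (string | null)[] } {
    // The first element's shape decides whether we got the new object format.
    const isNewFormat = keys.length > 0 && typeof keys[0] === 'object' && 'uid' in (keys[0] as object)
    if (isNewFormat) {
        const objs = keys as Array<{ uid: string; docId: string }>
        return { uids: objs.map((k) => k.uid), docIds: objs.map((k) => k.docId) }
    }
    // Old string format: no document association available.
    return { uids: keys as string[], docIds: (keys as string[]).map(() => null) }
}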
@ -78,6 +78,7 @@ class PostgresRecordManager_RecordManager implements INode {
|
|||
label: 'Namespace',
|
||||
name: 'namespace',
|
||||
type: 'string',
|
||||
description: 'If not specified, chatflowid will be used',
|
||||
additionalParams: true,
|
||||
optional: true
|
||||
},
|
||||
|
|
@ -221,8 +222,8 @@ class PostgresRecordManager implements RecordManagerInterface {
|
|||
}
|
||||
|
||||
async createSchema(): Promise<void> {
|
||||
const dataSource = await this.getDataSource()
|
||||
try {
|
||||
const dataSource = await this.getDataSource()
|
||||
const queryRunner = dataSource.createQueryRunner()
|
||||
const tableName = this.sanitizeTableName(this.tableName)
|
||||
|
||||
|
|
@@ -240,19 +241,6 @@ class PostgresRecordManager implements RecordManagerInterface {
                CREATE INDEX IF NOT EXISTS namespace_index ON "${tableName}" (namespace);
                CREATE INDEX IF NOT EXISTS group_id_index ON "${tableName}" (group_id);`)

-           // Add doc_id column if it doesn't exist (migration for existing tables)
-           await queryRunner.manager.query(`
-               DO $$
-               BEGIN
-                   IF NOT EXISTS (
-                       SELECT 1 FROM information_schema.columns
-                       WHERE table_name = '${tableName}' AND column_name = 'doc_id'
-                   ) THEN
-                       ALTER TABLE "${tableName}" ADD COLUMN doc_id TEXT;
-                       CREATE INDEX IF NOT EXISTS doc_id_index ON "${tableName}" (doc_id);
-                   END IF;
-               END $$;`)
-
            await queryRunner.release()
        } catch (e: any) {
            // This error indicates that the table already exists
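The removed block is the standard idempotent-migration idiom for Postgres: probe `information_schema` inside a `DO` block so the `ALTER` runs at most once. A standalone sketch of the same idiom, with the query API assumed as before:

```typescript
// Sketch: add a column to an existing Postgres table only if it is missing.
// `run` stands in for queryRunner.manager.query; its signature is assumed.
async function addColumnIfMissing(run: (sql: string) => Promise<unknown>, tableName: string): Promise<void> {
    await run(`
        DO $$
        BEGIN
            IF NOT EXISTS (
                SELECT 1 FROM information_schema.columns
                WHERE table_name = '${tableName}' AND column_name = 'doc_id'
            ) THEN
                -- Runs at most once per table; re-deploys are safe.
                ALTER TABLE "${tableName}" ADD COLUMN doc_id TEXT;
                CREATE INDEX IF NOT EXISTS doc_id_index ON "${tableName}" (doc_id);
            END IF;
        END $$;`)
}
```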
@@ -263,8 +251,6 @@ class PostgresRecordManager implements RecordManagerInterface {
                return
            }
            throw e
-       } finally {
-           await dataSource.destroy()
        }
    }

@@ -298,7 +284,7 @@ class PostgresRecordManager implements RecordManagerInterface {
        return `(${placeholders.join(', ')})`
    }

-   async update(keys: Array<{ uid: string; docId: string }> | string[], updateOptions?: UpdateOptions): Promise<void> {
+   async update(keys: string[], updateOptions?: UpdateOptions): Promise<void> {
        if (keys.length === 0) {
            return
        }

@@ -314,22 +300,17 @@ class PostgresRecordManager implements RecordManagerInterface {
            throw new Error(`Time sync issue with database ${updatedAt} < ${timeAtLeast}`)
        }

-       // Handle both new format (objects with uid and docId) and old format (strings)
-       const isNewFormat = keys.length > 0 && typeof keys[0] === 'object' && 'uid' in keys[0]
-       const keyStrings = isNewFormat ? (keys as Array<{ uid: string; docId: string }>).map((k) => k.uid) : (keys as string[])
-       const docIds = isNewFormat ? (keys as Array<{ uid: string; docId: string }>).map((k) => k.docId) : keys.map(() => null)
-       const groupIds = _groupIds ?? keyStrings.map(() => null)
+       const groupIds = _groupIds ?? keys.map(() => null)

-       if (groupIds.length !== keyStrings.length) {
-           throw new Error(`Number of keys (${keyStrings.length}) does not match number of group_ids ${groupIds.length})`)
+       if (groupIds.length !== keys.length) {
+           throw new Error(`Number of keys (${keys.length}) does not match number of group_ids ${groupIds.length})`)
        }

-       const recordsToUpsert = keyStrings.map((key, i) => [key, this.namespace, updatedAt, groupIds[i], docIds[i]])
+       const recordsToUpsert = keys.map((key, i) => [key, this.namespace, updatedAt, groupIds[i]])

        const valuesPlaceholders = recordsToUpsert.map((_, j) => this.generatePlaceholderForRowAt(j, recordsToUpsert[0].length)).join(', ')

-       const query = `INSERT INTO "${tableName}" (key, namespace, updated_at, group_id, doc_id) VALUES ${valuesPlaceholders} ON CONFLICT (key, namespace) DO UPDATE SET updated_at = EXCLUDED.updated_at, doc_id = EXCLUDED.doc_id;`
+       const query = `INSERT INTO "${tableName}" (key, namespace, updated_at, group_id) VALUES ${valuesPlaceholders} ON CONFLICT (key, namespace) DO UPDATE SET updated_at = EXCLUDED.updated_at;`
        try {
            await queryRunner.manager.query(query, recordsToUpsert.flat())
            await queryRunner.release()
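Postgres uses numbered placeholders, so the multi-row `VALUES` clause has to offset `$n` per row. A minimal sketch of what `generatePlaceholderForRowAt` computes, reconstructed from the call site (the real implementation may differ):

```typescript
// Sketch: $1..$k for row 0, $(k+1)..$(2k) for row 1, and so on.
function placeholderForRowAt(rowIndex: number, columns: number): string {
    const placeholders = Array.from({ length: columns }, (_, col) => `$${rowIndex * columns + col + 1}`)
    return `(${placeholders.join(', ')})`
}

// placeholderForRowAt(0, 4) === '($1, $2, $3, $4)'
// placeholderForRowAt(1, 4) === '($5, $6, $7, $8)'
// Joined across rows, these line up with recordsToUpsert.flat() passed as parameters.
```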
@@ -368,8 +349,8 @@ class PostgresRecordManager implements RecordManagerInterface {
        }
    }

-   async listKeys(options?: ListKeyOptions & { docId?: string }): Promise<string[]> {
-       const { before, after, limit, groupIds, docId } = options ?? {}
+   async listKeys(options?: ListKeyOptions): Promise<string[]> {
+       const { before, after, limit, groupIds } = options ?? {}
        const tableName = this.sanitizeTableName(this.tableName)

        let query = `SELECT key FROM "${tableName}" WHERE namespace = $1`

@@ -400,12 +381,6 @@ class PostgresRecordManager implements RecordManagerInterface {
            index += 1
        }

-       if (docId) {
-           values.push(docId)
-           query += ` AND doc_id = $${index}`
-           index += 1
-       }
-
        query += ';'

        const dataSource = await this.getDataSource()
@@ -51,6 +51,7 @@ class SQLiteRecordManager_RecordManager implements INode {
            label: 'Namespace',
            name: 'namespace',
            type: 'string',
            description: 'If not specified, chatflowid will be used',
            additionalParams: true,
            optional: true
        },

@@ -178,8 +179,8 @@ class SQLiteRecordManager implements RecordManagerInterface {
    }

    async createSchema(): Promise<void> {
-       const dataSource = await this.getDataSource()
        try {
+           const dataSource = await this.getDataSource()
            const queryRunner = dataSource.createQueryRunner()
            const tableName = this.sanitizeTableName(this.tableName)

@@ -197,15 +198,6 @@ CREATE INDEX IF NOT EXISTS key_index ON "${tableName}" (key);
CREATE INDEX IF NOT EXISTS namespace_index ON "${tableName}" (namespace);
CREATE INDEX IF NOT EXISTS group_id_index ON "${tableName}" (group_id);`)

-           // Add doc_id column if it doesn't exist (migration for existing tables)
-           const checkColumn = await queryRunner.manager.query(
-               `SELECT COUNT(*) as count FROM pragma_table_info('${tableName}') WHERE name='doc_id';`
-           )
-           if (checkColumn[0].count === 0) {
-               await queryRunner.manager.query(`ALTER TABLE "${tableName}" ADD COLUMN doc_id TEXT;`)
-               await queryRunner.manager.query(`CREATE INDEX IF NOT EXISTS doc_id_index ON "${tableName}" (doc_id);`)
-           }
-
            await queryRunner.release()
        } catch (e: any) {
            // This error indicates that the table already exists
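SQLite has no conditional `ADD COLUMN`, so the removed code probes `pragma_table_info` first. A standalone sketch of the same check (query API assumed, as above):

```typescript
// Sketch: conditionally add a column in SQLite by inspecting pragma_table_info.
async function sqliteAddColumnIfMissing(
    run: (sql: string) => Promise<any>, // assumed query API returning raw rows
    tableName: string,
    column: string
): Promise<void> {
    const rows = await run(`SELECT COUNT(*) as count FROM pragma_table_info('${tableName}') WHERE name='${column}';`)
    if (rows[0].count === 0) {
        // ALTER TABLE ... ADD COLUMN is the only in-place schema change SQLite allows here.
        await run(`ALTER TABLE "${tableName}" ADD COLUMN ${column} TEXT;`)
    }
}
```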
@@ -216,8 +208,6 @@ CREATE INDEX IF NOT EXISTS group_id_index ON "${tableName}" (group_id);`)
                return
            }
            throw e
-       } finally {
-           await dataSource.destroy()
        }
    }

@@ -236,7 +226,7 @@ CREATE INDEX IF NOT EXISTS group_id_index ON "${tableName}" (group_id);`)
        }
    }

-   async update(keys: Array<{ uid: string; docId: string }> | string[], updateOptions?: UpdateOptions): Promise<void> {
+   async update(keys: string[], updateOptions?: UpdateOptions): Promise<void> {
        if (keys.length === 0) {
            return
        }
@@ -251,23 +241,23 @@ CREATE INDEX IF NOT EXISTS group_id_index ON "${tableName}" (group_id);`)
            throw new Error(`Time sync issue with database ${updatedAt} < ${timeAtLeast}`)
        }

-       // Handle both new format (objects with uid and docId) and old format (strings)
-       const isNewFormat = keys.length > 0 && typeof keys[0] === 'object' && 'uid' in keys[0]
-       const keyStrings = isNewFormat ? (keys as Array<{ uid: string; docId: string }>).map((k) => k.uid) : (keys as string[])
-       const docIds = isNewFormat ? (keys as Array<{ uid: string; docId: string }>).map((k) => k.docId) : keys.map(() => null)
-       const groupIds = _groupIds ?? keyStrings.map(() => null)
+       const groupIds = _groupIds ?? keys.map(() => null)

-       if (groupIds.length !== keyStrings.length) {
-           throw new Error(`Number of keys (${keyStrings.length}) does not match number of group_ids (${groupIds.length})`)
+       if (groupIds.length !== keys.length) {
+           throw new Error(`Number of keys (${keys.length}) does not match number of group_ids (${groupIds.length})`)
        }

-       const recordsToUpsert = keyStrings.map((key, i) => [key, this.namespace, updatedAt, groupIds[i] ?? null, docIds[i] ?? null])
+       const recordsToUpsert = keys.map((key, i) => [
+           key,
+           this.namespace,
+           updatedAt,
+           groupIds[i] ?? null // Ensure groupIds[i] is null if undefined
+       ])

        const query = `
-           INSERT INTO "${tableName}" (key, namespace, updated_at, group_id, doc_id)
-           VALUES (?, ?, ?, ?, ?)
-           ON CONFLICT (key, namespace) DO UPDATE SET updated_at = excluded.updated_at, doc_id = excluded.doc_id`
+           INSERT INTO "${tableName}" (key, namespace, updated_at, group_id)
+           VALUES (?, ?, ?, ?)
+           ON CONFLICT (key, namespace) DO UPDATE SET updated_at = excluded.updated_at`

        try {
            // To handle multiple files upsert
@@ -322,8 +312,8 @@ CREATE INDEX IF NOT EXISTS group_id_index ON "${tableName}" (group_id);`)
        }
    }

-   async listKeys(options?: ListKeyOptions & { docId?: string }): Promise<string[]> {
-       const { before, after, limit, groupIds, docId } = options ?? {}
+   async listKeys(options?: ListKeyOptions): Promise<string[]> {
+       const { before, after, limit, groupIds } = options ?? {}
        const tableName = this.sanitizeTableName(this.tableName)

        let query = `SELECT key FROM "${tableName}" WHERE namespace = ?`

@@ -352,11 +342,6 @@ CREATE INDEX IF NOT EXISTS group_id_index ON "${tableName}" (group_id);`)
            values.push(...groupIds.filter((gid): gid is string => gid !== null))
        }

-       if (docId) {
-           query += ` AND doc_id = ?`
-           values.push(docId)
-       }
-
        query += ';'

        const dataSource = await this.getDataSource()
@@ -940,7 +940,9 @@ const getReturnOutput = async (nodeData: INodeData, input: string, options: ICom
    const sandbox = createCodeExecutionSandbox(input, variables, flow)

    try {
-       const response = await executeJavaScriptCode(updateStateMemoryCode, sandbox)
+       const response = await executeJavaScriptCode(updateStateMemoryCode, sandbox, {
+           timeout: 10000
+       })

        if (typeof response !== 'object') throw new Error('Return output must be an object')
        return response
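This same change — threading a `timeout` option into `executeJavaScriptCode` — repeats across the condition, custom-function, execute-flow, state, and message-history hunks below. A minimal sketch of the call shape, assuming the options bag accepts `timeout` in milliseconds as the diff shows (the wrapper itself is illustrative):

```typescript
// Sketch: cap sandboxed user code at 10s so a runaway snippet cannot hang the flow.
const EXECUTION_TIMEOUT_MS = 10000 // the default this PR appears to apply everywhere

async function runUserCode(
    executeJavaScriptCode: (code: string, sandbox: object, opts?: { timeout?: number }) => Promise<unknown>,
    code: string,
    sandbox: object
): Promise<unknown> {
    // If the snippet exceeds the budget, executeJavaScriptCode is expected to reject,
    // and the surrounding try/catch in each node turns that into a node error.
    return executeJavaScriptCode(code, sandbox, { timeout: EXECUTION_TIMEOUT_MS })
}
```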
@@ -282,7 +282,9 @@ const runCondition = async (nodeData: INodeData, input: string, options: ICommon
    const sandbox = createCodeExecutionSandbox(input, variables, flow)

    try {
-       const response = await executeJavaScriptCode(conditionFunction, sandbox)
+       const response = await executeJavaScriptCode(conditionFunction, sandbox, {
+           timeout: 10000
+       })

        if (typeof response !== 'string') throw new Error('Condition function must return a string')
        return response

@@ -549,7 +549,9 @@ const runCondition = async (
    const sandbox = createCodeExecutionSandbox(input, variables, flow)

    try {
-       const response = await executeJavaScriptCode(conditionFunction, sandbox)
+       const response = await executeJavaScriptCode(conditionFunction, sandbox, {
+           timeout: 10000
+       })

        if (typeof response !== 'string') throw new Error('Condition function must return a string')
        return response

@@ -166,7 +166,9 @@ class CustomFunction_SeqAgents implements INode {
    const sandbox = createCodeExecutionSandbox(input, variables, flow, additionalSandbox)

    try {
-       const response = await executeJavaScriptCode(javascriptFunction, sandbox)
+       const response = await executeJavaScriptCode(javascriptFunction, sandbox, {
+           timeout: 10000
+       })

        if (returnValueAs === 'stateObj') {
            if (typeof response !== 'object') {

@@ -264,7 +264,8 @@ class ExecuteFlow_SeqAgents implements INode {

    try {
        let response = await executeJavaScriptCode(code, sandbox, {
-           useSandbox: false
+           useSandbox: false,
+           timeout: 10000
        })

        if (typeof response === 'object') {

@@ -712,7 +712,9 @@ const getReturnOutput = async (nodeData: INodeData, input: string, options: ICom
    const sandbox = createCodeExecutionSandbox(input, variables, flow)

    try {
-       const response = await executeJavaScriptCode(updateStateMemoryCode, sandbox)
+       const response = await executeJavaScriptCode(updateStateMemoryCode, sandbox, {
+           timeout: 10000
+       })

        if (typeof response !== 'object') throw new Error('Return output must be an object')
        return response

@@ -204,7 +204,9 @@ class State_SeqAgents implements INode {
    const sandbox = createCodeExecutionSandbox('', variables, flow)

    try {
-       const response = await executeJavaScriptCode(`return ${stateMemoryCode}`, sandbox)
+       const response = await executeJavaScriptCode(`return ${stateMemoryCode}`, sandbox, {
+           timeout: 10000
+       })

        if (typeof response !== 'object') throw new Error('State must be an object')
        const returnOutput: ISeqAgentNode = {

@@ -575,7 +575,9 @@ const getReturnOutput = async (
    const sandbox = createCodeExecutionSandbox(input, variables, flow)

    try {
-       const response = await executeJavaScriptCode(updateStateMemoryCode, sandbox)
+       const response = await executeJavaScriptCode(updateStateMemoryCode, sandbox, {
+           timeout: 10000
+       })

        if (typeof response !== 'object') throw new Error('Return output must be an object')
        return response
@@ -238,7 +238,7 @@ export function filterConversationHistory(
export const restructureMessages = (llm: BaseChatModel, state: ISeqAgentsState) => {
    const messages: BaseMessage[] = []
    for (const message of state.messages as unknown as BaseMessage[]) {
-       // Sometimes Anthropic can return a message with content types of array, ignore that EXCEPT when tool calls are present
+       // Sometimes Anthropic can return a message with content types of array, ignore that EXECEPT when tool calls are present
        if ((message as any).tool_calls?.length && message.content !== '') {
            message.content = JSON.stringify(message.content)
        }
@@ -396,7 +396,9 @@ export const checkMessageHistory = async (
    const sandbox = createCodeExecutionSandbox('', variables, flow)

    try {
-       const response = await executeJavaScriptCode(messageHistory, sandbox)
+       const response = await executeJavaScriptCode(messageHistory, sandbox, {
+           timeout: 10000
+       })

        if (!Array.isArray(response)) throw new Error('Returned message history must be an array')
        if (sysPrompt) {
@@ -1,479 +0,0 @@
// Mock AWS SDK DynamoDB client
jest.mock('@aws-sdk/client-dynamodb', () => {
    const mockSend = jest.fn()

    // Create mock constructors that capture inputs
    const PutItemCommandMock = jest.fn((input) => ({ input, _type: 'PutItemCommand' }))
    const QueryCommandMock = jest.fn((input) => ({ input, _type: 'QueryCommand' }))

    return {
        DynamoDBClient: jest.fn().mockImplementation(() => ({
            send: mockSend
        })),
        DescribeTableCommand: jest.fn(),
        ListTablesCommand: jest.fn(),
        PutItemCommand: PutItemCommandMock,
        QueryCommand: QueryCommandMock,
        __mockSend: mockSend
    }
})

// Mock AWS credentials utility
jest.mock('../../../src/awsToolsUtils', () => ({
    AWS_REGIONS: [
        { label: 'US East (N. Virginia)', name: 'us-east-1' },
        { label: 'US West (Oregon)', name: 'us-west-2' }
    ],
    DEFAULT_AWS_REGION: 'us-east-1',
    getAWSCredentials: jest.fn(() =>
        Promise.resolve({
            accessKeyId: 'test-access-key',
            secretAccessKey: 'test-secret-key',
            sessionToken: 'test-session-token'
        })
    )
}))

// Mock getBaseClasses function
jest.mock('../../../src/utils', () => ({
    getBaseClasses: jest.fn(() => ['Tool', 'StructuredTool'])
}))

describe('AWSDynamoDBKVStorage', () => {
    let AWSDynamoDBKVStorage_Tools: any
    let mockSend: jest.Mock
    let PutItemCommandMock: jest.Mock
    let QueryCommandMock: jest.Mock

    // Helper function to create a node instance
    const createNode = () => new AWSDynamoDBKVStorage_Tools()

    // Helper function to create nodeData
    const createNodeData = (overrides = {}) => ({
        inputs: {
            region: 'us-east-1',
            tableName: 'test-table',
            keyPrefix: '',
            operation: 'store',
            ...overrides
        }
    })

    beforeEach(async () => {
        // Clear all mocks before each test
        jest.clearAllMocks()

        // Get the mock functions
        const dynamoDBModule = require('@aws-sdk/client-dynamodb')
        mockSend = dynamoDBModule.__mockSend
        PutItemCommandMock = dynamoDBModule.PutItemCommand
        QueryCommandMock = dynamoDBModule.QueryCommand

        mockSend.mockReset()
        PutItemCommandMock.mockClear()
        QueryCommandMock.mockClear()

        // Dynamic import to get fresh module instance
        const module = (await import('./AWSDynamoDBKVStorage')) as any
        AWSDynamoDBKVStorage_Tools = module.nodeClass
    })
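The deleted test file leans on a useful pattern: the factory passed to `jest.mock` exports the shared `mockSend` through a `__mockSend` escape hatch, and the command constructors record their inputs, so assertions can reach inside the mocked SDK. A minimal standalone sketch of that pattern (module name illustrative, hence the `virtual: true` option):

```typescript
// Sketch: capture constructor inputs from a mocked SDK so tests can assert on them.
jest.mock(
    'some-sdk',
    () => {
        const send = jest.fn()
        const CommandMock = jest.fn((input) => ({ input })) // records what the code built
        return { Client: jest.fn(() => ({ send })), Command: CommandMock, __send: send }
    },
    { virtual: true } // needed because 'some-sdk' is not a real installed module
)

it('asserts on the command the code constructed', async () => {
    const { Command, __send } = require('some-sdk')
    __send.mockResolvedValueOnce({ ok: true })
    // ... exercise code under test that builds a Command and calls client.send ...
    expect(Command).toHaveBeenCalledTimes(1)
    expect(Command.mock.calls[0][0]).toMatchObject({ /* expected input */ })
})
```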
    describe('AWSDynamoDBKVStorage_Tools Node', () => {
        it('should have correct input parameters', () => {
            const node = createNode()
            const inputNames = node.inputs.map((input: any) => input.name)

            expect(inputNames).toEqual(['region', 'tableName', 'keyPrefix', 'operation'])
        })
    })

    describe('loadMethods - listTables', () => {
        it('should list valid DynamoDB tables with correct schema', async () => {
            const node = createNode()

            // Mock responses for list and describe commands
            mockSend
                .mockResolvedValueOnce({
                    TableNames: ['table1', 'table2', 'invalid-table']
                })
                .mockResolvedValueOnce({
                    Table: {
                        KeySchema: [
                            { AttributeName: 'pk', KeyType: 'HASH' },
                            { AttributeName: 'sk', KeyType: 'RANGE' }
                        ]
                    }
                })
                .mockResolvedValueOnce({
                    Table: {
                        KeySchema: [
                            { AttributeName: 'pk', KeyType: 'HASH' },
                            { AttributeName: 'sk', KeyType: 'RANGE' }
                        ]
                    }
                })
                .mockResolvedValueOnce({
                    Table: {
                        KeySchema: [{ AttributeName: 'id', KeyType: 'HASH' }]
                    }
                })

            const nodeData = { inputs: { region: 'us-east-1' } }

            const result = await node.loadMethods.listTables(nodeData, {})

            expect(result).toEqual([
                {
                    label: 'table1',
                    name: 'table1',
                    description: 'Table with pk (partition) and sk (sort) keys'
                },
                {
                    label: 'table2',
                    name: 'table2',
                    description: 'Table with pk (partition) and sk (sort) keys'
                }
            ])
        })

        it('should return error when no tables found', async () => {
            const node = createNode()

            mockSend.mockResolvedValueOnce({
                TableNames: []
            })

            const nodeData = { inputs: { region: 'us-east-1' } }

            const result = await node.loadMethods.listTables(nodeData, {})

            expect(result).toEqual([
                {
                    label: 'No tables found',
                    name: 'error',
                    description: 'No DynamoDB tables found in this region'
                }
            ])
        })

        it('should return error when no compatible tables found', async () => {
            const node = createNode()

            mockSend
                .mockResolvedValueOnce({
                    TableNames: ['invalid-table']
                })
                .mockResolvedValueOnce({
                    Table: {
                        KeySchema: [{ AttributeName: 'id', KeyType: 'HASH' }]
                    }
                })

            const nodeData = { inputs: { region: 'us-east-1' } }

            const result = await node.loadMethods.listTables(nodeData, {})

            expect(result).toHaveLength(1)
            expect(result[0]).toMatchObject({
                label: 'No compatible tables found',
                name: 'error'
            })
            expect(result[0].description).toContain('Found 1 table(s) with different schema')
        })

        it('should handle AWS credentials error', async () => {
            const node = createNode()
            const { getAWSCredentials } = require('../../../src/awsToolsUtils')

            getAWSCredentials.mockRejectedValueOnce(new Error('AWS Access Key not found'))

            const nodeData = { inputs: { region: 'us-east-1' } }

            const result = await node.loadMethods.listTables(nodeData, {})

            expect(result).toEqual([
                {
                    label: 'AWS Credentials Required',
                    name: 'error',
                    description: 'Enter AWS Access Key ID and Secret Access Key'
                }
            ])
        })
    })
    describe('init method', () => {
        it.each([
            ['store', 'test-prefix', 'dynamodb_kv_store', 'Store a text value with a key in DynamoDB'],
            ['retrieve', '', 'dynamodb_kv_retrieve', 'Retrieve a value by key from DynamoDB']
        ])('should create correct tool for %s operation', async (operation, keyPrefix, expectedName, expectedDescription) => {
            const node = createNode()
            const nodeData = createNodeData({ keyPrefix, operation })

            const tool = await node.init(nodeData, '', {})

            expect(tool.name).toBe(expectedName)
            expect(tool.description).toContain(expectedDescription)
        })

        it.each([
            ['error', '', 'Valid DynamoDB Table selection is required'],
            ['test-table', 'prefix#invalid', 'Key prefix cannot contain "#" character']
        ])('should throw error for invalid config (table: %s, prefix: %s)', async (tableName, keyPrefix, expectedError) => {
            const node = createNode()
            const nodeData = createNodeData({ tableName, keyPrefix })

            await expect(node.init(nodeData, '', {})).rejects.toThrow(expectedError)
        })
    })

    describe('DynamoDBStoreTool', () => {
        it('should store value successfully', async () => {
            const node = createNode()

            mockSend.mockResolvedValueOnce({})

            const nodeData = createNodeData({ keyPrefix: 'test' })

            const tool = await node.init(nodeData, '', {})
            const result = await tool._call({ key: 'mykey', value: 'myvalue' })

            expect(result).toContain('Successfully stored value with key "mykey"')
            expect(mockSend).toHaveBeenCalledTimes(1)

            // Verify PutItemCommand was called with correct parameters
            expect(PutItemCommandMock).toHaveBeenCalledTimes(1)
            const putCommandInput = PutItemCommandMock.mock.calls[0][0]

            expect(putCommandInput).toMatchObject({
                TableName: 'test-table',
                Item: {
                    pk: { S: 'test#mykey' },
                    value: { S: 'myvalue' }
                }
            })

            // Verify timestamp fields exist
            expect(putCommandInput.Item.sk).toBeDefined()
            expect(putCommandInput.Item.timestamp).toBeDefined()
        })

        it.each([
            ['', 'Key must be a non-empty string'],
            [' ', 'Key must be a non-empty string'],
            ['a'.repeat(2049), 'Key too long']
        ])('should handle invalid key: "%s"', async (key, expectedError) => {
            const node = createNode()

            const nodeData = createNodeData()

            const tool = await node.init(nodeData, '', {})
            await expect(tool._call({ key, value: 'myvalue' })).rejects.toThrow(expectedError)
        })

        it.each([
            ['store', { key: 'mykey', value: 'myvalue' }, 'Failed to store value: DynamoDB error'],
            ['retrieve', { key: 'mykey' }, 'Failed to retrieve value: DynamoDB error']
        ])('should handle DynamoDB error for %s', async (operation, callParams, expectedError) => {
            const node = createNode()
            mockSend.mockRejectedValueOnce(new Error('DynamoDB error'))

            const nodeData = createNodeData({ operation })
            const tool = await node.init(nodeData, '', {})

            await expect(tool._call(callParams)).rejects.toThrow(expectedError)
        })
    })
    describe('DynamoDBRetrieveTool', () => {
        it('should retrieve latest value successfully', async () => {
            const node = createNode()

            mockSend.mockResolvedValueOnce({
                Items: [
                    {
                        pk: { S: 'test#mykey' },
                        sk: { S: '1234567890' },
                        value: { S: 'myvalue' },
                        timestamp: { S: '2024-01-01T00:00:00.000Z' }
                    }
                ]
            })

            const nodeData = createNodeData({ keyPrefix: 'test', operation: 'retrieve' })

            const tool = await node.init(nodeData, '', {})
            const result = await tool._call({ key: 'mykey' })
            const parsed = JSON.parse(result)

            expect(parsed).toEqual({
                value: 'myvalue',
                timestamp: '2024-01-01T00:00:00.000Z'
            })
            expect(mockSend).toHaveBeenCalledTimes(1)

            // Verify QueryCommand was called with correct parameters
            expect(QueryCommandMock).toHaveBeenCalledTimes(1)
            const queryCommandInput = QueryCommandMock.mock.calls[0][0]

            expect(queryCommandInput).toMatchObject({
                TableName: 'test-table',
                KeyConditionExpression: 'pk = :pk',
                ExpressionAttributeValues: {
                    ':pk': { S: 'test#mykey' }
                },
                ScanIndexForward: false,
                Limit: 1
            })
        })

        it('should retrieve nth latest value', async () => {
            const node = createNode()

            mockSend.mockResolvedValueOnce({
                Items: [
                    {
                        pk: { S: 'mykey' },
                        sk: { S: '1234567892' },
                        value: { S: 'newest' },
                        timestamp: { S: '2024-01-03T00:00:00.000Z' }
                    },
                    {
                        pk: { S: 'mykey' },
                        sk: { S: '1234567891' },
                        value: { S: 'second' },
                        timestamp: { S: '2024-01-02T00:00:00.000Z' }
                    },
                    {
                        pk: { S: 'mykey' },
                        sk: { S: '1234567890' },
                        value: { S: 'oldest' },
                        timestamp: { S: '2024-01-01T00:00:00.000Z' }
                    }
                ]
            })

            const nodeData = createNodeData({ operation: 'retrieve' })

            const tool = await node.init(nodeData, '', {})
            const result = await tool._call({ key: 'mykey', nthLatest: '2' })
            const parsed = JSON.parse(result)

            expect(parsed).toEqual({
                value: 'second',
                timestamp: '2024-01-02T00:00:00.000Z'
            })

            // Verify QueryCommand was called with Limit: 2
            expect(QueryCommandMock).toHaveBeenCalledTimes(1)
            const queryCommandInput = QueryCommandMock.mock.calls[0][0]
            expect(queryCommandInput.Limit).toBe(2)
        })

        it('should return null when key not found', async () => {
            const node = createNode()

            mockSend.mockResolvedValueOnce({
                Items: []
            })

            const nodeData = createNodeData({ operation: 'retrieve' })

            const tool = await node.init(nodeData, '', {})
            const result = await tool._call({ key: 'nonexistent' })
            const parsed = JSON.parse(result)

            expect(parsed).toEqual({
                value: null,
                timestamp: null
            })
        })

        it('should return null when nth version does not exist', async () => {
            const node = createNode()

            mockSend.mockResolvedValueOnce({
                Items: [
                    {
                        pk: { S: 'mykey' },
                        sk: { S: '1234567890' },
                        value: { S: 'only-one' },
                        timestamp: { S: '2024-01-01T00:00:00.000Z' }
                    }
                ]
            })

            const nodeData = createNodeData({ operation: 'retrieve' })

            const tool = await node.init(nodeData, '', {})
            const result = await tool._call({ key: 'mykey', nthLatest: '3' })
            const parsed = JSON.parse(result)

            expect(parsed).toEqual({
                value: null,
                timestamp: null
            })
        })

        it.each([
            ['0', 'nthLatest must be a positive number'],
            ['-1', 'nthLatest must be a positive number']
        ])('should reject invalid nthLatest value "%s"', async (nthLatest, expectedError) => {
            const node = createNode()

            const nodeData = createNodeData({ operation: 'retrieve' })

            const tool = await node.init(nodeData, '', {})
            await expect(tool._call({ key: 'mykey', nthLatest })).rejects.toThrow(expectedError)
        })

        it.each([
            ['', 'Key must be a non-empty string'],
            [' ', 'Key must be a non-empty string']
        ])('should handle invalid key for retrieve: "%s"', async (key, expectedError) => {
            const node = createNode()

            const nodeData = createNodeData({ operation: 'retrieve' })

            const tool = await node.init(nodeData, '', {})
            await expect(tool._call({ key })).rejects.toThrow(expectedError)
        })
    })
    describe('Helper Functions', () => {
        it.each([
            ['myapp', 'userdata', 'myapp#userdata'],
            ['', 'userdata', 'userdata']
        ])('should build full key correctly (prefix: "%s", key: "%s", expected: "%s")', async (keyPrefix, key, expectedFullKey) => {
            const node = createNode()
            mockSend.mockResolvedValueOnce({})
            const nodeData = createNodeData({ keyPrefix })

            const tool = await node.init(nodeData, '', {})
            await tool._call({ key, value: 'test' })

            // Verify the put command was called with the correct full key
            expect(mockSend).toHaveBeenCalledTimes(1)
            expect(PutItemCommandMock).toHaveBeenCalledTimes(1)

            const putCommandInput = PutItemCommandMock.mock.calls[0][0]
            expect(putCommandInput.Item.pk.S).toBe(expectedFullKey)
        })

        it.each([
            [{ accessKeyId: 'test-key', secretAccessKey: 'test-secret', sessionToken: 'test-token' }, 'with session token'],
            [{ accessKeyId: 'test-key', secretAccessKey: 'test-secret' }, 'without session token']
        ])('should work %s', async (credentials, _description) => {
            const node = createNode()
            const { getAWSCredentials } = require('../../../src/awsToolsUtils')

            getAWSCredentials.mockResolvedValueOnce(credentials)
            mockSend.mockResolvedValueOnce({})

            const nodeData = createNodeData()

            const tool = await node.init(nodeData, '', {})
            await tool._call({ key: 'test', value: 'value' })
            expect(getAWSCredentials).toHaveBeenCalled()
        })
    })
})
@@ -1,375 +0,0 @@
import { z } from 'zod'
import { StructuredTool } from '@langchain/core/tools'
import { ICommonObject, INode, INodeData, INodeOptionsValue, INodeParams } from '../../../src/Interface'
import { getBaseClasses } from '../../../src/utils'
import { AWS_REGIONS, DEFAULT_AWS_REGION, AWSCredentials, getAWSCredentials } from '../../../src/awsToolsUtils'
import { DynamoDBClient, DescribeTableCommand, ListTablesCommand, PutItemCommand, QueryCommand } from '@aws-sdk/client-dynamodb'

// Operation enum
enum Operation {
    STORE = 'store',
    RETRIEVE = 'retrieve'
}

// Constants
const ERROR_PLACEHOLDER = 'error'
const KEY_SEPARATOR = '#'
const MAX_KEY_LENGTH = 2048 // DynamoDB limit for partition key

// Helper function to create DynamoDB client
function createDynamoDBClient(credentials: AWSCredentials, region: string): DynamoDBClient {
    return new DynamoDBClient({
        region,
        credentials: {
            accessKeyId: credentials.accessKeyId,
            secretAccessKey: credentials.secretAccessKey,
            ...(credentials.sessionToken && { sessionToken: credentials.sessionToken })
        }
    })
}

// Helper function to build full key with optional prefix
function buildFullKey(key: string, keyPrefix: string): string {
    const fullKey = keyPrefix ? `${keyPrefix}${KEY_SEPARATOR}${key}` : key

    // Validate key length (DynamoDB limit)
    if (fullKey.length > MAX_KEY_LENGTH) {
        throw new Error(`Key too long. Maximum length is ${MAX_KEY_LENGTH} characters, got ${fullKey.length}`)
    }

    return fullKey
}

// Helper function to validate and sanitize input
function validateKey(key: string): void {
    if (!key || key.trim().length === 0) {
        throw new Error('Key must be a non-empty string')
    }
}

/**
 * Tool for storing key-value pairs in DynamoDB with automatic versioning
 */
class DynamoDBStoreTool extends StructuredTool {
    name = 'dynamodb_kv_store'
    description = 'Store a text value with a key in DynamoDB. Input must be an object with "key" and "value" properties.'
    schema = z.object({
        key: z.string().min(1).describe('The key to store the value under'),
        value: z.string().describe('The text value to store')
    })
    private readonly dynamoClient: DynamoDBClient
    private readonly tableName: string
    private readonly keyPrefix: string

    constructor(dynamoClient: DynamoDBClient, tableName: string, keyPrefix: string = '') {
        super()
        this.dynamoClient = dynamoClient
        this.tableName = tableName
        this.keyPrefix = keyPrefix
    }

    async _call({ key, value }: z.infer<typeof this.schema>): Promise<string> {
        try {
            validateKey(key)
            const fullKey = buildFullKey(key, this.keyPrefix)
            const timestamp = Date.now()
            const isoTimestamp = new Date(timestamp).toISOString()

            const putCommand = new PutItemCommand({
                TableName: this.tableName,
                Item: {
                    pk: { S: fullKey },
                    sk: { S: timestamp.toString() },
                    value: { S: value },
                    timestamp: { S: isoTimestamp }
                }
            })

            await this.dynamoClient.send(putCommand)
            return `Successfully stored value with key "${key}" at ${isoTimestamp}`
        } catch (error) {
            const errorMessage = error instanceof Error ? error.message : String(error)
            throw new Error(`Failed to store value: ${errorMessage}`)
        }
    }
}
/**
 * Tool for retrieving key-value pairs from DynamoDB with version control
 */
class DynamoDBRetrieveTool extends StructuredTool {
    name = 'dynamodb_kv_retrieve'
    description =
        'Retrieve a value by key from DynamoDB. Returns JSON with value and timestamp. Specify which version to get (1=latest, 2=2nd latest, etc).'
    schema = z.object({
        key: z.string().min(1).describe('The key to retrieve the value for'),
        nthLatest: z
            .string()
            .regex(/^\d+$/, 'Must be a positive number')
            .describe(
                'Which version to retrieve: "1" for latest, "2" for 2nd latest, "3" for 3rd latest, etc. Use "1" to get the most recent value.'
            )
            .optional()
            .default('1')
    })
    private readonly dynamoClient: DynamoDBClient
    private readonly tableName: string
    private readonly keyPrefix: string

    constructor(dynamoClient: DynamoDBClient, tableName: string, keyPrefix: string = '') {
        super()
        this.dynamoClient = dynamoClient
        this.tableName = tableName
        this.keyPrefix = keyPrefix
    }

    async _call(input: z.infer<typeof this.schema>): Promise<string> {
        try {
            const { key, nthLatest = '1' } = input
            validateKey(key)
            const fullKey = buildFullKey(key, this.keyPrefix)

            // Convert string to number and validate
            const nthLatestNum = parseInt(nthLatest, 10)
            if (isNaN(nthLatestNum) || nthLatestNum < 1) {
                throw new Error('nthLatest must be a positive number (1 or greater)')
            }

            const queryCommand = new QueryCommand({
                TableName: this.tableName,
                KeyConditionExpression: 'pk = :pk',
                ExpressionAttributeValues: {
                    ':pk': { S: fullKey }
                },
                ScanIndexForward: false, // Sort descending (newest first)
                Limit: nthLatestNum
            })

            const result = await this.dynamoClient.send(queryCommand)

            if (!result.Items || result.Items.length === 0) {
                return JSON.stringify({
                    value: null,
                    timestamp: null
                })
            }

            if (result.Items.length < nthLatestNum) {
                return JSON.stringify({
                    value: null,
                    timestamp: null
                })
            }

            const item = result.Items[nthLatestNum - 1]
            const value = item.value?.S || null
            const timestamp = item.timestamp?.S || item.sk?.S || null

            // Return JSON with value and timestamp
            return JSON.stringify({
                value: value,
                timestamp: timestamp
            })
        } catch (error) {
            const errorMessage = error instanceof Error ? error.message : String(error)
            throw new Error(`Failed to retrieve value: ${errorMessage}`)
        }
    }
}
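Design note: versioning here comes entirely from the key schema. Each store writes `pk = fullKey` and `sk = Date.now()`, so a descending query with `Limit: n` yields the n most recent versions, and index `n - 1` is the nth latest. A hedged usage sketch, assuming a table with partition key `pk` and sort key `sk` and an already-constructed `dynamoClient` (table name illustrative), inside an async context:

```typescript
// Sketch: how the two tools cooperate on a versioned key.
const store = new DynamoDBStoreTool(dynamoClient, 'my-kv-table', 'myapp')
const retrieve = new DynamoDBRetrieveTool(dynamoClient, 'my-kv-table', 'myapp')

await store._call({ key: 'config', value: 'v1' })
await store._call({ key: 'config', value: 'v2' })

// Latest version ("v2"):
const latest = JSON.parse(await retrieve._call({ key: 'config', nthLatest: '1' }))
// Previous version ("v1"):
const prior = JSON.parse(await retrieve._call({ key: 'config', nthLatest: '2' }))
```

One caveat worth flagging: `sk` stores `Date.now()` as a string, so ordering is lexicographic; that stays consistent only while timestamps keep the same digit count (millisecond timestamps do, for the foreseeable future).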

/**
 * Node implementation for AWS DynamoDB KV Storage tools
 */
class AWSDynamoDBKVStorage_Tools implements INode {
    label: string
    name: string
    version: number
    type: string
    icon: string
    category: string
    description: string
    baseClasses: string[]
    credential: INodeParams
    inputs: INodeParams[]

    constructor() {
        this.label = 'AWS DynamoDB KV Storage'
        this.name = 'awsDynamoDBKVStorage'
        this.version = 1.0
        this.type = 'AWSDynamoDBKVStorage'
        this.icon = 'dynamodbkvstorage.svg'
        this.category = 'Tools'
        this.description = 'Store and retrieve versioned text values in AWS DynamoDB'
        this.baseClasses = [this.type, ...getBaseClasses(DynamoDBStoreTool)]
        this.credential = {
            label: 'AWS Credentials',
            name: 'credential',
            type: 'credential',
            credentialNames: ['awsApi']
        }
        this.inputs = [
            {
                label: 'AWS Region',
                name: 'region',
                type: 'options',
                options: AWS_REGIONS,
                default: DEFAULT_AWS_REGION,
                description: 'AWS Region where your DynamoDB tables are located'
            },
            {
                label: 'DynamoDB Table',
                name: 'tableName',
                type: 'asyncOptions',
                loadMethod: 'listTables',
                description: 'Select a DynamoDB table with partition key "pk" and sort key "sk"',
                refresh: true
            },
            {
                label: 'Key Prefix',
                name: 'keyPrefix',
                type: 'string',
                description: 'Optional prefix to add to all keys (e.g., "myapp" would make keys like "myapp#userdata")',
                optional: true,
                additionalParams: true
            },
            {
                label: 'Operation',
                name: 'operation',
                type: 'options',
                options: [
                    { label: 'Store', name: Operation.STORE },
                    { label: 'Retrieve', name: Operation.RETRIEVE }
                ],
                default: Operation.STORE,
                description: 'Choose whether to store or retrieve data'
            }
        ]
    }
    loadMethods: Record<string, (nodeData: INodeData, options?: ICommonObject) => Promise<INodeOptionsValue[]>> = {
        listTables: async (nodeData: INodeData, options?: ICommonObject): Promise<INodeOptionsValue[]> => {
            try {
                const credentials = await getAWSCredentials(nodeData, options ?? {})
                const region = (nodeData.inputs?.region as string) || DEFAULT_AWS_REGION
                const dynamoClient = createDynamoDBClient(credentials, region)

                const listCommand = new ListTablesCommand({})
                const listResponse = await dynamoClient.send(listCommand)

                if (!listResponse.TableNames || listResponse.TableNames.length === 0) {
                    return [
                        {
                            label: 'No tables found',
                            name: ERROR_PLACEHOLDER,
                            description: 'No DynamoDB tables found in this region'
                        }
                    ]
                }

                const validTables: INodeOptionsValue[] = []
                const invalidTables: string[] = []

                // Check tables in parallel for better performance
                const tableChecks = await Promise.allSettled(
                    listResponse.TableNames.map(async (tableName) => {
                        const describeCommand = new DescribeTableCommand({
                            TableName: tableName
                        })
                        const describeResponse = await dynamoClient.send(describeCommand)

                        const keySchema = describeResponse.Table?.KeySchema
                        if (keySchema) {
                            const hasPk = keySchema.some((key) => key.AttributeName === 'pk' && key.KeyType === 'HASH')
                            const hasSk = keySchema.some((key) => key.AttributeName === 'sk' && key.KeyType === 'RANGE')

                            if (hasPk && hasSk) {
                                return {
                                    valid: true,
                                    table: {
                                        label: tableName,
                                        name: tableName,
                                        description: `Table with pk (partition) and sk (sort) keys`
                                    }
                                }
                            }
                        }
                        return { valid: false, tableName }
                    })
                )

                tableChecks.forEach((result) => {
                    if (result.status === 'fulfilled') {
                        if (result.value.valid) {
                            validTables.push(result.value.table!)
                        } else if (result.value.tableName) {
                            invalidTables.push(result.value.tableName)
                        }
                    }
                })

                if (validTables.length === 0) {
                    return [
                        {
                            label: 'No compatible tables found',
                            name: ERROR_PLACEHOLDER,
                            description: `No tables with partition key "pk" and sort key "sk" found. ${
                                invalidTables.length > 0 ? `Found ${invalidTables.length} table(s) with different schema.` : ''
                            } Please create a table with these keys.`
                        }
                    ]
                }

                // Sort tables alphabetically
                validTables.sort((a, b) => a.label.localeCompare(b.label))

                return validTables
            } catch (error) {
                if (error instanceof Error && error.message.includes('AWS Access Key')) {
                    return [
                        {
                            label: 'AWS Credentials Required',
                            name: ERROR_PLACEHOLDER,
                            description: 'Enter AWS Access Key ID and Secret Access Key'
                        }
                    ]
                }
                console.error('Error loading DynamoDB tables:', error)
                return [
                    {
                        label: 'Error Loading Tables',
                        name: ERROR_PLACEHOLDER,
                        description: `Failed to load tables: ${error instanceof Error ? error.message : String(error)}`
                    }
                ]
            }
        }
    }

    async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
        const credentials = await getAWSCredentials(nodeData, options)

        const region = (nodeData.inputs?.region as string) || DEFAULT_AWS_REGION
        const tableName = nodeData.inputs?.tableName as string
        const keyPrefix = (nodeData.inputs?.keyPrefix as string) || ''
        const operation = (nodeData.inputs?.operation as string) || Operation.STORE

        if (!tableName || tableName === ERROR_PLACEHOLDER) {
            throw new Error('Valid DynamoDB Table selection is required')
        }

        // Validate key prefix doesn't contain separator
        if (keyPrefix && keyPrefix.includes(KEY_SEPARATOR)) {
            throw new Error(`Key prefix cannot contain "${KEY_SEPARATOR}" character`)
        }

        const dynamoClient = createDynamoDBClient(credentials, region)

        if (operation === Operation.STORE) {
            return new DynamoDBStoreTool(dynamoClient, tableName, keyPrefix)
        } else {
            return new DynamoDBRetrieveTool(dynamoClient, tableName, keyPrefix)
        }
    }
}

module.exports = { nodeClass: AWSDynamoDBKVStorage_Tools }
@@ -1,29 +0,0 @@
<svg width="32" height="32" viewBox="0 0 32 32" fill="none" xmlns="http://www.w3.org/2000/svg">
    <!-- Variant 8: DynamoDB Hexagon Style -->
    <defs>
        <linearGradient id="dynamoHex" x1="0%" y1="0%" x2="100%" y2="100%">
            <stop offset="0%" style="stop-color:#FF9900;stop-opacity:1" />
            <stop offset="50%" style="stop-color:#EC7211;stop-opacity:1" />
            <stop offset="100%" style="stop-color:#C45500;stop-opacity:1" />
        </linearGradient>
    </defs>

    <!-- Hexagon shape (AWS service style) -->
    <path d="M16 2L27.4 8.5V23.5L16 30L4.6 23.5V8.5Z" fill="url(#dynamoHex)"/>

    <!-- Database with layers -->
    <g transform="translate(16, 14)">
        <ellipse cx="0" cy="-3" rx="7" ry="2.5" fill="#FFFFFF" opacity="0.95"/>
        <rect x="-7" y="-3" width="14" height="8" fill="#FFFFFF" opacity="0.9"/>
        <ellipse cx="0" cy="5" rx="7" ry="2.5" fill="#FFFFFF"/>

        <!-- Data lines -->
        <line x1="-4" y1="0" x2="-2" y2="0" stroke="#FF9900" stroke-width="1.5" stroke-linecap="round"/>
        <line x1="2" y1="0" x2="4" y2="0" stroke="#FF9900" stroke-width="1.5" stroke-linecap="round"/>
        <line x1="-4" y1="2.5" x2="-2" y2="2.5" stroke="#EC7211" stroke-width="1.5" stroke-linecap="round"/>
        <line x1="2" y1="2.5" x2="4" y2="2.5" stroke="#EC7211" stroke-width="1.5" stroke-linecap="round"/>
    </g>

    <!-- KV text -->
    <text x="16" y="26" font-family="Arial" font-size="8" font-weight="bold" fill="#232F3E" text-anchor="middle">K:V</text>
</svg>

Before — Size: 1.4 KiB
@@ -1,146 +0,0 @@
import { Tool } from '@langchain/core/tools'
import { ICommonObject, INode, INodeData, INodeOptionsValue, INodeParams } from '../../../src/Interface'
import { getBaseClasses } from '../../../src/utils'
import { AWS_REGIONS, DEFAULT_AWS_REGION, getAWSCredentials } from '../../../src/awsToolsUtils'
import { SNSClient, ListTopicsCommand, PublishCommand } from '@aws-sdk/client-sns'

class AWSSNSTool extends Tool {
    name = 'aws_sns_publish'
    description = 'Publishes a message to an AWS SNS topic'
    private snsClient: SNSClient
    private topicArn: string

    constructor(snsClient: SNSClient, topicArn: string) {
        super()
        this.snsClient = snsClient
        this.topicArn = topicArn
    }

    async _call(message: string): Promise<string> {
        try {
            const command = new PublishCommand({
                TopicArn: this.topicArn,
                Message: message
            })

            const response = await this.snsClient.send(command)
            return `Successfully published message to SNS topic. MessageId: ${response.MessageId}`
        } catch (error) {
            return `Failed to publish message to SNS: ${error}`
        }
    }
}

class AWSSNS_Tools implements INode {
    label: string
    name: string
    version: number
    type: string
    icon: string
    category: string
    description: string
    baseClasses: string[]
    credential: INodeParams
    inputs: INodeParams[]

    constructor() {
        this.label = 'AWS SNS'
        this.name = 'awsSNS'
        this.version = 1.0
        this.type = 'AWSSNS'
        this.icon = 'awssns.svg'
        this.category = 'Tools'
        this.description = 'Publish messages to AWS SNS topics'
        this.baseClasses = [this.type, ...getBaseClasses(AWSSNSTool)]
        this.credential = {
            label: 'AWS Credentials',
            name: 'credential',
            type: 'credential',
            credentialNames: ['awsApi']
        }
        this.inputs = [
            {
                label: 'AWS Region',
                name: 'region',
                type: 'options',
                options: AWS_REGIONS,
                default: DEFAULT_AWS_REGION,
                description: 'AWS Region where your SNS topics are located'
            },
            {
                label: 'SNS Topic',
                name: 'topicArn',
                type: 'asyncOptions',
                loadMethod: 'listTopics',
                description: 'Select the SNS topic to publish to',
                refresh: true
            }
        ]
    }

    //@ts-ignore
    loadMethods = {
        listTopics: async (nodeData: INodeData, options?: ICommonObject): Promise<INodeOptionsValue[]> => {
            try {
                const credentials = await getAWSCredentials(nodeData, options ?? {})
                const region = (nodeData.inputs?.region as string) || DEFAULT_AWS_REGION

                const snsClient = new SNSClient({
                    region: region,
                    credentials: credentials
                })

                const command = new ListTopicsCommand({})
                const response = await snsClient.send(command)

                if (!response.Topics || response.Topics.length === 0) {
                    return [
                        {
                            label: 'No topics found',
                            name: 'placeholder',
                            description: 'No SNS topics found in this region'
                        }
                    ]
                }

                return response.Topics.map((topic) => {
                    const topicArn = topic.TopicArn || ''
                    const topicName = topicArn.split(':').pop() || topicArn
                    return {
                        label: topicName,
                        name: topicArn,
                        description: topicArn
                    }
                })
            } catch (error) {
                console.error('Error loading SNS topics:', error)
                return [
                    {
                        label: 'AWS Credentials Required',
                        name: 'placeholder',
                        description: 'Enter AWS Access Key ID and Secret Access Key'
                    }
                ]
            }
        }
    }

    async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
        const credentials = await getAWSCredentials(nodeData, options)
        const region = (nodeData.inputs?.region as string) || DEFAULT_AWS_REGION
        const topicArn = nodeData.inputs?.topicArn as string

        if (!topicArn) {
            throw new Error('SNS Topic ARN is required')
        }

        const snsClient = new SNSClient({
            region: region,
            credentials: credentials
        })

        return new AWSSNSTool(snsClient, topicArn)
    }
}

module.exports = { nodeClass: AWSSNS_Tools }

@@ -1 +0,0 @@
<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" viewBox="0 0 85 85" fill="#fff" fill-rule="evenodd" stroke="#000" stroke-linecap="round" stroke-linejoin="round"><use xlink:href="#A" x="2.5" y="2.5"/><symbol id="A" overflow="visible"><g stroke="none"><path d="M30.904 72.366l-8.268-2.304-7.813-8.891 8.711.722 7.37 10.474z" fill="#b8852e"/><path d="M30.904 72.365l5.913-12.36-5.528-.78-7.755 2.665 7.37 10.474z" fill="#d9a741"/><path d="M11.79 66.511l-4.641-1.303-5.119-5.831 5.434.85 4.326 6.284z" fill="#b8852e"/><path d="M11.791 66.512l3.335-6.959-3.114-.442-4.548 1.117 4.327 6.284z" fill="#d9a741"/><path d="M0 59.576l4.746.966L28 40.361 4.746 17.76 0 20.146v39.43z" fill="#876929"/><path d="M23.102 21.74v37.102L4.746 60.542V17.76l18.356 3.98z" fill="#d9a741"/><path d="M10.612 61.798l8.14 1.641 22.484-25.988-22.484-26.756-8.14 4.108v46.995z" fill="#876929"/><path d="M65.236 25.429v32.04l-46.484 5.97V10.695l46.484 14.734z" fill="#d9a741"/><path d="M56.828 80l-11.743-3.282-11.102-12.639 12.385 1.036L56.828 80z" fill="#b8852e"/><path d="M56.828 80l8.408-17.562-7.86-1.117-11.009 3.794L56.828 80z" fill="#d9a741"/><path d="M28 65.173l11.977 2.304 16.245-33.739L39.977 0 28 6.04v59.133z" fill="#876929"/><path d="M39.977 67.478L80 59.482V19.971L39.977 0v67.478z" fill="#d9a741"/></g></symbol></svg>

Before — Size: 1.3 KiB
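For orientation, the deleted SNS node wraps a single `PublishCommand`. A minimal hedged sketch of the equivalent direct SDK call, inside an async context (topic ARN and region illustrative):

```typescript
import { SNSClient, PublishCommand } from '@aws-sdk/client-sns'

// Sketch: publish one message to a topic; PublishCommand returns a MessageId on success.
const client = new SNSClient({ region: 'us-east-1' }) // credentials resolved from the default provider chain here
const out = await client.send(
    new PublishCommand({
        TopicArn: 'arn:aws:sns:us-east-1:123456789012:example-topic', // illustrative ARN
        Message: 'hello from Flowise'
    })
)
console.log(out.MessageId)
```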
@@ -4,13 +4,7 @@ import { RunnableConfig } from '@langchain/core/runnables'
import { CallbackManagerForToolRun, Callbacks, CallbackManager, parseCallbackConfigArg } from '@langchain/core/callbacks/manager'
import { StructuredTool } from '@langchain/core/tools'
import { ICommonObject, IDatabaseEntity, INode, INodeData, INodeOptionsValue, INodeParams } from '../../../src/Interface'
-import {
-    getCredentialData,
-    getCredentialParam,
-    executeJavaScriptCode,
-    createCodeExecutionSandbox,
-    parseWithTypeConversion
-} from '../../../src/utils'
+import { getCredentialData, getCredentialParam, executeJavaScriptCode, createCodeExecutionSandbox } from '../../../src/utils'
import { isValidUUID, isValidURL } from '../../../src/validator'
import { v4 as uuidv4 } from 'uuid'

@@ -75,8 +69,7 @@ class AgentAsTool_Tools implements INode {
            description: 'Override the config passed to the Agentflow.',
            type: 'json',
            optional: true,
-           additionalParams: true,
-           acceptVariable: true
+           additionalParams: true
        },
        {
            label: 'Base URL',

@@ -279,7 +272,7 @@ class AgentflowTool extends StructuredTool {
        }
        let parsed
        try {
-           parsed = await parseWithTypeConversion(this.schema, arg)
+           parsed = await this.schema.parseAsync(arg)
        } catch (e) {
            throw new Error(`Received tool input did not match expected schema: ${JSON.stringify(arg)}`)
        }
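Context on the parsing change: `parseWithTypeConversion` (main's utility, whose implementation is not shown in this diff) replaces a plain `schema.parseAsync`, presumably to coerce stringly-typed LLM tool inputs before validation. A hedged sketch of how such a coercion layer could be built with zod — an illustrative reconstruction, not the utility's actual code:

```typescript
import { z } from 'zod'

// Sketch: coerce common string inputs ("42", "true") before strict validation,
// since LLMs frequently emit numbers and booleans as strings.
// A real implementation would consult the schema per field rather than coerce blindly.
async function parseWithCoercion<T extends z.ZodTypeAny>(schema: T, input: unknown): Promise<z.infer<T>> {
    const coerced = JSON.parse(JSON.stringify(input), (_k, v) => {
        if (typeof v !== 'string') return v
        if (v === 'true') return true
        if (v === 'false') return false
        if (/^-?\d+(\.\d+)?$/.test(v)) return Number(v)
        return v
    })
    // Fall back to the raw input if coercion broke validation.
    return schema.parseAsync(coerced).catch(() => schema.parseAsync(input))
}
```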
@@ -369,14 +362,11 @@ try {

    const sandbox = createCodeExecutionSandbox('', [], {}, additionalSandbox)

-   let response = await executeJavaScriptCode(code, sandbox, {
-       useSandbox: false
+   const response = await executeJavaScriptCode(code, sandbox, {
+       useSandbox: false,
+       timeout: 10000
    })

-   if (typeof response === 'object') {
-       response = JSON.stringify(response)
-   }
-
    return response
    }
}
@@ -4,13 +4,7 @@ import { RunnableConfig } from '@langchain/core/runnables'
import { CallbackManagerForToolRun, Callbacks, CallbackManager, parseCallbackConfigArg } from '@langchain/core/callbacks/manager'
import { StructuredTool } from '@langchain/core/tools'
import { ICommonObject, IDatabaseEntity, INode, INodeData, INodeOptionsValue, INodeParams } from '../../../src/Interface'
-import {
-    getCredentialData,
-    getCredentialParam,
-    executeJavaScriptCode,
-    createCodeExecutionSandbox,
-    parseWithTypeConversion
-} from '../../../src/utils'
+import { getCredentialData, getCredentialParam, executeJavaScriptCode, createCodeExecutionSandbox } from '../../../src/utils'
import { isValidUUID, isValidURL } from '../../../src/validator'
import { v4 as uuidv4 } from 'uuid'

@@ -75,8 +69,7 @@ class ChatflowTool_Tools implements INode {
            description: 'Override the config passed to the Chatflow.',
            type: 'json',
            optional: true,
-           additionalParams: true,
-           acceptVariable: true
+           additionalParams: true
        },
        {
            label: 'Base URL',

@@ -287,7 +280,7 @@ class ChatflowTool extends StructuredTool {
        }
        let parsed
        try {
-           parsed = await parseWithTypeConversion(this.schema, arg)
+           parsed = await this.schema.parseAsync(arg)
        } catch (e) {
            throw new Error(`Received tool input did not match expected schema: ${JSON.stringify(arg)}`)
        }

@@ -377,14 +370,11 @@ try {

    const sandbox = createCodeExecutionSandbox('', [], {}, additionalSandbox)

-   let response = await executeJavaScriptCode(code, sandbox, {
-       useSandbox: false
+   const response = await executeJavaScriptCode(code, sandbox, {
+       useSandbox: false,
+       timeout: 10000
    })

-   if (typeof response === 'object') {
-       response = JSON.stringify(response)
-   }
-
    return response
    }
}
@@ -1,5 +1,5 @@
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
-import { getBaseClasses, getCredentialData, getCredentialParam, parseWithTypeConversion } from '../../../src/utils'
+import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { StructuredTool, ToolInputParsingException, ToolParams } from '@langchain/core/tools'
import { Sandbox } from '@e2b/code-interpreter'
import { z } from 'zod'

@@ -159,7 +159,7 @@ export class E2BTool extends StructuredTool {
        }
        let parsed
        try {
-           parsed = await parseWithTypeConversion(this.schema, arg)
+           parsed = await this.schema.parseAsync(arg)
        } catch (e) {
            throw new ToolInputParsingException(`Received tool input did not match expected schema`, JSON.stringify(arg))
        }
@@ -2,7 +2,7 @@ import { z } from 'zod'
import { RunnableConfig } from '@langchain/core/runnables'
import { StructuredTool, ToolParams } from '@langchain/core/tools'
import { CallbackManagerForToolRun, Callbacks, CallbackManager, parseCallbackConfigArg } from '@langchain/core/callbacks/manager'
-import { executeJavaScriptCode, createCodeExecutionSandbox, parseWithTypeConversion } from '../../../src/utils'
+import { executeJavaScriptCode, createCodeExecutionSandbox } from '../../../src/utils'
import { ICommonObject } from '../../../src/Interface'

class ToolInputParsingException extends Error {

@@ -68,7 +68,7 @@ export class DynamicStructuredTool<
        }
        let parsed
        try {
-           parsed = await parseWithTypeConversion(this.schema, arg)
+           parsed = await this.schema.parseAsync(arg)
        } catch (e) {
            throw new ToolInputParsingException(`Received tool input did not match expected schema`, JSON.stringify(arg))
        }

@@ -124,11 +124,9 @@ export class DynamicStructuredTool<

        const sandbox = createCodeExecutionSandbox('', this.variables || [], flow, additionalSandbox)

-       let response = await executeJavaScriptCode(this.code, sandbox)
-
-       if (typeof response === 'object') {
-           response = JSON.stringify(response)
-       }
+       const response = await executeJavaScriptCode(this.code, sandbox, {
+           timeout: 10000
+       })

        return response
    }
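One behavioral difference worth flagging in review: on the left side, object results from the sandbox were stringified before being returned; the right side returns them as-is. If downstream consumers expect a string, a caller-side guard like the following would restore the old behavior — a hedged sketch, not part of the diff:

```typescript
// Sketch: normalize a sandbox result to a string for consumers that expect one.
function normalizeToolOutput(response: unknown): string {
    // Mirrors the removed branch: objects (and arrays) become JSON text.
    if (typeof response === 'object' && response !== null) {
        return JSON.stringify(response)
    }
    return String(response)
}
```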