Compare commits
241 Commits
bugfix/Ove
...
main
| Author | SHA1 | Date |
|---|---|---|
|
|
465005a503 | |
|
|
e6e0c2d07b | |
|
|
660a8e357a | |
|
|
113180d03b | |
|
|
069ba28bc0 | |
|
|
20db1597a4 | |
|
|
478a294095 | |
|
|
6a59af11e6 | |
|
|
562370b8e2 | |
|
|
4e92db6910 | |
|
|
7cc2c13694 | |
|
|
3ad2b3a559 | |
|
|
da32fc7167 | |
|
|
315e3aedc3 | |
|
|
9dbb4bf623 | |
|
|
1f3f7a7194 | |
|
|
4d79653741 | |
|
|
03ef28afbc | |
|
|
0cc7b3036e | |
|
|
097404f24a | |
|
|
2029588d4d | |
|
|
c9db81096a | |
|
|
b5f7fac015 | |
|
|
ca22160361 | |
|
|
ffe69936dc | |
|
|
b8f7a200fb | |
|
|
2f2b6e1713 | |
|
|
4e1fac501f | |
|
|
888994bc8f | |
|
|
3cab803918 | |
|
|
366d38b861 | |
|
|
2414057c08 | |
|
|
4a642f02d0 | |
|
|
ceb0512e2f | |
|
|
94cae3b66f | |
|
|
3fafd15a80 | |
|
|
9ff3d653ae | |
|
|
0dc14b5cd3 | |
|
|
b9a020dc70 | |
|
|
761ffe6851 | |
|
|
6d3755d16e | |
|
|
faf0a0a315 | |
|
|
4624e15c2e | |
|
|
a7b6f9b208 | |
|
|
2bd96090f0 | |
|
|
346a55b6d8 | |
|
|
03c1750d73 | |
|
|
ec1762b10f | |
|
|
02bb2ba62b | |
|
|
fdb6422aef | |
|
|
fe6f5f88a5 | |
|
|
82124d4871 | |
|
|
3b8b21342d | |
|
|
679a0409f5 | |
|
|
1fa9303d7c | |
|
|
75eb5f57aa | |
|
|
3d731664f9 | |
|
|
0f8d45d25c | |
|
|
3e8db185dd | |
|
|
6f5b0d9906 | |
|
|
fd7fc2f4d7 | |
|
|
a92f7dfc3f | |
|
|
80224275d9 | |
|
|
4417102f6c | |
|
|
0149688a16 | |
|
|
f3d5b7766d | |
|
|
97515989a2 | |
|
|
601de76aea | |
|
|
c99d870c82 | |
|
|
5df09a15b8 | |
|
|
e925801b63 | |
|
|
eed7581d0e | |
|
|
1ae1638ed9 | |
|
|
0a3c8b94ab | |
|
|
9554b1a8e3 | |
|
|
ac565b8981 | |
|
|
37ef6ffa50 | |
|
|
2ae4678da4 | |
|
|
6f94d61f22 | |
|
|
62d34066c9 | |
|
|
f3f2eabb89 | |
|
|
bff859520a | |
|
|
4111ec31b0 | |
|
|
7ab586c865 | |
|
|
ac794ab6eb | |
|
|
1fb12cd931 | |
|
|
a0dca552a2 | |
|
|
a38d37f4b5 | |
|
|
1a410d84ac | |
|
|
7a50755546 | |
|
|
ac252516f8 | |
|
|
6fe5b98d6f | |
|
|
9b8fee3d8f | |
|
|
8d0a198e2f | |
|
|
580957e4aa | |
|
|
a86f618186 | |
|
|
8c1175225f | |
|
|
28b0174eea | |
|
|
b501932491 | |
|
|
6890ced939 | |
|
|
0065e8f1a0 | |
|
|
31434e52ce | |
|
|
84a0a45ff7 | |
|
|
dd284e37c3 | |
|
|
b5da234ce7 | |
|
|
e48f28d13d | |
|
|
cf6539cd3f | |
|
|
011d60332e | |
|
|
e9d4c3b54b | |
|
|
41131dfac3 | |
|
|
42152dd036 | |
|
|
fc50f2308b | |
|
|
f560768133 | |
|
|
c4322ce70b | |
|
|
79023c8909 | |
|
|
05763db8d3 | |
|
|
6e291cf05d | |
|
|
89a0f23fe5 | |
|
|
c00ae78488 | |
|
|
b2dcdab5b9 | |
|
|
6885c38d18 | |
|
|
6e2f2df269 | |
|
|
4af067a444 | |
|
|
e002e617df | |
|
|
4987a2880d | |
|
|
736c2b11a1 | |
|
|
6fb9bb559f | |
|
|
32bf030924 | |
|
|
099cf481b4 | |
|
|
113086a2fb | |
|
|
c17dd1f141 | |
|
|
42fed5713e | |
|
|
449e8113e0 | |
|
|
9e178d6887 | |
|
|
2ab20f71d9 | |
|
|
b026671887 | |
|
|
23cb5f7801 | |
|
|
763e33b073 | |
|
|
a5a728fd06 | |
|
|
9b3971d8d8 | |
|
|
af1464f7c2 | |
|
|
bf1ddc3be5 | |
|
|
ad0679801a | |
|
|
9cac8d7a00 | |
|
|
e5381f5090 | |
|
|
b126472816 | |
|
|
4ce0851858 | |
|
|
44087bc706 | |
|
|
55f8f69060 | |
|
|
6e44051bea | |
|
|
7a74e33be1 | |
|
|
e99aecb473 | |
|
|
ba6a602cbe | |
|
|
fbae51b260 | |
|
|
114a844964 | |
|
|
68dc041d02 | |
|
|
32cd06cd28 | |
|
|
32e5b13c46 | |
|
|
db4de4552a | |
|
|
9c070c7205 | |
|
|
fddd40a5cd | |
|
|
bbcfb5ab63 | |
|
|
fa15b6873d | |
|
|
9181ae2879 | |
|
|
3b1b4dc5f9 | |
|
|
b608219642 | |
|
|
3187377c61 | |
|
|
feb899ab19 | |
|
|
9e743e4aa1 | |
|
|
141c49013a | |
|
|
b024cd61f4 | |
|
|
78144f37b5 | |
|
|
e3e4d6a904 | |
|
|
5930f1119c | |
|
|
8aa2507ed9 | |
|
|
ff9a2a65b5 | |
|
|
d29db16bfc | |
|
|
28fec16873 | |
|
|
3a33bfadf0 | |
|
|
89a806f722 | |
|
|
ed27ad0c58 | |
|
|
049596a7b5 | |
|
|
5259bab778 | |
|
|
9b54aa8879 | |
|
|
0998bf4327 | |
|
|
e8dac2048f | |
|
|
498129e9d2 | |
|
|
46816c7c1e | |
|
|
bbb03b7b3b | |
|
|
aea2b184da | |
|
|
8846fd14e6 | |
|
|
5ae6ae2916 | |
|
|
9a6fd97f2c | |
|
|
221ac9b25d | |
|
|
caffad0fb0 | |
|
|
8562d4a563 | |
|
|
d272683a98 | |
|
|
00342bde88 | |
|
|
a3f47af027 | |
|
|
d081221a97 | |
|
|
f2bd83252d | |
|
|
910a3c5229 | |
|
|
d77919ba50 | |
|
|
e8c36b6894 | |
|
|
efc9ac222f | |
|
|
dca91b979b | |
|
|
9a06a85a8d | |
|
|
96a57a58e7 | |
|
|
fbe9f34a60 | |
|
|
2b7a074c8b | |
|
|
cc4a773010 | |
|
|
d584c0b700 | |
|
|
ebf222731e | |
|
|
2605a1f74e | |
|
|
2e1999e6f1 | |
|
|
5e5b2a18e2 | |
|
|
cf965f3d8e | |
|
|
0ac01d3cbb | |
|
|
1bed5a264e | |
|
|
8a6b95ef0e | |
|
|
9839009823 | |
|
|
791c1e3274 | |
|
|
e3eeb5d8a8 | |
|
|
9d438529a6 | |
|
|
ee5ab1bd6d | |
|
|
849b94b049 | |
|
|
14fc1b4d20 | |
|
|
bf05f25f7e | |
|
|
6baec93860 | |
|
|
30e8317327 | |
|
|
aea2801b8c | |
|
|
a25c5c4514 | |
|
|
768de6140c | |
|
|
0627693133 | |
|
|
bbf6970600 | |
|
|
9b60cf1234 | |
|
|
be7599542b | |
|
|
4c3b729b79 | |
|
|
e326bc8f49 | |
|
|
e7553a1c4e | |
|
|
9efb70e04c |
|
|
@ -0,0 +1,72 @@
|
|||
name: Docker Image CI - Docker Hub
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
node_version:
|
||||
description: 'Node.js version to build this image with.'
|
||||
type: choice
|
||||
required: true
|
||||
default: '20'
|
||||
options:
|
||||
- '20'
|
||||
tag_version:
|
||||
description: 'Tag version of the image to be pushed.'
|
||||
type: string
|
||||
required: true
|
||||
default: 'latest'
|
||||
|
||||
jobs:
|
||||
docker:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Set default values
|
||||
id: defaults
|
||||
run: |
|
||||
echo "node_version=${{ github.event.inputs.node_version || '20' }}" >> $GITHUB_OUTPUT
|
||||
echo "tag_version=${{ github.event.inputs.tag_version || 'latest' }}" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4.1.1
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v3.0.0
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3.0.0
|
||||
|
||||
- name: Login to Docker Hub
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
|
||||
# -------------------------
|
||||
# Build and push main image
|
||||
# -------------------------
|
||||
- name: Build and push main image
|
||||
uses: docker/build-push-action@v5.3.0
|
||||
with:
|
||||
context: .
|
||||
file: ./docker/Dockerfile
|
||||
build-args: |
|
||||
NODE_VERSION=${{ steps.defaults.outputs.node_version }}
|
||||
platforms: linux/amd64,linux/arm64
|
||||
push: true
|
||||
tags: |
|
||||
flowiseai/flowise:${{ steps.defaults.outputs.tag_version }}
|
||||
|
||||
# -------------------------
|
||||
# Build and push worker image
|
||||
# -------------------------
|
||||
- name: Build and push worker image
|
||||
uses: docker/build-push-action@v5.3.0
|
||||
with:
|
||||
context: .
|
||||
file: docker/worker/Dockerfile
|
||||
build-args: |
|
||||
NODE_VERSION=${{ steps.defaults.outputs.node_version }}
|
||||
platforms: linux/amd64,linux/arm64
|
||||
push: true
|
||||
tags: |
|
||||
flowiseai/flowise-worker:${{ steps.defaults.outputs.tag_version }}
|
||||
|
|
@ -0,0 +1,73 @@
|
|||
name: Docker Image CI - AWS ECR
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
environment:
|
||||
description: 'Environment to push the image to.'
|
||||
required: true
|
||||
default: 'dev'
|
||||
type: choice
|
||||
options:
|
||||
- dev
|
||||
- prod
|
||||
node_version:
|
||||
description: 'Node.js version to build this image with.'
|
||||
type: choice
|
||||
required: true
|
||||
default: '20'
|
||||
options:
|
||||
- '20'
|
||||
tag_version:
|
||||
description: 'Tag version of the image to be pushed.'
|
||||
type: string
|
||||
required: true
|
||||
default: 'latest'
|
||||
|
||||
jobs:
|
||||
docker:
|
||||
runs-on: ubuntu-latest
|
||||
environment: ${{ github.event.inputs.environment }}
|
||||
steps:
|
||||
- name: Set default values
|
||||
id: defaults
|
||||
run: |
|
||||
echo "node_version=${{ github.event.inputs.node_version || '20' }}" >> $GITHUB_OUTPUT
|
||||
echo "tag_version=${{ github.event.inputs.tag_version || 'latest' }}" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4.1.1
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v3.0.0
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3.0.0
|
||||
|
||||
- name: Configure AWS Credentials
|
||||
uses: aws-actions/configure-aws-credentials@v3
|
||||
with:
|
||||
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
|
||||
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
|
||||
aws-region: ${{ secrets.AWS_REGION }}
|
||||
|
||||
- name: Login to Amazon ECR
|
||||
uses: aws-actions/amazon-ecr-login@v1
|
||||
|
||||
# -------------------------
|
||||
# Build and push main image
|
||||
# -------------------------
|
||||
- name: Build and push main image
|
||||
uses: docker/build-push-action@v5.3.0
|
||||
with:
|
||||
context: .
|
||||
file: Dockerfile
|
||||
build-args: |
|
||||
NODE_VERSION=${{ steps.defaults.outputs.node_version }}
|
||||
platforms: linux/amd64,linux/arm64
|
||||
push: true
|
||||
tags: |
|
||||
${{ format('{0}.dkr.ecr.{1}.amazonaws.com/flowise:{2}',
|
||||
secrets.AWS_ACCOUNT_ID,
|
||||
secrets.AWS_REGION,
|
||||
steps.defaults.outputs.tag_version) }}
|
||||
|
|
@ -1,114 +0,0 @@
|
|||
name: Docker Image CI
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
registry:
|
||||
description: 'Container Registry to push the image to.'
|
||||
type: choice
|
||||
required: true
|
||||
default: 'aws_ecr'
|
||||
options:
|
||||
- 'docker_hub'
|
||||
- 'aws_ecr'
|
||||
environment:
|
||||
description: 'Environment to push the image to.'
|
||||
required: true
|
||||
default: 'dev'
|
||||
type: choice
|
||||
options:
|
||||
- dev
|
||||
- prod
|
||||
image_type:
|
||||
description: 'Type of image to build and push.'
|
||||
type: choice
|
||||
required: true
|
||||
default: 'main'
|
||||
options:
|
||||
- 'main'
|
||||
- 'worker'
|
||||
node_version:
|
||||
description: 'Node.js version to build this image with.'
|
||||
type: choice
|
||||
required: true
|
||||
default: '20'
|
||||
options:
|
||||
- '20'
|
||||
tag_version:
|
||||
description: 'Tag version of the image to be pushed.'
|
||||
type: string
|
||||
required: true
|
||||
default: 'latest'
|
||||
|
||||
jobs:
|
||||
docker:
|
||||
runs-on: ubuntu-latest
|
||||
environment: ${{ github.event.inputs.environment }}
|
||||
steps:
|
||||
- name: Set default values
|
||||
id: defaults
|
||||
run: |
|
||||
echo "registry=${{ github.event.inputs.registry || 'aws_ecr' }}" >> $GITHUB_OUTPUT
|
||||
echo "image_type=${{ github.event.inputs.image_type || 'main' }}" >> $GITHUB_OUTPUT
|
||||
echo "node_version=${{ github.event.inputs.node_version || '20' }}" >> $GITHUB_OUTPUT
|
||||
echo "tag_version=${{ github.event.inputs.tag_version || 'latest' }}" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4.1.1
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v3.0.0
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3.0.0
|
||||
|
||||
# ------------------------
|
||||
# Login Steps (conditional)
|
||||
# ------------------------
|
||||
- name: Login to Docker Hub
|
||||
if: steps.defaults.outputs.registry == 'docker_hub'
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
|
||||
- name: Configure AWS Credentials
|
||||
if: steps.defaults.outputs.registry == 'aws_ecr'
|
||||
uses: aws-actions/configure-aws-credentials@v3
|
||||
with:
|
||||
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
|
||||
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
|
||||
aws-region: ${{ secrets.AWS_REGION }}
|
||||
|
||||
- name: Login to Amazon ECR
|
||||
if: steps.defaults.outputs.registry == 'aws_ecr'
|
||||
uses: aws-actions/amazon-ecr-login@v1
|
||||
|
||||
# -------------------------
|
||||
# Build and push (conditional tags)
|
||||
# -------------------------
|
||||
- name: Build and push
|
||||
uses: docker/build-push-action@v5.3.0
|
||||
with:
|
||||
context: .
|
||||
file: |
|
||||
${{
|
||||
steps.defaults.outputs.image_type == 'worker' && 'docker/worker/Dockerfile' ||
|
||||
(steps.defaults.outputs.registry == 'docker_hub' && './docker/Dockerfile' || 'Dockerfile')
|
||||
}}
|
||||
build-args: |
|
||||
NODE_VERSION=${{ steps.defaults.outputs.node_version }}
|
||||
platforms: linux/amd64,linux/arm64
|
||||
push: true
|
||||
tags: |
|
||||
${{
|
||||
steps.defaults.outputs.registry == 'docker_hub' &&
|
||||
format('flowiseai/flowise{0}:{1}',
|
||||
steps.defaults.outputs.image_type == 'worker' && '-worker' || '',
|
||||
steps.defaults.outputs.tag_version) ||
|
||||
format('{0}.dkr.ecr.{1}.amazonaws.com/flowise{2}:{3}',
|
||||
secrets.AWS_ACCOUNT_ID,
|
||||
secrets.AWS_REGION,
|
||||
steps.defaults.outputs.image_type == 'worker' && '-worker' || '',
|
||||
steps.defaults.outputs.tag_version)
|
||||
}}
|
||||
|
|
@ -114,50 +114,52 @@ Flowise has 3 different modules in a single mono repository.
|
|||
|
||||
to make sure everything works fine in production.
|
||||
|
||||
11. Commit code and submit Pull Request from forked branch pointing to [Flowise master](https://github.com/FlowiseAI/Flowise/tree/master).
|
||||
11. Commit code and submit Pull Request from forked branch pointing to [Flowise main](https://github.com/FlowiseAI/Flowise/tree/main).
|
||||
|
||||
## 🌱 Env Variables
|
||||
|
||||
Flowise support different environment variables to configure your instance. You can specify the following variables in the `.env` file inside `packages/server` folder. Read [more](https://docs.flowiseai.com/environment-variables)
|
||||
|
||||
| Variable | Description | Type | Default |
|
||||
| ---------------------------------- | -------------------------------------------------------------------------------- | ------------------------------------------------ | ----------------------------------- |
|
||||
| PORT | The HTTP port Flowise runs on | Number | 3000 |
|
||||
| CORS_ORIGINS | The allowed origins for all cross-origin HTTP calls | String | |
|
||||
| IFRAME_ORIGINS | The allowed origins for iframe src embedding | String | |
|
||||
| FLOWISE_FILE_SIZE_LIMIT | Upload File Size Limit | String | 50mb |
|
||||
| DEBUG | Print logs from components | Boolean | |
|
||||
| LOG_PATH | Location where log files are stored | String | `your-path/Flowise/logs` |
|
||||
| LOG_LEVEL | Different levels of logs | Enum String: `error`, `info`, `verbose`, `debug` | `info` |
|
||||
| LOG_JSON_SPACES | Spaces to beautify JSON logs | | 2 |
|
||||
| TOOL_FUNCTION_BUILTIN_DEP | NodeJS built-in modules to be used for Tool Function | String | |
|
||||
| TOOL_FUNCTION_EXTERNAL_DEP | External modules to be used for Tool Function | String | |
|
||||
| DATABASE_TYPE | Type of database to store the flowise data | Enum String: `sqlite`, `mysql`, `postgres` | `sqlite` |
|
||||
| DATABASE_PATH | Location where database is saved (When DATABASE_TYPE is sqlite) | String | `your-home-dir/.flowise` |
|
||||
| DATABASE_HOST | Host URL or IP address (When DATABASE_TYPE is not sqlite) | String | |
|
||||
| DATABASE_PORT | Database port (When DATABASE_TYPE is not sqlite) | String | |
|
||||
| DATABASE_USER | Database username (When DATABASE_TYPE is not sqlite) | String | |
|
||||
| DATABASE_PASSWORD | Database password (When DATABASE_TYPE is not sqlite) | String | |
|
||||
| DATABASE_NAME | Database name (When DATABASE_TYPE is not sqlite) | String | |
|
||||
| DATABASE_SSL_KEY_BASE64 | Database SSL client cert in base64 (takes priority over DATABASE_SSL) | Boolean | false |
|
||||
| DATABASE_SSL | Database connection overssl (When DATABASE_TYPE is postgre) | Boolean | false |
|
||||
| SECRETKEY_PATH | Location where encryption key (used to encrypt/decrypt credentials) is saved | String | `your-path/Flowise/packages/server` |
|
||||
| FLOWISE_SECRETKEY_OVERWRITE | Encryption key to be used instead of the key stored in SECRETKEY_PATH | String | |
|
||||
| MODEL_LIST_CONFIG_JSON | File path to load list of models from your local config file | String | `/your_model_list_config_file_path` |
|
||||
| STORAGE_TYPE | Type of storage for uploaded files. default is `local` | Enum String: `s3`, `local`, `gcs` | `local` |
|
||||
| BLOB_STORAGE_PATH | Local folder path where uploaded files are stored when `STORAGE_TYPE` is `local` | String | `your-home-dir/.flowise/storage` |
|
||||
| S3_STORAGE_BUCKET_NAME | Bucket name to hold the uploaded files when `STORAGE_TYPE` is `s3` | String | |
|
||||
| S3_STORAGE_ACCESS_KEY_ID | AWS Access Key | String | |
|
||||
| S3_STORAGE_SECRET_ACCESS_KEY | AWS Secret Key | String | |
|
||||
| S3_STORAGE_REGION | Region for S3 bucket | String | |
|
||||
| S3_ENDPOINT_URL | Custom Endpoint for S3 | String | |
|
||||
| S3_FORCE_PATH_STYLE | Set this to true to force the request to use path-style addressing | Boolean | false |
|
||||
| GOOGLE_CLOUD_STORAGE_PROJ_ID | The GCP project id for cloud storage & logging when `STORAGE_TYPE` is `gcs` | String | |
|
||||
| GOOGLE_CLOUD_STORAGE_CREDENTIAL | The credential key file path when `STORAGE_TYPE` is `gcs` | String | |
|
||||
| GOOGLE_CLOUD_STORAGE_BUCKET_NAME | Bucket name to hold the uploaded files when `STORAGE_TYPE` is `gcs` | String | |
|
||||
| GOOGLE_CLOUD_UNIFORM_BUCKET_ACCESS | Enable uniform bucket level access when `STORAGE_TYPE` is `gcs` | Boolean | true |
|
||||
| SHOW_COMMUNITY_NODES | Show nodes created by community | Boolean | |
|
||||
| DISABLED_NODES | Hide nodes from UI (comma separated list of node names) | String | |
|
||||
| Variable | Description | Type | Default |
|
||||
| ---------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------ | ----------------------------------- |
|
||||
| PORT | The HTTP port Flowise runs on | Number | 3000 |
|
||||
| CORS_ORIGINS | The allowed origins for all cross-origin HTTP calls | String | |
|
||||
| IFRAME_ORIGINS | The allowed origins for iframe src embedding | String | |
|
||||
| FLOWISE_FILE_SIZE_LIMIT | Upload File Size Limit | String | 50mb |
|
||||
| DEBUG | Print logs from components | Boolean | |
|
||||
| LOG_PATH | Location where log files are stored | String | `your-path/Flowise/logs` |
|
||||
| LOG_LEVEL | Different levels of logs | Enum String: `error`, `info`, `verbose`, `debug` | `info` |
|
||||
| LOG_JSON_SPACES | Spaces to beautify JSON logs | | 2 |
|
||||
| TOOL_FUNCTION_BUILTIN_DEP | NodeJS built-in modules to be used for Custom Tool or Function | String | |
|
||||
| TOOL_FUNCTION_EXTERNAL_DEP | External modules to be used for Custom Tool or Function | String | |
|
||||
| ALLOW_BUILTIN_DEP | Allow project dependencies to be used for Custom Tool or Function | Boolean | false |
|
||||
| DATABASE_TYPE | Type of database to store the flowise data | Enum String: `sqlite`, `mysql`, `postgres` | `sqlite` |
|
||||
| DATABASE_PATH | Location where database is saved (When DATABASE_TYPE is sqlite) | String | `your-home-dir/.flowise` |
|
||||
| DATABASE_HOST | Host URL or IP address (When DATABASE_TYPE is not sqlite) | String | |
|
||||
| DATABASE_PORT | Database port (When DATABASE_TYPE is not sqlite) | String | |
|
||||
| DATABASE_USER | Database username (When DATABASE_TYPE is not sqlite) | String | |
|
||||
| DATABASE_PASSWORD | Database password (When DATABASE_TYPE is not sqlite) | String | |
|
||||
| DATABASE_NAME | Database name (When DATABASE_TYPE is not sqlite) | String | |
|
||||
| DATABASE_SSL_KEY_BASE64 | Database SSL client cert in base64 (takes priority over DATABASE_SSL) | Boolean | false |
|
||||
| DATABASE_SSL | Database connection overssl (When DATABASE_TYPE is postgre) | Boolean | false |
|
||||
| SECRETKEY_PATH | Location where encryption key (used to encrypt/decrypt credentials) is saved | String | `your-path/Flowise/packages/server` |
|
||||
| FLOWISE_SECRETKEY_OVERWRITE | Encryption key to be used instead of the key stored in SECRETKEY_PATH | String | |
|
||||
| MODEL_LIST_CONFIG_JSON | File path to load list of models from your local config file | String | `/your_model_list_config_file_path` |
|
||||
| STORAGE_TYPE | Type of storage for uploaded files. default is `local` | Enum String: `s3`, `local`, `gcs` | `local` |
|
||||
| BLOB_STORAGE_PATH | Local folder path where uploaded files are stored when `STORAGE_TYPE` is `local` | String | `your-home-dir/.flowise/storage` |
|
||||
| S3_STORAGE_BUCKET_NAME | Bucket name to hold the uploaded files when `STORAGE_TYPE` is `s3` | String | |
|
||||
| S3_STORAGE_ACCESS_KEY_ID | AWS Access Key | String | |
|
||||
| S3_STORAGE_SECRET_ACCESS_KEY | AWS Secret Key | String | |
|
||||
| S3_STORAGE_REGION | Region for S3 bucket | String | |
|
||||
| S3_ENDPOINT_URL | Custom Endpoint for S3 | String | |
|
||||
| S3_FORCE_PATH_STYLE | Set this to true to force the request to use path-style addressing | Boolean | false |
|
||||
| GOOGLE_CLOUD_STORAGE_PROJ_ID | The GCP project id for cloud storage & logging when `STORAGE_TYPE` is `gcs` | String | |
|
||||
| GOOGLE_CLOUD_STORAGE_CREDENTIAL | The credential key file path when `STORAGE_TYPE` is `gcs` | String | |
|
||||
| GOOGLE_CLOUD_STORAGE_BUCKET_NAME | Bucket name to hold the uploaded files when `STORAGE_TYPE` is `gcs` | String | |
|
||||
| GOOGLE_CLOUD_UNIFORM_BUCKET_ACCESS | Enable uniform bucket level access when `STORAGE_TYPE` is `gcs` | Boolean | true |
|
||||
| SHOW_COMMUNITY_NODES | Show nodes created by community | Boolean | |
|
||||
| DISABLED_NODES | Hide nodes from UI (comma separated list of node names) | String | |
|
||||
| TRUST_PROXY | Configure proxy trust settings for proper IP detection. Values: 'true' (trust all), 'false' (disable), number (hop count), or Express proxy values (e.g., 'loopback', 'linklocal', 'uniquelocal', IP addresses). [Learn More](https://expressjs.com/en/guide/behind-proxies.html) | Boolean/String/Number | true |
|
||||
|
||||
You can also specify the env variables when using `npx`. For example:
|
||||
|
||||
|
|
|
|||
39
Dockerfile
39
Dockerfile
|
|
@ -5,34 +5,41 @@
|
|||
# docker run -d -p 3000:3000 flowise
|
||||
|
||||
FROM node:20-alpine
|
||||
RUN apk add --update libc6-compat python3 make g++
|
||||
# needed for pdfjs-dist
|
||||
RUN apk add --no-cache build-base cairo-dev pango-dev
|
||||
|
||||
# Install Chromium
|
||||
RUN apk add --no-cache chromium
|
||||
|
||||
# Install curl for container-level health checks
|
||||
# Fixes: https://github.com/FlowiseAI/Flowise/issues/4126
|
||||
RUN apk add --no-cache curl
|
||||
|
||||
#install PNPM globaly
|
||||
RUN npm install -g pnpm
|
||||
# Install system dependencies and build tools
|
||||
RUN apk update && \
|
||||
apk add --no-cache \
|
||||
libc6-compat \
|
||||
python3 \
|
||||
make \
|
||||
g++ \
|
||||
build-base \
|
||||
cairo-dev \
|
||||
pango-dev \
|
||||
chromium \
|
||||
curl && \
|
||||
npm install -g pnpm
|
||||
|
||||
ENV PUPPETEER_SKIP_DOWNLOAD=true
|
||||
ENV PUPPETEER_EXECUTABLE_PATH=/usr/bin/chromium-browser
|
||||
|
||||
ENV NODE_OPTIONS=--max-old-space-size=8192
|
||||
|
||||
WORKDIR /usr/src
|
||||
WORKDIR /usr/src/flowise
|
||||
|
||||
# Copy app source
|
||||
COPY . .
|
||||
|
||||
RUN pnpm install
|
||||
# Install dependencies and build
|
||||
RUN pnpm install && \
|
||||
pnpm build
|
||||
|
||||
RUN pnpm build
|
||||
# Give the node user ownership of the application files
|
||||
RUN chown -R node:node .
|
||||
|
||||
# Switch to non-root user (node user already exists in node:20-alpine)
|
||||
USER node
|
||||
|
||||
EXPOSE 3000
|
||||
|
||||
CMD [ "pnpm", "start" ]
|
||||
CMD [ "pnpm", "start" ]
|
||||
63
README.md
63
README.md
|
|
@ -5,6 +5,8 @@
|
|||
<img src="https://github.com/FlowiseAI/Flowise/blob/main/images/flowise_dark.svg#gh-dark-mode-only">
|
||||
</p>
|
||||
|
||||
<div align="center">
|
||||
|
||||
[](https://github.com/FlowiseAI/Flowise/releases)
|
||||
[](https://discord.gg/jbaHfsRVBW)
|
||||
[](https://twitter.com/FlowiseAI)
|
||||
|
|
@ -13,10 +15,25 @@
|
|||
|
||||
English | [繁體中文](./i18n/README-TW.md) | [简体中文](./i18n/README-ZH.md) | [日本語](./i18n/README-JA.md) | [한국어](./i18n/README-KR.md)
|
||||
|
||||
</div>
|
||||
|
||||
<h3>Build AI Agents, Visually</h3>
|
||||
<a href="https://github.com/FlowiseAI/Flowise">
|
||||
<img width="100%" src="https://github.com/FlowiseAI/Flowise/blob/main/images/flowise_agentflow.gif?raw=true"></a>
|
||||
|
||||
## 📚 Table of Contents
|
||||
|
||||
- [⚡ Quick Start](#-quick-start)
|
||||
- [🐳 Docker](#-docker)
|
||||
- [👨💻 Developers](#-developers)
|
||||
- [🌱 Env Variables](#-env-variables)
|
||||
- [📖 Documentation](#-documentation)
|
||||
- [🌐 Self Host](#-self-host)
|
||||
- [☁️ Flowise Cloud](#️-flowise-cloud)
|
||||
- [🙋 Support](#-support)
|
||||
- [🙌 Contributing](#-contributing)
|
||||
- [📄 License](#-license)
|
||||
|
||||
## ⚡Quick Start
|
||||
|
||||
Download and Install [NodeJS](https://nodejs.org/en/download) >= 18.15.0
|
||||
|
|
@ -47,9 +64,11 @@ Download and Install [NodeJS](https://nodejs.org/en/download) >= 18.15.0
|
|||
### Docker Image
|
||||
|
||||
1. Build the image locally:
|
||||
|
||||
```bash
|
||||
docker build --no-cache -t flowise .
|
||||
```
|
||||
|
||||
2. Run image:
|
||||
|
||||
```bash
|
||||
|
|
@ -57,6 +76,7 @@ Download and Install [NodeJS](https://nodejs.org/en/download) >= 18.15.0
|
|||
```
|
||||
|
||||
3. Stop image:
|
||||
|
||||
```bash
|
||||
docker stop flowise
|
||||
```
|
||||
|
|
@ -79,13 +99,13 @@ Flowise has 3 different modules in a single mono repository.
|
|||
|
||||
### Setup
|
||||
|
||||
1. Clone the repository
|
||||
1. Clone the repository:
|
||||
|
||||
```bash
|
||||
git clone https://github.com/FlowiseAI/Flowise.git
|
||||
```
|
||||
|
||||
2. Go into repository folder
|
||||
2. Go into repository folder:
|
||||
|
||||
```bash
|
||||
cd Flowise
|
||||
|
|
@ -105,10 +125,24 @@ Flowise has 3 different modules in a single mono repository.
|
|||
|
||||
<details>
|
||||
<summary>Exit code 134 (JavaScript heap out of memory)</summary>
|
||||
If you get this error when running the above `build` script, try increasing the Node.js heap size and run the script again:
|
||||
If you get this error when running the above `build` script, try increasing the Node.js heap size and run the script again:
|
||||
|
||||
export NODE_OPTIONS="--max-old-space-size=4096"
|
||||
pnpm build
|
||||
```bash
|
||||
# macOS / Linux / Git Bash
|
||||
export NODE_OPTIONS="--max-old-space-size=4096"
|
||||
|
||||
# Windows PowerShell
|
||||
$env:NODE_OPTIONS="--max-old-space-size=4096"
|
||||
|
||||
# Windows CMD
|
||||
set NODE_OPTIONS=--max-old-space-size=4096
|
||||
```
|
||||
|
||||
Then run:
|
||||
|
||||
```bash
|
||||
pnpm build
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
|
|
@ -124,7 +158,7 @@ Flowise has 3 different modules in a single mono repository.
|
|||
|
||||
- Create `.env` file and specify the `VITE_PORT` (refer to `.env.example`) in `packages/ui`
|
||||
- Create `.env` file and specify the `PORT` (refer to `.env.example`) in `packages/server`
|
||||
- Run
|
||||
- Run:
|
||||
|
||||
```bash
|
||||
pnpm dev
|
||||
|
|
@ -134,11 +168,11 @@ Flowise has 3 different modules in a single mono repository.
|
|||
|
||||
## 🌱 Env Variables
|
||||
|
||||
Flowise support different environment variables to configure your instance. You can specify the following variables in the `.env` file inside `packages/server` folder. Read [more](https://github.com/FlowiseAI/Flowise/blob/main/CONTRIBUTING.md#-env-variables)
|
||||
Flowise supports different environment variables to configure your instance. You can specify the following variables in the `.env` file inside `packages/server` folder. Read [more](https://github.com/FlowiseAI/Flowise/blob/main/CONTRIBUTING.md#-env-variables)
|
||||
|
||||
## 📖 Documentation
|
||||
|
||||
[Flowise Docs](https://docs.flowiseai.com/)
|
||||
You can view the Flowise Docs [here](https://docs.flowiseai.com/)
|
||||
|
||||
## 🌐 Self Host
|
||||
|
||||
|
|
@ -156,6 +190,10 @@ Deploy Flowise self-hosted in your existing infrastructure, we support various [
|
|||
|
||||
[](https://railway.app/template/pn4G8S?referralCode=WVNPD9)
|
||||
|
||||
- [Northflank](https://northflank.com/stacks/deploy-flowiseai)
|
||||
|
||||
[](https://northflank.com/stacks/deploy-flowiseai)
|
||||
|
||||
- [Render](https://docs.flowiseai.com/configuration/deployment/render)
|
||||
|
||||
[](https://docs.flowiseai.com/configuration/deployment/render)
|
||||
|
|
@ -180,11 +218,11 @@ Deploy Flowise self-hosted in your existing infrastructure, we support various [
|
|||
|
||||
## ☁️ Flowise Cloud
|
||||
|
||||
[Get Started with Flowise Cloud](https://flowiseai.com/)
|
||||
Get Started with [Flowise Cloud](https://flowiseai.com/).
|
||||
|
||||
## 🙋 Support
|
||||
|
||||
Feel free to ask any questions, raise problems, and request new features in [discussion](https://github.com/FlowiseAI/Flowise/discussions)
|
||||
Feel free to ask any questions, raise problems, and request new features in [Discussion](https://github.com/FlowiseAI/Flowise/discussions).
|
||||
|
||||
## 🙌 Contributing
|
||||
|
||||
|
|
@ -192,9 +230,10 @@ Thanks go to these awesome contributors
|
|||
|
||||
<a href="https://github.com/FlowiseAI/Flowise/graphs/contributors">
|
||||
<img src="https://contrib.rocks/image?repo=FlowiseAI/Flowise" />
|
||||
</a>
|
||||
</a><br><br>
|
||||
|
||||
See [Contributing Guide](CONTRIBUTING.md). Reach out to us at [Discord](https://discord.gg/jbaHfsRVBW) if you have any questions or issues.
|
||||
|
||||
See [contributing guide](CONTRIBUTING.md). Reach out to us at [Discord](https://discord.gg/jbaHfsRVBW) if you have any questions or issues.
|
||||
[](https://star-history.com/#FlowiseAI/Flowise&Date)
|
||||
|
||||
## 📄 License
|
||||
|
|
|
|||
58
SECURITY.md
58
SECURITY.md
|
|
@ -1,40 +1,38 @@
|
|||
### Responsible Disclosure Policy
|
||||
### Responsible Disclosure Policy
|
||||
|
||||
At Flowise, we prioritize security and continuously work to safeguard our systems. However, vulnerabilities can still exist. If you identify a security issue, please report it to us so we can address it promptly. Your cooperation helps us better protect our platform and users.
|
||||
At Flowise, we prioritize security and continuously work to safeguard our systems. However, vulnerabilities can still exist. If you identify a security issue, please report it to us so we can address it promptly. Your cooperation helps us better protect our platform and users.
|
||||
|
||||
### Vulnerabilities
|
||||
### Out of scope vulnerabilities
|
||||
|
||||
The following types of issues are some of the most common vulnerabilities:
|
||||
- Clickjacking on pages without sensitive actions
|
||||
- CSRF on unauthenticated/logout/login pages
|
||||
- Attacks requiring MITM (Man-in-the-Middle) or physical device access
|
||||
- Social engineering attacks
|
||||
- Activities that cause service disruption (DoS)
|
||||
- Content spoofing and text injection without a valid attack vector
|
||||
- Email spoofing
|
||||
- Absence of DNSSEC, CAA, CSP headers
|
||||
- Missing Secure or HTTP-only flag on non-sensitive cookies
|
||||
- Deadlinks
|
||||
- User enumeration
|
||||
|
||||
- Clickjacking on pages without sensitive actions
|
||||
- CSRF on unauthenticated/logout/login pages
|
||||
- Attacks requiring MITM (Man-in-the-Middle) or physical device access
|
||||
- Social engineering attacks
|
||||
- Activities that cause service disruption (DoS)
|
||||
- Content spoofing and text injection without a valid attack vector
|
||||
- Email spoofing
|
||||
- Absence of DNSSEC, CAA, CSP headers
|
||||
- Missing Secure or HTTP-only flag on non-sensitive cookies
|
||||
- Deadlinks
|
||||
- User enumeration
|
||||
### Reporting Guidelines
|
||||
|
||||
### Reporting Guidelines
|
||||
- Submit your findings to https://github.com/FlowiseAI/Flowise/security
|
||||
- Provide clear details to help us reproduce and fix the issue quickly.
|
||||
|
||||
- Submit your findings to https://github.com/FlowiseAI/Flowise/security
|
||||
- Provide clear details to help us reproduce and fix the issue quickly.
|
||||
### Disclosure Guidelines
|
||||
|
||||
### Disclosure Guidelines
|
||||
- Do not publicly disclose vulnerabilities until we have assessed, resolved, and notified affected users.
|
||||
- If you plan to present your research (e.g., at a conference or in a blog), share a draft with us at least **30 days in advance** for review.
|
||||
- Avoid including:
|
||||
- Data from any Flowise customer projects
|
||||
- Flowise user/customer information
|
||||
- Details about Flowise employees, contractors, or partners
|
||||
|
||||
- Do not publicly disclose vulnerabilities until we have assessed, resolved, and notified affected users.
|
||||
- If you plan to present your research (e.g., at a conference or in a blog), share a draft with us at least **30 days in advance** for review.
|
||||
- Avoid including:
|
||||
- Data from any Flowise customer projects
|
||||
- Flowise user/customer information
|
||||
- Details about Flowise employees, contractors, or partners
|
||||
### Response to Reports
|
||||
|
||||
### Response to Reports
|
||||
- We will acknowledge your report within **5 business days** and provide an estimated resolution timeline.
|
||||
- Your report will be kept **confidential**, and your details will not be shared without your consent.
|
||||
|
||||
- We will acknowledge your report within **5 business days** and provide an estimated resolution timeline.
|
||||
- Your report will be kept **confidential**, and your details will not be shared without your consent.
|
||||
|
||||
We appreciate your efforts in helping us maintain a secure platform and look forward to working together to resolve any issues responsibly.
|
||||
We appreciate your efforts in helping us maintain a secure platform and look forward to working together to resolve any issues responsibly.
|
||||
|
|
|
|||
|
|
@ -14,6 +14,7 @@ DATABASE_PATH=/root/.flowise
|
|||
# DATABASE_USER=root
|
||||
# DATABASE_PASSWORD=mypassword
|
||||
# DATABASE_SSL=true
|
||||
# DATABASE_REJECT_UNAUTHORIZED=true
|
||||
# DATABASE_SSL_KEY_BASE64=<Self signed certificate in BASE64>
|
||||
|
||||
|
||||
|
|
@ -37,8 +38,11 @@ SECRETKEY_PATH=/root/.flowise
|
|||
# DEBUG=true
|
||||
LOG_PATH=/root/.flowise/logs
|
||||
# LOG_LEVEL=info #(error | warn | info | verbose | debug)
|
||||
# LOG_SANITIZE_BODY_FIELDS=password,pwd,pass,secret,token,apikey,api_key,accesstoken,access_token,refreshtoken,refresh_token,clientsecret,client_secret,privatekey,private_key,secretkey,secret_key,auth,authorization,credential,credentials
|
||||
# LOG_SANITIZE_HEADER_FIELDS=authorization,x-api-key,x-auth-token,cookie
|
||||
# TOOL_FUNCTION_BUILTIN_DEP=crypto,fs
|
||||
# TOOL_FUNCTION_EXTERNAL_DEP=moment,lodash
|
||||
# ALLOW_BUILTIN_DEP=false
|
||||
|
||||
|
||||
############################################################################################################
|
||||
|
|
@ -97,6 +101,7 @@ JWT_TOKEN_EXPIRY_IN_MINUTES=360
|
|||
JWT_REFRESH_TOKEN_EXPIRY_IN_MINUTES=43200
|
||||
# EXPIRE_AUTH_TOKENS_ON_RESTART=true # (if you need to expire all tokens on app restart)
|
||||
# EXPRESS_SESSION_SECRET=flowise
|
||||
# SECURE_COOKIES=
|
||||
|
||||
# INVITE_TOKEN_EXPIRY_IN_HOURS=24
|
||||
# PASSWORD_RESET_TOKEN_EXPIRY_IN_MINS=15
|
||||
|
|
@ -162,4 +167,14 @@ JWT_REFRESH_TOKEN_EXPIRY_IN_MINUTES=43200
|
|||
# REDIS_KEY=
|
||||
# REDIS_CA=
|
||||
# REDIS_KEEP_ALIVE=
|
||||
# ENABLE_BULLMQ_DASHBOARD=
|
||||
# ENABLE_BULLMQ_DASHBOARD=
|
||||
|
||||
|
||||
############################################################################################################
|
||||
############################################## SECURITY ####################################################
|
||||
############################################################################################################
|
||||
|
||||
# HTTP_DENY_LIST=
|
||||
# CUSTOM_MCP_SECURITY_CHECK=true
|
||||
# CUSTOM_MCP_PROTOCOL=sse #(stdio | sse)
|
||||
# TRUST_PROXY=true #(true | false | 1 | loopback| linklocal | uniquelocal | IP addresses | loopback, IP addresses)
|
||||
|
|
|
|||
|
|
@ -18,7 +18,7 @@ If you like to persist your data (flows, logs, credentials, storage), set these
|
|||
- SECRETKEY_PATH=/root/.flowise
|
||||
- BLOB_STORAGE_PATH=/root/.flowise/storage
|
||||
|
||||
Flowise also support different environment variables to configure your instance. Read [more](https://docs.flowiseai.com/environment-variables)
|
||||
Flowise also support different environment variables to configure your instance. Read [more](https://docs.flowiseai.com/configuration/environment-variables)
|
||||
|
||||
## Queue Mode:
|
||||
|
||||
|
|
|
|||
|
|
@ -46,10 +46,13 @@ services:
|
|||
- DEBUG=${DEBUG}
|
||||
- LOG_PATH=${LOG_PATH}
|
||||
- LOG_LEVEL=${LOG_LEVEL}
|
||||
- LOG_SANITIZE_BODY_FIELDS=${LOG_SANITIZE_BODY_FIELDS}
|
||||
- LOG_SANITIZE_HEADER_FIELDS=${LOG_SANITIZE_HEADER_FIELDS}
|
||||
|
||||
# CUSTOM TOOL DEPENDENCIES
|
||||
# CUSTOM TOOL/FUNCTION DEPENDENCIES
|
||||
- TOOL_FUNCTION_BUILTIN_DEP=${TOOL_FUNCTION_BUILTIN_DEP}
|
||||
- TOOL_FUNCTION_EXTERNAL_DEP=${TOOL_FUNCTION_EXTERNAL_DEP}
|
||||
- ALLOW_BUILTIN_DEP=${ALLOW_BUILTIN_DEP}
|
||||
|
||||
# STORAGE
|
||||
- STORAGE_TYPE=${STORAGE_TYPE}
|
||||
|
|
@ -88,6 +91,7 @@ services:
|
|||
- PASSWORD_RESET_TOKEN_EXPIRY_IN_MINS=${PASSWORD_RESET_TOKEN_EXPIRY_IN_MINS}
|
||||
- PASSWORD_SALT_HASH_ROUNDS=${PASSWORD_SALT_HASH_ROUNDS}
|
||||
- TOKEN_HASH_SECRET=${TOKEN_HASH_SECRET}
|
||||
- SECURE_COOKIES=${SECURE_COOKIES}
|
||||
|
||||
# EMAIL
|
||||
- SMTP_HOST=${SMTP_HOST}
|
||||
|
|
@ -138,6 +142,12 @@ services:
|
|||
- REDIS_CA=${REDIS_CA}
|
||||
- REDIS_KEEP_ALIVE=${REDIS_KEEP_ALIVE}
|
||||
- ENABLE_BULLMQ_DASHBOARD=${ENABLE_BULLMQ_DASHBOARD}
|
||||
|
||||
# SECURITY
|
||||
- CUSTOM_MCP_SECURITY_CHECK=${CUSTOM_MCP_SECURITY_CHECK}
|
||||
- CUSTOM_MCP_PROTOCOL=${CUSTOM_MCP_PROTOCOL}
|
||||
- HTTP_DENY_LIST=${HTTP_DENY_LIST}
|
||||
- TRUST_PROXY=${TRUST_PROXY}
|
||||
healthcheck:
|
||||
test: ['CMD', 'curl', '-f', 'http://localhost:${PORT:-3000}/api/v1/ping']
|
||||
interval: 10s
|
||||
|
|
@ -182,10 +192,13 @@ services:
|
|||
- DEBUG=${DEBUG}
|
||||
- LOG_PATH=${LOG_PATH}
|
||||
- LOG_LEVEL=${LOG_LEVEL}
|
||||
- LOG_SANITIZE_BODY_FIELDS=${LOG_SANITIZE_BODY_FIELDS}
|
||||
- LOG_SANITIZE_HEADER_FIELDS=${LOG_SANITIZE_HEADER_FIELDS}
|
||||
|
||||
# CUSTOM TOOL DEPENDENCIES
|
||||
# CUSTOM TOOL/FUNCTION DEPENDENCIES
|
||||
- TOOL_FUNCTION_BUILTIN_DEP=${TOOL_FUNCTION_BUILTIN_DEP}
|
||||
- TOOL_FUNCTION_EXTERNAL_DEP=${TOOL_FUNCTION_EXTERNAL_DEP}
|
||||
- ALLOW_BUILTIN_DEP=${ALLOW_BUILTIN_DEP}
|
||||
|
||||
# STORAGE
|
||||
- STORAGE_TYPE=${STORAGE_TYPE}
|
||||
|
|
@ -224,6 +237,7 @@ services:
|
|||
- PASSWORD_RESET_TOKEN_EXPIRY_IN_MINS=${PASSWORD_RESET_TOKEN_EXPIRY_IN_MINS}
|
||||
- PASSWORD_SALT_HASH_ROUNDS=${PASSWORD_SALT_HASH_ROUNDS}
|
||||
- TOKEN_HASH_SECRET=${TOKEN_HASH_SECRET}
|
||||
- SECURE_COOKIES=${SECURE_COOKIES}
|
||||
|
||||
# EMAIL
|
||||
- SMTP_HOST=${SMTP_HOST}
|
||||
|
|
@ -274,6 +288,12 @@ services:
|
|||
- REDIS_CA=${REDIS_CA}
|
||||
- REDIS_KEEP_ALIVE=${REDIS_KEEP_ALIVE}
|
||||
- ENABLE_BULLMQ_DASHBOARD=${ENABLE_BULLMQ_DASHBOARD}
|
||||
|
||||
# SECURITY
|
||||
- CUSTOM_MCP_SECURITY_CHECK=${CUSTOM_MCP_SECURITY_CHECK}
|
||||
- CUSTOM_MCP_PROTOCOL=${CUSTOM_MCP_PROTOCOL}
|
||||
- HTTP_DENY_LIST=${HTTP_DENY_LIST}
|
||||
- TRUST_PROXY=${TRUST_PROXY}
|
||||
healthcheck:
|
||||
test: ['CMD', 'curl', '-f', 'http://localhost:${WORKER_PORT:-5566}/healthz']
|
||||
interval: 10s
|
||||
|
|
|
|||
|
|
@ -31,10 +31,13 @@ services:
|
|||
- DEBUG=${DEBUG}
|
||||
- LOG_PATH=${LOG_PATH}
|
||||
- LOG_LEVEL=${LOG_LEVEL}
|
||||
- LOG_SANITIZE_BODY_FIELDS=${LOG_SANITIZE_BODY_FIELDS}
|
||||
- LOG_SANITIZE_HEADER_FIELDS=${LOG_SANITIZE_HEADER_FIELDS}
|
||||
|
||||
# CUSTOM TOOL DEPENDENCIES
|
||||
# CUSTOM TOOL/FUNCTION DEPENDENCIES
|
||||
- TOOL_FUNCTION_BUILTIN_DEP=${TOOL_FUNCTION_BUILTIN_DEP}
|
||||
- TOOL_FUNCTION_EXTERNAL_DEP=${TOOL_FUNCTION_EXTERNAL_DEP}
|
||||
- ALLOW_BUILTIN_DEP=${ALLOW_BUILTIN_DEP}
|
||||
|
||||
# STORAGE
|
||||
- STORAGE_TYPE=${STORAGE_TYPE}
|
||||
|
|
@ -73,6 +76,7 @@ services:
|
|||
- PASSWORD_RESET_TOKEN_EXPIRY_IN_MINS=${PASSWORD_RESET_TOKEN_EXPIRY_IN_MINS}
|
||||
- PASSWORD_SALT_HASH_ROUNDS=${PASSWORD_SALT_HASH_ROUNDS}
|
||||
- TOKEN_HASH_SECRET=${TOKEN_HASH_SECRET}
|
||||
- SECURE_COOKIES=${SECURE_COOKIES}
|
||||
|
||||
# EMAIL
|
||||
- SMTP_HOST=${SMTP_HOST}
|
||||
|
|
@ -123,6 +127,12 @@ services:
|
|||
- REDIS_CA=${REDIS_CA}
|
||||
- REDIS_KEEP_ALIVE=${REDIS_KEEP_ALIVE}
|
||||
- ENABLE_BULLMQ_DASHBOARD=${ENABLE_BULLMQ_DASHBOARD}
|
||||
|
||||
# SECURITY
|
||||
- CUSTOM_MCP_SECURITY_CHECK=${CUSTOM_MCP_SECURITY_CHECK}
|
||||
- CUSTOM_MCP_PROTOCOL=${CUSTOM_MCP_PROTOCOL}
|
||||
- HTTP_DENY_LIST=${HTTP_DENY_LIST}
|
||||
- TRUST_PROXY=${TRUST_PROXY}
|
||||
ports:
|
||||
- '${PORT}:${PORT}'
|
||||
healthcheck:
|
||||
|
|
|
|||
|
|
@ -14,6 +14,7 @@ DATABASE_PATH=/root/.flowise
|
|||
# DATABASE_USER=root
|
||||
# DATABASE_PASSWORD=mypassword
|
||||
# DATABASE_SSL=true
|
||||
# DATABASE_REJECT_UNAUTHORIZED=true
|
||||
# DATABASE_SSL_KEY_BASE64=<Self signed certificate in BASE64>
|
||||
|
||||
|
||||
|
|
@ -37,8 +38,11 @@ SECRETKEY_PATH=/root/.flowise
|
|||
# DEBUG=true
|
||||
LOG_PATH=/root/.flowise/logs
|
||||
# LOG_LEVEL=info #(error | warn | info | verbose | debug)
|
||||
# LOG_SANITIZE_BODY_FIELDS=password,pwd,pass,secret,token,apikey,api_key,accesstoken,access_token,refreshtoken,refresh_token,clientsecret,client_secret,privatekey,private_key,secretkey,secret_key,auth,authorization,credential,credentials
|
||||
# LOG_SANITIZE_HEADER_FIELDS=authorization,x-api-key,x-auth-token,cookie
|
||||
# TOOL_FUNCTION_BUILTIN_DEP=crypto,fs
|
||||
# TOOL_FUNCTION_EXTERNAL_DEP=moment,lodash
|
||||
# ALLOW_BUILTIN_DEP=false
|
||||
|
||||
|
||||
############################################################################################################
|
||||
|
|
@ -97,6 +101,7 @@ JWT_TOKEN_EXPIRY_IN_MINUTES=360
|
|||
JWT_REFRESH_TOKEN_EXPIRY_IN_MINUTES=43200
|
||||
# EXPIRE_AUTH_TOKENS_ON_RESTART=true # (if you need to expire all tokens on app restart)
|
||||
# EXPRESS_SESSION_SECRET=flowise
|
||||
# SECURE_COOKIES=
|
||||
|
||||
# INVITE_TOKEN_EXPIRY_IN_HOURS=24
|
||||
# PASSWORD_RESET_TOKEN_EXPIRY_IN_MINS=15
|
||||
|
|
@ -162,4 +167,14 @@ JWT_REFRESH_TOKEN_EXPIRY_IN_MINUTES=43200
|
|||
# REDIS_KEY=
|
||||
# REDIS_CA=
|
||||
# REDIS_KEEP_ALIVE=
|
||||
# ENABLE_BULLMQ_DASHBOARD=
|
||||
# ENABLE_BULLMQ_DASHBOARD=
|
||||
|
||||
|
||||
############################################################################################################
|
||||
############################################## SECURITY ####################################################
|
||||
############################################################################################################
|
||||
|
||||
# HTTP_DENY_LIST=
|
||||
# CUSTOM_MCP_SECURITY_CHECK=true
|
||||
# CUSTOM_MCP_PROTOCOL=sse #(stdio | sse)
|
||||
# TRUST_PROXY=true #(true | false | 1 | loopback| linklocal | uniquelocal | IP addresses | loopback, IP addresses)
|
||||
|
|
|
|||
|
|
@ -7,7 +7,7 @@ RUN apk add --no-cache build-base cairo-dev pango-dev
|
|||
# Install Chromium and curl for container-level health checks
|
||||
RUN apk add --no-cache chromium curl
|
||||
|
||||
#install PNPM globaly
|
||||
#install PNPM globally
|
||||
RUN npm install -g pnpm
|
||||
|
||||
ENV PUPPETEER_SKIP_DOWNLOAD=true
|
||||
|
|
|
|||
|
|
@ -31,10 +31,13 @@ services:
|
|||
- DEBUG=${DEBUG}
|
||||
- LOG_PATH=${LOG_PATH}
|
||||
- LOG_LEVEL=${LOG_LEVEL}
|
||||
- LOG_SANITIZE_BODY_FIELDS=${LOG_SANITIZE_BODY_FIELDS}
|
||||
- LOG_SANITIZE_HEADER_FIELDS=${LOG_SANITIZE_HEADER_FIELDS}
|
||||
|
||||
# CUSTOM TOOL DEPENDENCIES
|
||||
# CUSTOM TOOL/FUNCTION DEPENDENCIES
|
||||
- TOOL_FUNCTION_BUILTIN_DEP=${TOOL_FUNCTION_BUILTIN_DEP}
|
||||
- TOOL_FUNCTION_EXTERNAL_DEP=${TOOL_FUNCTION_EXTERNAL_DEP}
|
||||
- ALLOW_BUILTIN_DEP=${ALLOW_BUILTIN_DEP}
|
||||
|
||||
# STORAGE
|
||||
- STORAGE_TYPE=${STORAGE_TYPE}
|
||||
|
|
@ -73,6 +76,7 @@ services:
|
|||
- PASSWORD_RESET_TOKEN_EXPIRY_IN_MINS=${PASSWORD_RESET_TOKEN_EXPIRY_IN_MINS}
|
||||
- PASSWORD_SALT_HASH_ROUNDS=${PASSWORD_SALT_HASH_ROUNDS}
|
||||
- TOKEN_HASH_SECRET=${TOKEN_HASH_SECRET}
|
||||
- SECURE_COOKIES=${SECURE_COOKIES}
|
||||
|
||||
# EMAIL
|
||||
- SMTP_HOST=${SMTP_HOST}
|
||||
|
|
@ -123,6 +127,12 @@ services:
|
|||
- REDIS_CA=${REDIS_CA}
|
||||
- REDIS_KEEP_ALIVE=${REDIS_KEEP_ALIVE}
|
||||
- ENABLE_BULLMQ_DASHBOARD=${ENABLE_BULLMQ_DASHBOARD}
|
||||
|
||||
# SECURITY
|
||||
- CUSTOM_MCP_SECURITY_CHECK=${CUSTOM_MCP_SECURITY_CHECK}
|
||||
- CUSTOM_MCP_PROTOCOL=${CUSTOM_MCP_PROTOCOL}
|
||||
- HTTP_DENY_LIST=${HTTP_DENY_LIST}
|
||||
- TRUST_PROXY=${TRUST_PROXY}
|
||||
ports:
|
||||
- '${WORKER_PORT}:${WORKER_PORT}'
|
||||
healthcheck:
|
||||
|
|
|
|||
|
|
@ -112,41 +112,41 @@ Flowise 在一个单一的单体存储库中有 3 个不同的模块。
|
|||
pnpm start
|
||||
```
|
||||
|
||||
11. 提交代码并从指向 [Flowise 主分支](https://github.com/FlowiseAI/Flowise/tree/master) 的分叉分支上提交 Pull Request。
|
||||
11. 提交代码并从指向 [Flowise 主分支](https://github.com/FlowiseAI/Flowise/tree/main) 的分叉分支上提交 Pull Request。
|
||||
|
||||
## 🌱 环境变量
|
||||
|
||||
Flowise 支持不同的环境变量来配置您的实例。您可以在 `packages/server` 文件夹中的 `.env` 文件中指定以下变量。阅读[更多信息](https://docs.flowiseai.com/environment-variables)
|
||||
|
||||
| 变量名 | 描述 | 类型 | 默认值 |
|
||||
| ---------------------------- | ------------------------------------------------------- | ----------------------------------------------- | ----------------------------------- | --- | --- |
|
||||
| PORT | Flowise 运行的 HTTP 端口 | 数字 | 3000 | | |
|
||||
| FLOWISE_FILE_SIZE_LIMIT | 上传文件大小限制 | 字符串 | 50mb | |
|
||||
| DEBUG | 打印组件的日志 | 布尔值 | |
|
||||
| LOG_PATH | 存储日志文件的位置 | 字符串 | `your-path/Flowise/logs` |
|
||||
| LOG_LEVEL | 日志的不同级别 | 枚举字符串: `error`, `info`, `verbose`, `debug` | `info` |
|
||||
| TOOL_FUNCTION_BUILTIN_DEP | 用于工具函数的 NodeJS 内置模块 | 字符串 | |
|
||||
| TOOL_FUNCTION_EXTERNAL_DEP | 用于工具函数的外部模块 | 字符串 | |
|
||||
| DATABASE_TYPE | 存储 flowise 数据的数据库类型 | 枚举字符串: `sqlite`, `mysql`, `postgres` | `sqlite` |
|
||||
| DATABASE_PATH | 数据库保存的位置(当 DATABASE_TYPE 是 sqlite 时) | 字符串 | `your-home-dir/.flowise` |
|
||||
| DATABASE_HOST | 主机 URL 或 IP 地址(当 DATABASE_TYPE 不是 sqlite 时) | 字符串 | |
|
||||
| DATABASE_PORT | 数据库端口(当 DATABASE_TYPE 不是 sqlite 时) | 字符串 | |
|
||||
| DATABASE_USERNAME | 数据库用户名(当 DATABASE_TYPE 不是 sqlite 时) | 字符串 | |
|
||||
| DATABASE_PASSWORD | 数据库密码(当 DATABASE_TYPE 不是 sqlite 时) | 字符串 | |
|
||||
| DATABASE_NAME | 数据库名称(当 DATABASE_TYPE 不是 sqlite 时) | 字符串 | |
|
||||
| SECRETKEY_PATH | 保存加密密钥(用于加密/解密凭据)的位置 | 字符串 | `your-path/Flowise/packages/server` |
|
||||
| FLOWISE_SECRETKEY_OVERWRITE | 加密密钥用于替代存储在 SECRETKEY_PATH 中的密钥 | 字符串 |
|
||||
| MODEL_LIST_CONFIG_JSON | 加载模型的位置 | 字符 | `/your_model_list_config_file_path` |
|
||||
| STORAGE_TYPE | 上传文件的存储类型 | 枚举字符串: `local`, `s3` | `local` |
|
||||
| BLOB_STORAGE_PATH | 上传文件存储的本地文件夹路径, 当`STORAGE_TYPE`是`local` | 字符串 | `your-home-dir/.flowise/storage` |
|
||||
| S3_STORAGE_BUCKET_NAME | S3 存储文件夹路径, 当`STORAGE_TYPE`是`s3` | 字符串 | |
|
||||
| S3_STORAGE_ACCESS_KEY_ID | AWS 访问密钥 (Access Key) | 字符串 | |
|
||||
| S3_STORAGE_SECRET_ACCESS_KEY | AWS 密钥 (Secret Key) | 字符串 | |
|
||||
| S3_STORAGE_REGION | S3 存储地区 | 字符串 | |
|
||||
| S3_ENDPOINT_URL | S3 端点 URL | 字符串 | |
|
||||
| S3_FORCE_PATH_STYLE | 将其设置为 true 以强制请求使用路径样式寻址 | 布尔值 | false |
|
||||
| SHOW_COMMUNITY_NODES | 显示由社区创建的节点 | 布尔值 | |
|
||||
| DISABLED_NODES | 从界面中隐藏节点(以逗号分隔的节点名称列表) | 字符串 | |
|
||||
|-----------------------------|---------------------------------------------------------|-------------------------------------------------|-------------------------------------|
|
||||
| `PORT` | Flowise 运行的 HTTP 端口 | 数字 | 3000 |
|
||||
| `FLOWISE_FILE_SIZE_LIMIT` | 上传文件大小限制 | 字符串 | 50mb |
|
||||
| `DEBUG` | 打印组件的日志 | 布尔值 | |
|
||||
| `LOG_PATH` | 存储日志文件的位置 | 字符串 | `your-path/Flowise/logs` |
|
||||
| `LOG_LEVEL` | 日志的不同级别 | 枚举字符串: `error`, `info`, `verbose`, `debug` | `info` |
|
||||
| `TOOL_FUNCTION_BUILTIN_DEP` | 用于工具函数的 NodeJS 内置模块 | 字符串 | |
|
||||
| `TOOL_FUNCTION_EXTERNAL_DEP`| 用于工具函数的外部模块 | 字符串 | |
|
||||
| `DATABASE_TYPE` | 存储 Flowise 数据的数据库类型 | 枚举字符串: `sqlite`, `mysql`, `postgres` | `sqlite` |
|
||||
| `DATABASE_PATH` | 数据库保存的位置(当 `DATABASE_TYPE` 是 sqlite 时) | 字符串 | `your-home-dir/.flowise` |
|
||||
| `DATABASE_HOST` | 主机 URL 或 IP 地址(当 `DATABASE_TYPE` 不是 sqlite 时)| 字符串 | |
|
||||
| `DATABASE_PORT` | 数据库端口(当 `DATABASE_TYPE` 不是 sqlite 时) | 字符串 | |
|
||||
| `DATABASE_USERNAME` | 数据库用户名(当 `DATABASE_TYPE` 不是 sqlite 时) | 字符串 | |
|
||||
| `DATABASE_PASSWORD` | 数据库密码(当 `DATABASE_TYPE` 不是 sqlite 时) | 字符串 | |
|
||||
| `DATABASE_NAME` | 数据库名称(当 `DATABASE_TYPE` 不是 sqlite 时) | 字符串 | |
|
||||
| `SECRETKEY_PATH` | 保存加密密钥(用于加密/解密凭据)的位置 | 字符串 | `your-path/Flowise/packages/server` |
|
||||
| `FLOWISE_SECRETKEY_OVERWRITE`| 加密密钥用于替代存储在 `SECRETKEY_PATH` 中的密钥 | 字符串 | |
|
||||
| `MODEL_LIST_CONFIG_JSON` | 加载模型的位置 | 字符串 | `/your_model_list_config_file_path` |
|
||||
| `STORAGE_TYPE` | 上传文件的存储类型 | 枚举字符串: `local`, `s3` | `local` |
|
||||
| `BLOB_STORAGE_PATH` | 本地上传文件存储路径(当 `STORAGE_TYPE` 为 `local`) | 字符串 | `your-home-dir/.flowise/storage` |
|
||||
| `S3_STORAGE_BUCKET_NAME` | S3 存储文件夹路径(当 `STORAGE_TYPE` 为 `s3`) | 字符串 | |
|
||||
| `S3_STORAGE_ACCESS_KEY_ID` | AWS 访问密钥 (Access Key) | 字符串 | |
|
||||
| `S3_STORAGE_SECRET_ACCESS_KEY` | AWS 密钥 (Secret Key) | 字符串 | |
|
||||
| `S3_STORAGE_REGION` | S3 存储地区 | 字符串 | |
|
||||
| `S3_ENDPOINT_URL` | S3 端点 URL | 字符串 | |
|
||||
| `S3_FORCE_PATH_STYLE` | 设置为 true 以强制请求使用路径样式寻址 | 布尔值 | false |
|
||||
| `SHOW_COMMUNITY_NODES` | 显示由社区创建的节点 | 布尔值 | |
|
||||
| `DISABLED_NODES` | 从界面中隐藏节点(以逗号分隔的节点名称列表) | 字符串 | |
|
||||
|
||||
您也可以在使用 `npx` 时指定环境变量。例如:
|
||||
|
||||
|
|
|
|||
|
|
@ -13,7 +13,7 @@
|
|||
|
||||
[English](../README.md) | 繁體中文 | [简体中文](./README-ZH.md) | [日本語](./README-JA.md) | [한국어](./README-KR.md)
|
||||
|
||||
<h3>可視化建構 AI/LLM 流程</h3>
|
||||
<h3>可視化建置 AI/LLM 流程</h3>
|
||||
<a href="https://github.com/FlowiseAI/Flowise">
|
||||
<img width="100%" src="https://github.com/FlowiseAI/Flowise/blob/main/images/flowise_agentflow.gif?raw=true"></a>
|
||||
|
||||
|
|
@ -37,16 +37,16 @@
|
|||
|
||||
### Docker Compose
|
||||
|
||||
1. 克隆 Flowise 項目
|
||||
2. 進入項目根目錄的 `docker` 文件夾
|
||||
3. 複製 `.env.example` 文件,粘貼到相同位置,並重命名為 `.env` 文件
|
||||
1. 複製 Flowise 專案
|
||||
2. 進入專案根目錄的 `docker` 資料夾
|
||||
3. 複製 `.env.example` 文件,貼到相同位置,並重新命名為 `.env` 文件
|
||||
4. `docker compose up -d`
|
||||
5. 打開 [http://localhost:3000](http://localhost:3000)
|
||||
6. 您可以通過 `docker compose stop` 停止容器
|
||||
6. 您可以透過 `docker compose stop` 停止容器
|
||||
|
||||
### Docker 映像
|
||||
|
||||
1. 本地構建映像:
|
||||
1. 本地建置映像:
|
||||
```bash
|
||||
docker build --no-cache -t flowise .
|
||||
```
|
||||
|
|
@ -63,7 +63,7 @@
|
|||
|
||||
## 👨💻 開發者
|
||||
|
||||
Flowise 在單個 mono 存儲庫中有 3 個不同的模塊。
|
||||
Flowise 在單個 mono 儲存庫中有 3 個不同的模組。
|
||||
|
||||
- `server`: 提供 API 邏輯的 Node 後端
|
||||
- `ui`: React 前端
|
||||
|
|
@ -79,33 +79,33 @@ Flowise 在單個 mono 存儲庫中有 3 個不同的模塊。
|
|||
|
||||
### 設置
|
||||
|
||||
1. 克隆存儲庫
|
||||
1. 複製儲存庫
|
||||
|
||||
```bash
|
||||
git clone https://github.com/FlowiseAI/Flowise.git
|
||||
```
|
||||
|
||||
2. 進入存儲庫文件夾
|
||||
2. 進入儲存庫文件夾
|
||||
|
||||
```bash
|
||||
cd Flowise
|
||||
```
|
||||
|
||||
3. 安裝所有模塊的所有依賴項:
|
||||
3. 安裝所有模組的所有依賴項:
|
||||
|
||||
```bash
|
||||
pnpm install
|
||||
```
|
||||
|
||||
4. 構建所有代碼:
|
||||
4. 建置所有程式碼:
|
||||
|
||||
```bash
|
||||
pnpm build
|
||||
```
|
||||
|
||||
<details>
|
||||
<summary>退出代碼 134(JavaScript 堆內存不足)</summary>
|
||||
如果在運行上述 `build` 腳本時遇到此錯誤,請嘗試增加 Node.js 堆大小並重新運行腳本:
|
||||
<summary>Exit code 134(JavaScript heap out of memory)</summary>
|
||||
如果在運行上述 `build` 腳本時遇到此錯誤,請嘗試增加 Node.js 中的 Heap 記憶體大小並重新運行腳本:
|
||||
|
||||
export NODE_OPTIONS="--max-old-space-size=4096"
|
||||
pnpm build
|
||||
|
|
@ -118,9 +118,9 @@ Flowise 在單個 mono 存儲庫中有 3 個不同的模塊。
|
|||
pnpm start
|
||||
```
|
||||
|
||||
您現在可以訪問 [http://localhost:3000](http://localhost:3000)
|
||||
您現在可以開啟 [http://localhost:3000](http://localhost:3000)
|
||||
|
||||
6. 對於開發構建:
|
||||
6. 對於開發建置:
|
||||
|
||||
- 在 `packages/ui` 中創建 `.env` 文件並指定 `VITE_PORT`(參考 `.env.example`)
|
||||
- 在 `packages/server` 中創建 `.env` 文件並指定 `PORT`(參考 `.env.example`)
|
||||
|
|
@ -130,19 +130,19 @@ Flowise 在單個 mono 存儲庫中有 3 個不同的模塊。
|
|||
pnpm dev
|
||||
```
|
||||
|
||||
任何代碼更改都會自動重新加載應用程序 [http://localhost:8080](http://localhost:8080)
|
||||
任何程式碼更改都會自動重新加載應用程式 [http://localhost:8080](http://localhost:8080)
|
||||
|
||||
## 🌱 環境變量
|
||||
## 🌱 環境變數
|
||||
|
||||
Flowise 支持不同的環境變量來配置您的實例。您可以在 `packages/server` 文件夾中的 `.env` 文件中指定以下變量。閱讀 [更多](https://github.com/FlowiseAI/Flowise/blob/main/CONTRIBUTING.md#-env-variables)
|
||||
Flowise 支持不同的環境變數來配置您的實例。您可以在 `packages/server` 文件夾中的 `.env` 文件中指定以下變數。閱讀 [更多](https://github.com/FlowiseAI/Flowise/blob/main/CONTRIBUTING.md#-env-variables)
|
||||
|
||||
## 📖 文檔
|
||||
|
||||
[Flowise 文檔](https://docs.flowiseai.com/)
|
||||
|
||||
## 🌐 自我托管
|
||||
## 🌐 自行架設
|
||||
|
||||
在您現有的基礎設施中部署 Flowise 自我托管,我們支持各種 [部署](https://docs.flowiseai.com/configuration/deployment)
|
||||
在您現有的基礎設施中部署 Flowise,我們支持各種自行架設選項 [部署](https://docs.flowiseai.com/configuration/deployment)
|
||||
|
||||
- [AWS](https://docs.flowiseai.com/configuration/deployment/aws)
|
||||
- [Azure](https://docs.flowiseai.com/configuration/deployment/azure)
|
||||
|
|
@ -178,9 +178,9 @@ Flowise 支持不同的環境變量來配置您的實例。您可以在 `package
|
|||
|
||||
</details>
|
||||
|
||||
## ☁️ Flowise 雲
|
||||
## ☁️ Flowise 雲端平台
|
||||
|
||||
[開始使用 Flowise 雲](https://flowiseai.com/)
|
||||
[開始使用 Flowise 雲端平台](https://flowiseai.com/)
|
||||
|
||||
## 🙋 支持
|
||||
|
||||
|
|
@ -194,9 +194,9 @@ Flowise 支持不同的環境變量來配置您的實例。您可以在 `package
|
|||
<img src="https://contrib.rocks/image?repo=FlowiseAI/Flowise" />
|
||||
</a>
|
||||
|
||||
請參閱 [貢獻指南](../CONTRIBUTING.md)。如果您有任何問題或問題,請通過 [Discord](https://discord.gg/jbaHfsRVBW) 與我們聯繫。
|
||||
請參閱 [貢獻指南](../CONTRIBUTING.md)。如果您有任何問題或問題,請透過 [Discord](https://discord.gg/jbaHfsRVBW) 與我們聯繫。
|
||||
[](https://star-history.com/#FlowiseAI/Flowise&Date)
|
||||
|
||||
## 📄 許可證
|
||||
|
||||
此存儲庫中的源代碼根據 [Apache 許可證版本 2.0](../LICENSE.md) 提供。
|
||||
此儲存庫中的原始碼根據 [Apache 2.0 授權條款](../LICENSE.md) 授權使用。
|
||||
|
|
|
|||
|
|
@ -1,6 +1,7 @@
|
|||
version: "2"
|
||||
services:
|
||||
otel-collector:
|
||||
read_only: true
|
||||
image: otel/opentelemetry-collector-contrib
|
||||
command: ["--config=/etc/otelcol-contrib/config.yaml", "--feature-gates=-exporter.datadogexporter.DisableAPMStats", "${OTELCOL_ARGS}"]
|
||||
volumes:
|
||||
|
|
|
|||
16
package.json
16
package.json
|
|
@ -1,6 +1,6 @@
|
|||
{
|
||||
"name": "flowise",
|
||||
"version": "3.0.2",
|
||||
"version": "3.0.11",
|
||||
"private": true,
|
||||
"homepage": "https://flowiseai.com",
|
||||
"workspaces": [
|
||||
|
|
@ -51,7 +51,7 @@
|
|||
"eslint-plugin-react-hooks": "^4.6.0",
|
||||
"eslint-plugin-unused-imports": "^2.0.0",
|
||||
"husky": "^8.0.1",
|
||||
"kill-port": "^2.0.1",
|
||||
"kill-port": "2.0.1",
|
||||
"lint-staged": "^13.0.3",
|
||||
"prettier": "^2.7.1",
|
||||
"pretty-quick": "^3.1.3",
|
||||
|
|
@ -66,20 +66,26 @@
|
|||
"sqlite3"
|
||||
],
|
||||
"overrides": {
|
||||
"axios": "1.7.9",
|
||||
"axios": "1.12.0",
|
||||
"body-parser": "2.0.2",
|
||||
"braces": "3.0.3",
|
||||
"cross-spawn": "7.0.6",
|
||||
"form-data": "4.0.4",
|
||||
"glob-parent": "6.0.2",
|
||||
"http-proxy-middleware": "3.0.3",
|
||||
"json5": "2.2.3",
|
||||
"nth-check": "2.1.1",
|
||||
"path-to-regexp": "0.1.12",
|
||||
"prismjs": "1.29.0",
|
||||
"rollup": "4.45.0",
|
||||
"semver": "7.7.1",
|
||||
"set-value": "4.1.0",
|
||||
"solid-js": "1.9.7",
|
||||
"tar-fs": "3.1.0",
|
||||
"unset-value": "2.0.1",
|
||||
"webpack-dev-middleware": "7.4.2"
|
||||
"webpack-dev-middleware": "7.4.2",
|
||||
"ws": "8.18.3",
|
||||
"xlsx": "https://cdn.sheetjs.com/xlsx-0.20.3/xlsx-0.20.3.tgz"
|
||||
}
|
||||
},
|
||||
"engines": {
|
||||
|
|
@ -89,7 +95,7 @@
|
|||
"resolutions": {
|
||||
"@google/generative-ai": "^0.24.0",
|
||||
"@grpc/grpc-js": "^1.10.10",
|
||||
"@langchain/core": "0.3.37",
|
||||
"@langchain/core": "0.3.61",
|
||||
"@qdrant/openapi-typescript-fetch": "1.2.6",
|
||||
"openai": "4.96.0",
|
||||
"protobufjs": "7.4.0"
|
||||
|
|
|
|||
|
|
@ -1,6 +1,6 @@
|
|||
{
|
||||
"name": "flowise-api",
|
||||
"version": "1.0.2",
|
||||
"version": "1.0.3",
|
||||
"description": "Flowise API documentation server",
|
||||
"scripts": {
|
||||
"build": "tsc",
|
||||
|
|
|
|||
|
|
@ -0,0 +1,23 @@
|
|||
import { INodeCredential, INodeParams } from '../src/Interface'
|
||||
|
||||
class CometApi implements INodeCredential {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
inputs: INodeParams[]
|
||||
|
||||
constructor() {
|
||||
this.label = 'Comet API'
|
||||
this.name = 'cometApi'
|
||||
this.version = 1.0
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'Comet API Key',
|
||||
name: 'cometApiKey',
|
||||
type: 'password'
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { credClass: CometApi }
|
||||
|
|
@ -0,0 +1,26 @@
|
|||
import { INodeParams, INodeCredential } from '../src/Interface'
|
||||
|
||||
class ElevenLabsApi implements INodeCredential {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
description: string
|
||||
inputs: INodeParams[]
|
||||
|
||||
constructor() {
|
||||
this.label = 'Eleven Labs API'
|
||||
this.name = 'elevenLabsApi'
|
||||
this.version = 1.0
|
||||
this.description =
|
||||
'Sign up for a Eleven Labs account and <a target="_blank" href="https://elevenlabs.io/app/settings/api-keys">create an API Key</a>.'
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'Eleven Labs API Key',
|
||||
name: 'elevenLabsApiKey',
|
||||
type: 'password'
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { credClass: ElevenLabsApi }
|
||||
|
|
@ -0,0 +1,30 @@
|
|||
import { INodeParams, INodeCredential } from '../src/Interface'
|
||||
|
||||
class OxylabsApiCredential implements INodeCredential {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
description: string
|
||||
inputs: INodeParams[]
|
||||
|
||||
constructor() {
|
||||
this.label = 'Oxylabs API'
|
||||
this.name = 'oxylabsApi'
|
||||
this.version = 1.0
|
||||
this.description = 'Oxylabs API credentials description, to add more info'
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'Oxylabs Username',
|
||||
name: 'username',
|
||||
type: 'string'
|
||||
},
|
||||
{
|
||||
label: 'Oxylabs Password',
|
||||
name: 'password',
|
||||
type: 'password'
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { credClass: OxylabsApiCredential }
|
||||
|
|
@ -0,0 +1,23 @@
|
|||
import { INodeParams, INodeCredential } from '../src/Interface'
|
||||
|
||||
class SambanovaApi implements INodeCredential {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
inputs: INodeParams[]
|
||||
|
||||
constructor() {
|
||||
this.label = 'Sambanova API'
|
||||
this.name = 'sambanovaApi'
|
||||
this.version = 1.0
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'Sambanova Api Key',
|
||||
name: 'sambanovaApiKey',
|
||||
type: 'password'
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { credClass: SambanovaApi }
|
||||
|
|
@ -0,0 +1,26 @@
|
|||
import { INodeParams, INodeCredential } from '../src/Interface'
|
||||
|
||||
class TeradataBearerTokenCredential implements INodeCredential {
|
||||
label: string
|
||||
name: string
|
||||
description: string
|
||||
version: number
|
||||
inputs: INodeParams[]
|
||||
|
||||
constructor() {
|
||||
this.label = 'Teradata Bearer Token'
|
||||
this.name = 'teradataBearerToken'
|
||||
this.version = 1.0
|
||||
this.description =
|
||||
'Refer to <a target="_blank" href="https://docs.teradata.com/r/Enterprise_IntelliFlex_VMware/Teradata-Vector-Store-User-Guide/Setting-up-Vector-Store/Importing-Modules-Required-for-Vector-Store">official guide</a> on how to get Teradata Bearer Token'
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'Token',
|
||||
name: 'token',
|
||||
type: 'password'
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { credClass: TeradataBearerTokenCredential }
|
||||
|
|
@ -0,0 +1,28 @@
|
|||
import { INodeParams, INodeCredential } from '../src/Interface'
|
||||
|
||||
class TeradataTD2Credential implements INodeCredential {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
inputs: INodeParams[]
|
||||
|
||||
constructor() {
|
||||
this.label = 'Teradata TD2 Auth'
|
||||
this.name = 'teradataTD2Auth'
|
||||
this.version = 1.0
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'Teradata TD2 Auth Username',
|
||||
name: 'tdUsername',
|
||||
type: 'string'
|
||||
},
|
||||
{
|
||||
label: 'Teradata TD2 Auth Password',
|
||||
name: 'tdPassword',
|
||||
type: 'password'
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { credClass: TeradataTD2Credential }
|
||||
|
|
@ -0,0 +1,47 @@
|
|||
import { INodeParams, INodeCredential } from '../src/Interface'
|
||||
|
||||
class TeradataVectorStoreApiCredentials implements INodeCredential {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
inputs: INodeParams[]
|
||||
|
||||
constructor() {
|
||||
this.label = 'Teradata Vector Store API Credentials'
|
||||
this.name = 'teradataVectorStoreApiCredentials'
|
||||
this.version = 1.0
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'Teradata Host IP',
|
||||
name: 'tdHostIp',
|
||||
type: 'string'
|
||||
},
|
||||
{
|
||||
label: 'Username',
|
||||
name: 'tdUsername',
|
||||
type: 'string'
|
||||
},
|
||||
{
|
||||
label: 'Password',
|
||||
name: 'tdPassword',
|
||||
type: 'password'
|
||||
},
|
||||
{
|
||||
label: 'Vector_Store_Base_URL',
|
||||
name: 'baseURL',
|
||||
description: 'Teradata Vector Store Base URL',
|
||||
placeholder: `Base_URL`,
|
||||
type: 'string'
|
||||
},
|
||||
{
|
||||
label: 'JWT Token',
|
||||
name: 'jwtToken',
|
||||
type: 'password',
|
||||
description: 'Bearer token for JWT authentication',
|
||||
optional: true
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { credClass: TeradataVectorStoreApiCredentials }
|
||||
|
|
@ -0,0 +1,15 @@
|
|||
module.exports = {
|
||||
preset: 'ts-jest',
|
||||
testEnvironment: 'node',
|
||||
roots: ['<rootDir>/nodes'],
|
||||
transform: {
|
||||
'^.+\\.tsx?$': 'ts-jest'
|
||||
},
|
||||
testRegex: '(/__tests__/.*|(\\.|/)(test|spec))\\.tsx?$',
|
||||
moduleFileExtensions: ['ts', 'tsx', 'js', 'jsx', 'json', 'node'],
|
||||
verbose: true,
|
||||
testPathIgnorePatterns: ['/node_modules/', '/dist/'],
|
||||
moduleNameMapper: {
|
||||
'^../../../src/(.*)$': '<rootDir>/src/$1'
|
||||
}
|
||||
}
|
||||
|
|
@ -3,6 +3,48 @@
|
|||
{
|
||||
"name": "awsChatBedrock",
|
||||
"models": [
|
||||
{
|
||||
"label": "anthropic.claude-opus-4-5-20251101-v1:0",
|
||||
"name": "anthropic.claude-opus-4-5-20251101-v1:0",
|
||||
"description": "Claude 4.5 Opus",
|
||||
"input_cost": 0.000005,
|
||||
"output_cost": 0.000025
|
||||
},
|
||||
{
|
||||
"label": "anthropic.claude-sonnet-4-5-20250929-v1:0",
|
||||
"name": "anthropic.claude-sonnet-4-5-20250929-v1:0",
|
||||
"description": "Claude 4.5 Sonnet",
|
||||
"input_cost": 0.000003,
|
||||
"output_cost": 0.000015
|
||||
},
|
||||
{
|
||||
"label": "anthropic.claude-haiku-4-5-20251001-v1:0",
|
||||
"name": "anthropic.claude-haiku-4-5-20251001-v1:0",
|
||||
"description": "Claude 4.5 Haiku",
|
||||
"input_cost": 0.000001,
|
||||
"output_cost": 0.000005
|
||||
},
|
||||
{
|
||||
"label": "openai.gpt-oss-20b-1:0",
|
||||
"name": "openai.gpt-oss-20b-1:0",
|
||||
"description": "21B parameters model optimized for lower latency, local, and specialized use cases",
|
||||
"input_cost": 0.00007,
|
||||
"output_cost": 0.0003
|
||||
},
|
||||
{
|
||||
"label": "openai.gpt-oss-120b-1:0",
|
||||
"name": "openai.gpt-oss-120b-1:0",
|
||||
"description": "120B parameters model optimized for production, general purpose, and high-reasoning use cases",
|
||||
"input_cost": 0.00015,
|
||||
"output_cost": 0.0006
|
||||
},
|
||||
{
|
||||
"label": "anthropic.claude-opus-4-1-20250805-v1:0",
|
||||
"name": "anthropic.claude-opus-4-1-20250805-v1:0",
|
||||
"description": "Claude 4.1 Opus",
|
||||
"input_cost": 0.000015,
|
||||
"output_cost": 0.000075
|
||||
},
|
||||
{
|
||||
"label": "anthropic.claude-sonnet-4-20250514-v1:0",
|
||||
"name": "anthropic.claude-sonnet-4-20250514-v1:0",
|
||||
|
|
@ -280,6 +322,30 @@
|
|||
{
|
||||
"name": "azureChatOpenAI",
|
||||
"models": [
|
||||
{
|
||||
"label": "gpt-5.1",
|
||||
"name": "gpt-5.1",
|
||||
"input_cost": 0.00000125,
|
||||
"output_cost": 0.00001
|
||||
},
|
||||
{
|
||||
"label": "gpt-5",
|
||||
"name": "gpt-5",
|
||||
"input_cost": 0.00000125,
|
||||
"output_cost": 0.00001
|
||||
},
|
||||
{
|
||||
"label": "gpt-5-mini",
|
||||
"name": "gpt-5-mini",
|
||||
"input_cost": 0.00000025,
|
||||
"output_cost": 0.000002
|
||||
},
|
||||
{
|
||||
"label": "gpt-5-nano",
|
||||
"name": "gpt-5-nano",
|
||||
"input_cost": 0.00000005,
|
||||
"output_cost": 0.0000004
|
||||
},
|
||||
{
|
||||
"label": "gpt-4.1",
|
||||
"name": "gpt-4.1",
|
||||
|
|
@ -357,6 +423,18 @@
|
|||
"name": "gpt-4.5-preview",
|
||||
"input_cost": 0.000075,
|
||||
"output_cost": 0.00015
|
||||
},
|
||||
{
|
||||
"label": "gpt-4.1-mini",
|
||||
"name": "gpt-4.1-mini",
|
||||
"input_cost": 0.0000004,
|
||||
"output_cost": 0.0000016
|
||||
},
|
||||
{
|
||||
"label": "gpt-5-chat-latest",
|
||||
"name": "gpt-5-chat-latest",
|
||||
"input_cost": 0.00000125,
|
||||
"output_cost": 0.00001
|
||||
}
|
||||
]
|
||||
},
|
||||
|
|
@ -416,12 +494,45 @@
|
|||
"name": "gpt-4-1106-preview",
|
||||
"input_cost": 0.00001,
|
||||
"output_cost": 0.00003
|
||||
},
|
||||
{
|
||||
"label": "gpt-4.1-mini",
|
||||
"name": "gpt-4.1-mini",
|
||||
"input_cost": 0.0000004,
|
||||
"output_cost": 0.0000016
|
||||
},
|
||||
{
|
||||
"label": "gpt-5-chat-latest",
|
||||
"name": "gpt-5-chat-latest",
|
||||
"input_cost": 0.00000125,
|
||||
"output_cost": 0.00001
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "chatAnthropic",
|
||||
"models": [
|
||||
{
|
||||
"label": "claude-opus-4-5",
|
||||
"name": "claude-opus-4-5",
|
||||
"description": "Claude 4.5 Opus",
|
||||
"input_cost": 0.000005,
|
||||
"output_cost": 0.000025
|
||||
},
|
||||
{
|
||||
"label": "claude-sonnet-4-5",
|
||||
"name": "claude-sonnet-4-5",
|
||||
"description": "Claude 4.5 Sonnet",
|
||||
"input_cost": 0.000003,
|
||||
"output_cost": 0.000015
|
||||
},
|
||||
{
|
||||
"label": "claude-haiku-4-5",
|
||||
"name": "claude-haiku-4-5",
|
||||
"description": "Claude 4.5 Haiku",
|
||||
"input_cost": 0.000001,
|
||||
"output_cost": 0.000005
|
||||
},
|
||||
{
|
||||
"label": "claude-sonnet-4-0",
|
||||
"name": "claude-sonnet-4-0",
|
||||
|
|
@ -429,6 +540,13 @@
|
|||
"input_cost": 0.000003,
|
||||
"output_cost": 0.000015
|
||||
},
|
||||
{
|
||||
"label": "claude-opus-4-1",
|
||||
"name": "claude-opus-4-1",
|
||||
"description": "Claude 4.1 Opus",
|
||||
"input_cost": 0.000015,
|
||||
"output_cost": 0.000075
|
||||
},
|
||||
{
|
||||
"label": "claude-opus-4-0",
|
||||
"name": "claude-opus-4-0",
|
||||
|
|
@ -524,17 +642,41 @@
|
|||
"name": "chatGoogleGenerativeAI",
|
||||
"models": [
|
||||
{
|
||||
"label": "gemini-2.5-flash-preview-05-20",
|
||||
"name": "gemini-2.5-flash-preview-05-20",
|
||||
"input_cost": 0.15e-6,
|
||||
"output_cost": 6e-7
|
||||
"label": "gemini-3-pro-preview",
|
||||
"name": "gemini-3-pro-preview",
|
||||
"input_cost": 0.00002,
|
||||
"output_cost": 0.00012
|
||||
},
|
||||
{
|
||||
"label": "gemini-2.5-pro-preview-03-25",
|
||||
"name": "gemini-2.5-pro-preview-03-25",
|
||||
"label": "gemini-3-pro-image-preview",
|
||||
"name": "gemini-3-pro-image-preview",
|
||||
"input_cost": 0.00002,
|
||||
"output_cost": 0.00012
|
||||
},
|
||||
{
|
||||
"label": "gemini-2.5-pro",
|
||||
"name": "gemini-2.5-pro",
|
||||
"input_cost": 0.3e-6,
|
||||
"output_cost": 0.000025
|
||||
},
|
||||
{
|
||||
"label": "gemini-2.5-flash",
|
||||
"name": "gemini-2.5-flash",
|
||||
"input_cost": 1.25e-6,
|
||||
"output_cost": 0.00001
|
||||
},
|
||||
{
|
||||
"label": "gemini-2.5-flash-image",
|
||||
"name": "gemini-2.5-flash-image",
|
||||
"input_cost": 1.25e-6,
|
||||
"output_cost": 0.00001
|
||||
},
|
||||
{
|
||||
"label": "gemini-2.5-flash-lite",
|
||||
"name": "gemini-2.5-flash-lite",
|
||||
"input_cost": 1e-7,
|
||||
"output_cost": 4e-7
|
||||
},
|
||||
{
|
||||
"label": "gemini-2.0-flash",
|
||||
"name": "gemini-2.0-flash",
|
||||
|
|
@ -581,6 +723,42 @@
|
|||
{
|
||||
"name": "chatGoogleVertexAI",
|
||||
"models": [
|
||||
{
|
||||
"label": "gemini-3-pro-preview",
|
||||
"name": "gemini-3-pro-preview",
|
||||
"input_cost": 0.00002,
|
||||
"output_cost": 0.00012
|
||||
},
|
||||
{
|
||||
"label": "gemini-2.5-pro",
|
||||
"name": "gemini-2.5-pro",
|
||||
"input_cost": 0.3e-6,
|
||||
"output_cost": 0.000025
|
||||
},
|
||||
{
|
||||
"label": "gemini-2.5-flash",
|
||||
"name": "gemini-2.5-flash",
|
||||
"input_cost": 1.25e-6,
|
||||
"output_cost": 0.00001
|
||||
},
|
||||
{
|
||||
"label": "gemini-2.5-flash-lite",
|
||||
"name": "gemini-2.5-flash-lite",
|
||||
"input_cost": 1e-7,
|
||||
"output_cost": 4e-7
|
||||
},
|
||||
{
|
||||
"label": "gemini-2.0-flash",
|
||||
"name": "gemini-2.0-flash-001",
|
||||
"input_cost": 1e-7,
|
||||
"output_cost": 4e-7
|
||||
},
|
||||
{
|
||||
"label": "gemini-2.0-flash-lite",
|
||||
"name": "gemini-2.0-flash-lite-001",
|
||||
"input_cost": 7.5e-8,
|
||||
"output_cost": 3e-7
|
||||
},
|
||||
{
|
||||
"label": "gemini-1.5-flash-002",
|
||||
"name": "gemini-1.5-flash-002",
|
||||
|
|
@ -617,6 +795,34 @@
|
|||
"input_cost": 1.25e-7,
|
||||
"output_cost": 3.75e-7
|
||||
},
|
||||
{
|
||||
"label": "claude-opus-4-5@20251101",
|
||||
"name": "claude-opus-4-5@20251101",
|
||||
"description": "Claude 4.5 Opus",
|
||||
"input_cost": 0.000005,
|
||||
"output_cost": 0.000025
|
||||
},
|
||||
{
|
||||
"label": "claude-sonnet-4-5@20250929",
|
||||
"name": "claude-sonnet-4-5@20250929",
|
||||
"description": "Claude 4.5 Sonnet",
|
||||
"input_cost": 0.000003,
|
||||
"output_cost": 0.000015
|
||||
},
|
||||
{
|
||||
"label": "claude-haiku-4-5@20251001",
|
||||
"name": "claude-haiku-4-5@20251001",
|
||||
"description": "Claude 4.5 Haiku",
|
||||
"input_cost": 0.000001,
|
||||
"output_cost": 0.000005
|
||||
},
|
||||
{
|
||||
"label": "claude-opus-4-1@20250805",
|
||||
"name": "claude-opus-4-1@20250805",
|
||||
"description": "Claude 4.1 Opus",
|
||||
"input_cost": 0.000015,
|
||||
"output_cost": 0.000075
|
||||
},
|
||||
{
|
||||
"label": "claude-sonnet-4@20250514",
|
||||
"name": "claude-sonnet-4@20250514",
|
||||
|
|
@ -673,11 +879,63 @@
|
|||
"input_cost": 2.5e-7,
|
||||
"output_cost": 1.25e-6
|
||||
}
|
||||
],
|
||||
"regions": [
|
||||
{ "label": "us-east1", "name": "us-east1" },
|
||||
{ "label": "us-east4", "name": "us-east4" },
|
||||
{ "label": "us-central1", "name": "us-central1" },
|
||||
{ "label": "us-west1", "name": "us-west1" },
|
||||
{ "label": "europe-west4", "name": "europe-west4" },
|
||||
{ "label": "europe-west1", "name": "europe-west1" },
|
||||
{ "label": "europe-west3", "name": "europe-west3" },
|
||||
{ "label": "europe-west2", "name": "europe-west2" },
|
||||
{ "label": "asia-east1", "name": "asia-east1" },
|
||||
{ "label": "asia-southeast1", "name": "asia-southeast1" },
|
||||
{ "label": "asia-northeast1", "name": "asia-northeast1" },
|
||||
{ "label": "asia-south1", "name": "asia-south1" },
|
||||
{ "label": "australia-southeast1", "name": "australia-southeast1" },
|
||||
{ "label": "southamerica-east1", "name": "southamerica-east1" },
|
||||
{ "label": "africa-south1", "name": "africa-south1" },
|
||||
{ "label": "asia-east2", "name": "asia-east2" },
|
||||
{ "label": "asia-northeast2", "name": "asia-northeast2" },
|
||||
{ "label": "asia-northeast3", "name": "asia-northeast3" },
|
||||
{ "label": "asia-south2", "name": "asia-south2" },
|
||||
{ "label": "asia-southeast2", "name": "asia-southeast2" },
|
||||
{ "label": "australia-southeast2", "name": "australia-southeast2" },
|
||||
{ "label": "europe-central2", "name": "europe-central2" },
|
||||
{ "label": "europe-north1", "name": "europe-north1" },
|
||||
{ "label": "europe-north2", "name": "europe-north2" },
|
||||
{ "label": "europe-southwest1", "name": "europe-southwest1" },
|
||||
{ "label": "europe-west10", "name": "europe-west10" },
|
||||
{ "label": "europe-west12", "name": "europe-west12" },
|
||||
{ "label": "europe-west6", "name": "europe-west6" },
|
||||
{ "label": "europe-west8", "name": "europe-west8" },
|
||||
{ "label": "europe-west9", "name": "europe-west9" },
|
||||
{ "label": "me-central1", "name": "me-central1" },
|
||||
{ "label": "me-central2", "name": "me-central2" },
|
||||
{ "label": "me-west1", "name": "me-west1" },
|
||||
{ "label": "northamerica-northeast1", "name": "northamerica-northeast1" },
|
||||
{ "label": "northamerica-northeast2", "name": "northamerica-northeast2" },
|
||||
{ "label": "northamerica-south1", "name": "northamerica-south1" },
|
||||
{ "label": "southamerica-west1", "name": "southamerica-west1" },
|
||||
{ "label": "us-east5", "name": "us-east5" },
|
||||
{ "label": "us-south1", "name": "us-south1" },
|
||||
{ "label": "us-west2", "name": "us-west2" },
|
||||
{ "label": "us-west3", "name": "us-west3" },
|
||||
{ "label": "us-west4", "name": "us-west4" }
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "groqChat",
|
||||
"models": [
|
||||
{
|
||||
"label": "openai/gpt-oss-20b",
|
||||
"name": "openai/gpt-oss-20b"
|
||||
},
|
||||
{
|
||||
"label": "openai/gpt-oss-120b",
|
||||
"name": "openai/gpt-oss-120b"
|
||||
},
|
||||
{
|
||||
"label": "meta-llama/llama-4-maverick-17b-128e-instruct",
|
||||
"name": "meta-llama/llama-4-maverick-17b-128e-instruct"
|
||||
|
|
@ -789,6 +1047,30 @@
|
|||
{
|
||||
"name": "chatOpenAI",
|
||||
"models": [
|
||||
{
|
||||
"label": "gpt-5.1",
|
||||
"name": "gpt-5.1",
|
||||
"input_cost": 0.00000125,
|
||||
"output_cost": 0.00001
|
||||
},
|
||||
{
|
||||
"label": "gpt-5",
|
||||
"name": "gpt-5",
|
||||
"input_cost": 0.00000125,
|
||||
"output_cost": 0.00001
|
||||
},
|
||||
{
|
||||
"label": "gpt-5-mini",
|
||||
"name": "gpt-5-mini",
|
||||
"input_cost": 0.00000025,
|
||||
"output_cost": 0.000002
|
||||
},
|
||||
{
|
||||
"label": "gpt-5-nano",
|
||||
"name": "gpt-5-nano",
|
||||
"input_cost": 0.00000005,
|
||||
"output_cost": 0.0000004
|
||||
},
|
||||
{
|
||||
"label": "gpt-4.1",
|
||||
"name": "gpt-4.1",
|
||||
|
|
@ -1523,6 +1805,18 @@
|
|||
"name": "gpt-4-32k",
|
||||
"input_cost": 0.00006,
|
||||
"output_cost": 0.00012
|
||||
},
|
||||
{
|
||||
"label": "gpt-4.1-mini",
|
||||
"name": "gpt-4.1-mini",
|
||||
"input_cost": 0.0000004,
|
||||
"output_cost": 0.0000016
|
||||
},
|
||||
{
|
||||
"label": "gpt-5-chat-latest",
|
||||
"name": "gpt-5-chat-latest",
|
||||
"input_cost": 0.00000125,
|
||||
"output_cost": 0.00001
|
||||
}
|
||||
]
|
||||
},
|
||||
|
|
@ -1723,29 +2017,65 @@
|
|||
"name": "googlevertexaiEmbeddings",
|
||||
"models": [
|
||||
{
|
||||
"label": "multimodalembedding",
|
||||
"name": "multimodalembedding"
|
||||
"label": "gemini-embedding-001",
|
||||
"name": "gemini-embedding-001"
|
||||
},
|
||||
{
|
||||
"label": "text-embedding-004",
|
||||
"name": "text-embedding-004"
|
||||
},
|
||||
{
|
||||
"label": "text-embedding-005",
|
||||
"name": "text-embedding-005"
|
||||
},
|
||||
{
|
||||
"label": "text-multilingual-embedding-002",
|
||||
"name": "text-multilingual-embedding-002"
|
||||
},
|
||||
{
|
||||
"label": "textembedding-gecko@001",
|
||||
"name": "textembedding-gecko@001"
|
||||
},
|
||||
{
|
||||
"label": "textembedding-gecko@latest",
|
||||
"name": "textembedding-gecko@latest"
|
||||
},
|
||||
{
|
||||
"label": "textembedding-gecko-multilingual@latest",
|
||||
"name": "textembedding-gecko-multilingual@latest"
|
||||
}
|
||||
],
|
||||
"regions": [
|
||||
{ "label": "us-east1", "name": "us-east1" },
|
||||
{ "label": "us-east4", "name": "us-east4" },
|
||||
{ "label": "us-central1", "name": "us-central1" },
|
||||
{ "label": "us-west1", "name": "us-west1" },
|
||||
{ "label": "europe-west4", "name": "europe-west4" },
|
||||
{ "label": "europe-west1", "name": "europe-west1" },
|
||||
{ "label": "europe-west3", "name": "europe-west3" },
|
||||
{ "label": "europe-west2", "name": "europe-west2" },
|
||||
{ "label": "asia-east1", "name": "asia-east1" },
|
||||
{ "label": "asia-southeast1", "name": "asia-southeast1" },
|
||||
{ "label": "asia-northeast1", "name": "asia-northeast1" },
|
||||
{ "label": "asia-south1", "name": "asia-south1" },
|
||||
{ "label": "australia-southeast1", "name": "australia-southeast1" },
|
||||
{ "label": "southamerica-east1", "name": "southamerica-east1" },
|
||||
{ "label": "africa-south1", "name": "africa-south1" },
|
||||
{ "label": "asia-east2", "name": "asia-east2" },
|
||||
{ "label": "asia-northeast2", "name": "asia-northeast2" },
|
||||
{ "label": "asia-northeast3", "name": "asia-northeast3" },
|
||||
{ "label": "asia-south2", "name": "asia-south2" },
|
||||
{ "label": "asia-southeast2", "name": "asia-southeast2" },
|
||||
{ "label": "australia-southeast2", "name": "australia-southeast2" },
|
||||
{ "label": "europe-central2", "name": "europe-central2" },
|
||||
{ "label": "europe-north1", "name": "europe-north1" },
|
||||
{ "label": "europe-north2", "name": "europe-north2" },
|
||||
{ "label": "europe-southwest1", "name": "europe-southwest1" },
|
||||
{ "label": "europe-west10", "name": "europe-west10" },
|
||||
{ "label": "europe-west12", "name": "europe-west12" },
|
||||
{ "label": "europe-west6", "name": "europe-west6" },
|
||||
{ "label": "europe-west8", "name": "europe-west8" },
|
||||
{ "label": "europe-west9", "name": "europe-west9" },
|
||||
{ "label": "me-central1", "name": "me-central1" },
|
||||
{ "label": "me-central2", "name": "me-central2" },
|
||||
{ "label": "me-west1", "name": "me-west1" },
|
||||
{ "label": "northamerica-northeast1", "name": "northamerica-northeast1" },
|
||||
{ "label": "northamerica-northeast2", "name": "northamerica-northeast2" },
|
||||
{ "label": "northamerica-south1", "name": "northamerica-south1" },
|
||||
{ "label": "southamerica-west1", "name": "southamerica-west1" },
|
||||
{ "label": "us-east5", "name": "us-east5" },
|
||||
{ "label": "us-south1", "name": "us-south1" },
|
||||
{ "label": "us-west2", "name": "us-west2" },
|
||||
{ "label": "us-west3", "name": "us-west3" },
|
||||
{ "label": "us-west4", "name": "us-west4" }
|
||||
]
|
||||
},
|
||||
{
|
||||
|
|
|
|||
File diff suppressed because it is too large
Load Diff
|
|
@ -1,4 +1,5 @@
|
|||
import { CommonType, ICommonObject, ICondition, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
|
||||
import removeMarkdown from 'remove-markdown'
|
||||
|
||||
class Condition_Agentflow implements INode {
|
||||
label: string
|
||||
|
|
@ -300,8 +301,8 @@ class Condition_Agentflow implements INode {
|
|||
value2 = parseFloat(_value2 as string) || 0
|
||||
break
|
||||
default: // string
|
||||
value1 = _value1 as string
|
||||
value2 = _value2 as string
|
||||
value1 = removeMarkdown((_value1 as string) || '')
|
||||
value2 = removeMarkdown((_value2 as string) || '')
|
||||
}
|
||||
|
||||
const compareOperationResult = compareOperationFunctions[operation](value1, value2)
|
||||
|
|
@ -316,7 +317,7 @@ class Condition_Agentflow implements INode {
|
|||
}
|
||||
}
|
||||
|
||||
// If no condition is fullfilled, add isFulfilled to the ELSE condition
|
||||
// If no condition is fulfilled, add isFulfilled to the ELSE condition
|
||||
const dummyElseConditionData = {
|
||||
type: 'string',
|
||||
value1: '',
|
||||
|
|
|
|||
|
|
@ -8,8 +8,7 @@ import {
|
|||
INodeParams,
|
||||
IServerSideEventStreamer
|
||||
} from '../../../src/Interface'
|
||||
import { availableDependencies, defaultAllowBuiltInDep, getVars, prepareSandboxVars } from '../../../src/utils'
|
||||
import { NodeVM } from '@flowiseai/nodevm'
|
||||
import { getVars, executeJavaScriptCode, createCodeExecutionSandbox, processTemplateVariables } from '../../../src/utils'
|
||||
import { updateFlowState } from '../utils'
|
||||
|
||||
interface ICustomFunctionInputVariables {
|
||||
|
|
@ -19,9 +18,9 @@ interface ICustomFunctionInputVariables {
|
|||
|
||||
const exampleFunc = `/*
|
||||
* You can use any libraries imported in Flowise
|
||||
* You can use properties specified in Input Schema as variables. Ex: Property = userid, Variable = $userid
|
||||
* You can use properties specified in Input Variables with the prefix $. For example: $foo
|
||||
* You can get default flow config: $flow.sessionId, $flow.chatId, $flow.chatflowId, $flow.input, $flow.state
|
||||
* You can get custom variables: $vars.<variable-name>
|
||||
* You can get global variables: $vars.<variable-name>
|
||||
* Must return a string value at the end of function
|
||||
*/
|
||||
|
||||
|
|
@ -61,7 +60,7 @@ class CustomFunction_Agentflow implements INode {
|
|||
constructor() {
|
||||
this.label = 'Custom Function'
|
||||
this.name = 'customFunctionAgentflow'
|
||||
this.version = 1.0
|
||||
this.version = 1.1
|
||||
this.type = 'CustomFunction'
|
||||
this.category = 'Agent Flows'
|
||||
this.description = 'Execute custom function'
|
||||
|
|
@ -108,8 +107,7 @@ class CustomFunction_Agentflow implements INode {
|
|||
label: 'Key',
|
||||
name: 'key',
|
||||
type: 'asyncOptions',
|
||||
loadMethod: 'listRuntimeStateKeys',
|
||||
freeSolo: true
|
||||
loadMethod: 'listRuntimeStateKeys'
|
||||
},
|
||||
{
|
||||
label: 'Value',
|
||||
|
|
@ -135,7 +133,7 @@ class CustomFunction_Agentflow implements INode {
|
|||
|
||||
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<any> {
|
||||
const javascriptFunction = nodeData.inputs?.customFunctionJavascriptFunction as string
|
||||
const functionInputVariables = nodeData.inputs?.customFunctionInputVariables as ICustomFunctionInputVariables[]
|
||||
const functionInputVariables = (nodeData.inputs?.customFunctionInputVariables as ICustomFunctionInputVariables[]) ?? []
|
||||
const _customFunctionUpdateState = nodeData.inputs?.customFunctionUpdateState
|
||||
|
||||
const state = options.agentflowRuntime?.state as ICommonObject
|
||||
|
|
@ -146,78 +144,57 @@ class CustomFunction_Agentflow implements INode {
|
|||
const appDataSource = options.appDataSource as DataSource
|
||||
const databaseEntities = options.databaseEntities as IDatabaseEntity
|
||||
|
||||
// Update flow state if needed
|
||||
let newState = { ...state }
|
||||
if (_customFunctionUpdateState && Array.isArray(_customFunctionUpdateState) && _customFunctionUpdateState.length > 0) {
|
||||
newState = updateFlowState(state, _customFunctionUpdateState)
|
||||
}
|
||||
|
||||
const variables = await getVars(appDataSource, databaseEntities, nodeData, options)
|
||||
const flow = {
|
||||
input,
|
||||
state,
|
||||
chatflowId: options.chatflowid,
|
||||
sessionId: options.sessionId,
|
||||
chatId: options.chatId,
|
||||
input,
|
||||
state: newState
|
||||
rawOutput: options.postProcessing?.rawOutput || '',
|
||||
chatHistory: options.postProcessing?.chatHistory || [],
|
||||
sourceDocuments: options.postProcessing?.sourceDocuments,
|
||||
usedTools: options.postProcessing?.usedTools,
|
||||
artifacts: options.postProcessing?.artifacts,
|
||||
fileAnnotations: options.postProcessing?.fileAnnotations
|
||||
}
|
||||
|
||||
let sandbox: any = {
|
||||
$input: input,
|
||||
util: undefined,
|
||||
Symbol: undefined,
|
||||
child_process: undefined,
|
||||
fs: undefined,
|
||||
process: undefined
|
||||
}
|
||||
sandbox['$vars'] = prepareSandboxVars(variables)
|
||||
sandbox['$flow'] = flow
|
||||
|
||||
// Create additional sandbox variables for custom function inputs
|
||||
const additionalSandbox: ICommonObject = {}
|
||||
for (const item of functionInputVariables) {
|
||||
const variableName = item.variableName
|
||||
const variableValue = item.variableValue
|
||||
sandbox[`$${variableName}`] = variableValue
|
||||
additionalSandbox[`$${variableName}`] = variableValue
|
||||
}
|
||||
|
||||
const builtinDeps = process.env.TOOL_FUNCTION_BUILTIN_DEP
|
||||
? defaultAllowBuiltInDep.concat(process.env.TOOL_FUNCTION_BUILTIN_DEP.split(','))
|
||||
: defaultAllowBuiltInDep
|
||||
const externalDeps = process.env.TOOL_FUNCTION_EXTERNAL_DEP ? process.env.TOOL_FUNCTION_EXTERNAL_DEP.split(',') : []
|
||||
const deps = availableDependencies.concat(externalDeps)
|
||||
const sandbox = createCodeExecutionSandbox(input, variables, flow, additionalSandbox)
|
||||
|
||||
const nodeVMOptions = {
|
||||
console: 'inherit',
|
||||
sandbox,
|
||||
require: {
|
||||
external: { modules: deps },
|
||||
builtin: builtinDeps
|
||||
},
|
||||
eval: false,
|
||||
wasm: false,
|
||||
timeout: 10000
|
||||
} as any
|
||||
// Setup streaming function if needed
|
||||
const streamOutput = isStreamable
|
||||
? (output: string) => {
|
||||
const sseStreamer: IServerSideEventStreamer = options.sseStreamer
|
||||
sseStreamer.streamTokenEvent(chatId, output)
|
||||
}
|
||||
: undefined
|
||||
|
||||
const vm = new NodeVM(nodeVMOptions)
|
||||
try {
|
||||
const response = await vm.run(`module.exports = async function() {${javascriptFunction}}()`, __dirname)
|
||||
const response = await executeJavaScriptCode(javascriptFunction, sandbox, {
|
||||
libraries: ['axios'],
|
||||
streamOutput
|
||||
})
|
||||
|
||||
let finalOutput = response
|
||||
if (typeof response === 'object') {
|
||||
finalOutput = JSON.stringify(response, null, 2)
|
||||
}
|
||||
|
||||
if (isStreamable) {
|
||||
const sseStreamer: IServerSideEventStreamer = options.sseStreamer
|
||||
sseStreamer.streamTokenEvent(chatId, finalOutput)
|
||||
// Update flow state if needed
|
||||
let newState = { ...state }
|
||||
if (_customFunctionUpdateState && Array.isArray(_customFunctionUpdateState) && _customFunctionUpdateState.length > 0) {
|
||||
newState = updateFlowState(state, _customFunctionUpdateState)
|
||||
}
|
||||
|
||||
// Process template variables in state
|
||||
if (newState && Object.keys(newState).length > 0) {
|
||||
for (const key in newState) {
|
||||
if (newState[key].toString().includes('{{ output }}')) {
|
||||
newState[key] = finalOutput
|
||||
}
|
||||
}
|
||||
}
|
||||
newState = processTemplateVariables(newState, finalOutput)
|
||||
|
||||
const returnOutput = {
|
||||
id: nodeData.id,
|
||||
|
|
|
|||
|
|
@ -8,7 +8,7 @@ import {
|
|||
IServerSideEventStreamer
|
||||
} from '../../../src/Interface'
|
||||
import axios, { AxiosRequestConfig } from 'axios'
|
||||
import { getCredentialData, getCredentialParam } from '../../../src/utils'
|
||||
import { getCredentialData, getCredentialParam, processTemplateVariables, parseJsonBody } from '../../../src/utils'
|
||||
import { DataSource } from 'typeorm'
|
||||
import { BaseMessageLike } from '@langchain/core/messages'
|
||||
import { updateFlowState } from '../utils'
|
||||
|
|
@ -30,7 +30,7 @@ class ExecuteFlow_Agentflow implements INode {
|
|||
constructor() {
|
||||
this.label = 'Execute Flow'
|
||||
this.name = 'executeFlowAgentflow'
|
||||
this.version = 1.0
|
||||
this.version = 1.2
|
||||
this.type = 'ExecuteFlow'
|
||||
this.category = 'Agent Flows'
|
||||
this.description = 'Execute another flow'
|
||||
|
|
@ -62,7 +62,8 @@ class ExecuteFlow_Agentflow implements INode {
|
|||
name: 'executeFlowOverrideConfig',
|
||||
description: 'Override the config passed to the flow',
|
||||
type: 'json',
|
||||
optional: true
|
||||
optional: true,
|
||||
acceptVariable: true
|
||||
},
|
||||
{
|
||||
label: 'Base URL',
|
||||
|
|
@ -101,8 +102,7 @@ class ExecuteFlow_Agentflow implements INode {
|
|||
label: 'Key',
|
||||
name: 'key',
|
||||
type: 'asyncOptions',
|
||||
loadMethod: 'listRuntimeStateKeys',
|
||||
freeSolo: true
|
||||
loadMethod: 'listRuntimeStateKeys'
|
||||
},
|
||||
{
|
||||
label: 'Value',
|
||||
|
|
@ -162,12 +162,15 @@ class ExecuteFlow_Agentflow implements INode {
|
|||
const flowInput = nodeData.inputs?.executeFlowInput as string
|
||||
const returnResponseAs = nodeData.inputs?.executeFlowReturnResponseAs as string
|
||||
const _executeFlowUpdateState = nodeData.inputs?.executeFlowUpdateState
|
||||
const overrideConfig =
|
||||
typeof nodeData.inputs?.executeFlowOverrideConfig === 'string' &&
|
||||
nodeData.inputs.executeFlowOverrideConfig.startsWith('{') &&
|
||||
nodeData.inputs.executeFlowOverrideConfig.endsWith('}')
|
||||
? JSON.parse(nodeData.inputs.executeFlowOverrideConfig)
|
||||
: nodeData.inputs?.executeFlowOverrideConfig
|
||||
|
||||
let overrideConfig = nodeData.inputs?.executeFlowOverrideConfig
|
||||
if (typeof overrideConfig === 'string' && overrideConfig.startsWith('{') && overrideConfig.endsWith('}')) {
|
||||
try {
|
||||
overrideConfig = parseJsonBody(overrideConfig)
|
||||
} catch (parseError) {
|
||||
throw new Error(`Invalid JSON in executeFlowOverrideConfig: ${parseError.message}`)
|
||||
}
|
||||
}
|
||||
|
||||
const state = options.agentflowRuntime?.state as ICommonObject
|
||||
const runtimeChatHistory = (options.agentflowRuntime?.chatHistory as BaseMessageLike[]) ?? []
|
||||
|
|
@ -181,7 +184,8 @@ class ExecuteFlow_Agentflow implements INode {
|
|||
if (selectedFlowId === options.chatflowid) throw new Error('Cannot call the same agentflow!')
|
||||
|
||||
let headers: Record<string, string> = {
|
||||
'Content-Type': 'application/json'
|
||||
'Content-Type': 'application/json',
|
||||
'flowise-tool': 'true'
|
||||
}
|
||||
if (chatflowApiKey) headers = { ...headers, Authorization: `Bearer ${chatflowApiKey}` }
|
||||
|
||||
|
|
@ -215,13 +219,7 @@ class ExecuteFlow_Agentflow implements INode {
|
|||
}
|
||||
|
||||
// Process template variables in state
|
||||
if (newState && Object.keys(newState).length > 0) {
|
||||
for (const key in newState) {
|
||||
if (newState[key].toString().includes('{{ output }}')) {
|
||||
newState[key] = resultText
|
||||
}
|
||||
}
|
||||
}
|
||||
newState = processTemplateVariables(newState, resultText)
|
||||
|
||||
// Only add to runtime chat history if this is the first node
|
||||
const inputMessages = []
|
||||
|
|
|
|||
|
|
@ -1,8 +1,9 @@
|
|||
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
|
||||
import axios, { AxiosRequestConfig, Method, ResponseType } from 'axios'
|
||||
import { AxiosRequestConfig, Method, ResponseType } from 'axios'
|
||||
import FormData from 'form-data'
|
||||
import * as querystring from 'querystring'
|
||||
import { getCredentialData, getCredentialParam } from '../../../src/utils'
|
||||
import { getCredentialData, getCredentialParam, parseJsonBody } from '../../../src/utils'
|
||||
import { secureAxiosRequest } from '../../../src/httpSecurity'
|
||||
|
||||
class HTTP_Agentflow implements INode {
|
||||
label: string
|
||||
|
|
@ -18,37 +19,6 @@ class HTTP_Agentflow implements INode {
|
|||
credential: INodeParams
|
||||
inputs: INodeParams[]
|
||||
|
||||
private sanitizeJsonString(jsonString: string): string {
|
||||
// Remove common problematic escape sequences that are not valid JSON
|
||||
let sanitized = jsonString
|
||||
// Remove escaped square brackets (not valid JSON)
|
||||
.replace(/\\(\[|\])/g, '$1')
|
||||
// Fix unquoted string values in JSON (simple case)
|
||||
.replace(/:\s*([a-zA-Z][a-zA-Z0-9]*)\s*([,}])/g, ': "$1"$2')
|
||||
// Fix trailing commas
|
||||
.replace(/,(\s*[}\]])/g, '$1')
|
||||
|
||||
return sanitized
|
||||
}
|
||||
|
||||
private parseJsonBody(body: string): any {
|
||||
try {
|
||||
// First try to parse as-is
|
||||
return JSON.parse(body)
|
||||
} catch (error) {
|
||||
try {
|
||||
// If that fails, try to sanitize and parse
|
||||
const sanitized = this.sanitizeJsonString(body)
|
||||
return JSON.parse(sanitized)
|
||||
} catch (sanitizeError) {
|
||||
// If sanitization also fails, throw the original error with helpful message
|
||||
throw new Error(
|
||||
`Invalid JSON format in body. Original error: ${error.message}. Please ensure your JSON is properly formatted with quoted strings and valid escape sequences.`
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
constructor() {
|
||||
this.label = 'HTTP'
|
||||
this.name = 'httpAgentflow'
|
||||
|
|
@ -97,7 +67,8 @@ class HTTP_Agentflow implements INode {
|
|||
{
|
||||
label: 'URL',
|
||||
name: 'url',
|
||||
type: 'string'
|
||||
type: 'string',
|
||||
acceptVariable: true
|
||||
},
|
||||
{
|
||||
label: 'Headers',
|
||||
|
|
@ -304,7 +275,7 @@ class HTTP_Agentflow implements INode {
|
|||
if (method !== 'GET' && body) {
|
||||
switch (bodyType) {
|
||||
case 'json': {
|
||||
requestConfig.data = typeof body === 'string' ? this.parseJsonBody(body) : body
|
||||
requestConfig.data = typeof body === 'string' ? parseJsonBody(body) : body
|
||||
requestHeaders['Content-Type'] = 'application/json'
|
||||
break
|
||||
}
|
||||
|
|
@ -322,14 +293,14 @@ class HTTP_Agentflow implements INode {
|
|||
break
|
||||
}
|
||||
case 'xWwwFormUrlencoded':
|
||||
requestConfig.data = querystring.stringify(typeof body === 'string' ? this.parseJsonBody(body) : body)
|
||||
requestConfig.data = querystring.stringify(typeof body === 'string' ? parseJsonBody(body) : body)
|
||||
requestHeaders['Content-Type'] = 'application/x-www-form-urlencoded'
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// Make the HTTP request
|
||||
const response = await axios(requestConfig)
|
||||
// Make the secure HTTP request that validates all URLs in redirect chains
|
||||
const response = await secureAxiosRequest(requestConfig)
|
||||
|
||||
// Process response based on response type
|
||||
let responseData
|
||||
|
|
|
|||
|
|
@ -208,7 +208,7 @@ class HumanInput_Agentflow implements INode {
|
|||
humanInputDescription = (nodeData.inputs?.humanInputDescription as string) || 'Do you want to proceed?'
|
||||
const messages = [...pastChatHistory, ...runtimeChatHistory]
|
||||
// Find the last message in the messages array
|
||||
const lastMessage = (messages[messages.length - 1] as any).content || ''
|
||||
const lastMessage = messages.length > 0 ? (messages[messages.length - 1] as any).content || '' : ''
|
||||
humanInputDescription = `${lastMessage}\n\n${humanInputDescription}`
|
||||
if (isStreamable) {
|
||||
const sseStreamer: IServerSideEventStreamer = options.sseStreamer as IServerSideEventStreamer
|
||||
|
|
@ -241,8 +241,11 @@ class HumanInput_Agentflow implements INode {
|
|||
if (isStreamable) {
|
||||
const sseStreamer: IServerSideEventStreamer = options.sseStreamer as IServerSideEventStreamer
|
||||
for await (const chunk of await llmNodeInstance.stream(messages)) {
|
||||
sseStreamer.streamTokenEvent(chatId, chunk.content.toString())
|
||||
response = response.concat(chunk)
|
||||
const content = typeof chunk === 'string' ? chunk : chunk.content.toString()
|
||||
sseStreamer.streamTokenEvent(chatId, content)
|
||||
|
||||
const messageChunk = typeof chunk === 'string' ? new AIMessageChunk(chunk) : chunk
|
||||
response = response.concat(messageChunk)
|
||||
}
|
||||
humanInputDescription = response.content as string
|
||||
} else {
|
||||
|
|
|
|||
|
|
@ -1,4 +1,5 @@
|
|||
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
|
||||
import { parseJsonBody } from '../../../src/utils'
|
||||
|
||||
class Iteration_Agentflow implements INode {
|
||||
label: string
|
||||
|
|
@ -39,12 +40,17 @@ class Iteration_Agentflow implements INode {
|
|||
const iterationInput = nodeData.inputs?.iterationInput
|
||||
|
||||
// Helper function to clean JSON strings with redundant backslashes
|
||||
const cleanJsonString = (str: string): string => {
|
||||
return str.replace(/\\(["'[\]{}])/g, '$1')
|
||||
const safeParseJson = (str: string): string => {
|
||||
try {
|
||||
return parseJsonBody(str)
|
||||
} catch {
|
||||
// Try parsing after cleaning
|
||||
return parseJsonBody(str.replace(/\\(["'[\]{}])/g, '$1'))
|
||||
}
|
||||
}
|
||||
|
||||
const iterationInputArray =
|
||||
typeof iterationInput === 'string' && iterationInput !== '' ? JSON.parse(cleanJsonString(iterationInput)) : iterationInput
|
||||
typeof iterationInput === 'string' && iterationInput !== '' ? safeParseJson(iterationInput) : iterationInput
|
||||
|
||||
if (!iterationInputArray || !Array.isArray(iterationInputArray)) {
|
||||
throw new Error('Invalid input array')
|
||||
|
|
|
|||
|
|
@ -2,17 +2,20 @@ import { BaseChatModel } from '@langchain/core/language_models/chat_models'
|
|||
import { ICommonObject, IMessage, INode, INodeData, INodeOptionsValue, INodeParams, IServerSideEventStreamer } from '../../../src/Interface'
|
||||
import { AIMessageChunk, BaseMessageLike, MessageContentText } from '@langchain/core/messages'
|
||||
import { DEFAULT_SUMMARIZER_TEMPLATE } from '../prompt'
|
||||
import { z } from 'zod'
|
||||
import { AnalyticHandler } from '../../../src/handler'
|
||||
import { ILLMMessage, IStructuredOutput } from '../Interface.Agentflow'
|
||||
import { ILLMMessage } from '../Interface.Agentflow'
|
||||
import {
|
||||
addImageArtifactsToMessages,
|
||||
extractArtifactsFromResponse,
|
||||
getPastChatHistoryImageMessages,
|
||||
getUniqueImageMessages,
|
||||
processMessagesWithImages,
|
||||
replaceBase64ImagesWithFileReferences,
|
||||
replaceInlineDataWithFileReferences,
|
||||
updateFlowState
|
||||
} from '../utils'
|
||||
import { get } from 'lodash'
|
||||
import { processTemplateVariables, configureStructuredOutput } from '../../../src/utils'
|
||||
import { flatten } from 'lodash'
|
||||
|
||||
class LLM_Agentflow implements INode {
|
||||
label: string
|
||||
|
|
@ -31,7 +34,7 @@ class LLM_Agentflow implements INode {
|
|||
constructor() {
|
||||
this.label = 'LLM'
|
||||
this.name = 'llmAgentflow'
|
||||
this.version = 1.0
|
||||
this.version = 1.1
|
||||
this.type = 'LLM'
|
||||
this.category = 'Agent Flows'
|
||||
this.description = 'Large language models to analyze user-provided inputs and generate responses'
|
||||
|
|
@ -287,8 +290,7 @@ class LLM_Agentflow implements INode {
|
|||
label: 'Key',
|
||||
name: 'key',
|
||||
type: 'asyncOptions',
|
||||
loadMethod: 'listRuntimeStateKeys',
|
||||
freeSolo: true
|
||||
loadMethod: 'listRuntimeStateKeys'
|
||||
},
|
||||
{
|
||||
label: 'Value',
|
||||
|
|
@ -399,7 +401,11 @@ class LLM_Agentflow implements INode {
|
|||
const role = msg.role
|
||||
const content = msg.content
|
||||
if (role && content) {
|
||||
messages.push({ role, content })
|
||||
if (role === 'system') {
|
||||
messages.unshift({ role, content })
|
||||
} else {
|
||||
messages.push({ role, content })
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -444,10 +450,16 @@ class LLM_Agentflow implements INode {
|
|||
}
|
||||
delete nodeData.inputs?.llmMessages
|
||||
|
||||
/**
|
||||
* Add image artifacts from previous assistant responses as user messages
|
||||
* Images are converted from FILE-STORAGE::<image_path> to base 64 image_url format
|
||||
*/
|
||||
await addImageArtifactsToMessages(messages, options)
|
||||
|
||||
// Configure structured output if specified
|
||||
const isStructuredOutput = _llmStructuredOutput && Array.isArray(_llmStructuredOutput) && _llmStructuredOutput.length > 0
|
||||
if (isStructuredOutput) {
|
||||
llmNodeInstance = this.configureStructuredOutput(llmNodeInstance, _llmStructuredOutput)
|
||||
llmNodeInstance = configureStructuredOutput(llmNodeInstance, _llmStructuredOutput)
|
||||
}
|
||||
|
||||
// Initialize response and determine if streaming is possible
|
||||
|
|
@ -463,9 +475,11 @@ class LLM_Agentflow implements INode {
|
|||
|
||||
// Track execution time
|
||||
const startTime = Date.now()
|
||||
|
||||
const sseStreamer: IServerSideEventStreamer | undefined = options.sseStreamer
|
||||
|
||||
/*
|
||||
* Invoke LLM
|
||||
*/
|
||||
if (isStreamable) {
|
||||
response = await this.handleStreamingResponse(sseStreamer, llmNodeInstance, messages, chatId, abortController)
|
||||
} else {
|
||||
|
|
@ -474,11 +488,15 @@ class LLM_Agentflow implements INode {
|
|||
// Stream whole response back to UI if this is the last node
|
||||
if (isLastNode && options.sseStreamer) {
|
||||
const sseStreamer: IServerSideEventStreamer = options.sseStreamer as IServerSideEventStreamer
|
||||
let responseContent = JSON.stringify(response, null, 2)
|
||||
if (typeof response.content === 'string') {
|
||||
responseContent = response.content
|
||||
let finalResponse = ''
|
||||
if (response.content && Array.isArray(response.content)) {
|
||||
finalResponse = response.content.map((item: any) => item.text).join('\n')
|
||||
} else if (response.content && typeof response.content === 'string') {
|
||||
finalResponse = response.content
|
||||
} else {
|
||||
finalResponse = JSON.stringify(response, null, 2)
|
||||
}
|
||||
sseStreamer.streamTokenEvent(chatId, responseContent)
|
||||
sseStreamer.streamTokenEvent(chatId, finalResponse)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -486,6 +504,40 @@ class LLM_Agentflow implements INode {
|
|||
const endTime = Date.now()
|
||||
const timeDelta = endTime - startTime
|
||||
|
||||
// Extract artifacts and file annotations from response metadata
|
||||
let artifacts: any[] = []
|
||||
let fileAnnotations: any[] = []
|
||||
if (response.response_metadata) {
|
||||
const {
|
||||
artifacts: extractedArtifacts,
|
||||
fileAnnotations: extractedFileAnnotations,
|
||||
savedInlineImages
|
||||
} = await extractArtifactsFromResponse(response.response_metadata, newNodeData, options)
|
||||
|
||||
if (extractedArtifacts.length > 0) {
|
||||
artifacts = extractedArtifacts
|
||||
|
||||
// Stream artifacts if this is the last node
|
||||
if (isLastNode && sseStreamer) {
|
||||
sseStreamer.streamArtifactsEvent(chatId, artifacts)
|
||||
}
|
||||
}
|
||||
|
||||
if (extractedFileAnnotations.length > 0) {
|
||||
fileAnnotations = extractedFileAnnotations
|
||||
|
||||
// Stream file annotations if this is the last node
|
||||
if (isLastNode && sseStreamer) {
|
||||
sseStreamer.streamFileAnnotationsEvent(chatId, fileAnnotations)
|
||||
}
|
||||
}
|
||||
|
||||
// Replace inlineData base64 with file references in the response
|
||||
if (savedInlineImages && savedInlineImages.length > 0) {
|
||||
replaceInlineDataWithFileReferences(response, savedInlineImages)
|
||||
}
|
||||
}
|
||||
|
||||
// Update flow state if needed
|
||||
let newState = { ...state }
|
||||
if (_llmUpdateState && Array.isArray(_llmUpdateState) && _llmUpdateState.length > 0) {
|
||||
|
|
@ -505,10 +557,22 @@ class LLM_Agentflow implements INode {
|
|||
finalResponse = response.content.map((item: any) => item.text).join('\n')
|
||||
} else if (response.content && typeof response.content === 'string') {
|
||||
finalResponse = response.content
|
||||
} else if (response.content === '') {
|
||||
// Empty response content, this could happen when there is only image data
|
||||
finalResponse = ''
|
||||
} else {
|
||||
finalResponse = JSON.stringify(response, null, 2)
|
||||
}
|
||||
const output = this.prepareOutputObject(response, finalResponse, startTime, endTime, timeDelta, isStructuredOutput)
|
||||
const output = this.prepareOutputObject(
|
||||
response,
|
||||
finalResponse,
|
||||
startTime,
|
||||
endTime,
|
||||
timeDelta,
|
||||
isStructuredOutput,
|
||||
artifacts,
|
||||
fileAnnotations
|
||||
)
|
||||
|
||||
// End analytics tracking
|
||||
if (analyticHandlers && llmIds) {
|
||||
|
|
@ -520,41 +584,23 @@ class LLM_Agentflow implements INode {
|
|||
this.sendStreamingEvents(options, chatId, response)
|
||||
}
|
||||
|
||||
// Process template variables in state
|
||||
if (newState && Object.keys(newState).length > 0) {
|
||||
for (const key in newState) {
|
||||
const stateValue = newState[key].toString()
|
||||
if (stateValue.includes('{{ output')) {
|
||||
// Handle simple output replacement
|
||||
if (stateValue === '{{ output }}') {
|
||||
newState[key] = finalResponse
|
||||
continue
|
||||
}
|
||||
|
||||
// Handle JSON path expressions like {{ output.item1 }}
|
||||
// eslint-disable-next-line
|
||||
const match = stateValue.match(/{{[\s]*output\.([\w\.]+)[\s]*}}/)
|
||||
if (match) {
|
||||
try {
|
||||
// Parse the response if it's JSON
|
||||
const jsonResponse = typeof finalResponse === 'string' ? JSON.parse(finalResponse) : finalResponse
|
||||
// Get the value using lodash get
|
||||
const path = match[1]
|
||||
const value = get(jsonResponse, path)
|
||||
newState[key] = value ?? stateValue // Fall back to original if path not found
|
||||
} catch (e) {
|
||||
// If JSON parsing fails, keep original template
|
||||
console.warn(`Failed to parse JSON or find path in output: ${e}`)
|
||||
newState[key] = stateValue
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
// Stream file annotations if any were extracted
|
||||
if (fileAnnotations.length > 0 && isLastNode && sseStreamer) {
|
||||
sseStreamer.streamFileAnnotationsEvent(chatId, fileAnnotations)
|
||||
}
|
||||
|
||||
// Process template variables in state
|
||||
newState = processTemplateVariables(newState, finalResponse)
|
||||
|
||||
/**
|
||||
* Remove the temporarily added image artifact messages before storing
|
||||
* This is to avoid storing the actual base64 data into database
|
||||
*/
|
||||
const messagesToStore = messages.filter((msg: any) => !msg._isTemporaryImageMessage)
|
||||
|
||||
// Replace the actual messages array with one that includes the file references for images instead of base64 data
|
||||
const messagesWithFileReferences = replaceBase64ImagesWithFileReferences(
|
||||
messages,
|
||||
messagesToStore,
|
||||
runtimeImageMessagesWithFileRef,
|
||||
pastImageMessagesWithFileRef
|
||||
)
|
||||
|
|
@ -605,7 +651,13 @@ class LLM_Agentflow implements INode {
|
|||
{
|
||||
role: returnRole,
|
||||
content: finalResponse,
|
||||
name: nodeData?.label ? nodeData?.label.toLowerCase().replace(/\s/g, '_').trim() : nodeData?.id
|
||||
name: nodeData?.label ? nodeData?.label.toLowerCase().replace(/\s/g, '_').trim() : nodeData?.id,
|
||||
...(((artifacts && artifacts.length > 0) || (fileAnnotations && fileAnnotations.length > 0)) && {
|
||||
additional_kwargs: {
|
||||
...(artifacts && artifacts.length > 0 && { artifacts }),
|
||||
...(fileAnnotations && fileAnnotations.length > 0 && { fileAnnotations })
|
||||
}
|
||||
})
|
||||
}
|
||||
]
|
||||
}
|
||||
|
|
@ -775,59 +827,6 @@ class LLM_Agentflow implements INode {
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Configures structured output for the LLM
|
||||
*/
|
||||
private configureStructuredOutput(llmNodeInstance: BaseChatModel, llmStructuredOutput: IStructuredOutput[]): BaseChatModel {
|
||||
try {
|
||||
const zodObj: ICommonObject = {}
|
||||
for (const sch of llmStructuredOutput) {
|
||||
if (sch.type === 'string') {
|
||||
zodObj[sch.key] = z.string().describe(sch.description || '')
|
||||
} else if (sch.type === 'stringArray') {
|
||||
zodObj[sch.key] = z.array(z.string()).describe(sch.description || '')
|
||||
} else if (sch.type === 'number') {
|
||||
zodObj[sch.key] = z.number().describe(sch.description || '')
|
||||
} else if (sch.type === 'boolean') {
|
||||
zodObj[sch.key] = z.boolean().describe(sch.description || '')
|
||||
} else if (sch.type === 'enum') {
|
||||
const enumValues = sch.enumValues?.split(',').map((item: string) => item.trim()) || []
|
||||
zodObj[sch.key] = z
|
||||
.enum(enumValues.length ? (enumValues as [string, ...string[]]) : ['default'])
|
||||
.describe(sch.description || '')
|
||||
} else if (sch.type === 'jsonArray') {
|
||||
const jsonSchema = sch.jsonSchema
|
||||
if (jsonSchema) {
|
||||
try {
|
||||
// Parse the JSON schema
|
||||
const schemaObj = JSON.parse(jsonSchema)
|
||||
|
||||
// Create a Zod schema from the JSON schema
|
||||
const itemSchema = this.createZodSchemaFromJSON(schemaObj)
|
||||
|
||||
// Create an array schema of the item schema
|
||||
zodObj[sch.key] = z.array(itemSchema).describe(sch.description || '')
|
||||
} catch (err) {
|
||||
console.error(`Error parsing JSON schema for ${sch.key}:`, err)
|
||||
// Fallback to generic array of records
|
||||
zodObj[sch.key] = z.array(z.record(z.any())).describe(sch.description || '')
|
||||
}
|
||||
} else {
|
||||
// If no schema provided, use generic array of records
|
||||
zodObj[sch.key] = z.array(z.record(z.any())).describe(sch.description || '')
|
||||
}
|
||||
}
|
||||
}
|
||||
const structuredOutput = z.object(zodObj)
|
||||
|
||||
// @ts-ignore
|
||||
return llmNodeInstance.withStructuredOutput(structuredOutput)
|
||||
} catch (exception) {
|
||||
console.error(exception)
|
||||
return llmNodeInstance
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Handles streaming response from the LLM
|
||||
*/
|
||||
|
|
@ -844,16 +843,20 @@ class LLM_Agentflow implements INode {
|
|||
for await (const chunk of await llmNodeInstance.stream(messages, { signal: abortController?.signal })) {
|
||||
if (sseStreamer) {
|
||||
let content = ''
|
||||
if (Array.isArray(chunk.content) && chunk.content.length > 0) {
|
||||
|
||||
if (typeof chunk === 'string') {
|
||||
content = chunk
|
||||
} else if (Array.isArray(chunk.content) && chunk.content.length > 0) {
|
||||
const contents = chunk.content as MessageContentText[]
|
||||
content = contents.map((item) => item.text).join('')
|
||||
} else {
|
||||
} else if (chunk.content) {
|
||||
content = chunk.content.toString()
|
||||
}
|
||||
sseStreamer.streamTokenEvent(chatId, content)
|
||||
}
|
||||
|
||||
response = response.concat(chunk)
|
||||
const messageChunk = typeof chunk === 'string' ? new AIMessageChunk(chunk) : chunk
|
||||
response = response.concat(messageChunk)
|
||||
}
|
||||
} catch (error) {
|
||||
console.error('Error during streaming:', error)
|
||||
|
|
@ -875,7 +878,9 @@ class LLM_Agentflow implements INode {
|
|||
startTime: number,
|
||||
endTime: number,
|
||||
timeDelta: number,
|
||||
isStructuredOutput: boolean
|
||||
isStructuredOutput: boolean,
|
||||
artifacts: any[] = [],
|
||||
fileAnnotations: any[] = []
|
||||
): any {
|
||||
const output: any = {
|
||||
content: finalResponse,
|
||||
|
|
@ -894,15 +899,27 @@ class LLM_Agentflow implements INode {
|
|||
output.usageMetadata = response.usage_metadata
|
||||
}
|
||||
|
||||
if (response.response_metadata) {
|
||||
output.responseMetadata = response.response_metadata
|
||||
}
|
||||
|
||||
if (isStructuredOutput && typeof response === 'object') {
|
||||
const structuredOutput = response as Record<string, any>
|
||||
for (const key in structuredOutput) {
|
||||
if (structuredOutput[key]) {
|
||||
if (structuredOutput[key] !== undefined && structuredOutput[key] !== null) {
|
||||
output[key] = structuredOutput[key]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (artifacts && artifacts.length > 0) {
|
||||
output.artifacts = flatten(artifacts)
|
||||
}
|
||||
|
||||
if (fileAnnotations && fileAnnotations.length > 0) {
|
||||
output.fileAnnotations = fileAnnotations
|
||||
}
|
||||
|
||||
return output
|
||||
}
|
||||
|
||||
|
|
@ -913,7 +930,12 @@ class LLM_Agentflow implements INode {
|
|||
const sseStreamer: IServerSideEventStreamer = options.sseStreamer as IServerSideEventStreamer
|
||||
|
||||
if (response.tool_calls) {
|
||||
sseStreamer.streamCalledToolsEvent(chatId, response.tool_calls)
|
||||
const formattedToolCalls = response.tool_calls.map((toolCall: any) => ({
|
||||
tool: toolCall.name || 'tool',
|
||||
toolInput: toolCall.args,
|
||||
toolOutput: ''
|
||||
}))
|
||||
sseStreamer.streamCalledToolsEvent(chatId, flatten(formattedToolCalls))
|
||||
}
|
||||
|
||||
if (response.usage_metadata) {
|
||||
|
|
@ -922,107 +944,6 @@ class LLM_Agentflow implements INode {
|
|||
|
||||
sseStreamer.streamEndEvent(chatId)
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a Zod schema from a JSON schema object
|
||||
* @param jsonSchema The JSON schema object
|
||||
* @returns A Zod schema
|
||||
*/
|
||||
private createZodSchemaFromJSON(jsonSchema: any): z.ZodTypeAny {
|
||||
// If the schema is an object with properties, create an object schema
|
||||
if (typeof jsonSchema === 'object' && jsonSchema !== null) {
|
||||
const schemaObj: Record<string, z.ZodTypeAny> = {}
|
||||
|
||||
// Process each property in the schema
|
||||
for (const [key, value] of Object.entries(jsonSchema)) {
|
||||
if (value === null) {
|
||||
// Handle null values
|
||||
schemaObj[key] = z.null()
|
||||
} else if (typeof value === 'object' && !Array.isArray(value)) {
|
||||
// Check if the property has a type definition
|
||||
if ('type' in value) {
|
||||
const type = value.type as string
|
||||
const description = ('description' in value ? (value.description as string) : '') || ''
|
||||
|
||||
// Create the appropriate Zod type based on the type property
|
||||
if (type === 'string') {
|
||||
schemaObj[key] = z.string().describe(description)
|
||||
} else if (type === 'number') {
|
||||
schemaObj[key] = z.number().describe(description)
|
||||
} else if (type === 'boolean') {
|
||||
schemaObj[key] = z.boolean().describe(description)
|
||||
} else if (type === 'array') {
|
||||
// If it's an array type, check if items is defined
|
||||
if ('items' in value && value.items) {
|
||||
const itemSchema = this.createZodSchemaFromJSON(value.items)
|
||||
schemaObj[key] = z.array(itemSchema).describe(description)
|
||||
} else {
|
||||
// Default to array of any if items not specified
|
||||
schemaObj[key] = z.array(z.any()).describe(description)
|
||||
}
|
||||
} else if (type === 'object') {
|
||||
// If it's an object type, check if properties is defined
|
||||
if ('properties' in value && value.properties) {
|
||||
const nestedSchema = this.createZodSchemaFromJSON(value.properties)
|
||||
schemaObj[key] = nestedSchema.describe(description)
|
||||
} else {
|
||||
// Default to record of any if properties not specified
|
||||
schemaObj[key] = z.record(z.any()).describe(description)
|
||||
}
|
||||
} else {
|
||||
// Default to any for unknown types
|
||||
schemaObj[key] = z.any().describe(description)
|
||||
}
|
||||
|
||||
// Check if the property is optional
|
||||
if ('optional' in value && value.optional === true) {
|
||||
schemaObj[key] = schemaObj[key].optional()
|
||||
}
|
||||
} else if (Array.isArray(value)) {
|
||||
// Array values without a type property
|
||||
if (value.length > 0) {
|
||||
// If the array has items, recursively create a schema for the first item
|
||||
const itemSchema = this.createZodSchemaFromJSON(value[0])
|
||||
schemaObj[key] = z.array(itemSchema)
|
||||
} else {
|
||||
// Empty array, allow any array
|
||||
schemaObj[key] = z.array(z.any())
|
||||
}
|
||||
} else {
|
||||
// It's a nested object without a type property, recursively create schema
|
||||
schemaObj[key] = this.createZodSchemaFromJSON(value)
|
||||
}
|
||||
} else if (Array.isArray(value)) {
|
||||
// Array values
|
||||
if (value.length > 0) {
|
||||
// If the array has items, recursively create a schema for the first item
|
||||
const itemSchema = this.createZodSchemaFromJSON(value[0])
|
||||
schemaObj[key] = z.array(itemSchema)
|
||||
} else {
|
||||
// Empty array, allow any array
|
||||
schemaObj[key] = z.array(z.any())
|
||||
}
|
||||
} else {
|
||||
// For primitive values (which shouldn't be in the schema directly)
|
||||
// Use the corresponding Zod type
|
||||
if (typeof value === 'string') {
|
||||
schemaObj[key] = z.string()
|
||||
} else if (typeof value === 'number') {
|
||||
schemaObj[key] = z.number()
|
||||
} else if (typeof value === 'boolean') {
|
||||
schemaObj[key] = z.boolean()
|
||||
} else {
|
||||
schemaObj[key] = z.any()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return z.object(schemaObj)
|
||||
}
|
||||
|
||||
// Fallback to any for unknown types
|
||||
return z.any()
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { nodeClass: LLM_Agentflow }
|
||||
|
|
|
|||
|
|
@ -1,4 +1,5 @@
|
|||
import { ICommonObject, INode, INodeData, INodeOptionsValue, INodeParams } from '../../../src/Interface'
|
||||
import { updateFlowState } from '../utils'
|
||||
|
||||
class Loop_Agentflow implements INode {
|
||||
label: string
|
||||
|
|
@ -19,7 +20,7 @@ class Loop_Agentflow implements INode {
|
|||
constructor() {
|
||||
this.label = 'Loop'
|
||||
this.name = 'loopAgentflow'
|
||||
this.version = 1.0
|
||||
this.version = 1.2
|
||||
this.type = 'Loop'
|
||||
this.category = 'Agent Flows'
|
||||
this.description = 'Loop back to a previous node'
|
||||
|
|
@ -40,6 +41,39 @@ class Loop_Agentflow implements INode {
|
|||
name: 'maxLoopCount',
|
||||
type: 'number',
|
||||
default: 5
|
||||
},
|
||||
{
|
||||
label: 'Fallback Message',
|
||||
name: 'fallbackMessage',
|
||||
type: 'string',
|
||||
description: 'Message to display if the loop count is exceeded',
|
||||
placeholder: 'Enter your fallback message here',
|
||||
rows: 4,
|
||||
acceptVariable: true,
|
||||
optional: true
|
||||
},
|
||||
{
|
||||
label: 'Update Flow State',
|
||||
name: 'loopUpdateState',
|
||||
description: 'Update runtime state during the execution of the workflow',
|
||||
type: 'array',
|
||||
optional: true,
|
||||
acceptVariable: true,
|
||||
array: [
|
||||
{
|
||||
label: 'Key',
|
||||
name: 'key',
|
||||
type: 'asyncOptions',
|
||||
loadMethod: 'listRuntimeStateKeys'
|
||||
},
|
||||
{
|
||||
label: 'Value',
|
||||
name: 'value',
|
||||
type: 'string',
|
||||
acceptVariable: true,
|
||||
acceptNodeOutputAsVariable: true
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
|
|
@ -58,12 +92,20 @@ class Loop_Agentflow implements INode {
|
|||
})
|
||||
}
|
||||
return returnOptions
|
||||
},
|
||||
async listRuntimeStateKeys(_: INodeData, options: ICommonObject): Promise<INodeOptionsValue[]> {
|
||||
const previousNodes = options.previousNodes as ICommonObject[]
|
||||
const startAgentflowNode = previousNodes.find((node) => node.name === 'startAgentflow')
|
||||
const state = startAgentflowNode?.inputs?.startState as ICommonObject[]
|
||||
return state.map((item) => ({ label: item.key, name: item.key }))
|
||||
}
|
||||
}
|
||||
|
||||
async run(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
|
||||
const loopBackToNode = nodeData.inputs?.loopBackToNode as string
|
||||
const _maxLoopCount = nodeData.inputs?.maxLoopCount as string
|
||||
const fallbackMessage = nodeData.inputs?.fallbackMessage as string
|
||||
const _loopUpdateState = nodeData.inputs?.loopUpdateState
|
||||
|
||||
const state = options.agentflowRuntime?.state as ICommonObject
|
||||
|
||||
|
|
@ -75,16 +117,34 @@ class Loop_Agentflow implements INode {
|
|||
maxLoopCount: _maxLoopCount ? parseInt(_maxLoopCount) : 5
|
||||
}
|
||||
|
||||
const finalOutput = 'Loop back to ' + `${loopBackToNodeLabel} (${loopBackToNodeId})`
|
||||
|
||||
// Update flow state if needed
|
||||
let newState = { ...state }
|
||||
if (_loopUpdateState && Array.isArray(_loopUpdateState) && _loopUpdateState.length > 0) {
|
||||
newState = updateFlowState(state, _loopUpdateState)
|
||||
}
|
||||
|
||||
// Process template variables in state
|
||||
if (newState && Object.keys(newState).length > 0) {
|
||||
for (const key in newState) {
|
||||
if (newState[key].toString().includes('{{ output }}')) {
|
||||
newState[key] = finalOutput
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const returnOutput = {
|
||||
id: nodeData.id,
|
||||
name: this.name,
|
||||
input: data,
|
||||
output: {
|
||||
content: 'Loop back to ' + `${loopBackToNodeLabel} (${loopBackToNodeId})`,
|
||||
content: finalOutput,
|
||||
nodeID: loopBackToNodeId,
|
||||
maxLoopCount: _maxLoopCount ? parseInt(_maxLoopCount) : 5
|
||||
maxLoopCount: _maxLoopCount ? parseInt(_maxLoopCount) : 5,
|
||||
fallbackMessage
|
||||
},
|
||||
state
|
||||
state: newState
|
||||
}
|
||||
|
||||
return returnOutput
|
||||
|
|
|
|||
|
|
@ -8,6 +8,7 @@ import {
|
|||
IServerSideEventStreamer
|
||||
} from '../../../src/Interface'
|
||||
import { updateFlowState } from '../utils'
|
||||
import { processTemplateVariables } from '../../../src/utils'
|
||||
import { DataSource } from 'typeorm'
|
||||
import { BaseRetriever } from '@langchain/core/retrievers'
|
||||
import { Document } from '@langchain/core/documents'
|
||||
|
|
@ -35,7 +36,7 @@ class Retriever_Agentflow implements INode {
|
|||
constructor() {
|
||||
this.label = 'Retriever'
|
||||
this.name = 'retrieverAgentflow'
|
||||
this.version = 1.0
|
||||
this.version = 1.1
|
||||
this.type = 'Retriever'
|
||||
this.category = 'Agent Flows'
|
||||
this.description = 'Retrieve information from vector database'
|
||||
|
|
@ -86,8 +87,7 @@ class Retriever_Agentflow implements INode {
|
|||
label: 'Key',
|
||||
name: 'key',
|
||||
type: 'asyncOptions',
|
||||
loadMethod: 'listRuntimeStateKeys',
|
||||
freeSolo: true
|
||||
loadMethod: 'listRuntimeStateKeys'
|
||||
},
|
||||
{
|
||||
label: 'Value',
|
||||
|
|
@ -197,14 +197,7 @@ class Retriever_Agentflow implements INode {
|
|||
sseStreamer.streamTokenEvent(chatId, finalOutput)
|
||||
}
|
||||
|
||||
// Process template variables in state
|
||||
if (newState && Object.keys(newState).length > 0) {
|
||||
for (const key in newState) {
|
||||
if (newState[key].toString().includes('{{ output }}')) {
|
||||
newState[key] = finalOutput
|
||||
}
|
||||
}
|
||||
}
|
||||
newState = processTemplateVariables(newState, finalOutput)
|
||||
|
||||
const returnOutput = {
|
||||
id: nodeData.id,
|
||||
|
|
|
|||
|
|
@ -1,5 +1,6 @@
|
|||
import { ICommonObject, INode, INodeData, INodeOptionsValue, INodeParams, IServerSideEventStreamer } from '../../../src/Interface'
|
||||
import { updateFlowState } from '../utils'
|
||||
import { processTemplateVariables } from '../../../src/utils'
|
||||
import { Tool } from '@langchain/core/tools'
|
||||
import { ARTIFACTS_PREFIX, TOOL_ARGS_PREFIX } from '../../../src/agents'
|
||||
import zodToJsonSchema from 'zod-to-json-schema'
|
||||
|
|
@ -28,7 +29,7 @@ class Tool_Agentflow implements INode {
|
|||
constructor() {
|
||||
this.label = 'Tool'
|
||||
this.name = 'toolAgentflow'
|
||||
this.version = 1.1
|
||||
this.version = 1.2
|
||||
this.type = 'Tool'
|
||||
this.category = 'Agent Flows'
|
||||
this.description = 'Tools allow LLM to interact with external systems'
|
||||
|
|
@ -79,8 +80,7 @@ class Tool_Agentflow implements INode {
|
|||
label: 'Key',
|
||||
name: 'key',
|
||||
type: 'asyncOptions',
|
||||
loadMethod: 'listRuntimeStateKeys',
|
||||
freeSolo: true
|
||||
loadMethod: 'listRuntimeStateKeys'
|
||||
},
|
||||
{
|
||||
label: 'Value',
|
||||
|
|
@ -161,7 +161,7 @@ class Tool_Agentflow implements INode {
|
|||
toolInputArgs = { properties: allProperties }
|
||||
} else {
|
||||
// Handle single tool instance
|
||||
toolInputArgs = toolInstance.schema ? zodToJsonSchema(toolInstance.schema) : {}
|
||||
toolInputArgs = toolInstance.schema ? zodToJsonSchema(toolInstance.schema as any) : {}
|
||||
}
|
||||
|
||||
if (toolInputArgs && Object.keys(toolInputArgs).length > 0) {
|
||||
|
|
@ -227,6 +227,37 @@ class Tool_Agentflow implements INode {
|
|||
|
||||
let toolCallArgs: Record<string, any> = {}
|
||||
|
||||
const parseInputValue = (value: string): any => {
|
||||
if (typeof value !== 'string') {
|
||||
return value
|
||||
}
|
||||
|
||||
// Remove escape characters (backslashes before special characters)
|
||||
// ex: \["a", "b", "c", "d", "e"\]
|
||||
let cleanedValue = value
|
||||
.replace(/\\"/g, '"') // \" -> "
|
||||
.replace(/\\\\/g, '\\') // \\ -> \
|
||||
.replace(/\\\[/g, '[') // \[ -> [
|
||||
.replace(/\\\]/g, ']') // \] -> ]
|
||||
.replace(/\\\{/g, '{') // \{ -> {
|
||||
.replace(/\\\}/g, '}') // \} -> }
|
||||
|
||||
// Try to parse as JSON if it looks like JSON/array
|
||||
if (
|
||||
(cleanedValue.startsWith('[') && cleanedValue.endsWith(']')) ||
|
||||
(cleanedValue.startsWith('{') && cleanedValue.endsWith('}'))
|
||||
) {
|
||||
try {
|
||||
return JSON.parse(cleanedValue)
|
||||
} catch (e) {
|
||||
// If parsing fails, return the cleaned value
|
||||
return cleanedValue
|
||||
}
|
||||
}
|
||||
|
||||
return cleanedValue
|
||||
}
|
||||
|
||||
if (newToolNodeInstance.transformNodeInputsToToolArgs) {
|
||||
const defaultParams = newToolNodeInstance.transformNodeInputsToToolArgs(newNodeData)
|
||||
|
||||
|
|
@ -239,10 +270,11 @@ class Tool_Agentflow implements INode {
|
|||
for (const item of toolInputArgs) {
|
||||
const variableName = item.inputArgName
|
||||
const variableValue = item.inputArgValue
|
||||
toolCallArgs[variableName] = variableValue
|
||||
toolCallArgs[variableName] = parseInputValue(variableValue)
|
||||
}
|
||||
|
||||
const flowConfig = {
|
||||
chatflowId: options.chatflowid,
|
||||
sessionId: options.sessionId,
|
||||
chatId: options.chatId,
|
||||
input: input,
|
||||
|
|
@ -298,14 +330,7 @@ class Tool_Agentflow implements INode {
|
|||
sseStreamer.streamTokenEvent(chatId, toolOutput)
|
||||
}
|
||||
|
||||
// Process template variables in state
|
||||
if (newState && Object.keys(newState).length > 0) {
|
||||
for (const key in newState) {
|
||||
if (newState[key].toString().includes('{{ output }}')) {
|
||||
newState[key] = toolOutput
|
||||
}
|
||||
}
|
||||
}
|
||||
newState = processTemplateVariables(newState, toolOutput)
|
||||
|
||||
const returnOutput = {
|
||||
id: nodeData.id,
|
||||
|
|
|
|||
|
|
@ -1,10 +1,11 @@
|
|||
import { BaseMessage, MessageContentImageUrl } from '@langchain/core/messages'
|
||||
import { BaseMessage, MessageContentImageUrl, AIMessageChunk } from '@langchain/core/messages'
|
||||
import { getImageUploads } from '../../src/multiModalUtils'
|
||||
import { getFileFromStorage } from '../../src/storageUtils'
|
||||
import { ICommonObject, IFileUpload } from '../../src/Interface'
|
||||
import { addSingleFileToStorage, getFileFromStorage } from '../../src/storageUtils'
|
||||
import { ICommonObject, IFileUpload, INodeData } from '../../src/Interface'
|
||||
import { BaseMessageLike } from '@langchain/core/messages'
|
||||
import { IFlowState } from './Interface.Agentflow'
|
||||
import { mapMimeTypeToInputField } from '../../src/utils'
|
||||
import { getCredentialData, getCredentialParam, handleEscapeCharacters, mapMimeTypeToInputField } from '../../src/utils'
|
||||
import fetch from 'node-fetch'
|
||||
|
||||
export const addImagesToMessages = async (
|
||||
options: ICommonObject,
|
||||
|
|
@ -18,7 +19,8 @@ export const addImagesToMessages = async (
|
|||
for (const upload of imageUploads) {
|
||||
let bf = upload.data
|
||||
if (upload.type == 'stored-file') {
|
||||
const contents = await getFileFromStorage(upload.name, options.orgId, options.chatflowid, options.chatId)
|
||||
const fileName = upload.name.replace(/^FILE-STORAGE::/, '')
|
||||
const contents = await getFileFromStorage(fileName, options.orgId, options.chatflowid, options.chatId)
|
||||
// as the image is stored in the server, read the file and convert it to base64
|
||||
bf = 'data:' + upload.mime + ';base64,' + contents.toString('base64')
|
||||
|
||||
|
|
@ -89,8 +91,9 @@ export const processMessagesWithImages = async (
|
|||
if (item.type === 'stored-file' && item.name && item.mime.startsWith('image/')) {
|
||||
hasImageReferences = true
|
||||
try {
|
||||
const fileName = item.name.replace(/^FILE-STORAGE::/, '')
|
||||
// Get file contents from storage
|
||||
const contents = await getFileFromStorage(item.name, options.orgId, options.chatflowid, options.chatId)
|
||||
const contents = await getFileFromStorage(fileName, options.orgId, options.chatflowid, options.chatId)
|
||||
|
||||
// Create base64 data URL
|
||||
const base64Data = 'data:' + item.mime + ';base64,' + contents.toString('base64')
|
||||
|
|
@ -313,13 +316,17 @@ export const getPastChatHistoryImageMessages = async (
|
|||
if (message.additional_kwargs && message.additional_kwargs.fileUploads) {
|
||||
// example: [{"type":"stored-file","name":"0_DiXc4ZklSTo3M8J4.jpg","mime":"image/jpeg"}]
|
||||
const fileUploads = message.additional_kwargs.fileUploads
|
||||
const artifacts = message.additional_kwargs.artifacts
|
||||
const fileAnnotations = message.additional_kwargs.fileAnnotations
|
||||
const usedTools = message.additional_kwargs.usedTools
|
||||
try {
|
||||
let messageWithFileUploads = ''
|
||||
const uploads: IFileUpload[] = typeof fileUploads === 'string' ? JSON.parse(fileUploads) : fileUploads
|
||||
const imageContents: MessageContentImageUrl[] = []
|
||||
for (const upload of uploads) {
|
||||
if (upload.type === 'stored-file' && upload.mime.startsWith('image/')) {
|
||||
const fileData = await getFileFromStorage(upload.name, options.orgId, options.chatflowid, options.chatId)
|
||||
const fileName = upload.name.replace(/^FILE-STORAGE::/, '')
|
||||
const fileData = await getFileFromStorage(fileName, options.orgId, options.chatflowid, options.chatId)
|
||||
// as the image is stored in the server, read the file and convert it to base64
|
||||
const bf = 'data:' + upload.mime + ';base64,' + fileData.toString('base64')
|
||||
|
||||
|
|
@ -354,26 +361,87 @@ export const getPastChatHistoryImageMessages = async (
|
|||
}
|
||||
}
|
||||
const documents: string = await fileLoaderNodeInstance.init(nodeData, '', nodeOptions)
|
||||
messageWithFileUploads += `<doc name='${upload.name}'>${documents}</doc>\n\n`
|
||||
messageWithFileUploads += `<doc name='${upload.name}'>${handleEscapeCharacters(documents, true)}</doc>\n\n`
|
||||
}
|
||||
}
|
||||
const messageContent = messageWithFileUploads ? `${messageWithFileUploads}\n\n${message.content}` : message.content
|
||||
const hasArtifacts = artifacts && Array.isArray(artifacts) && artifacts.length > 0
|
||||
const hasFileAnnotations = fileAnnotations && Array.isArray(fileAnnotations) && fileAnnotations.length > 0
|
||||
const hasUsedTools = usedTools && Array.isArray(usedTools) && usedTools.length > 0
|
||||
|
||||
if (imageContents.length > 0) {
|
||||
chatHistory.push({
|
||||
const imageMessage: any = {
|
||||
role: messageRole,
|
||||
content: imageContents
|
||||
})
|
||||
}
|
||||
if (hasArtifacts || hasFileAnnotations || hasUsedTools) {
|
||||
imageMessage.additional_kwargs = {}
|
||||
if (hasArtifacts) imageMessage.additional_kwargs.artifacts = artifacts
|
||||
if (hasFileAnnotations) imageMessage.additional_kwargs.fileAnnotations = fileAnnotations
|
||||
if (hasUsedTools) imageMessage.additional_kwargs.usedTools = usedTools
|
||||
}
|
||||
chatHistory.push(imageMessage)
|
||||
transformedPastMessages.push({
|
||||
role: messageRole,
|
||||
content: [...JSON.parse((pastChatHistory[i] as any).additional_kwargs.fileUploads)]
|
||||
})
|
||||
}
|
||||
chatHistory.push({
|
||||
|
||||
const contentMessage: any = {
|
||||
role: messageRole,
|
||||
content: messageContent
|
||||
})
|
||||
}
|
||||
if (hasArtifacts || hasFileAnnotations || hasUsedTools) {
|
||||
contentMessage.additional_kwargs = {}
|
||||
if (hasArtifacts) contentMessage.additional_kwargs.artifacts = artifacts
|
||||
if (hasFileAnnotations) contentMessage.additional_kwargs.fileAnnotations = fileAnnotations
|
||||
if (hasUsedTools) contentMessage.additional_kwargs.usedTools = usedTools
|
||||
}
|
||||
chatHistory.push(contentMessage)
|
||||
} catch (e) {
|
||||
// failed to parse fileUploads, continue with text only
|
||||
const hasArtifacts = artifacts && Array.isArray(artifacts) && artifacts.length > 0
|
||||
const hasFileAnnotations = fileAnnotations && Array.isArray(fileAnnotations) && fileAnnotations.length > 0
|
||||
const hasUsedTools = usedTools && Array.isArray(usedTools) && usedTools.length > 0
|
||||
|
||||
const errorMessage: any = {
|
||||
role: messageRole,
|
||||
content: message.content
|
||||
}
|
||||
if (hasArtifacts || hasFileAnnotations || hasUsedTools) {
|
||||
errorMessage.additional_kwargs = {}
|
||||
if (hasArtifacts) errorMessage.additional_kwargs.artifacts = artifacts
|
||||
if (hasFileAnnotations) errorMessage.additional_kwargs.fileAnnotations = fileAnnotations
|
||||
if (hasUsedTools) errorMessage.additional_kwargs.usedTools = usedTools
|
||||
}
|
||||
chatHistory.push(errorMessage)
|
||||
}
|
||||
} else if (message.additional_kwargs) {
|
||||
const hasArtifacts =
|
||||
message.additional_kwargs.artifacts &&
|
||||
Array.isArray(message.additional_kwargs.artifacts) &&
|
||||
message.additional_kwargs.artifacts.length > 0
|
||||
const hasFileAnnotations =
|
||||
message.additional_kwargs.fileAnnotations &&
|
||||
Array.isArray(message.additional_kwargs.fileAnnotations) &&
|
||||
message.additional_kwargs.fileAnnotations.length > 0
|
||||
const hasUsedTools =
|
||||
message.additional_kwargs.usedTools &&
|
||||
Array.isArray(message.additional_kwargs.usedTools) &&
|
||||
message.additional_kwargs.usedTools.length > 0
|
||||
|
||||
if (hasArtifacts || hasFileAnnotations || hasUsedTools) {
|
||||
const messageAdditionalKwargs: any = {}
|
||||
if (hasArtifacts) messageAdditionalKwargs.artifacts = message.additional_kwargs.artifacts
|
||||
if (hasFileAnnotations) messageAdditionalKwargs.fileAnnotations = message.additional_kwargs.fileAnnotations
|
||||
if (hasUsedTools) messageAdditionalKwargs.usedTools = message.additional_kwargs.usedTools
|
||||
|
||||
chatHistory.push({
|
||||
role: messageRole,
|
||||
content: message.content,
|
||||
additional_kwargs: messageAdditionalKwargs
|
||||
})
|
||||
} else {
|
||||
chatHistory.push({
|
||||
role: messageRole,
|
||||
content: message.content
|
||||
|
|
@ -392,12 +460,443 @@ export const getPastChatHistoryImageMessages = async (
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets MIME type from filename extension
|
||||
*/
|
||||
export const getMimeTypeFromFilename = (filename: string): string => {
|
||||
const extension = filename.toLowerCase().split('.').pop()
|
||||
const mimeTypes: { [key: string]: string } = {
|
||||
png: 'image/png',
|
||||
jpg: 'image/jpeg',
|
||||
jpeg: 'image/jpeg',
|
||||
gif: 'image/gif',
|
||||
pdf: 'application/pdf',
|
||||
txt: 'text/plain',
|
||||
csv: 'text/csv',
|
||||
json: 'application/json',
|
||||
html: 'text/html',
|
||||
xml: 'application/xml'
|
||||
}
|
||||
return mimeTypes[extension || ''] || 'application/octet-stream'
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets artifact type from filename extension for UI rendering
|
||||
*/
|
||||
export const getArtifactTypeFromFilename = (filename: string): string => {
|
||||
const extension = filename.toLowerCase().split('.').pop()
|
||||
const artifactTypes: { [key: string]: string } = {
|
||||
png: 'png',
|
||||
jpg: 'jpeg',
|
||||
jpeg: 'jpeg',
|
||||
html: 'html',
|
||||
htm: 'html',
|
||||
md: 'markdown',
|
||||
markdown: 'markdown',
|
||||
json: 'json',
|
||||
js: 'javascript',
|
||||
javascript: 'javascript',
|
||||
tex: 'latex',
|
||||
latex: 'latex',
|
||||
txt: 'text',
|
||||
csv: 'text',
|
||||
pdf: 'text'
|
||||
}
|
||||
return artifactTypes[extension || ''] || 'text'
|
||||
}
|
||||
|
||||
/**
|
||||
* Saves base64 image data to storage and returns file information
|
||||
*/
|
||||
export const saveBase64Image = async (
|
||||
outputItem: any,
|
||||
options: ICommonObject
|
||||
): Promise<{ filePath: string; fileName: string; totalSize: number } | null> => {
|
||||
try {
|
||||
if (!outputItem.result) {
|
||||
return null
|
||||
}
|
||||
|
||||
// Extract base64 data and create buffer
|
||||
const base64Data = outputItem.result
|
||||
const imageBuffer = Buffer.from(base64Data, 'base64')
|
||||
|
||||
// Determine file extension and MIME type
|
||||
const outputFormat = outputItem.output_format || 'png'
|
||||
const fileName = `generated_image_${outputItem.id || Date.now()}.${outputFormat}`
|
||||
const mimeType = outputFormat === 'png' ? 'image/png' : 'image/jpeg'
|
||||
|
||||
// Save the image using the existing storage utility
|
||||
const { path, totalSize } = await addSingleFileToStorage(
|
||||
mimeType,
|
||||
imageBuffer,
|
||||
fileName,
|
||||
options.orgId,
|
||||
options.chatflowid,
|
||||
options.chatId
|
||||
)
|
||||
|
||||
return { filePath: path, fileName, totalSize }
|
||||
} catch (error) {
|
||||
console.error('Error saving base64 image:', error)
|
||||
return null
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Saves Gemini inline image data to storage and returns file information
|
||||
*/
|
||||
export const saveGeminiInlineImage = async (
|
||||
inlineItem: any,
|
||||
options: ICommonObject
|
||||
): Promise<{ filePath: string; fileName: string; totalSize: number } | null> => {
|
||||
try {
|
||||
if (!inlineItem.data || !inlineItem.mimeType) {
|
||||
return null
|
||||
}
|
||||
|
||||
// Extract base64 data and create buffer
|
||||
const base64Data = inlineItem.data
|
||||
const imageBuffer = Buffer.from(base64Data, 'base64')
|
||||
|
||||
// Determine file extension from MIME type
|
||||
const mimeType = inlineItem.mimeType
|
||||
let extension = 'png'
|
||||
if (mimeType.includes('jpeg') || mimeType.includes('jpg')) {
|
||||
extension = 'jpg'
|
||||
} else if (mimeType.includes('png')) {
|
||||
extension = 'png'
|
||||
} else if (mimeType.includes('gif')) {
|
||||
extension = 'gif'
|
||||
} else if (mimeType.includes('webp')) {
|
||||
extension = 'webp'
|
||||
}
|
||||
|
||||
const fileName = `gemini_generated_image_${Date.now()}.${extension}`
|
||||
|
||||
// Save the image using the existing storage utility
|
||||
const { path, totalSize } = await addSingleFileToStorage(
|
||||
mimeType,
|
||||
imageBuffer,
|
||||
fileName,
|
||||
options.orgId,
|
||||
options.chatflowid,
|
||||
options.chatId
|
||||
)
|
||||
|
||||
return { filePath: path, fileName, totalSize }
|
||||
} catch (error) {
|
||||
console.error('Error saving Gemini inline image:', error)
|
||||
return null
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Downloads file content from container file citation
|
||||
*/
|
||||
export const downloadContainerFile = async (
|
||||
containerId: string,
|
||||
fileId: string,
|
||||
filename: string,
|
||||
modelNodeData: INodeData,
|
||||
options: ICommonObject
|
||||
): Promise<{ filePath: string; totalSize: number } | null> => {
|
||||
try {
|
||||
const credentialData = await getCredentialData(modelNodeData.credential ?? '', options)
|
||||
const openAIApiKey = getCredentialParam('openAIApiKey', credentialData, modelNodeData)
|
||||
|
||||
if (!openAIApiKey) {
|
||||
console.warn('No OpenAI API key available for downloading container file')
|
||||
return null
|
||||
}
|
||||
|
||||
// Download the file using OpenAI Container API
|
||||
const response = await fetch(`https://api.openai.com/v1/containers/${containerId}/files/${fileId}/content`, {
|
||||
method: 'GET',
|
||||
headers: {
|
||||
Accept: '*/*',
|
||||
Authorization: `Bearer ${openAIApiKey}`
|
||||
}
|
||||
})
|
||||
|
||||
if (!response.ok) {
|
||||
console.warn(
|
||||
`Failed to download container file ${fileId} from container ${containerId}: ${response.status} ${response.statusText}`
|
||||
)
|
||||
return null
|
||||
}
|
||||
|
||||
// Extract the binary data from the Response object
|
||||
const data = await response.arrayBuffer()
|
||||
const dataBuffer = Buffer.from(data)
|
||||
const mimeType = getMimeTypeFromFilename(filename)
|
||||
|
||||
// Store the file using the same storage utility as OpenAIAssistant
|
||||
const { path, totalSize } = await addSingleFileToStorage(
|
||||
mimeType,
|
||||
dataBuffer,
|
||||
filename,
|
||||
options.orgId,
|
||||
options.chatflowid,
|
||||
options.chatId
|
||||
)
|
||||
|
||||
return { filePath: path, totalSize }
|
||||
} catch (error) {
|
||||
console.error('Error downloading container file:', error)
|
||||
return null
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Replace inlineData base64 with file references in the response content
|
||||
*/
|
||||
export const replaceInlineDataWithFileReferences = (
|
||||
response: AIMessageChunk,
|
||||
savedInlineImages: Array<{ filePath: string; fileName: string; mimeType: string }>
|
||||
): void => {
|
||||
// Check if content is an array
|
||||
if (!Array.isArray(response.content)) {
|
||||
return
|
||||
}
|
||||
|
||||
// Replace base64 data with file references in response content
|
||||
let savedImageIndex = 0
|
||||
for (let i = 0; i < response.content.length; i++) {
|
||||
const contentItem = response.content[i]
|
||||
if (
|
||||
typeof contentItem === 'object' &&
|
||||
contentItem.type === 'inlineData' &&
|
||||
contentItem.inlineData &&
|
||||
savedImageIndex < savedInlineImages.length
|
||||
) {
|
||||
const savedImage = savedInlineImages[savedImageIndex]
|
||||
// Replace with file reference
|
||||
response.content[i] = {
|
||||
type: 'stored-file',
|
||||
name: savedImage.fileName,
|
||||
mime: savedImage.mimeType,
|
||||
path: savedImage.filePath
|
||||
}
|
||||
savedImageIndex++
|
||||
}
|
||||
}
|
||||
|
||||
// Clear the inlineData from response_metadata to avoid duplication
|
||||
if (response.response_metadata?.inlineData) {
|
||||
delete response.response_metadata.inlineData
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Extracts artifacts from response metadata (both annotations and built-in tools)
|
||||
*/
|
||||
export const extractArtifactsFromResponse = async (
|
||||
responseMetadata: any,
|
||||
modelNodeData: INodeData,
|
||||
options: ICommonObject
|
||||
): Promise<{
|
||||
artifacts: any[]
|
||||
fileAnnotations: any[]
|
||||
savedInlineImages?: Array<{ filePath: string; fileName: string; mimeType: string }>
|
||||
}> => {
|
||||
const artifacts: any[] = []
|
||||
const fileAnnotations: any[] = []
|
||||
const savedInlineImages: Array<{ filePath: string; fileName: string; mimeType: string }> = []
|
||||
|
||||
// Handle Gemini inline data (image generation)
|
||||
if (responseMetadata?.inlineData && Array.isArray(responseMetadata.inlineData)) {
|
||||
for (const inlineItem of responseMetadata.inlineData) {
|
||||
if (inlineItem.type === 'gemini_inline_data' && inlineItem.data && inlineItem.mimeType) {
|
||||
try {
|
||||
const savedImageResult = await saveGeminiInlineImage(inlineItem, options)
|
||||
if (savedImageResult) {
|
||||
// Create artifact in the same format as other image artifacts
|
||||
const fileType = getArtifactTypeFromFilename(savedImageResult.fileName)
|
||||
artifacts.push({
|
||||
type: fileType,
|
||||
data: savedImageResult.filePath
|
||||
})
|
||||
|
||||
// Track saved image for replacing base64 data in content
|
||||
savedInlineImages.push({
|
||||
filePath: savedImageResult.filePath,
|
||||
fileName: savedImageResult.fileName,
|
||||
mimeType: inlineItem.mimeType
|
||||
})
|
||||
}
|
||||
} catch (error) {
|
||||
console.error('Error processing Gemini inline image artifact:', error)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (!responseMetadata?.output || !Array.isArray(responseMetadata.output)) {
|
||||
return { artifacts, fileAnnotations, savedInlineImages: savedInlineImages.length > 0 ? savedInlineImages : undefined }
|
||||
}
|
||||
|
||||
for (const outputItem of responseMetadata.output) {
|
||||
// Handle container file citations from annotations
|
||||
if (outputItem.type === 'message' && outputItem.content && Array.isArray(outputItem.content)) {
|
||||
for (const contentItem of outputItem.content) {
|
||||
if (contentItem.annotations && Array.isArray(contentItem.annotations)) {
|
||||
for (const annotation of contentItem.annotations) {
|
||||
if (annotation.type === 'container_file_citation' && annotation.file_id && annotation.filename) {
|
||||
try {
|
||||
// Download and store the file content
|
||||
const downloadResult = await downloadContainerFile(
|
||||
annotation.container_id,
|
||||
annotation.file_id,
|
||||
annotation.filename,
|
||||
modelNodeData,
|
||||
options
|
||||
)
|
||||
|
||||
if (downloadResult) {
|
||||
const fileType = getArtifactTypeFromFilename(annotation.filename)
|
||||
|
||||
if (fileType === 'png' || fileType === 'jpeg' || fileType === 'jpg') {
|
||||
const artifact = {
|
||||
type: fileType,
|
||||
data: downloadResult.filePath
|
||||
}
|
||||
|
||||
artifacts.push(artifact)
|
||||
} else {
|
||||
fileAnnotations.push({
|
||||
filePath: downloadResult.filePath,
|
||||
fileName: annotation.filename
|
||||
})
|
||||
}
|
||||
}
|
||||
} catch (error) {
|
||||
console.error('Error processing annotation:', error)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Handle built-in tool artifacts (like image generation)
|
||||
if (outputItem.type === 'image_generation_call' && outputItem.result) {
|
||||
try {
|
||||
const savedImageResult = await saveBase64Image(outputItem, options)
|
||||
if (savedImageResult) {
|
||||
// Replace the base64 result with the file path in the response metadata
|
||||
outputItem.result = savedImageResult.filePath
|
||||
|
||||
// Create artifact in the same format as other image artifacts
|
||||
const fileType = getArtifactTypeFromFilename(savedImageResult.fileName)
|
||||
artifacts.push({
|
||||
type: fileType,
|
||||
data: savedImageResult.filePath
|
||||
})
|
||||
}
|
||||
} catch (error) {
|
||||
console.error('Error processing image generation artifact:', error)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return { artifacts, fileAnnotations, savedInlineImages: savedInlineImages.length > 0 ? savedInlineImages : undefined }
|
||||
}
|
||||
|
||||
/**
|
||||
* Add image artifacts from previous assistant messages as user messages
|
||||
* This allows the LLM to see and reference the generated images in the conversation
|
||||
* Messages are marked with a special flag for later removal
|
||||
*/
|
||||
export const addImageArtifactsToMessages = async (messages: BaseMessageLike[], options: ICommonObject): Promise<void> => {
|
||||
const imageExtensions = ['png', 'jpg', 'jpeg', 'gif', 'webp']
|
||||
const messagesToInsert: Array<{ index: number; message: any }> = []
|
||||
|
||||
// Iterate through messages to find assistant messages with image artifacts
|
||||
for (let i = 0; i < messages.length; i++) {
|
||||
const message = messages[i] as any
|
||||
|
||||
// Check if this is an assistant message with artifacts
|
||||
if (
|
||||
(message.role === 'assistant' || message.role === 'ai') &&
|
||||
message.additional_kwargs?.artifacts &&
|
||||
Array.isArray(message.additional_kwargs.artifacts)
|
||||
) {
|
||||
const artifacts = message.additional_kwargs.artifacts
|
||||
const imageArtifacts: Array<{ type: string; name: string; mime: string }> = []
|
||||
|
||||
// Extract image artifacts
|
||||
for (const artifact of artifacts) {
|
||||
if (artifact.type && artifact.data) {
|
||||
// Check if this is an image artifact by file type
|
||||
if (imageExtensions.includes(artifact.type.toLowerCase())) {
|
||||
// Extract filename from the file path
|
||||
const fileName = artifact.data.split('/').pop() || artifact.data
|
||||
const mimeType = `image/${artifact.type.toLowerCase()}`
|
||||
|
||||
imageArtifacts.push({
|
||||
type: 'stored-file',
|
||||
name: fileName,
|
||||
mime: mimeType
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// If we found image artifacts, prepare to insert a user message after this assistant message
|
||||
if (imageArtifacts.length > 0) {
|
||||
// Check if the next message already contains these image artifacts to avoid duplicates
|
||||
const nextMessage = messages[i + 1] as any
|
||||
const shouldInsert =
|
||||
!nextMessage ||
|
||||
nextMessage.role !== 'user' ||
|
||||
!Array.isArray(nextMessage.content) ||
|
||||
!nextMessage.content.some(
|
||||
(item: any) =>
|
||||
(item.type === 'stored-file' || item.type === 'image_url') &&
|
||||
imageArtifacts.some((artifact) => {
|
||||
// Compare with and without FILE-STORAGE:: prefix
|
||||
const artifactName = artifact.name.replace('FILE-STORAGE::', '')
|
||||
const itemName = item.name?.replace('FILE-STORAGE::', '') || ''
|
||||
return artifactName === itemName
|
||||
})
|
||||
)
|
||||
|
||||
if (shouldInsert) {
|
||||
messagesToInsert.push({
|
||||
index: i + 1,
|
||||
message: {
|
||||
role: 'user',
|
||||
content: imageArtifacts,
|
||||
_isTemporaryImageMessage: true // Mark for later removal
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Insert messages in reverse order to maintain correct indices
|
||||
for (let i = messagesToInsert.length - 1; i >= 0; i--) {
|
||||
const { index, message } = messagesToInsert[i]
|
||||
messages.splice(index, 0, message)
|
||||
}
|
||||
|
||||
// Convert stored-file references to base64 image_url format
|
||||
if (messagesToInsert.length > 0) {
|
||||
const { updatedMessages } = await processMessagesWithImages(messages, options)
|
||||
// Replace the messages array content with the updated messages
|
||||
messages.length = 0
|
||||
messages.push(...updatedMessages)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Updates the flow state with new values
|
||||
*/
|
||||
export const updateFlowState = (state: ICommonObject, llmUpdateState: IFlowState[]): ICommonObject => {
|
||||
export const updateFlowState = (state: ICommonObject, updateState: IFlowState[]): ICommonObject => {
|
||||
let newFlowState: Record<string, any> = {}
|
||||
for (const state of llmUpdateState) {
|
||||
for (const state of updateState) {
|
||||
newFlowState[state.key] = state.value
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -183,7 +183,7 @@ json.dumps(my_dict)`
|
|||
// TODO: get print console output
|
||||
finalResult = await pyodide.runPythonAsync(code)
|
||||
} catch (error) {
|
||||
throw new Error(`Sorry, I'm unable to find answer for question: "${input}" using follwoing code: "${pythonCode}"`)
|
||||
throw new Error(`Sorry, I'm unable to find answer for question: "${input}" using following code: "${pythonCode}"`)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -5,7 +5,7 @@ import { RunnableSequence } from '@langchain/core/runnables'
|
|||
import { BaseChatModel } from '@langchain/core/language_models/chat_models'
|
||||
import { ChatPromptTemplate, MessagesPlaceholder, HumanMessagePromptTemplate, PromptTemplate } from '@langchain/core/prompts'
|
||||
import { formatToOpenAIToolMessages } from 'langchain/agents/format_scratchpad/openai_tools'
|
||||
import { getBaseClasses, transformBracesWithColon } from '../../../src/utils'
|
||||
import { getBaseClasses, transformBracesWithColon, convertChatHistoryToText, convertBaseMessagetoIMessage } from '../../../src/utils'
|
||||
import { type ToolsAgentStep } from 'langchain/agents/openai/output_parser'
|
||||
import {
|
||||
FlowiseMemory,
|
||||
|
|
@ -23,8 +23,10 @@ import { Moderation, checkInputs, streamResponse } from '../../moderation/Modera
|
|||
import { formatResponse } from '../../outputparsers/OutputParserHelpers'
|
||||
import type { Document } from '@langchain/core/documents'
|
||||
import { BaseRetriever } from '@langchain/core/retrievers'
|
||||
import { RESPONSE_TEMPLATE } from '../../chains/ConversationalRetrievalQAChain/prompts'
|
||||
import { RESPONSE_TEMPLATE, REPHRASE_TEMPLATE } from '../../chains/ConversationalRetrievalQAChain/prompts'
|
||||
import { addImagesToMessages, llmSupportsVision } from '../../../src/multiModalUtils'
|
||||
import { StringOutputParser } from '@langchain/core/output_parsers'
|
||||
import { Tool } from '@langchain/core/tools'
|
||||
|
||||
class ConversationalRetrievalToolAgent_Agents implements INode {
|
||||
label: string
|
||||
|
|
@ -42,7 +44,7 @@ class ConversationalRetrievalToolAgent_Agents implements INode {
|
|||
constructor(fields?: { sessionId?: string }) {
|
||||
this.label = 'Conversational Retrieval Tool Agent'
|
||||
this.name = 'conversationalRetrievalToolAgent'
|
||||
this.author = 'niztal(falkor)'
|
||||
this.author = 'niztal(falkor) and nikitas-novatix'
|
||||
this.version = 1.0
|
||||
this.type = 'AgentExecutor'
|
||||
this.category = 'Agents'
|
||||
|
|
@ -79,6 +81,26 @@ class ConversationalRetrievalToolAgent_Agents implements INode {
|
|||
optional: true,
|
||||
default: RESPONSE_TEMPLATE
|
||||
},
|
||||
{
|
||||
label: 'Rephrase Prompt',
|
||||
name: 'rephrasePrompt',
|
||||
type: 'string',
|
||||
description: 'Using previous chat history, rephrase question into a standalone question',
|
||||
warning: 'Prompt must include input variables: {chat_history} and {question}',
|
||||
rows: 4,
|
||||
additionalParams: true,
|
||||
optional: true,
|
||||
default: REPHRASE_TEMPLATE
|
||||
},
|
||||
{
|
||||
label: 'Rephrase Model',
|
||||
name: 'rephraseModel',
|
||||
type: 'BaseChatModel',
|
||||
description:
|
||||
'Optional: Use a different (faster/cheaper) model for rephrasing. If not specified, uses the main Tool Calling Chat Model.',
|
||||
optional: true,
|
||||
additionalParams: true
|
||||
},
|
||||
{
|
||||
label: 'Input Moderation',
|
||||
description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
|
||||
|
|
@ -103,8 +125,9 @@ class ConversationalRetrievalToolAgent_Agents implements INode {
|
|||
this.sessionId = fields?.sessionId
|
||||
}
|
||||
|
||||
async init(nodeData: INodeData, input: string, options: ICommonObject): Promise<any> {
|
||||
return prepareAgent(nodeData, options, { sessionId: this.sessionId, chatId: options.chatId, input })
|
||||
// The agent will be prepared in run() with the correct user message - it needs the actual runtime input for rephrasing
|
||||
async init(_nodeData: INodeData, _input: string, _options: ICommonObject): Promise<any> {
|
||||
return null
|
||||
}
|
||||
|
||||
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | ICommonObject> {
|
||||
|
|
@ -148,6 +171,23 @@ class ConversationalRetrievalToolAgent_Agents implements INode {
|
|||
sseStreamer.streamUsedToolsEvent(chatId, res.usedTools)
|
||||
usedTools = res.usedTools
|
||||
}
|
||||
|
||||
// If the tool is set to returnDirect, stream the output to the client
|
||||
if (res.usedTools && res.usedTools.length) {
|
||||
let inputTools = nodeData.inputs?.tools
|
||||
inputTools = flatten(inputTools)
|
||||
for (const tool of res.usedTools) {
|
||||
const inputTool = inputTools.find((inputTool: Tool) => inputTool.name === tool.tool)
|
||||
if (inputTool && (inputTool as any).returnDirect && shouldStreamResponse) {
|
||||
sseStreamer.streamTokenEvent(chatId, tool.toolOutput)
|
||||
// Prevent CustomChainHandler from streaming the same output again
|
||||
if (res.output === tool.toolOutput) {
|
||||
res.output = ''
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
// The CustomChainHandler will send the stream end event
|
||||
} else {
|
||||
res = await executor.invoke({ input }, { callbacks: [loggerHandler, ...callbacks] })
|
||||
if (res.sourceDocuments) {
|
||||
|
|
@ -210,9 +250,11 @@ const prepareAgent = async (
|
|||
flowObj: { sessionId?: string; chatId?: string; input?: string }
|
||||
) => {
|
||||
const model = nodeData.inputs?.model as BaseChatModel
|
||||
const rephraseModel = (nodeData.inputs?.rephraseModel as BaseChatModel) || model // Use main model if not specified
|
||||
const maxIterations = nodeData.inputs?.maxIterations as string
|
||||
const memory = nodeData.inputs?.memory as FlowiseMemory
|
||||
let systemMessage = nodeData.inputs?.systemMessage as string
|
||||
let rephrasePrompt = nodeData.inputs?.rephrasePrompt as string
|
||||
let tools = nodeData.inputs?.tools
|
||||
tools = flatten(tools)
|
||||
const memoryKey = memory.memoryKey ? memory.memoryKey : 'chat_history'
|
||||
|
|
@ -220,6 +262,9 @@ const prepareAgent = async (
|
|||
const vectorStoreRetriever = nodeData.inputs?.vectorStoreRetriever as BaseRetriever
|
||||
|
||||
systemMessage = transformBracesWithColon(systemMessage)
|
||||
if (rephrasePrompt) {
|
||||
rephrasePrompt = transformBracesWithColon(rephrasePrompt)
|
||||
}
|
||||
|
||||
const prompt = ChatPromptTemplate.fromMessages([
|
||||
['system', systemMessage ? systemMessage : `You are a helpful AI assistant.`],
|
||||
|
|
@ -263,6 +308,37 @@ const prepareAgent = async (
|
|||
|
||||
const modelWithTools = model.bindTools(tools)
|
||||
|
||||
// Function to get standalone question (either rephrased or original)
|
||||
const getStandaloneQuestion = async (input: string): Promise<string> => {
|
||||
// If no rephrase prompt, return the original input
|
||||
if (!rephrasePrompt) {
|
||||
return input
|
||||
}
|
||||
|
||||
// Get chat history (use empty string if none)
|
||||
const messages = (await memory.getChatMessages(flowObj?.sessionId, true)) as BaseMessage[]
|
||||
const iMessages = convertBaseMessagetoIMessage(messages)
|
||||
const chatHistoryString = convertChatHistoryToText(iMessages)
|
||||
|
||||
// Always rephrase to normalize/expand user queries for better retrieval
|
||||
try {
|
||||
const CONDENSE_QUESTION_PROMPT = PromptTemplate.fromTemplate(rephrasePrompt)
|
||||
const condenseQuestionChain = RunnableSequence.from([CONDENSE_QUESTION_PROMPT, rephraseModel, new StringOutputParser()])
|
||||
const res = await condenseQuestionChain.invoke({
|
||||
question: input,
|
||||
chat_history: chatHistoryString
|
||||
})
|
||||
return res
|
||||
} catch (error) {
|
||||
console.error('Error rephrasing question:', error)
|
||||
// On error, fall back to original input
|
||||
return input
|
||||
}
|
||||
}
|
||||
|
||||
// Get standalone question before creating runnable
|
||||
const standaloneQuestion = await getStandaloneQuestion(flowObj?.input || '')
|
||||
|
||||
const runnableAgent = RunnableSequence.from([
|
||||
{
|
||||
[inputKey]: (i: { input: string; steps: ToolsAgentStep[] }) => i.input,
|
||||
|
|
@ -272,7 +348,9 @@ const prepareAgent = async (
|
|||
return messages ?? []
|
||||
},
|
||||
context: async (i: { input: string; chatHistory?: string }) => {
|
||||
const relevantDocs = await vectorStoreRetriever.invoke(i.input)
|
||||
// Use the standalone question (rephrased or original) for retrieval
|
||||
const retrievalQuery = standaloneQuestion || i.input
|
||||
const relevantDocs = await vectorStoreRetriever.invoke(retrievalQuery)
|
||||
const formattedDocs = formatDocs(relevantDocs)
|
||||
return formattedDocs
|
||||
}
|
||||
|
|
@ -295,4 +373,6 @@ const prepareAgent = async (
|
|||
return executor
|
||||
}
|
||||
|
||||
module.exports = { nodeClass: ConversationalRetrievalToolAgent_Agents }
|
||||
module.exports = {
|
||||
nodeClass: ConversationalRetrievalToolAgent_Agents
|
||||
}
|
||||
|
|
|
|||
|
|
@ -578,7 +578,7 @@ class OpenAIAssistant_Agents implements INode {
|
|||
toolOutput
|
||||
})
|
||||
} catch (e) {
|
||||
await analyticHandlers.onToolEnd(toolIds, e)
|
||||
await analyticHandlers.onToolError(toolIds, e)
|
||||
console.error('Error executing tool', e)
|
||||
throw new Error(
|
||||
`Error executing tool. Tool: ${tool.name}. Thread ID: ${threadId}. Run ID: ${runThreadId}`
|
||||
|
|
@ -703,7 +703,7 @@ class OpenAIAssistant_Agents implements INode {
|
|||
toolOutput
|
||||
})
|
||||
} catch (e) {
|
||||
await analyticHandlers.onToolEnd(toolIds, e)
|
||||
await analyticHandlers.onToolError(toolIds, e)
|
||||
console.error('Error executing tool', e)
|
||||
clearInterval(timeout)
|
||||
reject(
|
||||
|
|
@ -1096,7 +1096,7 @@ async function handleToolSubmission(params: ToolSubmissionParams): Promise<ToolS
|
|||
toolOutput
|
||||
})
|
||||
} catch (e) {
|
||||
await analyticHandlers.onToolEnd(toolIds, e)
|
||||
await analyticHandlers.onToolError(toolIds, e)
|
||||
console.error('Error executing tool', e)
|
||||
throw new Error(`Error executing tool. Tool: ${tool.name}. Thread ID: ${threadId}. Run ID: ${runThreadId}`)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -23,7 +23,7 @@ class AWSChatBedrock_ChatModels implements INode {
|
|||
constructor() {
|
||||
this.label = 'AWS ChatBedrock'
|
||||
this.name = 'awsChatBedrock'
|
||||
this.version = 6.0
|
||||
this.version = 6.1
|
||||
this.type = 'AWSChatBedrock'
|
||||
this.icon = 'aws.svg'
|
||||
this.category = 'Chat Models'
|
||||
|
|
@ -100,6 +100,16 @@ class AWSChatBedrock_ChatModels implements INode {
|
|||
'Allow image input. Refer to the <a href="https://docs.flowiseai.com/using-flowise/uploads#image" target="_blank">docs</a> for more details.',
|
||||
default: false,
|
||||
optional: true
|
||||
},
|
||||
{
|
||||
label: 'Latency Optimized',
|
||||
name: 'latencyOptimized',
|
||||
type: 'boolean',
|
||||
description:
|
||||
'Enable latency optimized configuration for supported models. Refer to the supported <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/latency-optimized-inference.html" target="_blank">latecny optimized models</a> for more details.',
|
||||
default: false,
|
||||
optional: true,
|
||||
additionalParams: true
|
||||
}
|
||||
]
|
||||
}
|
||||
|
|
@ -122,6 +132,7 @@ class AWSChatBedrock_ChatModels implements INode {
|
|||
const iMax_tokens_to_sample = nodeData.inputs?.max_tokens_to_sample as string
|
||||
const cache = nodeData.inputs?.cache as BaseCache
|
||||
const streaming = nodeData.inputs?.streaming as boolean
|
||||
const latencyOptimized = nodeData.inputs?.latencyOptimized as boolean
|
||||
|
||||
const obj: ChatBedrockConverseInput = {
|
||||
region: iRegion,
|
||||
|
|
@ -131,6 +142,10 @@ class AWSChatBedrock_ChatModels implements INode {
|
|||
streaming: streaming ?? true
|
||||
}
|
||||
|
||||
if (latencyOptimized) {
|
||||
obj.performanceConfig = { latency: 'optimized' }
|
||||
}
|
||||
|
||||
/**
|
||||
* Long-term credentials specified in LLM configuration are optional.
|
||||
* Bedrock's credential provider falls back to the AWS SDK to fetch
|
||||
|
|
|
|||
|
|
@ -1,9 +1,10 @@
|
|||
import { AzureOpenAIInput, AzureChatOpenAI as LangchainAzureChatOpenAI, ChatOpenAIFields, OpenAIClient } from '@langchain/openai'
|
||||
import { AzureOpenAIInput, AzureChatOpenAI as LangchainAzureChatOpenAI, ChatOpenAIFields } from '@langchain/openai'
|
||||
import { BaseCache } from '@langchain/core/caches'
|
||||
import { ICommonObject, IMultiModalOption, INode, INodeData, INodeOptionsValue, INodeParams } from '../../../src/Interface'
|
||||
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
|
||||
import { getModels, MODEL_TYPE } from '../../../src/modelLoader'
|
||||
import { AzureChatOpenAI } from './FlowiseAzureChatOpenAI'
|
||||
import { OpenAI as OpenAIClient } from 'openai'
|
||||
|
||||
const serverCredentialsExists =
|
||||
!!process.env.AZURE_OPENAI_API_KEY &&
|
||||
|
|
@ -26,7 +27,7 @@ class AzureChatOpenAI_ChatModels implements INode {
|
|||
constructor() {
|
||||
this.label = 'Azure ChatOpenAI'
|
||||
this.name = 'azureChatOpenAI'
|
||||
this.version = 7.0
|
||||
this.version = 7.1
|
||||
this.type = 'AzureChatOpenAI'
|
||||
this.icon = 'Azure.svg'
|
||||
this.category = 'Chat Models'
|
||||
|
|
@ -154,6 +155,15 @@ class AzureChatOpenAI_ChatModels implements INode {
|
|||
optional: false,
|
||||
additionalParams: true
|
||||
},
|
||||
{
|
||||
label: 'Reasoning',
|
||||
description: 'Whether the model supports reasoning. Only applicable for reasoning models.',
|
||||
name: 'reasoning',
|
||||
type: 'boolean',
|
||||
default: false,
|
||||
optional: true,
|
||||
additionalParams: true
|
||||
},
|
||||
{
|
||||
label: 'Reasoning Effort',
|
||||
description: 'Constrains effort on reasoning for reasoning models. Only applicable for o1 and o3 models.',
|
||||
|
|
@ -173,9 +183,34 @@ class AzureChatOpenAI_ChatModels implements INode {
|
|||
name: 'high'
|
||||
}
|
||||
],
|
||||
default: 'medium',
|
||||
optional: false,
|
||||
additionalParams: true
|
||||
additionalParams: true,
|
||||
show: {
|
||||
reasoning: true
|
||||
}
|
||||
},
|
||||
{
|
||||
label: 'Reasoning Summary',
|
||||
description: `A summary of the reasoning performed by the model. This can be useful for debugging and understanding the model's reasoning process`,
|
||||
name: 'reasoningSummary',
|
||||
type: 'options',
|
||||
options: [
|
||||
{
|
||||
label: 'Auto',
|
||||
name: 'auto'
|
||||
},
|
||||
{
|
||||
label: 'Concise',
|
||||
name: 'concise'
|
||||
},
|
||||
{
|
||||
label: 'Detailed',
|
||||
name: 'detailed'
|
||||
}
|
||||
],
|
||||
additionalParams: true,
|
||||
show: {
|
||||
reasoning: true
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
|
|
@ -199,7 +234,8 @@ class AzureChatOpenAI_ChatModels implements INode {
|
|||
const topP = nodeData.inputs?.topP as string
|
||||
const basePath = nodeData.inputs?.basepath as string
|
||||
const baseOptions = nodeData.inputs?.baseOptions
|
||||
const reasoningEffort = nodeData.inputs?.reasoningEffort as OpenAIClient.Chat.ChatCompletionReasoningEffort
|
||||
const reasoningEffort = nodeData.inputs?.reasoningEffort as OpenAIClient.Chat.ChatCompletionReasoningEffort | null
|
||||
const reasoningSummary = nodeData.inputs?.reasoningSummary as 'auto' | 'concise' | 'detailed' | null
|
||||
|
||||
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
|
||||
const azureOpenAIApiKey = getCredentialParam('azureOpenAIApiKey', credentialData, nodeData)
|
||||
|
|
@ -237,11 +273,22 @@ class AzureChatOpenAI_ChatModels implements INode {
|
|||
console.error('Error parsing base options', exception)
|
||||
}
|
||||
}
|
||||
if (modelName === 'o3-mini' || modelName.includes('o1')) {
|
||||
if (modelName.includes('o1') || modelName.includes('o3') || modelName.includes('gpt-5')) {
|
||||
delete obj.temperature
|
||||
}
|
||||
if ((modelName.includes('o1') || modelName.includes('o3')) && reasoningEffort) {
|
||||
obj.reasoningEffort = reasoningEffort
|
||||
delete obj.stop
|
||||
const reasoning: OpenAIClient.Reasoning = {}
|
||||
if (reasoningEffort) {
|
||||
reasoning.effort = reasoningEffort
|
||||
}
|
||||
if (reasoningSummary) {
|
||||
reasoning.summary = reasoningSummary
|
||||
}
|
||||
obj.reasoning = reasoning
|
||||
|
||||
if (maxTokens) {
|
||||
delete obj.maxTokens
|
||||
obj.maxCompletionTokens = parseInt(maxTokens, 10)
|
||||
}
|
||||
}
|
||||
|
||||
const multiModalOption: IMultiModalOption = {
|
||||
|
|
|
|||
|
|
@ -6,6 +6,7 @@ export class AzureChatOpenAI extends LangchainAzureChatOpenAI implements IVision
|
|||
configuredModel: string
|
||||
configuredMaxToken?: number
|
||||
multiModalOption: IMultiModalOption
|
||||
builtInTools: Record<string, any>[] = []
|
||||
id: string
|
||||
|
||||
constructor(
|
||||
|
|
@ -27,7 +28,7 @@ export class AzureChatOpenAI extends LangchainAzureChatOpenAI implements IVision
|
|||
}
|
||||
|
||||
revertToOriginalModel(): void {
|
||||
this.modelName = this.configuredModel
|
||||
this.model = this.configuredModel
|
||||
this.maxTokens = this.configuredMaxToken
|
||||
}
|
||||
|
||||
|
|
@ -38,4 +39,8 @@ export class AzureChatOpenAI extends LangchainAzureChatOpenAI implements IVision
|
|||
setVisionModel(): void {
|
||||
// pass
|
||||
}
|
||||
|
||||
addBuiltInTools(builtInTool: Record<string, any>): void {
|
||||
this.builtInTools.push(builtInTool)
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -91,7 +91,7 @@ class ChatAnthropic_ChatModels implements INode {
|
|||
label: 'Extended Thinking',
|
||||
name: 'extendedThinking',
|
||||
type: 'boolean',
|
||||
description: 'Enable extended thinking for reasoning model such as Claude Sonnet 3.7',
|
||||
description: 'Enable extended thinking for reasoning model such as Claude Sonnet 3.7 and Claude 4',
|
||||
optional: true,
|
||||
additionalParams: true
|
||||
},
|
||||
|
|
|
|||
|
|
@ -136,7 +136,8 @@ class ChatCerebras_ChatModels implements INode {
|
|||
|
||||
const obj: ChatOpenAIFields = {
|
||||
temperature: parseFloat(temperature),
|
||||
modelName,
|
||||
model: modelName,
|
||||
apiKey: cerebrasAIApiKey,
|
||||
openAIApiKey: cerebrasAIApiKey,
|
||||
streaming: streaming ?? true
|
||||
}
|
||||
|
|
|
|||
|
|
@ -0,0 +1,176 @@
|
|||
import { BaseCache } from '@langchain/core/caches'
|
||||
import { ChatOpenAI, ChatOpenAIFields } from '@langchain/openai'
|
||||
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
|
||||
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
|
||||
|
||||
class ChatCometAPI_ChatModels implements INode {
|
||||
readonly baseURL: string = 'https://api.cometapi.com/v1'
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
type: string
|
||||
icon: string
|
||||
category: string
|
||||
description: string
|
||||
baseClasses: string[]
|
||||
credential: INodeParams
|
||||
inputs: INodeParams[]
|
||||
|
||||
constructor() {
|
||||
this.label = 'ChatCometAPI'
|
||||
this.name = 'chatCometAPI'
|
||||
this.version = 1.0
|
||||
this.type = 'ChatCometAPI'
|
||||
this.icon = 'cometapi.svg'
|
||||
this.category = 'Chat Models'
|
||||
this.description = 'Wrapper around CometAPI large language models that use the Chat endpoint'
|
||||
this.baseClasses = [this.type, ...getBaseClasses(ChatOpenAI)]
|
||||
this.credential = {
|
||||
label: 'Connect Credential',
|
||||
name: 'credential',
|
||||
type: 'credential',
|
||||
credentialNames: ['cometApi']
|
||||
}
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'Cache',
|
||||
name: 'cache',
|
||||
type: 'BaseCache',
|
||||
optional: true
|
||||
},
|
||||
{
|
||||
label: 'Model Name',
|
||||
name: 'modelName',
|
||||
type: 'string',
|
||||
default: 'gpt-5-mini',
|
||||
description: 'Enter the model name (e.g., gpt-5-mini, claude-sonnet-4-20250514, gemini-2.0-flash)'
|
||||
},
|
||||
{
|
||||
label: 'Temperature',
|
||||
name: 'temperature',
|
||||
type: 'number',
|
||||
step: 0.1,
|
||||
default: 0.7,
|
||||
optional: true
|
||||
},
|
||||
{
|
||||
label: 'Streaming',
|
||||
name: 'streaming',
|
||||
type: 'boolean',
|
||||
default: true,
|
||||
optional: true,
|
||||
additionalParams: true
|
||||
},
|
||||
{
|
||||
label: 'Max Tokens',
|
||||
name: 'maxTokens',
|
||||
type: 'number',
|
||||
step: 1,
|
||||
optional: true,
|
||||
additionalParams: true
|
||||
},
|
||||
{
|
||||
label: 'Top Probability',
|
||||
name: 'topP',
|
||||
type: 'number',
|
||||
step: 0.1,
|
||||
optional: true,
|
||||
additionalParams: true
|
||||
},
|
||||
{
|
||||
label: 'Frequency Penalty',
|
||||
name: 'frequencyPenalty',
|
||||
type: 'number',
|
||||
step: 0.1,
|
||||
optional: true,
|
||||
additionalParams: true
|
||||
},
|
||||
{
|
||||
label: 'Presence Penalty',
|
||||
name: 'presencePenalty',
|
||||
type: 'number',
|
||||
step: 0.1,
|
||||
optional: true,
|
||||
additionalParams: true
|
||||
},
|
||||
{
|
||||
label: 'Base Options',
|
||||
name: 'baseOptions',
|
||||
type: 'json',
|
||||
optional: true,
|
||||
additionalParams: true,
|
||||
description: 'Additional options to pass to the CometAPI client. This should be a JSON object.'
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
|
||||
const temperature = nodeData.inputs?.temperature as string
|
||||
const modelName = nodeData.inputs?.modelName as string
|
||||
const maxTokens = nodeData.inputs?.maxTokens as string
|
||||
const topP = nodeData.inputs?.topP as string
|
||||
const frequencyPenalty = nodeData.inputs?.frequencyPenalty as string
|
||||
const presencePenalty = nodeData.inputs?.presencePenalty as string
|
||||
const streaming = nodeData.inputs?.streaming as boolean
|
||||
const baseOptions = nodeData.inputs?.baseOptions
|
||||
|
||||
if (nodeData.inputs?.credentialId) {
|
||||
nodeData.credential = nodeData.inputs?.credentialId
|
||||
}
|
||||
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
|
||||
const openAIApiKey = getCredentialParam('cometApiKey', credentialData, nodeData)
|
||||
|
||||
// Custom error handling for missing API key
|
||||
if (!openAIApiKey || openAIApiKey.trim() === '') {
|
||||
throw new Error(
|
||||
'CometAPI API Key is missing or empty. Please provide a valid CometAPI API key in the credential configuration.'
|
||||
)
|
||||
}
|
||||
|
||||
// Custom error handling for missing model name
|
||||
if (!modelName || modelName.trim() === '') {
|
||||
throw new Error('Model Name is required. Please enter a valid model name (e.g., gpt-5-mini, claude-sonnet-4-20250514).')
|
||||
}
|
||||
|
||||
const cache = nodeData.inputs?.cache as BaseCache
|
||||
|
||||
const obj: ChatOpenAIFields = {
|
||||
temperature: parseFloat(temperature),
|
||||
modelName,
|
||||
openAIApiKey,
|
||||
apiKey: openAIApiKey,
|
||||
streaming: streaming ?? true
|
||||
}
|
||||
|
||||
if (maxTokens) obj.maxTokens = parseInt(maxTokens, 10)
|
||||
if (topP) obj.topP = parseFloat(topP)
|
||||
if (frequencyPenalty) obj.frequencyPenalty = parseFloat(frequencyPenalty)
|
||||
if (presencePenalty) obj.presencePenalty = parseFloat(presencePenalty)
|
||||
if (cache) obj.cache = cache
|
||||
|
||||
let parsedBaseOptions: any | undefined = undefined
|
||||
|
||||
if (baseOptions) {
|
||||
try {
|
||||
parsedBaseOptions = typeof baseOptions === 'object' ? baseOptions : JSON.parse(baseOptions)
|
||||
if (parsedBaseOptions.baseURL) {
|
||||
console.warn("The 'baseURL' parameter is not allowed when using the ChatCometAPI node.")
|
||||
parsedBaseOptions.baseURL = undefined
|
||||
}
|
||||
} catch (exception) {
|
||||
throw new Error('Invalid JSON in the BaseOptions: ' + exception)
|
||||
}
|
||||
}
|
||||
|
||||
const model = new ChatOpenAI({
|
||||
...obj,
|
||||
configuration: {
|
||||
baseURL: this.baseURL,
|
||||
...parsedBaseOptions
|
||||
}
|
||||
})
|
||||
return model
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { nodeClass: ChatCometAPI_ChatModels }
|
||||
|
|
@ -0,0 +1,7 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<svg version="1.1" xmlns="http://www.w3.org/2000/svg" width="207" height="232">
|
||||
<path d="M0 0 C0.67675781 0.46921875 1.35351562 0.9384375 2.05078125 1.421875 C17.41087449 13.29936953 26.5563465 33.10648567 29 52 C30.88353091 73.63839398 25.57681073 94.24662535 12.078125 111.44921875 C3.52802952 121.42968735 -6.99880017 129.74646365 -17.29296875 137.85546875 C-20.80734684 140.63958646 -24.27616958 143.47806764 -27.75 146.3125 C-33.38311845 150.9007126 -39.05678704 155.43068832 -44.78027344 159.90551758 C-49.04401373 163.24101866 -53.27752081 166.61244975 -57.5 170 C-65.19965911 176.17403006 -72.95956911 182.26929551 -80.72949219 188.35449219 C-82.46597474 189.71488377 -84.201812 191.07609486 -85.9375 192.4375 C-86.79347778 193.10785278 -86.79347778 193.10785278 -87.66674805 193.79174805 C-90.77553239 196.23184554 -93.86474805 198.69340592 -96.93359375 201.18359375 C-97.59174072 201.71605713 -98.2498877 202.24852051 -98.92797852 202.79711914 C-100.18028531 203.81199799 -101.42972104 204.83043537 -102.67553711 205.85327148 C-113.67406316 214.75771491 -113.67406316 214.75771491 -119 215.3125 C-119.66 215.209375 -120.32 215.10625 -121 215 C-120.76709766 206.13030267 -117.19647897 198.99781748 -113.5 191.125 C-112.58712887 189.14895917 -111.67783856 187.17130284 -110.76916504 185.19332886 C-109.94887846 183.41284051 -109.12017404 181.63623744 -108.29101562 179.85986328 C-106.94852395 177.01575524 -106.94852395 177.01575524 -106 174 C-111.81280806 176.66712818 -116.32649031 179.60521955 -121.0625 183.875 C-121.70026367 184.43727295 -122.33802734 184.9995459 -122.99511719 185.57885742 C-127.01702202 189.14802701 -130.94253647 192.81269758 -134.84643555 196.51000977 C-148.71629517 209.64341666 -148.71629517 209.64341666 -155 213 C-155.99 212.67 -156.98 212.34 -158 212 C-157.4887063 206.47279763 -156.48994938 202.10988914 -154.26953125 196.96484375 C-153.99662766 196.32366104 -153.72372406 195.68247833 -153.44255066 195.02186584 C-152.57534585 192.98952075 -151.69455566 190.96343746 -150.8125 188.9375 C-148.52960643 183.63017721 -146.25725257 
178.31827892 -144 173 C-143.70816833 172.31351379 -143.41633667 171.62702759 -143.11566162 170.91973877 C-142.25821582 168.88940943 -141.41429529 166.85397475 -140.57421875 164.81640625 C-140.07881592 163.6260376 -139.58341309 162.43566895 -139.07299805 161.20922852 C-138.07156521 158.21404429 -137.75796746 156.11997935 -138 153 C-146.93919642 160.27430311 -154.518973 168.82712219 -161.6875 177.8125 C-165.32819613 182.32251855 -169.13147644 186.77267556 -174 190 C-174.99 190 -175.98 190 -177 190 C-177.4046542 183.89279484 -176.01282428 179.27664925 -173.9140625 173.57421875 C-173.5860817 172.65574158 -173.25810089 171.7372644 -172.92018127 170.79095459 C-171.83227842 167.75258546 -170.72872039 164.72015532 -169.625 161.6875 C-168.84968909 159.53233408 -168.07537462 157.37680947 -167.30200195 155.22094727 C-165.68521472 150.71991256 -164.06188369 146.22131861 -162.43310547 141.72460938 C-160.13107613 135.36796141 -157.84629984 129.00524003 -155.56640625 122.640625 C-152.24887824 113.38342752 -148.91109678 104.13369846 -145.56491089 94.88682556 C-143.03925807 87.89940757 -140.53790092 80.90366027 -138.04859924 73.9032135 C-136.26757654 68.89703093 -134.47331016 63.8956159 -132.67861938 58.89431763 C-131.8214001 56.49792847 -130.9687066 54.09991471 -130.12094116 51.70016479 C-123.98775591 34.3499988 -118.12632414 19.31829963 -105 6 C-104.44957031 5.37738281 -103.89914063 4.75476563 -103.33203125 4.11328125 C-76.27136915 -25.62003884 -30.00866348 -21.14678947 0 0 Z " fill="#00ACE2" transform="translate(177,17)"/>
|
||||
<path d="M0 0 C3.59370889 2.76840946 6.81090677 5.77847531 10 9 C10.69867188 9.66515625 11.39734375 10.3303125 12.1171875 11.015625 C16.54012757 15.52461449 19.531169 20.41283818 22.375 26 C22.70902832 26.65234619 23.04305664 27.30469238 23.38720703 27.97680664 C29.78966214 41.20843735 30.40448825 59.20573624 26.08984375 73.18359375 C18.48979965 92.82385108 6.27019435 105.41854323 -13 114 C-29.20527458 120.38314632 -45.91187826 119.08787574 -61.9140625 112.8671875 C-78.47633521 105.1244532 -90.6818902 90.79579279 -97.20117188 73.89526367 C-101.70761398 60.18076397 -101.08909063 42.12663774 -95 29 C-94.57589844 28.06671875 -94.15179688 27.1334375 -93.71484375 26.171875 C-85.2846631 9.3584785 -71.84223513 -1.671465 -54.3125 -7.96484375 C-35.99378812 -13.48589997 -16.09003976 -10.05627485 0 0 Z " fill="#0274C3" transform="translate(163,25)"/>
|
||||
<path d="M0 0 C3.59370889 2.76840946 6.81090677 5.77847531 10 9 C10.69867188 9.66515625 11.39734375 10.3303125 12.1171875 11.015625 C16.54012757 15.52461449 19.531169 20.41283818 22.375 26 C22.70902832 26.65234619 23.04305664 27.30469238 23.38720703 27.97680664 C29.78966214 41.20843735 30.40448825 59.20573624 26.08984375 73.18359375 C18.48979965 92.82385108 6.27019435 105.41854323 -13 114 C-29.20527458 120.38314632 -45.91187826 119.08787574 -61.9140625 112.8671875 C-78.47633521 105.1244532 -90.6818902 90.79579279 -97.20117188 73.89526367 C-101.70761398 60.18076397 -101.08909063 42.12663774 -95 29 C-94.57589844 28.06671875 -94.15179688 27.1334375 -93.71484375 26.171875 C-85.2846631 9.3584785 -71.84223513 -1.671465 -54.3125 -7.96484375 C-35.99378812 -13.48589997 -16.09003976 -10.05627485 0 0 Z M-72.85546875 22.3046875 C-81.52384195 33.1993642 -85.32925872 46.19509438 -84 60 C-81.19770636 74.79342134 -74.05177982 85.87095721 -62 95 C-50.07317504 102.49999729 -36.59178226 103.84984433 -22.875 100.9375 C-9.58998661 97.14684284 0.96143129 88.7625654 7.6796875 76.7578125 C13.61298631 64.36459073 14.80612594 52.14069452 11.02734375 38.90625 C6.83721139 27.05279572 -1.00703398 17.2712335 -11.984375 10.95703125 C-15.54241409 9.26765223 -19.22605928 8.10166317 -23 7 C-23.99 6.67 -24.98 6.34 -26 6 C-44.99521417 4.32054509 -59.38243396 8.38333807 -72.85546875 22.3046875 Z " fill="#FAFDFE" transform="translate(163,25)"/>
|
||||
<path d="M0 0 C6.24302767 5.06772084 11.11257121 12.4655725 12.15625 20.50390625 C12.39769334 29.34676869 10.95006126 36.08814626 5.75 43.375 C1.38925675 47.21456516 -1.15219336 48.71018589 -7.05664062 48.625 C-10.77603931 48.20106141 -13.73923312 46.63634037 -17 44.875 C-17.68956787 44.5147876 -18.37913574 44.1545752 -19.08959961 43.78344727 C-41.85230667 31.66318165 -41.85230667 31.66318165 -46.25 21.375 C-47.21511912 15.34300547 -45.21326136 11.66919243 -42.0234375 6.7421875 C-37.16499414 0.25712874 -31.52400844 -3.17464768 -23.75 -5.3125 C-15.10762666 -6.39279667 -7.23796009 -4.98215226 0 0 Z " fill="#FCFDFE" transform="translate(149.25,47.625)"/>
|
||||
</svg>
|
||||
|
After Width: | Height: | Size: 6.4 KiB |
|
|
@ -1,7 +1,7 @@
|
|||
import { BaseCache } from '@langchain/core/caches'
|
||||
import { ChatFireworks } from '@langchain/community/chat_models/fireworks'
|
||||
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
|
||||
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
|
||||
import { ChatFireworks, ChatFireworksParams } from './core'
|
||||
|
||||
class ChatFireworks_ChatModels implements INode {
|
||||
label: string
|
||||
|
|
@ -41,8 +41,8 @@ class ChatFireworks_ChatModels implements INode {
|
|||
label: 'Model',
|
||||
name: 'modelName',
|
||||
type: 'string',
|
||||
default: 'accounts/fireworks/models/llama-v2-13b-chat',
|
||||
placeholder: 'accounts/fireworks/models/llama-v2-13b-chat'
|
||||
default: 'accounts/fireworks/models/llama-v3p1-8b-instruct',
|
||||
placeholder: 'accounts/fireworks/models/llama-v3p1-8b-instruct'
|
||||
},
|
||||
{
|
||||
label: 'Temperature',
|
||||
|
|
@ -71,9 +71,8 @@ class ChatFireworks_ChatModels implements INode {
|
|||
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
|
||||
const fireworksApiKey = getCredentialParam('fireworksApiKey', credentialData, nodeData)
|
||||
|
||||
const obj: Partial<ChatFireworks> = {
|
||||
const obj: ChatFireworksParams = {
|
||||
fireworksApiKey,
|
||||
model: modelName,
|
||||
modelName,
|
||||
temperature: temperature ? parseFloat(temperature) : undefined,
|
||||
streaming: streaming ?? true
|
||||
|
|
|
|||
|
|
@ -0,0 +1,126 @@
|
|||
import type { BaseChatModelParams, LangSmithParams } from '@langchain/core/language_models/chat_models'
|
||||
import {
|
||||
type OpenAIClient,
|
||||
type ChatOpenAICallOptions,
|
||||
type OpenAIChatInput,
|
||||
type OpenAICoreRequestOptions,
|
||||
ChatOpenAICompletions
|
||||
} from '@langchain/openai'
|
||||
|
||||
import { getEnvironmentVariable } from '@langchain/core/utils/env'
|
||||
|
||||
type FireworksUnsupportedArgs = 'frequencyPenalty' | 'presencePenalty' | 'logitBias' | 'functions'
|
||||
|
||||
type FireworksUnsupportedCallOptions = 'functions' | 'function_call'
|
||||
|
||||
export type ChatFireworksCallOptions = Partial<Omit<ChatOpenAICallOptions, FireworksUnsupportedCallOptions>>
|
||||
|
||||
export type ChatFireworksParams = Partial<Omit<OpenAIChatInput, 'openAIApiKey' | FireworksUnsupportedArgs>> &
|
||||
BaseChatModelParams & {
|
||||
/**
|
||||
* Prefer `apiKey`
|
||||
*/
|
||||
fireworksApiKey?: string
|
||||
/**
|
||||
* The Fireworks API key to use.
|
||||
*/
|
||||
apiKey?: string
|
||||
}
|
||||
|
||||
export class ChatFireworks extends ChatOpenAICompletions<ChatFireworksCallOptions> {
|
||||
static lc_name() {
|
||||
return 'ChatFireworks'
|
||||
}
|
||||
|
||||
_llmType() {
|
||||
return 'fireworks'
|
||||
}
|
||||
|
||||
get lc_secrets(): { [key: string]: string } | undefined {
|
||||
return {
|
||||
fireworksApiKey: 'FIREWORKS_API_KEY',
|
||||
apiKey: 'FIREWORKS_API_KEY'
|
||||
}
|
||||
}
|
||||
|
||||
lc_serializable = true
|
||||
|
||||
fireworksApiKey?: string
|
||||
|
||||
apiKey?: string
|
||||
|
||||
constructor(fields?: ChatFireworksParams) {
|
||||
const fireworksApiKey = fields?.apiKey || fields?.fireworksApiKey || getEnvironmentVariable('FIREWORKS_API_KEY')
|
||||
|
||||
if (!fireworksApiKey) {
|
||||
throw new Error(
|
||||
`Fireworks API key not found. Please set the FIREWORKS_API_KEY environment variable or provide the key into "fireworksApiKey"`
|
||||
)
|
||||
}
|
||||
|
||||
super({
|
||||
...fields,
|
||||
model: fields?.model || fields?.modelName || 'accounts/fireworks/models/llama-v3p1-8b-instruct',
|
||||
apiKey: fireworksApiKey,
|
||||
configuration: {
|
||||
baseURL: 'https://api.fireworks.ai/inference/v1'
|
||||
},
|
||||
streamUsage: false
|
||||
})
|
||||
|
||||
this.fireworksApiKey = fireworksApiKey
|
||||
this.apiKey = fireworksApiKey
|
||||
}
|
||||
|
||||
getLsParams(options: any): LangSmithParams {
|
||||
const params = super.getLsParams(options)
|
||||
params.ls_provider = 'fireworks'
|
||||
return params
|
||||
}
|
||||
|
||||
toJSON() {
|
||||
const result = super.toJSON()
|
||||
|
||||
if ('kwargs' in result && typeof result.kwargs === 'object' && result.kwargs != null) {
|
||||
delete result.kwargs.openai_api_key
|
||||
delete result.kwargs.configuration
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
// eslint-disable-next-line
|
||||
async completionWithRetry(
|
||||
request: OpenAIClient.Chat.ChatCompletionCreateParamsStreaming,
|
||||
options?: OpenAICoreRequestOptions
|
||||
): Promise<AsyncIterable<OpenAIClient.Chat.Completions.ChatCompletionChunk>>
|
||||
|
||||
// eslint-disable-next-line
|
||||
async completionWithRetry(
|
||||
request: OpenAIClient.Chat.ChatCompletionCreateParamsNonStreaming,
|
||||
options?: OpenAICoreRequestOptions
|
||||
): Promise<OpenAIClient.Chat.Completions.ChatCompletion>
|
||||
|
||||
/**
|
||||
* Calls the Fireworks API with retry logic in case of failures.
|
||||
* @param request The request to send to the Fireworks API.
|
||||
* @param options Optional configuration for the API call.
|
||||
* @returns The response from the Fireworks API.
|
||||
*/
|
||||
// eslint-disable-next-line
|
||||
async completionWithRetry(
|
||||
request: OpenAIClient.Chat.ChatCompletionCreateParamsStreaming | OpenAIClient.Chat.ChatCompletionCreateParamsNonStreaming,
|
||||
options?: OpenAICoreRequestOptions
|
||||
): Promise<AsyncIterable<OpenAIClient.Chat.Completions.ChatCompletionChunk> | OpenAIClient.Chat.Completions.ChatCompletion> {
|
||||
delete request.frequency_penalty
|
||||
delete request.presence_penalty
|
||||
delete request.logit_bias
|
||||
delete request.functions
|
||||
|
||||
if (request.stream === true) {
|
||||
return super.completionWithRetry(request, options)
|
||||
}
|
||||
|
||||
return super.completionWithRetry(request, options)
|
||||
}
|
||||
}
|
||||
|
|
@ -2,10 +2,9 @@ import { HarmBlockThreshold, HarmCategory } from '@google/generative-ai'
|
|||
import type { SafetySetting } from '@google/generative-ai'
|
||||
import { BaseCache } from '@langchain/core/caches'
|
||||
import { ICommonObject, IMultiModalOption, INode, INodeData, INodeOptionsValue, INodeParams } from '../../../src/Interface'
|
||||
import { convertMultiOptionsToStringArray, getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
|
||||
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
|
||||
import { getModels, MODEL_TYPE } from '../../../src/modelLoader'
|
||||
import { ChatGoogleGenerativeAI } from './FlowiseChatGoogleGenerativeAI'
|
||||
import { GoogleGenerativeAIChatInput } from '@langchain/google-genai'
|
||||
import { ChatGoogleGenerativeAI, GoogleGenerativeAIChatInput } from './FlowiseChatGoogleGenerativeAI'
|
||||
|
||||
class GoogleGenerativeAI_ChatModels implements INode {
|
||||
label: string
|
||||
|
|
@ -22,7 +21,7 @@ class GoogleGenerativeAI_ChatModels implements INode {
|
|||
constructor() {
|
||||
this.label = 'ChatGoogleGenerativeAI'
|
||||
this.name = 'chatGoogleGenerativeAI'
|
||||
this.version = 3.0
|
||||
this.version = 3.1
|
||||
this.type = 'ChatGoogleGenerativeAI'
|
||||
this.icon = 'GoogleGemini.svg'
|
||||
this.category = 'Chat Models'
|
||||
|
|
@ -101,62 +100,91 @@ class GoogleGenerativeAI_ChatModels implements INode {
|
|||
additionalParams: true
|
||||
},
|
||||
{
|
||||
label: 'Harm Category',
|
||||
name: 'harmCategory',
|
||||
type: 'multiOptions',
|
||||
label: 'Safety Settings',
|
||||
name: 'safetySettings',
|
||||
type: 'array',
|
||||
description:
|
||||
'Refer to <a target="_blank" href="https://cloud.google.com/vertex-ai/docs/generative-ai/multimodal/configure-safety-attributes#safety_attribute_definitions">official guide</a> on how to use Harm Category',
|
||||
options: [
|
||||
'Safety settings for the model. Refer to the <a href="https://ai.google.dev/gemini-api/docs/safety-settings">official guide</a> on how to use Safety Settings',
|
||||
array: [
|
||||
{
|
||||
label: 'Dangerous',
|
||||
name: HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT
|
||||
label: 'Harm Category',
|
||||
name: 'harmCategory',
|
||||
type: 'options',
|
||||
options: [
|
||||
{
|
||||
label: 'Dangerous',
|
||||
name: HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT,
|
||||
description: 'Promotes, facilitates, or encourages harmful acts.'
|
||||
},
|
||||
{
|
||||
label: 'Harassment',
|
||||
name: HarmCategory.HARM_CATEGORY_HARASSMENT,
|
||||
description: 'Negative or harmful comments targeting identity and/or protected attributes.'
|
||||
},
|
||||
{
|
||||
label: 'Hate Speech',
|
||||
name: HarmCategory.HARM_CATEGORY_HATE_SPEECH,
|
||||
description: 'Content that is rude, disrespectful, or profane.'
|
||||
},
|
||||
{
|
||||
label: 'Sexually Explicit',
|
||||
name: HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT,
|
||||
description: 'Contains references to sexual acts or other lewd content.'
|
||||
},
|
||||
{
|
||||
label: 'Civic Integrity',
|
||||
name: HarmCategory.HARM_CATEGORY_CIVIC_INTEGRITY,
|
||||
description: 'Election-related queries.'
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
label: 'Harassment',
|
||||
name: HarmCategory.HARM_CATEGORY_HARASSMENT
|
||||
},
|
||||
{
|
||||
label: 'Hate Speech',
|
||||
name: HarmCategory.HARM_CATEGORY_HATE_SPEECH
|
||||
},
|
||||
{
|
||||
label: 'Sexually Explicit',
|
||||
name: HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT
|
||||
label: 'Harm Block Threshold',
|
||||
name: 'harmBlockThreshold',
|
||||
type: 'options',
|
||||
options: [
|
||||
{
|
||||
label: 'None',
|
||||
name: HarmBlockThreshold.BLOCK_NONE,
|
||||
description: 'Always show regardless of probability of unsafe content'
|
||||
},
|
||||
{
|
||||
label: 'Only High',
|
||||
name: HarmBlockThreshold.BLOCK_ONLY_HIGH,
|
||||
description: 'Block when high probability of unsafe content'
|
||||
},
|
||||
{
|
||||
label: 'Medium and Above',
|
||||
name: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
|
||||
description: 'Block when medium or high probability of unsafe content'
|
||||
},
|
||||
{
|
||||
label: 'Low and Above',
|
||||
name: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
|
||||
description: 'Block when low, medium or high probability of unsafe content'
|
||||
},
|
||||
{
|
||||
label: 'Threshold Unspecified (Default Threshold)',
|
||||
name: HarmBlockThreshold.HARM_BLOCK_THRESHOLD_UNSPECIFIED,
|
||||
description: 'Threshold is unspecified, block using default threshold'
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
optional: true,
|
||||
additionalParams: true
|
||||
},
|
||||
{
|
||||
label: 'Harm Block Threshold',
|
||||
name: 'harmBlockThreshold',
|
||||
type: 'multiOptions',
|
||||
description:
|
||||
'Refer to <a target="_blank" href="https://cloud.google.com/vertex-ai/docs/generative-ai/multimodal/configure-safety-attributes#safety_setting_thresholds">official guide</a> on how to use Harm Block Threshold',
|
||||
options: [
|
||||
{
|
||||
label: 'Low and Above',
|
||||
name: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE
|
||||
},
|
||||
{
|
||||
label: 'Medium and Above',
|
||||
name: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE
|
||||
},
|
||||
{
|
||||
label: 'None',
|
||||
name: HarmBlockThreshold.BLOCK_NONE
|
||||
},
|
||||
{
|
||||
label: 'Only High',
|
||||
name: HarmBlockThreshold.BLOCK_ONLY_HIGH
|
||||
},
|
||||
{
|
||||
label: 'Threshold Unspecified',
|
||||
name: HarmBlockThreshold.HARM_BLOCK_THRESHOLD_UNSPECIFIED
|
||||
}
|
||||
],
|
||||
label: 'Thinking Budget',
|
||||
name: 'thinkingBudget',
|
||||
type: 'number',
|
||||
description: 'Guides the number of thinking tokens. -1 for dynamic, 0 to disable, or positive integer (Gemini 2.5 models).',
|
||||
step: 1,
|
||||
optional: true,
|
||||
additionalParams: true
|
||||
additionalParams: true,
|
||||
show: {
|
||||
modelName: ['gemini-2.5-pro', 'gemini-2.5-flash', 'gemini-2.5-flash-lite']
|
||||
}
|
||||
},
|
||||
{
|
||||
label: 'Base URL',
|
||||
|
|
@ -195,11 +223,12 @@ class GoogleGenerativeAI_ChatModels implements INode {
|
|||
const maxOutputTokens = nodeData.inputs?.maxOutputTokens as string
|
||||
const topP = nodeData.inputs?.topP as string
|
||||
const topK = nodeData.inputs?.topK as string
|
||||
const harmCategory = nodeData.inputs?.harmCategory as string
|
||||
const harmBlockThreshold = nodeData.inputs?.harmBlockThreshold as string
|
||||
const _safetySettings = nodeData.inputs?.safetySettings as string
|
||||
|
||||
const cache = nodeData.inputs?.cache as BaseCache
|
||||
const streaming = nodeData.inputs?.streaming as boolean
|
||||
const baseUrl = nodeData.inputs?.baseUrl as string | undefined
|
||||
const thinkingBudget = nodeData.inputs?.thinkingBudget as string
|
||||
|
||||
const allowImageUploads = nodeData.inputs?.allowImageUploads as boolean
|
||||
|
||||
|
|
@ -219,18 +248,34 @@ class GoogleGenerativeAI_ChatModels implements INode {
|
|||
if (cache) obj.cache = cache
|
||||
if (temperature) obj.temperature = parseFloat(temperature)
|
||||
if (baseUrl) obj.baseUrl = baseUrl
|
||||
if (thinkingBudget) obj.thinkingBudget = parseInt(thinkingBudget, 10)
|
||||
|
||||
// Safety Settings
|
||||
let harmCategories: string[] = convertMultiOptionsToStringArray(harmCategory)
|
||||
let harmBlockThresholds: string[] = convertMultiOptionsToStringArray(harmBlockThreshold)
|
||||
if (harmCategories.length != harmBlockThresholds.length)
|
||||
throw new Error(`Harm Category & Harm Block Threshold are not the same length`)
|
||||
const safetySettings: SafetySetting[] = harmCategories.map((harmCategory, index) => {
|
||||
return {
|
||||
category: harmCategory as HarmCategory,
|
||||
threshold: harmBlockThresholds[index] as HarmBlockThreshold
|
||||
let safetySettings: SafetySetting[] = []
|
||||
if (_safetySettings) {
|
||||
try {
|
||||
const parsedSafetySettings = typeof _safetySettings === 'string' ? JSON.parse(_safetySettings) : _safetySettings
|
||||
if (Array.isArray(parsedSafetySettings)) {
|
||||
const validSettings = parsedSafetySettings
|
||||
.filter((setting: any) => setting.harmCategory && setting.harmBlockThreshold)
|
||||
.map((setting: any) => ({
|
||||
category: setting.harmCategory as HarmCategory,
|
||||
threshold: setting.harmBlockThreshold as HarmBlockThreshold
|
||||
}))
|
||||
|
||||
// Remove duplicates by keeping only the first occurrence of each harm category
|
||||
const seenCategories = new Set<HarmCategory>()
|
||||
safetySettings = validSettings.filter((setting) => {
|
||||
if (seenCategories.has(setting.category)) {
|
||||
return false
|
||||
}
|
||||
seenCategories.add(setting.category)
|
||||
return true
|
||||
})
|
||||
}
|
||||
} catch (error) {
|
||||
console.warn('Failed to parse safety settings:', error)
|
||||
}
|
||||
})
|
||||
}
|
||||
if (safetySettings.length > 0) obj.safetySettings = safetySettings
|
||||
|
||||
const multiModalOption: IMultiModalOption = {
|
||||
|
|
|
|||
File diff suppressed because it is too large
Load Diff
|
|
@ -1,733 +0,0 @@
|
|||
/** Disabled due to the withStructuredOutput
|
||||
|
||||
import { BaseMessage, AIMessage, AIMessageChunk, isBaseMessage, ChatMessage, MessageContentComplex } from '@langchain/core/messages'
|
||||
import { CallbackManagerForLLMRun } from '@langchain/core/callbacks/manager'
|
||||
import { BaseChatModel, type BaseChatModelParams } from '@langchain/core/language_models/chat_models'
|
||||
import { ChatGeneration, ChatGenerationChunk, ChatResult } from '@langchain/core/outputs'
|
||||
import { ToolCallChunk } from '@langchain/core/messages/tool'
|
||||
import { NewTokenIndices } from '@langchain/core/callbacks/base'
|
||||
import {
|
||||
EnhancedGenerateContentResponse,
|
||||
Content,
|
||||
Part,
|
||||
Tool,
|
||||
GenerativeModel,
|
||||
GoogleGenerativeAI as GenerativeAI
|
||||
} from '@google/generative-ai'
|
||||
import type {
|
||||
FunctionCallPart,
|
||||
FunctionResponsePart,
|
||||
SafetySetting,
|
||||
UsageMetadata,
|
||||
FunctionDeclarationsTool as GoogleGenerativeAIFunctionDeclarationsTool,
|
||||
GenerateContentRequest
|
||||
} from '@google/generative-ai'
|
||||
import { ICommonObject, IMultiModalOption, IVisionChatModal } from '../../../src'
|
||||
import { StructuredToolInterface } from '@langchain/core/tools'
|
||||
import { isStructuredTool } from '@langchain/core/utils/function_calling'
|
||||
import { zodToJsonSchema } from 'zod-to-json-schema'
|
||||
import { BaseLanguageModelCallOptions } from '@langchain/core/language_models/base'
|
||||
import type FlowiseGoogleAICacheManager from '../../cache/GoogleGenerativeAIContextCache/FlowiseGoogleAICacheManager'
|
||||
|
||||
const DEFAULT_IMAGE_MAX_TOKEN = 8192
|
||||
const DEFAULT_IMAGE_MODEL = 'gemini-1.5-flash-latest'
|
||||
|
||||
interface TokenUsage {
|
||||
completionTokens?: number
|
||||
promptTokens?: number
|
||||
totalTokens?: number
|
||||
}
|
||||
|
||||
interface GoogleGenerativeAIChatCallOptions extends BaseLanguageModelCallOptions {
|
||||
tools?: StructuredToolInterface[] | GoogleGenerativeAIFunctionDeclarationsTool[]
|
||||
streamUsage?: boolean
|
||||
}
|
||||
|
||||
export interface GoogleGenerativeAIChatInput extends BaseChatModelParams, Pick<GoogleGenerativeAIChatCallOptions, 'streamUsage'> {
|
||||
modelName?: string
|
||||
model?: string
|
||||
temperature?: number
|
||||
maxOutputTokens?: number
|
||||
topP?: number
|
||||
topK?: number
|
||||
stopSequences?: string[]
|
||||
safetySettings?: SafetySetting[]
|
||||
apiKey?: string
|
||||
apiVersion?: string
|
||||
baseUrl?: string
|
||||
streaming?: boolean
|
||||
}
|
||||
|
||||
class LangchainChatGoogleGenerativeAI
|
||||
extends BaseChatModel<GoogleGenerativeAIChatCallOptions, AIMessageChunk>
|
||||
implements GoogleGenerativeAIChatInput
|
||||
{
|
||||
modelName = 'gemini-pro'
|
||||
|
||||
temperature?: number
|
||||
|
||||
maxOutputTokens?: number
|
||||
|
||||
topP?: number
|
||||
|
||||
topK?: number
|
||||
|
||||
stopSequences: string[] = []
|
||||
|
||||
safetySettings?: SafetySetting[]
|
||||
|
||||
apiKey?: string
|
||||
|
||||
baseUrl?: string
|
||||
|
||||
streaming = false
|
||||
|
||||
streamUsage = true
|
||||
|
||||
private client: GenerativeModel
|
||||
|
||||
private contextCache?: FlowiseGoogleAICacheManager
|
||||
|
||||
get _isMultimodalModel() {
|
||||
return true
|
||||
}
|
||||
|
||||
constructor(fields?: GoogleGenerativeAIChatInput) {
|
||||
super(fields ?? {})
|
||||
|
||||
this.modelName = fields?.model?.replace(/^models\//, '') ?? fields?.modelName?.replace(/^models\//, '') ?? 'gemini-pro'
|
||||
|
||||
this.maxOutputTokens = fields?.maxOutputTokens ?? this.maxOutputTokens
|
||||
|
||||
if (this.maxOutputTokens && this.maxOutputTokens < 0) {
|
||||
throw new Error('`maxOutputTokens` must be a positive integer')
|
||||
}
|
||||
|
||||
this.temperature = fields?.temperature ?? this.temperature
|
||||
if (this.temperature && (this.temperature < 0 || this.temperature > 1)) {
|
||||
throw new Error('`temperature` must be in the range of [0.0,1.0]')
|
||||
}
|
||||
|
||||
this.topP = fields?.topP ?? this.topP
|
||||
if (this.topP && this.topP < 0) {
|
||||
throw new Error('`topP` must be a positive integer')
|
||||
}
|
||||
|
||||
if (this.topP && this.topP > 1) {
|
||||
throw new Error('`topP` must be below 1.')
|
||||
}
|
||||
|
||||
this.topK = fields?.topK ?? this.topK
|
||||
if (this.topK && this.topK < 0) {
|
||||
throw new Error('`topK` must be a positive integer')
|
||||
}
|
||||
|
||||
this.stopSequences = fields?.stopSequences ?? this.stopSequences
|
||||
|
||||
this.apiKey = fields?.apiKey ?? process.env['GOOGLE_API_KEY']
|
||||
if (!this.apiKey) {
|
||||
throw new Error(
|
||||
'Please set an API key for Google GenerativeAI ' +
|
||||
'in the environment variable GOOGLE_API_KEY ' +
|
||||
'or in the `apiKey` field of the ' +
|
||||
'ChatGoogleGenerativeAI constructor'
|
||||
)
|
||||
}
|
||||
|
||||
this.safetySettings = fields?.safetySettings ?? this.safetySettings
|
||||
if (this.safetySettings && this.safetySettings.length > 0) {
|
||||
const safetySettingsSet = new Set(this.safetySettings.map((s) => s.category))
|
||||
if (safetySettingsSet.size !== this.safetySettings.length) {
|
||||
throw new Error('The categories in `safetySettings` array must be unique')
|
||||
}
|
||||
}
|
||||
|
||||
this.streaming = fields?.streaming ?? this.streaming
|
||||
|
||||
this.streamUsage = fields?.streamUsage ?? this.streamUsage
|
||||
|
||||
this.getClient()
|
||||
}
|
||||
|
||||
async getClient(prompt?: Content[], tools?: Tool[]) {
|
||||
this.client = new GenerativeAI(this.apiKey ?? '').getGenerativeModel(
|
||||
{
|
||||
model: this.modelName,
|
||||
tools,
|
||||
safetySettings: this.safetySettings as SafetySetting[],
|
||||
generationConfig: {
|
||||
candidateCount: 1,
|
||||
stopSequences: this.stopSequences,
|
||||
maxOutputTokens: this.maxOutputTokens,
|
||||
temperature: this.temperature,
|
||||
topP: this.topP,
|
||||
topK: this.topK
|
||||
}
|
||||
},
|
||||
{
|
||||
baseUrl: this.baseUrl
|
||||
}
|
||||
)
|
||||
if (this.contextCache) {
|
||||
const cachedContent = await this.contextCache.lookup({
|
||||
contents: prompt ? [{ ...prompt[0], parts: prompt[0].parts.slice(0, 1) }] : [],
|
||||
model: this.modelName,
|
||||
tools
|
||||
})
|
||||
this.client.cachedContent = cachedContent as any
|
||||
}
|
||||
}
|
||||
|
||||
_combineLLMOutput() {
|
||||
return []
|
||||
}
|
||||
|
||||
_llmType() {
|
||||
return 'googlegenerativeai'
|
||||
}
|
||||
|
||||
override bindTools(tools: (StructuredToolInterface | Record<string, unknown>)[], kwargs?: Partial<ICommonObject>) {
|
||||
//@ts-ignore
|
||||
return this.bind({ tools: convertToGeminiTools(tools), ...kwargs })
|
||||
}
|
||||
|
||||
invocationParams(options?: this['ParsedCallOptions']): Omit<GenerateContentRequest, 'contents'> {
|
||||
const tools = options?.tools as GoogleGenerativeAIFunctionDeclarationsTool[] | StructuredToolInterface[] | undefined
|
||||
if (Array.isArray(tools) && !tools.some((t: any) => !('lc_namespace' in t))) {
|
||||
return {
|
||||
tools: convertToGeminiTools(options?.tools as StructuredToolInterface[]) as any
|
||||
}
|
||||
}
|
||||
return {
|
||||
tools: options?.tools as GoogleGenerativeAIFunctionDeclarationsTool[] | undefined
|
||||
}
|
||||
}
|
||||
|
||||
convertFunctionResponse(prompts: Content[]) {
|
||||
for (let i = 0; i < prompts.length; i += 1) {
|
||||
if (prompts[i].role === 'function') {
|
||||
if (prompts[i - 1].role === 'model') {
|
||||
const toolName = prompts[i - 1].parts[0].functionCall?.name ?? ''
|
||||
prompts[i].parts = [
|
||||
{
|
||||
functionResponse: {
|
||||
name: toolName,
|
||||
response: {
|
||||
name: toolName,
|
||||
content: prompts[i].parts[0].text
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
setContextCache(contextCache: FlowiseGoogleAICacheManager): void {
|
||||
this.contextCache = contextCache
|
||||
}
|
||||
|
||||
async getNumTokens(prompt: BaseMessage[]) {
|
||||
const contents = convertBaseMessagesToContent(prompt, this._isMultimodalModel)
|
||||
const { totalTokens } = await this.client.countTokens({ contents })
|
||||
return totalTokens
|
||||
}
|
||||
|
||||
async _generateNonStreaming(
|
||||
prompt: Content[],
|
||||
options: this['ParsedCallOptions'],
|
||||
_runManager?: CallbackManagerForLLMRun
|
||||
): Promise<ChatResult> {
|
||||
//@ts-ignore
|
||||
const tools = options.tools ?? []
|
||||
|
||||
this.convertFunctionResponse(prompt)
|
||||
|
||||
if (tools.length > 0) {
|
||||
await this.getClient(prompt, tools as Tool[])
|
||||
} else {
|
||||
await this.getClient(prompt)
|
||||
}
|
||||
const res = await this.caller.callWithOptions({ signal: options?.signal }, async () => {
|
||||
let output
|
||||
try {
|
||||
output = await this.client.generateContent({
|
||||
contents: prompt
|
||||
})
|
||||
} catch (e: any) {
|
||||
if (e.message?.includes('400 Bad Request')) {
|
||||
e.status = 400
|
||||
}
|
||||
throw e
|
||||
}
|
||||
return output
|
||||
})
|
||||
const generationResult = mapGenerateContentResultToChatResult(res.response)
|
||||
await _runManager?.handleLLMNewToken(generationResult.generations?.length ? generationResult.generations[0].text : '')
|
||||
return generationResult
|
||||
}
|
||||
|
||||
async _generate(
|
||||
messages: BaseMessage[],
|
||||
options: this['ParsedCallOptions'],
|
||||
runManager?: CallbackManagerForLLMRun
|
||||
): Promise<ChatResult> {
|
||||
let prompt = convertBaseMessagesToContent(messages, this._isMultimodalModel)
|
||||
prompt = checkIfEmptyContentAndSameRole(prompt)
|
||||
|
||||
// Handle streaming
|
||||
if (this.streaming) {
|
||||
const tokenUsage: TokenUsage = {}
|
||||
const stream = this._streamResponseChunks(messages, options, runManager)
|
||||
const finalChunks: Record<number, ChatGenerationChunk> = {}
|
||||
|
||||
for await (const chunk of stream) {
|
||||
const index = (chunk.generationInfo as NewTokenIndices)?.completion ?? 0
|
||||
if (finalChunks[index] === undefined) {
|
||||
finalChunks[index] = chunk
|
||||
} else {
|
||||
finalChunks[index] = finalChunks[index].concat(chunk)
|
||||
}
|
||||
}
|
||||
const generations = Object.entries(finalChunks)
|
||||
.sort(([aKey], [bKey]) => parseInt(aKey, 10) - parseInt(bKey, 10))
|
||||
.map(([_, value]) => value)
|
||||
|
||||
return { generations, llmOutput: { estimatedTokenUsage: tokenUsage } }
|
||||
}
|
||||
return this._generateNonStreaming(prompt, options, runManager)
|
||||
}
|
||||
|
||||
async *_streamResponseChunks(
|
||||
messages: BaseMessage[],
|
||||
options: this['ParsedCallOptions'],
|
||||
runManager?: CallbackManagerForLLMRun
|
||||
): AsyncGenerator<ChatGenerationChunk> {
|
||||
let prompt = convertBaseMessagesToContent(messages, this._isMultimodalModel)
|
||||
prompt = checkIfEmptyContentAndSameRole(prompt)
|
||||
|
||||
const parameters = this.invocationParams(options)
|
||||
const request = {
|
||||
...parameters,
|
||||
contents: prompt
|
||||
}
|
||||
|
||||
const tools = options.tools ?? []
|
||||
if (tools.length > 0) {
|
||||
await this.getClient(prompt, tools as Tool[])
|
||||
} else {
|
||||
await this.getClient(prompt)
|
||||
}
|
||||
|
||||
const stream = await this.caller.callWithOptions({ signal: options?.signal }, async () => {
|
||||
const { stream } = await this.client.generateContentStream(request)
|
||||
return stream
|
||||
})
|
||||
|
||||
let usageMetadata: UsageMetadata | ICommonObject | undefined
|
||||
let index = 0
|
||||
for await (const response of stream) {
|
||||
if ('usageMetadata' in response && this.streamUsage !== false && options.streamUsage !== false) {
|
||||
const genAIUsageMetadata = response.usageMetadata as {
|
||||
promptTokenCount: number
|
||||
candidatesTokenCount: number
|
||||
totalTokenCount: number
|
||||
}
|
||||
if (!usageMetadata) {
|
||||
usageMetadata = {
|
||||
input_tokens: genAIUsageMetadata.promptTokenCount,
|
||||
output_tokens: genAIUsageMetadata.candidatesTokenCount,
|
||||
total_tokens: genAIUsageMetadata.totalTokenCount
|
||||
}
|
||||
} else {
|
||||
// Under the hood, LangChain combines the prompt tokens. Google returns the updated
|
||||
// total each time, so we need to find the difference between the tokens.
|
||||
const outputTokenDiff = genAIUsageMetadata.candidatesTokenCount - (usageMetadata as ICommonObject).output_tokens
|
||||
usageMetadata = {
|
||||
input_tokens: 0,
|
||||
output_tokens: outputTokenDiff,
|
||||
total_tokens: outputTokenDiff
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const chunk = convertResponseContentToChatGenerationChunk(response, {
|
||||
usageMetadata: usageMetadata as UsageMetadata,
|
||||
index
|
||||
})
|
||||
index += 1
|
||||
if (!chunk) {
|
||||
continue
|
||||
}
|
||||
|
||||
yield chunk
|
||||
await runManager?.handleLLMNewToken(chunk.text ?? '')
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
export class ChatGoogleGenerativeAI extends LangchainChatGoogleGenerativeAI implements IVisionChatModal {
|
||||
configuredModel: string
|
||||
configuredMaxToken?: number
|
||||
multiModalOption: IMultiModalOption
|
||||
id: string
|
||||
|
||||
constructor(id: string, fields?: GoogleGenerativeAIChatInput) {
|
||||
super(fields)
|
||||
this.id = id
|
||||
this.configuredModel = fields?.modelName ?? ''
|
||||
this.configuredMaxToken = fields?.maxOutputTokens
|
||||
}
|
||||
|
||||
revertToOriginalModel(): void {
|
||||
this.modelName = this.configuredModel
|
||||
this.maxOutputTokens = this.configuredMaxToken
|
||||
}
|
||||
|
||||
setMultiModalOption(multiModalOption: IMultiModalOption): void {
|
||||
this.multiModalOption = multiModalOption
|
||||
}
|
||||
|
||||
setVisionModel(): void {
|
||||
if (this.modelName === 'gemini-1.0-pro-latest') {
|
||||
this.modelName = DEFAULT_IMAGE_MODEL
|
||||
this.maxOutputTokens = this.configuredMaxToken ? this.configuredMaxToken : DEFAULT_IMAGE_MAX_TOKEN
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
function messageContentMedia(content: MessageContentComplex): Part {
|
||||
if ('mimeType' in content && 'data' in content) {
|
||||
return {
|
||||
inlineData: {
|
||||
mimeType: content.mimeType,
|
||||
data: content.data
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
throw new Error('Invalid media content')
|
||||
}
|
||||
|
||||
function getMessageAuthor(message: BaseMessage) {
|
||||
const type = message._getType()
|
||||
if (ChatMessage.isInstance(message)) {
|
||||
return message.role
|
||||
}
|
||||
return message.name ?? type
|
||||
}
|
||||
|
||||
function convertAuthorToRole(author: string) {
|
||||
switch (author.toLowerCase()) {
|
||||
case 'ai':
|
||||
case 'assistant':
|
||||
case 'model':
|
||||
return 'model'
|
||||
case 'function':
|
||||
case 'tool':
|
||||
return 'function'
|
||||
case 'system':
|
||||
case 'human':
|
||||
default:
|
||||
return 'user'
|
||||
}
|
||||
}
|
||||
|
||||
function convertMessageContentToParts(message: BaseMessage, isMultimodalModel: boolean): Part[] {
|
||||
if (typeof message.content === 'string' && message.content !== '') {
|
||||
return [{ text: message.content }]
|
||||
}
|
||||
|
||||
let functionCalls: FunctionCallPart[] = []
|
||||
let functionResponses: FunctionResponsePart[] = []
|
||||
let messageParts: Part[] = []
|
||||
|
||||
if ('tool_calls' in message && Array.isArray(message.tool_calls) && message.tool_calls.length > 0) {
|
||||
functionCalls = message.tool_calls.map((tc) => ({
|
||||
functionCall: {
|
||||
name: tc.name,
|
||||
args: tc.args
|
||||
}
|
||||
}))
|
||||
} else if (message._getType() === 'tool' && message.name && message.content) {
|
||||
functionResponses = [
|
||||
{
|
||||
functionResponse: {
|
||||
name: message.name,
|
||||
response: message.content
|
||||
}
|
||||
}
|
||||
]
|
||||
} else if (Array.isArray(message.content)) {
|
||||
messageParts = message.content.map((c) => {
|
||||
if (c.type === 'text') {
|
||||
return {
|
||||
text: c.text
|
||||
}
|
||||
}
|
||||
|
||||
if (c.type === 'image_url') {
|
||||
if (!isMultimodalModel) {
|
||||
throw new Error(`This model does not support images`)
|
||||
}
|
||||
let source
|
||||
if (typeof c.image_url === 'string') {
|
||||
source = c.image_url
|
||||
} else if (typeof c.image_url === 'object' && 'url' in c.image_url) {
|
||||
source = c.image_url.url
|
||||
} else {
|
||||
throw new Error('Please provide image as base64 encoded data URL')
|
||||
}
|
||||
const [dm, data] = source.split(',')
|
||||
if (!dm.startsWith('data:')) {
|
||||
throw new Error('Please provide image as base64 encoded data URL')
|
||||
}
|
||||
|
||||
const [mimeType, encoding] = dm.replace(/^data:/, '').split(';')
|
||||
if (encoding !== 'base64') {
|
||||
throw new Error('Please provide image as base64 encoded data URL')
|
||||
}
|
||||
|
||||
return {
|
||||
inlineData: {
|
||||
data,
|
||||
mimeType
|
||||
}
|
||||
}
|
||||
} else if (c.type === 'media') {
|
||||
return messageContentMedia(c)
|
||||
} else if (c.type === 'tool_use') {
|
||||
return {
|
||||
functionCall: {
|
||||
name: c.name,
|
||||
args: c.input
|
||||
}
|
||||
}
|
||||
}
|
||||
throw new Error(`Unknown content type ${(c as { type: string }).type}`)
|
||||
})
|
||||
}
|
||||
|
||||
return [...messageParts, ...functionCalls, ...functionResponses]
|
||||
}
|
||||
|
||||
// This is a dedicated logic for Multi Agent Supervisor to handle the case where the content is empty, and the role is the same
|
||||
function checkIfEmptyContentAndSameRole(contents: Content[]) {
|
||||
let prevRole = ''
|
||||
const validContents: Content[] = []
|
||||
|
||||
for (const content of contents) {
|
||||
// Skip only if completely empty
|
||||
if (!content.parts || !content.parts.length) {
|
||||
continue
|
||||
}
|
||||
|
||||
// Ensure role is always either 'user' or 'model'
|
||||
content.role = content.role === 'model' ? 'model' : 'user'
|
||||
|
||||
// Handle consecutive messages
|
||||
if (content.role === prevRole && validContents.length > 0) {
|
||||
// Merge with previous content if same role
|
||||
validContents[validContents.length - 1].parts.push(...content.parts)
|
||||
continue
|
||||
}
|
||||
|
||||
validContents.push(content)
|
||||
prevRole = content.role
|
||||
}
|
||||
|
||||
return validContents
|
||||
}
|
||||
|
||||
function convertBaseMessagesToContent(messages: BaseMessage[], isMultimodalModel: boolean) {
|
||||
return messages.reduce<{
|
||||
content: Content[]
|
||||
mergeWithPreviousContent: boolean
|
||||
}>(
|
||||
(acc, message, index) => {
|
||||
if (!isBaseMessage(message)) {
|
||||
throw new Error('Unsupported message input')
|
||||
}
|
||||
const author = getMessageAuthor(message)
|
||||
if (author === 'system' && index !== 0) {
|
||||
throw new Error('System message should be the first one')
|
||||
}
|
||||
const role = convertAuthorToRole(author)
|
||||
|
||||
const prevContent = acc.content[acc.content.length]
|
||||
if (!acc.mergeWithPreviousContent && prevContent && prevContent.role === role) {
|
||||
throw new Error('Google Generative AI requires alternate messages between authors')
|
||||
}
|
||||
|
||||
const parts = convertMessageContentToParts(message, isMultimodalModel)
|
||||
|
||||
if (acc.mergeWithPreviousContent) {
|
||||
const prevContent = acc.content[acc.content.length - 1]
|
||||
if (!prevContent) {
|
||||
throw new Error('There was a problem parsing your system message. Please try a prompt without one.')
|
||||
}
|
||||
prevContent.parts.push(...parts)
|
||||
|
||||
return {
|
||||
mergeWithPreviousContent: false,
|
||||
content: acc.content
|
||||
}
|
||||
}
|
||||
let actualRole = role
|
||||
if (actualRole === 'function' || actualRole === 'tool') {
|
||||
// GenerativeAI API will throw an error if the role is not "user" or "model."
|
||||
actualRole = 'user'
|
||||
}
|
||||
const content: Content = {
|
||||
role: actualRole,
|
||||
parts
|
||||
}
|
||||
return {
|
||||
mergeWithPreviousContent: author === 'system',
|
||||
content: [...acc.content, content]
|
||||
}
|
||||
},
|
||||
{ content: [], mergeWithPreviousContent: false }
|
||||
).content
|
||||
}
|
||||
|
||||
function mapGenerateContentResultToChatResult(
|
||||
response: EnhancedGenerateContentResponse,
|
||||
extra?: {
|
||||
usageMetadata: UsageMetadata | undefined
|
||||
}
|
||||
): ChatResult {
|
||||
// if rejected or error, return empty generations with reason in filters
|
||||
if (!response.candidates || response.candidates.length === 0 || !response.candidates[0]) {
|
||||
return {
|
||||
generations: [],
|
||||
llmOutput: {
|
||||
filters: response.promptFeedback
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const functionCalls = response.functionCalls()
|
||||
const [candidate] = response.candidates
|
||||
const { content, ...generationInfo } = candidate
|
||||
const text = content?.parts[0]?.text ?? ''
|
||||
|
||||
const generation: ChatGeneration = {
|
||||
text,
|
||||
message: new AIMessage({
|
||||
content: text,
|
||||
tool_calls: functionCalls,
|
||||
additional_kwargs: {
|
||||
...generationInfo
|
||||
},
|
||||
usage_metadata: extra?.usageMetadata as any
|
||||
}),
|
||||
generationInfo
|
||||
}
|
||||
|
||||
return {
|
||||
generations: [generation]
|
||||
}
|
||||
}
|
||||
|
||||
function convertResponseContentToChatGenerationChunk(
|
||||
response: EnhancedGenerateContentResponse,
|
||||
extra: {
|
||||
usageMetadata?: UsageMetadata | undefined
|
||||
index: number
|
||||
}
|
||||
): ChatGenerationChunk | null {
|
||||
if (!response || !response.candidates || response.candidates.length === 0) {
|
||||
return null
|
||||
}
|
||||
const functionCalls = response.functionCalls()
|
||||
const [candidate] = response.candidates
|
||||
const { content, ...generationInfo } = candidate
|
||||
const text = content?.parts?.[0]?.text ?? ''
|
||||
|
||||
const toolCallChunks: ToolCallChunk[] = []
|
||||
if (functionCalls) {
|
||||
toolCallChunks.push(
|
||||
...functionCalls.map((fc) => ({
|
||||
...fc,
|
||||
args: JSON.stringify(fc.args),
|
||||
index: extra.index
|
||||
}))
|
||||
)
|
||||
}
|
||||
return new ChatGenerationChunk({
|
||||
text,
|
||||
message: new AIMessageChunk({
|
||||
content: text,
|
||||
name: !content ? undefined : content.role,
|
||||
tool_call_chunks: toolCallChunks,
|
||||
// Each chunk can have unique "generationInfo", and merging strategy is unclear,
|
||||
// so leave blank for now.
|
||||
additional_kwargs: {},
|
||||
usage_metadata: extra.usageMetadata as any
|
||||
}),
|
||||
generationInfo
|
||||
})
|
||||
}
|
||||
|
||||
function zodToGeminiParameters(zodObj: any) {
|
||||
// Gemini doesn't accept either the $schema or additionalProperties
|
||||
// attributes, so we need to explicitly remove them.
|
||||
const jsonSchema: any = zodToJsonSchema(zodObj)
|
||||
// eslint-disable-next-line unused-imports/no-unused-vars
|
||||
const { $schema, additionalProperties, ...rest } = jsonSchema
|
||||
|
||||
// Ensure all properties have type specified
|
||||
if (rest.properties) {
|
||||
Object.keys(rest.properties).forEach((key) => {
|
||||
const prop = rest.properties[key]
|
||||
|
||||
// Handle enum types
|
||||
if (prop.enum?.length) {
|
||||
rest.properties[key] = {
|
||||
type: 'string',
|
||||
format: 'enum',
|
||||
enum: prop.enum
|
||||
}
|
||||
}
|
||||
// Handle missing type
|
||||
else if (!prop.type && !prop.oneOf && !prop.anyOf && !prop.allOf) {
|
||||
// Infer type from other properties
|
||||
if (prop.minimum !== undefined || prop.maximum !== undefined) {
|
||||
prop.type = 'number'
|
||||
} else if (prop.format === 'date-time') {
|
||||
prop.type = 'string'
|
||||
} else if (prop.items) {
|
||||
prop.type = 'array'
|
||||
} else if (prop.properties) {
|
||||
prop.type = 'object'
|
||||
} else {
|
||||
// Default to string if type can't be inferred
|
||||
prop.type = 'string'
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
return rest
|
||||
}
|
||||
|
||||
function convertToGeminiTools(structuredTools: (StructuredToolInterface | Record<string, unknown>)[]) {
|
||||
return [
|
||||
{
|
||||
functionDeclarations: structuredTools.map((structuredTool) => {
|
||||
if (isStructuredTool(structuredTool)) {
|
||||
const jsonSchema = zodToGeminiParameters(structuredTool.schema)
|
||||
return {
|
||||
name: structuredTool.name,
|
||||
description: structuredTool.description,
|
||||
parameters: jsonSchema
|
||||
}
|
||||
}
|
||||
return structuredTool
|
||||
})
|
||||
}
|
||||
]
|
||||
}
|
||||
*/
|
||||
|
|
@ -0,0 +1,673 @@
|
|||
import {
|
||||
EnhancedGenerateContentResponse,
|
||||
Content,
|
||||
Part,
|
||||
type FunctionDeclarationsTool as GoogleGenerativeAIFunctionDeclarationsTool,
|
||||
type FunctionDeclaration as GenerativeAIFunctionDeclaration,
|
||||
POSSIBLE_ROLES,
|
||||
FunctionCallPart,
|
||||
TextPart,
|
||||
FileDataPart,
|
||||
InlineDataPart
|
||||
} from '@google/generative-ai'
|
||||
import {
|
||||
AIMessage,
|
||||
AIMessageChunk,
|
||||
BaseMessage,
|
||||
ChatMessage,
|
||||
ToolMessage,
|
||||
ToolMessageChunk,
|
||||
MessageContent,
|
||||
MessageContentComplex,
|
||||
UsageMetadata,
|
||||
isAIMessage,
|
||||
isBaseMessage,
|
||||
isToolMessage,
|
||||
StandardContentBlockConverter,
|
||||
parseBase64DataUrl,
|
||||
convertToProviderContentBlock,
|
||||
isDataContentBlock
|
||||
} from '@langchain/core/messages'
|
||||
import { ChatGeneration, ChatGenerationChunk, ChatResult } from '@langchain/core/outputs'
|
||||
import { isLangChainTool } from '@langchain/core/utils/function_calling'
|
||||
import { isOpenAITool } from '@langchain/core/language_models/base'
|
||||
import { ToolCallChunk } from '@langchain/core/messages/tool'
|
||||
import { v4 as uuidv4 } from 'uuid'
|
||||
import { jsonSchemaToGeminiParameters, schemaToGenerativeAIParameters } from './zod_to_genai_parameters.js'
|
||||
import { GoogleGenerativeAIToolType } from './types.js'
|
||||
|
||||
export function getMessageAuthor(message: BaseMessage) {
|
||||
const type = message._getType()
|
||||
if (ChatMessage.isInstance(message)) {
|
||||
return message.role
|
||||
}
|
||||
if (type === 'tool') {
|
||||
return type
|
||||
}
|
||||
return message.name ?? type
|
||||
}
|
||||
|
||||
/**
|
||||
* !!! IMPORTANT: Must return 'user' as default instead of throwing error
|
||||
* https://github.com/FlowiseAI/Flowise/issues/4743
|
||||
* Maps a message type to a Google Generative AI chat author.
|
||||
* @param message The message to map.
|
||||
* @param model The model to use for mapping.
|
||||
* @returns The message type mapped to a Google Generative AI chat author.
|
||||
*/
|
||||
export function convertAuthorToRole(author: string): (typeof POSSIBLE_ROLES)[number] {
|
||||
switch (author) {
|
||||
/**
|
||||
* Note: Gemini currently is not supporting system messages
|
||||
* we will convert them to human messages and merge with following
|
||||
* */
|
||||
case 'supervisor':
|
||||
case 'ai':
|
||||
case 'model': // getMessageAuthor returns message.name. code ex.: return message.name ?? type;
|
||||
return 'model'
|
||||
case 'system':
|
||||
return 'system'
|
||||
case 'human':
|
||||
return 'user'
|
||||
case 'tool':
|
||||
case 'function':
|
||||
return 'function'
|
||||
default:
|
||||
return 'user' // return user as default instead of throwing error
|
||||
}
|
||||
}
|
||||
|
||||
function messageContentMedia(content: MessageContentComplex): Part {
|
||||
if ('mimeType' in content && 'data' in content) {
|
||||
return {
|
||||
inlineData: {
|
||||
mimeType: content.mimeType,
|
||||
data: content.data
|
||||
}
|
||||
}
|
||||
}
|
||||
if ('mimeType' in content && 'fileUri' in content) {
|
||||
return {
|
||||
fileData: {
|
||||
mimeType: content.mimeType,
|
||||
fileUri: content.fileUri
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
throw new Error('Invalid media content')
|
||||
}
|
||||
|
||||
function inferToolNameFromPreviousMessages(message: ToolMessage | ToolMessageChunk, previousMessages: BaseMessage[]): string | undefined {
|
||||
return previousMessages
|
||||
.map((msg) => {
|
||||
if (isAIMessage(msg)) {
|
||||
return msg.tool_calls ?? []
|
||||
}
|
||||
return []
|
||||
})
|
||||
.flat()
|
||||
.find((toolCall) => {
|
||||
return toolCall.id === message.tool_call_id
|
||||
})?.name
|
||||
}
|
||||
|
||||
function _getStandardContentBlockConverter(isMultimodalModel: boolean) {
|
||||
const standardContentBlockConverter: StandardContentBlockConverter<{
|
||||
text: TextPart
|
||||
image: FileDataPart | InlineDataPart
|
||||
audio: FileDataPart | InlineDataPart
|
||||
file: FileDataPart | InlineDataPart | TextPart
|
||||
}> = {
|
||||
providerName: 'Google Gemini',
|
||||
|
||||
fromStandardTextBlock(block) {
|
||||
return {
|
||||
text: block.text
|
||||
}
|
||||
},
|
||||
|
||||
fromStandardImageBlock(block): FileDataPart | InlineDataPart {
|
||||
if (!isMultimodalModel) {
|
||||
throw new Error('This model does not support images')
|
||||
}
|
||||
if (block.source_type === 'url') {
|
||||
const data = parseBase64DataUrl({ dataUrl: block.url })
|
||||
if (data) {
|
||||
return {
|
||||
inlineData: {
|
||||
mimeType: data.mime_type,
|
||||
data: data.data
|
||||
}
|
||||
}
|
||||
} else {
|
||||
return {
|
||||
fileData: {
|
||||
mimeType: block.mime_type ?? '',
|
||||
fileUri: block.url
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (block.source_type === 'base64') {
|
||||
return {
|
||||
inlineData: {
|
||||
mimeType: block.mime_type ?? '',
|
||||
data: block.data
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
throw new Error(`Unsupported source type: ${block.source_type}`)
|
||||
},
|
||||
|
||||
fromStandardAudioBlock(block): FileDataPart | InlineDataPart {
|
||||
if (!isMultimodalModel) {
|
||||
throw new Error('This model does not support audio')
|
||||
}
|
||||
if (block.source_type === 'url') {
|
||||
const data = parseBase64DataUrl({ dataUrl: block.url })
|
||||
if (data) {
|
||||
return {
|
||||
inlineData: {
|
||||
mimeType: data.mime_type,
|
||||
data: data.data
|
||||
}
|
||||
}
|
||||
} else {
|
||||
return {
|
||||
fileData: {
|
||||
mimeType: block.mime_type ?? '',
|
||||
fileUri: block.url
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (block.source_type === 'base64') {
|
||||
return {
|
||||
inlineData: {
|
||||
mimeType: block.mime_type ?? '',
|
||||
data: block.data
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
throw new Error(`Unsupported source type: ${block.source_type}`)
|
||||
},
|
||||
|
||||
fromStandardFileBlock(block): FileDataPart | InlineDataPart | TextPart {
|
||||
if (!isMultimodalModel) {
|
||||
throw new Error('This model does not support files')
|
||||
}
|
||||
if (block.source_type === 'text') {
|
||||
return {
|
||||
text: block.text
|
||||
}
|
||||
}
|
||||
if (block.source_type === 'url') {
|
||||
const data = parseBase64DataUrl({ dataUrl: block.url })
|
||||
if (data) {
|
||||
return {
|
||||
inlineData: {
|
||||
mimeType: data.mime_type,
|
||||
data: data.data
|
||||
}
|
||||
}
|
||||
} else {
|
||||
return {
|
||||
fileData: {
|
||||
mimeType: block.mime_type ?? '',
|
||||
fileUri: block.url
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (block.source_type === 'base64') {
|
||||
return {
|
||||
inlineData: {
|
||||
mimeType: block.mime_type ?? '',
|
||||
data: block.data
|
||||
}
|
||||
}
|
||||
}
|
||||
throw new Error(`Unsupported source type: ${block.source_type}`)
|
||||
}
|
||||
}
|
||||
return standardContentBlockConverter
|
||||
}
|
||||
|
||||
/**
 * Converts a single LangChain complex content item into a Google Generative AI `Part`.
 *
 * Handles standard data content blocks (delegated to the shared converter), plain
 * text, executable-code and code-execution-result parts, base64 data-URL images,
 * `media` blocks, `tool_use` blocks, and generic "<mime>/<subtype>" inline data.
 * Returns `undefined` for `functionCall` entries because those are reconstructed
 * later from `message.tool_calls`.
 *
 * @param content LangChain message content item.
 * @param isMultimodalModel Whether the target model accepts non-text parts.
 * @throws Error for images on non-multimodal models, non-data-URL image sources,
 *         or unrecognized content types.
 */
function _convertLangChainContentToPart(content: MessageContentComplex, isMultimodalModel: boolean): Part | undefined {
    // Standard (provider-agnostic) data blocks go through the shared converter.
    if (isDataContentBlock(content)) {
        return convertToProviderContentBlock(content, _getStandardContentBlockConverter(isMultimodalModel))
    }

    if (content.type === 'text') {
        return { text: content.text }
    } else if (content.type === 'executableCode') {
        return { executableCode: content.executableCode }
    } else if (content.type === 'codeExecutionResult') {
        return { codeExecutionResult: content.codeExecutionResult }
    } else if (content.type === 'image_url') {
        if (!isMultimodalModel) {
            throw new Error(`This model does not support images`)
        }
        let source
        if (typeof content.image_url === 'string') {
            source = content.image_url
        } else if (typeof content.image_url === 'object' && 'url' in content.image_url) {
            source = content.image_url.url
        } else {
            throw new Error('Please provide image as base64 encoded data URL')
        }
        // Expect "data:<mime>;base64,<payload>" — split the metadata from the payload.
        const [dm, data] = source.split(',')
        if (!dm.startsWith('data:')) {
            throw new Error('Please provide image as base64 encoded data URL')
        }

        const [mimeType, encoding] = dm.replace(/^data:/, '').split(';')
        if (encoding !== 'base64') {
            throw new Error('Please provide image as base64 encoded data URL')
        }

        return {
            inlineData: {
                data,
                mimeType
            }
        }
    } else if (content.type === 'media') {
        return messageContentMedia(content)
    } else if (content.type === 'tool_use') {
        return {
            functionCall: {
                name: content.name,
                args: content.input
            }
        }
    } else if (
        // A bare "<mime>/<subtype>" type with string data is treated as inline data.
        // NOTE: this check must stay AFTER the named-type branches above.
        content.type?.includes('/') &&
        // Ensure it's a single slash.
        content.type.split('/').length === 2 &&
        'data' in content &&
        typeof content.data === 'string'
    ) {
        return {
            inlineData: {
                mimeType: content.type,
                data: content.data
            }
        }
    } else if ('functionCall' in content) {
        // No action needed here — function calls will be added later from message.tool_calls
        return undefined
    } else {
        if ('type' in content) {
            throw new Error(`Unknown content type ${content.type}`)
        } else {
            throw new Error(`Unknown content ${JSON.stringify(content)}`)
        }
    }
}
|
||||
|
||||
/**
 * Converts a LangChain BaseMessage into the Google Generative AI `Part[]` payload.
 *
 * Tool messages become `functionResponse` parts (wrapped in `{ error: ... }` when
 * the tool call failed). Other messages produce text/content parts followed by
 * `functionCall` parts derived from `message.tool_calls`.
 *
 * @param message The message to convert.
 * @param isMultimodalModel Whether non-text parts are allowed.
 * @param previousMessages Earlier messages, used to infer a missing tool name.
 * @throws Error when a ToolMessage's tool name cannot be determined.
 */
export function convertMessageContentToParts(message: BaseMessage, isMultimodalModel: boolean, previousMessages: BaseMessage[]): Part[] {
    if (isToolMessage(message)) {
        // Google requires a function name on every functionResponse; fall back to
        // inferring it from a preceding AI message's tool_calls when not set.
        const messageName = message.name ?? inferToolNameFromPreviousMessages(message, previousMessages)
        if (messageName === undefined) {
            throw new Error(
                `Google requires a tool name for each tool call response, and we could not infer a called tool name for ToolMessage "${message.id}" from your passed messages. Please populate a "name" field on that ToolMessage explicitly.`
            )
        }

        const result = Array.isArray(message.content)
            ? (message.content.map((c) => _convertLangChainContentToPart(c, isMultimodalModel)).filter((p) => p !== undefined) as Part[])
            : message.content

        if (message.status === 'error') {
            return [
                {
                    functionResponse: {
                        name: messageName,
                        // The API expects an object with an `error` field if the function call fails.
                        // `error` must be a valid object (not a string or array), so we wrap `message.content` here
                        response: { error: { details: result } }
                    }
                }
            ]
        }

        return [
            {
                functionResponse: {
                    name: messageName,
                    // again, can't have a string or array value for `response`, so we wrap it as an object here
                    response: { result }
                }
            }
        ]
    }

    let functionCalls: FunctionCallPart[] = []
    const messageParts: Part[] = []

    // Plain, non-empty string content becomes a single text part.
    if (typeof message.content === 'string' && message.content) {
        messageParts.push({ text: message.content })
    }

    // Structured content: convert each item, dropping functionCall placeholders.
    if (Array.isArray(message.content)) {
        messageParts.push(
            ...(message.content.map((c) => _convertLangChainContentToPart(c, isMultimodalModel)).filter((p) => p !== undefined) as Part[])
        )
    }

    // Tool calls on AI messages are appended after all content parts.
    if (isAIMessage(message) && message.tool_calls?.length) {
        functionCalls = message.tool_calls.map((tc) => {
            return {
                functionCall: {
                    name: tc.name,
                    args: tc.args
                }
            }
        })
    }

    return [...messageParts, ...functionCalls]
}
|
||||
|
||||
export function convertBaseMessagesToContent(
|
||||
messages: BaseMessage[],
|
||||
isMultimodalModel: boolean,
|
||||
convertSystemMessageToHumanContent: boolean = false
|
||||
) {
|
||||
return messages.reduce<{
|
||||
content: Content[]
|
||||
mergeWithPreviousContent: boolean
|
||||
}>(
|
||||
(acc, message, index) => {
|
||||
if (!isBaseMessage(message)) {
|
||||
throw new Error('Unsupported message input')
|
||||
}
|
||||
const author = getMessageAuthor(message)
|
||||
if (author === 'system' && index !== 0) {
|
||||
throw new Error('System message should be the first one')
|
||||
}
|
||||
const role = convertAuthorToRole(author)
|
||||
|
||||
const prevContent = acc.content[acc.content.length]
|
||||
if (!acc.mergeWithPreviousContent && prevContent && prevContent.role === role) {
|
||||
throw new Error('Google Generative AI requires alternate messages between authors')
|
||||
}
|
||||
|
||||
const parts = convertMessageContentToParts(message, isMultimodalModel, messages.slice(0, index))
|
||||
|
||||
if (acc.mergeWithPreviousContent) {
|
||||
const prevContent = acc.content[acc.content.length - 1]
|
||||
if (!prevContent) {
|
||||
throw new Error('There was a problem parsing your system message. Please try a prompt without one.')
|
||||
}
|
||||
prevContent.parts.push(...parts)
|
||||
|
||||
return {
|
||||
mergeWithPreviousContent: false,
|
||||
content: acc.content
|
||||
}
|
||||
}
|
||||
let actualRole = role
|
||||
if (actualRole === 'function' || (actualRole === 'system' && !convertSystemMessageToHumanContent)) {
|
||||
// GenerativeAI API will throw an error if the role is not "user" or "model."
|
||||
actualRole = 'user'
|
||||
}
|
||||
const content: Content = {
|
||||
role: actualRole,
|
||||
parts
|
||||
}
|
||||
return {
|
||||
mergeWithPreviousContent: author === 'system' && !convertSystemMessageToHumanContent,
|
||||
content: [...acc.content, content]
|
||||
}
|
||||
},
|
||||
{ content: [], mergeWithPreviousContent: false }
|
||||
).content
|
||||
}
|
||||
|
||||
/**
 * Maps a Gemini GenerateContent response onto a LangChain `ChatResult`.
 *
 * A single text part collapses to a plain string; mixed parts are kept as a
 * structured array. Inline data (e.g. generated images) is additionally
 * surfaced via `response_metadata.inlineData`. Function calls become
 * `tool_calls` on the resulting AIMessage.
 *
 * @param response SDK response for one generation.
 * @param extra Optional usage metadata to attach to the message and llmOutput.
 */
export function mapGenerateContentResultToChatResult(
    response: EnhancedGenerateContentResponse,
    extra?: {
        usageMetadata: UsageMetadata | undefined
    }
): ChatResult {
    // if rejected or error, return empty generations with reason in filters
    if (!response.candidates || response.candidates.length === 0 || !response.candidates[0]) {
        return {
            generations: [],
            llmOutput: {
                filters: response.promptFeedback
            }
        }
    }

    const functionCalls = response.functionCalls()
    // Only the first candidate is mapped.
    const [candidate] = response.candidates
    const { content: candidateContent, ...generationInfo } = candidate
    let content: MessageContent | undefined
    // Inline payloads extracted from parts, surfaced via response_metadata.
    const inlineDataItems: any[] = []

    // Exactly one text part → plain string content.
    if (Array.isArray(candidateContent?.parts) && candidateContent.parts.length === 1 && candidateContent.parts[0].text) {
        content = candidateContent.parts[0].text
    } else if (Array.isArray(candidateContent?.parts) && candidateContent.parts.length > 0) {
        content = candidateContent.parts.map((p) => {
            if ('text' in p) {
                return {
                    type: 'text',
                    text: p.text
                }
            } else if ('executableCode' in p) {
                return {
                    type: 'executableCode',
                    executableCode: p.executableCode
                }
            } else if ('codeExecutionResult' in p) {
                return {
                    type: 'codeExecutionResult',
                    codeExecutionResult: p.codeExecutionResult
                }
            } else if ('inlineData' in p && p.inlineData) {
                // Extract inline image data for processing by Agent
                inlineDataItems.push({
                    type: 'gemini_inline_data',
                    mimeType: p.inlineData.mimeType,
                    data: p.inlineData.data
                })
                // Return the inline data as part of the content structure
                return {
                    type: 'inlineData',
                    inlineData: p.inlineData
                }
            }
            // Unrecognized part shapes are passed through unchanged.
            return p
        })
    } else {
        // no content returned - likely due to abnormal stop reason, e.g. malformed function call
        content = []
    }

    // `text` is the string content, or the first text block in the array.
    let text = ''
    if (typeof content === 'string') {
        text = content
    } else if (Array.isArray(content) && content.length > 0) {
        const block = content.find((b) => 'text' in b) as { text: string } | undefined
        text = block?.text ?? text
    }

    // Build response_metadata with inline data if present
    const response_metadata: any = {}
    if (inlineDataItems.length > 0) {
        response_metadata.inlineData = inlineDataItems
    }

    const generation: ChatGeneration = {
        text,
        message: new AIMessage({
            content: content ?? '',
            tool_calls: functionCalls?.map((fc) => {
                return {
                    ...fc,
                    type: 'tool_call',
                    // Gemini does not always supply an id; generate one when missing.
                    id: 'id' in fc && typeof fc.id === 'string' ? fc.id : uuidv4()
                }
            }),
            additional_kwargs: {
                ...generationInfo
            },
            usage_metadata: extra?.usageMetadata,
            response_metadata: Object.keys(response_metadata).length > 0 ? response_metadata : undefined
        }),
        generationInfo
    }

    return {
        generations: [generation],
        llmOutput: {
            tokenUsage: {
                promptTokens: extra?.usageMetadata?.input_tokens,
                completionTokens: extra?.usageMetadata?.output_tokens,
                totalTokens: extra?.usageMetadata?.total_tokens
            }
        }
    }
}
|
||||
|
||||
/**
 * Converts a streamed Gemini response chunk into a LangChain `ChatGenerationChunk`.
 *
 * All-text parts are joined into a plain string; mixed parts are kept as a
 * structured array, with inline data additionally surfaced via
 * `response_metadata.inlineData`. Function calls become tool_call_chunks.
 *
 * @param response One streamed SDK response.
 * @param extra Usage metadata and the chunk's index within the stream.
 * @returns The chunk, or null when the response carries no candidates.
 */
export function convertResponseContentToChatGenerationChunk(
    response: EnhancedGenerateContentResponse,
    extra: {
        usageMetadata?: UsageMetadata | undefined
        index: number
    }
): ChatGenerationChunk | null {
    if (!response.candidates || response.candidates.length === 0) {
        return null
    }
    const functionCalls = response.functionCalls()
    // Only the first candidate is mapped.
    const [candidate] = response.candidates
    const { content: candidateContent, ...generationInfo } = candidate
    let content: MessageContent | undefined
    // Inline payloads extracted from parts, surfaced via response_metadata.
    const inlineDataItems: any[] = []

    // Checks if some parts do not have text. If false, it means that the content is a string.
    if (Array.isArray(candidateContent?.parts) && candidateContent.parts.every((p) => 'text' in p)) {
        content = candidateContent.parts.map((p) => p.text).join('')
    } else if (Array.isArray(candidateContent?.parts)) {
        content = candidateContent.parts.map((p) => {
            if ('text' in p) {
                return {
                    type: 'text',
                    text: p.text
                }
            } else if ('executableCode' in p) {
                return {
                    type: 'executableCode',
                    executableCode: p.executableCode
                }
            } else if ('codeExecutionResult' in p) {
                return {
                    type: 'codeExecutionResult',
                    codeExecutionResult: p.codeExecutionResult
                }
            } else if ('inlineData' in p && p.inlineData) {
                // Extract inline image data for processing by Agent
                inlineDataItems.push({
                    type: 'gemini_inline_data',
                    mimeType: p.inlineData.mimeType,
                    data: p.inlineData.data
                })
                // Return the inline data as part of the content structure
                return {
                    type: 'inlineData',
                    inlineData: p.inlineData
                }
            }
            // Unrecognized part shapes are passed through unchanged.
            return p
        })
    } else {
        // no content returned - likely due to abnormal stop reason, e.g. malformed function call
        content = []
    }

    // `text` is the string content, or the first text block in the array.
    let text = ''
    if (content && typeof content === 'string') {
        text = content
    } else if (Array.isArray(content)) {
        const block = content.find((b) => 'text' in b) as { text: string } | undefined
        text = block?.text ?? ''
    }

    const toolCallChunks: ToolCallChunk[] = []
    if (functionCalls) {
        toolCallChunks.push(
            ...functionCalls.map((fc) => ({
                ...fc,
                args: JSON.stringify(fc.args),
                index: extra.index,
                type: 'tool_call_chunk' as const,
                // Gemini does not always supply an id; generate one when missing.
                id: 'id' in fc && typeof fc.id === 'string' ? fc.id : uuidv4()
            }))
        )
    }

    // Build response_metadata with inline data if present
    const response_metadata: any = {}
    if (inlineDataItems.length > 0) {
        response_metadata.inlineData = inlineDataItems
    }

    return new ChatGenerationChunk({
        text,
        message: new AIMessageChunk({
            content: content || '',
            name: !candidateContent ? undefined : candidateContent.role,
            tool_call_chunks: toolCallChunks,
            // Each chunk can have unique "generationInfo", and merging strategy is unclear,
            // so leave blank for now.
            additional_kwargs: {},
            usage_metadata: extra.usageMetadata,
            response_metadata: Object.keys(response_metadata).length > 0 ? response_metadata : undefined
        }),
        generationInfo
    })
}
|
||||
|
||||
export function convertToGenerativeAITools(tools: GoogleGenerativeAIToolType[]): GoogleGenerativeAIFunctionDeclarationsTool[] {
|
||||
if (tools.every((tool) => 'functionDeclarations' in tool && Array.isArray(tool.functionDeclarations))) {
|
||||
return tools as GoogleGenerativeAIFunctionDeclarationsTool[]
|
||||
}
|
||||
return [
|
||||
{
|
||||
functionDeclarations: tools.map((tool): GenerativeAIFunctionDeclaration => {
|
||||
if (isLangChainTool(tool)) {
|
||||
const jsonSchema = schemaToGenerativeAIParameters(tool.schema)
|
||||
if (jsonSchema.type === 'object' && 'properties' in jsonSchema && Object.keys(jsonSchema.properties).length === 0) {
|
||||
return {
|
||||
name: tool.name,
|
||||
description: tool.description
|
||||
}
|
||||
}
|
||||
return {
|
||||
name: tool.name,
|
||||
description: tool.description,
|
||||
parameters: jsonSchema
|
||||
}
|
||||
}
|
||||
if (isOpenAITool(tool)) {
|
||||
return {
|
||||
name: tool.function.name,
|
||||
description: tool.function.description ?? `A function available to call.`,
|
||||
parameters: jsonSchemaToGeminiParameters(tool.function.parameters)
|
||||
}
|
||||
}
|
||||
return tool as unknown as GenerativeAIFunctionDeclaration
|
||||
})
|
||||
}
|
||||
]
|
||||
}
|
||||
|
|
@ -0,0 +1,63 @@
|
|||
import { BaseLLMOutputParser, OutputParserException } from '@langchain/core/output_parsers'
|
||||
import { ChatGeneration } from '@langchain/core/outputs'
|
||||
import { ToolCall } from '@langchain/core/messages/tool'
|
||||
import { InteropZodType, interopSafeParseAsync } from '@langchain/core/utils/types'
|
||||
import { JsonOutputKeyToolsParserParamsInterop } from '@langchain/core/output_parsers/openai_tools'
|
||||
|
||||
/** Constructor params; inherits keyName/returnSingle/zodSchema semantics from the interop parser params. */
interface GoogleGenerativeAIToolsOutputParserParams<T extends Record<string, any>> extends JsonOutputKeyToolsParserParamsInterop<T> {}

/**
 * Output parser that extracts the first tool call from the generations and
 * (optionally) validates its arguments against a Zod schema.
 */
export class GoogleGenerativeAIToolsOutputParser<T extends Record<string, any> = Record<string, any>> extends BaseLLMOutputParser<T> {
    static lc_name() {
        return 'GoogleGenerativeAIToolsOutputParser'
    }

    lc_namespace = ['langchain', 'google_genai', 'output_parsers']

    returnId = false

    /** The type of tool calls to return. */
    keyName: string

    /** Whether to return only the first tool call. */
    returnSingle = false

    // Optional schema used to validate the extracted tool call's arguments.
    zodSchema?: InteropZodType<T>

    constructor(params: GoogleGenerativeAIToolsOutputParserParams<T>) {
        super(params)
        this.keyName = params.keyName
        this.returnSingle = params.returnSingle ?? this.returnSingle
        this.zodSchema = params.zodSchema
    }

    /**
     * Validates `result` against `zodSchema` when one is configured;
     * otherwise returns it unchanged.
     *
     * @throws OutputParserException when schema validation fails.
     */
    protected async _validateResult(result: unknown): Promise<T> {
        if (this.zodSchema === undefined) {
            return result as T
        }
        const zodParsedResult = await interopSafeParseAsync(this.zodSchema, result)
        if (zodParsedResult.success) {
            return zodParsedResult.data
        } else {
            throw new OutputParserException(
                `Failed to parse. Text: "${JSON.stringify(result, null, 2)}". Error: ${JSON.stringify(zodParsedResult.error.issues)}`,
                JSON.stringify(result, null, 2)
            )
        }
    }

    /**
     * Collects tool_calls across all generations and returns the validated
     * arguments of the first one.
     *
     * @throws Error when no generation carries a tool call.
     */
    async parseResult(generations: ChatGeneration[]): Promise<T> {
        const tools = generations.flatMap((generation) => {
            const { message } = generation
            if (!('tool_calls' in message) || !Array.isArray(message.tool_calls)) {
                return []
            }
            return message.tool_calls as ToolCall[]
        })
        if (tools[0] === undefined) {
            throw new Error('No parseable tool calls provided to GoogleGenerativeAIToolsOutputParser.')
        }
        const [tool] = tools
        const validatedResult = await this._validateResult(tool.args)
        return validatedResult
    }
}
|
||||
|
|
@ -0,0 +1,136 @@
|
|||
import {
|
||||
Tool as GenerativeAITool,
|
||||
ToolConfig,
|
||||
FunctionCallingMode,
|
||||
FunctionDeclaration,
|
||||
FunctionDeclarationsTool,
|
||||
FunctionDeclarationSchema
|
||||
} from '@google/generative-ai'
|
||||
import { ToolChoice } from '@langchain/core/language_models/chat_models'
|
||||
import { StructuredToolInterface } from '@langchain/core/tools'
|
||||
import { isLangChainTool } from '@langchain/core/utils/function_calling'
|
||||
import { isOpenAITool, ToolDefinition } from '@langchain/core/language_models/base'
|
||||
import { convertToGenerativeAITools } from './common.js'
|
||||
import { GoogleGenerativeAIToolType } from './types.js'
|
||||
import { removeAdditionalProperties } from './zod_to_genai_parameters.js'
|
||||
|
||||
export function convertToolsToGenAI(
|
||||
tools: GoogleGenerativeAIToolType[],
|
||||
extra?: {
|
||||
toolChoice?: ToolChoice
|
||||
allowedFunctionNames?: string[]
|
||||
}
|
||||
): {
|
||||
tools: GenerativeAITool[]
|
||||
toolConfig?: ToolConfig
|
||||
} {
|
||||
// Extract function declaration processing to a separate function
|
||||
const genAITools = processTools(tools)
|
||||
|
||||
// Simplify tool config creation
|
||||
const toolConfig = createToolConfig(genAITools, extra)
|
||||
|
||||
return { tools: genAITools, toolConfig }
|
||||
}
|
||||
|
||||
/**
 * Normalizes a heterogeneous tool list (LangChain tools, OpenAI tool
 * definitions, raw GenAI tools) into GenerativeAI `Tool[]`.
 *
 * Function declarations converted from LangChain/OpenAI tools are pooled, then
 * either merged into the first GenAI-native `functionDeclarations` tool (if one
 * exists) or appended as a single combined declarations tool.
 */
function processTools(tools: GoogleGenerativeAIToolType[]): GenerativeAITool[] {
    let functionDeclarationTools: FunctionDeclaration[] = []
    const genAITools: GenerativeAITool[] = []

    tools.forEach((tool) => {
        if (isLangChainTool(tool)) {
            const [convertedTool] = convertToGenerativeAITools([tool as StructuredToolInterface])
            if (convertedTool.functionDeclarations) {
                functionDeclarationTools.push(...convertedTool.functionDeclarations)
            }
        } else if (isOpenAITool(tool)) {
            const { functionDeclarations } = convertOpenAIToolToGenAI(tool)
            if (functionDeclarations) {
                functionDeclarationTools.push(...functionDeclarations)
            } else {
                throw new Error('Failed to convert OpenAI structured tool to GenerativeAI tool')
            }
        } else {
            // Assumed to already be a GenAI-native tool (declarations, code execution, search, ...).
            genAITools.push(tool as GenerativeAITool)
        }
    })

    const genAIFunctionDeclaration = genAITools.find((t) => 'functionDeclarations' in t)
    if (genAIFunctionDeclaration) {
        // Merge the pooled declarations into the first declarations tool only;
        // the pool is cleared after the first merge so later tools are untouched.
        return genAITools.map((tool) => {
            if (functionDeclarationTools?.length > 0 && 'functionDeclarations' in tool) {
                const newTool = {
                    functionDeclarations: [...(tool.functionDeclarations || []), ...functionDeclarationTools]
                }
                // Clear the functionDeclarationTools array so it is not passed again
                functionDeclarationTools = []
                return newTool
            }
            return tool
        })
    }

    return [
        ...genAITools,
        ...(functionDeclarationTools.length > 0
            ? [
                  {
                      functionDeclarations: functionDeclarationTools
                  }
              ]
            : [])
    ]
}
|
||||
|
||||
function convertOpenAIToolToGenAI(tool: ToolDefinition): FunctionDeclarationsTool {
|
||||
return {
|
||||
functionDeclarations: [
|
||||
{
|
||||
name: tool.function.name,
|
||||
description: tool.function.description,
|
||||
parameters: removeAdditionalProperties(tool.function.parameters) as FunctionDeclarationSchema
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
function createToolConfig(
|
||||
genAITools: GenerativeAITool[],
|
||||
extra?: {
|
||||
toolChoice?: ToolChoice
|
||||
allowedFunctionNames?: string[]
|
||||
}
|
||||
): ToolConfig | undefined {
|
||||
if (!genAITools.length || !extra) return undefined
|
||||
|
||||
const { toolChoice, allowedFunctionNames } = extra
|
||||
|
||||
const modeMap: Record<string, FunctionCallingMode> = {
|
||||
any: FunctionCallingMode.ANY,
|
||||
auto: FunctionCallingMode.AUTO,
|
||||
none: FunctionCallingMode.NONE
|
||||
}
|
||||
|
||||
if (toolChoice && ['any', 'auto', 'none'].includes(toolChoice as string)) {
|
||||
return {
|
||||
functionCallingConfig: {
|
||||
mode: modeMap[toolChoice as keyof typeof modeMap] ?? 'MODE_UNSPECIFIED',
|
||||
allowedFunctionNames
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (typeof toolChoice === 'string' || allowedFunctionNames) {
|
||||
return {
|
||||
functionCallingConfig: {
|
||||
mode: FunctionCallingMode.ANY,
|
||||
allowedFunctionNames: [
|
||||
...(allowedFunctionNames ?? []),
|
||||
...(toolChoice && typeof toolChoice === 'string' ? [toolChoice] : [])
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return undefined
|
||||
}
|
||||
|
|
@ -0,0 +1,12 @@
|
|||
import {
|
||||
CodeExecutionTool,
|
||||
FunctionDeclarationsTool as GoogleGenerativeAIFunctionDeclarationsTool,
|
||||
GoogleSearchRetrievalTool
|
||||
} from '@google/generative-ai'
|
||||
import { BindToolsInput } from '@langchain/core/language_models/chat_models'
|
||||
|
||||
/**
 * Any tool shape accepted by the Google Generative AI chat model: LangChain
 * bindable tools, raw GenAI function-declaration tools, code-execution tools,
 * or Google Search retrieval tools.
 */
export type GoogleGenerativeAIToolType =
    | BindToolsInput
    | GoogleGenerativeAIFunctionDeclarationsTool
    | CodeExecutionTool
    | GoogleSearchRetrievalTool
|
||||
|
|
@ -0,0 +1,67 @@
|
|||
import {
|
||||
type FunctionDeclarationSchema as GenerativeAIFunctionDeclarationSchema,
|
||||
type SchemaType as FunctionDeclarationSchemaType
|
||||
} from '@google/generative-ai'
|
||||
import { InteropZodType, isInteropZodSchema } from '@langchain/core/utils/types'
|
||||
import { type JsonSchema7Type, toJsonSchema } from '@langchain/core/utils/json_schema'
|
||||
|
||||
/** JSON-schema shape accepted by GenAI function declarations (cleaned form). */
export interface GenerativeAIJsonSchema extends Record<string, unknown> {
    properties?: Record<string, GenerativeAIJsonSchema>
    type: FunctionDeclarationSchemaType
}

/** Schema that may still carry `additionalProperties`, to be stripped before sending. */
export interface GenerativeAIJsonSchemaDirty extends GenerativeAIJsonSchema {
    properties?: Record<string, GenerativeAIJsonSchemaDirty>
    additionalProperties?: boolean
}
|
||||
|
||||
export function removeAdditionalProperties(obj: Record<string, any>): GenerativeAIJsonSchema {
|
||||
if (typeof obj === 'object' && obj !== null) {
|
||||
const newObj = { ...obj }
|
||||
|
||||
if ('additionalProperties' in newObj) {
|
||||
delete newObj.additionalProperties
|
||||
}
|
||||
if ('$schema' in newObj) {
|
||||
delete newObj.$schema
|
||||
}
|
||||
if ('strict' in newObj) {
|
||||
delete newObj.strict
|
||||
}
|
||||
|
||||
for (const key in newObj) {
|
||||
if (key in newObj) {
|
||||
if (Array.isArray(newObj[key])) {
|
||||
newObj[key] = newObj[key].map(removeAdditionalProperties)
|
||||
} else if (typeof newObj[key] === 'object' && newObj[key] !== null) {
|
||||
newObj[key] = removeAdditionalProperties(newObj[key])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return newObj as GenerativeAIJsonSchema
|
||||
}
|
||||
|
||||
return obj as GenerativeAIJsonSchema
|
||||
}
|
||||
|
||||
/**
 * Converts a Zod schema or a plain JSON schema into the parameter schema
 * format accepted by GenAI function declarations.
 *
 * @param schema Zod schema (converted via toJsonSchema) or JSON schema.
 * @returns Cleaned schema suitable for a function declaration's `parameters`.
 */
export function schemaToGenerativeAIParameters<RunOutput extends Record<string, any> = Record<string, any>>(
    schema: InteropZodType<RunOutput> | JsonSchema7Type
): GenerativeAIFunctionDeclarationSchema {
    // GenerativeAI doesn't accept either the $schema or additionalProperties
    // attributes, so we need to explicitly remove them.
    const jsonSchema = removeAdditionalProperties(isInteropZodSchema(schema) ? toJsonSchema(schema) : schema)
    // NOTE(review): this excludes a `_schema` key, but `$schema` is already stripped
    // by removeAdditionalProperties — confirm whether `_schema` was the intended name.
    const { _schema, ...rest } = jsonSchema

    return rest as GenerativeAIFunctionDeclarationSchema
}
|
||||
|
||||
/**
 * Cleans a plain JSON schema for use as Gemini function-declaration parameters.
 *
 * @param schema Possibly-"dirty" JSON schema (may carry `additionalProperties`).
 * @returns Cleaned schema suitable for a function declaration's `parameters`.
 */
export function jsonSchemaToGeminiParameters(schema: Record<string, any>): GenerativeAIFunctionDeclarationSchema {
    // Gemini doesn't accept either the $schema or additionalProperties
    // attributes, so we need to explicitly remove them.

    const jsonSchema = removeAdditionalProperties(schema as GenerativeAIJsonSchemaDirty)
    // NOTE(review): `_schema` exclusion mirrors schemaToGenerativeAIParameters; `$schema`
    // is already stripped above — confirm whether `_schema` was the intended name.
    const { _schema, ...rest } = jsonSchema

    return rest as GenerativeAIFunctionDeclarationSchema
}
|
||||
|
|
@ -1,5 +1,6 @@
|
|||
import { BaseCache } from '@langchain/core/caches'
|
||||
import { ChatVertexAI as LcChatVertexAI, ChatVertexAIInput } from '@langchain/google-vertexai'
|
||||
import { ChatVertexAIInput, ChatVertexAI as LcChatVertexAI } from '@langchain/google-vertexai'
|
||||
import { buildGoogleCredentials } from '../../../src/google-utils'
|
||||
import {
|
||||
ICommonObject,
|
||||
IMultiModalOption,
|
||||
|
|
@ -9,8 +10,8 @@ import {
|
|||
INodeParams,
|
||||
IVisionChatModal
|
||||
} from '../../../src/Interface'
|
||||
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
|
||||
import { getModels, MODEL_TYPE } from '../../../src/modelLoader'
|
||||
import { getModels, getRegions, MODEL_TYPE } from '../../../src/modelLoader'
|
||||
import { getBaseClasses } from '../../../src/utils'
|
||||
|
||||
const DEFAULT_IMAGE_MAX_TOKEN = 8192
|
||||
const DEFAULT_IMAGE_MODEL = 'gemini-1.5-flash-latest'
|
||||
|
|
@ -65,7 +66,7 @@ class GoogleVertexAI_ChatModels implements INode {
|
|||
constructor() {
|
||||
this.label = 'ChatGoogleVertexAI'
|
||||
this.name = 'chatGoogleVertexAI'
|
||||
this.version = 5.1
|
||||
this.version = 5.3
|
||||
this.type = 'ChatGoogleVertexAI'
|
||||
this.icon = 'GoogleVertex.svg'
|
||||
this.category = 'Chat Models'
|
||||
|
|
@ -87,6 +88,14 @@ class GoogleVertexAI_ChatModels implements INode {
|
|||
type: 'BaseCache',
|
||||
optional: true
|
||||
},
|
||||
{
|
||||
label: 'Region',
|
||||
description: 'Region to use for the model.',
|
||||
name: 'region',
|
||||
type: 'asyncOptions',
|
||||
loadMethod: 'listRegions',
|
||||
optional: true
|
||||
},
|
||||
{
|
||||
label: 'Model Name',
|
||||
name: 'modelName',
|
||||
|
|
@ -151,6 +160,16 @@ class GoogleVertexAI_ChatModels implements INode {
|
|||
step: 1,
|
||||
optional: true,
|
||||
additionalParams: true
|
||||
},
|
||||
{
|
||||
label: 'Thinking Budget',
|
||||
name: 'thinkingBudget',
|
||||
type: 'number',
|
||||
description: 'Number of tokens to use for thinking process (0 to disable)',
|
||||
step: 1,
|
||||
placeholder: '1024',
|
||||
optional: true,
|
||||
additionalParams: true
|
||||
}
|
||||
]
|
||||
}
|
||||
|
|
@ -159,31 +178,13 @@ class GoogleVertexAI_ChatModels implements INode {
|
|||
loadMethods = {
|
||||
async listModels(): Promise<INodeOptionsValue[]> {
|
||||
return await getModels(MODEL_TYPE.CHAT, 'chatGoogleVertexAI')
|
||||
},
|
||||
async listRegions(): Promise<INodeOptionsValue[]> {
|
||||
return await getRegions(MODEL_TYPE.CHAT, 'chatGoogleVertexAI')
|
||||
}
|
||||
}
|
||||
|
||||
async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
|
||||
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
|
||||
const googleApplicationCredentialFilePath = getCredentialParam('googleApplicationCredentialFilePath', credentialData, nodeData)
|
||||
const googleApplicationCredential = getCredentialParam('googleApplicationCredential', credentialData, nodeData)
|
||||
const projectID = getCredentialParam('projectID', credentialData, nodeData)
|
||||
|
||||
const authOptions: ICommonObject = {}
|
||||
if (Object.keys(credentialData).length !== 0) {
|
||||
if (!googleApplicationCredentialFilePath && !googleApplicationCredential)
|
||||
throw new Error('Please specify your Google Application Credential')
|
||||
if (!googleApplicationCredentialFilePath && !googleApplicationCredential)
|
||||
throw new Error(
|
||||
'Error: More than one component has been inputted. Please use only one of the following: Google Application Credential File Path or Google Credential JSON Object'
|
||||
)
|
||||
if (googleApplicationCredentialFilePath && !googleApplicationCredential)
|
||||
authOptions.keyFile = googleApplicationCredentialFilePath
|
||||
else if (!googleApplicationCredentialFilePath && googleApplicationCredential)
|
||||
authOptions.credentials = JSON.parse(googleApplicationCredential)
|
||||
|
||||
if (projectID) authOptions.projectId = projectID
|
||||
}
|
||||
|
||||
const temperature = nodeData.inputs?.temperature as string
|
||||
const modelName = nodeData.inputs?.modelName as string
|
||||
const customModelName = nodeData.inputs?.customModelName as string
|
||||
|
|
@ -192,6 +193,8 @@ class GoogleVertexAI_ChatModels implements INode {
|
|||
const cache = nodeData.inputs?.cache as BaseCache
|
||||
const topK = nodeData.inputs?.topK as string
|
||||
const streaming = nodeData.inputs?.streaming as boolean
|
||||
const thinkingBudget = nodeData.inputs?.thinkingBudget as string
|
||||
const region = nodeData.inputs?.region as string
|
||||
|
||||
const allowImageUploads = nodeData.inputs?.allowImageUploads as boolean
|
||||
|
||||
|
|
@ -206,11 +209,16 @@ class GoogleVertexAI_ChatModels implements INode {
|
|||
modelName: customModelName || modelName,
|
||||
streaming: streaming ?? true
|
||||
}
|
||||
if (Object.keys(authOptions).length !== 0) obj.authOptions = authOptions
|
||||
|
||||
const authOptions = await buildGoogleCredentials(nodeData, options)
|
||||
if (authOptions && Object.keys(authOptions).length !== 0) obj.authOptions = authOptions
|
||||
|
||||
if (maxOutputTokens) obj.maxOutputTokens = parseInt(maxOutputTokens, 10)
|
||||
if (topP) obj.topP = parseFloat(topP)
|
||||
if (cache) obj.cache = cache
|
||||
if (topK) obj.topK = parseFloat(topK)
|
||||
if (thinkingBudget) obj.thinkingBudget = parseInt(thinkingBudget, 10)
|
||||
if (region) obj.location = region
|
||||
|
||||
const model = new ChatVertexAI(nodeData.id, obj)
|
||||
model.setMultiModalOption(multiModalOption)
|
||||
|
|
|
|||
|
|
@ -41,15 +41,17 @@ class ChatHuggingFace_ChatModels implements INode {
|
|||
label: 'Model',
|
||||
name: 'model',
|
||||
type: 'string',
|
||||
description: 'If using own inference endpoint, leave this blank',
|
||||
placeholder: 'gpt2'
|
||||
description:
|
||||
'Model name (e.g., deepseek-ai/DeepSeek-V3.2-Exp:novita). If model includes provider (:) or using router endpoint, leave Endpoint blank.',
|
||||
placeholder: 'deepseek-ai/DeepSeek-V3.2-Exp:novita'
|
||||
},
|
||||
{
|
||||
label: 'Endpoint',
|
||||
name: 'endpoint',
|
||||
type: 'string',
|
||||
placeholder: 'https://xyz.eu-west-1.aws.endpoints.huggingface.cloud/gpt2',
|
||||
description: 'Using your own inference endpoint',
|
||||
description:
|
||||
'Custom inference endpoint (optional). Not needed for models with providers (:) or router endpoints. Leave blank to use Inference Providers.',
|
||||
optional: true
|
||||
},
|
||||
{
|
||||
|
|
@ -103,7 +105,7 @@ class ChatHuggingFace_ChatModels implements INode {
|
|||
type: 'string',
|
||||
rows: 4,
|
||||
placeholder: 'AI assistant:',
|
||||
description: 'Sets the stop sequences to use. Use comma to seperate different sequences.',
|
||||
description: 'Sets the stop sequences to use. Use comma to separate different sequences.',
|
||||
optional: true,
|
||||
additionalParams: true
|
||||
}
|
||||
|
|
@ -124,6 +126,15 @@ class ChatHuggingFace_ChatModels implements INode {
|
|||
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
|
||||
const huggingFaceApiKey = getCredentialParam('huggingFaceApiKey', credentialData, nodeData)
|
||||
|
||||
if (!huggingFaceApiKey) {
|
||||
console.error('[ChatHuggingFace] API key validation failed: No API key found')
|
||||
throw new Error('HuggingFace API key is required. Please configure it in the credential settings.')
|
||||
}
|
||||
|
||||
if (!huggingFaceApiKey.startsWith('hf_')) {
|
||||
console.warn('[ChatHuggingFace] API key format warning: Key does not start with "hf_"')
|
||||
}
|
||||
|
||||
const obj: Partial<HFInput> = {
|
||||
model,
|
||||
apiKey: huggingFaceApiKey
|
||||
|
|
|
|||
|
|
@ -56,9 +56,9 @@ export class HuggingFaceInference extends LLM implements HFInput {
|
|||
this.apiKey = fields?.apiKey ?? getEnvironmentVariable('HUGGINGFACEHUB_API_KEY')
|
||||
this.endpointUrl = fields?.endpointUrl
|
||||
this.includeCredentials = fields?.includeCredentials
|
||||
if (!this.apiKey) {
|
||||
if (!this.apiKey || this.apiKey.trim() === '') {
|
||||
throw new Error(
|
||||
'Please set an API key for HuggingFace Hub in the environment variable HUGGINGFACEHUB_API_KEY or in the apiKey field of the HuggingFaceInference constructor.'
|
||||
'Please set an API key for HuggingFace Hub. Either configure it in the credential settings in the UI, or set the environment variable HUGGINGFACEHUB_API_KEY.'
|
||||
)
|
||||
}
|
||||
}
|
||||
|
|
@ -68,19 +68,21 @@ export class HuggingFaceInference extends LLM implements HFInput {
|
|||
}
|
||||
|
||||
invocationParams(options?: this['ParsedCallOptions']) {
|
||||
return {
|
||||
model: this.model,
|
||||
parameters: {
|
||||
// make it behave similar to openai, returning only the generated text
|
||||
return_full_text: false,
|
||||
temperature: this.temperature,
|
||||
max_new_tokens: this.maxTokens,
|
||||
stop: options?.stop ?? this.stopSequences,
|
||||
top_p: this.topP,
|
||||
top_k: this.topK,
|
||||
repetition_penalty: this.frequencyPenalty
|
||||
}
|
||||
// Return parameters compatible with chatCompletion API (OpenAI-compatible format)
|
||||
const params: any = {
|
||||
temperature: this.temperature,
|
||||
max_tokens: this.maxTokens,
|
||||
stop: options?.stop ?? this.stopSequences,
|
||||
top_p: this.topP
|
||||
}
|
||||
// Include optional parameters if they are defined
|
||||
if (this.topK !== undefined) {
|
||||
params.top_k = this.topK
|
||||
}
|
||||
if (this.frequencyPenalty !== undefined) {
|
||||
params.frequency_penalty = this.frequencyPenalty
|
||||
}
|
||||
return params
|
||||
}
|
||||
|
||||
async *_streamResponseChunks(
|
||||
|
|
@ -88,51 +90,109 @@ export class HuggingFaceInference extends LLM implements HFInput {
|
|||
options: this['ParsedCallOptions'],
|
||||
runManager?: CallbackManagerForLLMRun
|
||||
): AsyncGenerator<GenerationChunk> {
|
||||
const hfi = await this._prepareHFInference()
|
||||
const stream = await this.caller.call(async () =>
|
||||
hfi.textGenerationStream({
|
||||
...this.invocationParams(options),
|
||||
inputs: prompt
|
||||
})
|
||||
)
|
||||
for await (const chunk of stream) {
|
||||
const token = chunk.token.text
|
||||
yield new GenerationChunk({ text: token, generationInfo: chunk })
|
||||
await runManager?.handleLLMNewToken(token ?? '')
|
||||
|
||||
// stream is done
|
||||
if (chunk.generated_text)
|
||||
yield new GenerationChunk({
|
||||
text: '',
|
||||
generationInfo: { finished: true }
|
||||
try {
|
||||
const client = await this._prepareHFInference()
|
||||
const stream = await this.caller.call(async () =>
|
||||
client.chatCompletionStream({
|
||||
model: this.model,
|
||||
messages: [{ role: 'user', content: prompt }],
|
||||
...this.invocationParams(options)
|
||||
})
|
||||
)
|
||||
for await (const chunk of stream) {
|
||||
const token = chunk.choices[0]?.delta?.content || ''
|
||||
if (token) {
|
||||
yield new GenerationChunk({ text: token, generationInfo: chunk })
|
||||
await runManager?.handleLLMNewToken(token)
|
||||
}
|
||||
// stream is done when finish_reason is set
|
||||
if (chunk.choices[0]?.finish_reason) {
|
||||
yield new GenerationChunk({
|
||||
text: '',
|
||||
generationInfo: { finished: true }
|
||||
})
|
||||
break
|
||||
}
|
||||
}
|
||||
} catch (error: any) {
|
||||
console.error('[ChatHuggingFace] Error in _streamResponseChunks:', error)
|
||||
// Provide more helpful error messages
|
||||
if (error?.message?.includes('endpointUrl') || error?.message?.includes('third-party provider')) {
|
||||
throw new Error(
|
||||
`Cannot use custom endpoint with model "${this.model}" that includes a provider. Please leave the Endpoint field blank in the UI. Original error: ${error.message}`
|
||||
)
|
||||
}
|
||||
throw error
|
||||
}
|
||||
}
|
||||
|
||||
/** @ignore */
|
||||
async _call(prompt: string, options: this['ParsedCallOptions']): Promise<string> {
|
||||
const hfi = await this._prepareHFInference()
|
||||
const args = { ...this.invocationParams(options), inputs: prompt }
|
||||
const res = await this.caller.callWithOptions({ signal: options.signal }, hfi.textGeneration.bind(hfi), args)
|
||||
return res.generated_text
|
||||
try {
|
||||
const client = await this._prepareHFInference()
|
||||
// Use chatCompletion for chat models (v4 supports conversational models via Inference Providers)
|
||||
const args = {
|
||||
model: this.model,
|
||||
messages: [{ role: 'user', content: prompt }],
|
||||
...this.invocationParams(options)
|
||||
}
|
||||
const res = await this.caller.callWithOptions({ signal: options.signal }, client.chatCompletion.bind(client), args)
|
||||
const content = res.choices[0]?.message?.content || ''
|
||||
if (!content) {
|
||||
console.error('[ChatHuggingFace] No content in response:', JSON.stringify(res))
|
||||
throw new Error(`No content received from HuggingFace API. Response: ${JSON.stringify(res)}`)
|
||||
}
|
||||
return content
|
||||
} catch (error: any) {
|
||||
console.error('[ChatHuggingFace] Error in _call:', error.message)
|
||||
// Provide more helpful error messages
|
||||
if (error?.message?.includes('endpointUrl') || error?.message?.includes('third-party provider')) {
|
||||
throw new Error(
|
||||
`Cannot use custom endpoint with model "${this.model}" that includes a provider. Please leave the Endpoint field blank in the UI. Original error: ${error.message}`
|
||||
)
|
||||
}
|
||||
if (error?.message?.includes('Invalid username or password') || error?.message?.includes('authentication')) {
|
||||
throw new Error(
|
||||
`HuggingFace API authentication failed. Please verify your API key is correct and starts with "hf_". Original error: ${error.message}`
|
||||
)
|
||||
}
|
||||
throw error
|
||||
}
|
||||
}
|
||||
|
||||
/** @ignore */
|
||||
private async _prepareHFInference() {
|
||||
const { HfInference } = await HuggingFaceInference.imports()
|
||||
const hfi = new HfInference(this.apiKey, {
|
||||
includeCredentials: this.includeCredentials
|
||||
})
|
||||
return this.endpointUrl ? hfi.endpoint(this.endpointUrl) : hfi
|
||||
if (!this.apiKey || this.apiKey.trim() === '') {
|
||||
console.error('[ChatHuggingFace] API key validation failed: Empty or undefined')
|
||||
throw new Error('HuggingFace API key is required. Please configure it in the credential settings.')
|
||||
}
|
||||
|
||||
const { InferenceClient } = await HuggingFaceInference.imports()
|
||||
// Use InferenceClient for chat models (works better with Inference Providers)
|
||||
const client = new InferenceClient(this.apiKey)
|
||||
|
||||
// Don't override endpoint if model uses a provider (contains ':') or if endpoint is router-based
|
||||
// When using Inference Providers, endpoint should be left blank - InferenceClient handles routing automatically
|
||||
if (
|
||||
this.endpointUrl &&
|
||||
!this.model.includes(':') &&
|
||||
!this.endpointUrl.includes('/v1/chat/completions') &&
|
||||
!this.endpointUrl.includes('router.huggingface.co')
|
||||
) {
|
||||
return client.endpoint(this.endpointUrl)
|
||||
}
|
||||
|
||||
// Return client without endpoint override - InferenceClient will use Inference Providers automatically
|
||||
return client
|
||||
}
|
||||
|
||||
/** @ignore */
|
||||
static async imports(): Promise<{
|
||||
HfInference: typeof import('@huggingface/inference').HfInference
|
||||
InferenceClient: typeof import('@huggingface/inference').InferenceClient
|
||||
}> {
|
||||
try {
|
||||
const { HfInference } = await import('@huggingface/inference')
|
||||
return { HfInference }
|
||||
const { InferenceClient } = await import('@huggingface/inference')
|
||||
return { InferenceClient }
|
||||
} catch (e) {
|
||||
throw new Error('Please install huggingface as a dependency with, e.g. `pnpm install @huggingface/inference`')
|
||||
}
|
||||
|
|
|
|||
|
|
@ -124,7 +124,10 @@ class ChatLitellm_ChatModels implements INode {
|
|||
if (topP) obj.topP = parseFloat(topP)
|
||||
if (timeout) obj.timeout = parseInt(timeout, 10)
|
||||
if (cache) obj.cache = cache
|
||||
if (apiKey) obj.openAIApiKey = apiKey
|
||||
if (apiKey) {
|
||||
obj.openAIApiKey = apiKey
|
||||
obj.apiKey = apiKey
|
||||
}
|
||||
|
||||
const model = new ChatOpenAI(obj)
|
||||
|
||||
|
|
|
|||
|
|
@ -111,6 +111,7 @@ class ChatLocalAI_ChatModels implements INode {
|
|||
temperature: parseFloat(temperature),
|
||||
modelName,
|
||||
openAIApiKey: 'sk-',
|
||||
apiKey: 'sk-',
|
||||
streaming: streaming ?? true
|
||||
}
|
||||
|
||||
|
|
@ -118,7 +119,10 @@ class ChatLocalAI_ChatModels implements INode {
|
|||
if (topP) obj.topP = parseFloat(topP)
|
||||
if (timeout) obj.timeout = parseInt(timeout, 10)
|
||||
if (cache) obj.cache = cache
|
||||
if (localAIApiKey) obj.openAIApiKey = localAIApiKey
|
||||
if (localAIApiKey) {
|
||||
obj.openAIApiKey = localAIApiKey
|
||||
obj.apiKey = localAIApiKey
|
||||
}
|
||||
if (basePath) obj.configuration = { baseURL: basePath }
|
||||
|
||||
const model = new ChatOpenAI(obj)
|
||||
|
|
|
|||
|
|
@ -17,9 +17,9 @@ class ChatNvdiaNIM_ChatModels implements INode {
|
|||
|
||||
constructor() {
|
||||
this.label = 'Chat NVIDIA NIM'
|
||||
this.name = 'Chat NVIDIA NIM'
|
||||
this.name = 'chatNvidiaNIM'
|
||||
this.version = 1.1
|
||||
this.type = 'Chat NVIDIA NIM'
|
||||
this.type = 'ChatNvidiaNIM'
|
||||
this.icon = 'nvdia.svg'
|
||||
this.category = 'Chat Models'
|
||||
this.description = 'Wrapper around NVIDIA NIM Inference API'
|
||||
|
|
@ -137,6 +137,7 @@ class ChatNvdiaNIM_ChatModels implements INode {
|
|||
temperature: parseFloat(temperature),
|
||||
modelName,
|
||||
openAIApiKey: nvidiaNIMApiKey ?? 'sk-',
|
||||
apiKey: nvidiaNIMApiKey ?? 'sk-',
|
||||
streaming: streaming ?? true
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -1,10 +1,11 @@
|
|||
import { ChatOpenAI as LangchainChatOpenAI, ChatOpenAIFields, OpenAIClient } from '@langchain/openai'
|
||||
import { ChatOpenAI as LangchainChatOpenAI, ChatOpenAIFields } from '@langchain/openai'
|
||||
import { BaseCache } from '@langchain/core/caches'
|
||||
import { ICommonObject, IMultiModalOption, INode, INodeData, INodeOptionsValue, INodeParams } from '../../../src/Interface'
|
||||
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
|
||||
import { ChatOpenAI } from './FlowiseChatOpenAI'
|
||||
import { getModels, MODEL_TYPE } from '../../../src/modelLoader'
|
||||
import { HttpsProxyAgent } from 'https-proxy-agent'
|
||||
import { OpenAI as OpenAIClient } from 'openai'
|
||||
|
||||
class ChatOpenAI_ChatModels implements INode {
|
||||
label: string
|
||||
|
|
@ -21,7 +22,7 @@ class ChatOpenAI_ChatModels implements INode {
|
|||
constructor() {
|
||||
this.label = 'ChatOpenAI'
|
||||
this.name = 'chatOpenAI'
|
||||
this.version = 8.2
|
||||
this.version = 8.3
|
||||
this.type = 'ChatOpenAI'
|
||||
this.icon = 'openai.svg'
|
||||
this.category = 'Chat Models'
|
||||
|
|
@ -176,9 +177,18 @@ class ChatOpenAI_ChatModels implements INode {
|
|||
allowImageUploads: true
|
||||
}
|
||||
},
|
||||
{
|
||||
label: 'Reasoning',
|
||||
description: 'Whether the model supports reasoning. Only applicable for reasoning models.',
|
||||
name: 'reasoning',
|
||||
type: 'boolean',
|
||||
default: false,
|
||||
optional: true,
|
||||
additionalParams: true
|
||||
},
|
||||
{
|
||||
label: 'Reasoning Effort',
|
||||
description: 'Constrains effort on reasoning for reasoning models. Only applicable for o1 and o3 models.',
|
||||
description: 'Constrains effort on reasoning for reasoning models',
|
||||
name: 'reasoningEffort',
|
||||
type: 'options',
|
||||
options: [
|
||||
|
|
@ -195,9 +205,34 @@ class ChatOpenAI_ChatModels implements INode {
|
|||
name: 'high'
|
||||
}
|
||||
],
|
||||
default: 'medium',
|
||||
optional: false,
|
||||
additionalParams: true
|
||||
additionalParams: true,
|
||||
show: {
|
||||
reasoning: true
|
||||
}
|
||||
},
|
||||
{
|
||||
label: 'Reasoning Summary',
|
||||
description: `A summary of the reasoning performed by the model. This can be useful for debugging and understanding the model's reasoning process`,
|
||||
name: 'reasoningSummary',
|
||||
type: 'options',
|
||||
options: [
|
||||
{
|
||||
label: 'Auto',
|
||||
name: 'auto'
|
||||
},
|
||||
{
|
||||
label: 'Concise',
|
||||
name: 'concise'
|
||||
},
|
||||
{
|
||||
label: 'Detailed',
|
||||
name: 'detailed'
|
||||
}
|
||||
],
|
||||
additionalParams: true,
|
||||
show: {
|
||||
reasoning: true
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
|
|
@ -223,7 +258,8 @@ class ChatOpenAI_ChatModels implements INode {
|
|||
const basePath = nodeData.inputs?.basepath as string
|
||||
const proxyUrl = nodeData.inputs?.proxyUrl as string
|
||||
const baseOptions = nodeData.inputs?.baseOptions
|
||||
const reasoningEffort = nodeData.inputs?.reasoningEffort as OpenAIClient.Chat.ChatCompletionReasoningEffort
|
||||
const reasoningEffort = nodeData.inputs?.reasoningEffort as OpenAIClient.ReasoningEffort | null
|
||||
const reasoningSummary = nodeData.inputs?.reasoningSummary as 'auto' | 'concise' | 'detailed' | null
|
||||
|
||||
const allowImageUploads = nodeData.inputs?.allowImageUploads as boolean
|
||||
const imageResolution = nodeData.inputs?.imageResolution as string
|
||||
|
|
@ -240,15 +276,10 @@ class ChatOpenAI_ChatModels implements INode {
|
|||
temperature: parseFloat(temperature),
|
||||
modelName,
|
||||
openAIApiKey,
|
||||
apiKey: openAIApiKey,
|
||||
streaming: streaming ?? true
|
||||
}
|
||||
|
||||
if (modelName.includes('o3') || modelName.includes('o1')) {
|
||||
delete obj.temperature
|
||||
}
|
||||
if ((modelName.includes('o1') || modelName.includes('o3')) && reasoningEffort) {
|
||||
obj.reasoningEffort = reasoningEffort
|
||||
}
|
||||
if (maxTokens) obj.maxTokens = parseInt(maxTokens, 10)
|
||||
if (topP) obj.topP = parseFloat(topP)
|
||||
if (frequencyPenalty) obj.frequencyPenalty = parseFloat(frequencyPenalty)
|
||||
|
|
@ -261,6 +292,19 @@ class ChatOpenAI_ChatModels implements INode {
|
|||
}
|
||||
if (strictToolCalling) obj.supportsStrictToolCalling = strictToolCalling
|
||||
|
||||
if (modelName.includes('o1') || modelName.includes('o3') || modelName.includes('gpt-5')) {
|
||||
delete obj.temperature
|
||||
delete obj.stop
|
||||
const reasoning: OpenAIClient.Reasoning = {}
|
||||
if (reasoningEffort) {
|
||||
reasoning.effort = reasoningEffort
|
||||
}
|
||||
if (reasoningSummary) {
|
||||
reasoning.summary = reasoningSummary
|
||||
}
|
||||
obj.reasoning = reasoning
|
||||
}
|
||||
|
||||
let parsedBaseOptions: any | undefined = undefined
|
||||
|
||||
if (baseOptions) {
|
||||
|
|
|
|||
|
|
@ -5,6 +5,7 @@ export class ChatOpenAI extends LangchainChatOpenAI implements IVisionChatModal
|
|||
configuredModel: string
|
||||
configuredMaxToken?: number
|
||||
multiModalOption: IMultiModalOption
|
||||
builtInTools: Record<string, any>[] = []
|
||||
id: string
|
||||
|
||||
constructor(id: string, fields?: ChatOpenAIFields) {
|
||||
|
|
@ -15,7 +16,7 @@ export class ChatOpenAI extends LangchainChatOpenAI implements IVisionChatModal
|
|||
}
|
||||
|
||||
revertToOriginalModel(): void {
|
||||
this.modelName = this.configuredModel
|
||||
this.model = this.configuredModel
|
||||
this.maxTokens = this.configuredMaxToken
|
||||
}
|
||||
|
||||
|
|
@ -26,4 +27,8 @@ export class ChatOpenAI extends LangchainChatOpenAI implements IVisionChatModal
|
|||
setVisionModel(): void {
|
||||
// pass
|
||||
}
|
||||
|
||||
addBuiltInTools(builtInTool: Record<string, any>): void {
|
||||
this.builtInTools.push(builtInTool)
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -137,6 +137,7 @@ class ChatOpenAICustom_ChatModels implements INode {
|
|||
temperature: parseFloat(temperature),
|
||||
modelName,
|
||||
openAIApiKey,
|
||||
apiKey: openAIApiKey,
|
||||
streaming: streaming ?? true
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -1,7 +1,8 @@
|
|||
import { ChatOpenAI, ChatOpenAIFields } from '@langchain/openai'
|
||||
import { ChatOpenAI as LangchainChatOpenAI, ChatOpenAIFields } from '@langchain/openai'
|
||||
import { BaseCache } from '@langchain/core/caches'
|
||||
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
|
||||
import { ICommonObject, IMultiModalOption, INode, INodeData, INodeParams } from '../../../src/Interface'
|
||||
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
|
||||
import { ChatOpenRouter } from './FlowiseChatOpenRouter'
|
||||
|
||||
class ChatOpenRouter_ChatModels implements INode {
|
||||
label: string
|
||||
|
|
@ -23,7 +24,7 @@ class ChatOpenRouter_ChatModels implements INode {
|
|||
this.icon = 'openRouter.svg'
|
||||
this.category = 'Chat Models'
|
||||
this.description = 'Wrapper around Open Router Inference API'
|
||||
this.baseClasses = [this.type, ...getBaseClasses(ChatOpenAI)]
|
||||
this.baseClasses = [this.type, ...getBaseClasses(LangchainChatOpenAI)]
|
||||
this.credential = {
|
||||
label: 'Connect Credential',
|
||||
name: 'credential',
|
||||
|
|
@ -114,6 +115,40 @@ class ChatOpenRouter_ChatModels implements INode {
|
|||
type: 'json',
|
||||
optional: true,
|
||||
additionalParams: true
|
||||
},
|
||||
{
|
||||
label: 'Allow Image Uploads',
|
||||
name: 'allowImageUploads',
|
||||
type: 'boolean',
|
||||
description:
|
||||
'Allow image input. Refer to the <a href="https://docs.flowiseai.com/using-flowise/uploads#image" target="_blank">docs</a> for more details.',
|
||||
default: false,
|
||||
optional: true
|
||||
},
|
||||
{
|
||||
label: 'Image Resolution',
|
||||
description: 'This parameter controls the resolution in which the model views the image.',
|
||||
name: 'imageResolution',
|
||||
type: 'options',
|
||||
options: [
|
||||
{
|
||||
label: 'Low',
|
||||
name: 'low'
|
||||
},
|
||||
{
|
||||
label: 'High',
|
||||
name: 'high'
|
||||
},
|
||||
{
|
||||
label: 'Auto',
|
||||
name: 'auto'
|
||||
}
|
||||
],
|
||||
default: 'low',
|
||||
optional: false,
|
||||
show: {
|
||||
allowImageUploads: true
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
|
|
@ -130,6 +165,8 @@ class ChatOpenRouter_ChatModels implements INode {
|
|||
const basePath = (nodeData.inputs?.basepath as string) || 'https://openrouter.ai/api/v1'
|
||||
const baseOptions = nodeData.inputs?.baseOptions
|
||||
const cache = nodeData.inputs?.cache as BaseCache
|
||||
const allowImageUploads = nodeData.inputs?.allowImageUploads as boolean
|
||||
const imageResolution = nodeData.inputs?.imageResolution as string
|
||||
|
||||
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
|
||||
const openRouterApiKey = getCredentialParam('openRouterApiKey', credentialData, nodeData)
|
||||
|
|
@ -138,6 +175,7 @@ class ChatOpenRouter_ChatModels implements INode {
|
|||
temperature: parseFloat(temperature),
|
||||
modelName,
|
||||
openAIApiKey: openRouterApiKey,
|
||||
apiKey: openRouterApiKey,
|
||||
streaming: streaming ?? true
|
||||
}
|
||||
|
||||
|
|
@ -154,7 +192,7 @@ class ChatOpenRouter_ChatModels implements INode {
|
|||
try {
|
||||
parsedBaseOptions = typeof baseOptions === 'object' ? baseOptions : JSON.parse(baseOptions)
|
||||
} catch (exception) {
|
||||
throw new Error("Invalid JSON in the ChatCerebras's BaseOptions: " + exception)
|
||||
throw new Error("Invalid JSON in the ChatOpenRouter's BaseOptions: " + exception)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -165,7 +203,15 @@ class ChatOpenRouter_ChatModels implements INode {
|
|||
}
|
||||
}
|
||||
|
||||
const model = new ChatOpenAI(obj)
|
||||
const multiModalOption: IMultiModalOption = {
|
||||
image: {
|
||||
allowImageUploads: allowImageUploads ?? false,
|
||||
imageResolution
|
||||
}
|
||||
}
|
||||
|
||||
const model = new ChatOpenRouter(nodeData.id, obj)
|
||||
model.setMultiModalOption(multiModalOption)
|
||||
return model
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -0,0 +1,29 @@
|
|||
import { ChatOpenAI as LangchainChatOpenAI, ChatOpenAIFields } from '@langchain/openai'
|
||||
import { IMultiModalOption, IVisionChatModal } from '../../../src'
|
||||
|
||||
export class ChatOpenRouter extends LangchainChatOpenAI implements IVisionChatModal {
|
||||
configuredModel: string
|
||||
configuredMaxToken?: number
|
||||
multiModalOption: IMultiModalOption
|
||||
id: string
|
||||
|
||||
constructor(id: string, fields?: ChatOpenAIFields) {
|
||||
super(fields)
|
||||
this.id = id
|
||||
this.configuredModel = fields?.modelName ?? ''
|
||||
this.configuredMaxToken = fields?.maxTokens
|
||||
}
|
||||
|
||||
revertToOriginalModel(): void {
|
||||
this.model = this.configuredModel
|
||||
this.maxTokens = this.configuredMaxToken
|
||||
}
|
||||
|
||||
setMultiModalOption(multiModalOption: IMultiModalOption): void {
|
||||
this.multiModalOption = multiModalOption
|
||||
}
|
||||
|
||||
setVisionModel(): void {
|
||||
// pass - OpenRouter models don't need model switching
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,123 @@
|
|||
import { BaseCache } from '@langchain/core/caches'
|
||||
import { ChatOpenAI, ChatOpenAIFields } from '@langchain/openai'
|
||||
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
|
||||
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
|
||||
|
||||
class ChatSambanova_ChatModels implements INode {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
type: string
|
||||
icon: string
|
||||
category: string
|
||||
description: string
|
||||
baseClasses: string[]
|
||||
credential: INodeParams
|
||||
inputs: INodeParams[]
|
||||
|
||||
constructor() {
|
||||
this.label = 'ChatSambanova'
|
||||
this.name = 'chatSambanova'
|
||||
this.version = 1.0
|
||||
this.type = 'ChatSambanova'
|
||||
this.icon = 'sambanova.png'
|
||||
this.category = 'Chat Models'
|
||||
this.description = 'Wrapper around Sambanova Chat Endpoints'
|
||||
this.baseClasses = [this.type, ...getBaseClasses(ChatOpenAI)]
|
||||
this.credential = {
|
||||
label: 'Connect Credential',
|
||||
name: 'credential',
|
||||
type: 'credential',
|
||||
credentialNames: ['sambanovaApi']
|
||||
}
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'Cache',
|
||||
name: 'cache',
|
||||
type: 'BaseCache',
|
||||
optional: true
|
||||
},
|
||||
{
|
||||
label: 'Model',
|
||||
name: 'modelName',
|
||||
type: 'string',
|
||||
default: 'Meta-Llama-3.3-70B-Instruct',
|
||||
placeholder: 'Meta-Llama-3.3-70B-Instruct'
|
||||
},
|
||||
{
|
||||
label: 'Temperature',
|
||||
name: 'temperature',
|
||||
type: 'number',
|
||||
step: 0.1,
|
||||
default: 0.9,
|
||||
optional: true
|
||||
},
|
||||
{
|
||||
label: 'Streaming',
|
||||
name: 'streaming',
|
||||
type: 'boolean',
|
||||
default: true,
|
||||
optional: true
|
||||
},
|
||||
{
|
||||
label: 'BasePath',
|
||||
name: 'basepath',
|
||||
type: 'string',
|
||||
optional: true,
|
||||
default: 'htps://api.sambanova.ai/v1',
|
||||
additionalParams: true
|
||||
},
|
||||
{
|
||||
label: 'BaseOptions',
|
||||
name: 'baseOptions',
|
||||
type: 'json',
|
||||
optional: true,
|
||||
additionalParams: true
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
|
||||
const cache = nodeData.inputs?.cache as BaseCache
|
||||
const temperature = nodeData.inputs?.temperature as string
|
||||
const modelName = nodeData.inputs?.modelName as string
|
||||
const streaming = nodeData.inputs?.streaming as boolean
|
||||
const basePath = nodeData.inputs?.basepath as string
|
||||
const baseOptions = nodeData.inputs?.baseOptions
|
||||
|
||||
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
|
||||
const sambanovaApiKey = getCredentialParam('sambanovaApiKey', credentialData, nodeData)
|
||||
|
||||
const obj: ChatOpenAIFields = {
|
||||
temperature: temperature ? parseFloat(temperature) : undefined,
|
||||
model: modelName,
|
||||
apiKey: sambanovaApiKey,
|
||||
openAIApiKey: sambanovaApiKey,
|
||||
streaming: streaming ?? true
|
||||
}
|
||||
|
||||
if (cache) obj.cache = cache
|
||||
|
||||
let parsedBaseOptions: any | undefined = undefined
|
||||
|
||||
if (baseOptions) {
|
||||
try {
|
||||
parsedBaseOptions = typeof baseOptions === 'object' ? baseOptions : JSON.parse(baseOptions)
|
||||
} catch (exception) {
|
||||
throw new Error("Invalid JSON in the ChatSambanova's BaseOptions: " + exception)
|
||||
}
|
||||
}
|
||||
|
||||
if (basePath || parsedBaseOptions) {
|
||||
obj.configuration = {
|
||||
baseURL: basePath,
|
||||
defaultHeaders: parsedBaseOptions
|
||||
}
|
||||
}
|
||||
|
||||
const model = new ChatOpenAI(obj)
|
||||
return model
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { nodeClass: ChatSambanova_ChatModels }
|
||||
Binary file not shown.
|
After Width: | Height: | Size: 12 KiB |
|
|
@ -1,7 +1,8 @@
|
|||
import { BaseCache } from '@langchain/core/caches'
|
||||
import { ChatXAI, ChatXAIInput } from '@langchain/xai'
|
||||
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
|
||||
import { ChatXAIInput } from '@langchain/xai'
|
||||
import { ICommonObject, IMultiModalOption, INode, INodeData, INodeParams } from '../../../src/Interface'
|
||||
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
|
||||
import { ChatXAI } from './FlowiseChatXAI'
|
||||
|
||||
class ChatXAI_ChatModels implements INode {
|
||||
label: string
|
||||
|
|
@ -18,7 +19,7 @@ class ChatXAI_ChatModels implements INode {
|
|||
constructor() {
|
||||
this.label = 'ChatXAI'
|
||||
this.name = 'chatXAI'
|
||||
this.version = 1.0
|
||||
this.version = 2.0
|
||||
this.type = 'ChatXAI'
|
||||
this.icon = 'xai.png'
|
||||
this.category = 'Chat Models'
|
||||
|
|
@ -74,6 +75,15 @@ class ChatXAI_ChatModels implements INode {
|
|||
step: 1,
|
||||
optional: true,
|
||||
additionalParams: true
|
||||
},
|
||||
{
|
||||
label: 'Allow Image Uploads',
|
||||
name: 'allowImageUploads',
|
||||
type: 'boolean',
|
||||
description:
|
||||
'Allow image input. Refer to the <a href="https://docs.flowiseai.com/using-flowise/uploads#image" target="_blank">docs</a> for more details.',
|
||||
default: false,
|
||||
optional: true
|
||||
}
|
||||
]
|
||||
}
|
||||
|
|
@ -84,6 +94,7 @@ class ChatXAI_ChatModels implements INode {
|
|||
const modelName = nodeData.inputs?.modelName as string
|
||||
const maxTokens = nodeData.inputs?.maxTokens as string
|
||||
const streaming = nodeData.inputs?.streaming as boolean
|
||||
const allowImageUploads = nodeData.inputs?.allowImageUploads as boolean
|
||||
|
||||
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
|
||||
const xaiApiKey = getCredentialParam('xaiApiKey', credentialData, nodeData)
|
||||
|
|
@ -97,7 +108,15 @@ class ChatXAI_ChatModels implements INode {
|
|||
if (cache) obj.cache = cache
|
||||
if (maxTokens) obj.maxTokens = parseInt(maxTokens, 10)
|
||||
|
||||
const model = new ChatXAI(obj)
|
||||
const multiModalOption: IMultiModalOption = {
|
||||
image: {
|
||||
allowImageUploads: allowImageUploads ?? false
|
||||
}
|
||||
}
|
||||
|
||||
const model = new ChatXAI(nodeData.id, obj)
|
||||
model.setMultiModalOption(multiModalOption)
|
||||
|
||||
return model
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -0,0 +1,29 @@
|
|||
import { ChatXAI as LCChatXAI, ChatXAIInput } from '@langchain/xai'
|
||||
import { IMultiModalOption, IVisionChatModal } from '../../../src'
|
||||
|
||||
export class ChatXAI extends LCChatXAI implements IVisionChatModal {
|
||||
configuredModel: string
|
||||
configuredMaxToken?: number
|
||||
multiModalOption: IMultiModalOption
|
||||
id: string
|
||||
|
||||
constructor(id: string, fields?: ChatXAIInput) {
|
||||
super(fields)
|
||||
this.id = id
|
||||
this.configuredModel = fields?.model ?? ''
|
||||
this.configuredMaxToken = fields?.maxTokens
|
||||
}
|
||||
|
||||
revertToOriginalModel(): void {
|
||||
this.modelName = this.configuredModel
|
||||
this.maxTokens = this.configuredMaxToken
|
||||
}
|
||||
|
||||
setMultiModalOption(multiModalOption: IMultiModalOption): void {
|
||||
this.multiModalOption = multiModalOption
|
||||
}
|
||||
|
||||
setVisionModel(): void {
|
||||
// pass
|
||||
}
|
||||
}
|
||||
|
|
@ -153,6 +153,7 @@ class Deepseek_ChatModels implements INode {
|
|||
temperature: parseFloat(temperature),
|
||||
modelName,
|
||||
openAIApiKey,
|
||||
apiKey: openAIApiKey,
|
||||
streaming: streaming ?? true
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -95,7 +95,7 @@ class API_DocumentLoaders implements INode {
|
|||
type: 'string',
|
||||
rows: 4,
|
||||
description:
|
||||
'Each document loader comes with a default set of metadata keys that are extracted from the document. You can use this field to omit some of the default metadata keys. The value should be a list of keys, seperated by comma. Use * to omit all metadata keys execept the ones you specify in the Additional Metadata field',
|
||||
'Each document loader comes with a default set of metadata keys that are extracted from the document. You can use this field to omit some of the default metadata keys. The value should be a list of keys, separated by comma. Use * to omit all metadata keys except the ones you specify in the Additional Metadata field',
|
||||
placeholder: 'key1, key2, key3.nestedKey1',
|
||||
optional: true,
|
||||
additionalParams: true
|
||||
|
|
|
|||
|
|
@ -1,5 +1,12 @@
|
|||
<svg width="32" height="32" viewBox="0 0 32 32" fill="none" xmlns="http://www.w3.org/2000/svg">
|
||||
<path d="M4.72492 9.35559L6.5 24L15 5.5L6.33616 7.15025C5.30261 7.34712 4.59832 8.3111 4.72492 9.35559Z" fill="#97D700" stroke="#97D700" stroke-width="2" stroke-linejoin="round"/>
|
||||
<path d="M26.6204 20.5943L26.5699 20.6161L19.5 4.5L24.0986 4.14626C25.163 4.06438 26.1041 4.83296 26.2365 5.8923L27.8137 18.5094C27.9241 19.3925 27.4377 20.2422 26.6204 20.5943Z" fill="#71C5E8" stroke="#71C5E8" stroke-width="2" stroke-linejoin="round"/>
|
||||
<path d="M17.5 10L9.5 28L23 22L17.5 10Z" fill="#FF9114" stroke="#FF9114" stroke-width="2" stroke-linejoin="round"/>
|
||||
<svg width="200" height="200" viewBox="0 0 200 200" fill="none" xmlns="http://www.w3.org/2000/svg">
|
||||
<g clip-path="url(#clip0_267_4154)">
|
||||
<path d="M114.695 0H196.97C198.643 0 200 1.35671 200 3.03031V128.766C200 131.778 196.083 132.945 194.434 130.425L112.159 4.68953C110.841 2.67412 112.287 0 114.695 0Z" fill="#246DFF"/>
|
||||
<path d="M85.3048 0H3.0303C1.35671 0 0 1.35671 0 3.03031V128.766C0 131.778 3.91698 132.945 5.566 130.425L87.8405 4.68953C89.1593 2.67412 87.7134 0 85.3048 0Z" fill="#20A34E"/>
|
||||
<path d="M98.5909 100.668L5.12683 194.835C3.22886 196.747 4.58334 200 7.27759 200H192.8C195.483 200 196.842 196.77 194.967 194.852L102.908 100.685C101.726 99.4749 99.7824 99.4676 98.5909 100.668Z" fill="#F86606"/>
|
||||
</g>
|
||||
<defs>
|
||||
<clipPath id="clip0_267_4154">
|
||||
<rect width="200" height="200" fill="white"/>
|
||||
</clipPath>
|
||||
</defs>
|
||||
</svg>
|
||||
|
|
|
|||
|
Before Width: | Height: | Size: 653 B After Width: | Height: | Size: 827 B |
|
|
@ -2,7 +2,7 @@ import { TextLoader } from 'langchain/document_loaders/fs/text'
|
|||
import Papa from 'papaparse'
|
||||
|
||||
type CSVLoaderOptions = {
|
||||
// Return specifific column from key (string) or index (integer)
|
||||
// Return specific column from key (string) or index (integer)
|
||||
column?: string | number
|
||||
// Force separator (default: auto detect)
|
||||
separator?: string
|
||||
|
|
|
|||
|
|
@ -1,7 +1,6 @@
|
|||
import { ICommonObject, IDatabaseEntity, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
|
||||
import { NodeVM } from '@flowiseai/nodevm'
|
||||
import { DataSource } from 'typeorm'
|
||||
import { availableDependencies, defaultAllowBuiltInDep, getVars, handleEscapeCharacters, prepareSandboxVars } from '../../../src/utils'
|
||||
import { getVars, handleEscapeCharacters, executeJavaScriptCode, createCodeExecutionSandbox } from '../../../src/utils'
|
||||
|
||||
class CustomDocumentLoader_DocumentLoaders implements INode {
|
||||
label: string
|
||||
|
|
@ -106,44 +105,22 @@ class CustomDocumentLoader_DocumentLoaders implements INode {
|
|||
}
|
||||
}
|
||||
|
||||
let sandbox: any = {
|
||||
$input: input,
|
||||
util: undefined,
|
||||
Symbol: undefined,
|
||||
child_process: undefined,
|
||||
fs: undefined,
|
||||
process: undefined
|
||||
}
|
||||
sandbox['$vars'] = prepareSandboxVars(variables)
|
||||
sandbox['$flow'] = flow
|
||||
// Create additional sandbox variables
|
||||
const additionalSandbox: ICommonObject = {}
|
||||
|
||||
// Add input variables to sandbox
|
||||
if (Object.keys(inputVars).length) {
|
||||
for (const item in inputVars) {
|
||||
sandbox[`$${item}`] = inputVars[item]
|
||||
additionalSandbox[`$${item}`] = inputVars[item]
|
||||
}
|
||||
}
|
||||
|
||||
const builtinDeps = process.env.TOOL_FUNCTION_BUILTIN_DEP
|
||||
? defaultAllowBuiltInDep.concat(process.env.TOOL_FUNCTION_BUILTIN_DEP.split(','))
|
||||
: defaultAllowBuiltInDep
|
||||
const externalDeps = process.env.TOOL_FUNCTION_EXTERNAL_DEP ? process.env.TOOL_FUNCTION_EXTERNAL_DEP.split(',') : []
|
||||
const deps = availableDependencies.concat(externalDeps)
|
||||
const sandbox = createCodeExecutionSandbox(input, variables, flow, additionalSandbox)
|
||||
|
||||
const nodeVMOptions = {
|
||||
console: 'inherit',
|
||||
sandbox,
|
||||
require: {
|
||||
external: { modules: deps },
|
||||
builtin: builtinDeps
|
||||
},
|
||||
eval: false,
|
||||
wasm: false,
|
||||
timeout: 10000
|
||||
} as any
|
||||
|
||||
const vm = new NodeVM(nodeVMOptions)
|
||||
try {
|
||||
const response = await vm.run(`module.exports = async function() {${javascriptFunction}}()`, __dirname)
|
||||
const response = await executeJavaScriptCode(javascriptFunction, sandbox, {
|
||||
libraries: ['axios']
|
||||
})
|
||||
|
||||
if (output === 'document' && Array.isArray(response)) {
|
||||
if (response.length === 0) return response
|
||||
|
|
|
|||
|
|
@ -32,7 +32,7 @@ class File_DocumentLoaders implements INode {
|
|||
this.type = 'Document'
|
||||
this.icon = 'file.svg'
|
||||
this.category = 'Document Loaders'
|
||||
this.description = `A generic file loader that can load txt, json, csv, docx, pdf, and other files`
|
||||
this.description = `A generic file loader that can load different file types`
|
||||
this.baseClasses = [this.type]
|
||||
this.inputs = [
|
||||
{
|
||||
|
|
@ -136,9 +136,10 @@ class File_DocumentLoaders implements INode {
|
|||
|
||||
let files: string[] = []
|
||||
const fileBlobs: { blob: Blob; ext: string }[] = []
|
||||
const processRaw = options.processRaw
|
||||
|
||||
//FILE-STORAGE::["CONTRIBUTING.md","LICENSE.md","README.md"]
|
||||
const totalFiles = getOverrideFileInputs(nodeData) || fileBase64
|
||||
const totalFiles = getOverrideFileInputs(nodeData, processRaw) || fileBase64
|
||||
if (totalFiles.startsWith('FILE-STORAGE::')) {
|
||||
const fileName = totalFiles.replace('FILE-STORAGE::', '')
|
||||
if (fileName.startsWith('[') && fileName.endsWith(']')) {
|
||||
|
|
@ -214,6 +215,11 @@ class File_DocumentLoaders implements INode {
|
|||
json: (blob) => new JSONLoader(blob),
|
||||
jsonl: (blob) => new JSONLinesLoader(blob, '/' + pointerName.trim()),
|
||||
txt: (blob) => new TextLoader(blob),
|
||||
html: (blob) => new TextLoader(blob),
|
||||
css: (blob) => new TextLoader(blob),
|
||||
js: (blob) => new TextLoader(blob),
|
||||
xml: (blob) => new TextLoader(blob),
|
||||
md: (blob) => new TextLoader(blob),
|
||||
csv: (blob) => new CSVLoader(blob),
|
||||
xls: (blob) => new LoadOfSheet(blob),
|
||||
xlsx: (blob) => new LoadOfSheet(blob),
|
||||
|
|
@ -293,7 +299,7 @@ class File_DocumentLoaders implements INode {
|
|||
}
|
||||
}
|
||||
|
||||
const getOverrideFileInputs = (nodeData: INodeData) => {
|
||||
const getOverrideFileInputs = (nodeData: INodeData, processRaw: boolean) => {
|
||||
const txtFileBase64 = nodeData.inputs?.txtFile as string
|
||||
const pdfFileBase64 = nodeData.inputs?.pdfFile as string
|
||||
const jsonFileBase64 = nodeData.inputs?.jsonFile as string
|
||||
|
|
@ -301,6 +307,8 @@ const getOverrideFileInputs = (nodeData: INodeData) => {
|
|||
const jsonlinesFileBase64 = nodeData.inputs?.jsonlinesFile as string
|
||||
const docxFileBase64 = nodeData.inputs?.docxFile as string
|
||||
const yamlFileBase64 = nodeData.inputs?.yamlFile as string
|
||||
const excelFileBase64 = nodeData.inputs?.excelFile as string
|
||||
const powerpointFileBase64 = nodeData.inputs?.powerpointFile as string
|
||||
|
||||
const removePrefix = (storageFile: string): string[] => {
|
||||
const fileName = storageFile.replace('FILE-STORAGE::', '')
|
||||
|
|
@ -333,6 +341,16 @@ const getOverrideFileInputs = (nodeData: INodeData) => {
|
|||
if (yamlFileBase64) {
|
||||
files.push(...removePrefix(yamlFileBase64))
|
||||
}
|
||||
if (excelFileBase64) {
|
||||
files.push(...removePrefix(excelFileBase64))
|
||||
}
|
||||
if (powerpointFileBase64) {
|
||||
files.push(...removePrefix(powerpointFileBase64))
|
||||
}
|
||||
|
||||
if (processRaw) {
|
||||
return files.length ? JSON.stringify(files) : ''
|
||||
}
|
||||
|
||||
return files.length ? `FILE-STORAGE::${JSON.stringify(files)}` : ''
|
||||
}
|
||||
|
|
|
|||
|
|
@ -10,6 +10,7 @@ import { DocxLoader } from '@langchain/community/document_loaders/fs/docx'
|
|||
import { LoadOfSheet } from '../MicrosoftExcel/ExcelLoader'
|
||||
import { PowerpointLoader } from '../MicrosoftPowerpoint/PowerpointLoader'
|
||||
import { handleEscapeCharacters } from '../../../src/utils'
|
||||
import { isPathTraversal } from '../../../src/validator'
|
||||
|
||||
class Folder_DocumentLoaders implements INode {
|
||||
label: string
|
||||
|
|
@ -125,6 +126,14 @@ class Folder_DocumentLoaders implements INode {
|
|||
const _omitMetadataKeys = nodeData.inputs?.omitMetadataKeys as string
|
||||
const output = nodeData.outputs?.output as string
|
||||
|
||||
if (!folderPath) {
|
||||
throw new Error('Folder path is required')
|
||||
}
|
||||
|
||||
if (isPathTraversal(folderPath)) {
|
||||
throw new Error('Invalid folder path: Path traversal detected. Please provide a safe folder path.')
|
||||
}
|
||||
|
||||
let omitMetadataKeys: string[] = []
|
||||
if (_omitMetadataKeys) {
|
||||
omitMetadataKeys = _omitMetadataKeys.split(',').map((key) => key.trim())
|
||||
|
|
|
|||
|
|
@ -47,7 +47,7 @@ class Json_DocumentLoaders implements INode {
|
|||
constructor() {
|
||||
this.label = 'Json File'
|
||||
this.name = 'jsonFile'
|
||||
this.version = 3.0
|
||||
this.version = 3.1
|
||||
this.type = 'Document'
|
||||
this.icon = 'json.svg'
|
||||
this.category = 'Document Loaders'
|
||||
|
|
@ -66,6 +66,14 @@ class Json_DocumentLoaders implements INode {
|
|||
type: 'TextSplitter',
|
||||
optional: true
|
||||
},
|
||||
{
|
||||
label: 'Separate by JSON Object (JSON Array)',
|
||||
name: 'separateByObject',
|
||||
type: 'boolean',
|
||||
description: 'If enabled and the file is a JSON Array, each JSON object will be extracted as a chunk',
|
||||
optional: true,
|
||||
additionalParams: true
|
||||
},
|
||||
{
|
||||
label: 'Pointers Extraction (separated by commas)',
|
||||
name: 'pointersName',
|
||||
|
|
@ -73,7 +81,10 @@ class Json_DocumentLoaders implements INode {
|
|||
description:
|
||||
'Ex: { "key": "value" }, Pointer Extraction = "key", "value" will be extracted as pageContent of the chunk. Use comma to separate multiple pointers',
|
||||
placeholder: 'key1, key2',
|
||||
optional: true
|
||||
optional: true,
|
||||
hide: {
|
||||
separateByObject: true
|
||||
}
|
||||
},
|
||||
{
|
||||
label: 'Additional Metadata',
|
||||
|
|
@ -122,6 +133,7 @@ class Json_DocumentLoaders implements INode {
|
|||
const pointersName = nodeData.inputs?.pointersName as string
|
||||
const metadata = nodeData.inputs?.metadata
|
||||
const _omitMetadataKeys = nodeData.inputs?.omitMetadataKeys as string
|
||||
const separateByObject = nodeData.inputs?.separateByObject as boolean
|
||||
const output = nodeData.outputs?.output as string
|
||||
|
||||
let omitMetadataKeys: string[] = []
|
||||
|
|
@ -153,7 +165,7 @@ class Json_DocumentLoaders implements INode {
|
|||
if (!file) continue
|
||||
const fileData = await getFileFromStorage(file, orgId, chatflowid)
|
||||
const blob = new Blob([fileData])
|
||||
const loader = new JSONLoader(blob, pointers.length != 0 ? pointers : undefined, metadata)
|
||||
const loader = new JSONLoader(blob, pointers.length != 0 ? pointers : undefined, metadata, separateByObject)
|
||||
|
||||
if (textSplitter) {
|
||||
let splittedDocs = await loader.load()
|
||||
|
|
@ -176,7 +188,7 @@ class Json_DocumentLoaders implements INode {
|
|||
splitDataURI.pop()
|
||||
const bf = Buffer.from(splitDataURI.pop() || '', 'base64')
|
||||
const blob = new Blob([bf])
|
||||
const loader = new JSONLoader(blob, pointers.length != 0 ? pointers : undefined, metadata)
|
||||
const loader = new JSONLoader(blob, pointers.length != 0 ? pointers : undefined, metadata, separateByObject)
|
||||
|
||||
if (textSplitter) {
|
||||
let splittedDocs = await loader.load()
|
||||
|
|
@ -306,13 +318,20 @@ class TextLoader extends BaseDocumentLoader {
|
|||
class JSONLoader extends TextLoader {
|
||||
public pointers: string[]
|
||||
private metadataMapping: Record<string, string>
|
||||
private separateByObject: boolean
|
||||
|
||||
constructor(filePathOrBlob: string | Blob, pointers: string | string[] = [], metadataMapping: Record<string, string> = {}) {
|
||||
constructor(
|
||||
filePathOrBlob: string | Blob,
|
||||
pointers: string | string[] = [],
|
||||
metadataMapping: Record<string, string> = {},
|
||||
separateByObject: boolean = false
|
||||
) {
|
||||
super(filePathOrBlob)
|
||||
this.pointers = Array.isArray(pointers) ? pointers : [pointers]
|
||||
if (metadataMapping) {
|
||||
this.metadataMapping = typeof metadataMapping === 'object' ? metadataMapping : JSON.parse(metadataMapping)
|
||||
}
|
||||
this.separateByObject = separateByObject
|
||||
}
|
||||
|
||||
protected async parse(raw: string): Promise<Document[]> {
|
||||
|
|
@ -323,14 +342,24 @@ class JSONLoader extends TextLoader {
|
|||
const jsonArray = Array.isArray(json) ? json : [json]
|
||||
|
||||
for (const item of jsonArray) {
|
||||
const content = this.extractContent(item)
|
||||
const metadata = this.extractMetadata(item)
|
||||
|
||||
for (const pageContent of content) {
|
||||
documents.push({
|
||||
pageContent,
|
||||
metadata
|
||||
})
|
||||
if (this.separateByObject) {
|
||||
if (typeof item === 'object' && item !== null && !Array.isArray(item)) {
|
||||
const metadata = this.extractMetadata(item)
|
||||
const pageContent = this.formatObjectAsKeyValue(item)
|
||||
documents.push({
|
||||
pageContent,
|
||||
metadata
|
||||
})
|
||||
}
|
||||
} else {
|
||||
const content = this.extractContent(item)
|
||||
const metadata = this.extractMetadata(item)
|
||||
for (const pageContent of content) {
|
||||
documents.push({
|
||||
pageContent,
|
||||
metadata
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -370,6 +399,30 @@ class JSONLoader extends TextLoader {
|
|||
return metadata
|
||||
}
|
||||
|
||||
/**
|
||||
* Formats a JSON object as readable key-value pairs
|
||||
*/
|
||||
private formatObjectAsKeyValue(obj: any, prefix: string = ''): string {
|
||||
const lines: string[] = []
|
||||
|
||||
for (const [key, value] of Object.entries(obj)) {
|
||||
const fullKey = prefix ? `${prefix}.${key}` : key
|
||||
|
||||
if (value === null || value === undefined) {
|
||||
lines.push(`${fullKey}: ${value}`)
|
||||
} else if (Array.isArray(value)) {
|
||||
lines.push(`${fullKey}: ${JSON.stringify(value)}`)
|
||||
} else if (typeof value === 'object') {
|
||||
// Recursively format nested objects
|
||||
lines.push(this.formatObjectAsKeyValue(value, fullKey))
|
||||
} else {
|
||||
lines.push(`${fullKey}: ${value}`)
|
||||
}
|
||||
}
|
||||
|
||||
return lines.join('\n')
|
||||
}
|
||||
|
||||
/**
|
||||
* If JSON pointers are specified, return all strings below any of them
|
||||
* and exclude all other nodes expect if they match a JSON pointer.
|
||||
|
|
|
|||
|
|
@ -0,0 +1,336 @@
|
|||
import { TextSplitter } from 'langchain/text_splitter'
|
||||
import { DocumentInterface } from '@langchain/core/documents'
|
||||
import { BaseDocumentLoader } from 'langchain/document_loaders/base'
|
||||
import { INode, INodeData, INodeParams, ICommonObject, INodeOutputsValue } from '../../../src/Interface'
|
||||
import { getCredentialData, getCredentialParam, handleEscapeCharacters } from '../../../src/utils'
|
||||
import axios, { AxiosResponse } from 'axios'
|
||||
|
||||
interface OxylabsDocument extends DocumentInterface {}
|
||||
|
||||
interface OxylabsResponse {
|
||||
results: Result[]
|
||||
job: Job
|
||||
}
|
||||
|
||||
interface Result {
|
||||
content: any
|
||||
created_at: string
|
||||
updated_at: string
|
||||
page: number
|
||||
url: string
|
||||
job_id: string
|
||||
is_render_forced: boolean
|
||||
status_code: number
|
||||
parser_type: string
|
||||
}
|
||||
|
||||
interface Job {
|
||||
callback_url: string
|
||||
client_id: number
|
||||
context: any
|
||||
created_at: string
|
||||
domain: string
|
||||
geo_location: any
|
||||
id: string
|
||||
limit: number
|
||||
locale: any
|
||||
pages: number
|
||||
parse: boolean
|
||||
parser_type: any
|
||||
parser_preset: any
|
||||
parsing_instructions: any
|
||||
browser_instructions: any
|
||||
render: any
|
||||
url: any
|
||||
query: string
|
||||
source: string
|
||||
start_page: number
|
||||
status: string
|
||||
storage_type: any
|
||||
storage_url: any
|
||||
subdomain: string
|
||||
content_encoding: string
|
||||
updated_at: string
|
||||
user_agent_type: string
|
||||
is_premium_domain: boolean
|
||||
}
|
||||
|
||||
interface OxylabsLoaderParameters {
|
||||
username: string
|
||||
password: string
|
||||
query: string
|
||||
source: string
|
||||
geo_location: string
|
||||
render: boolean
|
||||
parse: boolean
|
||||
user_agent_type: string
|
||||
}
|
||||
|
||||
export class OxylabsLoader extends BaseDocumentLoader {
|
||||
private params: OxylabsLoaderParameters
|
||||
|
||||
constructor(loaderParams: OxylabsLoaderParameters) {
|
||||
super()
|
||||
this.params = loaderParams
|
||||
}
|
||||
|
||||
private async sendAPIRequest<R>(params: any): Promise<AxiosResponse<R, any>> {
|
||||
params = Object.fromEntries(Object.entries(params).filter(([_, value]) => value !== null && value !== '' && value !== undefined))
|
||||
|
||||
const auth = Buffer.from(`${this.params.username}:${this.params.password}`).toString('base64')
|
||||
|
||||
const response = await axios.post<R>('https://realtime.oxylabs.io/v1/queries', params, {
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
'x-oxylabs-sdk': 'oxylabs-integration-flowise/1.0.0 (1.0.0; 64bit)',
|
||||
Authorization: `Basic ${auth}`
|
||||
}
|
||||
})
|
||||
|
||||
if (response.status >= 400) {
|
||||
throw new Error(`Oxylabs: Failed to call Oxylabs API: ${response.status}`)
|
||||
}
|
||||
|
||||
return response
|
||||
}
|
||||
|
||||
public async load(): Promise<DocumentInterface[]> {
|
||||
let isUrlSource = this.params.source == 'universal'
|
||||
|
||||
const params = {
|
||||
source: this.params.source,
|
||||
geo_location: this.params.geo_location,
|
||||
render: this.params.render ? 'html' : null,
|
||||
parse: this.params.parse,
|
||||
user_agent_type: this.params.user_agent_type,
|
||||
markdown: !this.params.parse,
|
||||
url: isUrlSource ? this.params.query : null,
|
||||
query: !isUrlSource ? this.params.query : null
|
||||
}
|
||||
|
||||
const response = await this.sendAPIRequest<OxylabsResponse>(params)
|
||||
|
||||
const docs: OxylabsDocument[] = response.data.results.map((result, index) => {
|
||||
const content = typeof result.content === 'string' ? result.content : JSON.stringify(result.content)
|
||||
return {
|
||||
id: `${response.data.job.id.toString()}-${index}`,
|
||||
pageContent: content,
|
||||
metadata: {}
|
||||
}
|
||||
})
|
||||
|
||||
return docs
|
||||
}
|
||||
}
|
||||
|
||||
class Oxylabs_DocumentLoaders implements INode {
|
||||
label: string
|
||||
name: string
|
||||
description: string
|
||||
type: string
|
||||
icon: string
|
||||
version: number
|
||||
category: string
|
||||
baseClasses: string[]
|
||||
inputs: INodeParams[]
|
||||
credential: INodeParams
|
||||
outputs: INodeOutputsValue[]
|
||||
|
||||
constructor() {
|
||||
this.label = 'Oxylabs'
|
||||
this.name = 'oxylabs'
|
||||
this.type = 'Document'
|
||||
this.icon = 'oxylabs.svg'
|
||||
this.version = 1.0
|
||||
this.category = 'Document Loaders'
|
||||
this.description = 'Extract data from URLs using Oxylabs'
|
||||
this.baseClasses = [this.type]
|
||||
this.credential = {
|
||||
label: 'Oxylabs API',
|
||||
name: 'credential',
|
||||
type: 'credential',
|
||||
credentialNames: ['oxylabsApi']
|
||||
}
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'Text Splitter',
|
||||
name: 'textSplitter',
|
||||
type: 'TextSplitter',
|
||||
optional: false
|
||||
},
|
||||
{
|
||||
label: 'Query',
|
||||
name: 'query',
|
||||
type: 'string',
|
||||
description: 'Website URL of query keyword.'
|
||||
},
|
||||
{
|
||||
label: 'Source',
|
||||
name: 'source',
|
||||
type: 'options',
|
||||
description: 'Target website to scrape.',
|
||||
options: [
|
||||
{
|
||||
label: 'Universal',
|
||||
name: 'universal'
|
||||
},
|
||||
{
|
||||
label: 'Google Search',
|
||||
name: 'google_search'
|
||||
},
|
||||
{
|
||||
label: 'Amazon Product',
|
||||
name: 'amazon_product'
|
||||
},
|
||||
{
|
||||
label: 'Amazon Search',
|
||||
name: 'amazon_search'
|
||||
}
|
||||
],
|
||||
default: 'universal'
|
||||
},
|
||||
{
|
||||
label: 'Geolocation',
|
||||
name: 'geo_location',
|
||||
type: 'string',
|
||||
description: "Sets the proxy's geo location to retrieve data. Check Oxylabs documentation for more details.",
|
||||
optional: true
|
||||
},
|
||||
{
|
||||
label: 'Render',
|
||||
name: 'render',
|
||||
type: 'boolean',
|
||||
description: 'Enables JavaScript rendering when set to true.',
|
||||
optional: true,
|
||||
default: false
|
||||
},
|
||||
{
|
||||
label: 'Parse',
|
||||
name: 'parse',
|
||||
type: 'boolean',
|
||||
description:
|
||||
"Returns parsed data when set to true, as long as a dedicated parser exists for the submitted URL's page type.",
|
||||
optional: true,
|
||||
default: false
|
||||
},
|
||||
{
|
||||
label: 'User Agent Type',
|
||||
name: 'user_agent_type',
|
||||
type: 'options',
|
||||
description: 'Device type and browser.',
|
||||
options: [
|
||||
{
|
||||
label: 'Desktop',
|
||||
name: 'desktop'
|
||||
},
|
||||
{
|
||||
label: 'Desktop Chrome',
|
||||
name: 'desktop_chrome'
|
||||
},
|
||||
{
|
||||
label: 'Desktop Edge',
|
||||
name: 'desktop_edge'
|
||||
},
|
||||
{
|
||||
label: 'Desktop Firefox',
|
||||
name: 'desktop_firefox'
|
||||
},
|
||||
{
|
||||
label: 'Desktop Opera',
|
||||
name: 'desktop_opera'
|
||||
},
|
||||
{
|
||||
label: 'Desktop Safari',
|
||||
name: 'desktop_safari'
|
||||
},
|
||||
{
|
||||
label: 'Mobile',
|
||||
name: 'mobile'
|
||||
},
|
||||
{
|
||||
label: 'Mobile Android',
|
||||
name: 'mobile_android'
|
||||
},
|
||||
{
|
||||
label: 'Mobile iOS',
|
||||
name: 'mobile_ios'
|
||||
},
|
||||
{
|
||||
label: 'Tablet',
|
||||
name: 'tablet'
|
||||
},
|
||||
{
|
||||
label: 'Tablet Android',
|
||||
name: 'tablet_android'
|
||||
},
|
||||
{
|
||||
label: 'Tablet iOS',
|
||||
name: 'tablet_ios'
|
||||
}
|
||||
],
|
||||
optional: true
|
||||
}
|
||||
]
|
||||
this.outputs = [
|
||||
{
|
||||
label: 'Document',
|
||||
name: 'document',
|
||||
description: 'Array of document objects containing metadata and pageContent',
|
||||
baseClasses: [...this.baseClasses, 'json']
|
||||
},
|
||||
{
|
||||
label: 'Text',
|
||||
name: 'text',
|
||||
description: 'Concatenated string from pageContent of documents',
|
||||
baseClasses: ['string', 'json']
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
|
||||
const query = nodeData.inputs?.query as string
|
||||
const textSplitter = nodeData.inputs?.textSplitter as TextSplitter
|
||||
const source = nodeData.inputs?.source as string
|
||||
const geo_location = nodeData.inputs?.geo_location as string
|
||||
const render = nodeData.inputs?.render as boolean
|
||||
const parse = nodeData.inputs?.parse as boolean
|
||||
const user_agent_type = nodeData.inputs?.user_agent_type as string
|
||||
|
||||
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
|
||||
const username = getCredentialParam('username', credentialData, nodeData)
|
||||
const password = getCredentialParam('password', credentialData, nodeData)
|
||||
|
||||
const output = nodeData.outputs?.output as string
|
||||
|
||||
const input: OxylabsLoaderParameters = {
|
||||
username,
|
||||
password,
|
||||
query,
|
||||
source,
|
||||
geo_location,
|
||||
render,
|
||||
parse,
|
||||
user_agent_type
|
||||
}
|
||||
|
||||
const loader = new OxylabsLoader(input)
|
||||
|
||||
let docs: OxylabsDocument[] = await loader.load()
|
||||
|
||||
if (textSplitter && docs.length > 0) {
|
||||
docs = await textSplitter.splitDocuments(docs)
|
||||
}
|
||||
|
||||
if (output === 'document') {
|
||||
return docs
|
||||
} else {
|
||||
let finaltext = ''
|
||||
for (const doc of docs) {
|
||||
finaltext += `${doc.pageContent}\n`
|
||||
}
|
||||
return handleEscapeCharacters(finaltext, false)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { nodeClass: Oxylabs_DocumentLoaders }
|
||||
|
|
@ -0,0 +1,4 @@
|
|||
<svg width="120" height="120" viewBox="0 0 120 120" fill="none" xmlns="http://www.w3.org/2000/svg">
|
||||
<path opacity="0.4" fill-rule="evenodd" clip-rule="evenodd" d="M75.136 59.9998L93.145 40.7848C97.129 36.5408 96.918 29.8708 92.674 25.8868C92.658 25.8728 92.644 25.8578 92.628 25.8438C88.342 21.8598 81.624 22.0898 77.623 26.3578L52.855 52.7878C49.049 56.8438 49.049 63.1578 52.855 67.2128L77.623 93.6418C81.623 97.9088 88.343 98.1418 92.628 94.1568C96.896 90.1978 97.147 83.5288 93.188 79.2618C93.173 79.2468 93.159 79.2308 93.145 79.2158L75.136 59.9998Z" fill="#23E6A8"/>
|
||||
<path fill-rule="evenodd" clip-rule="evenodd" d="M43.8646 59.9998L25.8546 40.7848C21.8716 36.5408 22.0826 29.8708 26.3266 25.8868C26.3416 25.8728 26.3566 25.8578 26.3726 25.8438C30.6586 21.8598 37.3766 22.0898 41.3776 26.3578L66.1446 52.7878C69.9506 56.8438 69.9506 63.1578 66.1446 67.2128L41.3776 93.6418C37.3776 97.9088 30.6576 98.1418 26.3726 94.1568C22.1046 90.1978 21.8536 83.5288 25.8126 79.2618C25.8266 79.2468 25.8416 79.2308 25.8546 79.2158L43.8646 59.9998Z" fill="#23E6A8"/>
|
||||
</svg>
|
||||
|
After Width: | Height: | Size: 1.0 KiB |
|
|
@ -1,14 +1,15 @@
|
|||
import { omit } from 'lodash'
|
||||
import { ICommonObject, IDocument, INode, INodeData, INodeParams } from '../../../src/Interface'
|
||||
import { TextSplitter } from 'langchain/text_splitter'
|
||||
import {
|
||||
Browser,
|
||||
Page,
|
||||
PlaywrightWebBaseLoader,
|
||||
PlaywrightWebBaseLoaderOptions
|
||||
} from '@langchain/community/document_loaders/web/playwright'
|
||||
import { Document } from '@langchain/core/documents'
|
||||
import { TextSplitter } from 'langchain/text_splitter'
|
||||
import { test } from 'linkifyjs'
|
||||
import { omit } from 'lodash'
|
||||
import { handleEscapeCharacters, INodeOutputsValue, webCrawl, xmlScrape } from '../../../src'
|
||||
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
|
||||
|
||||
class Playwright_DocumentLoaders implements INode {
|
||||
label: string
|
||||
|
|
@ -113,6 +114,14 @@ class Playwright_DocumentLoaders implements INode {
|
|||
additionalParams: true,
|
||||
description: 'CSS selectors like .div or #div'
|
||||
},
|
||||
{
|
||||
label: 'CSS Selector (Optional)',
|
||||
name: 'cssSelector',
|
||||
type: 'string',
|
||||
description: 'Only content inside this selector will be extracted. Leave empty to use the entire page body.',
|
||||
optional: true,
|
||||
additionalParams: true
|
||||
},
|
||||
{
|
||||
label: 'Additional Metadata',
|
||||
name: 'metadata',
|
||||
|
|
@ -155,8 +164,14 @@ class Playwright_DocumentLoaders implements INode {
|
|||
const relativeLinksMethod = nodeData.inputs?.relativeLinksMethod as string
|
||||
const selectedLinks = nodeData.inputs?.selectedLinks as string[]
|
||||
let limit = parseInt(nodeData.inputs?.limit as string)
|
||||
let waitUntilGoToOption = nodeData.inputs?.waitUntilGoToOption as 'load' | 'domcontentloaded' | 'networkidle' | 'commit' | undefined
|
||||
let waitForSelector = nodeData.inputs?.waitForSelector as string
|
||||
const waitUntilGoToOption = nodeData.inputs?.waitUntilGoToOption as
|
||||
| 'load'
|
||||
| 'domcontentloaded'
|
||||
| 'networkidle'
|
||||
| 'commit'
|
||||
| undefined
|
||||
const waitForSelector = nodeData.inputs?.waitForSelector as string
|
||||
const cssSelector = nodeData.inputs?.cssSelector as string
|
||||
const _omitMetadataKeys = nodeData.inputs?.omitMetadataKeys as string
|
||||
const output = nodeData.outputs?.output as string
|
||||
const orgId = options.orgId
|
||||
|
|
@ -172,13 +187,17 @@ class Playwright_DocumentLoaders implements INode {
|
|||
throw new Error('Invalid URL')
|
||||
}
|
||||
|
||||
async function playwrightLoader(url: string): Promise<any> {
|
||||
async function playwrightLoader(url: string): Promise<Document[] | undefined> {
|
||||
try {
|
||||
let docs = []
|
||||
|
||||
const executablePath = process.env.PLAYWRIGHT_EXECUTABLE_PATH
|
||||
|
||||
const config: PlaywrightWebBaseLoaderOptions = {
|
||||
launchOptions: {
|
||||
args: ['--no-sandbox'],
|
||||
headless: true
|
||||
headless: true,
|
||||
executablePath: executablePath
|
||||
}
|
||||
}
|
||||
if (waitUntilGoToOption) {
|
||||
|
|
@ -186,12 +205,22 @@ class Playwright_DocumentLoaders implements INode {
|
|||
waitUntil: waitUntilGoToOption
|
||||
}
|
||||
}
|
||||
if (waitForSelector) {
|
||||
if (cssSelector || waitForSelector) {
|
||||
config['evaluate'] = async (page: Page, _: Browser): Promise<string> => {
|
||||
await page.waitForSelector(waitForSelector)
|
||||
if (waitForSelector) {
|
||||
await page.waitForSelector(waitForSelector)
|
||||
}
|
||||
|
||||
const result = await page.evaluate(() => document.body.innerHTML)
|
||||
return result
|
||||
if (cssSelector) {
|
||||
const selectorHandle = await page.$(cssSelector)
|
||||
const result = await page.evaluate(
|
||||
(htmlSelection) => htmlSelection?.innerHTML ?? document.body.innerHTML,
|
||||
selectorHandle
|
||||
)
|
||||
return result
|
||||
} else {
|
||||
return await page.evaluate(() => document.body.innerHTML)
|
||||
}
|
||||
}
|
||||
}
|
||||
const loader = new PlaywrightWebBaseLoader(url, config)
|
||||
|
|
@ -208,7 +237,7 @@ class Playwright_DocumentLoaders implements INode {
|
|||
}
|
||||
}
|
||||
|
||||
let docs: IDocument[] = []
|
||||
let docs: Document[] = []
|
||||
if (relativeLinksMethod) {
|
||||
if (process.env.DEBUG === 'true') options.logger.info(`[${orgId}]: Start PlaywrightWebBaseLoader ${relativeLinksMethod}`)
|
||||
// if limit is 0 we don't want it to default to 10 so we check explicitly for null or undefined
|
||||
|
|
@ -225,7 +254,10 @@ class Playwright_DocumentLoaders implements INode {
|
|||
options.logger.info(`[${orgId}]: PlaywrightWebBaseLoader pages: ${JSON.stringify(pages)}, length: ${pages.length}`)
|
||||
if (!pages || pages.length === 0) throw new Error('No relative links found')
|
||||
for (const page of pages) {
|
||||
docs.push(...(await playwrightLoader(page)))
|
||||
const result = await playwrightLoader(page)
|
||||
if (result) {
|
||||
docs.push(...result)
|
||||
}
|
||||
}
|
||||
if (process.env.DEBUG === 'true') options.logger.info(`[${orgId}]: Finish PlaywrightWebBaseLoader ${relativeLinksMethod}`)
|
||||
} else if (selectedLinks && selectedLinks.length > 0) {
|
||||
|
|
@ -234,10 +266,16 @@ class Playwright_DocumentLoaders implements INode {
|
|||
`[${orgId}]: PlaywrightWebBaseLoader pages: ${JSON.stringify(selectedLinks)}, length: ${selectedLinks.length}`
|
||||
)
|
||||
for (const page of selectedLinks.slice(0, limit)) {
|
||||
docs.push(...(await playwrightLoader(page)))
|
||||
const result = await playwrightLoader(page)
|
||||
if (result) {
|
||||
docs.push(...result)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
docs = await playwrightLoader(url)
|
||||
const result = await playwrightLoader(url)
|
||||
if (result) {
|
||||
docs.push(...result)
|
||||
}
|
||||
}
|
||||
|
||||
if (metadata) {
|
||||
|
|
|
|||
|
|
@ -1,10 +1,11 @@
|
|||
import { omit } from 'lodash'
|
||||
import { ICommonObject, IDocument, INode, INodeData, INodeParams } from '../../../src/Interface'
|
||||
import { TextSplitter } from 'langchain/text_splitter'
|
||||
import { Browser, Page, PuppeteerWebBaseLoader, PuppeteerWebBaseLoaderOptions } from '@langchain/community/document_loaders/web/puppeteer'
|
||||
import { Document } from '@langchain/core/documents'
|
||||
import { TextSplitter } from 'langchain/text_splitter'
|
||||
import { test } from 'linkifyjs'
|
||||
import { handleEscapeCharacters, INodeOutputsValue, webCrawl, xmlScrape } from '../../../src'
|
||||
import { omit } from 'lodash'
|
||||
import { PuppeteerLifeCycleEvent } from 'puppeteer'
|
||||
import { handleEscapeCharacters, INodeOutputsValue, webCrawl, xmlScrape } from '../../../src'
|
||||
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
|
||||
|
||||
class Puppeteer_DocumentLoaders implements INode {
|
||||
label: string
|
||||
|
|
@ -109,6 +110,14 @@ class Puppeteer_DocumentLoaders implements INode {
|
|||
additionalParams: true,
|
||||
description: 'CSS selectors like .div or #div'
|
||||
},
|
||||
{
|
||||
label: 'CSS Selector (Optional)',
|
||||
name: 'cssSelector',
|
||||
type: 'string',
|
||||
description: 'Only content inside this selector will be extracted. Leave empty to use the entire page body.',
|
||||
optional: true,
|
||||
additionalParams: true
|
||||
},
|
||||
{
|
||||
label: 'Additional Metadata',
|
||||
name: 'metadata',
|
||||
|
|
@ -151,8 +160,9 @@ class Puppeteer_DocumentLoaders implements INode {
|
|||
const relativeLinksMethod = nodeData.inputs?.relativeLinksMethod as string
|
||||
const selectedLinks = nodeData.inputs?.selectedLinks as string[]
|
||||
let limit = parseInt(nodeData.inputs?.limit as string)
|
||||
let waitUntilGoToOption = nodeData.inputs?.waitUntilGoToOption as PuppeteerLifeCycleEvent
|
||||
let waitForSelector = nodeData.inputs?.waitForSelector as string
|
||||
const waitUntilGoToOption = nodeData.inputs?.waitUntilGoToOption as PuppeteerLifeCycleEvent
|
||||
const waitForSelector = nodeData.inputs?.waitForSelector as string
|
||||
const cssSelector = nodeData.inputs?.cssSelector as string
|
||||
const _omitMetadataKeys = nodeData.inputs?.omitMetadataKeys as string
|
||||
const output = nodeData.outputs?.output as string
|
||||
const orgId = options.orgId
|
||||
|
|
@ -168,13 +178,17 @@ class Puppeteer_DocumentLoaders implements INode {
|
|||
throw new Error('Invalid URL')
|
||||
}
|
||||
|
||||
async function puppeteerLoader(url: string): Promise<any> {
|
||||
async function puppeteerLoader(url: string): Promise<Document[] | undefined> {
|
||||
try {
|
||||
let docs = []
|
||||
let docs: Document[] = []
|
||||
|
||||
const executablePath = process.env.PUPPETEER_EXECUTABLE_PATH
|
||||
|
||||
const config: PuppeteerWebBaseLoaderOptions = {
|
||||
launchOptions: {
|
||||
args: ['--no-sandbox'],
|
||||
headless: 'new'
|
||||
headless: 'new',
|
||||
executablePath: executablePath
|
||||
}
|
||||
}
|
||||
if (waitUntilGoToOption) {
|
||||
|
|
@ -182,12 +196,22 @@ class Puppeteer_DocumentLoaders implements INode {
|
|||
waitUntil: waitUntilGoToOption
|
||||
}
|
||||
}
|
||||
if (waitForSelector) {
|
||||
if (cssSelector || waitForSelector) {
|
||||
config['evaluate'] = async (page: Page, _: Browser): Promise<string> => {
|
||||
await page.waitForSelector(waitForSelector)
|
||||
if (waitForSelector) {
|
||||
await page.waitForSelector(waitForSelector)
|
||||
}
|
||||
|
||||
const result = await page.evaluate(() => document.body.innerHTML)
|
||||
return result
|
||||
if (cssSelector) {
|
||||
const selectorHandle = await page.$(cssSelector)
|
||||
const result = await page.evaluate(
|
||||
(htmlSelection) => htmlSelection?.innerHTML ?? document.body.innerHTML,
|
||||
selectorHandle
|
||||
)
|
||||
return result
|
||||
} else {
|
||||
return await page.evaluate(() => document.body.innerHTML)
|
||||
}
|
||||
}
|
||||
}
|
||||
const loader = new PuppeteerWebBaseLoader(url, config)
|
||||
|
|
@ -204,7 +228,7 @@ class Puppeteer_DocumentLoaders implements INode {
|
|||
}
|
||||
}
|
||||
|
||||
let docs: IDocument[] = []
|
||||
let docs: Document[] = []
|
||||
if (relativeLinksMethod) {
|
||||
if (process.env.DEBUG === 'true') options.logger.info(`[${orgId}]: Start PuppeteerWebBaseLoader ${relativeLinksMethod}`)
|
||||
// if limit is 0 we don't want it to default to 10 so we check explicitly for null or undefined
|
||||
|
|
@ -221,7 +245,10 @@ class Puppeteer_DocumentLoaders implements INode {
|
|||
options.logger.info(`[${orgId}]: PuppeteerWebBaseLoader pages: ${JSON.stringify(pages)}, length: ${pages.length}`)
|
||||
if (!pages || pages.length === 0) throw new Error('No relative links found')
|
||||
for (const page of pages) {
|
||||
docs.push(...(await puppeteerLoader(page)))
|
||||
const result = await puppeteerLoader(page)
|
||||
if (result) {
|
||||
docs.push(...result)
|
||||
}
|
||||
}
|
||||
if (process.env.DEBUG === 'true') options.logger.info(`[${orgId}]: Finish PuppeteerWebBaseLoader ${relativeLinksMethod}`)
|
||||
} else if (selectedLinks && selectedLinks.length > 0) {
|
||||
|
|
@ -230,10 +257,16 @@ class Puppeteer_DocumentLoaders implements INode {
|
|||
`[${orgId}]: PuppeteerWebBaseLoader pages: ${JSON.stringify(selectedLinks)}, length: ${selectedLinks.length}`
|
||||
)
|
||||
for (const page of selectedLinks.slice(0, limit)) {
|
||||
docs.push(...(await puppeteerLoader(page)))
|
||||
const result = await puppeteerLoader(page)
|
||||
if (result) {
|
||||
docs.push(...result)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
docs = await puppeteerLoader(url)
|
||||
const result = await puppeteerLoader(url)
|
||||
if (result) {
|
||||
docs.push(...result)
|
||||
}
|
||||
}
|
||||
|
||||
if (metadata) {
|
||||
|
|
|
|||
|
|
@ -27,8 +27,6 @@ type Element = {
|
|||
}
|
||||
|
||||
export class UnstructuredLoader extends BaseDocumentLoader {
|
||||
public filePath: string
|
||||
|
||||
private apiUrl = process.env.UNSTRUCTURED_API_URL || 'https://api.unstructuredapp.io/general/v0/general'
|
||||
|
||||
private apiKey: string | undefined = process.env.UNSTRUCTURED_API_KEY
|
||||
|
|
@ -138,7 +136,7 @@ export class UnstructuredLoader extends BaseDocumentLoader {
|
|||
})
|
||||
|
||||
if (!response.ok) {
|
||||
throw new Error(`Failed to partition file ${this.filePath} with error ${response.status} and message ${await response.text()}`)
|
||||
throw new Error(`Failed to partition file with error ${response.status} and message ${await response.text()}`)
|
||||
}
|
||||
|
||||
const elements = await response.json()
|
||||
|
|
|
|||
|
|
@ -4,8 +4,7 @@ import {
|
|||
UnstructuredLoaderOptions,
|
||||
UnstructuredLoaderStrategy,
|
||||
SkipInferTableTypes,
|
||||
HiResModelName,
|
||||
UnstructuredLoader as LCUnstructuredLoader
|
||||
HiResModelName
|
||||
} from '@langchain/community/document_loaders/fs/unstructured'
|
||||
import { getCredentialData, getCredentialParam, handleEscapeCharacters } from '../../../src/utils'
|
||||
import { getFileFromStorage, INodeOutputsValue } from '../../../src'
|
||||
|
|
@ -41,17 +40,6 @@ class UnstructuredFile_DocumentLoaders implements INode {
|
|||
optional: true
|
||||
}
|
||||
this.inputs = [
|
||||
/** Deprecated
|
||||
{
|
||||
label: 'File Path',
|
||||
name: 'filePath',
|
||||
type: 'string',
|
||||
placeholder: '',
|
||||
optional: true,
|
||||
warning:
|
||||
'Use the File Upload instead of File path. If file is uploaded, this path is ignored. Path will be deprecated in future releases.'
|
||||
},
|
||||
*/
|
||||
{
|
||||
label: 'Files Upload',
|
||||
name: 'fileObject',
|
||||
|
|
@ -452,7 +440,6 @@ class UnstructuredFile_DocumentLoaders implements INode {
|
|||
}
|
||||
|
||||
async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
|
||||
const filePath = nodeData.inputs?.filePath as string
|
||||
const unstructuredAPIUrl = nodeData.inputs?.unstructuredAPIUrl as string
|
||||
const strategy = nodeData.inputs?.strategy as UnstructuredLoaderStrategy
|
||||
const encoding = nodeData.inputs?.encoding as string
|
||||
|
|
@ -557,12 +544,8 @@ class UnstructuredFile_DocumentLoaders implements INode {
|
|||
docs.push(...loaderDocs)
|
||||
}
|
||||
}
|
||||
} else if (filePath) {
|
||||
const loader = new LCUnstructuredLoader(filePath, obj)
|
||||
const loaderDocs = await loader.load()
|
||||
docs.push(...loaderDocs)
|
||||
} else {
|
||||
throw new Error('File path or File upload is required')
|
||||
throw new Error('File upload is required')
|
||||
}
|
||||
|
||||
if (metadata) {
|
||||
|
|
|
|||
|
|
@ -1,3 +1,6 @@
|
|||
/*
|
||||
* Uncomment this if you want to use the UnstructuredFolder to load a folder from the file system
|
||||
|
||||
import { omit } from 'lodash'
|
||||
import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
|
||||
import {
|
||||
|
|
@ -516,3 +519,4 @@ class UnstructuredFolder_DocumentLoaders implements INode {
|
|||
}
|
||||
|
||||
module.exports = { nodeClass: UnstructuredFolder_DocumentLoaders }
|
||||
*/
|
||||
|
|
|
|||
|
|
@ -96,7 +96,7 @@ class AWSBedrockEmbedding_Embeddings implements INode {
|
|||
{
|
||||
label: 'Max AWS API retries',
|
||||
name: 'maxRetries',
|
||||
description: 'This will limit the nubmer of AWS API for Titan model embeddings call retries. Used to avoid throttling.',
|
||||
description: 'This will limit the number of AWS API for Titan model embeddings call retries. Used to avoid throttling.',
|
||||
type: 'number',
|
||||
optional: true,
|
||||
default: 5,
|
||||
|
|
|
|||
|
|
@ -4,6 +4,25 @@ import { GoogleGenerativeAIEmbeddings, GoogleGenerativeAIEmbeddingsParams } from
|
|||
import { TaskType } from '@google/generative-ai'
|
||||
import { MODEL_TYPE, getModels } from '../../../src/modelLoader'
|
||||
|
||||
class GoogleGenerativeAIEmbeddingsWithStripNewLines extends GoogleGenerativeAIEmbeddings {
|
||||
stripNewLines: boolean
|
||||
|
||||
constructor(params: GoogleGenerativeAIEmbeddingsParams & { stripNewLines?: boolean }) {
|
||||
super(params)
|
||||
this.stripNewLines = params.stripNewLines ?? false
|
||||
}
|
||||
|
||||
async embedDocuments(texts: string[]): Promise<number[][]> {
|
||||
const processedTexts = this.stripNewLines ? texts.map((text) => text.replace(/\n/g, ' ')) : texts
|
||||
return super.embedDocuments(processedTexts)
|
||||
}
|
||||
|
||||
async embedQuery(text: string): Promise<number[]> {
|
||||
const processedText = this.stripNewLines ? text.replace(/\n/g, ' ') : text
|
||||
return super.embedQuery(processedText)
|
||||
}
|
||||
}
|
||||
|
||||
class GoogleGenerativeAIEmbedding_Embeddings implements INode {
|
||||
label: string
|
||||
name: string
|
||||
|
|
@ -24,7 +43,7 @@ class GoogleGenerativeAIEmbedding_Embeddings implements INode {
|
|||
this.icon = 'GoogleGemini.svg'
|
||||
this.category = 'Embeddings'
|
||||
this.description = 'Google Generative API to generate embeddings for a given text'
|
||||
this.baseClasses = [this.type, ...getBaseClasses(GoogleGenerativeAIEmbeddings)]
|
||||
this.baseClasses = [this.type, ...getBaseClasses(GoogleGenerativeAIEmbeddingsWithStripNewLines)]
|
||||
this.credential = {
|
||||
label: 'Connect Credential',
|
||||
name: 'credential',
|
||||
|
|
@ -55,6 +74,14 @@ class GoogleGenerativeAIEmbedding_Embeddings implements INode {
|
|||
{ label: 'CLUSTERING', name: 'CLUSTERING' }
|
||||
],
|
||||
default: 'TASK_TYPE_UNSPECIFIED'
|
||||
},
|
||||
{
|
||||
label: 'Strip New Lines',
|
||||
name: 'stripNewLines',
|
||||
type: 'boolean',
|
||||
optional: true,
|
||||
additionalParams: true,
|
||||
description: 'Remove new lines from input text before embedding to reduce token count'
|
||||
}
|
||||
]
|
||||
}
|
||||
|
|
@ -71,6 +98,7 @@ class GoogleGenerativeAIEmbedding_Embeddings implements INode {
|
|||
const modelName = nodeData.inputs?.modelName as string
|
||||
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
|
||||
const apiKey = getCredentialParam('googleGenerativeAPIKey', credentialData, nodeData)
|
||||
const stripNewLines = nodeData.inputs?.stripNewLines as boolean
|
||||
|
||||
let taskType: TaskType
|
||||
switch (nodeData.inputs?.tasktype as string) {
|
||||
|
|
@ -93,13 +121,14 @@ class GoogleGenerativeAIEmbedding_Embeddings implements INode {
|
|||
taskType = TaskType.TASK_TYPE_UNSPECIFIED
|
||||
break
|
||||
}
|
||||
const obj: GoogleGenerativeAIEmbeddingsParams = {
|
||||
const obj: GoogleGenerativeAIEmbeddingsParams & { stripNewLines?: boolean } = {
|
||||
apiKey: apiKey,
|
||||
modelName: modelName,
|
||||
taskType: taskType
|
||||
taskType: taskType,
|
||||
stripNewLines
|
||||
}
|
||||
|
||||
const model = new GoogleGenerativeAIEmbeddings(obj)
|
||||
const model = new GoogleGenerativeAIEmbeddingsWithStripNewLines(obj)
|
||||
return model
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,7 +1,27 @@
|
|||
import { VertexAIEmbeddings, GoogleVertexAIEmbeddingsInput } from '@langchain/google-vertexai'
|
||||
import { GoogleVertexAIEmbeddingsInput, VertexAIEmbeddings } from '@langchain/google-vertexai'
|
||||
import { buildGoogleCredentials } from '../../../src/google-utils'
|
||||
import { ICommonObject, INode, INodeData, INodeOptionsValue, INodeParams } from '../../../src/Interface'
|
||||
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
|
||||
import { MODEL_TYPE, getModels } from '../../../src/modelLoader'
|
||||
import { MODEL_TYPE, getModels, getRegions } from '../../../src/modelLoader'
|
||||
import { getBaseClasses } from '../../../src/utils'
|
||||
|
||||
class VertexAIEmbeddingsWithStripNewLines extends VertexAIEmbeddings {
|
||||
stripNewLines: boolean
|
||||
|
||||
constructor(params: GoogleVertexAIEmbeddingsInput & { stripNewLines?: boolean }) {
|
||||
super(params)
|
||||
this.stripNewLines = params.stripNewLines ?? false
|
||||
}
|
||||
|
||||
async embedDocuments(texts: string[]): Promise<number[][]> {
|
||||
const processedTexts = this.stripNewLines ? texts.map((text) => text.replace(/\n/g, ' ')) : texts
|
||||
return super.embedDocuments(processedTexts)
|
||||
}
|
||||
|
||||
async embedQuery(text: string): Promise<number[]> {
|
||||
const processedText = this.stripNewLines ? text.replace(/\n/g, ' ') : text
|
||||
return super.embedQuery(processedText)
|
||||
}
|
||||
}
|
||||
|
||||
class GoogleVertexAIEmbedding_Embeddings implements INode {
|
||||
label: string
|
||||
|
|
@ -18,12 +38,12 @@ class GoogleVertexAIEmbedding_Embeddings implements INode {
|
|||
constructor() {
|
||||
this.label = 'GoogleVertexAI Embeddings'
|
||||
this.name = 'googlevertexaiEmbeddings'
|
||||
this.version = 2.0
|
||||
this.version = 2.1
|
||||
this.type = 'GoogleVertexAIEmbeddings'
|
||||
this.icon = 'GoogleVertex.svg'
|
||||
this.category = 'Embeddings'
|
||||
this.description = 'Google vertexAI API to generate embeddings for a given text'
|
||||
this.baseClasses = [this.type, ...getBaseClasses(VertexAIEmbeddings)]
|
||||
this.baseClasses = [this.type, ...getBaseClasses(VertexAIEmbeddingsWithStripNewLines)]
|
||||
this.credential = {
|
||||
label: 'Connect Credential',
|
||||
name: 'credential',
|
||||
|
|
@ -39,7 +59,23 @@ class GoogleVertexAIEmbedding_Embeddings implements INode {
|
|||
name: 'modelName',
|
||||
type: 'asyncOptions',
|
||||
loadMethod: 'listModels',
|
||||
default: 'textembedding-gecko@001'
|
||||
default: 'text-embedding-004'
|
||||
},
|
||||
{
|
||||
label: 'Region',
|
||||
description: 'Region to use for the model.',
|
||||
name: 'region',
|
||||
type: 'asyncOptions',
|
||||
loadMethod: 'listRegions',
|
||||
optional: true
|
||||
},
|
||||
{
|
||||
label: 'Strip New Lines',
|
||||
name: 'stripNewLines',
|
||||
type: 'boolean',
|
||||
optional: true,
|
||||
additionalParams: true,
|
||||
description: 'Remove new lines from input text before embedding to reduce token count'
|
||||
}
|
||||
]
|
||||
}
|
||||
|
|
@ -48,38 +84,28 @@ class GoogleVertexAIEmbedding_Embeddings implements INode {
|
|||
loadMethods = {
|
||||
async listModels(): Promise<INodeOptionsValue[]> {
|
||||
return await getModels(MODEL_TYPE.EMBEDDING, 'googlevertexaiEmbeddings')
|
||||
},
|
||||
async listRegions(): Promise<INodeOptionsValue[]> {
|
||||
return await getRegions(MODEL_TYPE.EMBEDDING, 'googlevertexaiEmbeddings')
|
||||
}
|
||||
}
|
||||
|
||||
async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
|
||||
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
|
||||
const modelName = nodeData.inputs?.modelName as string
|
||||
const googleApplicationCredentialFilePath = getCredentialParam('googleApplicationCredentialFilePath', credentialData, nodeData)
|
||||
const googleApplicationCredential = getCredentialParam('googleApplicationCredential', credentialData, nodeData)
|
||||
const projectID = getCredentialParam('projectID', credentialData, nodeData)
|
||||
const region = nodeData.inputs?.region as string
|
||||
const stripNewLines = nodeData.inputs?.stripNewLines as boolean
|
||||
|
||||
const authOptions: any = {}
|
||||
if (Object.keys(credentialData).length !== 0) {
|
||||
if (!googleApplicationCredentialFilePath && !googleApplicationCredential)
|
||||
throw new Error('Please specify your Google Application Credential')
|
||||
if (!googleApplicationCredentialFilePath && !googleApplicationCredential)
|
||||
throw new Error(
|
||||
'Error: More than one component has been inputted. Please use only one of the following: Google Application Credential File Path or Google Credential JSON Object'
|
||||
)
|
||||
|
||||
if (googleApplicationCredentialFilePath && !googleApplicationCredential)
|
||||
authOptions.keyFile = googleApplicationCredentialFilePath
|
||||
else if (!googleApplicationCredentialFilePath && googleApplicationCredential)
|
||||
authOptions.credentials = JSON.parse(googleApplicationCredential)
|
||||
|
||||
if (projectID) authOptions.projectId = projectID
|
||||
const obj: GoogleVertexAIEmbeddingsInput & { stripNewLines?: boolean } = {
|
||||
model: modelName,
|
||||
stripNewLines
|
||||
}
|
||||
const obj: GoogleVertexAIEmbeddingsInput = {
|
||||
model: modelName
|
||||
}
|
||||
if (Object.keys(authOptions).length !== 0) obj.authOptions = authOptions
|
||||
|
||||
const model = new VertexAIEmbeddings(obj)
|
||||
const authOptions = await buildGoogleCredentials(nodeData, options)
|
||||
if (authOptions && Object.keys(authOptions).length !== 0) obj.authOptions = authOptions
|
||||
|
||||
if (region) obj.location = region
|
||||
|
||||
const model = new VertexAIEmbeddingsWithStripNewLines(obj)
|
||||
return model
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -23,24 +23,22 @@ export class HuggingFaceInferenceEmbeddings extends Embeddings implements Huggin
|
|||
this.model = fields?.model ?? 'sentence-transformers/distilbert-base-nli-mean-tokens'
|
||||
this.apiKey = fields?.apiKey ?? getEnvironmentVariable('HUGGINGFACEHUB_API_KEY')
|
||||
this.endpoint = fields?.endpoint ?? ''
|
||||
this.client = new HfInference(this.apiKey)
|
||||
if (this.endpoint) this.client.endpoint(this.endpoint)
|
||||
const hf = new HfInference(this.apiKey)
|
||||
// v4 uses Inference Providers by default; only override if custom endpoint provided
|
||||
this.client = this.endpoint ? hf.endpoint(this.endpoint) : hf
|
||||
}
|
||||
|
||||
async _embed(texts: string[]): Promise<number[][]> {
|
||||
// replace newlines, which can negatively affect performance.
|
||||
const clean = texts.map((text) => text.replace(/\n/g, ' '))
|
||||
const hf = new HfInference(this.apiKey)
|
||||
const obj: any = {
|
||||
inputs: clean
|
||||
}
|
||||
if (this.endpoint) {
|
||||
hf.endpoint(this.endpoint)
|
||||
} else {
|
||||
if (!this.endpoint) {
|
||||
obj.model = this.model
|
||||
}
|
||||
|
||||
const res = await this.caller.callWithOptions({}, hf.featureExtraction.bind(hf), obj)
|
||||
const res = await this.caller.callWithOptions({}, this.client.featureExtraction.bind(this.client), obj)
|
||||
return res as number[][]
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -39,7 +39,7 @@ class SubQuestionQueryEngine_LlamaIndex implements INode {
|
|||
this.icon = 'subQueryEngine.svg'
|
||||
this.category = 'Engine'
|
||||
this.description =
|
||||
'Breaks complex query into sub questions for each relevant data source, then gather all the intermediate reponses and synthesizes a final response'
|
||||
'Breaks complex query into sub questions for each relevant data source, then gather all the intermediate responses and synthesizes a final response'
|
||||
this.baseClasses = [this.type, 'BaseQueryEngine']
|
||||
this.tags = ['LlamaIndex']
|
||||
this.inputs = [
|
||||
|
|
|
|||
|
|
@ -1,8 +1,9 @@
|
|||
import { BaseCache } from '@langchain/core/caches'
|
||||
import { VertexAI, VertexAIInput } from '@langchain/google-vertexai'
|
||||
import { ICommonObject, INode, INodeData, INodeOptionsValue, INodeParams } from '../../../src/Interface'
|
||||
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
|
||||
import { getBaseClasses } from '../../../src/utils'
|
||||
import { getModels, MODEL_TYPE } from '../../../src/modelLoader'
|
||||
import { buildGoogleCredentials } from '../../../src/google-utils'
|
||||
|
||||
class GoogleVertexAI_LLMs implements INode {
|
||||
label: string
|
||||
|
|
@ -83,28 +84,6 @@ class GoogleVertexAI_LLMs implements INode {
|
|||
}
|
||||
|
||||
async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
|
||||
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
|
||||
const googleApplicationCredentialFilePath = getCredentialParam('googleApplicationCredentialFilePath', credentialData, nodeData)
|
||||
const googleApplicationCredential = getCredentialParam('googleApplicationCredential', credentialData, nodeData)
|
||||
const projectID = getCredentialParam('projectID', credentialData, nodeData)
|
||||
|
||||
const authOptions: any = {}
|
||||
if (Object.keys(credentialData).length !== 0) {
|
||||
if (!googleApplicationCredentialFilePath && !googleApplicationCredential)
|
||||
throw new Error('Please specify your Google Application Credential')
|
||||
if (!googleApplicationCredentialFilePath && !googleApplicationCredential)
|
||||
throw new Error(
|
||||
'Error: More than one component has been inputted. Please use only one of the following: Google Application Credential File Path or Google Credential JSON Object'
|
||||
)
|
||||
|
||||
if (googleApplicationCredentialFilePath && !googleApplicationCredential)
|
||||
authOptions.keyFile = googleApplicationCredentialFilePath
|
||||
else if (!googleApplicationCredentialFilePath && googleApplicationCredential)
|
||||
authOptions.credentials = JSON.parse(googleApplicationCredential)
|
||||
|
||||
if (projectID) authOptions.projectId = projectID
|
||||
}
|
||||
|
||||
const temperature = nodeData.inputs?.temperature as string
|
||||
const modelName = nodeData.inputs?.modelName as string
|
||||
const maxOutputTokens = nodeData.inputs?.maxOutputTokens as string
|
||||
|
|
@ -115,7 +94,9 @@ class GoogleVertexAI_LLMs implements INode {
|
|||
temperature: parseFloat(temperature),
|
||||
model: modelName
|
||||
}
|
||||
if (Object.keys(authOptions).length !== 0) obj.authOptions = authOptions
|
||||
|
||||
const authOptions = await buildGoogleCredentials(nodeData, options)
|
||||
if (authOptions && Object.keys(authOptions).length !== 0) obj.authOptions = authOptions
|
||||
|
||||
if (maxOutputTokens) obj.maxOutputTokens = parseInt(maxOutputTokens, 10)
|
||||
if (topP) obj.topP = parseFloat(topP)
|
||||
|
|
|
|||
|
|
@ -78,6 +78,8 @@ export class HuggingFaceInference extends LLM implements HFInput {
|
|||
async _call(prompt: string, options: this['ParsedCallOptions']): Promise<string> {
|
||||
const { HfInference } = await HuggingFaceInference.imports()
|
||||
const hf = new HfInference(this.apiKey)
|
||||
// v4 uses Inference Providers by default; only override if custom endpoint provided
|
||||
const hfClient = this.endpoint ? hf.endpoint(this.endpoint) : hf
|
||||
const obj: any = {
|
||||
parameters: {
|
||||
// make it behave similar to openai, returning only the generated text
|
||||
|
|
@ -90,12 +92,10 @@ export class HuggingFaceInference extends LLM implements HFInput {
|
|||
},
|
||||
inputs: prompt
|
||||
}
|
||||
if (this.endpoint) {
|
||||
hf.endpoint(this.endpoint)
|
||||
} else {
|
||||
if (!this.endpoint) {
|
||||
obj.model = this.model
|
||||
}
|
||||
const res = await this.caller.callWithOptions({ signal: options.signal }, hf.textGeneration.bind(hf), obj)
|
||||
const res = await this.caller.callWithOptions({ signal: options.signal }, hfClient.textGeneration.bind(hfClient), obj)
|
||||
return res.generated_text
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -0,0 +1,71 @@
|
|||
import { getBaseClasses, getCredentialData, getCredentialParam, ICommonObject, INode, INodeData, INodeParams } from '../../../src'
|
||||
import { OpenAI } from '@langchain/openai'
|
||||
import { BaseCache } from '@langchain/core/caches'
|
||||
|
||||
class Sambanova_LLMs implements INode {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
type: string
|
||||
icon: string
|
||||
category: string
|
||||
description: string
|
||||
baseClasses: string[]
|
||||
credential: INodeParams
|
||||
inputs: INodeParams[]
|
||||
|
||||
constructor() {
|
||||
this.label = 'Sambanova'
|
||||
this.name = 'sambanova'
|
||||
this.version = 1.0
|
||||
this.type = 'Sambanova'
|
||||
this.icon = 'sambanova.png'
|
||||
this.category = 'LLMs'
|
||||
this.description = 'Wrapper around Sambanova API for large language models'
|
||||
this.baseClasses = [this.type, ...getBaseClasses(OpenAI)]
|
||||
this.credential = {
|
||||
label: 'Connect Credential',
|
||||
name: 'credential',
|
||||
type: 'credential',
|
||||
credentialNames: ['sambanovaApi']
|
||||
}
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'Cache',
|
||||
name: 'cache',
|
||||
type: 'BaseCache',
|
||||
optional: true
|
||||
},
|
||||
{
|
||||
label: 'Model Name',
|
||||
name: 'modelName',
|
||||
type: 'string',
|
||||
default: 'Meta-Llama-3.3-70B-Instruct',
|
||||
description: 'For more details see https://docs.sambanova.ai/cloud/docs/get-started/supported-models',
|
||||
optional: true
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
|
||||
const cache = nodeData.inputs?.cache as BaseCache
|
||||
const modelName = nodeData.inputs?.modelName as string
|
||||
|
||||
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
|
||||
const sambanovaKey = getCredentialParam('sambanovaApiKey', credentialData, nodeData)
|
||||
|
||||
const obj: any = {
|
||||
model: modelName,
|
||||
configuration: {
|
||||
baseURL: 'https://api.sambanova.ai/v1',
|
||||
apiKey: sambanovaKey
|
||||
}
|
||||
}
|
||||
if (cache) obj.cache = cache
|
||||
|
||||
const sambanova = new OpenAI(obj)
|
||||
return sambanova
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { nodeClass: Sambanova_LLMs }
|
||||
Binary file not shown.
|
After Width: | Height: | Size: 12 KiB |
|
|
@ -21,6 +21,7 @@ import { ChatOpenAI } from '../../chatmodels/ChatOpenAI/FlowiseChatOpenAI'
|
|||
import { ChatAnthropic } from '../../chatmodels/ChatAnthropic/FlowiseChatAnthropic'
|
||||
import { addImagesToMessages, llmSupportsVision } from '../../../src/multiModalUtils'
|
||||
import { ChatGoogleGenerativeAI } from '../../chatmodels/ChatGoogleGenerativeAI/FlowiseChatGoogleGenerativeAI'
|
||||
import { AzureChatOpenAI } from '../../chatmodels/AzureChatOpenAI/FlowiseAzureChatOpenAI'
|
||||
|
||||
const sysPrompt = `You are a supervisor tasked with managing a conversation between the following workers: {team_members}.
|
||||
Given the following user request, respond with the worker to act next.
|
||||
|
|
@ -242,7 +243,7 @@ class Supervisor_MultiAgents implements INode {
|
|||
}
|
||||
}
|
||||
})
|
||||
} else if (llm instanceof ChatOpenAI) {
|
||||
} else if (llm instanceof ChatOpenAI || llm instanceof AzureChatOpenAI) {
|
||||
let prompt = ChatPromptTemplate.fromMessages([
|
||||
['system', systemPrompt],
|
||||
new MessagesPlaceholder('messages'),
|
||||
|
|
|
|||
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue