Compare commits

400 commits, comparing flowise@2.… with main
@@ -1,37 +0,0 @@
---
name: Bug report
about: Create a report to help us improve
title: '[BUG]'
labels: ''
assignees: ''
---

**Describe the bug**
A clear and concise description of what the bug is.

**To Reproduce**
Steps to reproduce the behavior:

1. Go to '...'
2. Click on '....'
3. Scroll down to '....'
4. See error

**Expected behavior**
A clear and concise description of what you expected to happen.

**Screenshots**
If applicable, add screenshots to help explain your problem.

**Flow**
If applicable, add exported flow in order to help replicating the problem.

**Setup**

- Installation [e.g. docker, `npx flowise start`, `pnpm start`]
- Flowise Version [e.g. 1.2.11]
- OS: [e.g. macOS, Windows, Linux]
- Browser [e.g. chrome, safari]

**Additional context**
Add any other context about the problem here.
@@ -0,0 +1,101 @@
name: Bug Report
description: File a bug report to help us improve
labels: ['bug']
assignees: []
body:
    - type: markdown
      attributes:
          value: |
              Make sure to have a proper title and description.

    - type: textarea
      id: bug-description
      attributes:
          label: Describe the bug
          description: A clear and concise description of what the bug is.
          placeholder: Tell us what you see!
      validations:
          required: true

    - type: textarea
      id: reproduce
      attributes:
          label: To Reproduce
          description: Steps to reproduce the behavior
          placeholder: |
              1. Go to '...'
              2. Click on '....'
              3. Scroll down to '....'
              4. See error
      validations:
          required: true

    - type: textarea
      id: expected
      attributes:
          label: Expected behavior
          description: A clear and concise description of what you expected to happen.
      validations:
          required: true

    - type: textarea
      id: screenshots
      attributes:
          label: Screenshots
          description: If applicable, add screenshots to help explain your problem.
          placeholder: Drag and drop or paste screenshots here

    - type: textarea
      id: flow
      attributes:
          label: Flow
          description: If applicable, add exported flow in order to help replicating the problem.
          placeholder: Paste your exported flow here

    - type: dropdown
      id: method
      attributes:
          label: Use Method
          description: How did you use Flowise?
          options:
              - Flowise Cloud
              - Docker
              - npx flowise start
              - pnpm start

    - type: input
      id: version
      attributes:
          label: Flowise Version
          description: What version of Flowise are you running?
          placeholder: e.g., 1.2.11

    - type: dropdown
      id: os
      attributes:
          label: Operating System
          description: What operating system are you using?
          options:
              - Windows
              - macOS
              - Linux
              - Other

    - type: dropdown
      id: browser
      attributes:
          label: Browser
          description: What browser are you using?
          options:
              - Chrome
              - Firefox
              - Safari
              - Edge
              - Other

    - type: textarea
      id: context
      attributes:
          label: Additional context
          description: Add any other context about the problem here.
          placeholder: Any additional information that might be helpful
@@ -1,13 +0,0 @@
---
name: Feature request
about: Suggest an idea for this project
title: '[FEATURE]'
labels: ''
assignees: ''
---

**Describe the feature you'd like**
A clear and concise description of what you would like Flowise to have.

**Additional context**
Add any other context or screenshots about the feature request here.
@@ -0,0 +1,67 @@
name: Feature Request
description: Suggest a new feature or enhancement for Flowise
labels: ['enhancement']
assignees: []
body:
    - type: markdown
      attributes:
          value: |
              Thanks for suggesting a new feature! Please provide as much detail as possible to help us understand your request.

    - type: textarea
      id: feature-description
      attributes:
          label: Feature Description
          description: A clear and concise description of the feature you'd like to see in Flowise.
          placeholder: Describe what you want to be added or improved...
      validations:
          required: true

    - type: dropdown
      id: feature-category
      attributes:
          label: Feature Category
          description: What category does this feature belong to?
          options:
              - UI/UX Improvement
              - New Node/Component
              - Integration
              - Performance
              - Security
              - Documentation
              - API Enhancement
              - Workflow/Flow Management
              - Authentication/Authorization
              - Database/Storage
              - Deployment/DevOps
              - Other
      validations:
          required: true

    - type: textarea
      id: problem-statement
      attributes:
          label: Problem Statement
          description: What problem does this feature solve? What's the current pain point?
          placeholder: Describe the problem or limitation you're facing...

    - type: textarea
      id: proposed-solution
      attributes:
          label: Proposed Solution
          description: How would you like this feature to work? Be as specific as possible.
          placeholder: Describe your ideal solution in detail...

    - type: textarea
      id: mockups-references
      attributes:
          label: Mockups or References
          description: Any mockups, screenshots, or references to similar features in other tools?
          placeholder: Upload images or provide links to examples...

    - type: textarea
      id: additional-context
      attributes:
          label: Additional Context
          description: Any other information, context, or examples that would help us understand this request.
          placeholder: Add any other relevant information...
@@ -1,33 +0,0 @@
name: autoSyncMergedPullRequest
on:
    pull_request_target:
        types:
            - closed
        branches: ['main']
jobs:
    autoSyncMergedPullRequest:
        if: github.event.pull_request.merged == true
        runs-on: ubuntu-latest
        permissions:
            contents: write
        steps:
            - uses: actions/checkout@v4
            - name: Show PR info
              env:
                  GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
              run: |
                  echo The PR #${{ github.event.pull_request.number }} was merged on main branch!
            - name: Repository Dispatch
              uses: peter-evans/repository-dispatch@v3
              with:
                  token: ${{ secrets.AUTOSYNC_TOKEN }}
                  repository: ${{ secrets.AUTOSYNC_CH_URL }}
                  event-type: ${{ secrets.AUTOSYNC_PR_EVENT_TYPE }}
                  client-payload: >-
                      {
                          "ref": "${{ github.ref }}",
                          "prNumber": "${{ github.event.pull_request.number }}",
                          "prTitle": "${{ github.event.pull_request.title }}",
                          "prDescription": "",
                          "sha": "${{ github.sha }}"
                      }
@@ -1,36 +0,0 @@
name: autoSyncSingleCommit
on:
    push:
        branches:
            - main
jobs:
    doNotAutoSyncSingleCommit:
        if: github.event.commits[1] != null
        runs-on: ubuntu-latest
        steps:
            - uses: actions/checkout@v3
            - name: IGNORE autoSyncSingleCommit
              run: |
                  echo This single commit has came from a merged commit. We will ignore it. This case is handled in autoSyncMergedPullRequest workflow for merge commits comming from merged pull requests only! Beware, the regular merge commits are not handled by any workflow for the moment.
    autoSyncSingleCommit:
        if: github.event.commits[1] == null
        runs-on: ubuntu-latest
        steps:
            - uses: actions/checkout@v3
            - name: autoSyncSingleCommit
              env:
                  GITHUB_CONTEXT: ${{ toJSON(github) }}
              run: |
                  echo Autosync a single commit with id: ${{ github.sha }} from openSource main branch towards cloud hosted version.
            - name: Repository Dispatch
              uses: peter-evans/repository-dispatch@v3
              with:
                  token: ${{ secrets.AUTOSYNC_TOKEN }}
                  repository: ${{ secrets.AUTOSYNC_CH_URL }}
                  event-type: ${{ secrets.AUTOSYNC_SC_EVENT_TYPE }}
                  client-payload: >-
                      {
                          "ref": "${{ github.ref }}",
                          "sha": "${{ github.sha }}",
                          "commitMessage": "${{ github.event.commits[0].id }}"
                      }
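Both removed workflows hand off to [peter-evans/repository-dispatch](https://github.com/peter-evans/repository-dispatch), which simply calls GitHub's `repository_dispatch` REST endpoint on the target repository. As a rough sketch of what that dispatch amounts to (the owner, repository, event type, and token below are illustrative placeholders, not the values of this repo's `AUTOSYNC_*` secrets):

```bash
# Send a repository_dispatch event equivalent to the "Repository Dispatch" steps above.
# OWNER/REPO, the event type, and $TOKEN are placeholders.
curl -X POST \
    -H "Accept: application/vnd.github+json" \
    -H "Authorization: Bearer $TOKEN" \
    https://api.github.com/repos/OWNER/REPO/dispatches \
    -d '{"event_type": "sync-single-commit", "client_payload": {"ref": "refs/heads/main", "sha": "0123abc"}}'
```

The receiving repository can then listen with `on: repository_dispatch` and read the payload from `github.event.client_payload`.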
@@ -0,0 +1,72 @@
name: Docker Image CI - Docker Hub

on:
    workflow_dispatch:
        inputs:
            node_version:
                description: 'Node.js version to build this image with.'
                type: choice
                required: true
                default: '20'
                options:
                    - '20'
            tag_version:
                description: 'Tag version of the image to be pushed.'
                type: string
                required: true
                default: 'latest'

jobs:
    docker:
        runs-on: ubuntu-latest
        steps:
            - name: Set default values
              id: defaults
              run: |
                  echo "node_version=${{ github.event.inputs.node_version || '20' }}" >> $GITHUB_OUTPUT
                  echo "tag_version=${{ github.event.inputs.tag_version || 'latest' }}" >> $GITHUB_OUTPUT

            - name: Checkout
              uses: actions/checkout@v4.1.1

            - name: Set up QEMU
              uses: docker/setup-qemu-action@v3.0.0

            - name: Set up Docker Buildx
              uses: docker/setup-buildx-action@v3.0.0

            - name: Login to Docker Hub
              uses: docker/login-action@v3
              with:
                  username: ${{ secrets.DOCKERHUB_USERNAME }}
                  password: ${{ secrets.DOCKERHUB_TOKEN }}

            # -------------------------
            # Build and push main image
            # -------------------------
            - name: Build and push main image
              uses: docker/build-push-action@v5.3.0
              with:
                  context: .
                  file: ./docker/Dockerfile
                  build-args: |
                      NODE_VERSION=${{ steps.defaults.outputs.node_version }}
                  platforms: linux/amd64,linux/arm64
                  push: true
                  tags: |
                      flowiseai/flowise:${{ steps.defaults.outputs.tag_version }}

            # -------------------------
            # Build and push worker image
            # -------------------------
            - name: Build and push worker image
              uses: docker/build-push-action@v5.3.0
              with:
                  context: .
                  file: docker/worker/Dockerfile
                  build-args: |
                      NODE_VERSION=${{ steps.defaults.outputs.node_version }}
                  platforms: linux/amd64,linux/arm64
                  push: true
                  tags: |
                      flowiseai/flowise-worker:${{ steps.defaults.outputs.tag_version }}
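Because the workflow above only has a `workflow_dispatch` trigger, it must be started manually. A minimal sketch of kicking it off from the GitHub CLI, using its display name and the two declared inputs (the values shown are just the defaults):

```bash
# Trigger the Docker Hub image build with explicit inputs.
gh workflow run "Docker Image CI - Docker Hub" \
    -f node_version=20 \
    -f tag_version=latest
```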
@@ -0,0 +1,73 @@
name: Docker Image CI - AWS ECR

on:
    workflow_dispatch:
        inputs:
            environment:
                description: 'Environment to push the image to.'
                required: true
                default: 'dev'
                type: choice
                options:
                    - dev
                    - prod
            node_version:
                description: 'Node.js version to build this image with.'
                type: choice
                required: true
                default: '20'
                options:
                    - '20'
            tag_version:
                description: 'Tag version of the image to be pushed.'
                type: string
                required: true
                default: 'latest'

jobs:
    docker:
        runs-on: ubuntu-latest
        environment: ${{ github.event.inputs.environment }}
        steps:
            - name: Set default values
              id: defaults
              run: |
                  echo "node_version=${{ github.event.inputs.node_version || '20' }}" >> $GITHUB_OUTPUT
                  echo "tag_version=${{ github.event.inputs.tag_version || 'latest' }}" >> $GITHUB_OUTPUT

            - name: Checkout
              uses: actions/checkout@v4.1.1

            - name: Set up QEMU
              uses: docker/setup-qemu-action@v3.0.0

            - name: Set up Docker Buildx
              uses: docker/setup-buildx-action@v3.0.0

            - name: Configure AWS Credentials
              uses: aws-actions/configure-aws-credentials@v3
              with:
                  aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
                  aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
                  aws-region: ${{ secrets.AWS_REGION }}

            - name: Login to Amazon ECR
              uses: aws-actions/amazon-ecr-login@v1

            # -------------------------
            # Build and push main image
            # -------------------------
            - name: Build and push main image
              uses: docker/build-push-action@v5.3.0
              with:
                  context: .
                  file: Dockerfile
                  build-args: |
                      NODE_VERSION=${{ steps.defaults.outputs.node_version }}
                  platforms: linux/amd64,linux/arm64
                  push: true
                  tags: |
                      ${{ format('{0}.dkr.ecr.{1}.amazonaws.com/flowise:{2}',
                        secrets.AWS_ACCOUNT_ID,
                        secrets.AWS_REGION,
                        steps.defaults.outputs.tag_version) }}
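The `format()` expression in the tags field expands to a fully qualified ECR image reference. With placeholder account and region values (not taken from this repository's secrets), pulling the pushed image would look like:

```bash
# Illustrative only: 123456789012 and us-east-1 stand in for AWS_ACCOUNT_ID and AWS_REGION.
aws ecr get-login-password --region us-east-1 |
    docker login --username AWS --password-stdin 123456789012.dkr.ecr.us-east-1.amazonaws.com
docker pull 123456789012.dkr.ecr.us-east-1.amazonaws.com/flowise:latest
```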
@@ -1,43 +0,0 @@
name: Docker Image CI

on:
    workflow_dispatch:
        inputs:
            node_version:
                description: 'Node.js version to build this image with.'
                type: choice
                required: true
                default: '20'
                options:
                    - '20'
            tag_version:
                description: 'Tag version of the image to be pushed.'
                type: string
                required: true
                default: 'latest'

jobs:
    docker:
        runs-on: ubuntu-latest
        steps:
            - name: Checkout
              uses: actions/checkout@v4.1.1
            - name: Set up QEMU
              uses: docker/setup-qemu-action@v3.0.0
            - name: Set up Docker Buildx
              uses: docker/setup-buildx-action@v3.0.0
            - name: Login to Docker Hub
              uses: docker/login-action@v3
              with:
                  username: ${{ secrets.DOCKERHUB_USERNAME }}
                  password: ${{ secrets.DOCKERHUB_TOKEN }}
            - name: Build and push
              uses: docker/build-push-action@v5.3.0
              with:
                  context: .
                  file: ./docker/Dockerfile
                  build-args: |
                      NODE_VERSION=${{github.event.inputs.node_version}}
                  platforms: linux/amd64,linux/arm64
                  push: true
                  tags: flowiseai/flowise:${{github.event.inputs.tag_version}}
@@ -6,6 +6,7 @@ on:
    pull_request:
        branches:
            - '*'
    workflow_dispatch:
permissions:
    contents: read
jobs:

@@ -31,6 +32,8 @@ jobs:
            - run: pnpm install
            - run: pnpm lint
            - run: pnpm build
              env:
                  NODE_OPTIONS: '--max_old_space_size=4096'
            - name: Cypress install
              run: pnpm cypress install
            - name: Install dependencies (Cypress Action)
@@ -8,13 +8,12 @@ on:
    pull_request:
        branches:
            - '*'

    workflow_dispatch:
jobs:
    build:
        runs-on: ubuntu-latest
        env:
            PUPPETEER_SKIP_DOWNLOAD: true
        steps:
            - uses: actions/checkout@v3

            - uses: actions/checkout@v4
            - run: docker build --no-cache -t flowise .
@@ -114,51 +114,52 @@ Flowise has 3 different modules in a single mono repository.
to make sure everything works fine in production.

11. Commit code and submit Pull Request from forked branch pointing to [Flowise master](https://github.com/FlowiseAI/Flowise/tree/master).
11. Commit code and submit Pull Request from forked branch pointing to [Flowise main](https://github.com/FlowiseAI/Flowise/tree/main).

## 🌱 Env Variables

Flowise support different environment variables to configure your instance. You can specify the following variables in the `.env` file inside `packages/server` folder. Read [more](https://docs.flowiseai.com/environment-variables)

| Variable | Description | Type | Default |
| --- | --- | --- | --- |
| PORT | The HTTP port Flowise runs on | Number | 3000 |
| CORS_ORIGINS | The allowed origins for all cross-origin HTTP calls | String | |
| IFRAME_ORIGINS | The allowed origins for iframe src embedding | String | |
| FLOWISE_USERNAME | Username to login | String | |
| FLOWISE_PASSWORD | Password to login | String | |
| FLOWISE_FILE_SIZE_LIMIT | Upload File Size Limit | String | 50mb |
| DEBUG | Print logs from components | Boolean | |
| LOG_PATH | Location where log files are stored | String | `your-path/Flowise/logs` |
| LOG_LEVEL | Different levels of logs | Enum String: `error`, `info`, `verbose`, `debug` | `info` |
| LOG_JSON_SPACES | Spaces to beautify JSON logs | | 2 |
| APIKEY_STORAGE_TYPE | To store api keys on a JSON file or database. Default is `json` | Enum String: `json`, `db` | `json` |
| APIKEY_PATH | Location where api keys are saved when `APIKEY_STORAGE_TYPE` is `json` | String | `your-path/Flowise/packages/server` |
| TOOL_FUNCTION_BUILTIN_DEP | NodeJS built-in modules to be used for Tool Function | String | |
| TOOL_FUNCTION_EXTERNAL_DEP | External modules to be used for Tool Function | String | |
| DATABASE_TYPE | Type of database to store the flowise data | Enum String: `sqlite`, `mysql`, `postgres` | `sqlite` |
| DATABASE_PATH | Location where database is saved (When DATABASE_TYPE is sqlite) | String | `your-home-dir/.flowise` |
| DATABASE_HOST | Host URL or IP address (When DATABASE_TYPE is not sqlite) | String | |
| DATABASE_PORT | Database port (When DATABASE_TYPE is not sqlite) | String | |
| DATABASE_USER | Database username (When DATABASE_TYPE is not sqlite) | String | |
| DATABASE_PASSWORD | Database password (When DATABASE_TYPE is not sqlite) | String | |
| DATABASE_NAME | Database name (When DATABASE_TYPE is not sqlite) | String | |
| DATABASE_SSL_KEY_BASE64 | Database SSL client cert in base64 (takes priority over DATABASE_SSL) | Boolean | false |
| DATABASE_SSL | Database connection overssl (When DATABASE_TYPE is postgre) | Boolean | false |
| SECRETKEY_PATH | Location where encryption key (used to encrypt/decrypt credentials) is saved | String | `your-path/Flowise/packages/server` |
| FLOWISE_SECRETKEY_OVERWRITE | Encryption key to be used instead of the key stored in SECRETKEY_PATH | String | |
| DISABLE_FLOWISE_TELEMETRY | Turn off telemetry | Boolean | |
| MODEL_LIST_CONFIG_JSON | File path to load list of models from your local config file | String | `/your_model_list_config_file_path` |
| STORAGE_TYPE | Type of storage for uploaded files. default is `local` | Enum String: `s3`, `local` | `local` |
| BLOB_STORAGE_PATH | Local folder path where uploaded files are stored when `STORAGE_TYPE` is `local` | String | `your-home-dir/.flowise/storage` |
| S3_STORAGE_BUCKET_NAME | Bucket name to hold the uploaded files when `STORAGE_TYPE` is `s3` | String | |
| S3_STORAGE_ACCESS_KEY_ID | AWS Access Key | String | |
| S3_STORAGE_SECRET_ACCESS_KEY | AWS Secret Key | String | |
| S3_STORAGE_REGION | Region for S3 bucket | String | |
| S3_ENDPOINT_URL | Custom Endpoint for S3 | String | |
| S3_FORCE_PATH_STYLE | Set this to true to force the request to use path-style addressing | Boolean | false |
| SHOW_COMMUNITY_NODES | Show nodes created by community | Boolean | |
| DISABLED_NODES | Hide nodes from UI (comma separated list of node names) | String | |

| Variable | Description | Type | Default |
| --- | --- | --- | --- |
| PORT | The HTTP port Flowise runs on | Number | 3000 |
| CORS_ORIGINS | The allowed origins for all cross-origin HTTP calls | String | |
| IFRAME_ORIGINS | The allowed origins for iframe src embedding | String | |
| FLOWISE_FILE_SIZE_LIMIT | Upload File Size Limit | String | 50mb |
| DEBUG | Print logs from components | Boolean | |
| LOG_PATH | Location where log files are stored | String | `your-path/Flowise/logs` |
| LOG_LEVEL | Different levels of logs | Enum String: `error`, `info`, `verbose`, `debug` | `info` |
| LOG_JSON_SPACES | Spaces to beautify JSON logs | | 2 |
| TOOL_FUNCTION_BUILTIN_DEP | NodeJS built-in modules to be used for Custom Tool or Function | String | |
| TOOL_FUNCTION_EXTERNAL_DEP | External modules to be used for Custom Tool or Function | String | |
| ALLOW_BUILTIN_DEP | Allow project dependencies to be used for Custom Tool or Function | Boolean | false |
| DATABASE_TYPE | Type of database to store the flowise data | Enum String: `sqlite`, `mysql`, `postgres` | `sqlite` |
| DATABASE_PATH | Location where database is saved (When DATABASE_TYPE is sqlite) | String | `your-home-dir/.flowise` |
| DATABASE_HOST | Host URL or IP address (When DATABASE_TYPE is not sqlite) | String | |
| DATABASE_PORT | Database port (When DATABASE_TYPE is not sqlite) | String | |
| DATABASE_USER | Database username (When DATABASE_TYPE is not sqlite) | String | |
| DATABASE_PASSWORD | Database password (When DATABASE_TYPE is not sqlite) | String | |
| DATABASE_NAME | Database name (When DATABASE_TYPE is not sqlite) | String | |
| DATABASE_SSL_KEY_BASE64 | Database SSL client cert in base64 (takes priority over DATABASE_SSL) | Boolean | false |
| DATABASE_SSL | Database connection overssl (When DATABASE_TYPE is postgre) | Boolean | false |
| SECRETKEY_PATH | Location where encryption key (used to encrypt/decrypt credentials) is saved | String | `your-path/Flowise/packages/server` |
| FLOWISE_SECRETKEY_OVERWRITE | Encryption key to be used instead of the key stored in SECRETKEY_PATH | String | |
| MODEL_LIST_CONFIG_JSON | File path to load list of models from your local config file | String | `/your_model_list_config_file_path` |
| STORAGE_TYPE | Type of storage for uploaded files. default is `local` | Enum String: `s3`, `local`, `gcs` | `local` |
| BLOB_STORAGE_PATH | Local folder path where uploaded files are stored when `STORAGE_TYPE` is `local` | String | `your-home-dir/.flowise/storage` |
| S3_STORAGE_BUCKET_NAME | Bucket name to hold the uploaded files when `STORAGE_TYPE` is `s3` | String | |
| S3_STORAGE_ACCESS_KEY_ID | AWS Access Key | String | |
| S3_STORAGE_SECRET_ACCESS_KEY | AWS Secret Key | String | |
| S3_STORAGE_REGION | Region for S3 bucket | String | |
| S3_ENDPOINT_URL | Custom Endpoint for S3 | String | |
| S3_FORCE_PATH_STYLE | Set this to true to force the request to use path-style addressing | Boolean | false |
| GOOGLE_CLOUD_STORAGE_PROJ_ID | The GCP project id for cloud storage & logging when `STORAGE_TYPE` is `gcs` | String | |
| GOOGLE_CLOUD_STORAGE_CREDENTIAL | The credential key file path when `STORAGE_TYPE` is `gcs` | String | |
| GOOGLE_CLOUD_STORAGE_BUCKET_NAME | Bucket name to hold the uploaded files when `STORAGE_TYPE` is `gcs` | String | |
| GOOGLE_CLOUD_UNIFORM_BUCKET_ACCESS | Enable uniform bucket level access when `STORAGE_TYPE` is `gcs` | Boolean | true |
| SHOW_COMMUNITY_NODES | Show nodes created by community | Boolean | |
| DISABLED_NODES | Hide nodes from UI (comma separated list of node names) | String | |
| TRUST_PROXY | Configure proxy trust settings for proper IP detection. Values: 'true' (trust all), 'false' (disable), number (hop count), or Express proxy values (e.g., 'loopback', 'linklocal', 'uniquelocal', IP addresses). [Learn More](https://expressjs.com/en/guide/behind-proxies.html) | Boolean/String/Number | true |

You can also specify the env variables when using `npx`. For example:
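The `npx` example that CONTRIBUTING.md gives after this sentence falls outside the hunk shown above. A representative invocation, using variables from the table (the values are arbitrary examples), would be:

```bash
# Pass environment variables as CLI flags when starting Flowise via npx.
npx flowise start --PORT=3000 --DEBUG=true
```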
Dockerfile (35 changes)
@@ -5,30 +5,41 @@
# docker run -d -p 3000:3000 flowise

FROM node:20-alpine
RUN apk add --update libc6-compat python3 make g++
# needed for pdfjs-dist
RUN apk add --no-cache build-base cairo-dev pango-dev

# Install Chromium
RUN apk add --no-cache chromium

#install PNPM globaly
RUN npm install -g pnpm
# Install system dependencies and build tools
RUN apk update && \
    apk add --no-cache \
    libc6-compat \
    python3 \
    make \
    g++ \
    build-base \
    cairo-dev \
    pango-dev \
    chromium \
    curl && \
    npm install -g pnpm

ENV PUPPETEER_SKIP_DOWNLOAD=true
ENV PUPPETEER_EXECUTABLE_PATH=/usr/bin/chromium-browser

ENV NODE_OPTIONS=--max-old-space-size=8192

WORKDIR /usr/src
WORKDIR /usr/src/flowise

# Copy app source
COPY . .

RUN pnpm install
# Install dependencies and build
RUN pnpm install && \
    pnpm build

RUN pnpm build
# Give the node user ownership of the application files
RUN chown -R node:node .

# Switch to non-root user (node user already exists in node:20-alpine)
USER node

EXPOSE 3000

CMD [ "pnpm", "start" ]
CMD [ "pnpm", "start" ]
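The "Docker Image CI - Docker Hub" workflow earlier in this compare builds `./docker/Dockerfile` with a `NODE_VERSION` build argument. A local equivalent of that CI step would be roughly the following (the image tag is arbitrary, and the build argument is simply skipped with a warning if the Dockerfile does not declare it):

```bash
# Build and run the image locally, mirroring the CI build-push-action configuration.
docker build --no-cache --build-arg NODE_VERSION=20 \
    -f docker/Dockerfile -t flowiseai/flowise:dev .
docker run -d -p 3000:3000 flowiseai/flowise:dev
```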
LICENSE.md (14 changes)
@@ -1,6 +1,14 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
Copyright (c) 2023-present FlowiseAI, Inc.

Portions of this software are licensed as follows:

- All content that resides under https://github.com/FlowiseAI/Flowise/tree/main/packages/server/src/enterprise directory and files with explicit copyright notice such as [IdentityManager.ts](https://github.com/FlowiseAI/Flowise/tree/main/packages/server/src/IdentityManager.ts) are licensed under [Commercial License](https://github.com/FlowiseAI/Flowise/tree/main/packages/server/src/enterprise/LICENSE.md).
- All third party components incorporated into the FlowiseAI Software are licensed under the original license provided by the owner of the applicable component.
- Content outside of the above mentioned directories or restrictions above is available under the "Apache 2.0" license as defined below.

Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
README.md (93 changes)
@@ -1,8 +1,11 @@
<!-- markdownlint-disable MD030 -->

<img width="100%" src="https://github.com/FlowiseAI/Flowise/blob/main/images/flowise.png?raw=true"></a>
<p align="center">
<img src="https://github.com/FlowiseAI/Flowise/blob/main/images/flowise_white.svg#gh-light-mode-only">
<img src="https://github.com/FlowiseAI/Flowise/blob/main/images/flowise_dark.svg#gh-dark-mode-only">
</p>

# Flowise - Build LLM Apps Easily
<div align="center">

[](https://github.com/FlowiseAI/Flowise/releases)
[](https://discord.gg/jbaHfsRVBW)

@@ -10,11 +13,26 @@
[](https://star-history.com/#FlowiseAI/Flowise)
[](https://github.com/FlowiseAI/Flowise/fork)

English | [繁體中文](./i18n/README-TW.md) | [簡體中文](./i18n/README-ZH.md) | [日本語](./i18n/README-JA.md) | [한국어](./i18n/README-KR.md)
English | [繁體中文](./i18n/README-TW.md) | [简体中文](./i18n/README-ZH.md) | [日本語](./i18n/README-JA.md) | [한국어](./i18n/README-KR.md)

<h3>Drag & drop UI to build your customized LLM flow</h3>
</div>

<h3>Build AI Agents, Visually</h3>
<a href="https://github.com/FlowiseAI/Flowise">
<img width="100%" src="https://github.com/FlowiseAI/Flowise/blob/main/images/flowise.gif?raw=true"></a>
<img width="100%" src="https://github.com/FlowiseAI/Flowise/blob/main/images/flowise_agentflow.gif?raw=true"></a>

## 📚 Table of Contents

- [⚡ Quick Start](#-quick-start)
- [🐳 Docker](#-docker)
- [👨💻 Developers](#-developers)
- [🌱 Env Variables](#-env-variables)
- [📖 Documentation](#-documentation)
- [🌐 Self Host](#-self-host)
- [☁️ Flowise Cloud](#️-flowise-cloud)
- [🙋 Support](#-support)
- [🙌 Contributing](#-contributing)
- [📄 License](#-license)

## ⚡Quick Start

@@ -30,12 +48,6 @@ Download and Install [NodeJS](https://nodejs.org/en/download) >= 18.15.0
npx flowise start
```

With username & password

```bash
npx flowise start --FLOWISE_USERNAME=user --FLOWISE_PASSWORD=1234
```

3. Open [http://localhost:3000](http://localhost:3000)

## 🐳 Docker

@@ -52,9 +64,11 @@ Download and Install [NodeJS](https://nodejs.org/en/download) >= 18.15.0
### Docker Image

1. Build the image locally:

    ```bash
    docker build --no-cache -t flowise .
    ```

2. Run image:

    ```bash

@@ -62,6 +76,7 @@ Download and Install [NodeJS](https://nodejs.org/en/download) >= 18.15.0
    ```

3. Stop image:

    ```bash
    docker stop flowise
    ```

@@ -84,13 +99,13 @@ Flowise has 3 different modules in a single mono repository.
### Setup

1. Clone the repository
1. Clone the repository:

    ```bash
    git clone https://github.com/FlowiseAI/Flowise.git
    ```

2. Go into repository folder
2. Go into repository folder:

    ```bash
    cd Flowise

@@ -110,10 +125,24 @@ Flowise has 3 different modules in a single mono repository.
<details>
<summary>Exit code 134 (JavaScript heap out of memory)</summary>
If you get this error when running the above `build` script, try increasing the Node.js heap size and run the script again:
If you get this error when running the above `build` script, try increasing the Node.js heap size and run the script again:

    export NODE_OPTIONS="--max-old-space-size=4096"
    pnpm build
    ```bash
    # macOS / Linux / Git Bash
    export NODE_OPTIONS="--max-old-space-size=4096"

    # Windows PowerShell
    $env:NODE_OPTIONS="--max-old-space-size=4096"

    # Windows CMD
    set NODE_OPTIONS=--max-old-space-size=4096
    ```

    Then run:

    ```bash
    pnpm build
    ```

</details>

@@ -129,7 +158,7 @@ Flowise has 3 different modules in a single mono repository.
- Create `.env` file and specify the `VITE_PORT` (refer to `.env.example`) in `packages/ui`
- Create `.env` file and specify the `PORT` (refer to `.env.example`) in `packages/server`
- Run
- Run:

    ```bash
    pnpm dev

@@ -137,22 +166,13 @@ Flowise has 3 different modules in a single mono repository.
Any code changes will reload the app automatically on [http://localhost:8080](http://localhost:8080)

## 🔒 Authentication

To enable app level authentication, add `FLOWISE_USERNAME` and `FLOWISE_PASSWORD` to the `.env` file in `packages/server`:

```
FLOWISE_USERNAME=user
FLOWISE_PASSWORD=1234
```

## 🌱 Env Variables

Flowise support different environment variables to configure your instance. You can specify the following variables in the `.env` file inside `packages/server` folder. Read [more](https://github.com/FlowiseAI/Flowise/blob/main/CONTRIBUTING.md#-env-variables)
Flowise supports different environment variables to configure your instance. You can specify the following variables in the `.env` file inside `packages/server` folder. Read [more](https://github.com/FlowiseAI/Flowise/blob/main/CONTRIBUTING.md#-env-variables)

## 📖 Documentation

[Flowise Docs](https://docs.flowiseai.com/)
You can view the Flowise Docs [here](https://docs.flowiseai.com/)

## 🌐 Self Host

@@ -170,6 +190,10 @@ Deploy Flowise self-hosted in your existing infrastructure, we support various [
[](https://railway.app/template/pn4G8S?referralCode=WVNPD9)

- [Northflank](https://northflank.com/stacks/deploy-flowiseai)

[](https://northflank.com/stacks/deploy-flowiseai)

- [Render](https://docs.flowiseai.com/configuration/deployment/render)

[](https://docs.flowiseai.com/configuration/deployment/render)

@@ -182,9 +206,9 @@ Deploy Flowise self-hosted in your existing infrastructure, we support various [
[](https://elest.io/open-source/flowiseai)

- [Sealos](https://cloud.sealos.io/?openapp=system-template%3FtemplateName%3Dflowise)
- [Sealos](https://template.sealos.io/deploy?templateName=flowise)

[](https://cloud.sealos.io/?openapp=system-template%3FtemplateName%3Dflowise)
[](https://template.sealos.io/deploy?templateName=flowise)

- [RepoCloud](https://repocloud.io/details/?app_id=29)

@@ -194,11 +218,11 @@ Deploy Flowise self-hosted in your existing infrastructure, we support various [
## ☁️ Flowise Cloud

[Get Started with Flowise Cloud](https://flowiseai.com/)
Get Started with [Flowise Cloud](https://flowiseai.com/).

## 🙋 Support

Feel free to ask any questions, raise problems, and request new features in [discussion](https://github.com/FlowiseAI/Flowise/discussions)
Feel free to ask any questions, raise problems, and request new features in [Discussion](https://github.com/FlowiseAI/Flowise/discussions).

## 🙌 Contributing

@@ -206,9 +230,10 @@ Thanks go to these awesome contributors
<a href="https://github.com/FlowiseAI/Flowise/graphs/contributors">
<img src="https://contrib.rocks/image?repo=FlowiseAI/Flowise" />
</a>
</a><br><br>

See [Contributing Guide](CONTRIBUTING.md). Reach out to us at [Discord](https://discord.gg/jbaHfsRVBW) if you have any questions or issues.

See [contributing guide](CONTRIBUTING.md). Reach out to us at [Discord](https://discord.gg/jbaHfsRVBW) if you have any questions or issues.
[](https://star-history.com/#FlowiseAI/Flowise&Date)

## 📄 License
SECURITY.md (58 changes)
@@ -1,40 +1,38 @@
### Responsible Disclosure Policy
### Responsible Disclosure Policy

At Flowise, we prioritize security and continuously work to safeguard our systems. However, vulnerabilities can still exist. If you identify a security issue, please report it to us so we can address it promptly. Your cooperation helps us better protect our platform and users.
At Flowise, we prioritize security and continuously work to safeguard our systems. However, vulnerabilities can still exist. If you identify a security issue, please report it to us so we can address it promptly. Your cooperation helps us better protect our platform and users.

### Vulnerabilities
### Out of scope vulnerabilities

The following types of issues are some of the most common vulnerabilities:
- Clickjacking on pages without sensitive actions
- CSRF on unauthenticated/logout/login pages
- Attacks requiring MITM (Man-in-the-Middle) or physical device access
- Social engineering attacks
- Activities that cause service disruption (DoS)
- Content spoofing and text injection without a valid attack vector
- Email spoofing
- Absence of DNSSEC, CAA, CSP headers
- Missing Secure or HTTP-only flag on non-sensitive cookies
- Deadlinks
- User enumeration

- Clickjacking on pages without sensitive actions
- CSRF on unauthenticated/logout/login pages
- Attacks requiring MITM (Man-in-the-Middle) or physical device access
- Social engineering attacks
- Activities that cause service disruption (DoS)
- Content spoofing and text injection without a valid attack vector
- Email spoofing
- Absence of DNSSEC, CAA, CSP headers
- Missing Secure or HTTP-only flag on non-sensitive cookies
- Deadlinks
- User enumeration
### Reporting Guidelines

### Reporting Guidelines
- Submit your findings to https://github.com/FlowiseAI/Flowise/security
- Provide clear details to help us reproduce and fix the issue quickly.

- Submit your findings to https://github.com/FlowiseAI/Flowise/security
- Provide clear details to help us reproduce and fix the issue quickly.
### Disclosure Guidelines

### Disclosure Guidelines
- Do not publicly disclose vulnerabilities until we have assessed, resolved, and notified affected users.
- If you plan to present your research (e.g., at a conference or in a blog), share a draft with us at least **30 days in advance** for review.
- Avoid including:
    - Data from any Flowise customer projects
    - Flowise user/customer information
    - Details about Flowise employees, contractors, or partners

- Do not publicly disclose vulnerabilities until we have assessed, resolved, and notified affected users.
- If you plan to present your research (e.g., at a conference or in a blog), share a draft with us at least **30 days in advance** for review.
- Avoid including:
    - Data from any Flowise customer projects
    - Flowise user/customer information
    - Details about Flowise employees, contractors, or partners
### Response to Reports

### Response to Reports
- We will acknowledge your report within **5 business days** and provide an estimated resolution timeline.
- Your report will be kept **confidential**, and your details will not be shared without your consent.

- We will acknowledge your report within **5 business days** and provide an estimated resolution timeline.
- Your report will be kept **confidential**, and your details will not be shared without your consent.

We appreciate your efforts in helping us maintain a secure platform and look forward to working together to resolve any issues responsibly.
We appreciate your efforts in helping us maintain a secure platform and look forward to working together to resolve any issues responsibly.
@@ -1,16 +1,12 @@
PORT=3000

# APIKEY_PATH=/your_apikey_path/.flowise # (will be deprecated by end of 2025)

############################################################################################################
############################################## DATABASE ####################################################
############################################################################################################

DATABASE_PATH=/root/.flowise
APIKEY_PATH=/root/.flowise
SECRETKEY_PATH=/root/.flowise
LOG_PATH=/root/.flowise/logs
BLOB_STORAGE_PATH=/root/.flowise/storage

# APIKEY_STORAGE_TYPE=json (json | db)

# NUMBER_OF_PROXIES= 1
# CORS_ORIGINS=*
# IFRAME_ORIGINS=*

# DATABASE_TYPE=postgres
# DATABASE_PORT=5432
# DATABASE_HOST=""

@@ -18,51 +14,118 @@ BLOB_STORAGE_PATH=/root/.flowise/storage
# DATABASE_USER=root
# DATABASE_PASSWORD=mypassword
# DATABASE_SSL=true
# DATABASE_REJECT_UNAUTHORIZED=true
# DATABASE_SSL_KEY_BASE64=<Self signed certificate in BASE64>


############################################################################################################
############################################## SECRET KEYS #################################################
############################################################################################################

# SECRETKEY_STORAGE_TYPE=local #(local | aws)
# SECRETKEY_PATH=/your_api_key_path/.flowise
# FLOWISE_SECRETKEY_OVERWRITE=myencryptionkey
SECRETKEY_PATH=/root/.flowise
# FLOWISE_SECRETKEY_OVERWRITE=myencryptionkey # (if you want to overwrite the secret key)
# SECRETKEY_AWS_ACCESS_KEY=<your-access-key>
# SECRETKEY_AWS_SECRET_KEY=<your-secret-key>
# SECRETKEY_AWS_REGION=us-west-2
# SECRETKEY_AWS_NAME=FlowiseEncryptionKey

# FLOWISE_USERNAME=user
# FLOWISE_PASSWORD=1234
# FLOWISE_SECRETKEY_OVERWRITE=myencryptionkey
# FLOWISE_FILE_SIZE_LIMIT=50mb

############################################################################################################
############################################## LOGGING #####################################################
############################################################################################################

# DEBUG=true
# LOG_LEVEL=info (error | warn | info | verbose | debug)
LOG_PATH=/root/.flowise/logs
# LOG_LEVEL=info #(error | warn | info | verbose | debug)
# LOG_SANITIZE_BODY_FIELDS=password,pwd,pass,secret,token,apikey,api_key,accesstoken,access_token,refreshtoken,refresh_token,clientsecret,client_secret,privatekey,private_key,secretkey,secret_key,auth,authorization,credential,credentials
# LOG_SANITIZE_HEADER_FIELDS=authorization,x-api-key,x-auth-token,cookie
# TOOL_FUNCTION_BUILTIN_DEP=crypto,fs
# TOOL_FUNCTION_EXTERNAL_DEP=moment,lodash
# ALLOW_BUILTIN_DEP=false

# LANGCHAIN_TRACING_V2=true
# LANGCHAIN_ENDPOINT=https://api.smith.langchain.com
# LANGCHAIN_API_KEY=your_api_key
# LANGCHAIN_PROJECT=your_project

# DISABLE_FLOWISE_TELEMETRY=true
############################################################################################################
############################################## STORAGE #####################################################
############################################################################################################

# Uncomment the following line to enable model list config, load the list of models from your local config file
# see https://raw.githubusercontent.com/FlowiseAI/Flowise/main/packages/components/models.json for the format
# MODEL_LIST_CONFIG_JSON=/your_model_list_config_file_path

# STORAGE_TYPE=local (local | s3)
# BLOB_STORAGE_PATH=/your_storage_path/.flowise/storage
# STORAGE_TYPE=local (local | s3 | gcs)
BLOB_STORAGE_PATH=/root/.flowise/storage
# S3_STORAGE_BUCKET_NAME=flowise
# S3_STORAGE_ACCESS_KEY_ID=<your-access-key>
# S3_STORAGE_SECRET_ACCESS_KEY=<your-secret-key>
# S3_STORAGE_REGION=us-west-2
# S3_ENDPOINT_URL=<custom-s3-endpoint-url>
# S3_FORCE_PATH_STYLE=false
# GOOGLE_CLOUD_STORAGE_CREDENTIAL=/the/keyfilename/path
# GOOGLE_CLOUD_STORAGE_PROJ_ID=<your-gcp-project-id>
# GOOGLE_CLOUD_STORAGE_BUCKET_NAME=<the-bucket-name>
# GOOGLE_CLOUD_UNIFORM_BUCKET_ACCESS=true


############################################################################################################
############################################## SETTINGS ####################################################
############################################################################################################

# NUMBER_OF_PROXIES= 1
# CORS_ORIGINS=*
# IFRAME_ORIGINS=*
# FLOWISE_FILE_SIZE_LIMIT=50mb
# SHOW_COMMUNITY_NODES=true
# DISABLE_FLOWISE_TELEMETRY=true
# DISABLED_NODES=bufferMemory,chatOpenAI (comma separated list of node names to disable)
# Uncomment the following line to enable model list config, load the list of models from your local config file
# see https://raw.githubusercontent.com/FlowiseAI/Flowise/main/packages/components/models.json for the format
# MODEL_LIST_CONFIG_JSON=/your_model_list_config_file_path


############################################################################################################
############################################ AUTH PARAMETERS ###############################################
############################################################################################################

# APP_URL=http://localhost:3000

# SMTP_HOST=smtp.host.com
# SMTP_PORT=465
# SMTP_USER=smtp_user
# SMTP_PASSWORD=smtp_password
# SMTP_SECURE=true
# ALLOW_UNAUTHORIZED_CERTS=false
# SENDER_EMAIL=team@example.com

JWT_AUTH_TOKEN_SECRET='AABBCCDDAABBCCDDAABBCCDDAABBCCDDAABBCCDD'
JWT_REFRESH_TOKEN_SECRET='AABBCCDDAABBCCDDAABBCCDDAABBCCDDAABBCCDD'
JWT_ISSUER='ISSUER'
JWT_AUDIENCE='AUDIENCE'
JWT_TOKEN_EXPIRY_IN_MINUTES=360
JWT_REFRESH_TOKEN_EXPIRY_IN_MINUTES=43200
# EXPIRE_AUTH_TOKENS_ON_RESTART=true # (if you need to expire all tokens on app restart)
# EXPRESS_SESSION_SECRET=flowise
# SECURE_COOKIES=

# INVITE_TOKEN_EXPIRY_IN_HOURS=24
# PASSWORD_RESET_TOKEN_EXPIRY_IN_MINS=15
# PASSWORD_SALT_HASH_ROUNDS=10
# TOKEN_HASH_SECRET='popcorn'

# WORKSPACE_INVITE_TEMPLATE_PATH=/path/to/custom/workspace_invite.hbs


############################################################################################################
############################################# ENTERPRISE ###################################################
############################################################################################################

# LICENSE_URL=
# FLOWISE_EE_LICENSE_KEY=
# OFFLINE=


############################################################################################################
########################################### METRICS COLLECTION #############################################
############################################################################################################

# POSTHOG_PUBLIC_API_KEY=your_posthog_public_api_key

######################
# METRICS COLLECTION
#######################
# ENABLE_METRICS=false
# METRICS_PROVIDER=prometheus # prometheus | open_telemetry
# METRICS_INCLUDE_NODE_METRICS=true # default is true

@@ -73,19 +136,27 @@ BLOB_STORAGE_PATH=/root/.flowise/storage
# METRICS_OPEN_TELEMETRY_PROTOCOL=http # http | grpc | proto (default is http)
# METRICS_OPEN_TELEMETRY_DEBUG=true # default is false

# Uncomment the following lines to enable global agent proxy
# see https://www.npmjs.com/package/global-agent for more details

############################################################################################################
############################################### PROXY ######################################################
############################################################################################################

# Uncomment the following lines to enable global agent proxy, see https://www.npmjs.com/package/global-agent for more details
# GLOBAL_AGENT_HTTP_PROXY=CorporateHttpProxyUrl
# GLOBAL_AGENT_HTTPS_PROXY=CorporateHttpsProxyUrl
# GLOBAL_AGENT_NO_PROXY=ExceptionHostsToBypassProxyIfNeeded

######################
# QUEUE CONFIGURATION
#######################

############################################################################################################
########################################### QUEUE CONFIGURATION ############################################
############################################################################################################

# MODE=queue #(queue | main)
# QUEUE_NAME=flowise-queue
# QUEUE_REDIS_EVENT_STREAM_MAX_LEN=100000
# WORKER_CONCURRENCY=100000
# REMOVE_ON_AGE=86400
# REMOVE_ON_COUNT=10000
# REDIS_URL=
# REDIS_HOST=localhost
# REDIS_PORT=6379

@@ -94,4 +165,16 @@ BLOB_STORAGE_PATH=/root/.flowise/storage
# REDIS_TLS=
# REDIS_CERT=
# REDIS_KEY=
# REDIS_CA=
# REDIS_CA=
# REDIS_KEEP_ALIVE=
# ENABLE_BULLMQ_DASHBOARD=


############################################################################################################
############################################## SECURITY ####################################################
############################################################################################################

# HTTP_DENY_LIST=
# CUSTOM_MCP_SECURITY_CHECK=true
# CUSTOM_MCP_PROTOCOL=sse #(stdio | sse)
# TRUST_PROXY=true #(true | false | 1 | loopback| linklocal | uniquelocal | IP addresses | loopback, IP addresses)
@ -13,7 +13,7 @@ RUN npm install -g flowise
|
|||
FROM node:20-alpine

# Install runtime dependencies
RUN apk add --no-cache chromium git python3 py3-pip make g++ build-base cairo-dev pango-dev
RUN apk add --no-cache chromium git python3 py3-pip make g++ build-base cairo-dev pango-dev curl

# Set the environment variable for Puppeteer to find Chromium
ENV PUPPETEER_EXECUTABLE_PATH=/usr/bin/chromium-browser
|
|
|
|||
|
|
@ -9,28 +9,43 @@ Starts Flowise from [DockerHub Image](https://hub.docker.com/r/flowiseai/flowise
|
|||
3. Open [http://localhost:3000](http://localhost:3000)
4. You can bring the containers down by `docker compose stop`

## 🔒 Authentication

1. Create a `.env` file and specify the `PORT`, `FLOWISE_USERNAME`, and `FLOWISE_PASSWORD` (refer to `.env.example`; a minimal `.env` sketch follows this list)
2. Pass `FLOWISE_USERNAME` and `FLOWISE_PASSWORD` to the `docker-compose.yml` file:
```
environment:
    - PORT=${PORT}
    - FLOWISE_USERNAME=${FLOWISE_USERNAME}
    - FLOWISE_PASSWORD=${FLOWISE_PASSWORD}
```
3. `docker compose up -d`
4. Open [http://localhost:3000](http://localhost:3000)
5. You can bring the containers down by `docker compose stop`

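For step 1, a minimal `.env` sketch (the credentials below are placeholders, not project defaults; see `.env.example` for the full option list):

```
# Placeholder values for illustration; run from the docker folder
cat > .env <<'EOF'
PORT=3000
FLOWISE_USERNAME=user
FLOWISE_PASSWORD=1234
EOF
```
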
## 🌱 Env Variables

If you like to persist your data (flows, logs, apikeys, credentials), set these variables in the `.env` file inside `docker` folder:
If you would like to persist your data (flows, logs, credentials, storage), set these variables in the `.env` file inside the `docker` folder:

- DATABASE_PATH=/root/.flowise
- APIKEY_PATH=/root/.flowise
- LOG_PATH=/root/.flowise/logs
- SECRETKEY_PATH=/root/.flowise
- BLOB_STORAGE_PATH=/root/.flowise/storage

Flowise also support different environment variables to configure your instance. Read [more](https://docs.flowiseai.com/environment-variables)
Flowise also supports different environment variables to configure your instance. Read [more](https://docs.flowiseai.com/configuration/environment-variables)

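Putting the defaults above together, a minimal persistence setup could look like the sketch below (paths are the container-side defaults listed above; run the commands from the `docker` folder):

```
# Minimal sketch using the default container paths listed above
cat > .env <<'EOF'
DATABASE_PATH=/root/.flowise
APIKEY_PATH=/root/.flowise
LOG_PATH=/root/.flowise/logs
SECRETKEY_PATH=/root/.flowise
BLOB_STORAGE_PATH=/root/.flowise/storage
EOF
docker compose up -d
```
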
## Queue Mode:

### Building from source:

You can build the images for worker and main from scratch with:

```
docker compose -f docker-compose-queue-source.yml up -d
```

Monitor Health:

```
docker compose -f docker-compose-queue-source.yml ps
```

### From pre-built images:

You can also use the pre-built images:

```
docker compose -f docker-compose-queue-prebuilt.yml up -d
```

Monitor Health:

```
docker compose -f docker-compose-queue-prebuilt.yml ps
```
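
Once the containers report healthy, a quick sanity check is to probe the worker's healthcheck endpoint from inside its container (this assumes the default `WORKER_PORT` of 5566; `curl` is bundled in the worker image):

```
docker compose -f docker-compose-queue-prebuilt.yml exec flowise-worker curl -f http://localhost:5566/healthz
# expected output: OK
```

The same check works against `docker-compose-queue-source.yml`.
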
|
|
|
|||
|
|
@ -0,0 +1,316 @@
|
|||
version: '3.1'
|
||||
|
||||
services:
|
||||
redis:
|
||||
image: redis:alpine
|
||||
container_name: flowise-redis
|
||||
ports:
|
||||
- '6379:6379'
|
||||
volumes:
|
||||
- redis_data:/data
|
||||
networks:
|
||||
- flowise-net
|
||||
restart: always
|
||||
|
||||
flowise:
|
||||
image: flowiseai/flowise:latest
|
||||
container_name: flowise-main
|
||||
restart: always
|
||||
ports:
|
||||
- '${PORT:-3000}:${PORT:-3000}'
|
||||
volumes:
|
||||
- ~/.flowise:/root/.flowise
|
||||
environment:
|
||||
# --- Essential Flowise Vars ---
|
||||
- PORT=${PORT:-3000}
|
||||
- DATABASE_PATH=${DATABASE_PATH:-/root/.flowise}
|
||||
- DATABASE_TYPE=${DATABASE_TYPE}
|
||||
- DATABASE_PORT=${DATABASE_PORT}
|
||||
- DATABASE_HOST=${DATABASE_HOST}
|
||||
- DATABASE_NAME=${DATABASE_NAME}
|
||||
- DATABASE_USER=${DATABASE_USER}
|
||||
- DATABASE_PASSWORD=${DATABASE_PASSWORD}
|
||||
- DATABASE_SSL=${DATABASE_SSL}
|
||||
- DATABASE_SSL_KEY_BASE64=${DATABASE_SSL_KEY_BASE64}
|
||||
|
||||
# SECRET KEYS
|
||||
- SECRETKEY_STORAGE_TYPE=${SECRETKEY_STORAGE_TYPE}
|
||||
- SECRETKEY_PATH=${SECRETKEY_PATH}
|
||||
- FLOWISE_SECRETKEY_OVERWRITE=${FLOWISE_SECRETKEY_OVERWRITE}
|
||||
- SECRETKEY_AWS_ACCESS_KEY=${SECRETKEY_AWS_ACCESS_KEY}
|
||||
- SECRETKEY_AWS_SECRET_KEY=${SECRETKEY_AWS_SECRET_KEY}
|
||||
- SECRETKEY_AWS_REGION=${SECRETKEY_AWS_REGION}
|
||||
- SECRETKEY_AWS_NAME=${SECRETKEY_AWS_NAME}
|
||||
|
||||
# LOGGING
|
||||
- DEBUG=${DEBUG}
|
||||
- LOG_PATH=${LOG_PATH}
|
||||
- LOG_LEVEL=${LOG_LEVEL}
|
||||
- LOG_SANITIZE_BODY_FIELDS=${LOG_SANITIZE_BODY_FIELDS}
|
||||
- LOG_SANITIZE_HEADER_FIELDS=${LOG_SANITIZE_HEADER_FIELDS}
|
||||
|
||||
# CUSTOM TOOL/FUNCTION DEPENDENCIES
|
||||
- TOOL_FUNCTION_BUILTIN_DEP=${TOOL_FUNCTION_BUILTIN_DEP}
|
||||
- TOOL_FUNCTION_EXTERNAL_DEP=${TOOL_FUNCTION_EXTERNAL_DEP}
|
||||
- ALLOW_BUILTIN_DEP=${ALLOW_BUILTIN_DEP}
|
||||
|
||||
# STORAGE
|
||||
- STORAGE_TYPE=${STORAGE_TYPE}
|
||||
- BLOB_STORAGE_PATH=${BLOB_STORAGE_PATH}
|
||||
- S3_STORAGE_BUCKET_NAME=${S3_STORAGE_BUCKET_NAME}
|
||||
- S3_STORAGE_ACCESS_KEY_ID=${S3_STORAGE_ACCESS_KEY_ID}
|
||||
- S3_STORAGE_SECRET_ACCESS_KEY=${S3_STORAGE_SECRET_ACCESS_KEY}
|
||||
- S3_STORAGE_REGION=${S3_STORAGE_REGION}
|
||||
- S3_ENDPOINT_URL=${S3_ENDPOINT_URL}
|
||||
- S3_FORCE_PATH_STYLE=${S3_FORCE_PATH_STYLE}
|
||||
- GOOGLE_CLOUD_STORAGE_CREDENTIAL=${GOOGLE_CLOUD_STORAGE_CREDENTIAL}
|
||||
- GOOGLE_CLOUD_STORAGE_PROJ_ID=${GOOGLE_CLOUD_STORAGE_PROJ_ID}
|
||||
- GOOGLE_CLOUD_STORAGE_BUCKET_NAME=${GOOGLE_CLOUD_STORAGE_BUCKET_NAME}
|
||||
- GOOGLE_CLOUD_UNIFORM_BUCKET_ACCESS=${GOOGLE_CLOUD_UNIFORM_BUCKET_ACCESS}
|
||||
|
||||
# SETTINGS
|
||||
- NUMBER_OF_PROXIES=${NUMBER_OF_PROXIES}
|
||||
- CORS_ORIGINS=${CORS_ORIGINS}
|
||||
- IFRAME_ORIGINS=${IFRAME_ORIGINS}
|
||||
- FLOWISE_FILE_SIZE_LIMIT=${FLOWISE_FILE_SIZE_LIMIT}
|
||||
- SHOW_COMMUNITY_NODES=${SHOW_COMMUNITY_NODES}
|
||||
- DISABLE_FLOWISE_TELEMETRY=${DISABLE_FLOWISE_TELEMETRY}
|
||||
- DISABLED_NODES=${DISABLED_NODES}
|
||||
- MODEL_LIST_CONFIG_JSON=${MODEL_LIST_CONFIG_JSON}
|
||||
|
||||
# AUTH PARAMETERS
|
||||
- APP_URL=${APP_URL}
|
||||
- JWT_AUTH_TOKEN_SECRET=${JWT_AUTH_TOKEN_SECRET}
|
||||
- JWT_REFRESH_TOKEN_SECRET=${JWT_REFRESH_TOKEN_SECRET}
|
||||
- JWT_ISSUER=${JWT_ISSUER}
|
||||
- JWT_AUDIENCE=${JWT_AUDIENCE}
|
||||
- JWT_TOKEN_EXPIRY_IN_MINUTES=${JWT_TOKEN_EXPIRY_IN_MINUTES}
|
||||
- JWT_REFRESH_TOKEN_EXPIRY_IN_MINUTES=${JWT_REFRESH_TOKEN_EXPIRY_IN_MINUTES}
|
||||
- EXPIRE_AUTH_TOKENS_ON_RESTART=${EXPIRE_AUTH_TOKENS_ON_RESTART}
|
||||
- EXPRESS_SESSION_SECRET=${EXPRESS_SESSION_SECRET}
|
||||
- PASSWORD_RESET_TOKEN_EXPIRY_IN_MINS=${PASSWORD_RESET_TOKEN_EXPIRY_IN_MINS}
|
||||
- PASSWORD_SALT_HASH_ROUNDS=${PASSWORD_SALT_HASH_ROUNDS}
|
||||
- TOKEN_HASH_SECRET=${TOKEN_HASH_SECRET}
|
||||
- SECURE_COOKIES=${SECURE_COOKIES}
|
||||
|
||||
# EMAIL
|
||||
- SMTP_HOST=${SMTP_HOST}
|
||||
- SMTP_PORT=${SMTP_PORT}
|
||||
- SMTP_USER=${SMTP_USER}
|
||||
- SMTP_PASSWORD=${SMTP_PASSWORD}
|
||||
- SMTP_SECURE=${SMTP_SECURE}
|
||||
- ALLOW_UNAUTHORIZED_CERTS=${ALLOW_UNAUTHORIZED_CERTS}
|
||||
- SENDER_EMAIL=${SENDER_EMAIL}
|
||||
|
||||
# ENTERPRISE
|
||||
- LICENSE_URL=${LICENSE_URL}
|
||||
- FLOWISE_EE_LICENSE_KEY=${FLOWISE_EE_LICENSE_KEY}
|
||||
- OFFLINE=${OFFLINE}
|
||||
- INVITE_TOKEN_EXPIRY_IN_HOURS=${INVITE_TOKEN_EXPIRY_IN_HOURS}
|
||||
- WORKSPACE_INVITE_TEMPLATE_PATH=${WORKSPACE_INVITE_TEMPLATE_PATH}
|
||||
|
||||
# METRICS COLLECTION
|
||||
- POSTHOG_PUBLIC_API_KEY=${POSTHOG_PUBLIC_API_KEY}
|
||||
- ENABLE_METRICS=${ENABLE_METRICS}
|
||||
- METRICS_PROVIDER=${METRICS_PROVIDER}
|
||||
- METRICS_INCLUDE_NODE_METRICS=${METRICS_INCLUDE_NODE_METRICS}
|
||||
- METRICS_SERVICE_NAME=${METRICS_SERVICE_NAME}
|
||||
- METRICS_OPEN_TELEMETRY_METRIC_ENDPOINT=${METRICS_OPEN_TELEMETRY_METRIC_ENDPOINT}
|
||||
- METRICS_OPEN_TELEMETRY_PROTOCOL=${METRICS_OPEN_TELEMETRY_PROTOCOL}
|
||||
- METRICS_OPEN_TELEMETRY_DEBUG=${METRICS_OPEN_TELEMETRY_DEBUG}
|
||||
|
||||
# PROXY
|
||||
- GLOBAL_AGENT_HTTP_PROXY=${GLOBAL_AGENT_HTTP_PROXY}
|
||||
- GLOBAL_AGENT_HTTPS_PROXY=${GLOBAL_AGENT_HTTPS_PROXY}
|
||||
- GLOBAL_AGENT_NO_PROXY=${GLOBAL_AGENT_NO_PROXY}
|
||||
|
||||
# --- Queue Configuration (Main Instance) ---
|
||||
- MODE=${MODE:-queue}
|
||||
- QUEUE_NAME=${QUEUE_NAME:-flowise-queue}
|
||||
- QUEUE_REDIS_EVENT_STREAM_MAX_LEN=${QUEUE_REDIS_EVENT_STREAM_MAX_LEN}
|
||||
- WORKER_CONCURRENCY=${WORKER_CONCURRENCY}
|
||||
- REMOVE_ON_AGE=${REMOVE_ON_AGE}
|
||||
- REMOVE_ON_COUNT=${REMOVE_ON_COUNT}
|
||||
- REDIS_URL=${REDIS_URL:-redis://redis:6379}
|
||||
- REDIS_HOST=${REDIS_HOST}
|
||||
- REDIS_PORT=${REDIS_PORT}
|
||||
- REDIS_USERNAME=${REDIS_USERNAME}
|
||||
- REDIS_PASSWORD=${REDIS_PASSWORD}
|
||||
- REDIS_TLS=${REDIS_TLS}
|
||||
- REDIS_CERT=${REDIS_CERT}
|
||||
- REDIS_KEY=${REDIS_KEY}
|
||||
- REDIS_CA=${REDIS_CA}
|
||||
- REDIS_KEEP_ALIVE=${REDIS_KEEP_ALIVE}
|
||||
- ENABLE_BULLMQ_DASHBOARD=${ENABLE_BULLMQ_DASHBOARD}
|
||||
|
||||
# SECURITY
|
||||
- CUSTOM_MCP_SECURITY_CHECK=${CUSTOM_MCP_SECURITY_CHECK}
|
||||
- CUSTOM_MCP_PROTOCOL=${CUSTOM_MCP_PROTOCOL}
|
||||
- HTTP_DENY_LIST=${HTTP_DENY_LIST}
|
||||
- TRUST_PROXY=${TRUST_PROXY}
|
||||
healthcheck:
|
||||
test: ['CMD', 'curl', '-f', 'http://localhost:${PORT:-3000}/api/v1/ping']
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 5
|
||||
start_period: 30s
|
||||
entrypoint: /bin/sh -c "sleep 3; flowise start"
|
||||
depends_on:
|
||||
- redis
|
||||
networks:
|
||||
- flowise-net
|
||||
|
||||
flowise-worker:
|
||||
image: flowiseai/flowise-worker:latest
|
||||
container_name: flowise-worker
|
||||
restart: always
|
||||
volumes:
|
||||
- ~/.flowise:/root/.flowise
|
||||
environment:
|
||||
# --- Essential Flowise Vars ---
|
||||
- WORKER_PORT=${WORKER_PORT:-5566}
|
||||
- DATABASE_PATH=${DATABASE_PATH:-/root/.flowise}
|
||||
- DATABASE_TYPE=${DATABASE_TYPE}
|
||||
- DATABASE_PORT=${DATABASE_PORT}
|
||||
- DATABASE_HOST=${DATABASE_HOST}
|
||||
- DATABASE_NAME=${DATABASE_NAME}
|
||||
- DATABASE_USER=${DATABASE_USER}
|
||||
- DATABASE_PASSWORD=${DATABASE_PASSWORD}
|
||||
- DATABASE_SSL=${DATABASE_SSL}
|
||||
- DATABASE_SSL_KEY_BASE64=${DATABASE_SSL_KEY_BASE64}
|
||||
|
||||
# SECRET KEYS
|
||||
- SECRETKEY_STORAGE_TYPE=${SECRETKEY_STORAGE_TYPE}
|
||||
- SECRETKEY_PATH=${SECRETKEY_PATH}
|
||||
- FLOWISE_SECRETKEY_OVERWRITE=${FLOWISE_SECRETKEY_OVERWRITE}
|
||||
- SECRETKEY_AWS_ACCESS_KEY=${SECRETKEY_AWS_ACCESS_KEY}
|
||||
- SECRETKEY_AWS_SECRET_KEY=${SECRETKEY_AWS_SECRET_KEY}
|
||||
- SECRETKEY_AWS_REGION=${SECRETKEY_AWS_REGION}
|
||||
- SECRETKEY_AWS_NAME=${SECRETKEY_AWS_NAME}
|
||||
|
||||
# LOGGING
|
||||
- DEBUG=${DEBUG}
|
||||
- LOG_PATH=${LOG_PATH}
|
||||
- LOG_LEVEL=${LOG_LEVEL}
|
||||
- LOG_SANITIZE_BODY_FIELDS=${LOG_SANITIZE_BODY_FIELDS}
|
||||
- LOG_SANITIZE_HEADER_FIELDS=${LOG_SANITIZE_HEADER_FIELDS}
|
||||
|
||||
# CUSTOM TOOL/FUNCTION DEPENDENCIES
|
||||
- TOOL_FUNCTION_BUILTIN_DEP=${TOOL_FUNCTION_BUILTIN_DEP}
|
||||
- TOOL_FUNCTION_EXTERNAL_DEP=${TOOL_FUNCTION_EXTERNAL_DEP}
|
||||
- ALLOW_BUILTIN_DEP=${ALLOW_BUILTIN_DEP}
|
||||
|
||||
# STORAGE
|
||||
- STORAGE_TYPE=${STORAGE_TYPE}
|
||||
- BLOB_STORAGE_PATH=${BLOB_STORAGE_PATH}
|
||||
- S3_STORAGE_BUCKET_NAME=${S3_STORAGE_BUCKET_NAME}
|
||||
- S3_STORAGE_ACCESS_KEY_ID=${S3_STORAGE_ACCESS_KEY_ID}
|
||||
- S3_STORAGE_SECRET_ACCESS_KEY=${S3_STORAGE_SECRET_ACCESS_KEY}
|
||||
- S3_STORAGE_REGION=${S3_STORAGE_REGION}
|
||||
- S3_ENDPOINT_URL=${S3_ENDPOINT_URL}
|
||||
- S3_FORCE_PATH_STYLE=${S3_FORCE_PATH_STYLE}
|
||||
- GOOGLE_CLOUD_STORAGE_CREDENTIAL=${GOOGLE_CLOUD_STORAGE_CREDENTIAL}
|
||||
- GOOGLE_CLOUD_STORAGE_PROJ_ID=${GOOGLE_CLOUD_STORAGE_PROJ_ID}
|
||||
- GOOGLE_CLOUD_STORAGE_BUCKET_NAME=${GOOGLE_CLOUD_STORAGE_BUCKET_NAME}
|
||||
- GOOGLE_CLOUD_UNIFORM_BUCKET_ACCESS=${GOOGLE_CLOUD_UNIFORM_BUCKET_ACCESS}
|
||||
|
||||
# SETTINGS
|
||||
- NUMBER_OF_PROXIES=${NUMBER_OF_PROXIES}
|
||||
- CORS_ORIGINS=${CORS_ORIGINS}
|
||||
- IFRAME_ORIGINS=${IFRAME_ORIGINS}
|
||||
- FLOWISE_FILE_SIZE_LIMIT=${FLOWISE_FILE_SIZE_LIMIT}
|
||||
- SHOW_COMMUNITY_NODES=${SHOW_COMMUNITY_NODES}
|
||||
- DISABLE_FLOWISE_TELEMETRY=${DISABLE_FLOWISE_TELEMETRY}
|
||||
- DISABLED_NODES=${DISABLED_NODES}
|
||||
- MODEL_LIST_CONFIG_JSON=${MODEL_LIST_CONFIG_JSON}
|
||||
|
||||
# AUTH PARAMETERS
|
||||
- APP_URL=${APP_URL}
|
||||
- JWT_AUTH_TOKEN_SECRET=${JWT_AUTH_TOKEN_SECRET}
|
||||
- JWT_REFRESH_TOKEN_SECRET=${JWT_REFRESH_TOKEN_SECRET}
|
||||
- JWT_ISSUER=${JWT_ISSUER}
|
||||
- JWT_AUDIENCE=${JWT_AUDIENCE}
|
||||
- JWT_TOKEN_EXPIRY_IN_MINUTES=${JWT_TOKEN_EXPIRY_IN_MINUTES}
|
||||
- JWT_REFRESH_TOKEN_EXPIRY_IN_MINUTES=${JWT_REFRESH_TOKEN_EXPIRY_IN_MINUTES}
|
||||
- EXPIRE_AUTH_TOKENS_ON_RESTART=${EXPIRE_AUTH_TOKENS_ON_RESTART}
|
||||
- EXPRESS_SESSION_SECRET=${EXPRESS_SESSION_SECRET}
|
||||
- PASSWORD_RESET_TOKEN_EXPIRY_IN_MINS=${PASSWORD_RESET_TOKEN_EXPIRY_IN_MINS}
|
||||
- PASSWORD_SALT_HASH_ROUNDS=${PASSWORD_SALT_HASH_ROUNDS}
|
||||
- TOKEN_HASH_SECRET=${TOKEN_HASH_SECRET}
|
||||
- SECURE_COOKIES=${SECURE_COOKIES}
|
||||
|
||||
# EMAIL
|
||||
- SMTP_HOST=${SMTP_HOST}
|
||||
- SMTP_PORT=${SMTP_PORT}
|
||||
- SMTP_USER=${SMTP_USER}
|
||||
- SMTP_PASSWORD=${SMTP_PASSWORD}
|
||||
- SMTP_SECURE=${SMTP_SECURE}
|
||||
- ALLOW_UNAUTHORIZED_CERTS=${ALLOW_UNAUTHORIZED_CERTS}
|
||||
- SENDER_EMAIL=${SENDER_EMAIL}
|
||||
|
||||
# ENTERPRISE
|
||||
- LICENSE_URL=${LICENSE_URL}
|
||||
- FLOWISE_EE_LICENSE_KEY=${FLOWISE_EE_LICENSE_KEY}
|
||||
- OFFLINE=${OFFLINE}
|
||||
- INVITE_TOKEN_EXPIRY_IN_HOURS=${INVITE_TOKEN_EXPIRY_IN_HOURS}
|
||||
- WORKSPACE_INVITE_TEMPLATE_PATH=${WORKSPACE_INVITE_TEMPLATE_PATH}
|
||||
|
||||
# METRICS COLLECTION
|
||||
- POSTHOG_PUBLIC_API_KEY=${POSTHOG_PUBLIC_API_KEY}
|
||||
- ENABLE_METRICS=${ENABLE_METRICS}
|
||||
- METRICS_PROVIDER=${METRICS_PROVIDER}
|
||||
- METRICS_INCLUDE_NODE_METRICS=${METRICS_INCLUDE_NODE_METRICS}
|
||||
- METRICS_SERVICE_NAME=${METRICS_SERVICE_NAME}
|
||||
- METRICS_OPEN_TELEMETRY_METRIC_ENDPOINT=${METRICS_OPEN_TELEMETRY_METRIC_ENDPOINT}
|
||||
- METRICS_OPEN_TELEMETRY_PROTOCOL=${METRICS_OPEN_TELEMETRY_PROTOCOL}
|
||||
- METRICS_OPEN_TELEMETRY_DEBUG=${METRICS_OPEN_TELEMETRY_DEBUG}
|
||||
|
||||
# PROXY
|
||||
- GLOBAL_AGENT_HTTP_PROXY=${GLOBAL_AGENT_HTTP_PROXY}
|
||||
- GLOBAL_AGENT_HTTPS_PROXY=${GLOBAL_AGENT_HTTPS_PROXY}
|
||||
- GLOBAL_AGENT_NO_PROXY=${GLOBAL_AGENT_NO_PROXY}
|
||||
|
||||
# --- Queue Configuration (Worker Instance) ---
|
||||
- MODE=${MODE:-queue}
|
||||
- QUEUE_NAME=${QUEUE_NAME:-flowise-queue}
|
||||
- QUEUE_REDIS_EVENT_STREAM_MAX_LEN=${QUEUE_REDIS_EVENT_STREAM_MAX_LEN}
|
||||
- WORKER_CONCURRENCY=${WORKER_CONCURRENCY}
|
||||
- REMOVE_ON_AGE=${REMOVE_ON_AGE}
|
||||
- REMOVE_ON_COUNT=${REMOVE_ON_COUNT}
|
||||
- REDIS_URL=${REDIS_URL:-redis://redis:6379}
|
||||
- REDIS_HOST=${REDIS_HOST}
|
||||
- REDIS_PORT=${REDIS_PORT}
|
||||
- REDIS_USERNAME=${REDIS_USERNAME}
|
||||
- REDIS_PASSWORD=${REDIS_PASSWORD}
|
||||
- REDIS_TLS=${REDIS_TLS}
|
||||
- REDIS_CERT=${REDIS_CERT}
|
||||
- REDIS_KEY=${REDIS_KEY}
|
||||
- REDIS_CA=${REDIS_CA}
|
||||
- REDIS_KEEP_ALIVE=${REDIS_KEEP_ALIVE}
|
||||
- ENABLE_BULLMQ_DASHBOARD=${ENABLE_BULLMQ_DASHBOARD}
|
||||
|
||||
# SECURITY
|
||||
- CUSTOM_MCP_SECURITY_CHECK=${CUSTOM_MCP_SECURITY_CHECK}
|
||||
- CUSTOM_MCP_PROTOCOL=${CUSTOM_MCP_PROTOCOL}
|
||||
- HTTP_DENY_LIST=${HTTP_DENY_LIST}
|
||||
- TRUST_PROXY=${TRUST_PROXY}
|
||||
healthcheck:
|
||||
test: ['CMD', 'curl', '-f', 'http://localhost:${WORKER_PORT:-5566}/healthz']
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 5
|
||||
start_period: 30s
|
||||
entrypoint: /bin/sh -c "node /app/healthcheck/healthcheck.js & sleep 5 && pnpm run start-worker"
|
||||
depends_on:
|
||||
- redis
|
||||
- flowise
|
||||
networks:
|
||||
- flowise-net
|
||||
|
||||
volumes:
|
||||
redis_data:
|
||||
driver: local
|
||||
|
||||
networks:
|
||||
flowise-net:
|
||||
driver: bridge
|
||||
|
|
@ -0,0 +1,71 @@
|
|||
version: '3.1'
|
||||
|
||||
services:
|
||||
redis:
|
||||
image: redis:alpine
|
||||
container_name: flowise-redis
|
||||
ports:
|
||||
- '6379:6379'
|
||||
volumes:
|
||||
- redis_data:/data
|
||||
networks:
|
||||
- flowise-net
|
||||
|
||||
flowise:
|
||||
container_name: flowise-main
|
||||
build:
|
||||
context: .. # Build using the Dockerfile in the root directory
|
||||
dockerfile: docker/Dockerfile
|
||||
ports:
|
||||
- '${PORT}:${PORT}'
|
||||
volumes:
|
||||
# Mount local .flowise to container's default location
|
||||
- ../.flowise:/root/.flowise
|
||||
environment:
|
||||
# --- Essential Flowise Vars ---
|
||||
- PORT=${PORT:-3000}
|
||||
- DATABASE_PATH=/root/.flowise
|
||||
- SECRETKEY_PATH=/root/.flowise
|
||||
- LOG_PATH=/root/.flowise/logs
|
||||
- BLOB_STORAGE_PATH=/root/.flowise/storage
|
||||
# --- Queue Vars (Main Instance) ---
|
||||
- MODE=queue
|
||||
- QUEUE_NAME=flowise-queue # Ensure this matches worker
|
||||
- REDIS_URL=redis://redis:6379 # Use service name 'redis'
|
||||
depends_on:
|
||||
- redis
|
||||
networks:
|
||||
- flowise-net
|
||||
|
||||
flowise-worker:
|
||||
container_name: flowise-worker
|
||||
build:
|
||||
context: .. # Build context is still the root
|
||||
dockerfile: docker/worker/Dockerfile # Ensure this path is correct
|
||||
volumes:
|
||||
# Mount same local .flowise to worker
|
||||
- ../.flowise:/root/.flowise
|
||||
environment:
|
||||
# --- Essential Flowise Vars ---
|
||||
- WORKER_PORT=${WORKER_PORT:-5566} # Port for worker healthcheck
|
||||
- DATABASE_PATH=/root/.flowise
|
||||
- SECRETKEY_PATH=/root/.flowise
|
||||
- LOG_PATH=/root/.flowise/logs
|
||||
- BLOB_STORAGE_PATH=/root/.flowise/storage
|
||||
# --- Queue Vars (Main Instance) ---
|
||||
- MODE=queue
|
||||
- QUEUE_NAME=flowise-queue # Ensure this matches worker
|
||||
- REDIS_URL=redis://redis:6379 # Use service name 'redis'
|
||||
depends_on:
|
||||
- redis
|
||||
- flowise
|
||||
networks:
|
||||
- flowise-net
|
||||
|
||||
volumes:
|
||||
redis_data:
|
||||
driver: local
|
||||
|
||||
networks:
|
||||
flowise-net:
|
||||
driver: bridge
|
||||
|
|
@ -2,16 +2,12 @@ version: '3.1'
|
|||
|
||||
services:
|
||||
flowise:
|
||||
image: flowiseai/flowise
|
||||
image: flowiseai/flowise:latest
|
||||
restart: always
|
||||
environment:
|
||||
- PORT=${PORT}
|
||||
- CORS_ORIGINS=${CORS_ORIGINS}
|
||||
- IFRAME_ORIGINS=${IFRAME_ORIGINS}
|
||||
- FLOWISE_USERNAME=${FLOWISE_USERNAME}
|
||||
- FLOWISE_PASSWORD=${FLOWISE_PASSWORD}
|
||||
- FLOWISE_FILE_SIZE_LIMIT=${FLOWISE_FILE_SIZE_LIMIT}
|
||||
- DEBUG=${DEBUG}
|
||||
|
||||
# DATABASE
|
||||
- DATABASE_PATH=${DATABASE_PATH}
|
||||
- DATABASE_TYPE=${DATABASE_TYPE}
|
||||
- DATABASE_PORT=${DATABASE_PORT}
|
||||
|
|
@ -21,34 +17,130 @@ services:
|
|||
- DATABASE_PASSWORD=${DATABASE_PASSWORD}
|
||||
- DATABASE_SSL=${DATABASE_SSL}
|
||||
- DATABASE_SSL_KEY_BASE64=${DATABASE_SSL_KEY_BASE64}
|
||||
- APIKEY_STORAGE_TYPE=${APIKEY_STORAGE_TYPE}
|
||||
- APIKEY_PATH=${APIKEY_PATH}
|
||||
|
||||
# SECRET KEYS
|
||||
- SECRETKEY_STORAGE_TYPE=${SECRETKEY_STORAGE_TYPE}
|
||||
- SECRETKEY_PATH=${SECRETKEY_PATH}
|
||||
- FLOWISE_SECRETKEY_OVERWRITE=${FLOWISE_SECRETKEY_OVERWRITE}
|
||||
- LOG_LEVEL=${LOG_LEVEL}
|
||||
- SECRETKEY_AWS_ACCESS_KEY=${SECRETKEY_AWS_ACCESS_KEY}
|
||||
- SECRETKEY_AWS_SECRET_KEY=${SECRETKEY_AWS_SECRET_KEY}
|
||||
- SECRETKEY_AWS_REGION=${SECRETKEY_AWS_REGION}
|
||||
- SECRETKEY_AWS_NAME=${SECRETKEY_AWS_NAME}
|
||||
|
||||
# LOGGING
|
||||
- DEBUG=${DEBUG}
|
||||
- LOG_PATH=${LOG_PATH}
|
||||
- LOG_LEVEL=${LOG_LEVEL}
|
||||
- LOG_SANITIZE_BODY_FIELDS=${LOG_SANITIZE_BODY_FIELDS}
|
||||
- LOG_SANITIZE_HEADER_FIELDS=${LOG_SANITIZE_HEADER_FIELDS}
|
||||
|
||||
# CUSTOM TOOL/FUNCTION DEPENDENCIES
|
||||
- TOOL_FUNCTION_BUILTIN_DEP=${TOOL_FUNCTION_BUILTIN_DEP}
|
||||
- TOOL_FUNCTION_EXTERNAL_DEP=${TOOL_FUNCTION_EXTERNAL_DEP}
|
||||
- ALLOW_BUILTIN_DEP=${ALLOW_BUILTIN_DEP}
|
||||
|
||||
# STORAGE
|
||||
- STORAGE_TYPE=${STORAGE_TYPE}
|
||||
- BLOB_STORAGE_PATH=${BLOB_STORAGE_PATH}
|
||||
- S3_STORAGE_BUCKET_NAME=${S3_STORAGE_BUCKET_NAME}
|
||||
- S3_STORAGE_ACCESS_KEY_ID=${S3_STORAGE_ACCESS_KEY_ID}
|
||||
- S3_STORAGE_SECRET_ACCESS_KEY=${S3_STORAGE_SECRET_ACCESS_KEY}
|
||||
- S3_STORAGE_REGION=${S3_STORAGE_REGION}
|
||||
- S3_ENDPOINT_URL=${S3_ENDPOINT_URL}
|
||||
- S3_FORCE_PATH_STYLE=${S3_FORCE_PATH_STYLE}
|
||||
- GOOGLE_CLOUD_STORAGE_CREDENTIAL=${GOOGLE_CLOUD_STORAGE_CREDENTIAL}
|
||||
- GOOGLE_CLOUD_STORAGE_PROJ_ID=${GOOGLE_CLOUD_STORAGE_PROJ_ID}
|
||||
- GOOGLE_CLOUD_STORAGE_BUCKET_NAME=${GOOGLE_CLOUD_STORAGE_BUCKET_NAME}
|
||||
- GOOGLE_CLOUD_UNIFORM_BUCKET_ACCESS=${GOOGLE_CLOUD_UNIFORM_BUCKET_ACCESS}
|
||||
|
||||
# SETTINGS
|
||||
- NUMBER_OF_PROXIES=${NUMBER_OF_PROXIES}
|
||||
- CORS_ORIGINS=${CORS_ORIGINS}
|
||||
- IFRAME_ORIGINS=${IFRAME_ORIGINS}
|
||||
- FLOWISE_FILE_SIZE_LIMIT=${FLOWISE_FILE_SIZE_LIMIT}
|
||||
- SHOW_COMMUNITY_NODES=${SHOW_COMMUNITY_NODES}
|
||||
- DISABLE_FLOWISE_TELEMETRY=${DISABLE_FLOWISE_TELEMETRY}
|
||||
- DISABLED_NODES=${DISABLED_NODES}
|
||||
- MODEL_LIST_CONFIG_JSON=${MODEL_LIST_CONFIG_JSON}
|
||||
|
||||
# AUTH PARAMETERS
|
||||
- APP_URL=${APP_URL}
|
||||
- JWT_AUTH_TOKEN_SECRET=${JWT_AUTH_TOKEN_SECRET}
|
||||
- JWT_REFRESH_TOKEN_SECRET=${JWT_REFRESH_TOKEN_SECRET}
|
||||
- JWT_ISSUER=${JWT_ISSUER}
|
||||
- JWT_AUDIENCE=${JWT_AUDIENCE}
|
||||
- JWT_TOKEN_EXPIRY_IN_MINUTES=${JWT_TOKEN_EXPIRY_IN_MINUTES}
|
||||
- JWT_REFRESH_TOKEN_EXPIRY_IN_MINUTES=${JWT_REFRESH_TOKEN_EXPIRY_IN_MINUTES}
|
||||
- EXPIRE_AUTH_TOKENS_ON_RESTART=${EXPIRE_AUTH_TOKENS_ON_RESTART}
|
||||
- EXPRESS_SESSION_SECRET=${EXPRESS_SESSION_SECRET}
|
||||
- PASSWORD_RESET_TOKEN_EXPIRY_IN_MINS=${PASSWORD_RESET_TOKEN_EXPIRY_IN_MINS}
|
||||
- PASSWORD_SALT_HASH_ROUNDS=${PASSWORD_SALT_HASH_ROUNDS}
|
||||
- TOKEN_HASH_SECRET=${TOKEN_HASH_SECRET}
|
||||
- SECURE_COOKIES=${SECURE_COOKIES}
|
||||
|
||||
# EMAIL
|
||||
- SMTP_HOST=${SMTP_HOST}
|
||||
- SMTP_PORT=${SMTP_PORT}
|
||||
- SMTP_USER=${SMTP_USER}
|
||||
- SMTP_PASSWORD=${SMTP_PASSWORD}
|
||||
- SMTP_SECURE=${SMTP_SECURE}
|
||||
- ALLOW_UNAUTHORIZED_CERTS=${ALLOW_UNAUTHORIZED_CERTS}
|
||||
- SENDER_EMAIL=${SENDER_EMAIL}
|
||||
|
||||
# ENTERPRISE
|
||||
- LICENSE_URL=${LICENSE_URL}
|
||||
- FLOWISE_EE_LICENSE_KEY=${FLOWISE_EE_LICENSE_KEY}
|
||||
- OFFLINE=${OFFLINE}
|
||||
- INVITE_TOKEN_EXPIRY_IN_HOURS=${INVITE_TOKEN_EXPIRY_IN_HOURS}
|
||||
- WORKSPACE_INVITE_TEMPLATE_PATH=${WORKSPACE_INVITE_TEMPLATE_PATH}
|
||||
|
||||
# METRICS COLLECTION
|
||||
- POSTHOG_PUBLIC_API_KEY=${POSTHOG_PUBLIC_API_KEY}
|
||||
- ENABLE_METRICS=${ENABLE_METRICS}
|
||||
- METRICS_PROVIDER=${METRICS_PROVIDER}
|
||||
- METRICS_INCLUDE_NODE_METRICS=${METRICS_INCLUDE_NODE_METRICS}
|
||||
- METRICS_SERVICE_NAME=${METRICS_SERVICE_NAME}
|
||||
- METRICS_OPEN_TELEMETRY_METRIC_ENDPOINT=${METRICS_OPEN_TELEMETRY_METRIC_ENDPOINT}
|
||||
- METRICS_OPEN_TELEMETRY_PROTOCOL=${METRICS_OPEN_TELEMETRY_PROTOCOL}
|
||||
- METRICS_OPEN_TELEMETRY_DEBUG=${METRICS_OPEN_TELEMETRY_DEBUG}
|
||||
|
||||
# PROXY
|
||||
- GLOBAL_AGENT_HTTP_PROXY=${GLOBAL_AGENT_HTTP_PROXY}
|
||||
- GLOBAL_AGENT_HTTPS_PROXY=${GLOBAL_AGENT_HTTPS_PROXY}
|
||||
- GLOBAL_AGENT_NO_PROXY=${GLOBAL_AGENT_NO_PROXY}
|
||||
- DISABLED_NODES=${DISABLED_NODES}
|
||||
|
||||
# QUEUE CONFIGURATION
|
||||
- MODE=${MODE}
|
||||
- WORKER_CONCURRENCY=${WORKER_CONCURRENCY}
|
||||
- QUEUE_NAME=${QUEUE_NAME}
|
||||
- QUEUE_REDIS_EVENT_STREAM_MAX_LEN=${QUEUE_REDIS_EVENT_STREAM_MAX_LEN}
|
||||
- WORKER_CONCURRENCY=${WORKER_CONCURRENCY}
|
||||
- REMOVE_ON_AGE=${REMOVE_ON_AGE}
|
||||
- REMOVE_ON_COUNT=${REMOVE_ON_COUNT}
|
||||
- REDIS_URL=${REDIS_URL}
|
||||
- REDIS_HOST=${REDIS_HOST}
|
||||
- REDIS_PORT=${REDIS_PORT}
|
||||
- REDIS_PASSWORD=${REDIS_PASSWORD}
|
||||
- REDIS_USERNAME=${REDIS_USERNAME}
|
||||
- REDIS_PASSWORD=${REDIS_PASSWORD}
|
||||
- REDIS_TLS=${REDIS_TLS}
|
||||
- REDIS_CERT=${REDIS_CERT}
|
||||
- REDIS_KEY=${REDIS_KEY}
|
||||
- REDIS_CA=${REDIS_CA}
|
||||
- REDIS_KEEP_ALIVE=${REDIS_KEEP_ALIVE}
|
||||
- ENABLE_BULLMQ_DASHBOARD=${ENABLE_BULLMQ_DASHBOARD}
|
||||
|
||||
# SECURITY
|
||||
- CUSTOM_MCP_SECURITY_CHECK=${CUSTOM_MCP_SECURITY_CHECK}
|
||||
- CUSTOM_MCP_PROTOCOL=${CUSTOM_MCP_PROTOCOL}
|
||||
- HTTP_DENY_LIST=${HTTP_DENY_LIST}
|
||||
- TRUST_PROXY=${TRUST_PROXY}
|
||||
ports:
|
||||
- '${PORT}:${PORT}'
|
||||
healthcheck:
|
||||
test: ['CMD', 'curl', '-f', 'http://localhost:${PORT}/api/v1/ping']
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 5
|
||||
start_period: 30s
|
||||
volumes:
|
||||
- ~/.flowise:/root/.flowise
|
||||
entrypoint: /bin/sh -c "sleep 3; flowise start"
|
||||
|
|
|
|||
|
|
@ -0,0 +1,180 @@
|
|||
WORKER_PORT=5566
|
||||
|
||||
# APIKEY_PATH=/your_apikey_path/.flowise # (will be deprecated by end of 2025)
|
||||
|
||||
############################################################################################################
|
||||
############################################## DATABASE ####################################################
|
||||
############################################################################################################
|
||||
|
||||
DATABASE_PATH=/root/.flowise
|
||||
# DATABASE_TYPE=postgres
|
||||
# DATABASE_PORT=5432
|
||||
# DATABASE_HOST=""
|
||||
# DATABASE_NAME=flowise
|
||||
# DATABASE_USER=root
|
||||
# DATABASE_PASSWORD=mypassword
|
||||
# DATABASE_SSL=true
|
||||
# DATABASE_REJECT_UNAUTHORIZED=true
|
||||
# DATABASE_SSL_KEY_BASE64=<Self signed certificate in BASE64>
|
||||
|
||||
|
||||
############################################################################################################
|
||||
############################################## SECRET KEYS #################################################
|
||||
############################################################################################################
|
||||
|
||||
# SECRETKEY_STORAGE_TYPE=local #(local | aws)
|
||||
SECRETKEY_PATH=/root/.flowise
|
||||
# FLOWISE_SECRETKEY_OVERWRITE=myencryptionkey # (if you want to overwrite the secret key)
|
||||
# SECRETKEY_AWS_ACCESS_KEY=<your-access-key>
|
||||
# SECRETKEY_AWS_SECRET_KEY=<your-secret-key>
|
||||
# SECRETKEY_AWS_REGION=us-west-2
|
||||
# SECRETKEY_AWS_NAME=FlowiseEncryptionKey
|
||||
|
||||
|
||||
############################################################################################################
|
||||
############################################## LOGGING #####################################################
|
||||
############################################################################################################
|
||||
|
||||
# DEBUG=true
|
||||
LOG_PATH=/root/.flowise/logs
|
||||
# LOG_LEVEL=info #(error | warn | info | verbose | debug)
|
||||
# LOG_SANITIZE_BODY_FIELDS=password,pwd,pass,secret,token,apikey,api_key,accesstoken,access_token,refreshtoken,refresh_token,clientsecret,client_secret,privatekey,private_key,secretkey,secret_key,auth,authorization,credential,credentials
|
||||
# LOG_SANITIZE_HEADER_FIELDS=authorization,x-api-key,x-auth-token,cookie
|
||||
# TOOL_FUNCTION_BUILTIN_DEP=crypto,fs
|
||||
# TOOL_FUNCTION_EXTERNAL_DEP=moment,lodash
|
||||
# ALLOW_BUILTIN_DEP=false
|
||||
|
||||
|
||||
############################################################################################################
|
||||
############################################## STORAGE #####################################################
|
||||
############################################################################################################
|
||||
|
||||
# STORAGE_TYPE=local (local | s3 | gcs)
|
||||
BLOB_STORAGE_PATH=/root/.flowise/storage
|
||||
# S3_STORAGE_BUCKET_NAME=flowise
|
||||
# S3_STORAGE_ACCESS_KEY_ID=<your-access-key>
|
||||
# S3_STORAGE_SECRET_ACCESS_KEY=<your-secret-key>
|
||||
# S3_STORAGE_REGION=us-west-2
|
||||
# S3_ENDPOINT_URL=<custom-s3-endpoint-url>
|
||||
# S3_FORCE_PATH_STYLE=false
|
||||
# GOOGLE_CLOUD_STORAGE_CREDENTIAL=/the/keyfilename/path
|
||||
# GOOGLE_CLOUD_STORAGE_PROJ_ID=<your-gcp-project-id>
|
||||
# GOOGLE_CLOUD_STORAGE_BUCKET_NAME=<the-bucket-name>
|
||||
# GOOGLE_CLOUD_UNIFORM_BUCKET_ACCESS=true
|
||||
|
||||
|
||||
############################################################################################################
|
||||
############################################## SETTINGS ####################################################
|
||||
############################################################################################################
|
||||
|
||||
# NUMBER_OF_PROXIES= 1
|
||||
# CORS_ORIGINS=*
|
||||
# IFRAME_ORIGINS=*
|
||||
# FLOWISE_FILE_SIZE_LIMIT=50mb
|
||||
# SHOW_COMMUNITY_NODES=true
|
||||
# DISABLE_FLOWISE_TELEMETRY=true
|
||||
# DISABLED_NODES=bufferMemory,chatOpenAI (comma separated list of node names to disable)
|
||||
# Uncomment the following line to enable model list config, load the list of models from your local config file
|
||||
# see https://raw.githubusercontent.com/FlowiseAI/Flowise/main/packages/components/models.json for the format
|
||||
# MODEL_LIST_CONFIG_JSON=/your_model_list_config_file_path
|
||||
|
||||
|
||||
############################################################################################################
|
||||
############################################ AUTH PARAMETERS ###############################################
|
||||
############################################################################################################
|
||||
|
||||
# APP_URL=http://localhost:3000
|
||||
|
||||
# SMTP_HOST=smtp.host.com
|
||||
# SMTP_PORT=465
|
||||
# SMTP_USER=smtp_user
|
||||
# SMTP_PASSWORD=smtp_password
|
||||
# SMTP_SECURE=true
|
||||
# ALLOW_UNAUTHORIZED_CERTS=false
|
||||
# SENDER_EMAIL=team@example.com
|
||||
|
||||
JWT_AUTH_TOKEN_SECRET='AABBCCDDAABBCCDDAABBCCDDAABBCCDDAABBCCDD'
|
||||
JWT_REFRESH_TOKEN_SECRET='AABBCCDDAABBCCDDAABBCCDDAABBCCDDAABBCCDD'
|
||||
JWT_ISSUER='ISSUER'
|
||||
JWT_AUDIENCE='AUDIENCE'
|
||||
JWT_TOKEN_EXPIRY_IN_MINUTES=360
|
||||
JWT_REFRESH_TOKEN_EXPIRY_IN_MINUTES=43200
|
||||
# EXPIRE_AUTH_TOKENS_ON_RESTART=true # (if you need to expire all tokens on app restart)
|
||||
# EXPRESS_SESSION_SECRET=flowise
|
||||
# SECURE_COOKIES=
|
||||
|
||||
# INVITE_TOKEN_EXPIRY_IN_HOURS=24
|
||||
# PASSWORD_RESET_TOKEN_EXPIRY_IN_MINS=15
|
||||
# PASSWORD_SALT_HASH_ROUNDS=10
|
||||
# TOKEN_HASH_SECRET='popcorn'
|
||||
|
||||
# WORKSPACE_INVITE_TEMPLATE_PATH=/path/to/custom/workspace_invite.hbs
|
||||
|
||||
|
||||
############################################################################################################
|
||||
############################################# ENTERPRISE ###################################################
|
||||
############################################################################################################
|
||||
|
||||
# LICENSE_URL=
|
||||
# FLOWISE_EE_LICENSE_KEY=
|
||||
# OFFLINE=
|
||||
|
||||
|
||||
############################################################################################################
|
||||
########################################### METRICS COLLECTION #############################################
|
||||
############################################################################################################
|
||||
|
||||
# POSTHOG_PUBLIC_API_KEY=your_posthog_public_api_key
|
||||
|
||||
# ENABLE_METRICS=false
|
||||
# METRICS_PROVIDER=prometheus # prometheus | open_telemetry
|
||||
# METRICS_INCLUDE_NODE_METRICS=true # default is true
|
||||
# METRICS_SERVICE_NAME=FlowiseAI
|
||||
|
||||
# ONLY NEEDED if METRICS_PROVIDER=open_telemetry
|
||||
# METRICS_OPEN_TELEMETRY_METRIC_ENDPOINT=http://localhost:4318/v1/metrics
|
||||
# METRICS_OPEN_TELEMETRY_PROTOCOL=http # http | grpc | proto (default is http)
|
||||
# METRICS_OPEN_TELEMETRY_DEBUG=true # default is false
|
||||
|
||||
|
||||
############################################################################################################
|
||||
############################################### PROXY ######################################################
|
||||
############################################################################################################
|
||||
|
||||
# Uncomment the following lines to enable global agent proxy, see https://www.npmjs.com/package/global-agent for more details
|
||||
# GLOBAL_AGENT_HTTP_PROXY=CorporateHttpProxyUrl
|
||||
# GLOBAL_AGENT_HTTPS_PROXY=CorporateHttpsProxyUrl
|
||||
# GLOBAL_AGENT_NO_PROXY=ExceptionHostsToBypassProxyIfNeeded
|
||||
|
||||
|
||||
############################################################################################################
|
||||
########################################### QUEUE CONFIGURATION ############################################
|
||||
############################################################################################################
|
||||
|
||||
# MODE=queue #(queue | main)
|
||||
# QUEUE_NAME=flowise-queue
|
||||
# QUEUE_REDIS_EVENT_STREAM_MAX_LEN=100000
|
||||
# WORKER_CONCURRENCY=100000
|
||||
# REMOVE_ON_AGE=86400
|
||||
# REMOVE_ON_COUNT=10000
|
||||
# REDIS_URL=
|
||||
# REDIS_HOST=localhost
|
||||
# REDIS_PORT=6379
|
||||
# REDIS_USERNAME=
|
||||
# REDIS_PASSWORD=
|
||||
# REDIS_TLS=
|
||||
# REDIS_CERT=
|
||||
# REDIS_KEY=
|
||||
# REDIS_CA=
|
||||
# REDIS_KEEP_ALIVE=
|
||||
# ENABLE_BULLMQ_DASHBOARD=
|
||||
|
||||
|
||||
############################################################################################################
|
||||
############################################## SECURITY ####################################################
|
||||
############################################################################################################
|
||||
|
||||
# HTTP_DENY_LIST=
|
||||
# CUSTOM_MCP_SECURITY_CHECK=true
|
||||
# CUSTOM_MCP_PROTOCOL=sse #(stdio | sse)
|
||||
# TRUST_PROXY=true #(true | false | 1 | loopback| linklocal | uniquelocal | IP addresses | loopback, IP addresses)
|
||||
|
|
@ -0,0 +1,49 @@
|
|||
FROM node:20-alpine

RUN apk add --update libc6-compat python3 make g++
# needed for pdfjs-dist
RUN apk add --no-cache build-base cairo-dev pango-dev

# Install Chromium and curl for container-level health checks
RUN apk add --no-cache chromium curl

# Install pnpm globally
RUN npm install -g pnpm

ENV PUPPETEER_SKIP_DOWNLOAD=true
ENV PUPPETEER_EXECUTABLE_PATH=/usr/bin/chromium-browser

ENV NODE_OPTIONS=--max-old-space-size=8192

WORKDIR /usr/src

# Copy app source
COPY . .

RUN pnpm install

RUN pnpm build

# --- Healthcheck Setup ---

WORKDIR /app/healthcheck

COPY docker/worker/healthcheck/package.json .

RUN npm install --omit=dev

COPY docker/worker/healthcheck/healthcheck.js .

# --- End Healthcheck Setup ---

# Set the main working directory back
WORKDIR /usr/src

# Environment variables for port configuration
ENV WORKER_PORT=5566

# Expose port (can be overridden by env var)
EXPOSE ${WORKER_PORT}

# Start healthcheck in background and flowise worker in foreground
CMD ["/bin/sh", "-c", "node /app/healthcheck/healthcheck.js & sleep 5 && pnpm run start-worker"]

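To try this worker image outside of Compose, a rough build-and-run sketch is shown below; the image tag, env-file path, and host port mapping are illustrative assumptions, and the build context must be the repository root, as in the Compose files:

```
# Illustrative only: the tag, env-file path, and port mapping are assumptions
docker build -f docker/worker/Dockerfile -t flowise-worker:local .
docker run --rm --env-file docker/worker/.env -p 5566:5566 flowise-worker:local
```
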
|
@ -18,7 +18,11 @@ Here’s an overview of the process:
|
|||
|
||||
## Setting up Worker:

1. Copy paste the same `.env` file used to setup main server. Change the `PORT` to other available port numbers. Ex: 5566
2. `docker compose up -d`
3. Open [http://localhost:5566](http://localhost:5566)
1. Navigate to the `docker/worker` folder
2. In `.env.example`, set up all the necessary env variables under `QUEUE CONFIGURATION`. The worker's env variables must match those used for the main server. Change `WORKER_PORT` to another available port to listen on for healthchecks, e.g. 5566 (see the sketch after this list).
3. `docker compose up -d`
4. You can bring the worker container down by `docker compose stop`

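A rough sketch of the queue-related values that must agree between the main server's `.env` and the worker's `.env`; the values shown are the illustrative defaults used in `.env.example` and the Compose files, so adjust the Redis details to your own setup:

```
# Keep these identical in both .env files; the values here are illustrative defaults
MODE=queue
QUEUE_NAME=flowise-queue
REDIS_URL=redis://redis:6379
WORKER_PORT=5566   # worker only: the port its healthcheck server listens on
```
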
## Entrypoint:

Unlike the main server image, which uses `flowise start`, the worker's entrypoint is `pnpm run start-worker`. This is because the worker's [Dockerfile](./Dockerfile) builds the image from source files via `pnpm build` instead of from the npm registry via `RUN npm install -g flowise`.

|
|
|
|||
|
|
@ -2,16 +2,12 @@ version: '3.1'
|
|||
|
||||
services:
|
||||
flowise:
|
||||
image: flowiseai/flowise
|
||||
image: flowiseai/flowise-worker:latest
|
||||
restart: always
|
||||
environment:
|
||||
- PORT=${PORT}
|
||||
- CORS_ORIGINS=${CORS_ORIGINS}
|
||||
- IFRAME_ORIGINS=${IFRAME_ORIGINS}
|
||||
- FLOWISE_USERNAME=${FLOWISE_USERNAME}
|
||||
- FLOWISE_PASSWORD=${FLOWISE_PASSWORD}
|
||||
- FLOWISE_FILE_SIZE_LIMIT=${FLOWISE_FILE_SIZE_LIMIT}
|
||||
- DEBUG=${DEBUG}
|
||||
- WORKER_PORT=${WORKER_PORT:-5566}
|
||||
|
||||
# DATABASE
|
||||
- DATABASE_PATH=${DATABASE_PATH}
|
||||
- DATABASE_TYPE=${DATABASE_TYPE}
|
||||
- DATABASE_PORT=${DATABASE_PORT}
|
||||
|
|
@ -21,34 +17,130 @@ services:
|
|||
- DATABASE_PASSWORD=${DATABASE_PASSWORD}
|
||||
- DATABASE_SSL=${DATABASE_SSL}
|
||||
- DATABASE_SSL_KEY_BASE64=${DATABASE_SSL_KEY_BASE64}
|
||||
- APIKEY_STORAGE_TYPE=${APIKEY_STORAGE_TYPE}
|
||||
- APIKEY_PATH=${APIKEY_PATH}
|
||||
|
||||
# SECRET KEYS
|
||||
- SECRETKEY_STORAGE_TYPE=${SECRETKEY_STORAGE_TYPE}
|
||||
- SECRETKEY_PATH=${SECRETKEY_PATH}
|
||||
- FLOWISE_SECRETKEY_OVERWRITE=${FLOWISE_SECRETKEY_OVERWRITE}
|
||||
- LOG_LEVEL=${LOG_LEVEL}
|
||||
- SECRETKEY_AWS_ACCESS_KEY=${SECRETKEY_AWS_ACCESS_KEY}
|
||||
- SECRETKEY_AWS_SECRET_KEY=${SECRETKEY_AWS_SECRET_KEY}
|
||||
- SECRETKEY_AWS_REGION=${SECRETKEY_AWS_REGION}
|
||||
- SECRETKEY_AWS_NAME=${SECRETKEY_AWS_NAME}
|
||||
|
||||
# LOGGING
|
||||
- DEBUG=${DEBUG}
|
||||
- LOG_PATH=${LOG_PATH}
|
||||
- LOG_LEVEL=${LOG_LEVEL}
|
||||
- LOG_SANITIZE_BODY_FIELDS=${LOG_SANITIZE_BODY_FIELDS}
|
||||
- LOG_SANITIZE_HEADER_FIELDS=${LOG_SANITIZE_HEADER_FIELDS}
|
||||
|
||||
# CUSTOM TOOL/FUNCTION DEPENDENCIES
|
||||
- TOOL_FUNCTION_BUILTIN_DEP=${TOOL_FUNCTION_BUILTIN_DEP}
|
||||
- TOOL_FUNCTION_EXTERNAL_DEP=${TOOL_FUNCTION_EXTERNAL_DEP}
|
||||
- ALLOW_BUILTIN_DEP=${ALLOW_BUILTIN_DEP}
|
||||
|
||||
# STORAGE
|
||||
- STORAGE_TYPE=${STORAGE_TYPE}
|
||||
- BLOB_STORAGE_PATH=${BLOB_STORAGE_PATH}
|
||||
- S3_STORAGE_BUCKET_NAME=${S3_STORAGE_BUCKET_NAME}
|
||||
- S3_STORAGE_ACCESS_KEY_ID=${S3_STORAGE_ACCESS_KEY_ID}
|
||||
- S3_STORAGE_SECRET_ACCESS_KEY=${S3_STORAGE_SECRET_ACCESS_KEY}
|
||||
- S3_STORAGE_REGION=${S3_STORAGE_REGION}
|
||||
- S3_ENDPOINT_URL=${S3_ENDPOINT_URL}
|
||||
- S3_FORCE_PATH_STYLE=${S3_FORCE_PATH_STYLE}
|
||||
- GOOGLE_CLOUD_STORAGE_CREDENTIAL=${GOOGLE_CLOUD_STORAGE_CREDENTIAL}
|
||||
- GOOGLE_CLOUD_STORAGE_PROJ_ID=${GOOGLE_CLOUD_STORAGE_PROJ_ID}
|
||||
- GOOGLE_CLOUD_STORAGE_BUCKET_NAME=${GOOGLE_CLOUD_STORAGE_BUCKET_NAME}
|
||||
- GOOGLE_CLOUD_UNIFORM_BUCKET_ACCESS=${GOOGLE_CLOUD_UNIFORM_BUCKET_ACCESS}
|
||||
|
||||
# SETTINGS
|
||||
- NUMBER_OF_PROXIES=${NUMBER_OF_PROXIES}
|
||||
- CORS_ORIGINS=${CORS_ORIGINS}
|
||||
- IFRAME_ORIGINS=${IFRAME_ORIGINS}
|
||||
- FLOWISE_FILE_SIZE_LIMIT=${FLOWISE_FILE_SIZE_LIMIT}
|
||||
- SHOW_COMMUNITY_NODES=${SHOW_COMMUNITY_NODES}
|
||||
- DISABLE_FLOWISE_TELEMETRY=${DISABLE_FLOWISE_TELEMETRY}
|
||||
- DISABLED_NODES=${DISABLED_NODES}
|
||||
- MODEL_LIST_CONFIG_JSON=${MODEL_LIST_CONFIG_JSON}
|
||||
|
||||
# AUTH PARAMETERS
|
||||
- APP_URL=${APP_URL}
|
||||
- JWT_AUTH_TOKEN_SECRET=${JWT_AUTH_TOKEN_SECRET}
|
||||
- JWT_REFRESH_TOKEN_SECRET=${JWT_REFRESH_TOKEN_SECRET}
|
||||
- JWT_ISSUER=${JWT_ISSUER}
|
||||
- JWT_AUDIENCE=${JWT_AUDIENCE}
|
||||
- JWT_TOKEN_EXPIRY_IN_MINUTES=${JWT_TOKEN_EXPIRY_IN_MINUTES}
|
||||
- JWT_REFRESH_TOKEN_EXPIRY_IN_MINUTES=${JWT_REFRESH_TOKEN_EXPIRY_IN_MINUTES}
|
||||
- EXPIRE_AUTH_TOKENS_ON_RESTART=${EXPIRE_AUTH_TOKENS_ON_RESTART}
|
||||
- EXPRESS_SESSION_SECRET=${EXPRESS_SESSION_SECRET}
|
||||
- PASSWORD_RESET_TOKEN_EXPIRY_IN_MINS=${PASSWORD_RESET_TOKEN_EXPIRY_IN_MINS}
|
||||
- PASSWORD_SALT_HASH_ROUNDS=${PASSWORD_SALT_HASH_ROUNDS}
|
||||
- TOKEN_HASH_SECRET=${TOKEN_HASH_SECRET}
|
||||
- SECURE_COOKIES=${SECURE_COOKIES}
|
||||
|
||||
# EMAIL
|
||||
- SMTP_HOST=${SMTP_HOST}
|
||||
- SMTP_PORT=${SMTP_PORT}
|
||||
- SMTP_USER=${SMTP_USER}
|
||||
- SMTP_PASSWORD=${SMTP_PASSWORD}
|
||||
- SMTP_SECURE=${SMTP_SECURE}
|
||||
- ALLOW_UNAUTHORIZED_CERTS=${ALLOW_UNAUTHORIZED_CERTS}
|
||||
- SENDER_EMAIL=${SENDER_EMAIL}
|
||||
|
||||
# ENTERPRISE
|
||||
- LICENSE_URL=${LICENSE_URL}
|
||||
- FLOWISE_EE_LICENSE_KEY=${FLOWISE_EE_LICENSE_KEY}
|
||||
- OFFLINE=${OFFLINE}
|
||||
- INVITE_TOKEN_EXPIRY_IN_HOURS=${INVITE_TOKEN_EXPIRY_IN_HOURS}
|
||||
- WORKSPACE_INVITE_TEMPLATE_PATH=${WORKSPACE_INVITE_TEMPLATE_PATH}
|
||||
|
||||
# METRICS COLLECTION
|
||||
- POSTHOG_PUBLIC_API_KEY=${POSTHOG_PUBLIC_API_KEY}
|
||||
- ENABLE_METRICS=${ENABLE_METRICS}
|
||||
- METRICS_PROVIDER=${METRICS_PROVIDER}
|
||||
- METRICS_INCLUDE_NODE_METRICS=${METRICS_INCLUDE_NODE_METRICS}
|
||||
- METRICS_SERVICE_NAME=${METRICS_SERVICE_NAME}
|
||||
- METRICS_OPEN_TELEMETRY_METRIC_ENDPOINT=${METRICS_OPEN_TELEMETRY_METRIC_ENDPOINT}
|
||||
- METRICS_OPEN_TELEMETRY_PROTOCOL=${METRICS_OPEN_TELEMETRY_PROTOCOL}
|
||||
- METRICS_OPEN_TELEMETRY_DEBUG=${METRICS_OPEN_TELEMETRY_DEBUG}
|
||||
|
||||
# PROXY
|
||||
- GLOBAL_AGENT_HTTP_PROXY=${GLOBAL_AGENT_HTTP_PROXY}
|
||||
- GLOBAL_AGENT_HTTPS_PROXY=${GLOBAL_AGENT_HTTPS_PROXY}
|
||||
- GLOBAL_AGENT_NO_PROXY=${GLOBAL_AGENT_NO_PROXY}
|
||||
- DISABLED_NODES=${DISABLED_NODES}
|
||||
|
||||
# QUEUE CONFIGURATION
|
||||
- MODE=${MODE}
|
||||
- WORKER_CONCURRENCY=${WORKER_CONCURRENCY}
|
||||
- QUEUE_NAME=${QUEUE_NAME}
|
||||
- QUEUE_REDIS_EVENT_STREAM_MAX_LEN=${QUEUE_REDIS_EVENT_STREAM_MAX_LEN}
|
||||
- WORKER_CONCURRENCY=${WORKER_CONCURRENCY}
|
||||
- REMOVE_ON_AGE=${REMOVE_ON_AGE}
|
||||
- REMOVE_ON_COUNT=${REMOVE_ON_COUNT}
|
||||
- REDIS_URL=${REDIS_URL}
|
||||
- REDIS_HOST=${REDIS_HOST}
|
||||
- REDIS_PORT=${REDIS_PORT}
|
||||
- REDIS_PASSWORD=${REDIS_PASSWORD}
|
||||
- REDIS_USERNAME=${REDIS_USERNAME}
|
||||
- REDIS_PASSWORD=${REDIS_PASSWORD}
|
||||
- REDIS_TLS=${REDIS_TLS}
|
||||
- REDIS_CERT=${REDIS_CERT}
|
||||
- REDIS_KEY=${REDIS_KEY}
|
||||
- REDIS_CA=${REDIS_CA}
|
||||
- REDIS_KEEP_ALIVE=${REDIS_KEEP_ALIVE}
|
||||
- ENABLE_BULLMQ_DASHBOARD=${ENABLE_BULLMQ_DASHBOARD}
|
||||
|
||||
# SECURITY
|
||||
- CUSTOM_MCP_SECURITY_CHECK=${CUSTOM_MCP_SECURITY_CHECK}
|
||||
- CUSTOM_MCP_PROTOCOL=${CUSTOM_MCP_PROTOCOL}
|
||||
- HTTP_DENY_LIST=${HTTP_DENY_LIST}
|
||||
- TRUST_PROXY=${TRUST_PROXY}
|
||||
ports:
|
||||
- '${PORT}:${PORT}'
|
||||
- '${WORKER_PORT}:${WORKER_PORT}'
|
||||
healthcheck:
|
||||
test: ['CMD', 'curl', '-f', 'http://localhost:${WORKER_PORT}/healthz']
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 5
|
||||
start_period: 30s
|
||||
volumes:
|
||||
- ~/.flowise:/root/.flowise
|
||||
entrypoint: /bin/sh -c "sleep 3; flowise worker"
|
||||
entrypoint: /bin/sh -c "node /app/healthcheck/healthcheck.js & sleep 5 && pnpm run start-worker"
|
||||
|
|
|
|||
|
|
@ -0,0 +1,13 @@
|
|||
const express = require('express')
const app = express()

// Port the healthcheck server listens on (matches WORKER_PORT in the Dockerfile and Compose files)
const port = process.env.WORKER_PORT || 5566

// Liveness endpoint probed by the container healthcheck
app.get('/healthz', (req, res) => {
    res.status(200).send('OK')
})

app.listen(port, () => {
    // eslint-disable-next-line no-console
    console.log(`Healthcheck server listening on port ${port}`)
})
|
|
@ -0,0 +1,13 @@
|
|||
{
    "name": "flowise-worker-healthcheck",
    "version": "1.0.0",
    "description": "Simple healthcheck server for Flowise worker",
    "main": "healthcheck.js",
    "private": true,
    "scripts": {
        "start": "node healthcheck.js"
    },
    "dependencies": {
        "express": "^4.19.2"
    }
}
|
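
To exercise this small server on its own, a sketch assuming Node.js is installed locally and the repository layout shown above:

```
cd docker/worker/healthcheck
npm install                             # installs express per the package.json above
node healthcheck.js &                   # listens on WORKER_PORT, defaulting to 5566
curl -f http://localhost:5566/healthz   # expected output: OK
```
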
|
@ -112,46 +112,41 @@ Flowise 在一个单一的单体存储库中有 3 个不同的模块。
|
|||
pnpm start
|
||||
```
|
||||
|
||||
11. 提交代码并从指向 [Flowise 主分支](https://github.com/FlowiseAI/Flowise/tree/master) 的分叉分支上提交 Pull Request。
|
||||
11. 提交代码并从指向 [Flowise 主分支](https://github.com/FlowiseAI/Flowise/tree/main) 的分叉分支上提交 Pull Request。
|
||||
|
||||
## 🌱 环境变量
|
||||
|
||||
Flowise 支持不同的环境变量来配置您的实例。您可以在 `packages/server` 文件夹中的 `.env` 文件中指定以下变量。阅读[更多信息](https://docs.flowiseai.com/environment-variables)
|
||||
|
||||
| 变量名 | 描述 | 类型 | 默认值 |
|
||||
| ---------------------------- | ------------------------------------------------------- | ----------------------------------------------- | ----------------------------------- | --- |
|
||||
| PORT | Flowise 运行的 HTTP 端口 | 数字 | 3000 |
|
||||
| FLOWISE_USERNAME | 登录用户名 | 字符串 | |
|
||||
| FLOWISE_PASSWORD | 登录密码 | 字符串 | |
|
||||
| FLOWISE_FILE_SIZE_LIMIT | 上传文件大小限制 | 字符串 | 50mb | |
|
||||
| DEBUG | 打印组件的日志 | 布尔值 | |
|
||||
| LOG_PATH | 存储日志文件的位置 | 字符串 | `your-path/Flowise/logs` |
|
||||
| LOG_LEVEL | 日志的不同级别 | 枚举字符串: `error`, `info`, `verbose`, `debug` | `info` |
|
||||
| APIKEY_STORAGE_TYPE | 存储 API 密钥的存储类型 | 枚举字符串: `json`, `db` | `json` |
|
||||
| APIKEY_PATH | 存储 API 密钥的位置, 当`APIKEY_STORAGE_TYPE`是`json` | 字符串 | `your-path/Flowise/packages/server` |
|
||||
| TOOL_FUNCTION_BUILTIN_DEP | 用于工具函数的 NodeJS 内置模块 | 字符串 | |
|
||||
| TOOL_FUNCTION_EXTERNAL_DEP | 用于工具函数的外部模块 | 字符串 | |
|
||||
| DATABASE_TYPE | 存储 flowise 数据的数据库类型 | 枚举字符串: `sqlite`, `mysql`, `postgres` | `sqlite` |
|
||||
| DATABASE_PATH | 数据库保存的位置(当 DATABASE_TYPE 是 sqlite 时) | 字符串 | `your-home-dir/.flowise` |
|
||||
| DATABASE_HOST | 主机 URL 或 IP 地址(当 DATABASE_TYPE 不是 sqlite 时) | 字符串 | |
|
||||
| DATABASE_PORT | 数据库端口(当 DATABASE_TYPE 不是 sqlite 时) | 字符串 | |
|
||||
| DATABASE_USERNAME | 数据库用户名(当 DATABASE_TYPE 不是 sqlite 时) | 字符串 | |
|
||||
| DATABASE_PASSWORD | 数据库密码(当 DATABASE_TYPE 不是 sqlite 时) | 字符串 | |
|
||||
| DATABASE_NAME | 数据库名称(当 DATABASE_TYPE 不是 sqlite 时) | 字符串 | |
|
||||
| SECRETKEY_PATH | 保存加密密钥(用于加密/解密凭据)的位置 | 字符串 | `your-path/Flowise/packages/server` |
|
||||
| FLOWISE_SECRETKEY_OVERWRITE | 加密密钥用于替代存储在 SECRETKEY_PATH 中的密钥 | 字符串 |
|
||||
| DISABLE_FLOWISE_TELEMETRY | 关闭遥测 | 字符串 |
|
||||
| MODEL_LIST_CONFIG_JSON | 加载模型的位置 | 字符 | `/your_model_list_config_file_path` |
|
||||
| STORAGE_TYPE | 上传文件的存储类型 | 枚举字符串: `local`, `s3` | `local` |
|
||||
| BLOB_STORAGE_PATH | 上传文件存储的本地文件夹路径, 当`STORAGE_TYPE`是`local` | 字符串 | `your-home-dir/.flowise/storage` |
|
||||
| S3_STORAGE_BUCKET_NAME | S3 存储文件夹路径, 当`STORAGE_TYPE`是`s3` | 字符串 | |
|
||||
| S3_STORAGE_ACCESS_KEY_ID | AWS 访问密钥 (Access Key) | 字符串 | |
|
||||
| S3_STORAGE_SECRET_ACCESS_KEY | AWS 密钥 (Secret Key) | 字符串 | |
|
||||
| S3_STORAGE_REGION | S3 存储地区 | 字符串 | |
|
||||
| S3_ENDPOINT_URL | S3 端点 URL | 字符串 | |
|
||||
| S3_FORCE_PATH_STYLE | 将其设置为 true 以强制请求使用路径样式寻址 | 布尔值 | false |
|
||||
| SHOW_COMMUNITY_NODES | 显示由社区创建的节点 | 布尔值 | |
|
||||
| DISABLED_NODES | 从界面中隐藏节点(以逗号分隔的节点名称列表) | 字符串 | |
|
||||
|-----------------------------|---------------------------------------------------------|-------------------------------------------------|-------------------------------------|
|
||||
| `PORT` | Flowise 运行的 HTTP 端口 | 数字 | 3000 |
|
||||
| `FLOWISE_FILE_SIZE_LIMIT` | 上传文件大小限制 | 字符串 | 50mb |
|
||||
| `DEBUG` | 打印组件的日志 | 布尔值 | |
|
||||
| `LOG_PATH` | 存储日志文件的位置 | 字符串 | `your-path/Flowise/logs` |
|
||||
| `LOG_LEVEL` | 日志的不同级别 | 枚举字符串: `error`, `info`, `verbose`, `debug` | `info` |
|
||||
| `TOOL_FUNCTION_BUILTIN_DEP` | 用于工具函数的 NodeJS 内置模块 | 字符串 | |
|
||||
| `TOOL_FUNCTION_EXTERNAL_DEP`| 用于工具函数的外部模块 | 字符串 | |
|
||||
| `DATABASE_TYPE` | 存储 Flowise 数据的数据库类型 | 枚举字符串: `sqlite`, `mysql`, `postgres` | `sqlite` |
|
||||
| `DATABASE_PATH` | 数据库保存的位置(当 `DATABASE_TYPE` 是 sqlite 时) | 字符串 | `your-home-dir/.flowise` |
|
||||
| `DATABASE_HOST` | 主机 URL 或 IP 地址(当 `DATABASE_TYPE` 不是 sqlite 时)| 字符串 | |
|
||||
| `DATABASE_PORT` | 数据库端口(当 `DATABASE_TYPE` 不是 sqlite 时) | 字符串 | |
|
||||
| `DATABASE_USERNAME` | 数据库用户名(当 `DATABASE_TYPE` 不是 sqlite 时) | 字符串 | |
|
||||
| `DATABASE_PASSWORD` | 数据库密码(当 `DATABASE_TYPE` 不是 sqlite 时) | 字符串 | |
|
||||
| `DATABASE_NAME` | 数据库名称(当 `DATABASE_TYPE` 不是 sqlite 时) | 字符串 | |
|
||||
| `SECRETKEY_PATH` | 保存加密密钥(用于加密/解密凭据)的位置 | 字符串 | `your-path/Flowise/packages/server` |
|
||||
| `FLOWISE_SECRETKEY_OVERWRITE`| 加密密钥用于替代存储在 `SECRETKEY_PATH` 中的密钥 | 字符串 | |
|
||||
| `MODEL_LIST_CONFIG_JSON` | 加载模型的位置 | 字符串 | `/your_model_list_config_file_path` |
|
||||
| `STORAGE_TYPE` | 上传文件的存储类型 | 枚举字符串: `local`, `s3` | `local` |
|
||||
| `BLOB_STORAGE_PATH` | 本地上传文件存储路径(当 `STORAGE_TYPE` 为 `local`) | 字符串 | `your-home-dir/.flowise/storage` |
|
||||
| `S3_STORAGE_BUCKET_NAME` | S3 存储文件夹路径(当 `STORAGE_TYPE` 为 `s3`) | 字符串 | |
|
||||
| `S3_STORAGE_ACCESS_KEY_ID` | AWS 访问密钥 (Access Key) | 字符串 | |
|
||||
| `S3_STORAGE_SECRET_ACCESS_KEY` | AWS 密钥 (Secret Key) | 字符串 | |
|
||||
| `S3_STORAGE_REGION` | S3 存储地区 | 字符串 | |
|
||||
| `S3_ENDPOINT_URL` | S3 端点 URL | 字符串 | |
|
||||
| `S3_FORCE_PATH_STYLE` | 设置为 true 以强制请求使用路径样式寻址 | 布尔值 | false |
|
||||
| `SHOW_COMMUNITY_NODES` | 显示由社区创建的节点 | 布尔值 | |
|
||||
| `DISABLED_NODES` | 从界面中隐藏节点(以逗号分隔的节点名称列表) | 字符串 | |
|
||||
|
||||
您也可以在使用 `npx` 时指定环境变量。例如:
|
||||
|
||||
|
|
|
|||
|
|
@ -1,8 +1,9 @@
|
|||
<!-- markdownlint-disable MD030 -->
|
||||
|
||||
<img width="100%" src="https://github.com/FlowiseAI/Flowise/blob/main/images/flowise.png?raw=true"></a>
|
||||
|
||||
# Flowise - LLM アプリを簡単に構築
|
||||
<p align="center">
|
||||
<img src="https://github.com/FlowiseAI/Flowise/blob/main/images/flowise_white.svg#gh-light-mode-only">
|
||||
<img src="https://github.com/FlowiseAI/Flowise/blob/main/images/flowise_dark.svg#gh-dark-mode-only">
|
||||
</p>
|
||||
|
||||
[](https://github.com/FlowiseAI/Flowise/releases)
|
||||
[](https://discord.gg/jbaHfsRVBW)
|
||||
|
|
@ -10,11 +11,11 @@
|
|||
[](https://star-history.com/#FlowiseAI/Flowise)
|
||||
[](https://github.com/FlowiseAI/Flowise/fork)
|
||||
|
||||
[English](../README.md) | [繁體中文](./README-TW.md) | [簡體中文](./README-ZH.md) | 日本語 | [한국어](./README-KR.md)
|
||||
[English](../README.md) | [繁體中文](./README-TW.md) | [简体中文](./README-ZH.md) | 日本語 | [한국어](./README-KR.md)
|
||||
|
||||
<h3>ドラッグ&ドロップでカスタマイズした LLM フローを構築できる UI</h3>
|
||||
<h3>AIエージェントをビジュアルに構築</h3>
|
||||
<a href="https://github.com/FlowiseAI/Flowise">
|
||||
<img width="100%" src="https://github.com/FlowiseAI/Flowise/blob/main/images/flowise.gif?raw=true"></a>
|
||||
<img width="100%" src="https://github.com/FlowiseAI/Flowise/blob/main/images/flowise_agentflow.gif?raw=true"></a>
|
||||
|
||||
## ⚡ クイックスタート
|
||||
|
||||
|
|
@ -30,12 +31,6 @@
|
|||
npx flowise start
|
||||
```
|
||||
|
||||
ユーザー名とパスワードを入力
|
||||
|
||||
```bash
|
||||
npx flowise start --FLOWISE_USERNAME=user --FLOWISE_PASSWORD=1234
|
||||
```
|
||||
|
||||
3. [http://localhost:3000](http://localhost:3000) を開く
|
||||
|
||||
## 🐳 Docker
|
||||
|
|
@ -126,15 +121,6 @@ Flowise には、3 つの異なるモジュールが 1 つの mono リポジト
|
|||
|
||||
コードの変更は [http://localhost:8080](http://localhost:8080) に自動的にアプリをリロードします
|
||||
|
||||
## 🔒 認証
|
||||
|
||||
アプリレベルの認証を有効にするには、 `FLOWISE_USERNAME` と `FLOWISE_PASSWORD` を `packages/server` の `.env` ファイルに追加します:
|
||||
|
||||
```
|
||||
FLOWISE_USERNAME=user
|
||||
FLOWISE_PASSWORD=1234
|
||||
```
|
||||
|
||||
## 🌱 環境変数
|
||||
|
||||
Flowise は、インスタンスを設定するためのさまざまな環境変数をサポートしています。`packages/server` フォルダ内の `.env` ファイルで以下の変数を指定することができる。[続き](https://github.com/FlowiseAI/Flowise/blob/main/CONTRIBUTING.md#-env-variables)を読む
|
||||
|
|
@ -196,9 +182,9 @@ Flowise は、インスタンスを設定するためのさまざまな環境変
|
|||
<img src="https://contrib.rocks/image?repo=FlowiseAI/Flowise" />
|
||||
</a>
|
||||
|
||||
[コントリビューティングガイド](CONTRIBUTING.md)を参照してください。質問や問題があれば、[Discord](https://discord.gg/jbaHfsRVBW) までご連絡ください。
|
||||
[コントリビューティングガイド](../CONTRIBUTING.md)を参照してください。質問や問題があれば、[Discord](https://discord.gg/jbaHfsRVBW) までご連絡ください。
|
||||
[](https://star-history.com/#FlowiseAI/Flowise&Date)
|
||||
|
||||
## 📄 ライセンス
|
||||
|
||||
このリポジトリのソースコードは、[Apache License Version 2.0](LICENSE.md)の下で利用可能です。
|
||||
このリポジトリのソースコードは、[Apache License Version 2.0](../LICENSE.md)の下で利用可能です。
|
||||
|
|
|
|||
|
|
@ -1,8 +1,9 @@
|
|||
<!-- markdownlint-disable MD030 -->
|
||||
|
||||
<img width="100%" src="https://github.com/FlowiseAI/Flowise/blob/main/images/flowise.png?raw=true"></a>
|
||||
|
||||
# Flowise - 간편한 LLM 애플리케이션 제작
|
||||
<p align="center">
|
||||
<img src="https://github.com/FlowiseAI/Flowise/blob/main/images/flowise_white.svg#gh-light-mode-only">
|
||||
<img src="https://github.com/FlowiseAI/Flowise/blob/main/images/flowise_dark.svg#gh-dark-mode-only">
|
||||
</p>
|
||||
|
||||
[](https://github.com/FlowiseAI/Flowise/releases)
|
||||
[](https://discord.gg/jbaHfsRVBW)
|
||||
|
|
@ -10,11 +11,11 @@
|
|||
[](https://star-history.com/#FlowiseAI/Flowise)
|
||||
[](https://github.com/FlowiseAI/Flowise/fork)
|
||||
|
||||
[English](../README.md) | [繁體中文](./README-TW.md) | [簡體中文](./README-ZH.md) | [日本語](./README-JA.md) | 한국어
|
||||
[English](../README.md) | [繁體中文](./README-TW.md) | [简体中文](./README-ZH.md) | [日本語](./README-JA.md) | 한국어
|
||||
|
||||
<h3>드래그 앤 드롭 UI로 맞춤형 LLM 플로우 구축하기</h3>
|
||||
<h3>AI 에이전트를 시각적으로 구축하세요</h3>
|
||||
<a href="https://github.com/FlowiseAI/Flowise">
|
||||
<img width="100%" src="https://github.com/FlowiseAI/Flowise/blob/main/images/flowise.gif?raw=true"></a>
|
||||
<img width="100%" src="https://github.com/FlowiseAI/Flowise/blob/main/images/flowise_agentflow.gif?raw=true"></a>
|
||||
|
||||
## ⚡빠른 시작 가이드
|
||||
|
||||
|
|
@ -30,12 +31,6 @@
|
|||
npx flowise start
|
||||
```
|
||||
|
||||
사용자 이름과 비밀번호로 시작하기
|
||||
|
||||
```bash
|
||||
npx flowise start --FLOWISE_USERNAME=user --FLOWISE_PASSWORD=1234
|
||||
```
|
||||
|
||||
3. [http://localhost:3000](http://localhost:3000) URL 열기
|
||||
|
||||
## 🐳 도커(Docker)를 활용하여 시작하기
|
||||
|
|
@ -126,15 +121,6 @@ Flowise는 단일 리포지토리에 3개의 서로 다른 모듈이 있습니
|
|||
|
||||
코드가 변경되면 [http://localhost:8080](http://localhost:8080)에서 자동으로 애플리케이션을 새로고침 합니다.
|
||||
|
||||
## 🔒 인증
|
||||
|
||||
애플리케이션 수준의 인증을 사용하려면 `packages/server`의 `.env` 파일에 `FLOWISE_USERNAME` 및 `FLOWISE_PASSWORD`를 추가합니다:
|
||||
|
||||
```
|
||||
FLOWISE_USERNAME=user
|
||||
FLOWISE_PASSWORD=1234
|
||||
```
|
||||
|
||||
## 🌱 환경 변수
|
||||
|
||||
Flowise는 인스턴스 구성을 위한 다양한 환경 변수를 지원합니다. `packages/server` 폴더 내 `.env` 파일에 다양한 환경 변수를 지정할 수 있습니다. [자세히 보기](https://github.com/FlowiseAI/Flowise/blob/main/CONTRIBUTING.md#-env-variables)
|
||||
|
|
@ -196,9 +182,9 @@ Flowise는 인스턴스 구성을 위한 다양한 환경 변수를 지원합니
|
|||
<img src="https://contrib.rocks/image?repo=FlowiseAI/Flowise" />
|
||||
</a>
|
||||
|
||||
[contributing guide](CONTRIBUTING.md)를 살펴보세요. 디스코드 [Discord](https://discord.gg/jbaHfsRVBW) 채널에서도 이슈나 질의응답을 진행하실 수 있습니다.
|
||||
[contributing guide](../CONTRIBUTING.md)를 살펴보세요. 디스코드 [Discord](https://discord.gg/jbaHfsRVBW) 채널에서도 이슈나 질의응답을 진행하실 수 있습니다.
|
||||
[](https://star-history.com/#FlowiseAI/Flowise&Date)
|
||||
|
||||
## 📄 라이센스
|
||||
|
||||
본 리포지토리의 소스코드는 [Apache License Version 2.0](LICENSE.md) 라이센스가 적용됩니다.
|
||||
본 리포지토리의 소스코드는 [Apache License Version 2.0](../LICENSE.md) 라이센스가 적용됩니다.
|
||||
|
|
|
|||
|
|
@ -1,8 +1,9 @@
|
|||
<!-- markdownlint-disable MD030 -->
|
||||
|
||||
<img width="100%" src="https://github.com/FlowiseAI/Flowise/blob/main/images/flowise.png?raw=true"></a>
|
||||
|
||||
# Flowise - 輕鬆構建 LLM 應用
|
||||
<p align="center">
|
||||
<img src="https://github.com/FlowiseAI/Flowise/blob/main/images/flowise_white.svg#gh-light-mode-only">
|
||||
<img src="https://github.com/FlowiseAI/Flowise/blob/main/images/flowise_dark.svg#gh-dark-mode-only">
|
||||
</p>
|
||||
|
||||
[](https://github.com/FlowiseAI/Flowise/releases)
|
||||
[](https://discord.gg/jbaHfsRVBW)
|
||||
|
|
@ -10,13 +11,13 @@
|
|||
[](https://star-history.com/#FlowiseAI/Flowise)
|
||||
[](https://github.com/FlowiseAI/Flowise/fork)
|
||||
|
||||
[English](../README.md) | 繁體中文 | [簡體中文](./i18n/README-ZH.md) | [日本語](./i18n/README-JA.md) | [한국어](./i18n/README-KR.md)
|
||||
[English](../README.md) | 繁體中文 | [简体中文](./README-ZH.md) | [日本語](./README-JA.md) | [한국어](./README-KR.md)
|
||||
|
||||
<h3>拖放 UI 以構建自定義的 LLM 流程</h3>
|
||||
<h3>可視化建置 AI/LLM 流程</h3>
|
||||
<a href="https://github.com/FlowiseAI/Flowise">
|
||||
<img width="100%" src="https://github.com/FlowiseAI/Flowise/blob/main/images/flowise.gif?raw=true"></a>
|
||||
<img width="100%" src="https://github.com/FlowiseAI/Flowise/blob/main/images/flowise_agentflow.gif?raw=true"></a>
|
||||
|
||||
## ⚡快速開始
|
||||
## ⚡ 快速開始
|
||||
|
||||
下載並安裝 [NodeJS](https://nodejs.org/en/download) >= 18.15.0
|
||||
|
||||
|
|
@ -30,28 +31,22 @@
|
|||
npx flowise start
|
||||
```
|
||||
|
||||
使用用戶名和密碼
|
||||
|
||||
```bash
|
||||
npx flowise start --FLOWISE_USERNAME=user --FLOWISE_PASSWORD=1234
|
||||
```
|
||||
|
||||
3. 打開 [http://localhost:3000](http://localhost:3000)
|
||||
|
||||
## 🐳 Docker
|
||||
|
||||
### Docker Compose
|
||||
|
||||
1. 克隆 Flowise 項目
|
||||
2. 進入項目根目錄的 `docker` 文件夾
|
||||
3. 複製 `.env.example` 文件,粘貼到相同位置,並重命名為 `.env` 文件
|
||||
1. 複製 Flowise 專案
|
||||
2. 進入專案根目錄的 `docker` 資料夾
|
||||
3. 複製 `.env.example` 文件,貼到相同位置,並重新命名為 `.env` 文件
|
||||
4. `docker compose up -d`
|
||||
5. 打開 [http://localhost:3000](http://localhost:3000)
|
||||
6. 您可以通過 `docker compose stop` 停止容器
|
||||
6. 您可以透過 `docker compose stop` 停止容器
|
||||
|
||||
### Docker 映像
|
||||
|
||||
1. 本地構建映像:
|
||||
1. 本地建置映像:
|
||||
```bash
|
||||
docker build --no-cache -t flowise .
|
||||
```
|
||||
|
|
@ -68,7 +63,7 @@
|
|||
|
||||
## 👨💻 開發者
|
||||
|
||||
Flowise 在單個 mono 存儲庫中有 3 個不同的模塊。
|
||||
Flowise 在單個 mono 儲存庫中有 3 個不同的模組。
|
||||
|
||||
- `server`: 提供 API 邏輯的 Node 後端
|
||||
- `ui`: React 前端
|
||||
|
|
@ -84,33 +79,33 @@ Flowise 在單個 mono 存儲庫中有 3 個不同的模塊。
|
|||
|
||||
### 設置
|
||||
|
||||
1. 克隆存儲庫
|
||||
1. 複製儲存庫
|
||||
|
||||
```bash
|
||||
git clone https://github.com/FlowiseAI/Flowise.git
|
||||
```
|
||||
|
||||
2. 進入存儲庫文件夾
|
||||
2. 進入儲存庫文件夾
|
||||
|
||||
```bash
|
||||
cd Flowise
|
||||
```
|
||||
|
||||
3. 安裝所有模塊的所有依賴項:
|
||||
3. 安裝所有模組的所有依賴項:
|
||||
|
||||
```bash
|
||||
pnpm install
|
||||
```
|
||||
|
||||
4. 構建所有代碼:
|
||||
4. 建置所有程式碼:
|
||||
|
||||
```bash
|
||||
pnpm build
|
||||
```
|
||||
|
||||
<details>
|
||||
<summary>退出代碼 134(JavaScript 堆內存不足)</summary>
|
||||
如果在運行上述 `build` 腳本時遇到此錯誤,請嘗試增加 Node.js 堆大小並重新運行腳本:
|
||||
<summary>Exit code 134(JavaScript heap out of memory)</summary>
|
||||
如果在運行上述 `build` 腳本時遇到此錯誤,請嘗試增加 Node.js 中的 Heap 記憶體大小並重新運行腳本:
|
||||
|
||||
export NODE_OPTIONS="--max-old-space-size=4096"
|
||||
pnpm build
|
||||
|
|
@ -123,9 +118,9 @@ Flowise 在單個 mono 存儲庫中有 3 個不同的模塊。
|
|||
pnpm start
|
||||
```
|
||||
|
||||
您現在可以訪問 [http://localhost:3000](http://localhost:3000)
|
||||
您現在可以開啟 [http://localhost:3000](http://localhost:3000)
|
||||
|
||||
6. 對於開發構建:
|
||||
6. 對於開發建置:
|
||||
|
||||
- 在 `packages/ui` 中創建 `.env` 文件並指定 `VITE_PORT`(參考 `.env.example`)
|
||||
- 在 `packages/server` 中創建 `.env` 文件並指定 `PORT`(參考 `.env.example`)
|
||||
|
|
@ -135,28 +130,19 @@ Flowise 在單個 mono 存儲庫中有 3 個不同的模塊。
|
|||
pnpm dev
|
||||
```
|
||||
|
||||
任何代碼更改都會自動重新加載應用程序 [http://localhost:8080](http://localhost:8080)
|
||||
任何程式碼更改都會自動重新加載應用程式 [http://localhost:8080](http://localhost:8080)
|
||||
|
||||
## 🔒 認證
|
||||
## 🌱 環境變數
|
||||
|
||||
要啟用應用級別的身份驗證,請在 `packages/server` 中的 `.env` 文件中添加 `FLOWISE_USERNAME` 和 `FLOWISE_PASSWORD`:
|
||||
|
||||
```
|
||||
FLOWISE_USERNAME=user
|
||||
FLOWISE_PASSWORD=1234
|
||||
```
|
||||
|
||||
## 🌱 環境變量
|
||||
|
||||
Flowise 支持不同的環境變量來配置您的實例。您可以在 `packages/server` 文件夾中的 `.env` 文件中指定以下變量。閱讀 [更多](https://github.com/FlowiseAI/Flowise/blob/main/CONTRIBUTING.md#-env-variables)
|
||||
Flowise 支持不同的環境變數來配置您的實例。您可以在 `packages/server` 文件夾中的 `.env` 文件中指定以下變數。閱讀 [更多](https://github.com/FlowiseAI/Flowise/blob/main/CONTRIBUTING.md#-env-variables)
|
||||
|
||||
## 📖 文檔
|
||||
|
||||
[Flowise 文檔](https://docs.flowiseai.com/)
|
||||
|
||||
## 🌐 自我托管
|
||||
## 🌐 自行架設
|
||||
|
||||
在您現有的基礎設施中部署 Flowise 自我托管,我們支持各種 [部署](https://docs.flowiseai.com/configuration/deployment)
|
||||
在您現有的基礎設施中部署 Flowise,我們支持各種自行架設選項 [部署](https://docs.flowiseai.com/configuration/deployment)
|
||||
|
||||
- [AWS](https://docs.flowiseai.com/configuration/deployment/aws)
|
||||
- [Azure](https://docs.flowiseai.com/configuration/deployment/azure)
|
||||
|
|
@ -192,9 +178,9 @@ Flowise 支持不同的環境變量來配置您的實例。您可以在 `package
|
|||
|
||||
</details>
|
||||
|
||||
## ☁️ Flowise 雲
|
||||
## ☁️ Flowise 雲端平台
|
||||
|
||||
[開始使用 Flowise 雲](https://flowiseai.com/)
|
||||
[開始使用 Flowise 雲端平台](https://flowiseai.com/)
|
||||
|
||||
## 🙋 支持
|
||||
|
||||
|
|
@ -208,9 +194,9 @@ Flowise 支持不同的環境變量來配置您的實例。您可以在 `package
|
|||
<img src="https://contrib.rocks/image?repo=FlowiseAI/Flowise" />
|
||||
</a>
|
||||
|
||||
請參閱 [貢獻指南](CONTRIBUTING.md)。如果您有任何問題或問題,請通過 [Discord](https://discord.gg/jbaHfsRVBW) 與我們聯繫。
|
||||
請參閱 [貢獻指南](../CONTRIBUTING.md)。如果您有任何問題或疑問,請透過 [Discord](https://discord.gg/jbaHfsRVBW) 與我們聯繫。
|
||||
[](https://star-history.com/#FlowiseAI/Flowise&Date)
|
||||
|
||||
## 📄 許可證
|
||||
|
||||
此存儲庫中的源代碼根據 [Apache 許可證版本 2.0](LICENSE.md) 提供。
|
||||
此儲存庫中的原始碼根據 [Apache 2.0 授權條款](../LICENSE.md) 授權使用。
|
||||
|
|
|
|||
|
|
@ -1,8 +1,9 @@
|
|||
<!-- markdownlint-disable MD030 -->
|
||||
|
||||
<img width="100%" src="https://github.com/FlowiseAI/Flowise/blob/main/images/flowise.png?raw=true"></a>
|
||||
|
||||
# Flowise - 轻松构建 LLM 应用程序
|
||||
<p align="center">
|
||||
<img src="https://github.com/FlowiseAI/Flowise/blob/main/images/flowise_white.svg#gh-light-mode-only">
|
||||
<img src="https://github.com/FlowiseAI/Flowise/blob/main/images/flowise_dark.svg#gh-dark-mode-only">
|
||||
</p>
|
||||
|
||||
[](https://github.com/FlowiseAI/Flowise/releases)
|
||||
[](https://discord.gg/jbaHfsRVBW)
|
||||
|
|
@ -10,11 +11,11 @@
|
|||
[](https://star-history.com/#FlowiseAI/Flowise)
|
||||
[](https://github.com/FlowiseAI/Flowise/fork)
|
||||
|
||||
[English](../README.md) | 簡體中文 | [日本語](./README-JA.md) | [한국어](./README-KR.md)
|
||||
[English](../README.md) | [繁體中文](./README-TW.md) | 简体中文 | [日本語](./README-JA.md) | [한국어](./README-KR.md)
|
||||
|
||||
<h3>拖放界面构建定制化的LLM流程</h3>
|
||||
<h3>可视化构建 AI/LLM 流程</h3>
|
||||
<a href="https://github.com/FlowiseAI/Flowise">
|
||||
<img width="100%" src="https://github.com/FlowiseAI/Flowise/blob/main/images/flowise.gif?raw=true"></a>
|
||||
<img width="100%" src="https://github.com/FlowiseAI/Flowise/blob/main/images/flowise_agentflow.gif?raw=true"></a>
|
||||
|
||||
## ⚡ 快速入门
|
||||
|
||||
|
|
@ -30,12 +31,6 @@
|
|||
npx flowise start
|
||||
```
|
||||
|
||||
使用用户名和密码
|
||||
|
||||
```bash
|
||||
npx flowise start --FLOWISE_USERNAME=user --FLOWISE_PASSWORD=1234
|
||||
```
|
||||
|
||||
3. 打开 [http://localhost:3000](http://localhost:3000)
|
||||
|
||||
## 🐳 Docker
|
||||
|
|
@ -126,15 +121,6 @@ Flowise 在一个单一的代码库中有 3 个不同的模块。
|
|||
|
||||
任何代码更改都会自动重新加载应用程序,访问 [http://localhost:8080](http://localhost:8080)
|
||||
|
||||
## 🔒 认证
|
||||
|
||||
要启用应用程序级身份验证,在 `packages/server` 的 `.env` 文件中添加 `FLOWISE_USERNAME` 和 `FLOWISE_PASSWORD`:
|
||||
|
||||
```
|
||||
FLOWISE_USERNAME=user
|
||||
FLOWISE_PASSWORD=1234
|
||||
```
|
||||
|
||||
## 🌱 环境变量
|
||||
|
||||
Flowise 支持不同的环境变量来配置您的实例。您可以在 `packages/server` 文件夹中的 `.env` 文件中指定以下变量。了解更多信息,请阅读[文档](https://github.com/FlowiseAI/Flowise/blob/main/CONTRIBUTING.md#-env-variables)
|
||||
|
|
@ -170,9 +156,9 @@ Flowise 支持不同的环境变量来配置您的实例。您可以在 `package
|
|||
|
||||
[](https://elest.io/open-source/flowiseai)
|
||||
|
||||
- [Sealos](https://cloud.sealos.io/?openapp=system-template%3FtemplateName%3Dflowise)
|
||||
- [Sealos](https://template.sealos.io/deploy?templateName=flowise)
|
||||
|
||||
[](https://cloud.sealos.io/?openapp=system-template%3FtemplateName%3Dflowise)
|
||||
[](https://template.sealos.io/deploy?templateName=flowise)
|
||||
|
||||
- [RepoCloud](https://repocloud.io/details/?app_id=29)
|
||||
|
||||
|
|
@ -196,8 +182,8 @@ Flowise 支持不同的环境变量来配置您的实例。您可以在 `package
|
|||
<img src="https://contrib.rocks/image?repo=FlowiseAI/Flowise" />
|
||||
</a>
|
||||
|
||||
参见[贡献指南](CONTRIBUTING.md)。如果您有任何问题或问题,请在[Discord](https://discord.gg/jbaHfsRVBW)上与我们联系。
|
||||
参见[贡献指南](CONTRIBUTING-ZH.md)。如果您有任何问题或疑问,请在[Discord](https://discord.gg/jbaHfsRVBW)上与我们联系。
|
||||
|
||||
## 📄 许可证
|
||||
|
||||
此代码库中的源代码在[Apache License Version 2.0 许可证](LICENSE.md)下提供。
|
||||
此代码库中的源代码在[Apache License Version 2.0 许可证](../LICENSE.md)下提供。
|
||||
|
|
|
|||
Binary file not shown.
After Width: | Height: | Size: 14 MiB
File diff suppressed because one or more lines are too long
After Width: | Height: | Size: 40 KiB
File diff suppressed because one or more lines are too long
After Width: | Height: | Size: 40 KiB
|
|
@ -1,6 +1,7 @@
|
|||
version: "2"
|
||||
services:
|
||||
otel-collector:
|
||||
read_only: true
|
||||
image: otel/opentelemetry-collector-contrib
|
||||
command: ["--config=/etc/otelcol-contrib/config.yaml", "--feature-gates=-exporter.datadogexporter.DisableAPMStats", "${OTELCOL_ARGS}"]
|
||||
volumes:
|
||||
|
|
|
|||
26
package.json
|
|
@ -1,6 +1,6 @@
|
|||
{
|
||||
"name": "flowise",
|
||||
"version": "2.2.7",
|
||||
"version": "3.0.11",
|
||||
"private": true,
|
||||
"homepage": "https://flowiseai.com",
|
||||
"workspaces": [
|
||||
|
|
@ -13,13 +13,17 @@
|
|||
"scripts": {
|
||||
"build": "turbo run build",
|
||||
"build-force": "pnpm clean && turbo run build --force",
|
||||
"dev": "turbo run dev --parallel",
|
||||
"dev": "turbo run dev --parallel --no-cache",
|
||||
"start": "run-script-os",
|
||||
"start:windows": "cd packages/server/bin && run start",
|
||||
"start:default": "cd packages/server/bin && ./run start",
|
||||
"start-worker": "run-script-os",
|
||||
"start-worker:windows": "cd packages/server/bin && run worker",
|
||||
"start-worker:default": "cd packages/server/bin && ./run worker",
|
||||
"user": "run-script-os",
|
||||
"user:windows": "cd packages/server/bin && run user",
|
||||
"user:default": "cd packages/server/bin && ./run user",
|
||||
"test": "turbo run test",
|
||||
"clean": "pnpm --filter \"./packages/**\" clean",
|
||||
"nuke": "pnpm --filter \"./packages/**\" nuke && rimraf node_modules .turbo",
|
||||
"format": "prettier --write \"**/*.{ts,tsx,md}\"",
|
||||
|
|
@ -47,7 +51,7 @@
|
|||
"eslint-plugin-react-hooks": "^4.6.0",
|
||||
"eslint-plugin-unused-imports": "^2.0.0",
|
||||
"husky": "^8.0.1",
|
||||
"kill-port": "^2.0.1",
|
||||
"kill-port": "2.0.1",
|
||||
"lint-staged": "^13.0.3",
|
||||
"prettier": "^2.7.1",
|
||||
"pretty-quick": "^3.1.3",
|
||||
|
|
@ -62,20 +66,26 @@
|
|||
"sqlite3"
|
||||
],
|
||||
"overrides": {
|
||||
"axios": "1.7.9",
|
||||
"axios": "1.12.0",
|
||||
"body-parser": "2.0.2",
|
||||
"braces": "3.0.3",
|
||||
"cross-spawn": "7.0.6",
|
||||
"form-data": "4.0.4",
|
||||
"glob-parent": "6.0.2",
|
||||
"http-proxy-middleware": "3.0.3",
|
||||
"json5": "2.2.3",
|
||||
"nth-check": "2.1.1",
|
||||
"path-to-regexp": "0.1.12",
|
||||
"prismjs": "1.29.0",
|
||||
"rollup": "4.45.0",
|
||||
"semver": "7.7.1",
|
||||
"set-value": "4.1.0",
|
||||
"solid-js": "1.9.7",
|
||||
"tar-fs": "3.1.0",
|
||||
"unset-value": "2.0.1",
|
||||
"webpack-dev-middleware": "7.4.2"
|
||||
"webpack-dev-middleware": "7.4.2",
|
||||
"ws": "8.18.3",
|
||||
"xlsx": "https://cdn.sheetjs.com/xlsx-0.20.3/xlsx-0.20.3.tgz"
|
||||
}
|
||||
},
|
||||
"engines": {
|
||||
|
|
@ -83,11 +93,11 @@
|
|||
"pnpm": ">=9"
|
||||
},
|
||||
"resolutions": {
|
||||
"@google/generative-ai": "^0.22.0",
|
||||
"@google/generative-ai": "^0.24.0",
|
||||
"@grpc/grpc-js": "^1.10.10",
|
||||
"@langchain/core": "0.3.37",
|
||||
"@langchain/core": "0.3.61",
|
||||
"@qdrant/openapi-typescript-fetch": "1.2.6",
|
||||
"openai": "4.82.0",
|
||||
"openai": "4.96.0",
|
||||
"protobufjs": "7.4.0"
|
||||
},
|
||||
"eslintIgnore": [
|
||||
|
|
|
|||
|
|
@ -1,11 +1,10 @@
|
|||
{
|
||||
"name": "flowise-api",
|
||||
"version": "1.0.2",
|
||||
"version": "1.0.3",
|
||||
"description": "Flowise API documentation server",
|
||||
"scripts": {
|
||||
"build": "tsc",
|
||||
"start": "node dist/index.js",
|
||||
"dev": "concurrently \"tsc-watch --noClear -p ./tsconfig.json\" \"nodemon\"",
|
||||
"lint": "eslint . --ext ts,tsx --report-unused-disable-directives --max-warnings 0"
|
||||
},
|
||||
"license": "SEE LICENSE IN LICENSE.md",
|
||||
|
|
|
|||
|
|
@ -1216,15 +1216,18 @@ paths:
|
|||
security:
|
||||
- bearerAuth: []
|
||||
operationId: createPrediction
|
||||
summary: Create a new prediction
|
||||
description: Create a new prediction
|
||||
summary: Send message to flow and get AI response
|
||||
description: |
|
||||
Send a message to your flow and receive an AI-generated response. This is the primary endpoint for interacting with your flows and assistants.
|
||||
**Authentication**: API key may be required depending on flow settings.
|
||||
parameters:
|
||||
- in: path
|
||||
name: id
|
||||
required: true
|
||||
schema:
|
||||
type: string
|
||||
description: Chatflow ID
|
||||
description: Flow ID - the unique identifier of your flow
|
||||
example: 'your-flow-id'
|
||||
requestBody:
|
||||
content:
|
||||
application/json:
|
||||
|
|
@ -1236,24 +1239,36 @@ paths:
|
|||
properties:
|
||||
question:
|
||||
type: string
|
||||
description: Question to ask during the prediction process
|
||||
description: Question/message to send to the flow
|
||||
example: 'Analyze this uploaded file and summarize its contents'
|
||||
files:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
format: binary
|
||||
description: Files to be uploaded
|
||||
modelName:
|
||||
description: Files to be uploaded (images, audio, documents, etc.)
|
||||
streaming:
|
||||
type: boolean
|
||||
description: Enable streaming responses
|
||||
default: false
|
||||
overrideConfig:
|
||||
type: string
|
||||
nullable: true
|
||||
example: ''
|
||||
description: Other override configurations
|
||||
description: JSON string of configuration overrides
|
||||
example: '{"sessionId":"user-123","temperature":0.7}'
|
||||
history:
|
||||
type: string
|
||||
description: JSON string of conversation history
|
||||
example: '[{"role":"userMessage","content":"Hello"},{"role":"apiMessage","content":"Hi there!"}]'
|
||||
humanInput:
|
||||
type: string
|
||||
description: JSON string of human input for resuming execution
|
||||
example: '{"type":"proceed","feedback":"Continue with the plan"}'
|
||||
required:
|
||||
- question
|
||||
required: true
|
||||
responses:
|
||||
'200':
|
||||
description: Prediction created successfully
|
||||
description: Successful prediction response
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
|
|
@ -1261,45 +1276,106 @@ paths:
|
|||
properties:
|
||||
text:
|
||||
type: string
|
||||
description: The result of the prediction
|
||||
description: The AI-generated response text
|
||||
example: 'Artificial intelligence (AI) is a branch of computer science that focuses on creating systems capable of performing tasks that typically require human intelligence.'
|
||||
json:
|
||||
type: object
|
||||
description: The result of the prediction in JSON format if available
|
||||
description: The result in JSON format if available (for structured outputs)
|
||||
nullable: true
|
||||
question:
|
||||
type: string
|
||||
description: The question asked during the prediction process
|
||||
description: The original question/message sent to the flow
|
||||
example: 'What is artificial intelligence?'
|
||||
chatId:
|
||||
type: string
|
||||
description: The chat ID associated with the prediction
|
||||
description: Unique identifier for the chat session
|
||||
example: 'chat-12345'
|
||||
chatMessageId:
|
||||
type: string
|
||||
description: The chat message ID associated with the prediction
|
||||
description: Unique identifier for this specific message
|
||||
example: 'msg-67890'
|
||||
sessionId:
|
||||
type: string
|
||||
description: The session ID associated with the prediction
|
||||
description: Session identifier for conversation continuity
|
||||
example: 'user-session-123'
|
||||
nullable: true
|
||||
memoryType:
|
||||
type: string
|
||||
description: The memory type associated with the prediction
|
||||
description: Type of memory used for conversation context
|
||||
example: 'Buffer Memory'
|
||||
nullable: true
|
||||
sourceDocuments:
|
||||
type: array
|
||||
description: Documents retrieved from vector store (if RAG is enabled)
|
||||
items:
|
||||
$ref: '#/components/schemas/Document'
|
||||
nullable: true
|
||||
usedTools:
|
||||
type: array
|
||||
description: Tools that were invoked during the response generation
|
||||
items:
|
||||
$ref: '#/components/schemas/UsedTool'
|
||||
fileAnnotations:
|
||||
type: array
|
||||
items:
|
||||
$ref: '#/components/schemas/FileAnnotation'
|
||||
nullable: true
|
||||
'400':
|
||||
description: Invalid input provided
|
||||
description: Bad Request - Invalid input provided or request format is incorrect
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
type: object
|
||||
properties:
|
||||
error:
|
||||
type: string
|
||||
example: 'Invalid request format. Check required fields and parameter types.'
|
||||
'401':
|
||||
description: Unauthorized - API key required or invalid
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
type: object
|
||||
properties:
|
||||
error:
|
||||
type: string
|
||||
example: 'Unauthorized access. Please verify your API key.'
|
||||
'404':
|
||||
description: Chatflow not found
|
||||
description: Not Found - Chatflow with specified ID does not exist
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
type: object
|
||||
properties:
|
||||
error:
|
||||
type: string
|
||||
example: 'Chatflow not found. Please verify the chatflow ID.'
|
||||
'413':
|
||||
description: Payload Too Large - Request payload exceeds size limits
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
type: object
|
||||
properties:
|
||||
error:
|
||||
type: string
|
||||
example: 'Request payload too large. Please reduce file sizes or split large requests.'
|
||||
'422':
|
||||
description: Validation error
|
||||
description: Validation Error - Request validation failed
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
type: object
|
||||
properties:
|
||||
error:
|
||||
type: string
|
||||
example: 'Validation failed. Check parameter requirements and data types.'
|
||||
'500':
|
||||
description: Internal server error
|
||||
description: Internal Server Error - Flow configuration or execution error
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
type: object
|
||||
properties:
|
||||
error:
|
||||
type: string
|
||||
example: 'Internal server error. Check flow configuration and node settings.'
|
||||
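The prediction endpoint documented above is easiest to see end to end as a client call. The sketch below is a minimal, hypothetical TypeScript client for the prediction route (assumed here to be `POST /api/v1/prediction/{id}`); the base URL, flow ID, and API key are placeholders, and the `Authorization` header is only needed when the flow enforces API-key auth, per the `bearerAuth` security scheme above.

```typescript
// Minimal sketch of calling the prediction endpoint described above.
// FLOWISE_URL, FLOW_ID and API_KEY are placeholders, not real values.
const FLOWISE_URL = 'http://localhost:3000'
const FLOW_ID = 'your-flow-id'
const API_KEY = '<your-api-key>' // only needed if the flow enforces API-key auth

interface PredictionResponse {
    text: string
    question?: string
    chatId?: string
    chatMessageId?: string
    sessionId?: string | null
    sourceDocuments?: unknown[]
    usedTools?: unknown[]
}

async function predict(question: string): Promise<PredictionResponse> {
    const res = await fetch(`${FLOWISE_URL}/api/v1/prediction/${FLOW_ID}`, {
        method: 'POST',
        headers: {
            'Content-Type': 'application/json',
            Authorization: `Bearer ${API_KEY}`
        },
        body: JSON.stringify({
            question,
            streaming: false,
            // in the JSON body, overrideConfig is an object (see the PredictionRequest schema below)
            overrideConfig: { sessionId: 'user-123', temperature: 0.7 }
        })
    })
    if (!res.ok) {
        // 400/401/404/413/422/500 responses return { error: string } as documented above
        const { error } = await res.json()
        throw new Error(`Prediction failed (${res.status}): ${error}`)
    }
    return res.json()
}

predict('What is artificial intelligence?').then((r) => console.log(r.text))
```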
/tools:
|
||||
post:
|
||||
tags:
|
||||
|
|
@ -2011,13 +2087,33 @@ components:
|
|||
properties:
|
||||
question:
|
||||
type: string
|
||||
description: The question being asked
|
||||
description: The question/message to send to the flow
|
||||
example: 'What is artificial intelligence?'
|
||||
form:
|
||||
type: object
|
||||
description: The form object to send to the flow (alternative to question for Agentflow V2)
|
||||
additionalProperties: true
|
||||
example:
|
||||
title: 'Example'
|
||||
count: 1
|
||||
streaming:
|
||||
type: boolean
|
||||
description: Enable streaming responses for real-time output
|
||||
default: false
|
||||
example: false
|
||||
overrideConfig:
|
||||
type: object
|
||||
description: The configuration to override the default prediction settings (optional)
|
||||
description: Override flow configuration and pass variables at runtime
|
||||
additionalProperties: true
|
||||
example:
|
||||
sessionId: 'user-session-123'
|
||||
temperature: 0.7
|
||||
maxTokens: 500
|
||||
vars:
|
||||
user_name: 'Alice'
|
||||
history:
|
||||
type: array
|
||||
description: The history messages to be prepended (optional)
|
||||
description: Previous conversation messages for context
|
||||
items:
|
||||
type: object
|
||||
properties:
|
||||
|
|
@ -2030,8 +2126,14 @@ components:
|
|||
type: string
|
||||
description: The content of the message
|
||||
example: 'Hello, how can I help you?'
|
||||
example:
|
||||
- role: 'apiMessage'
|
||||
content: "Hello! I'm an AI assistant. How can I help you today?"
|
||||
- role: 'userMessage'
|
||||
content: "Hi, my name is Sarah and I'm learning about AI"
|
||||
uploads:
|
||||
type: array
|
||||
description: Files to upload (images, audio, documents, etc.)
|
||||
items:
|
||||
type: object
|
||||
properties:
|
||||
|
|
@ -2051,7 +2153,42 @@ components:
|
|||
mime:
|
||||
type: string
|
||||
description: The MIME type of the file or resource
|
||||
enum:
|
||||
[
|
||||
'image/png',
|
||||
'image/jpeg',
|
||||
'image/jpg',
|
||||
'image/gif',
|
||||
'image/webp',
|
||||
'audio/mp4',
|
||||
'audio/webm',
|
||||
'audio/wav',
|
||||
'audio/mpeg',
|
||||
'audio/ogg',
|
||||
'audio/aac'
|
||||
]
|
||||
example: 'image/png'
|
||||
example:
|
||||
- type: 'file'
|
||||
name: 'example.png'
|
||||
data: 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABgAAAAYCAYAAADgdz34AAABjElEQVRIS+2Vv0oDQRDG'
|
||||
mime: 'image/png'
|
||||
humanInput:
|
||||
type: object
|
||||
description: Return human feedback and resume execution from a stopped checkpoint
|
||||
properties:
|
||||
type:
|
||||
type: string
|
||||
enum: [proceed, reject]
|
||||
description: Type of human input response
|
||||
example: 'reject'
|
||||
feedback:
|
||||
type: string
|
||||
description: Feedback to the last output
|
||||
example: 'Include more emoji'
|
||||
example:
|
||||
type: 'reject'
|
||||
feedback: 'Include more emoji'
|
||||
|
||||
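As a concrete instance of the PredictionRequest schema above, the hedged sketch below combines `overrideConfig` variables, prior `history`, an image upload, and a `humanInput` payload for resuming a stopped execution; every identifier and the base64 payload are illustrative placeholders.

```typescript
// Illustrative request body matching the PredictionRequest schema above.
// Session ID, variable values and the base64 payload are placeholders.
const body = {
    question: 'Continue the analysis of the attached chart',
    streaming: false,
    overrideConfig: {
        sessionId: 'user-session-123',
        vars: { user_name: 'Alice' }
    },
    history: [
        { role: 'apiMessage', content: "Hello! I'm an AI assistant. How can I help you today?" },
        { role: 'userMessage', content: 'Hi, my name is Sarah' }
    ],
    uploads: [
        {
            type: 'file',
            name: 'chart.png',
            data: 'data:image/png;base64,<base64-data>', // truncated placeholder
            mime: 'image/png'
        }
    ],
    // only relevant when resuming a flow stopped at a human-in-the-loop checkpoint
    humanInput: { type: 'proceed', feedback: 'Looks good, continue' }
}

console.log(JSON.stringify(body, null, 2))
```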
Tool:
|
||||
type: object
|
||||
|
|
|
|||
|
|
@ -6,7 +6,7 @@
|
|||
|
||||
Flowise 的应用集成。包含节点和凭据。
|
||||
|
||||

|
||||

|
||||
|
||||
安装:
|
||||
|
||||
|
|
|
|||
|
|
@ -6,7 +6,7 @@ English | [中文](./README-ZH.md)
|
|||
|
||||
Apps integration for Flowise. Contain Nodes and Credentials.
|
||||
|
||||

|
||||

|
||||
|
||||
Install:
|
||||
|
||||
|
|
|
|||
|
|
@ -0,0 +1,23 @@
|
|||
import { INodeParams, INodeCredential } from '../src/Interface'
|
||||
|
||||
class AgentflowApi implements INodeCredential {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
inputs: INodeParams[]
|
||||
|
||||
constructor() {
|
||||
this.label = 'Agentflow API'
|
||||
this.name = 'agentflowApi'
|
||||
this.version = 1.0
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'Agentflow Api Key',
|
||||
name: 'agentflowApiKey',
|
||||
type: 'password'
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { credClass: AgentflowApi }
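This credential, and the ones added below, all follow the same `INodeCredential` shape: a label, a machine-readable name, a version, an optional description, and a list of `INodeParams` inputs, exported as `{ credClass }`. A hedged sketch of a new credential written against that pattern (the "Acme" service and its field are invented purely for illustration):

```typescript
import { INodeParams, INodeCredential } from '../src/Interface'

// Hypothetical example credential following the same INodeCredential pattern as above.
// "Acme" is an invented service name used purely for illustration.
class AcmeApi implements INodeCredential {
    label: string
    name: string
    version: number
    description: string
    inputs: INodeParams[]

    constructor() {
        this.label = 'Acme API'
        this.name = 'acmeApi'
        this.version = 1.0
        this.description = 'Example only: API key credential for a hypothetical Acme service'
        this.inputs = [
            {
                label: 'Acme API Key',
                name: 'acmeApiKey',
                type: 'password'
            }
        ]
    }
}

module.exports = { credClass: AcmeApi }
```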
|
||||
|
|
@ -0,0 +1,23 @@
|
|||
import { INodeCredential, INodeParams } from '../src/Interface'
|
||||
|
||||
class CometApi implements INodeCredential {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
inputs: INodeParams[]
|
||||
|
||||
constructor() {
|
||||
this.label = 'Comet API'
|
||||
this.name = 'cometApi'
|
||||
this.version = 1.0
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'Comet API Key',
|
||||
name: 'cometApiKey',
|
||||
type: 'password'
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { credClass: CometApi }
|
||||
|
|
@ -0,0 +1,26 @@
|
|||
import { INodeParams, INodeCredential } from '../src/Interface'
|
||||
|
||||
class ElevenLabsApi implements INodeCredential {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
description: string
|
||||
inputs: INodeParams[]
|
||||
|
||||
constructor() {
|
||||
this.label = 'Eleven Labs API'
|
||||
this.name = 'elevenLabsApi'
|
||||
this.version = 1.0
|
||||
this.description =
|
||||
'Sign up for an Eleven Labs account and <a target="_blank" href="https://elevenlabs.io/app/settings/api-keys">create an API Key</a>.'
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'Eleven Labs API Key',
|
||||
name: 'elevenLabsApiKey',
|
||||
type: 'password'
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { credClass: ElevenLabsApi }
|
||||
|
|
@ -0,0 +1,63 @@
|
|||
import { INodeParams, INodeCredential } from '../src/Interface'
|
||||
const scopes = [
|
||||
'https://www.googleapis.com/auth/gmail.readonly',
|
||||
'https://www.googleapis.com/auth/gmail.compose',
|
||||
'https://www.googleapis.com/auth/gmail.modify',
|
||||
'https://www.googleapis.com/auth/gmail.labels'
|
||||
]
|
||||
|
||||
class GmailOAuth2 implements INodeCredential {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
inputs: INodeParams[]
|
||||
description: string
|
||||
|
||||
constructor() {
|
||||
this.label = 'Gmail OAuth2'
|
||||
this.name = 'gmailOAuth2'
|
||||
this.version = 1.0
|
||||
this.description =
|
||||
'You can find the setup instructions <a target="_blank" href="https://docs.flowiseai.com/integrations/langchain/tools/gmail">here</a>'
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'Authorization URL',
|
||||
name: 'authorizationUrl',
|
||||
type: 'string',
|
||||
default: 'https://accounts.google.com/o/oauth2/v2/auth'
|
||||
},
|
||||
{
|
||||
label: 'Access Token URL',
|
||||
name: 'accessTokenUrl',
|
||||
type: 'string',
|
||||
default: 'https://oauth2.googleapis.com/token'
|
||||
},
|
||||
{
|
||||
label: 'Client ID',
|
||||
name: 'clientId',
|
||||
type: 'string'
|
||||
},
|
||||
{
|
||||
label: 'Client Secret',
|
||||
name: 'clientSecret',
|
||||
type: 'password'
|
||||
},
|
||||
{
|
||||
label: 'Additional Parameters',
|
||||
name: 'additionalParameters',
|
||||
type: 'string',
|
||||
default: 'access_type=offline&prompt=consent',
|
||||
hidden: true
|
||||
},
|
||||
{
|
||||
label: 'Scope',
|
||||
name: 'scope',
|
||||
type: 'string',
|
||||
hidden: true,
|
||||
default: scopes.join(' ')
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { credClass: GmailOAuth2 }
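The Google OAuth2 credentials added here all carry the same fields: authorization URL, token URL, client ID and secret, a hidden space-joined scope string, and `access_type=offline&prompt=consent` as additional parameters. As a rough illustration of how those fields compose in a standard OAuth2 authorization-code flow (not Flowise's internal implementation), here is a minimal sketch assuming a placeholder client ID and redirect URI:

```typescript
// Hedged sketch: composing the OAuth2 authorization URL from the credential fields above.
// clientId and redirectUri are placeholders; Flowise handles the real exchange internally.
const authorizationUrl = 'https://accounts.google.com/o/oauth2/v2/auth'
const clientId = '<your-client-id>'
const redirectUri = '<your-redirect-uri>'
const scope = [
    'https://www.googleapis.com/auth/gmail.readonly',
    'https://www.googleapis.com/auth/gmail.compose',
    'https://www.googleapis.com/auth/gmail.modify',
    'https://www.googleapis.com/auth/gmail.labels'
].join(' ')

const url = new URL(authorizationUrl)
url.searchParams.set('response_type', 'code')
url.searchParams.set('client_id', clientId)
url.searchParams.set('redirect_uri', redirectUri)
url.searchParams.set('scope', scope)
// the hidden "Additional Parameters" field above
url.searchParams.set('access_type', 'offline')
url.searchParams.set('prompt', 'consent')

console.log(url.toString())
```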
|
||||
|
|
@ -0,0 +1,58 @@
|
|||
import { INodeParams, INodeCredential } from '../src/Interface'
|
||||
const scopes = ['https://www.googleapis.com/auth/calendar', 'https://www.googleapis.com/auth/calendar.events']
|
||||
|
||||
class GoogleCalendarOAuth2 implements INodeCredential {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
inputs: INodeParams[]
|
||||
description: string
|
||||
|
||||
constructor() {
|
||||
this.label = 'Google Calendar OAuth2'
|
||||
this.name = 'googleCalendarOAuth2'
|
||||
this.version = 1.0
|
||||
this.description =
|
||||
'You can find the setup instructions <a target="_blank" href="https://docs.flowiseai.com/integrations/langchain/tools/google-calendar">here</a>'
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'Authorization URL',
|
||||
name: 'authorizationUrl',
|
||||
type: 'string',
|
||||
default: 'https://accounts.google.com/o/oauth2/v2/auth'
|
||||
},
|
||||
{
|
||||
label: 'Access Token URL',
|
||||
name: 'accessTokenUrl',
|
||||
type: 'string',
|
||||
default: 'https://oauth2.googleapis.com/token'
|
||||
},
|
||||
{
|
||||
label: 'Client ID',
|
||||
name: 'clientId',
|
||||
type: 'string'
|
||||
},
|
||||
{
|
||||
label: 'Client Secret',
|
||||
name: 'clientSecret',
|
||||
type: 'password'
|
||||
},
|
||||
{
|
||||
label: 'Additional Parameters',
|
||||
name: 'additionalParameters',
|
||||
type: 'string',
|
||||
default: 'access_type=offline&prompt=consent',
|
||||
hidden: true
|
||||
},
|
||||
{
|
||||
label: 'Scope',
|
||||
name: 'scope',
|
||||
type: 'string',
|
||||
hidden: true,
|
||||
default: scopes.join(' ')
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { credClass: GoogleCalendarOAuth2 }
|
||||
|
|
@ -0,0 +1,62 @@
|
|||
import { INodeParams, INodeCredential } from '../src/Interface'
|
||||
const scopes = [
|
||||
'https://www.googleapis.com/auth/documents',
|
||||
'https://www.googleapis.com/auth/drive',
|
||||
'https://www.googleapis.com/auth/drive.file'
|
||||
]
|
||||
|
||||
class GoogleDocsOAuth2 implements INodeCredential {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
inputs: INodeParams[]
|
||||
description: string
|
||||
|
||||
constructor() {
|
||||
this.label = 'Google Docs OAuth2'
|
||||
this.name = 'googleDocsOAuth2'
|
||||
this.version = 1.0
|
||||
this.description =
|
||||
'You can find the setup instructions <a target="_blank" href="https://docs.flowiseai.com/integrations/langchain/tools/google-sheets">here</a>'
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'Authorization URL',
|
||||
name: 'authorizationUrl',
|
||||
type: 'string',
|
||||
default: 'https://accounts.google.com/o/oauth2/v2/auth'
|
||||
},
|
||||
{
|
||||
label: 'Access Token URL',
|
||||
name: 'accessTokenUrl',
|
||||
type: 'string',
|
||||
default: 'https://oauth2.googleapis.com/token'
|
||||
},
|
||||
{
|
||||
label: 'Client ID',
|
||||
name: 'clientId',
|
||||
type: 'string'
|
||||
},
|
||||
{
|
||||
label: 'Client Secret',
|
||||
name: 'clientSecret',
|
||||
type: 'password'
|
||||
},
|
||||
{
|
||||
label: 'Additional Parameters',
|
||||
name: 'additionalParameters',
|
||||
type: 'string',
|
||||
default: 'access_type=offline&prompt=consent',
|
||||
hidden: true
|
||||
},
|
||||
{
|
||||
label: 'Scope',
|
||||
name: 'scope',
|
||||
type: 'string',
|
||||
hidden: true,
|
||||
default: scopes.join(' ')
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { credClass: GoogleDocsOAuth2 }
|
||||
|
|
@ -0,0 +1,62 @@
|
|||
import { INodeParams, INodeCredential } from '../src/Interface'
|
||||
const scopes = [
|
||||
'https://www.googleapis.com/auth/drive',
|
||||
'https://www.googleapis.com/auth/drive.appdata',
|
||||
'https://www.googleapis.com/auth/drive.photos.readonly'
|
||||
]
|
||||
|
||||
class GoogleDriveOAuth2 implements INodeCredential {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
inputs: INodeParams[]
|
||||
description: string
|
||||
|
||||
constructor() {
|
||||
this.label = 'Google Drive OAuth2'
|
||||
this.name = 'googleDriveOAuth2'
|
||||
this.version = 1.0
|
||||
this.description =
|
||||
'You can find the setup instructions <a target="_blank" href="https://docs.flowiseai.com/integrations/langchain/tools/google-drive">here</a>'
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'Authorization URL',
|
||||
name: 'authorizationUrl',
|
||||
type: 'string',
|
||||
default: 'https://accounts.google.com/o/oauth2/v2/auth'
|
||||
},
|
||||
{
|
||||
label: 'Access Token URL',
|
||||
name: 'accessTokenUrl',
|
||||
type: 'string',
|
||||
default: 'https://oauth2.googleapis.com/token'
|
||||
},
|
||||
{
|
||||
label: 'Client ID',
|
||||
name: 'clientId',
|
||||
type: 'string'
|
||||
},
|
||||
{
|
||||
label: 'Client Secret',
|
||||
name: 'clientSecret',
|
||||
type: 'password'
|
||||
},
|
||||
{
|
||||
label: 'Additional Parameters',
|
||||
name: 'additionalParameters',
|
||||
type: 'string',
|
||||
default: 'access_type=offline&prompt=consent',
|
||||
hidden: true
|
||||
},
|
||||
{
|
||||
label: 'Scope',
|
||||
name: 'scope',
|
||||
type: 'string',
|
||||
hidden: true,
|
||||
default: scopes.join(' ')
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { credClass: GoogleDriveOAuth2 }
|
||||
|
|
@ -0,0 +1,62 @@
|
|||
import { INodeParams, INodeCredential } from '../src/Interface'
|
||||
const scopes = [
|
||||
'https://www.googleapis.com/auth/drive.file',
|
||||
'https://www.googleapis.com/auth/spreadsheets',
|
||||
'https://www.googleapis.com/auth/drive.metadata'
|
||||
]
|
||||
|
||||
class GoogleSheetsOAuth2 implements INodeCredential {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
inputs: INodeParams[]
|
||||
description: string
|
||||
|
||||
constructor() {
|
||||
this.label = 'Google Sheets OAuth2'
|
||||
this.name = 'googleSheetsOAuth2'
|
||||
this.version = 1.0
|
||||
this.description =
|
||||
'You can find the setup instructions <a target="_blank" href="https://docs.flowiseai.com/integrations/langchain/tools/google-sheets">here</a>'
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'Authorization URL',
|
||||
name: 'authorizationUrl',
|
||||
type: 'string',
|
||||
default: 'https://accounts.google.com/o/oauth2/v2/auth'
|
||||
},
|
||||
{
|
||||
label: 'Access Token URL',
|
||||
name: 'accessTokenUrl',
|
||||
type: 'string',
|
||||
default: 'https://oauth2.googleapis.com/token'
|
||||
},
|
||||
{
|
||||
label: 'Client ID',
|
||||
name: 'clientId',
|
||||
type: 'string'
|
||||
},
|
||||
{
|
||||
label: 'Client Secret',
|
||||
name: 'clientSecret',
|
||||
type: 'password'
|
||||
},
|
||||
{
|
||||
label: 'Additional Parameters',
|
||||
name: 'additionalParameters',
|
||||
type: 'string',
|
||||
default: 'access_type=offline&prompt=consent',
|
||||
hidden: true
|
||||
},
|
||||
{
|
||||
label: 'Scope',
|
||||
name: 'scope',
|
||||
type: 'string',
|
||||
hidden: true,
|
||||
default: scopes.join(' ')
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { credClass: GoogleSheetsOAuth2 }
|
||||
|
|
@ -0,0 +1,28 @@
|
|||
import { INodeParams, INodeCredential } from '../src/Interface'
|
||||
|
||||
class HTTPApiKeyCredential implements INodeCredential {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
inputs: INodeParams[]
|
||||
|
||||
constructor() {
|
||||
this.label = 'HTTP Api Key'
|
||||
this.name = 'httpApiKey'
|
||||
this.version = 1.0
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'Key',
|
||||
name: 'key',
|
||||
type: 'string'
|
||||
},
|
||||
{
|
||||
label: 'Value',
|
||||
name: 'value',
|
||||
type: 'password'
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { credClass: HTTPApiKeyCredential }
|
||||
|
|
@ -0,0 +1,28 @@
|
|||
import { INodeParams, INodeCredential } from '../src/Interface'
|
||||
|
||||
class HttpBasicAuthCredential implements INodeCredential {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
inputs: INodeParams[]
|
||||
|
||||
constructor() {
|
||||
this.label = 'HTTP Basic Auth'
|
||||
this.name = 'httpBasicAuth'
|
||||
this.version = 1.0
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'Basic Auth Username',
|
||||
name: 'basicAuthUsername',
|
||||
type: 'string'
|
||||
},
|
||||
{
|
||||
label: 'Basic Auth Password',
|
||||
name: 'basicAuthPassword',
|
||||
type: 'password'
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { credClass: HttpBasicAuthCredential }
|
||||
|
|
@ -0,0 +1,23 @@
|
|||
import { INodeParams, INodeCredential } from '../src/Interface'
|
||||
|
||||
class HTTPBearerTokenCredential implements INodeCredential {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
inputs: INodeParams[]
|
||||
|
||||
constructor() {
|
||||
this.label = 'HTTP Bearer Token'
|
||||
this.name = 'httpBearerToken'
|
||||
this.version = 1.0
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'Token',
|
||||
name: 'token',
|
||||
type: 'password'
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { credClass: HTTPBearerTokenCredential }
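The three generic HTTP credentials above (API key, basic auth, bearer token) each map to a different header shape on an outgoing request. A loose illustration, with placeholder URL and secrets, of how each would typically be attached:

```typescript
// Loose illustration of how the three HTTP credential types above map onto request headers.
// The URL and all secret values are placeholders.
const url = 'https://api.example.com/resource'

const headerStyles = {
    // HTTP Api Key: arbitrary header name/value pair, e.g. key = 'X-API-Key'
    apiKey: { 'X-API-Key': '<value>' },
    // HTTP Basic Auth: base64-encoded "username:password"
    basicAuth: { Authorization: 'Basic ' + Buffer.from('<username>:<password>').toString('base64') },
    // HTTP Bearer Token: token sent as a Bearer credential
    bearerToken: { Authorization: 'Bearer <token>' }
}

fetch(url, { headers: headerStyles.bearerToken }).then((res) => console.log(res.status))
```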
|
||||
|
|
@ -0,0 +1,33 @@
|
|||
import { INodeParams, INodeCredential } from '../src/Interface'
|
||||
|
||||
class JiraApi implements INodeCredential {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
description: string
|
||||
inputs: INodeParams[]
|
||||
|
||||
constructor() {
|
||||
this.label = 'Jira API'
|
||||
this.name = 'jiraApi'
|
||||
this.version = 1.0
|
||||
this.description =
|
||||
'Refer to <a target="_blank" href="https://support.atlassian.com/atlassian-account/docs/manage-api-tokens-for-your-atlassian-account/">official guide</a> on how to get an API token for your Atlassian account'
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'User Name',
|
||||
name: 'username',
|
||||
type: 'string',
|
||||
placeholder: 'username@example.com'
|
||||
},
|
||||
{
|
||||
label: 'Access Token',
|
||||
name: 'accessToken',
|
||||
type: 'password',
|
||||
placeholder: '<JIRA_ACCESS_TOKEN>'
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { credClass: JiraApi }
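Jira Cloud API tokens are used together with the account email as HTTP Basic credentials. A hedged sketch of exercising the credential above against Jira's REST API (site URL, email, and token are placeholders; this mirrors Atlassian's documented token usage, not Flowise-internal code):

```typescript
// Hedged sketch: using the Jira username + API token above as HTTP Basic credentials
// against Jira Cloud. The site URL, email and token are placeholders.
const jiraSite = 'https://your-domain.atlassian.net'
const auth = Buffer.from('username@example.com:<JIRA_ACCESS_TOKEN>').toString('base64')

fetch(`${jiraSite}/rest/api/3/myself`, {
    headers: { Authorization: `Basic ${auth}`, Accept: 'application/json' }
}).then((res) => console.log(res.status))
```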
|
||||
|
|
@ -0,0 +1,23 @@
|
|||
import { INodeParams, INodeCredential } from '../src/Interface'
|
||||
|
||||
class LitellmApi implements INodeCredential {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
inputs: INodeParams[]
|
||||
|
||||
constructor() {
|
||||
this.label = 'Litellm API'
|
||||
this.name = 'litellmApi'
|
||||
this.version = 1.0
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'API Key',
|
||||
name: 'litellmApiKey',
|
||||
type: 'password'
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { credClass: LitellmApi }
|
||||
|
|
@ -0,0 +1,27 @@
|
|||
import { INodeParams, INodeCredential } from '../src/Interface'
|
||||
|
||||
class Mem0MemoryApi implements INodeCredential {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
description: string
|
||||
inputs: INodeParams[]
|
||||
|
||||
constructor() {
|
||||
this.label = 'Mem0 Memory API'
|
||||
this.name = 'mem0MemoryApi'
|
||||
this.version = 1.0
|
||||
this.description =
|
||||
'Visit <a target="_blank" href="https://app.mem0.ai/settings/api-keys">Mem0 Platform</a> to get your API credentials'
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'API Key',
|
||||
name: 'apiKey',
|
||||
type: 'password',
|
||||
description: 'API Key from Mem0 dashboard'
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { credClass: Mem0MemoryApi }
|
||||
|
|
@ -0,0 +1,66 @@
|
|||
import { INodeParams, INodeCredential } from '../src/Interface'
|
||||
|
||||
const scopes = [
|
||||
'openid',
|
||||
'offline_access',
|
||||
'Contacts.Read',
|
||||
'Contacts.ReadWrite',
|
||||
'Calendars.Read',
|
||||
'Calendars.Read.Shared',
|
||||
'Calendars.ReadWrite',
|
||||
'Mail.Read',
|
||||
'Mail.ReadWrite',
|
||||
'Mail.ReadWrite.Shared',
|
||||
'Mail.Send',
|
||||
'Mail.Send.Shared',
|
||||
'MailboxSettings.Read'
|
||||
]
|
||||
|
||||
class MsoftOutlookOAuth2 implements INodeCredential {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
description: string
|
||||
inputs: INodeParams[]
|
||||
|
||||
constructor() {
|
||||
this.label = 'Microsoft Outlook OAuth2'
|
||||
this.name = 'microsoftOutlookOAuth2'
|
||||
this.version = 1.0
|
||||
this.description =
|
||||
'You can find the setup instructions <a target="_blank" href="https://docs.flowiseai.com/integrations/langchain/tools/microsoft-outlook">here</a>'
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'Authorization URL',
|
||||
name: 'authorizationUrl',
|
||||
type: 'string',
|
||||
default: 'https://login.microsoftonline.com/<tenantId>/oauth2/v2.0/authorize'
|
||||
},
|
||||
{
|
||||
label: 'Access Token URL',
|
||||
name: 'accessTokenUrl',
|
||||
type: 'string',
|
||||
default: 'https://login.microsoftonline.com/<tenantId>/oauth2/v2.0/token'
|
||||
},
|
||||
{
|
||||
label: 'Client ID',
|
||||
name: 'clientId',
|
||||
type: 'string'
|
||||
},
|
||||
{
|
||||
label: 'Client Secret',
|
||||
name: 'clientSecret',
|
||||
type: 'password'
|
||||
},
|
||||
{
|
||||
label: 'Scope',
|
||||
name: 'scope',
|
||||
type: 'string',
|
||||
hidden: true,
|
||||
default: scopes.join(' ')
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { credClass: MsoftOutlookOAuth2 }
|
||||
|
|
@ -0,0 +1,87 @@
|
|||
import { INodeParams, INodeCredential } from '../src/Interface'
|
||||
|
||||
// Comprehensive scopes for Microsoft Teams operations
|
||||
const scopes = [
|
||||
// Basic authentication
|
||||
'openid',
|
||||
'offline_access',
|
||||
|
||||
// User permissions
|
||||
'User.Read',
|
||||
'User.ReadWrite.All',
|
||||
|
||||
// Teams and Groups
|
||||
'Group.ReadWrite.All',
|
||||
'Team.ReadBasic.All',
|
||||
'Team.Create',
|
||||
'TeamMember.ReadWrite.All',
|
||||
|
||||
// Channels
|
||||
'Channel.ReadBasic.All',
|
||||
'Channel.Create',
|
||||
'Channel.Delete.All',
|
||||
'ChannelMember.ReadWrite.All',
|
||||
|
||||
// Chat operations
|
||||
'Chat.ReadWrite',
|
||||
'Chat.Create',
|
||||
'ChatMember.ReadWrite',
|
||||
|
||||
// Messages
|
||||
'ChatMessage.Send',
|
||||
'ChatMessage.Read',
|
||||
'ChannelMessage.Send',
|
||||
'ChannelMessage.Read.All',
|
||||
|
||||
// Reactions and advanced features
|
||||
'TeamsActivity.Send'
|
||||
]
|
||||
|
||||
class MsoftTeamsOAuth2 implements INodeCredential {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
inputs: INodeParams[]
|
||||
description: string
|
||||
|
||||
constructor() {
|
||||
this.label = 'Microsoft Teams OAuth2'
|
||||
this.name = 'microsoftTeamsOAuth2'
|
||||
this.version = 1.0
|
||||
this.description =
|
||||
'You can find the setup instructions <a target="_blank" href="https://docs.flowiseai.com/integrations/langchain/tools/microsoft-teams">here</a>'
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'Authorization URL',
|
||||
name: 'authorizationUrl',
|
||||
type: 'string',
|
||||
default: 'https://login.microsoftonline.com/<tenantId>/oauth2/v2.0/authorize'
|
||||
},
|
||||
{
|
||||
label: 'Access Token URL',
|
||||
name: 'accessTokenUrl',
|
||||
type: 'string',
|
||||
default: 'https://login.microsoftonline.com/<tenantId>/oauth2/v2.0/token'
|
||||
},
|
||||
{
|
||||
label: 'Client ID',
|
||||
name: 'clientId',
|
||||
type: 'string'
|
||||
},
|
||||
{
|
||||
label: 'Client Secret',
|
||||
name: 'clientSecret',
|
||||
type: 'password'
|
||||
},
|
||||
{
|
||||
label: 'Scope',
|
||||
name: 'scope',
|
||||
type: 'string',
|
||||
hidden: true,
|
||||
default: scopes.join(' ')
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { credClass: MsoftTeamsOAuth2 }
|
||||
|
|
@ -1,4 +1,4 @@
|
|||
import { INodeParams, INodeCredential } from '../src/Interface'
|
||||
import { INodeCredential, INodeParams } from '../src/Interface'
|
||||
|
||||
class NvidiaNIMApi implements INodeCredential {
|
||||
label: string
|
||||
|
|
@ -8,12 +8,12 @@ class NvidiaNIMApi implements INodeCredential {
|
|||
inputs: INodeParams[]
|
||||
|
||||
constructor() {
|
||||
this.label = 'Nvdia NIM API Key'
|
||||
this.label = 'NVIDIA NGC API Key'
|
||||
this.name = 'nvidiaNIMApi'
|
||||
this.version = 1.0
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'Nvidia NIM API Key',
|
||||
label: 'NVIDIA NGC API Key',
|
||||
name: 'nvidiaNIMApiKey',
|
||||
type: 'password'
|
||||
}
|
||||
|
|
|
|||
|
|
@ -0,0 +1,39 @@
|
|||
import { INodeParams, INodeCredential } from '../src/Interface'
|
||||
|
||||
class OpikApi implements INodeCredential {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
description: string
|
||||
inputs: INodeParams[]
|
||||
|
||||
constructor() {
|
||||
this.label = 'Opik API'
|
||||
this.name = 'opikApi'
|
||||
this.version = 1.0
|
||||
this.description =
|
||||
'Refer to <a target="_blank" href="https://www.comet.com/docs/opik/tracing/sdk_configuration">Opik documentation</a> on how to configure Opik credentials'
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'API Key',
|
||||
name: 'opikApiKey',
|
||||
type: 'password',
|
||||
placeholder: '<OPIK_API_KEY>'
|
||||
},
|
||||
{
|
||||
label: 'URL',
|
||||
name: 'opikUrl',
|
||||
type: 'string',
|
||||
placeholder: 'https://www.comet.com/opik/api'
|
||||
},
|
||||
{
|
||||
label: 'Workspace',
|
||||
name: 'opikWorkspace',
|
||||
type: 'string',
|
||||
placeholder: 'default'
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { credClass: OpikApi }
|
||||
|
|
@ -0,0 +1,30 @@
|
|||
import { INodeParams, INodeCredential } from '../src/Interface'
|
||||
|
||||
class OxylabsApiCredential implements INodeCredential {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
description: string
|
||||
inputs: INodeParams[]
|
||||
|
||||
constructor() {
|
||||
this.label = 'Oxylabs API'
|
||||
this.name = 'oxylabsApi'
|
||||
this.version = 1.0
|
||||
this.description = 'Your Oxylabs account username and password, used to authenticate Oxylabs API requests'
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'Oxylabs Username',
|
||||
name: 'username',
|
||||
type: 'string'
|
||||
},
|
||||
{
|
||||
label: 'Oxylabs Password',
|
||||
name: 'password',
|
||||
type: 'password'
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { credClass: OxylabsApiCredential }
|
||||
|
|
@ -0,0 +1,27 @@
|
|||
import { INodeParams, INodeCredential } from '../src/Interface'
|
||||
|
||||
class PerplexityApi implements INodeCredential {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
description: string
|
||||
inputs: INodeParams[]
|
||||
|
||||
constructor() {
|
||||
this.label = 'Perplexity API'
|
||||
this.name = 'perplexityApi'
|
||||
this.version = 1.0
|
||||
this.description =
|
||||
'Refer to <a target="_blank" href="https://docs.perplexity.ai/docs/getting-started">official guide</a> on how to get API key'
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'Perplexity API Key',
|
||||
name: 'perplexityApiKey',
|
||||
type: 'password',
|
||||
placeholder: '<PERPLEXITY_API_KEY>'
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { credClass: PerplexityApi }
|
||||
|
|
@ -0,0 +1,23 @@
|
|||
import { INodeParams, INodeCredential } from '../src/Interface'
|
||||
|
||||
class SambanovaApi implements INodeCredential {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
inputs: INodeParams[]
|
||||
|
||||
constructor() {
|
||||
this.label = 'Sambanova API'
|
||||
this.name = 'sambanovaApi'
|
||||
this.version = 1.0
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'Sambanova Api Key',
|
||||
name: 'sambanovaApiKey',
|
||||
type: 'password'
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { credClass: SambanovaApi }
|
||||
|
|
@ -10,8 +10,8 @@ class TavilyApi implements INodeCredential {
|
|||
constructor() {
|
||||
this.label = 'Tavily API'
|
||||
this.name = 'tavilyApi'
|
||||
this.version = 1.0
|
||||
this.description = 'Tavily API is a real-time API to access Google search results'
|
||||
this.version = 1.1
|
||||
this.description = 'Tavily API is a search engine designed for LLMs and AI agents'
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'Tavily Api Key',
|
||||
|
|
|
|||
|
|
@ -0,0 +1,26 @@
|
|||
import { INodeParams, INodeCredential } from '../src/Interface'
|
||||
|
||||
class TeradataBearerTokenCredential implements INodeCredential {
|
||||
label: string
|
||||
name: string
|
||||
description: string
|
||||
version: number
|
||||
inputs: INodeParams[]
|
||||
|
||||
constructor() {
|
||||
this.label = 'Teradata Bearer Token'
|
||||
this.name = 'teradataBearerToken'
|
||||
this.version = 1.0
|
||||
this.description =
|
||||
'Refer to <a target="_blank" href="https://docs.teradata.com/r/Enterprise_IntelliFlex_VMware/Teradata-Vector-Store-User-Guide/Setting-up-Vector-Store/Importing-Modules-Required-for-Vector-Store">official guide</a> on how to get Teradata Bearer Token'
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'Token',
|
||||
name: 'token',
|
||||
type: 'password'
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { credClass: TeradataBearerTokenCredential }
|
||||
|
|
@ -0,0 +1,28 @@
|
|||
import { INodeParams, INodeCredential } from '../src/Interface'
|
||||
|
||||
class TeradataTD2Credential implements INodeCredential {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
inputs: INodeParams[]
|
||||
|
||||
constructor() {
|
||||
this.label = 'Teradata TD2 Auth'
|
||||
this.name = 'teradataTD2Auth'
|
||||
this.version = 1.0
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'Teradata TD2 Auth Username',
|
||||
name: 'tdUsername',
|
||||
type: 'string'
|
||||
},
|
||||
{
|
||||
label: 'Teradata TD2 Auth Password',
|
||||
name: 'tdPassword',
|
||||
type: 'password'
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { credClass: TeradataTD2Credential }
|
||||
|
|
@ -0,0 +1,47 @@
|
|||
import { INodeParams, INodeCredential } from '../src/Interface'
|
||||
|
||||
class TeradataVectorStoreApiCredentials implements INodeCredential {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
inputs: INodeParams[]
|
||||
|
||||
constructor() {
|
||||
this.label = 'Teradata Vector Store API Credentials'
|
||||
this.name = 'teradataVectorStoreApiCredentials'
|
||||
this.version = 1.0
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'Teradata Host IP',
|
||||
name: 'tdHostIp',
|
||||
type: 'string'
|
||||
},
|
||||
{
|
||||
label: 'Username',
|
||||
name: 'tdUsername',
|
||||
type: 'string'
|
||||
},
|
||||
{
|
||||
label: 'Password',
|
||||
name: 'tdPassword',
|
||||
type: 'password'
|
||||
},
|
||||
{
|
||||
label: 'Vector_Store_Base_URL',
|
||||
name: 'baseURL',
|
||||
description: 'Teradata Vector Store Base URL',
|
||||
placeholder: `Base_URL`,
|
||||
type: 'string'
|
||||
},
|
||||
{
|
||||
label: 'JWT Token',
|
||||
name: 'jwtToken',
|
||||
type: 'password',
|
||||
description: 'Bearer token for JWT authentication',
|
||||
optional: true
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { credClass: TeradataVectorStoreApiCredentials }
|
||||
|
|
@ -0,0 +1,165 @@
|
|||
import { RunCollectorCallbackHandler } from '@langchain/core/tracers/run_collector'
|
||||
import { Run } from '@langchain/core/tracers/base'
|
||||
import { EvaluationRunner } from './EvaluationRunner'
|
||||
import { encoding_for_model, get_encoding } from '@dqbd/tiktoken'
|
||||
|
||||
export class EvaluationRunTracer extends RunCollectorCallbackHandler {
|
||||
evaluationRunId: string
|
||||
model: string
|
||||
|
||||
constructor(id: string) {
|
||||
super()
|
||||
this.evaluationRunId = id
|
||||
}
|
||||
|
||||
async persistRun(run: Run): Promise<void> {
|
||||
return super.persistRun(run)
|
||||
}
|
||||
|
||||
countPromptTokens = (encoding: any, run: Run): number => {
|
||||
let promptTokenCount = 0
|
||||
if (encoding) {
|
||||
if (run.inputs?.messages?.length > 0 && run.inputs?.messages[0]?.length > 0) {
|
||||
run.inputs.messages[0].map((message: any) => {
|
||||
let content = message.content
|
||||
? message.content
|
||||
: message.SystemMessage?.content
|
||||
? message.SystemMessage.content
|
||||
: message.HumanMessage?.content
|
||||
? message.HumanMessage.content
|
||||
: message.AIMessage?.content
|
||||
? message.AIMessage.content
|
||||
: undefined
|
||||
promptTokenCount += content ? encoding.encode(content).length : 0
|
||||
})
|
||||
}
|
||||
if (run.inputs?.prompts?.length > 0) {
|
||||
const content = run.inputs.prompts[0]
|
||||
promptTokenCount += content ? encoding.encode(content).length : 0
|
||||
}
|
||||
}
|
||||
return promptTokenCount
|
||||
}
|
||||
|
||||
countCompletionTokens = (encoding: any, run: Run): number => {
|
||||
let completionTokenCount = 0
|
||||
if (encoding) {
|
||||
if (run.outputs?.generations?.length > 0 && run.outputs?.generations[0]?.length > 0) {
|
||||
run.outputs?.generations[0].map((chunk: any) => {
|
||||
let content = chunk.text ? chunk.text : chunk.message?.content ? chunk.message?.content : undefined
|
||||
completionTokenCount += content ? encoding.encode(content).length : 0
|
||||
})
|
||||
}
|
||||
}
|
||||
return completionTokenCount
|
||||
}
|
||||
|
||||
extractModelName = (run: Run): string => {
|
||||
return (
|
||||
(run?.serialized as any)?.kwargs?.model ||
|
||||
(run?.serialized as any)?.kwargs?.model_name ||
|
||||
(run?.extra as any)?.metadata?.ls_model_name ||
|
||||
(run?.extra as any)?.metadata?.fw_model_name
|
||||
)
|
||||
}
|
||||
|
||||
onLLMEnd?(run: Run): void | Promise<void> {
|
||||
if (run.name) {
|
||||
let provider = run.name
|
||||
if (provider === 'BedrockChat') {
|
||||
provider = 'awsChatBedrock'
|
||||
}
|
||||
EvaluationRunner.addMetrics(
|
||||
this.evaluationRunId,
|
||||
JSON.stringify({
|
||||
provider: provider
|
||||
})
|
||||
)
|
||||
}
|
||||
|
||||
let model = this.extractModelName(run)
|
||||
if (run.outputs?.llmOutput?.tokenUsage) {
|
||||
const tokenUsage = run.outputs?.llmOutput?.tokenUsage
|
||||
if (tokenUsage) {
|
||||
const metric = {
|
||||
completionTokens: tokenUsage.completionTokens,
|
||||
promptTokens: tokenUsage.promptTokens,
|
||||
model: model,
|
||||
totalTokens: tokenUsage.totalTokens
|
||||
}
|
||||
EvaluationRunner.addMetrics(this.evaluationRunId, JSON.stringify(metric))
|
||||
}
|
||||
} else if (
|
||||
run.outputs?.generations?.length > 0 &&
|
||||
run.outputs?.generations[0].length > 0 &&
|
||||
run.outputs?.generations[0][0]?.message?.usage_metadata?.total_tokens
|
||||
) {
|
||||
const usage_metadata = run.outputs?.generations[0][0]?.message?.usage_metadata
|
||||
if (usage_metadata) {
|
||||
const metric = {
|
||||
completionTokens: usage_metadata.output_tokens,
|
||||
promptTokens: usage_metadata.input_tokens,
|
||||
model: model || this.model,
|
||||
totalTokens: usage_metadata.total_tokens
|
||||
}
|
||||
EvaluationRunner.addMetrics(this.evaluationRunId, JSON.stringify(metric))
|
||||
}
|
||||
} else {
|
||||
let encoding: any = undefined
|
||||
let promptInputTokens = 0
|
||||
let completionTokenCount = 0
|
||||
try {
|
||||
encoding = encoding_for_model(model as any)
|
||||
promptInputTokens = this.countPromptTokens(encoding, run)
|
||||
completionTokenCount = this.countCompletionTokens(encoding, run)
|
||||
} catch (e) {
|
||||
try {
|
||||
// as tiktoken will fail for non openai models, assume that is 'cl100k_base'
|
||||
encoding = get_encoding('cl100k_base')
|
||||
promptInputTokens = this.countPromptTokens(encoding, run)
|
||||
completionTokenCount = this.countCompletionTokens(encoding, run)
|
||||
} catch (e) {
|
||||
// stay silent
|
||||
}
|
||||
}
|
||||
const metric = {
|
||||
completionTokens: completionTokenCount,
|
||||
promptTokens: promptInputTokens,
|
||||
model: model,
|
||||
totalTokens: promptInputTokens + completionTokenCount
|
||||
}
|
||||
EvaluationRunner.addMetrics(this.evaluationRunId, JSON.stringify(metric))
|
||||
//cleanup
|
||||
this.model = ''
|
||||
}
|
||||
}
|
||||
|
||||
async onRunUpdate(run: Run): Promise<void> {
|
||||
const json = {
|
||||
[run.run_type]: elapsed(run)
|
||||
}
|
||||
let metric = JSON.stringify(json)
|
||||
if (metric) {
|
||||
EvaluationRunner.addMetrics(this.evaluationRunId, metric)
|
||||
}
|
||||
|
||||
if (run.run_type === 'llm') {
|
||||
let model = this.extractModelName(run)
|
||||
if (model) {
|
||||
EvaluationRunner.addMetrics(this.evaluationRunId, JSON.stringify({ model: model }))
|
||||
this.model = model
|
||||
}
|
||||
// OpenAI non streaming models
|
||||
const estimatedTokenUsage = run.outputs?.llmOutput?.estimatedTokenUsage
|
||||
if (estimatedTokenUsage && typeof estimatedTokenUsage === 'object' && Object.keys(estimatedTokenUsage).length > 0) {
|
||||
EvaluationRunner.addMetrics(this.evaluationRunId, estimatedTokenUsage)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
function elapsed(run: Run) {
|
||||
if (!run.end_time) return ''
|
||||
const elapsed = run.end_time - run.start_time
|
||||
return `${elapsed.toFixed(2)}`
|
||||
}
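The tracer above estimates token counts with `@dqbd/tiktoken`, first trying the model-specific encoding and falling back to `cl100k_base` when the model is unknown to tiktoken. A minimal standalone sketch of that counting pattern, with a placeholder model name and text:

```typescript
import { encoding_for_model, get_encoding, TiktokenModel } from '@dqbd/tiktoken'

// Minimal sketch of the fallback pattern used by EvaluationRunTracer above:
// try the model-specific encoding, fall back to cl100k_base for unknown models.
function countTokens(text: string, model: string): number {
    let encoding
    try {
        encoding = encoding_for_model(model as TiktokenModel)
    } catch {
        encoding = get_encoding('cl100k_base')
    }
    try {
        return encoding.encode(text).length
    } finally {
        // tiktoken encodings hold WASM memory and should be freed after use
        encoding.free()
    }
}

console.log(countTokens('Hello, world', 'gpt-4'))
```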
|
||||
|
|
@ -0,0 +1,186 @@
|
|||
import { ChatMessage, LLMEndEvent, LLMStartEvent, LLMStreamEvent, MessageContentTextDetail, RetrievalEndEvent, Settings } from 'llamaindex'
|
||||
import { EvaluationRunner } from './EvaluationRunner'
|
||||
import { additionalCallbacks, ICommonObject, INodeData } from '../src'
|
||||
import { RetrievalStartEvent } from 'llamaindex/dist/type/llm/types'
|
||||
import { AgentEndEvent, AgentStartEvent } from 'llamaindex/dist/type/agent/types'
|
||||
import { encoding_for_model } from '@dqbd/tiktoken'
|
||||
import { MessageContent } from '@langchain/core/messages'
|
||||
|
||||
export class EvaluationRunTracerLlama {
|
||||
evaluationRunId: string
|
||||
static cbInit = false
|
||||
static startTimes = new Map<string, number>()
|
||||
static models = new Map<string, string>()
|
||||
static tokenCounts = new Map<string, number>()
|
||||
|
||||
constructor(id: string) {
|
||||
this.evaluationRunId = id
|
||||
EvaluationRunTracerLlama.constructCallBacks()
|
||||
}
|
||||
|
||||
static constructCallBacks = () => {
|
||||
if (!EvaluationRunTracerLlama.cbInit) {
|
||||
Settings.callbackManager.on('llm-start', (event: LLMStartEvent) => {
|
||||
const evalID = (event as any).reason.parent?.caller?.evaluationRunId || (event as any).reason.caller?.evaluationRunId
|
||||
if (!evalID) return
|
||||
const model = (event as any).reason?.caller?.model
|
||||
if (model) {
|
||||
EvaluationRunTracerLlama.models.set(evalID, model)
|
||||
try {
|
||||
const encoding = encoding_for_model(model)
|
||||
if (encoding) {
|
||||
const { messages } = event.detail.payload
|
||||
let tokenCount = messages.reduce((count: number, message: ChatMessage) => {
|
||||
return count + encoding.encode(extractText(message.content)).length
|
||||
}, 0)
|
||||
EvaluationRunTracerLlama.tokenCounts.set(evalID + '_promptTokens', tokenCount)
|
||||
EvaluationRunTracerLlama.tokenCounts.set(evalID + '_outputTokens', 0)
|
||||
}
|
||||
} catch (e) {
|
||||
// catch the error and continue to work.
|
||||
}
|
||||
}
|
||||
EvaluationRunTracerLlama.startTimes.set(evalID + '_llm', event.timeStamp)
|
||||
})
|
||||
Settings.callbackManager.on('llm-end', (event: LLMEndEvent) => {
|
||||
this.calculateAndSetMetrics(event, 'llm')
|
||||
})
|
||||
Settings.callbackManager.on('llm-stream', (event: LLMStreamEvent) => {
|
||||
const evalID = (event as any).reason.parent?.caller?.evaluationRunId || (event as any).reason.caller?.evaluationRunId
|
||||
if (!evalID) return
|
||||
const { chunk } = event.detail.payload
|
||||
const { delta } = chunk
|
||||
const model = (event as any).reason?.caller?.model
|
||||
try {
|
||||
const encoding = encoding_for_model(model)
|
||||
if (encoding) {
|
||||
let tokenCount = EvaluationRunTracerLlama.tokenCounts.get(evalID + '_outputTokens') || 0
|
||||
tokenCount += encoding.encode(extractText(delta)).length
|
||||
EvaluationRunTracerLlama.tokenCounts.set(evalID + '_outputTokens', tokenCount)
|
||||
}
|
||||
} catch (e) {
|
||||
// ignore encoding errors and continue
|
||||
}
|
||||
})
|
||||
Settings.callbackManager.on('retrieve-start', (event: RetrievalStartEvent) => {
|
||||
const evalID = (event as any).reason.parent?.caller?.evaluationRunId || (event as any).reason.caller?.evaluationRunId
|
||||
if (evalID) {
|
||||
EvaluationRunTracerLlama.startTimes.set(evalID + '_retriever', event.timeStamp)
|
||||
}
|
||||
})
|
||||
Settings.callbackManager.on('retrieve-end', (event: RetrievalEndEvent) => {
|
||||
this.calculateAndSetMetrics(event, 'retriever')
|
||||
})
|
||||
Settings.callbackManager.on('agent-start', (event: AgentStartEvent) => {
|
||||
const evalID = (event as any).reason.parent?.caller?.evaluationRunId || (event as any).reason.caller?.evaluationRunId
|
||||
if (evalID) {
|
||||
EvaluationRunTracerLlama.startTimes.set(evalID + '_agent', event.timeStamp)
|
||||
}
|
||||
})
|
||||
Settings.callbackManager.on('agent-end', (event: AgentEndEvent) => {
|
||||
this.calculateAndSetMetrics(event, 'agent')
|
||||
})
|
||||
EvaluationRunTracerLlama.cbInit = true
|
||||
}
|
||||
}
|
||||
|
||||
private static calculateAndSetMetrics(event: any, label: string) {
|
||||
const evalID = event.reason.parent?.caller?.evaluationRunId || event.reason.caller?.evaluationRunId
|
||||
if (!evalID) return
|
||||
const startTime = EvaluationRunTracerLlama.startTimes.get(evalID + '_' + label) as number
|
||||
let model =
|
||||
(event as any).reason?.caller?.model || (event as any).reason?.caller?.llm?.model || EvaluationRunTracerLlama.models.get(evalID)
|
||||
|
||||
if (event.detail.payload?.response?.message && model) {
|
||||
try {
|
||||
const encoding = encoding_for_model(model)
|
||||
if (encoding) {
|
||||
let tokenCount = EvaluationRunTracerLlama.tokenCounts.get(evalID + '_outputTokens') || 0
|
||||
tokenCount += encoding.encode(event.detail.payload.response?.message?.content || '').length
|
||||
EvaluationRunTracerLlama.tokenCounts.set(evalID + '_outputTokens', tokenCount)
|
||||
}
|
||||
} catch (e) {
|
||||
// ignore encoding errors and continue
|
||||
}
|
||||
}
|
||||
|
||||
// Anthropic
|
||||
if (event.detail?.payload?.response?.raw?.usage) {
|
||||
const usage = event.detail.payload.response.raw.usage
|
||||
if (usage.output_tokens) {
|
||||
const metric = {
|
||||
completionTokens: usage.output_tokens,
|
||||
promptTokens: usage.input_tokens,
|
||||
model: model,
|
||||
totalTokens: usage.input_tokens + usage.output_tokens
|
||||
}
|
||||
EvaluationRunner.addMetrics(evalID, JSON.stringify(metric))
|
||||
} else if (usage.completion_tokens) {
|
||||
const metric = {
|
||||
completionTokens: usage.completion_tokens,
|
||||
promptTokens: usage.prompt_tokens,
|
||||
model: model,
|
||||
totalTokens: usage.total_tokens
|
||||
}
|
||||
EvaluationRunner.addMetrics(evalID, JSON.stringify(metric))
|
||||
}
|
||||
} else if (event.detail?.payload?.response?.raw['amazon-bedrock-invocationMetrics']) {
|
||||
const usage = event.detail?.payload?.response?.raw['amazon-bedrock-invocationMetrics']
|
||||
const metric = {
|
||||
completionTokens: usage.outputTokenCount,
|
||||
promptTokens: usage.inputTokenCount,
|
||||
model: event.detail?.payload?.response?.raw.model,
|
||||
totalTokens: usage.inputTokenCount + usage.outputTokenCount
|
||||
}
|
||||
EvaluationRunner.addMetrics(evalID, JSON.stringify(metric))
|
||||
} else {
|
||||
const metric = {
|
||||
[label]: (event.timeStamp - startTime).toFixed(2),
|
||||
completionTokens: EvaluationRunTracerLlama.tokenCounts.get(evalID + '_outputTokens'),
|
||||
promptTokens: EvaluationRunTracerLlama.tokenCounts.get(evalID + '_promptTokens'),
|
||||
model: model || EvaluationRunTracerLlama.models.get(evalID) || '',
|
||||
totalTokens:
|
||||
(EvaluationRunTracerLlama.tokenCounts.get(evalID + '_outputTokens') || 0) +
|
||||
(EvaluationRunTracerLlama.tokenCounts.get(evalID + '_promptTokens') || 0)
|
||||
}
|
||||
EvaluationRunner.addMetrics(evalID, JSON.stringify(metric))
|
||||
}
|
||||
|
||||
//cleanup
|
||||
EvaluationRunTracerLlama.startTimes.delete(evalID + '_' + label)
|
||||
EvaluationRunTracerLlama.tokenCounts.delete(evalID + '_outputTokens')
EvaluationRunTracerLlama.tokenCounts.delete(evalID + '_promptTokens')
|
||||
EvaluationRunTracerLlama.models.delete(evalID)
|
||||
}
|
||||
|
||||
static async injectEvaluationMetadata(nodeData: INodeData, options: ICommonObject, callerObj: any) {
|
||||
if (options.evaluationRunId && callerObj) {
|
||||
// these are needed for evaluation runs
|
||||
options.llamaIndex = true
|
||||
await additionalCallbacks(nodeData, options)
|
||||
Object.defineProperty(callerObj, 'evaluationRunId', {
|
||||
enumerable: true,
|
||||
configurable: true,
|
||||
writable: true,
|
||||
value: options.evaluationRunId
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// from https://github.com/run-llama/LlamaIndexTS/blob/main/packages/core/src/llm/utils.ts
|
||||
export function extractText(message: MessageContent): string {
|
||||
if (typeof message !== 'string' && !Array.isArray(message)) {
|
||||
console.warn('extractText called with non-MessageContent message, this is likely a bug.')
|
||||
return `${message}`
|
||||
} else if (typeof message !== 'string' && Array.isArray(message)) {
|
||||
// message is of type MessageContentDetail[] - retrieve just the text parts and concatenate them
|
||||
// so we can pass them to the context generator
|
||||
return message
|
||||
.filter((c): c is MessageContentTextDetail => c.type === 'text')
|
||||
.map((c) => c.text)
|
||||
.join('\n\n')
|
||||
} else {
|
||||
return message
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,226 @@
|
|||
import axios from 'axios'
|
||||
import { v4 as uuidv4 } from 'uuid'
|
||||
import { ICommonObject } from '../src'
|
||||
|
||||
import { getModelConfigByModelName, MODEL_TYPE } from '../src/modelLoader'
|
||||
|
||||
export class EvaluationRunner {
|
||||
static metrics = new Map<string, string[]>()
|
||||
|
||||
static getCostMetrics = async (selectedProvider: string, selectedModel: string) => {
|
||||
let modelConfig = await getModelConfigByModelName(MODEL_TYPE.CHAT, selectedProvider, selectedModel)
|
||||
if (modelConfig) {
|
||||
if (modelConfig['cost_values']) {
|
||||
return modelConfig.cost_values
|
||||
}
|
||||
return { cost_values: modelConfig }
|
||||
} else {
|
||||
modelConfig = await getModelConfigByModelName(MODEL_TYPE.LLM, selectedProvider, selectedModel)
|
||||
if (modelConfig) {
|
||||
if (modelConfig['cost_values']) {
|
||||
return modelConfig.cost_values
|
||||
}
|
||||
return { cost_values: modelConfig }
|
||||
}
|
||||
}
|
||||
return undefined
|
||||
}
|
||||
|
||||
static async getAndDeleteMetrics(id: string) {
|
||||
const val = EvaluationRunner.metrics.get(id)
|
||||
if (val) {
|
||||
try {
|
||||
// first, let's get the provider and model
|
||||
let selectedModel = undefined
|
||||
let selectedProvider = undefined
|
||||
if (val && val.length > 0) {
|
||||
let modelName = ''
|
||||
let providerName = ''
|
||||
for (let i = 0; i < val.length; i++) {
|
||||
const metric = val[i]
|
||||
if (typeof metric === 'object') {
|
||||
modelName = metric['model']
|
||||
providerName = metric['provider']
|
||||
} else {
|
||||
modelName = JSON.parse(metric)['model']
|
||||
providerName = JSON.parse(metric)['provider']
|
||||
}
|
||||
|
||||
if (modelName) {
|
||||
selectedModel = modelName
|
||||
}
|
||||
if (providerName) {
|
||||
selectedProvider = providerName
|
||||
}
|
||||
}
|
||||
}
|
||||
if (selectedProvider && selectedModel) {
|
||||
const modelConfig = await EvaluationRunner.getCostMetrics(selectedProvider, selectedModel)
|
||||
if (modelConfig) {
|
||||
val.push(JSON.stringify({ cost_values: modelConfig }))
|
||||
}
|
||||
}
|
||||
} catch (error) {
|
||||
//stay silent
|
||||
}
|
||||
}
|
||||
EvaluationRunner.metrics.delete(id)
|
||||
return val
|
||||
}
|
||||
|
||||
static addMetrics(id: string, metric: string) {
|
||||
if (EvaluationRunner.metrics.has(id)) {
|
||||
EvaluationRunner.metrics.get(id)?.push(metric)
|
||||
} else {
|
||||
EvaluationRunner.metrics.set(id, [metric])
|
||||
}
|
||||
}
|
||||
|
||||
baseURL = ''
|
||||
|
||||
constructor(baseURL: string) {
|
||||
this.baseURL = baseURL
|
||||
}
|
||||
|
||||
getChatflowApiKey(chatflowId: string, apiKeys: { chatflowId: string; apiKey: string }[] = []) {
|
||||
return apiKeys.find((item) => item.chatflowId === chatflowId)?.apiKey || ''
|
||||
}
|
||||
|
||||
public async runEvaluations(data: ICommonObject) {
|
||||
const chatflowIds = JSON.parse(data.chatflowId)
|
||||
const returnData: ICommonObject = {}
|
||||
returnData.evaluationId = data.evaluationId
|
||||
returnData.runDate = new Date()
|
||||
returnData.rows = []
|
||||
for (let i = 0; i < data.dataset.rows.length; i++) {
|
||||
returnData.rows.push({
|
||||
input: data.dataset.rows[i].input,
|
||||
expectedOutput: data.dataset.rows[i].output,
|
||||
itemNo: data.dataset.rows[i].sequenceNo,
|
||||
evaluations: [],
|
||||
status: 'pending'
|
||||
})
|
||||
}
|
||||
for (let i = 0; i < chatflowIds.length; i++) {
|
||||
const chatflowId = chatflowIds[i]
|
||||
await this.evaluateChatflow(chatflowId, this.getChatflowApiKey(chatflowId, data.apiKeys), data, returnData)
|
||||
}
|
||||
return returnData
|
||||
}
|
||||
|
||||
async evaluateChatflow(chatflowId: string, apiKey: string, data: any, returnData: any) {
|
||||
for (let i = 0; i < data.dataset.rows.length; i++) {
|
||||
const item = data.dataset.rows[i]
|
||||
const uuid = uuidv4()
|
||||
|
||||
const headers: any = {
|
||||
'X-Request-ID': uuid,
|
||||
'X-Flowise-Evaluation': 'true'
|
||||
}
|
||||
if (apiKey) {
|
||||
headers['Authorization'] = `Bearer ${apiKey}`
|
||||
}
|
||||
let axiosConfig = {
|
||||
headers: headers
|
||||
}
|
||||
let startTime = performance.now()
|
||||
const runData: any = {}
|
||||
runData.chatflowId = chatflowId
|
||||
runData.startTime = startTime
|
||||
const postData: any = { question: item.input, evaluationRunId: uuid, evaluation: true }
|
||||
if (data.sessionId) {
|
||||
postData.overrideConfig = { sessionId: data.sessionId }
|
||||
}
|
||||
try {
|
||||
let response = await axios.post(`${this.baseURL}/api/v1/prediction/${chatflowId}`, postData, axiosConfig)
|
||||
let agentFlowMetrics: any[] = []
|
||||
if (response?.data?.agentFlowExecutedData) {
|
||||
for (let i = 0; i < response.data.agentFlowExecutedData.length; i++) {
|
||||
const agentFlowExecutedData = response.data.agentFlowExecutedData[i]
|
||||
const input_tokens = agentFlowExecutedData?.data?.output?.usageMetadata?.input_tokens || 0
|
||||
const output_tokens = agentFlowExecutedData?.data?.output?.usageMetadata?.output_tokens || 0
|
||||
const total_tokens =
|
||||
agentFlowExecutedData?.data?.output?.usageMetadata?.total_tokens || input_tokens + output_tokens
|
||||
const metrics: any = {
|
||||
promptTokens: input_tokens,
|
||||
completionTokens: output_tokens,
|
||||
totalTokens: total_tokens,
|
||||
provider:
|
||||
agentFlowExecutedData.data?.input?.llmModelConfig?.llmModel ||
|
||||
agentFlowExecutedData.data?.input?.agentModelConfig?.agentModel,
|
||||
model:
|
||||
agentFlowExecutedData.data?.input?.llmModelConfig?.modelName ||
|
||||
agentFlowExecutedData.data?.input?.agentModelConfig?.modelName,
|
||||
nodeLabel: agentFlowExecutedData?.nodeLabel,
|
||||
nodeId: agentFlowExecutedData?.nodeId
|
||||
}
|
||||
if (metrics.provider && metrics.model) {
|
||||
const modelConfig = await EvaluationRunner.getCostMetrics(metrics.provider, metrics.model)
|
||||
if (modelConfig) {
|
||||
metrics.cost_values = {
|
||||
input_cost: (modelConfig.cost_values.input_cost || 0) * (input_tokens / 1000),
|
||||
output_cost: (modelConfig.cost_values.output_cost || 0) * (output_tokens / 1000)
|
||||
}
|
||||
metrics.cost_values.total_cost = metrics.cost_values.input_cost + metrics.cost_values.output_cost
|
||||
}
|
||||
}
|
||||
agentFlowMetrics.push(metrics)
|
||||
}
|
||||
}
|
||||
const endTime = performance.now()
|
||||
const timeTaken = (endTime - startTime).toFixed(2)
|
||||
if (response?.data?.metrics) {
|
||||
runData.metrics = response.data.metrics
|
||||
runData.metrics.push({
|
||||
apiLatency: timeTaken
|
||||
})
|
||||
} else {
|
||||
runData.metrics = [
|
||||
{
|
||||
apiLatency: timeTaken
|
||||
}
|
||||
]
|
||||
}
|
||||
if (agentFlowMetrics.length > 0) {
|
||||
runData.nested_metrics = agentFlowMetrics
|
||||
}
|
||||
runData.status = 'complete'
|
||||
let resultText = ''
|
||||
if (response.data.text) resultText = response.data.text
|
||||
else if (response.data.json) resultText = '```json\n' + JSON.stringify(response.data.json, null, 2) + '\n```'
|
||||
else resultText = JSON.stringify(response.data, null, 2)
|
||||
|
||||
runData.actualOutput = resultText
|
||||
runData.latency = timeTaken
|
||||
runData.error = ''
|
||||
} catch (error: any) {
|
||||
runData.status = 'error'
|
||||
runData.actualOutput = ''
|
||||
runData.error = error?.response?.data?.message
|
||||
? error.response.data.message
|
||||
: error?.message
|
||||
? error.message
|
||||
: 'Unknown error'
|
||||
try {
|
||||
if (runData.error.indexOf('-') > -1) {
|
||||
// if there is a dash, remove everything before it
|
||||
runData.error = 'Error: ' + runData.error.substr(runData.error.indexOf('-') + 1).trim()
|
||||
}
|
||||
} catch (error) {
|
||||
//stay silent
|
||||
}
|
||||
const endTime = performance.now()
|
||||
const timeTaken = (endTime - startTime).toFixed(2)
|
||||
runData.metrics = [
|
||||
{
|
||||
apiLatency: timeTaken
|
||||
}
|
||||
]
|
||||
runData.latency = timeTaken
|
||||
}
|
||||
runData.uuid = uuid
|
||||
returnData.rows[i].evaluations.push(runData)
|
||||
}
|
||||
return returnData
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,15 @@
module.exports = {
    preset: 'ts-jest',
    testEnvironment: 'node',
    roots: ['<rootDir>/nodes'],
    transform: {
        '^.+\\.tsx?$': 'ts-jest'
    },
    testRegex: '(/__tests__/.*|(\\.|/)(test|spec))\\.tsx?$',
    moduleFileExtensions: ['ts', 'tsx', 'js', 'jsx', 'json', 'node'],
    verbose: true,
    testPathIgnorePatterns: ['/node_modules/', '/dist/'],
    moduleNameMapper: {
        '^../../../src/(.*)$': '<rootDir>/src/$1'
    }
}
File diff suppressed because it is too large
File diff suppressed because it is too large
|
|
@ -0,0 +1,351 @@
|
|||
import { CommonType, ICommonObject, ICondition, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
|
||||
import removeMarkdown from 'remove-markdown'
|
||||
|
||||
class Condition_Agentflow implements INode {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
description: string
|
||||
type: string
|
||||
icon: string
|
||||
category: string
|
||||
color: string
|
||||
tags: string[]
|
||||
baseClasses: string[]
|
||||
inputs: INodeParams[]
|
||||
outputs: INodeOutputsValue[]
|
||||
|
||||
constructor() {
|
||||
this.label = 'Condition'
|
||||
this.name = 'conditionAgentflow'
|
||||
this.version = 1.0
|
||||
this.type = 'Condition'
|
||||
this.category = 'Agent Flows'
|
||||
this.description = `Split flows based on If Else conditions`
|
||||
this.baseClasses = [this.type]
|
||||
this.color = '#FFB938'
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'Conditions',
|
||||
name: 'conditions',
|
||||
type: 'array',
|
||||
description: 'Values to compare',
|
||||
acceptVariable: true,
|
||||
default: [
|
||||
{
|
||||
type: 'string',
|
||||
value1: '',
|
||||
operation: 'equal',
|
||||
value2: ''
|
||||
}
|
||||
],
|
||||
array: [
|
||||
{
|
||||
label: 'Type',
|
||||
name: 'type',
|
||||
type: 'options',
|
||||
options: [
|
||||
{
|
||||
label: 'String',
|
||||
name: 'string'
|
||||
},
|
||||
{
|
||||
label: 'Number',
|
||||
name: 'number'
|
||||
},
|
||||
{
|
||||
label: 'Boolean',
|
||||
name: 'boolean'
|
||||
}
|
||||
],
|
||||
default: 'string'
|
||||
},
|
||||
/////////////////////////////////////// STRING ////////////////////////////////////////
|
||||
{
|
||||
label: 'Value 1',
|
||||
name: 'value1',
|
||||
type: 'string',
|
||||
default: '',
|
||||
description: 'First value to be compared with',
|
||||
acceptVariable: true,
|
||||
show: {
|
||||
'conditions[$index].type': 'string'
|
||||
}
|
||||
},
|
||||
{
|
||||
label: 'Operation',
|
||||
name: 'operation',
|
||||
type: 'options',
|
||||
options: [
|
||||
{
|
||||
label: 'Contains',
|
||||
name: 'contains'
|
||||
},
|
||||
{
|
||||
label: 'Ends With',
|
||||
name: 'endsWith'
|
||||
},
|
||||
{
|
||||
label: 'Equal',
|
||||
name: 'equal'
|
||||
},
|
||||
{
|
||||
label: 'Not Contains',
|
||||
name: 'notContains'
|
||||
},
|
||||
{
|
||||
label: 'Not Equal',
|
||||
name: 'notEqual'
|
||||
},
|
||||
{
|
||||
label: 'Regex',
|
||||
name: 'regex'
|
||||
},
|
||||
{
|
||||
label: 'Starts With',
|
||||
name: 'startsWith'
|
||||
},
|
||||
{
|
||||
label: 'Is Empty',
|
||||
name: 'isEmpty'
|
||||
},
|
||||
{
|
||||
label: 'Not Empty',
|
||||
name: 'notEmpty'
|
||||
}
|
||||
],
|
||||
default: 'equal',
|
||||
description: 'Type of operation',
|
||||
show: {
|
||||
'conditions[$index].type': 'string'
|
||||
}
|
||||
},
|
||||
{
|
||||
label: 'Value 2',
|
||||
name: 'value2',
|
||||
type: 'string',
|
||||
default: '',
|
||||
description: 'Second value to be compared with',
|
||||
acceptVariable: true,
|
||||
show: {
|
||||
'conditions[$index].type': 'string'
|
||||
},
|
||||
hide: {
|
||||
'conditions[$index].operation': ['isEmpty', 'notEmpty']
|
||||
}
|
||||
},
|
||||
/////////////////////////////////////// NUMBER ////////////////////////////////////////
|
||||
{
|
||||
label: 'Value 1',
|
||||
name: 'value1',
|
||||
type: 'number',
|
||||
default: '',
|
||||
description: 'First value to be compared with',
|
||||
acceptVariable: true,
|
||||
show: {
|
||||
'conditions[$index].type': 'number'
|
||||
}
|
||||
},
|
||||
{
|
||||
label: 'Operation',
|
||||
name: 'operation',
|
||||
type: 'options',
|
||||
options: [
|
||||
{
|
||||
label: 'Smaller',
|
||||
name: 'smaller'
|
||||
},
|
||||
{
|
||||
label: 'Smaller Equal',
|
||||
name: 'smallerEqual'
|
||||
},
|
||||
{
|
||||
label: 'Equal',
|
||||
name: 'equal'
|
||||
},
|
||||
{
|
||||
label: 'Not Equal',
|
||||
name: 'notEqual'
|
||||
},
|
||||
{
|
||||
label: 'Larger',
|
||||
name: 'larger'
|
||||
},
|
||||
{
|
||||
label: 'Larger Equal',
|
||||
name: 'largerEqual'
|
||||
},
|
||||
{
|
||||
label: 'Is Empty',
|
||||
name: 'isEmpty'
|
||||
},
|
||||
{
|
||||
label: 'Not Empty',
|
||||
name: 'notEmpty'
|
||||
}
|
||||
],
|
||||
default: 'equal',
|
||||
description: 'Type of operation',
|
||||
show: {
|
||||
'conditions[$index].type': 'number'
|
||||
}
|
||||
},
|
||||
{
|
||||
label: 'Value 2',
|
||||
name: 'value2',
|
||||
type: 'number',
|
||||
default: 0,
|
||||
description: 'Second value to be compared with',
|
||||
acceptVariable: true,
|
||||
show: {
|
||||
'conditions[$index].type': 'number'
|
||||
}
|
||||
},
|
||||
/////////////////////////////////////// BOOLEAN ////////////////////////////////////////
|
||||
{
|
||||
label: 'Value 1',
|
||||
name: 'value1',
|
||||
type: 'boolean',
|
||||
default: false,
|
||||
description: 'First value to be compared with',
|
||||
show: {
|
||||
'conditions[$index].type': 'boolean'
|
||||
}
|
||||
},
|
||||
{
|
||||
label: 'Operation',
|
||||
name: 'operation',
|
||||
type: 'options',
|
||||
options: [
|
||||
{
|
||||
label: 'Equal',
|
||||
name: 'equal'
|
||||
},
|
||||
{
|
||||
label: 'Not Equal',
|
||||
name: 'notEqual'
|
||||
}
|
||||
],
|
||||
default: 'equal',
|
||||
description: 'Type of operation',
|
||||
show: {
|
||||
'conditions[$index].type': 'boolean'
|
||||
}
|
||||
},
|
||||
{
|
||||
label: 'Value 2',
|
||||
name: 'value2',
|
||||
type: 'boolean',
|
||||
default: false,
|
||||
description: 'Second value to be compared with',
|
||||
show: {
|
||||
'conditions[$index].type': 'boolean'
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
this.outputs = [
|
||||
{
|
||||
label: '0',
|
||||
name: '0',
|
||||
description: 'Condition 0'
|
||||
},
|
||||
{
|
||||
label: '1',
|
||||
name: '1',
|
||||
description: 'Else'
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
async run(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
|
||||
const state = options.agentflowRuntime?.state as ICommonObject
|
||||
|
||||
const compareOperationFunctions: {
|
||||
[key: string]: (value1: CommonType, value2: CommonType) => boolean
|
||||
} = {
|
||||
contains: (value1: CommonType, value2: CommonType) => (value1 || '').toString().includes((value2 || '').toString()),
|
||||
notContains: (value1: CommonType, value2: CommonType) => !(value1 || '').toString().includes((value2 || '').toString()),
|
||||
endsWith: (value1: CommonType, value2: CommonType) => (value1 as string).endsWith(value2 as string),
|
||||
equal: (value1: CommonType, value2: CommonType) => value1 === value2,
|
||||
notEqual: (value1: CommonType, value2: CommonType) => value1 !== value2,
|
||||
larger: (value1: CommonType, value2: CommonType) => (Number(value1) || 0) > (Number(value2) || 0),
|
||||
largerEqual: (value1: CommonType, value2: CommonType) => (Number(value1) || 0) >= (Number(value2) || 0),
|
||||
smaller: (value1: CommonType, value2: CommonType) => (Number(value1) || 0) < (Number(value2) || 0),
|
||||
smallerEqual: (value1: CommonType, value2: CommonType) => (Number(value1) || 0) <= (Number(value2) || 0),
|
||||
startsWith: (value1: CommonType, value2: CommonType) => (value1 as string).startsWith(value2 as string),
|
||||
isEmpty: (value1: CommonType) => [undefined, null, ''].includes(value1 as string),
|
||||
notEmpty: (value1: CommonType) => ![undefined, null, ''].includes(value1 as string)
|
||||
}
|
||||
|
||||
const _conditions = nodeData.inputs?.conditions
|
||||
const conditions: ICondition[] = typeof _conditions === 'string' ? JSON.parse(_conditions) : _conditions
|
||||
const initialConditions = { ...conditions }
|
||||
|
||||
for (const condition of conditions) {
|
||||
const _value1 = condition.value1
|
||||
const _value2 = condition.value2
|
||||
const operation = condition.operation
|
||||
|
||||
let value1: CommonType
|
||||
let value2: CommonType
|
||||
|
||||
switch (condition.type) {
|
||||
case 'boolean':
|
||||
value1 = _value1
|
||||
value2 = _value2
|
||||
break
|
||||
case 'number':
|
||||
value1 = parseFloat(_value1 as string) || 0
|
||||
value2 = parseFloat(_value2 as string) || 0
|
||||
break
|
||||
default: // string
|
||||
value1 = removeMarkdown((_value1 as string) || '')
|
||||
value2 = removeMarkdown((_value2 as string) || '')
|
||||
}
|
||||
|
||||
const compareOperationResult = compareOperationFunctions[operation](value1, value2)
|
||||
if (compareOperationResult) {
|
||||
// find the matching condition
|
||||
const conditionIndex = conditions.findIndex((c) => JSON.stringify(c) === JSON.stringify(condition))
|
||||
// add isFulfilled to the condition
|
||||
if (conditionIndex > -1) {
|
||||
conditions[conditionIndex] = { ...condition, isFulfilled: true }
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// If no condition is fulfilled, add isFulfilled to the ELSE condition
|
||||
const dummyElseConditionData = {
|
||||
type: 'string',
|
||||
value1: '',
|
||||
operation: 'equal',
|
||||
value2: ''
|
||||
}
|
||||
if (!conditions.some((c) => c.isFulfilled)) {
|
||||
conditions.push({
|
||||
...dummyElseConditionData,
|
||||
isFulfilled: true
|
||||
})
|
||||
} else {
|
||||
conditions.push({
|
||||
...dummyElseConditionData,
|
||||
isFulfilled: false
|
||||
})
|
||||
}
|
||||
|
||||
const returnOutput = {
|
||||
id: nodeData.id,
|
||||
name: this.name,
|
||||
input: { conditions: initialConditions },
|
||||
output: { conditions },
|
||||
state
|
||||
}
|
||||
|
||||
return returnOutput
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { nodeClass: Condition_Agentflow }
|
||||
|
|
@ -0,0 +1,614 @@
|
|||
import { AnalyticHandler } from '../../../src/handler'
|
||||
import { ICommonObject, INode, INodeData, INodeOptionsValue, INodeOutputsValue, INodeParams } from '../../../src/Interface'
|
||||
import { AIMessageChunk, BaseMessageLike } from '@langchain/core/messages'
|
||||
import {
|
||||
getPastChatHistoryImageMessages,
|
||||
getUniqueImageMessages,
|
||||
processMessagesWithImages,
|
||||
replaceBase64ImagesWithFileReferences
|
||||
} from '../utils'
|
||||
import { CONDITION_AGENT_SYSTEM_PROMPT, DEFAULT_SUMMARIZER_TEMPLATE } from '../prompt'
|
||||
import { BaseChatModel } from '@langchain/core/language_models/chat_models'
|
||||
|
||||
class ConditionAgent_Agentflow implements INode {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
description: string
|
||||
type: string
|
||||
icon: string
|
||||
category: string
|
||||
color: string
|
||||
tags: string[]
|
||||
baseClasses: string[]
|
||||
inputs: INodeParams[]
|
||||
outputs: INodeOutputsValue[]
|
||||
|
||||
constructor() {
|
||||
this.label = 'Condition Agent'
|
||||
this.name = 'conditionAgentAgentflow'
|
||||
this.version = 1.1
|
||||
this.type = 'ConditionAgent'
|
||||
this.category = 'Agent Flows'
|
||||
this.description = `Utilize an agent to split flows based on dynamic conditions`
|
||||
this.baseClasses = [this.type]
|
||||
this.color = '#ff8fab'
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'Model',
|
||||
name: 'conditionAgentModel',
|
||||
type: 'asyncOptions',
|
||||
loadMethod: 'listModels',
|
||||
loadConfig: true
|
||||
},
|
||||
{
|
||||
label: 'Instructions',
|
||||
name: 'conditionAgentInstructions',
|
||||
type: 'string',
|
||||
description: 'General instructions for what the condition agent should do',
|
||||
rows: 4,
|
||||
acceptVariable: true,
|
||||
placeholder: 'Determine if the user is interested in learning about AI'
|
||||
},
|
||||
{
|
||||
label: 'Input',
|
||||
name: 'conditionAgentInput',
|
||||
type: 'string',
|
||||
description: 'Input to be used for the condition agent',
|
||||
rows: 4,
|
||||
acceptVariable: true,
|
||||
default: '<p><span class="variable" data-type="mention" data-id="question" data-label="question">{{ question }}</span> </p>'
|
||||
},
|
||||
{
|
||||
label: 'Scenarios',
|
||||
name: 'conditionAgentScenarios',
|
||||
description: 'Define the scenarios that will be used as the conditions to split the flow',
|
||||
type: 'array',
|
||||
array: [
|
||||
{
|
||||
label: 'Scenario',
|
||||
name: 'scenario',
|
||||
type: 'string',
|
||||
placeholder: 'User is asking for a pizza'
|
||||
}
|
||||
],
|
||||
default: [
|
||||
{
|
||||
scenario: ''
|
||||
},
|
||||
{
|
||||
scenario: ''
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
label: 'Override System Prompt',
|
||||
name: 'conditionAgentOverrideSystemPrompt',
|
||||
type: 'boolean',
|
||||
description: 'Override initial system prompt for Condition Agent',
|
||||
optional: true
|
||||
},
|
||||
{
|
||||
label: 'Node System Prompt',
|
||||
name: 'conditionAgentSystemPrompt',
|
||||
type: 'string',
|
||||
rows: 4,
|
||||
optional: true,
|
||||
acceptVariable: true,
|
||||
default: CONDITION_AGENT_SYSTEM_PROMPT,
|
||||
description: 'Expert use only. Modifying this can significantly alter agent behavior. Leave default if unsure',
|
||||
show: {
|
||||
conditionAgentOverrideSystemPrompt: true
|
||||
}
|
||||
}
|
||||
/*{
|
||||
label: 'Enable Memory',
|
||||
name: 'conditionAgentEnableMemory',
|
||||
type: 'boolean',
|
||||
description: 'Enable memory for the conversation thread',
|
||||
default: true,
|
||||
optional: true
|
||||
},
|
||||
{
|
||||
label: 'Memory Type',
|
||||
name: 'conditionAgentMemoryType',
|
||||
type: 'options',
|
||||
options: [
|
||||
{
|
||||
label: 'All Messages',
|
||||
name: 'allMessages',
|
||||
description: 'Retrieve all messages from the conversation'
|
||||
},
|
||||
{
|
||||
label: 'Window Size',
|
||||
name: 'windowSize',
|
||||
description: 'Uses a fixed window size to surface the last N messages'
|
||||
},
|
||||
{
|
||||
label: 'Conversation Summary',
|
||||
name: 'conversationSummary',
|
||||
description: 'Summarizes the whole conversation'
|
||||
},
|
||||
{
|
||||
label: 'Conversation Summary Buffer',
|
||||
name: 'conversationSummaryBuffer',
|
||||
description: 'Summarize conversations once token limit is reached. Default to 2000'
|
||||
}
|
||||
],
|
||||
optional: true,
|
||||
default: 'allMessages',
|
||||
show: {
|
||||
conditionAgentEnableMemory: true
|
||||
}
|
||||
},
|
||||
{
|
||||
label: 'Window Size',
|
||||
name: 'conditionAgentMemoryWindowSize',
|
||||
type: 'number',
|
||||
default: '20',
|
||||
description: 'Uses a fixed window size to surface the last N messages',
|
||||
show: {
|
||||
conditionAgentMemoryType: 'windowSize'
|
||||
}
|
||||
},
|
||||
{
|
||||
label: 'Max Token Limit',
|
||||
name: 'conditionAgentMemoryMaxTokenLimit',
|
||||
type: 'number',
|
||||
default: '2000',
|
||||
description: 'Summarize conversations once token limit is reached. Default to 2000',
|
||||
show: {
|
||||
conditionAgentMemoryType: 'conversationSummaryBuffer'
|
||||
}
|
||||
}*/
|
||||
]
|
||||
this.outputs = [
|
||||
{
|
||||
label: '0',
|
||||
name: '0',
|
||||
description: 'Condition 0'
|
||||
},
|
||||
{
|
||||
label: '1',
|
||||
name: '1',
|
||||
description: 'Else'
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
//@ts-ignore
|
||||
loadMethods = {
|
||||
async listModels(_: INodeData, options: ICommonObject): Promise<INodeOptionsValue[]> {
|
||||
const componentNodes = options.componentNodes as {
|
||||
[key: string]: INode
|
||||
}
|
||||
|
||||
const returnOptions: INodeOptionsValue[] = []
|
||||
for (const nodeName in componentNodes) {
|
||||
const componentNode = componentNodes[nodeName]
|
||||
if (componentNode.category === 'Chat Models') {
|
||||
if (componentNode.tags?.includes('LlamaIndex')) {
|
||||
continue
|
||||
}
|
||||
returnOptions.push({
|
||||
label: componentNode.label,
|
||||
name: nodeName,
|
||||
imageSrc: componentNode.icon
|
||||
})
|
||||
}
|
||||
}
|
||||
return returnOptions
|
||||
}
|
||||
}
|
||||
|
||||
private parseJsonMarkdown(jsonString: string): any {
|
||||
// Strip whitespace
|
||||
jsonString = jsonString.trim()
|
||||
const starts = ['```json', '```', '``', '`', '{']
|
||||
const ends = ['```', '``', '`', '}']
|
||||
|
||||
let startIndex = -1
|
||||
let endIndex = -1
|
||||
|
||||
// Find start of JSON
|
||||
for (const s of starts) {
|
||||
startIndex = jsonString.indexOf(s)
|
||||
if (startIndex !== -1) {
|
||||
if (jsonString[startIndex] !== '{') {
|
||||
startIndex += s.length
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// Find end of JSON
|
||||
if (startIndex !== -1) {
|
||||
for (const e of ends) {
|
||||
endIndex = jsonString.lastIndexOf(e, jsonString.length)
|
||||
if (endIndex !== -1) {
|
||||
if (jsonString[endIndex] === '}') {
|
||||
endIndex += 1
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (startIndex !== -1 && endIndex !== -1 && startIndex < endIndex) {
|
||||
const extractedContent = jsonString.slice(startIndex, endIndex).trim()
|
||||
try {
|
||||
return JSON.parse(extractedContent)
|
||||
} catch (error) {
|
||||
throw new Error(`Invalid JSON object. Error: ${error}`)
|
||||
}
|
||||
}
|
||||
|
||||
throw new Error('Could not find JSON block in the output.')
|
||||
}
|
||||
|
||||
async run(nodeData: INodeData, question: string, options: ICommonObject): Promise<any> {
|
||||
let llmIds: ICommonObject | undefined
|
||||
let analyticHandlers = options.analyticHandlers as AnalyticHandler
|
||||
|
||||
try {
|
||||
const abortController = options.abortController as AbortController
|
||||
|
||||
// Extract input parameters
|
||||
const model = nodeData.inputs?.conditionAgentModel as string
|
||||
const modelConfig = nodeData.inputs?.conditionAgentModelConfig as ICommonObject
|
||||
if (!model) {
|
||||
throw new Error('Model is required')
|
||||
}
|
||||
const conditionAgentInput = nodeData.inputs?.conditionAgentInput as string
|
||||
let input = conditionAgentInput || question
|
||||
const conditionAgentInstructions = nodeData.inputs?.conditionAgentInstructions as string
|
||||
const conditionAgentSystemPrompt = nodeData.inputs?.conditionAgentSystemPrompt as string
|
||||
const conditionAgentOverrideSystemPrompt = nodeData.inputs?.conditionAgentOverrideSystemPrompt as boolean
|
||||
let systemPrompt = CONDITION_AGENT_SYSTEM_PROMPT
|
||||
if (conditionAgentSystemPrompt && conditionAgentOverrideSystemPrompt) {
|
||||
systemPrompt = conditionAgentSystemPrompt
|
||||
}
|
||||
|
||||
// Extract memory and configuration options
|
||||
const enableMemory = nodeData.inputs?.conditionAgentEnableMemory as boolean
|
||||
const memoryType = nodeData.inputs?.conditionAgentMemoryType as string
|
||||
const _conditionAgentScenarios = nodeData.inputs?.conditionAgentScenarios as { scenario: string }[]
|
||||
|
||||
// Extract runtime state and history
|
||||
const state = options.agentflowRuntime?.state as ICommonObject
|
||||
const pastChatHistory = (options.pastChatHistory as BaseMessageLike[]) ?? []
|
||||
const runtimeChatHistory = (options.agentflowRuntime?.chatHistory as BaseMessageLike[]) ?? []
|
||||
|
||||
// Initialize the LLM model instance
|
||||
const nodeInstanceFilePath = options.componentNodes[model].filePath as string
|
||||
const nodeModule = await import(nodeInstanceFilePath)
|
||||
const newLLMNodeInstance = new nodeModule.nodeClass()
|
||||
const newNodeData = {
|
||||
...nodeData,
|
||||
credential: modelConfig['FLOWISE_CREDENTIAL_ID'],
|
||||
inputs: {
|
||||
...nodeData.inputs,
|
||||
...modelConfig
|
||||
}
|
||||
}
|
||||
let llmNodeInstance = (await newLLMNodeInstance.init(newNodeData, '', options)) as BaseChatModel
|
||||
|
||||
const isStructuredOutput =
|
||||
_conditionAgentScenarios && Array.isArray(_conditionAgentScenarios) && _conditionAgentScenarios.length > 0
|
||||
if (!isStructuredOutput) {
|
||||
throw new Error('Scenarios are required')
|
||||
}
|
||||
|
||||
// Prepare messages array
|
||||
const messages: BaseMessageLike[] = [
|
||||
{
|
||||
role: 'system',
|
||||
content: systemPrompt
|
||||
},
|
||||
{
|
||||
role: 'user',
|
||||
content: `{"input": "Hello", "scenarios": ["user is asking about AI", "user is not asking about AI"], "instruction": "Your task is to check if the user is asking about AI."}`
|
||||
},
|
||||
{
|
||||
role: 'assistant',
|
||||
content: `\`\`\`json\n{"output": "user is not asking about AI"}\n\`\`\``
|
||||
}
|
||||
]
|
||||
// Used to store messages with image file references, as we do not want to store the base64 data in the database
|
||||
let runtimeImageMessagesWithFileRef: BaseMessageLike[] = []
|
||||
// Used to keep track of past messages with image file references
|
||||
let pastImageMessagesWithFileRef: BaseMessageLike[] = []
|
||||
|
||||
input = `{"input": ${input}, "scenarios": ${JSON.stringify(
|
||||
_conditionAgentScenarios.map((scenario) => scenario.scenario)
|
||||
)}, "instruction": ${conditionAgentInstructions}}`
|
||||
|
||||
// Handle memory management if enabled
|
||||
if (enableMemory) {
|
||||
await this.handleMemory({
|
||||
messages,
|
||||
memoryType,
|
||||
pastChatHistory,
|
||||
runtimeChatHistory,
|
||||
llmNodeInstance,
|
||||
nodeData,
|
||||
input,
|
||||
abortController,
|
||||
options,
|
||||
modelConfig,
|
||||
runtimeImageMessagesWithFileRef,
|
||||
pastImageMessagesWithFileRef
|
||||
})
|
||||
} else {
|
||||
/*
|
||||
* If this is the first node:
|
||||
* - Add images to messages if exist
|
||||
*/
|
||||
if (!runtimeChatHistory.length && options.uploads) {
|
||||
const imageContents = await getUniqueImageMessages(options, messages, modelConfig)
|
||||
if (imageContents) {
|
||||
const { imageMessageWithBase64, imageMessageWithFileRef } = imageContents
|
||||
messages.push(imageMessageWithBase64)
|
||||
runtimeImageMessagesWithFileRef.push(imageMessageWithFileRef)
|
||||
}
|
||||
}
|
||||
messages.push({
|
||||
role: 'user',
|
||||
content: input
|
||||
})
|
||||
}
|
||||
|
||||
// Initialize response and determine if streaming is possible
|
||||
let response: AIMessageChunk = new AIMessageChunk('')
|
||||
|
||||
// Start analytics
|
||||
if (analyticHandlers && options.parentTraceIds) {
|
||||
const llmLabel = options?.componentNodes?.[model]?.label || model
|
||||
llmIds = await analyticHandlers.onLLMStart(llmLabel, messages, options.parentTraceIds)
|
||||
}
|
||||
|
||||
// Track execution time
|
||||
const startTime = Date.now()
|
||||
|
||||
response = await llmNodeInstance.invoke(messages, { signal: abortController?.signal })
|
||||
|
||||
// Calculate execution time
|
||||
const endTime = Date.now()
|
||||
const timeDelta = endTime - startTime
|
||||
|
||||
// End analytics tracking
|
||||
if (analyticHandlers && llmIds) {
|
||||
await analyticHandlers.onLLMEnd(
|
||||
llmIds,
|
||||
typeof response.content === 'string' ? response.content : JSON.stringify(response.content)
|
||||
)
|
||||
}
|
||||
|
||||
let calledOutputName: string
|
||||
try {
|
||||
const parsedResponse = this.parseJsonMarkdown(response.content as string)
|
||||
if (!parsedResponse.output || typeof parsedResponse.output !== 'string') {
|
||||
throw new Error('LLM response is missing the "output" key or it is not a string.')
|
||||
}
|
||||
calledOutputName = parsedResponse.output
|
||||
} catch (error) {
|
||||
throw new Error(
|
||||
`Failed to parse a valid scenario from the LLM's response. Please check if the model is capable of following JSON output instructions. Raw LLM Response: "${
|
||||
response.content as string
|
||||
}"`
|
||||
)
|
||||
}
|
||||
|
||||
// Clean up empty inputs
|
||||
for (const key in nodeData.inputs) {
|
||||
if (nodeData.inputs[key] === '') {
|
||||
delete nodeData.inputs[key]
|
||||
}
|
||||
}
|
||||
|
||||
// Find the first exact match
|
||||
const matchedScenarioIndex = _conditionAgentScenarios.findIndex(
|
||||
(scenario) => calledOutputName.toLowerCase() === scenario.scenario.toLowerCase()
|
||||
)
|
||||
|
||||
const conditions = _conditionAgentScenarios.map((scenario, index) => {
|
||||
return {
|
||||
output: scenario.scenario,
|
||||
isFulfilled: index === matchedScenarioIndex
|
||||
}
|
||||
})
|
||||
|
||||
// Replace the actual messages array with one that includes the file references for images instead of base64 data
|
||||
const messagesWithFileReferences = replaceBase64ImagesWithFileReferences(
|
||||
messages,
|
||||
runtimeImageMessagesWithFileRef,
|
||||
pastImageMessagesWithFileRef
|
||||
)
|
||||
|
||||
// Only add to runtime chat history if this is the first node
|
||||
const inputMessages = []
|
||||
if (!runtimeChatHistory.length) {
|
||||
if (runtimeImageMessagesWithFileRef.length) {
|
||||
inputMessages.push(...runtimeImageMessagesWithFileRef)
|
||||
}
|
||||
if (input && typeof input === 'string') {
|
||||
inputMessages.push({ role: 'user', content: question })
|
||||
}
|
||||
}
|
||||
|
||||
const returnOutput = {
|
||||
id: nodeData.id,
|
||||
name: this.name,
|
||||
input: { messages: messagesWithFileReferences },
|
||||
output: {
|
||||
conditions,
|
||||
content: typeof response.content === 'string' ? response.content : JSON.stringify(response.content),
|
||||
timeMetadata: {
|
||||
start: startTime,
|
||||
end: endTime,
|
||||
delta: timeDelta
|
||||
}
|
||||
},
|
||||
state,
|
||||
chatHistory: [...inputMessages]
|
||||
}
|
||||
|
||||
return returnOutput
|
||||
} catch (error) {
|
||||
if (options.analyticHandlers && llmIds) {
|
||||
await options.analyticHandlers.onLLMError(llmIds, error instanceof Error ? error.message : String(error))
|
||||
}
|
||||
|
||||
if (error instanceof Error && error.message === 'Aborted') {
|
||||
throw error
|
||||
}
|
||||
throw new Error(`Error in Condition Agent node: ${error instanceof Error ? error.message : String(error)}`)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Handles memory management based on the specified memory type
|
||||
*/
|
||||
private async handleMemory({
|
||||
messages,
|
||||
memoryType,
|
||||
pastChatHistory,
|
||||
runtimeChatHistory,
|
||||
llmNodeInstance,
|
||||
nodeData,
|
||||
input,
|
||||
abortController,
|
||||
options,
|
||||
modelConfig,
|
||||
runtimeImageMessagesWithFileRef,
|
||||
pastImageMessagesWithFileRef
|
||||
}: {
|
||||
messages: BaseMessageLike[]
|
||||
memoryType: string
|
||||
pastChatHistory: BaseMessageLike[]
|
||||
runtimeChatHistory: BaseMessageLike[]
|
||||
llmNodeInstance: BaseChatModel
|
||||
nodeData: INodeData
|
||||
input: string
|
||||
abortController: AbortController
|
||||
options: ICommonObject
|
||||
modelConfig: ICommonObject
|
||||
runtimeImageMessagesWithFileRef: BaseMessageLike[]
|
||||
pastImageMessagesWithFileRef: BaseMessageLike[]
|
||||
}): Promise<void> {
|
||||
const { updatedPastMessages, transformedPastMessages } = await getPastChatHistoryImageMessages(pastChatHistory, options)
|
||||
pastChatHistory = updatedPastMessages
|
||||
pastImageMessagesWithFileRef.push(...transformedPastMessages)
|
||||
|
||||
let pastMessages = [...pastChatHistory, ...runtimeChatHistory]
|
||||
if (!runtimeChatHistory.length) {
|
||||
/*
|
||||
* If this is the first node:
|
||||
* - Add images to messages if exist
|
||||
*/
|
||||
if (options.uploads) {
|
||||
const imageContents = await getUniqueImageMessages(options, messages, modelConfig)
|
||||
if (imageContents) {
|
||||
const { imageMessageWithBase64, imageMessageWithFileRef } = imageContents
|
||||
pastMessages.push(imageMessageWithBase64)
|
||||
runtimeImageMessagesWithFileRef.push(imageMessageWithFileRef)
|
||||
}
|
||||
}
|
||||
}
|
||||
const { updatedMessages, transformedMessages } = await processMessagesWithImages(pastMessages, options)
|
||||
pastMessages = updatedMessages
|
||||
pastImageMessagesWithFileRef.push(...transformedMessages)
|
||||
|
||||
if (pastMessages.length > 0) {
|
||||
if (memoryType === 'windowSize') {
|
||||
// Window memory: Keep the last N messages
|
||||
const windowSize = nodeData.inputs?.conditionAgentMemoryWindowSize as number
|
||||
const windowedMessages = pastMessages.slice(-windowSize * 2)
|
||||
messages.push(...windowedMessages)
|
||||
} else if (memoryType === 'conversationSummary') {
|
||||
// Summary memory: Summarize all past messages
|
||||
const summary = await llmNodeInstance.invoke(
|
||||
[
|
||||
{
|
||||
role: 'user',
|
||||
content: DEFAULT_SUMMARIZER_TEMPLATE.replace(
|
||||
'{conversation}',
|
||||
pastMessages.map((msg: any) => `${msg.role}: ${msg.content}`).join('\n')
|
||||
)
|
||||
}
|
||||
],
|
||||
{ signal: abortController?.signal }
|
||||
)
|
||||
messages.push({ role: 'assistant', content: summary.content as string })
|
||||
} else if (memoryType === 'conversationSummaryBuffer') {
|
||||
// Summary buffer: Summarize messages that exceed token limit
|
||||
await this.handleSummaryBuffer(messages, pastMessages, llmNodeInstance, nodeData, abortController)
|
||||
} else {
|
||||
// Default: Use all messages
|
||||
messages.push(...pastMessages)
|
||||
}
|
||||
}
|
||||
|
||||
messages.push({
|
||||
role: 'user',
|
||||
content: input
|
||||
})
|
||||
}
|
||||
|
||||
/**
|
||||
* Handles conversation summary buffer memory type
|
||||
*/
|
||||
private async handleSummaryBuffer(
|
||||
messages: BaseMessageLike[],
|
||||
pastMessages: BaseMessageLike[],
|
||||
llmNodeInstance: BaseChatModel,
|
||||
nodeData: INodeData,
|
||||
abortController: AbortController
|
||||
): Promise<void> {
|
||||
const maxTokenLimit = (nodeData.inputs?.conditionAgentMemoryMaxTokenLimit as number) || 2000
|
||||
|
||||
// Convert past messages to a format suitable for token counting
|
||||
const messagesString = pastMessages.map((msg: any) => `${msg.role}: ${msg.content}`).join('\n')
|
||||
const tokenCount = await llmNodeInstance.getNumTokens(messagesString)
|
||||
|
||||
if (tokenCount > maxTokenLimit) {
|
||||
// Calculate how many messages to summarize (messages that exceed the token limit)
|
||||
let currBufferLength = tokenCount
|
||||
const messagesToSummarize = []
|
||||
const remainingMessages = [...pastMessages]
|
||||
|
||||
// Remove messages from the beginning until we're under the token limit
|
||||
while (currBufferLength > maxTokenLimit && remainingMessages.length > 0) {
|
||||
const poppedMessage = remainingMessages.shift()
|
||||
if (poppedMessage) {
|
||||
messagesToSummarize.push(poppedMessage)
|
||||
// Recalculate token count for remaining messages
|
||||
const remainingMessagesString = remainingMessages.map((msg: any) => `${msg.role}: ${msg.content}`).join('\n')
|
||||
currBufferLength = await llmNodeInstance.getNumTokens(remainingMessagesString)
|
||||
}
|
||||
}
|
||||
|
||||
// Summarize the messages that were removed
|
||||
const messagesToSummarizeString = messagesToSummarize.map((msg: any) => `${msg.role}: ${msg.content}`).join('\n')
|
||||
|
||||
const summary = await llmNodeInstance.invoke(
|
||||
[
|
||||
{
|
||||
role: 'user',
|
||||
content: DEFAULT_SUMMARIZER_TEMPLATE.replace('{conversation}', messagesToSummarizeString)
|
||||
}
|
||||
],
|
||||
{ signal: abortController?.signal }
|
||||
)
|
||||
|
||||
// Add summary as a system message at the beginning, then add remaining messages
|
||||
messages.push({ role: 'system', content: `Previous conversation summary: ${summary.content}` })
|
||||
messages.push(...remainingMessages)
|
||||
} else {
|
||||
// If under token limit, use all messages
|
||||
messages.push(...pastMessages)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { nodeClass: ConditionAgent_Agentflow }
|
||||
|
|
@ -0,0 +1,219 @@
|
|||
import { DataSource } from 'typeorm'
|
||||
import {
|
||||
ICommonObject,
|
||||
IDatabaseEntity,
|
||||
INode,
|
||||
INodeData,
|
||||
INodeOptionsValue,
|
||||
INodeParams,
|
||||
IServerSideEventStreamer
|
||||
} from '../../../src/Interface'
|
||||
import { getVars, executeJavaScriptCode, createCodeExecutionSandbox, processTemplateVariables } from '../../../src/utils'
|
||||
import { updateFlowState } from '../utils'
|
||||
|
||||
interface ICustomFunctionInputVariables {
|
||||
variableName: string
|
||||
variableValue: string
|
||||
}
|
||||
|
||||
const exampleFunc = `/*
|
||||
* You can use any libraries imported in Flowise
|
||||
* You can use properties specified in Input Variables with the prefix $. For example: $foo
|
||||
* You can get default flow config: $flow.sessionId, $flow.chatId, $flow.chatflowId, $flow.input, $flow.state
|
||||
* You can get global variables: $vars.<variable-name>
|
||||
* Must return a string value at the end of the function
|
||||
*/
|
||||
|
||||
const fetch = require('node-fetch');
|
||||
const url = 'https://api.open-meteo.com/v1/forecast?latitude=52.52&longitude=13.41¤t_weather=true';
|
||||
const options = {
|
||||
method: 'GET',
|
||||
headers: {
|
||||
'Content-Type': 'application/json'
|
||||
}
|
||||
};
|
||||
try {
|
||||
const response = await fetch(url, options);
|
||||
const text = await response.text();
|
||||
return text;
|
||||
} catch (error) {
|
||||
console.error(error);
|
||||
return '';
|
||||
}`
|
||||
|
||||
class CustomFunction_Agentflow implements INode {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
description: string
|
||||
type: string
|
||||
icon: string
|
||||
category: string
|
||||
color: string
|
||||
hideOutput: boolean
|
||||
hint: string
|
||||
baseClasses: string[]
|
||||
documentation?: string
|
||||
credential: INodeParams
|
||||
inputs: INodeParams[]
|
||||
|
||||
constructor() {
|
||||
this.label = 'Custom Function'
|
||||
this.name = 'customFunctionAgentflow'
|
||||
this.version = 1.1
|
||||
this.type = 'CustomFunction'
|
||||
this.category = 'Agent Flows'
|
||||
this.description = 'Execute custom function'
|
||||
this.baseClasses = [this.type]
|
||||
this.color = '#E4B7FF'
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'Input Variables',
|
||||
name: 'customFunctionInputVariables',
|
||||
description: 'Input variables can be used in the function with prefix $. For example: $foo',
|
||||
type: 'array',
|
||||
optional: true,
|
||||
acceptVariable: true,
|
||||
array: [
|
||||
{
|
||||
label: 'Variable Name',
|
||||
name: 'variableName',
|
||||
type: 'string'
|
||||
},
|
||||
{
|
||||
label: 'Variable Value',
|
||||
name: 'variableValue',
|
||||
type: 'string',
|
||||
acceptVariable: true
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
label: 'Javascript Function',
|
||||
name: 'customFunctionJavascriptFunction',
|
||||
type: 'code',
|
||||
codeExample: exampleFunc,
|
||||
description: 'The function to execute. Must return a string or an object that can be converted to a string.'
|
||||
},
|
||||
{
|
||||
label: 'Update Flow State',
|
||||
name: 'customFunctionUpdateState',
|
||||
description: 'Update runtime state during the execution of the workflow',
|
||||
type: 'array',
|
||||
optional: true,
|
||||
acceptVariable: true,
|
||||
array: [
|
||||
{
|
||||
label: 'Key',
|
||||
name: 'key',
|
||||
type: 'asyncOptions',
|
||||
loadMethod: 'listRuntimeStateKeys'
|
||||
},
|
||||
{
|
||||
label: 'Value',
|
||||
name: 'value',
|
||||
type: 'string',
|
||||
acceptVariable: true,
|
||||
acceptNodeOutputAsVariable: true
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
//@ts-ignore
|
||||
loadMethods = {
|
||||
async listRuntimeStateKeys(_: INodeData, options: ICommonObject): Promise<INodeOptionsValue[]> {
|
||||
const previousNodes = options.previousNodes as ICommonObject[]
|
||||
const startAgentflowNode = previousNodes.find((node) => node.name === 'startAgentflow')
|
||||
const state = startAgentflowNode?.inputs?.startState as ICommonObject[]
|
||||
return state.map((item) => ({ label: item.key, name: item.key }))
|
||||
}
|
||||
}
|
||||
|
||||
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<any> {
|
||||
const javascriptFunction = nodeData.inputs?.customFunctionJavascriptFunction as string
|
||||
const functionInputVariables = (nodeData.inputs?.customFunctionInputVariables as ICustomFunctionInputVariables[]) ?? []
|
||||
const _customFunctionUpdateState = nodeData.inputs?.customFunctionUpdateState
|
||||
|
||||
const state = options.agentflowRuntime?.state as ICommonObject
|
||||
const chatId = options.chatId as string
|
||||
const isLastNode = options.isLastNode as boolean
|
||||
const isStreamable = isLastNode && options.sseStreamer !== undefined
|
||||
|
||||
const appDataSource = options.appDataSource as DataSource
|
||||
const databaseEntities = options.databaseEntities as IDatabaseEntity
|
||||
|
||||
const variables = await getVars(appDataSource, databaseEntities, nodeData, options)
|
||||
const flow = {
|
||||
input,
|
||||
state,
|
||||
chatflowId: options.chatflowid,
|
||||
sessionId: options.sessionId,
|
||||
chatId: options.chatId,
|
||||
rawOutput: options.postProcessing?.rawOutput || '',
|
||||
chatHistory: options.postProcessing?.chatHistory || [],
|
||||
sourceDocuments: options.postProcessing?.sourceDocuments,
|
||||
usedTools: options.postProcessing?.usedTools,
|
||||
artifacts: options.postProcessing?.artifacts,
|
||||
fileAnnotations: options.postProcessing?.fileAnnotations
|
||||
}
|
||||
|
||||
// Create additional sandbox variables for custom function inputs
|
||||
const additionalSandbox: ICommonObject = {}
|
||||
for (const item of functionInputVariables) {
|
||||
const variableName = item.variableName
|
||||
const variableValue = item.variableValue
|
||||
additionalSandbox[`$${variableName}`] = variableValue
|
||||
}
|
||||
|
||||
const sandbox = createCodeExecutionSandbox(input, variables, flow, additionalSandbox)
|
||||
|
||||
// Setup streaming function if needed
|
||||
const streamOutput = isStreamable
|
||||
? (output: string) => {
|
||||
const sseStreamer: IServerSideEventStreamer = options.sseStreamer
|
||||
sseStreamer.streamTokenEvent(chatId, output)
|
||||
}
|
||||
: undefined
|
||||
|
||||
try {
|
||||
const response = await executeJavaScriptCode(javascriptFunction, sandbox, {
|
||||
libraries: ['axios'],
|
||||
streamOutput
|
||||
})
|
||||
|
||||
let finalOutput = response
|
||||
if (typeof response === 'object') {
|
||||
finalOutput = JSON.stringify(response, null, 2)
|
||||
}
|
||||
|
||||
// Update flow state if needed
|
||||
let newState = { ...state }
|
||||
if (_customFunctionUpdateState && Array.isArray(_customFunctionUpdateState) && _customFunctionUpdateState.length > 0) {
|
||||
newState = updateFlowState(state, _customFunctionUpdateState)
|
||||
}
|
||||
|
||||
newState = processTemplateVariables(newState, finalOutput)
|
||||
|
||||
const returnOutput = {
|
||||
id: nodeData.id,
|
||||
name: this.name,
|
||||
input: {
|
||||
inputVariables: functionInputVariables,
|
||||
code: javascriptFunction
|
||||
},
|
||||
output: {
|
||||
content: finalOutput
|
||||
},
|
||||
state: newState
|
||||
}
|
||||
|
||||
return returnOutput
|
||||
} catch (e) {
|
||||
throw new Error(e)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { nodeClass: CustomFunction_Agentflow }
|
||||
|
|
@ -0,0 +1,67 @@
import { ICommonObject, INode, INodeData, INodeParams, IServerSideEventStreamer } from '../../../src/Interface'

class DirectReply_Agentflow implements INode {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    color: string
    hideOutput: boolean
    hint: string
    baseClasses: string[]
    documentation?: string
    credential: INodeParams
    inputs: INodeParams[]

    constructor() {
        this.label = 'Direct Reply'
        this.name = 'directReplyAgentflow'
        this.version = 1.0
        this.type = 'DirectReply'
        this.category = 'Agent Flows'
        this.description = 'Directly reply to the user with a message'
        this.baseClasses = [this.type]
        this.color = '#4DDBBB'
        this.hideOutput = true
        this.inputs = [
            {
                label: 'Message',
                name: 'directReplyMessage',
                type: 'string',
                rows: 4,
                acceptVariable: true
            }
        ]
    }

    async run(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
        const directReplyMessage = nodeData.inputs?.directReplyMessage as string

        const state = options.agentflowRuntime?.state as ICommonObject
        const chatId = options.chatId as string
        const isLastNode = options.isLastNode as boolean
        const isStreamable = isLastNode && options.sseStreamer !== undefined

        if (isStreamable) {
            const sseStreamer: IServerSideEventStreamer = options.sseStreamer
            sseStreamer.streamTokenEvent(chatId, directReplyMessage)
        }

        const returnOutput = {
            id: nodeData.id,
            name: this.name,
            input: {},
            output: {
                content: directReplyMessage
            },
            state
        }

        return returnOutput
    }
}

module.exports = { nodeClass: DirectReply_Agentflow }
|
|
@ -0,0 +1,296 @@
|
|||
import {
|
||||
ICommonObject,
|
||||
IDatabaseEntity,
|
||||
INode,
|
||||
INodeData,
|
||||
INodeOptionsValue,
|
||||
INodeParams,
|
||||
IServerSideEventStreamer
|
||||
} from '../../../src/Interface'
|
||||
import axios, { AxiosRequestConfig } from 'axios'
|
||||
import { getCredentialData, getCredentialParam, processTemplateVariables, parseJsonBody } from '../../../src/utils'
|
||||
import { DataSource } from 'typeorm'
|
||||
import { BaseMessageLike } from '@langchain/core/messages'
|
||||
import { updateFlowState } from '../utils'
|
||||
|
||||
class ExecuteFlow_Agentflow implements INode {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
description: string
|
||||
type: string
|
||||
icon: string
|
||||
category: string
|
||||
color: string
|
||||
baseClasses: string[]
|
||||
documentation?: string
|
||||
credential: INodeParams
|
||||
inputs: INodeParams[]
|
||||
|
||||
constructor() {
|
||||
this.label = 'Execute Flow'
|
||||
this.name = 'executeFlowAgentflow'
|
||||
this.version = 1.2
|
||||
this.type = 'ExecuteFlow'
|
||||
this.category = 'Agent Flows'
|
||||
this.description = 'Execute another flow'
|
||||
this.baseClasses = [this.type]
|
||||
this.color = '#a3b18a'
|
||||
this.credential = {
|
||||
label: 'Connect Credential',
|
||||
name: 'credential',
|
||||
type: 'credential',
|
||||
credentialNames: ['chatflowApi'],
|
||||
optional: true
|
||||
}
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'Select Flow',
|
||||
name: 'executeFlowSelectedFlow',
|
||||
type: 'asyncOptions',
|
||||
loadMethod: 'listFlows'
|
||||
},
|
||||
{
|
||||
label: 'Input',
|
||||
name: 'executeFlowInput',
|
||||
type: 'string',
|
||||
rows: 4,
|
||||
acceptVariable: true
|
||||
},
|
||||
{
|
||||
label: 'Override Config',
|
||||
name: 'executeFlowOverrideConfig',
|
||||
description: 'Override the config passed to the flow',
|
||||
type: 'json',
|
||||
optional: true,
|
||||
acceptVariable: true
|
||||
},
|
||||
{
|
||||
label: 'Base URL',
|
||||
name: 'executeFlowBaseURL',
|
||||
type: 'string',
|
||||
description:
|
||||
'Base URL to Flowise. By default, it is the URL of the incoming request. Useful when you need to execute flow through an alternative route.',
|
||||
placeholder: 'http://localhost:3000',
|
||||
optional: true
|
||||
},
|
||||
{
|
||||
label: 'Return Response As',
|
||||
name: 'executeFlowReturnResponseAs',
|
||||
type: 'options',
|
||||
options: [
|
||||
{
|
||||
label: 'User Message',
|
||||
name: 'userMessage'
|
||||
},
|
||||
{
|
||||
label: 'Assistant Message',
|
||||
name: 'assistantMessage'
|
||||
}
|
||||
],
|
||||
default: 'userMessage'
|
||||
},
|
||||
{
|
||||
label: 'Update Flow State',
|
||||
name: 'executeFlowUpdateState',
|
||||
description: 'Update runtime state during the execution of the workflow',
|
||||
type: 'array',
|
||||
optional: true,
|
||||
acceptVariable: true,
|
||||
array: [
|
||||
{
|
||||
label: 'Key',
|
||||
name: 'key',
|
||||
type: 'asyncOptions',
|
||||
loadMethod: 'listRuntimeStateKeys'
|
||||
},
|
||||
{
|
||||
label: 'Value',
|
||||
name: 'value',
|
||||
type: 'string',
|
||||
acceptVariable: true,
|
||||
acceptNodeOutputAsVariable: true
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
//@ts-ignore
|
||||
loadMethods = {
|
||||
async listFlows(_: INodeData, options: ICommonObject): Promise<INodeOptionsValue[]> {
|
||||
const returnData: INodeOptionsValue[] = []
|
||||
|
||||
const appDataSource = options.appDataSource as DataSource
|
||||
const databaseEntities = options.databaseEntities as IDatabaseEntity
|
||||
if (appDataSource === undefined || !appDataSource) {
|
||||
return returnData
|
||||
}
|
||||
|
||||
const searchOptions = options.searchOptions || {}
|
||||
const chatflows = await appDataSource.getRepository(databaseEntities['ChatFlow']).findBy(searchOptions)
|
||||
|
||||
for (let i = 0; i < chatflows.length; i += 1) {
|
||||
let cfType = 'Chatflow'
|
||||
if (chatflows[i].type === 'AGENTFLOW') {
|
||||
cfType = 'Agentflow V2'
|
||||
} else if (chatflows[i].type === 'MULTIAGENT') {
|
||||
cfType = 'Agentflow V1'
|
||||
}
|
||||
const data = {
|
||||
label: chatflows[i].name,
|
||||
name: chatflows[i].id,
|
||||
description: cfType
|
||||
} as INodeOptionsValue
|
||||
returnData.push(data)
|
||||
}
|
||||
|
||||
// order by label
|
||||
return returnData.sort((a, b) => a.label.localeCompare(b.label))
|
||||
},
|
||||
async listRuntimeStateKeys(_: INodeData, options: ICommonObject): Promise<INodeOptionsValue[]> {
|
||||
const previousNodes = options.previousNodes as ICommonObject[]
|
||||
const startAgentflowNode = previousNodes.find((node) => node.name === 'startAgentflow')
|
||||
const state = startAgentflowNode?.inputs?.startState as ICommonObject[]
|
||||
return state.map((item) => ({ label: item.key, name: item.key }))
|
||||
}
|
||||
}
|
||||
|
||||
async run(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
|
||||
const baseURL = (nodeData.inputs?.executeFlowBaseURL as string) || (options.baseURL as string)
|
||||
const selectedFlowId = nodeData.inputs?.executeFlowSelectedFlow as string
|
||||
const flowInput = nodeData.inputs?.executeFlowInput as string
|
||||
const returnResponseAs = nodeData.inputs?.executeFlowReturnResponseAs as string
|
||||
const _executeFlowUpdateState = nodeData.inputs?.executeFlowUpdateState
|
||||
|
||||
let overrideConfig = nodeData.inputs?.executeFlowOverrideConfig
|
||||
if (typeof overrideConfig === 'string' && overrideConfig.startsWith('{') && overrideConfig.endsWith('}')) {
|
||||
try {
|
||||
overrideConfig = parseJsonBody(overrideConfig)
|
||||
} catch (parseError) {
|
||||
throw new Error(`Invalid JSON in executeFlowOverrideConfig: ${parseError.message}`)
|
||||
}
|
||||
}
|
||||
|
||||
const state = options.agentflowRuntime?.state as ICommonObject
|
||||
const runtimeChatHistory = (options.agentflowRuntime?.chatHistory as BaseMessageLike[]) ?? []
|
||||
const isLastNode = options.isLastNode as boolean
|
||||
const sseStreamer: IServerSideEventStreamer | undefined = options.sseStreamer
|
||||
|
||||
try {
|
||||
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
|
||||
const chatflowApiKey = getCredentialParam('chatflowApiKey', credentialData, nodeData)
|
||||
|
||||
if (selectedFlowId === options.chatflowid) throw new Error('Cannot call the same agentflow!')
|
||||
|
||||
let headers: Record<string, string> = {
|
||||
'Content-Type': 'application/json',
|
||||
'flowise-tool': 'true'
|
||||
}
|
||||
if (chatflowApiKey) headers = { ...headers, Authorization: `Bearer ${chatflowApiKey}` }
|
||||
|
||||
const finalUrl = `${baseURL}/api/v1/prediction/${selectedFlowId}`
|
||||
const requestConfig: AxiosRequestConfig = {
|
||||
method: 'POST',
|
||||
url: finalUrl,
|
||||
headers,
|
||||
data: {
|
||||
question: flowInput,
|
||||
chatId: options.chatId,
|
||||
overrideConfig
|
||||
}
|
||||
}
|
||||
|
||||
const response = await axios(requestConfig)
|
||||
|
||||
let resultText = ''
|
||||
if (response.data.text) resultText = response.data.text
|
||||
else if (response.data.json) resultText = '```json\n' + JSON.stringify(response.data.json, null, 2)
|
||||
else resultText = JSON.stringify(response.data, null, 2)
|
||||
|
||||
if (isLastNode && sseStreamer) {
|
||||
sseStreamer.streamTokenEvent(options.chatId, resultText)
|
||||
}
|
||||
|
||||
// Update flow state if needed
|
||||
let newState = { ...state }
|
||||
if (_executeFlowUpdateState && Array.isArray(_executeFlowUpdateState) && _executeFlowUpdateState.length > 0) {
|
||||
newState = updateFlowState(state, _executeFlowUpdateState)
|
||||
}
|
||||
|
||||
// Process template variables in state
|
||||
newState = processTemplateVariables(newState, resultText)
|
||||
|
||||
// Only add to runtime chat history if this is the first node
|
||||
const inputMessages = []
|
||||
if (!runtimeChatHistory.length) {
|
||||
inputMessages.push({ role: 'user', content: flowInput })
|
||||
}
|
||||
|
||||
let returnRole = 'user'
|
||||
if (returnResponseAs === 'assistantMessage') {
|
||||
returnRole = 'assistant'
|
||||
}
|
||||
|
||||
const returnOutput = {
|
||||
id: nodeData.id,
|
||||
name: this.name,
|
||||
input: {
|
||||
messages: [
|
||||
{
|
||||
role: 'user',
|
||||
content: flowInput
|
||||
}
|
||||
]
|
||||
},
|
||||
output: {
|
||||
content: resultText
|
||||
},
|
||||
state: newState,
|
||||
chatHistory: [
|
||||
...inputMessages,
|
||||
{
|
||||
role: returnRole,
|
||||
content: resultText,
|
||||
name: nodeData?.label ? nodeData?.label.toLowerCase().replace(/\s/g, '_').trim() : nodeData?.id
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
return returnOutput
|
||||
} catch (error) {
|
||||
console.error('ExecuteFlow Error:', error)
|
||||
|
||||
// Format error response
|
||||
const errorResponse: any = {
|
||||
id: nodeData.id,
|
||||
name: this.name,
|
||||
input: {
|
||||
messages: [
|
||||
{
|
||||
role: 'user',
|
||||
content: flowInput
|
||||
}
|
||||
]
|
||||
},
|
||||
error: {
|
||||
name: error.name || 'Error',
|
||||
message: error.message || 'An error occurred during the execution of the flow'
|
||||
},
|
||||
state
|
||||
}
|
||||
|
||||
// Add more error details if available
|
||||
if (error.response) {
|
||||
errorResponse.error.status = error.response.status
|
||||
errorResponse.error.statusText = error.response.statusText
|
||||
errorResponse.error.data = error.response.data
|
||||
errorResponse.error.headers = error.response.headers
|
||||
}
|
||||
|
||||
throw new Error(error)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { nodeClass: ExecuteFlow_Agentflow }
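
// --- Illustrative sketch (not part of the file above) ---------------------------------
// Underneath, the Execute Flow node is a Flowise prediction call. A minimal standalone
// sketch of that request, reusing the axios import at the top of this file; the base URL
// and flow id below are assumptions for illustration, not values the node hardcodes.
async function callFlowDirectly(flowId: string, question: string, apiKey?: string) {
    const headers: Record<string, string> = {
        'Content-Type': 'application/json',
        'flowise-tool': 'true'
    }
    if (apiKey) headers.Authorization = `Bearer ${apiKey}`

    const response = await axios({
        method: 'POST',
        url: `http://localhost:3000/api/v1/prediction/${flowId}`,
        headers,
        data: { question, chatId: 'chat-123', overrideConfig: {} }
    })
    // The node prefers response.data.text, then response.data.json, else the raw payload
    return response.data.text ?? response.data
}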

@@ -0,0 +1,380 @@
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
|
||||
import { AxiosRequestConfig, Method, ResponseType } from 'axios'
|
||||
import FormData from 'form-data'
|
||||
import * as querystring from 'querystring'
|
||||
import { getCredentialData, getCredentialParam, parseJsonBody } from '../../../src/utils'
|
||||
import { secureAxiosRequest } from '../../../src/httpSecurity'
|
||||
|
||||
class HTTP_Agentflow implements INode {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
description: string
|
||||
type: string
|
||||
icon: string
|
||||
category: string
|
||||
color: string
|
||||
baseClasses: string[]
|
||||
documentation?: string
|
||||
credential: INodeParams
|
||||
inputs: INodeParams[]
|
||||
|
||||
constructor() {
|
||||
this.label = 'HTTP'
|
||||
this.name = 'httpAgentflow'
|
||||
this.version = 1.1
|
||||
this.type = 'HTTP'
|
||||
this.category = 'Agent Flows'
|
||||
this.description = 'Send a HTTP request'
|
||||
this.baseClasses = [this.type]
|
||||
this.color = '#FF7F7F'
|
||||
this.credential = {
|
||||
label: 'HTTP Credential',
|
||||
name: 'credential',
|
||||
type: 'credential',
|
||||
credentialNames: ['httpBasicAuth', 'httpBearerToken', 'httpApiKey'],
|
||||
optional: true
|
||||
}
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'Method',
|
||||
name: 'method',
|
||||
type: 'options',
|
||||
options: [
|
||||
{
|
||||
label: 'GET',
|
||||
name: 'GET'
|
||||
},
|
||||
{
|
||||
label: 'POST',
|
||||
name: 'POST'
|
||||
},
|
||||
{
|
||||
label: 'PUT',
|
||||
name: 'PUT'
|
||||
},
|
||||
{
|
||||
label: 'DELETE',
|
||||
name: 'DELETE'
|
||||
},
|
||||
{
|
||||
label: 'PATCH',
|
||||
name: 'PATCH'
|
||||
}
|
||||
],
|
||||
default: 'GET'
|
||||
},
|
||||
{
|
||||
label: 'URL',
|
||||
name: 'url',
|
||||
type: 'string',
|
||||
acceptVariable: true
|
||||
},
|
||||
{
|
||||
label: 'Headers',
|
||||
name: 'headers',
|
||||
type: 'array',
|
||||
acceptVariable: true,
|
||||
array: [
|
||||
{
|
||||
label: 'Key',
|
||||
name: 'key',
|
||||
type: 'string',
|
||||
default: ''
|
||||
},
|
||||
{
|
||||
label: 'Value',
|
||||
name: 'value',
|
||||
type: 'string',
|
||||
default: '',
|
||||
acceptVariable: true
|
||||
}
|
||||
],
|
||||
optional: true
|
||||
},
|
||||
{
|
||||
label: 'Query Params',
|
||||
name: 'queryParams',
|
||||
type: 'array',
|
||||
acceptVariable: true,
|
||||
array: [
|
||||
{
|
||||
label: 'Key',
|
||||
name: 'key',
|
||||
type: 'string',
|
||||
default: ''
|
||||
},
|
||||
{
|
||||
label: 'Value',
|
||||
name: 'value',
|
||||
type: 'string',
|
||||
default: '',
|
||||
acceptVariable: true
|
||||
}
|
||||
],
|
||||
optional: true
|
||||
},
|
||||
{
|
||||
label: 'Body Type',
|
||||
name: 'bodyType',
|
||||
type: 'options',
|
||||
options: [
|
||||
{
|
||||
label: 'JSON',
|
||||
name: 'json'
|
||||
},
|
||||
{
|
||||
label: 'Raw',
|
||||
name: 'raw'
|
||||
},
|
||||
{
|
||||
label: 'Form Data',
|
||||
name: 'formData'
|
||||
},
|
||||
{
|
||||
label: 'x-www-form-urlencoded',
|
||||
name: 'xWwwFormUrlencoded'
|
||||
}
|
||||
],
|
||||
optional: true
|
||||
},
|
||||
{
|
||||
label: 'Body',
|
||||
name: 'body',
|
||||
type: 'string',
|
||||
acceptVariable: true,
|
||||
rows: 4,
|
||||
show: {
|
||||
bodyType: ['raw', 'json']
|
||||
},
|
||||
optional: true
|
||||
},
|
||||
{
|
||||
label: 'Body',
|
||||
name: 'body',
|
||||
type: 'array',
|
||||
acceptVariable: true,
|
||||
show: {
|
||||
bodyType: ['xWwwFormUrlencoded', 'formData']
|
||||
},
|
||||
array: [
|
||||
{
|
||||
label: 'Key',
|
||||
name: 'key',
|
||||
type: 'string',
|
||||
default: ''
|
||||
},
|
||||
{
|
||||
label: 'Value',
|
||||
name: 'value',
|
||||
type: 'string',
|
||||
default: '',
|
||||
acceptVariable: true
|
||||
}
|
||||
],
|
||||
optional: true
|
||||
},
|
||||
{
|
||||
label: 'Response Type',
|
||||
name: 'responseType',
|
||||
type: 'options',
|
||||
options: [
|
||||
{
|
||||
label: 'JSON',
|
||||
name: 'json'
|
||||
},
|
||||
{
|
||||
label: 'Text',
|
||||
name: 'text'
|
||||
},
|
||||
{
|
||||
label: 'Array Buffer',
|
||||
name: 'arraybuffer'
|
||||
},
|
||||
{
|
||||
label: 'Raw (Base64)',
|
||||
name: 'base64'
|
||||
}
|
||||
],
|
||||
optional: true
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
async run(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
|
||||
const method = nodeData.inputs?.method as 'GET' | 'POST' | 'PUT' | 'DELETE' | 'PATCH'
|
||||
const url = nodeData.inputs?.url as string
|
||||
const headers = nodeData.inputs?.headers as ICommonObject
|
||||
const queryParams = nodeData.inputs?.queryParams as ICommonObject
|
||||
const bodyType = nodeData.inputs?.bodyType as 'json' | 'raw' | 'formData' | 'xWwwFormUrlencoded'
|
||||
const body = nodeData.inputs?.body as ICommonObject | string | ICommonObject[]
|
||||
const responseType = nodeData.inputs?.responseType as 'json' | 'text' | 'arraybuffer' | 'base64'
|
||||
|
||||
const state = options.agentflowRuntime?.state as ICommonObject
|
||||
|
||||
try {
|
||||
// Prepare headers
|
||||
const requestHeaders: Record<string, string> = {}
|
||||
|
||||
// Add headers from inputs
|
||||
if (headers && Array.isArray(headers)) {
|
||||
for (const header of headers) {
|
||||
if (header.key && header.value) {
|
||||
requestHeaders[header.key] = header.value
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Add credentials if provided
|
||||
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
|
||||
if (credentialData && Object.keys(credentialData).length !== 0) {
|
||||
const basicAuthUsername = getCredentialParam('basicAuthUsername', credentialData, nodeData)
|
||||
const basicAuthPassword = getCredentialParam('basicAuthPassword', credentialData, nodeData)
|
||||
const bearerToken = getCredentialParam('token', credentialData, nodeData)
|
||||
const apiKeyName = getCredentialParam('key', credentialData, nodeData)
|
||||
const apiKeyValue = getCredentialParam('value', credentialData, nodeData)
|
||||
|
||||
// Determine which type of auth to use based on available credentials
|
||||
if (basicAuthUsername || basicAuthPassword) {
|
||||
// Basic Auth
|
||||
const auth = Buffer.from(`${basicAuthUsername}:${basicAuthPassword}`).toString('base64')
|
||||
requestHeaders['Authorization'] = `Basic ${auth}`
|
||||
} else if (bearerToken) {
|
||||
// Bearer Token
|
||||
requestHeaders['Authorization'] = `Bearer ${bearerToken}`
|
||||
} else if (apiKeyName && apiKeyValue) {
|
||||
// API Key in header
|
||||
requestHeaders[apiKeyName] = apiKeyValue
|
||||
}
|
||||
}
|
||||
|
||||
// Prepare query parameters
|
||||
let queryString = ''
|
||||
if (queryParams && Array.isArray(queryParams)) {
|
||||
const params = new URLSearchParams()
|
||||
for (const param of queryParams) {
|
||||
if (param.key && param.value) {
|
||||
params.append(param.key, param.value)
|
||||
}
|
||||
}
|
||||
queryString = params.toString()
|
||||
}
|
||||
|
||||
// Build final URL with query parameters
|
||||
const finalUrl = queryString ? `${url}${url.includes('?') ? '&' : '?'}${queryString}` : url
|
||||
|
||||
// Prepare request config
|
||||
const requestConfig: AxiosRequestConfig = {
|
||||
method: method as Method,
|
||||
url: finalUrl,
|
||||
headers: requestHeaders,
|
||||
responseType: (responseType || 'json') as ResponseType
|
||||
}
|
||||
|
||||
// Handle request body based on body type
|
||||
if (method !== 'GET' && body) {
|
||||
switch (bodyType) {
|
||||
case 'json': {
|
||||
requestConfig.data = typeof body === 'string' ? parseJsonBody(body) : body
|
||||
requestHeaders['Content-Type'] = 'application/json'
|
||||
break
|
||||
}
|
||||
case 'raw':
|
||||
requestConfig.data = body
|
||||
break
|
||||
case 'formData': {
|
||||
const formData = new FormData()
|
||||
if (Array.isArray(body) && body.length > 0) {
|
||||
for (const item of body) {
|
||||
formData.append(item.key, item.value)
|
||||
}
|
||||
}
|
||||
requestConfig.data = formData
|
||||
break
|
||||
}
|
||||
case 'xWwwFormUrlencoded':
|
||||
requestConfig.data = querystring.stringify(typeof body === 'string' ? parseJsonBody(body) : body)
|
||||
requestHeaders['Content-Type'] = 'application/x-www-form-urlencoded'
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// Make the secure HTTP request that validates all URLs in redirect chains
|
||||
const response = await secureAxiosRequest(requestConfig)
|
||||
|
||||
// Process response based on response type
|
||||
let responseData
|
||||
if (responseType === 'base64' && response.data) {
|
||||
responseData = Buffer.from(response.data, 'binary').toString('base64')
|
||||
} else {
|
||||
responseData = response.data
|
||||
}
|
||||
|
||||
const returnOutput = {
|
||||
id: nodeData.id,
|
||||
name: this.name,
|
||||
input: {
|
||||
http: {
|
||||
method,
|
||||
url,
|
||||
headers,
|
||||
queryParams,
|
||||
bodyType,
|
||||
body,
|
||||
responseType
|
||||
}
|
||||
},
|
||||
output: {
|
||||
http: {
|
||||
data: responseData,
|
||||
status: response.status,
|
||||
statusText: response.statusText,
|
||||
headers: response.headers
|
||||
}
|
||||
},
|
||||
state
|
||||
}
|
||||
|
||||
return returnOutput
|
||||
} catch (error) {
|
||||
console.error('HTTP Request Error:', error)
|
||||
|
||||
const errorMessage =
|
||||
error.response?.data?.message || error.response?.data?.error || error.message || 'An error occurred during the HTTP request'
|
||||
|
||||
// Format error response
|
||||
const errorResponse: any = {
|
||||
id: nodeData.id,
|
||||
name: this.name,
|
||||
input: {
|
||||
http: {
|
||||
method,
|
||||
url,
|
||||
headers,
|
||||
queryParams,
|
||||
bodyType,
|
||||
body,
|
||||
responseType
|
||||
}
|
||||
},
|
||||
error: {
|
||||
name: error.name || 'Error',
|
||||
message: errorMessage
|
||||
},
|
||||
state
|
||||
}
|
||||
|
||||
// Add more error details if available
|
||||
if (error.response) {
|
||||
errorResponse.error.status = error.response.status
|
||||
errorResponse.error.statusText = error.response.statusText
|
||||
errorResponse.error.data = error.response.data
|
||||
errorResponse.error.headers = error.response.headers
|
||||
}
|
||||
|
||||
throw new Error(errorMessage)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { nodeClass: HTTP_Agentflow }
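
// --- Illustrative sketch (not part of the file above) ---------------------------------
// How the array-style Query Params input translates into the final request URL, shown
// standalone with assumed values. Parameters are appended with '&' when the URL already
// carries a query string, otherwise with '?', mirroring the logic inside run().
function buildFinalUrl(url: string, queryParams: Array<{ key: string; value: string }>) {
    const params = new URLSearchParams()
    for (const param of queryParams) {
        if (param.key && param.value) params.append(param.key, param.value)
    }
    const queryString = params.toString()
    return queryString ? `${url}${url.includes('?') ? '&' : '?'}${queryString}` : url
}

// buildFinalUrl('https://api.example.com/items?page=1', [{ key: 'limit', value: '10' }])
//   -> 'https://api.example.com/items?page=1&limit=10'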

@@ -0,0 +1,274 @@
import { BaseChatModel } from '@langchain/core/language_models/chat_models'
|
||||
import {
|
||||
ICommonObject,
|
||||
ICondition,
|
||||
IHumanInput,
|
||||
INode,
|
||||
INodeData,
|
||||
INodeOptionsValue,
|
||||
INodeOutputsValue,
|
||||
INodeParams,
|
||||
IServerSideEventStreamer
|
||||
} from '../../../src/Interface'
|
||||
import { AIMessageChunk, BaseMessageLike } from '@langchain/core/messages'
|
||||
import { DEFAULT_HUMAN_INPUT_DESCRIPTION, DEFAULT_HUMAN_INPUT_DESCRIPTION_HTML } from '../prompt'
|
||||
|
||||
class HumanInput_Agentflow implements INode {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
description: string
|
||||
type: string
|
||||
icon: string
|
||||
category: string
|
||||
color: string
|
||||
baseClasses: string[]
|
||||
documentation?: string
|
||||
credential: INodeParams
|
||||
inputs: INodeParams[]
|
||||
outputs: INodeOutputsValue[]
|
||||
|
||||
constructor() {
|
||||
this.label = 'Human Input'
|
||||
this.name = 'humanInputAgentflow'
|
||||
this.version = 1.0
|
||||
this.type = 'HumanInput'
|
||||
this.category = 'Agent Flows'
|
||||
this.description = 'Request human input, approval or rejection during execution'
|
||||
this.color = '#6E6EFD'
|
||||
this.baseClasses = [this.type]
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'Description Type',
|
||||
name: 'humanInputDescriptionType',
|
||||
type: 'options',
|
||||
options: [
|
||||
{
|
||||
label: 'Fixed',
|
||||
name: 'fixed',
|
||||
description: 'Specify a fixed description'
|
||||
},
|
||||
{
|
||||
label: 'Dynamic',
|
||||
name: 'dynamic',
|
||||
description: 'Use LLM to generate a description'
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
label: 'Description',
|
||||
name: 'humanInputDescription',
|
||||
type: 'string',
|
||||
placeholder: 'Are you sure you want to proceed?',
|
||||
acceptVariable: true,
|
||||
rows: 4,
|
||||
show: {
|
||||
humanInputDescriptionType: 'fixed'
|
||||
}
|
||||
},
|
||||
{
|
||||
label: 'Model',
|
||||
name: 'humanInputModel',
|
||||
type: 'asyncOptions',
|
||||
loadMethod: 'listModels',
|
||||
loadConfig: true,
|
||||
show: {
|
||||
humanInputDescriptionType: 'dynamic'
|
||||
}
|
||||
},
|
||||
{
|
||||
label: 'Prompt',
|
||||
name: 'humanInputModelPrompt',
|
||||
type: 'string',
|
||||
default: DEFAULT_HUMAN_INPUT_DESCRIPTION_HTML,
|
||||
acceptVariable: true,
|
||||
generateInstruction: true,
|
||||
rows: 4,
|
||||
show: {
|
||||
humanInputDescriptionType: 'dynamic'
|
||||
}
|
||||
},
|
||||
{
|
||||
label: 'Enable Feedback',
|
||||
name: 'humanInputEnableFeedback',
|
||||
type: 'boolean',
|
||||
default: true
|
||||
}
|
||||
]
|
||||
this.outputs = [
|
||||
{
|
||||
label: 'Proceed',
|
||||
name: 'proceed'
|
||||
},
|
||||
{
|
||||
label: 'Reject',
|
||||
name: 'reject'
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
//@ts-ignore
|
||||
loadMethods = {
|
||||
async listModels(_: INodeData, options: ICommonObject): Promise<INodeOptionsValue[]> {
|
||||
const componentNodes = options.componentNodes as {
|
||||
[key: string]: INode
|
||||
}
|
||||
|
||||
const returnOptions: INodeOptionsValue[] = []
|
||||
for (const nodeName in componentNodes) {
|
||||
const componentNode = componentNodes[nodeName]
|
||||
if (componentNode.category === 'Chat Models') {
|
||||
if (componentNode.tags?.includes('LlamaIndex')) {
|
||||
continue
|
||||
}
|
||||
returnOptions.push({
|
||||
label: componentNode.label,
|
||||
name: nodeName,
|
||||
imageSrc: componentNode.icon
|
||||
})
|
||||
}
|
||||
}
|
||||
return returnOptions
|
||||
}
|
||||
}
|
||||
|
||||
async run(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
|
||||
const _humanInput = nodeData.inputs?.humanInput
|
||||
const humanInput: IHumanInput = typeof _humanInput === 'string' ? JSON.parse(_humanInput) : _humanInput
|
||||
|
||||
const humanInputEnableFeedback = nodeData.inputs?.humanInputEnableFeedback as boolean
|
||||
let humanInputDescriptionType = nodeData.inputs?.humanInputDescriptionType as string
|
||||
const model = nodeData.inputs?.humanInputModel as string
|
||||
const modelConfig = nodeData.inputs?.humanInputModelConfig as ICommonObject
|
||||
const _humanInputModelPrompt = nodeData.inputs?.humanInputModelPrompt as string
|
||||
const humanInputModelPrompt = _humanInputModelPrompt ? _humanInputModelPrompt : DEFAULT_HUMAN_INPUT_DESCRIPTION
|
||||
|
||||
// Extract runtime state and history
|
||||
const state = options.agentflowRuntime?.state as ICommonObject
|
||||
const pastChatHistory = (options.pastChatHistory as BaseMessageLike[]) ?? []
|
||||
const runtimeChatHistory = (options.agentflowRuntime?.chatHistory as BaseMessageLike[]) ?? []
|
||||
|
||||
const chatId = options.chatId as string
|
||||
const isStreamable = options.sseStreamer !== undefined
|
||||
|
||||
if (humanInput) {
|
||||
const outcomes: Partial<ICondition>[] & Partial<IHumanInput>[] = [
|
||||
{
|
||||
type: 'proceed',
|
||||
startNodeId: humanInput?.startNodeId,
|
||||
feedback: humanInputEnableFeedback && humanInput?.feedback ? humanInput.feedback : undefined,
|
||||
isFulfilled: false
|
||||
},
|
||||
{
|
||||
type: 'reject',
|
||||
startNodeId: humanInput?.startNodeId,
|
||||
feedback: humanInputEnableFeedback && humanInput?.feedback ? humanInput.feedback : undefined,
|
||||
isFulfilled: false
|
||||
}
|
||||
]
|
||||
|
||||
// Only one outcome can be fulfilled at a time
|
||||
switch (humanInput?.type) {
|
||||
case 'proceed':
|
||||
outcomes[0].isFulfilled = true
|
||||
break
|
||||
case 'reject':
|
||||
outcomes[1].isFulfilled = true
|
||||
break
|
||||
}
|
||||
|
||||
const messages = [
|
||||
...pastChatHistory,
|
||||
...runtimeChatHistory,
|
||||
{
|
||||
role: 'user',
|
||||
content: humanInput.feedback || humanInput.type
|
||||
}
|
||||
]
|
||||
const input = { ...humanInput, messages }
|
||||
const output = { conditions: outcomes }
|
||||
|
||||
const nodeOutput = {
|
||||
id: nodeData.id,
|
||||
name: this.name,
|
||||
input,
|
||||
output,
|
||||
state
|
||||
}
|
||||
|
||||
if (humanInput.feedback) {
|
||||
;(nodeOutput as any).chatHistory = [{ role: 'user', content: humanInput.feedback }]
|
||||
}
|
||||
|
||||
return nodeOutput
|
||||
} else {
|
||||
let humanInputDescription = ''
|
||||
|
||||
if (humanInputDescriptionType === 'fixed') {
|
||||
humanInputDescription = (nodeData.inputs?.humanInputDescription as string) || 'Do you want to proceed?'
|
||||
const messages = [...pastChatHistory, ...runtimeChatHistory]
|
||||
// Find the last message in the messages array
|
||||
const lastMessage = messages.length > 0 ? (messages[messages.length - 1] as any).content || '' : ''
|
||||
humanInputDescription = `${lastMessage}\n\n${humanInputDescription}`
|
||||
if (isStreamable) {
|
||||
const sseStreamer: IServerSideEventStreamer = options.sseStreamer as IServerSideEventStreamer
|
||||
sseStreamer.streamTokenEvent(chatId, humanInputDescription)
|
||||
}
|
||||
} else {
|
||||
if (model && modelConfig) {
|
||||
const nodeInstanceFilePath = options.componentNodes[model].filePath as string
|
||||
const nodeModule = await import(nodeInstanceFilePath)
|
||||
const newNodeInstance = new nodeModule.nodeClass()
|
||||
const newNodeData = {
|
||||
...nodeData,
|
||||
credential: modelConfig['FLOWISE_CREDENTIAL_ID'],
|
||||
inputs: {
|
||||
...nodeData.inputs,
|
||||
...modelConfig
|
||||
}
|
||||
}
|
||||
const llmNodeInstance = (await newNodeInstance.init(newNodeData, '', options)) as BaseChatModel
|
||||
const messages = [
|
||||
...pastChatHistory,
|
||||
...runtimeChatHistory,
|
||||
{
|
||||
role: 'user',
|
||||
content: humanInputModelPrompt || DEFAULT_HUMAN_INPUT_DESCRIPTION
|
||||
}
|
||||
]
|
||||
|
||||
let response: AIMessageChunk = new AIMessageChunk('')
|
||||
if (isStreamable) {
|
||||
const sseStreamer: IServerSideEventStreamer = options.sseStreamer as IServerSideEventStreamer
|
||||
for await (const chunk of await llmNodeInstance.stream(messages)) {
|
||||
const content = typeof chunk === 'string' ? chunk : chunk.content.toString()
|
||||
sseStreamer.streamTokenEvent(chatId, content)
|
||||
|
||||
const messageChunk = typeof chunk === 'string' ? new AIMessageChunk(chunk) : chunk
|
||||
response = response.concat(messageChunk)
|
||||
}
|
||||
humanInputDescription = response.content as string
|
||||
} else {
|
||||
const response = await llmNodeInstance.invoke(messages)
|
||||
humanInputDescription = response.content as string
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const input = { messages: [...pastChatHistory, ...runtimeChatHistory], humanInputEnableFeedback }
|
||||
const output = { content: humanInputDescription }
|
||||
const nodeOutput = {
|
||||
id: nodeData.id,
|
||||
name: this.name,
|
||||
input,
|
||||
output,
|
||||
state,
|
||||
chatHistory: [{ role: 'assistant', content: humanInputDescription }]
|
||||
}
|
||||
|
||||
return nodeOutput
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { nodeClass: HumanInput_Agentflow }
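
// --- Illustrative sketch (not part of the file above) ---------------------------------
// Shape of the resume payload the node reads when execution continues after a pause, and
// how the two outcomes are resolved. The id and feedback values are assumptions for
// illustration; feedback is only carried through when 'Enable Feedback' is on.
const resumeInput = {
    type: 'proceed', // or 'reject'
    startNodeId: 'humanInputAgentflow_0',
    feedback: 'Looks good, continue with the draft'
} as IHumanInput

// Given the payload above, run() marks exactly one outcome as fulfilled:
// output.conditions[0] -> { type: 'proceed', isFulfilled: true, feedback: '...' }
// output.conditions[1] -> { type: 'reject',  isFulfilled: false, feedback: '...' }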

@@ -0,0 +1,17 @@
export interface ILLMMessage {
    role: 'system' | 'assistant' | 'user' | 'tool' | 'developer'
    content: string
}

export interface IStructuredOutput {
    key: string
    type: 'string' | 'stringArray' | 'number' | 'boolean' | 'enum' | 'jsonArray'
    enumValues?: string
    description?: string
    jsonSchema?: string
}

export interface IFlowState {
    key: string
    value: string
}
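
// --- Illustrative sketch (not part of the file above) ---------------------------------
// Example values for these interfaces, mirroring what the LLM node's 'JSON Structured
// Output' and 'Update Flow State' rows produce. The concrete keys are assumptions for
// illustration only.
const exampleStructuredOutput: IStructuredOutput[] = [
    { key: 'sentiment', type: 'enum', enumValues: 'positive, neutral, negative', description: 'Overall tone' },
    { key: 'topics', type: 'stringArray', description: 'Topics mentioned in the input' },
    { key: 'score', type: 'number', description: 'Confidence between 0 and 1' }
]

const exampleFlowState: IFlowState[] = [{ key: 'approved', value: 'false' }]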

@@ -0,0 +1,75 @@
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { parseJsonBody } from '../../../src/utils'

class Iteration_Agentflow implements INode {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    color: string
    baseClasses: string[]
    documentation?: string
    credential: INodeParams
    inputs: INodeParams[]

    constructor() {
        this.label = 'Iteration'
        this.name = 'iterationAgentflow'
        this.version = 1.0
        this.type = 'Iteration'
        this.category = 'Agent Flows'
        this.description = 'Execute the nodes within the iteration block through N iterations'
        this.baseClasses = [this.type]
        this.color = '#9C89B8'
        this.inputs = [
            {
                label: 'Array Input',
                name: 'iterationInput',
                type: 'string',
                description: 'The input array to iterate over',
                acceptVariable: true,
                rows: 4
            }
        ]
    }

    async run(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
        const iterationInput = nodeData.inputs?.iterationInput

        // Helper function to clean JSON strings with redundant backslashes
        const safeParseJson = (str: string): string => {
            try {
                return parseJsonBody(str)
            } catch {
                // Try parsing after cleaning
                return parseJsonBody(str.replace(/\\(["'[\]{}])/g, '$1'))
            }
        }

        const iterationInputArray =
            typeof iterationInput === 'string' && iterationInput !== '' ? safeParseJson(iterationInput) : iterationInput

        if (!iterationInputArray || !Array.isArray(iterationInputArray)) {
            throw new Error('Invalid input array')
        }

        const state = options.agentflowRuntime?.state as ICommonObject

        const returnOutput = {
            id: nodeData.id,
            name: this.name,
            input: {
                iterationInput: iterationInputArray
            },
            output: {},
            state
        }

        return returnOutput
    }
}

module.exports = { nodeClass: Iteration_Agentflow }
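
// --- Illustrative note (not part of the file above) -----------------------------------
// What the backslash cleanup inside run() is for: variable substitution can leave the
// array input with escaped quotes or brackets, which plain JSON parsing rejects. The
// example strings below are assumptions for illustration.
//
//   '["apple", "banana"]'          -> parses directly on the first attempt
//   '[\\"apple\\", \\"banana\\"]'  -> fails first, parses after the backslashes are stripped
//
// Either way, run() then insists the parsed value is an array and throws otherwise.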

@@ -0,0 +1,949 @@
import { BaseChatModel } from '@langchain/core/language_models/chat_models'
|
||||
import { ICommonObject, IMessage, INode, INodeData, INodeOptionsValue, INodeParams, IServerSideEventStreamer } from '../../../src/Interface'
|
||||
import { AIMessageChunk, BaseMessageLike, MessageContentText } from '@langchain/core/messages'
|
||||
import { DEFAULT_SUMMARIZER_TEMPLATE } from '../prompt'
|
||||
import { AnalyticHandler } from '../../../src/handler'
|
||||
import { ILLMMessage } from '../Interface.Agentflow'
|
||||
import {
|
||||
addImageArtifactsToMessages,
|
||||
extractArtifactsFromResponse,
|
||||
getPastChatHistoryImageMessages,
|
||||
getUniqueImageMessages,
|
||||
processMessagesWithImages,
|
||||
replaceBase64ImagesWithFileReferences,
|
||||
replaceInlineDataWithFileReferences,
|
||||
updateFlowState
|
||||
} from '../utils'
|
||||
import { processTemplateVariables, configureStructuredOutput } from '../../../src/utils'
|
||||
import { flatten } from 'lodash'
|
||||
|
||||
class LLM_Agentflow implements INode {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
description: string
|
||||
type: string
|
||||
icon: string
|
||||
category: string
|
||||
color: string
|
||||
baseClasses: string[]
|
||||
documentation?: string
|
||||
credential: INodeParams
|
||||
inputs: INodeParams[]
|
||||
|
||||
constructor() {
|
||||
this.label = 'LLM'
|
||||
this.name = 'llmAgentflow'
|
||||
this.version = 1.1
|
||||
this.type = 'LLM'
|
||||
this.category = 'Agent Flows'
|
||||
this.description = 'Large language models to analyze user-provided inputs and generate responses'
|
||||
this.color = '#64B5F6'
|
||||
this.baseClasses = [this.type]
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'Model',
|
||||
name: 'llmModel',
|
||||
type: 'asyncOptions',
|
||||
loadMethod: 'listModels',
|
||||
loadConfig: true
|
||||
},
|
||||
{
|
||||
label: 'Messages',
|
||||
name: 'llmMessages',
|
||||
type: 'array',
|
||||
optional: true,
|
||||
acceptVariable: true,
|
||||
array: [
|
||||
{
|
||||
label: 'Role',
|
||||
name: 'role',
|
||||
type: 'options',
|
||||
options: [
|
||||
{
|
||||
label: 'System',
|
||||
name: 'system'
|
||||
},
|
||||
{
|
||||
label: 'Assistant',
|
||||
name: 'assistant'
|
||||
},
|
||||
{
|
||||
label: 'Developer',
|
||||
name: 'developer'
|
||||
},
|
||||
{
|
||||
label: 'User',
|
||||
name: 'user'
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
label: 'Content',
|
||||
name: 'content',
|
||||
type: 'string',
|
||||
acceptVariable: true,
|
||||
generateInstruction: true,
|
||||
rows: 4
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
label: 'Enable Memory',
|
||||
name: 'llmEnableMemory',
|
||||
type: 'boolean',
|
||||
description: 'Enable memory for the conversation thread',
|
||||
default: true,
|
||||
optional: true
|
||||
},
|
||||
{
|
||||
label: 'Memory Type',
|
||||
name: 'llmMemoryType',
|
||||
type: 'options',
|
||||
options: [
|
||||
{
|
||||
label: 'All Messages',
|
||||
name: 'allMessages',
|
||||
description: 'Retrieve all messages from the conversation'
|
||||
},
|
||||
{
|
||||
label: 'Window Size',
|
||||
name: 'windowSize',
|
||||
description: 'Uses a fixed window size to surface the last N messages'
|
||||
},
|
||||
{
|
||||
label: 'Conversation Summary',
|
||||
name: 'conversationSummary',
|
||||
description: 'Summarizes the whole conversation'
|
||||
},
|
||||
{
|
||||
label: 'Conversation Summary Buffer',
|
||||
name: 'conversationSummaryBuffer',
|
||||
description: 'Summarize conversations once token limit is reached. Default to 2000'
|
||||
}
|
||||
],
|
||||
optional: true,
|
||||
default: 'allMessages',
|
||||
show: {
|
||||
llmEnableMemory: true
|
||||
}
|
||||
},
|
||||
{
|
||||
label: 'Window Size',
|
||||
name: 'llmMemoryWindowSize',
|
||||
type: 'number',
|
||||
default: '20',
|
||||
description: 'Uses a fixed window size to surface the last N messages',
|
||||
show: {
|
||||
llmMemoryType: 'windowSize'
|
||||
}
|
||||
},
|
||||
{
|
||||
label: 'Max Token Limit',
|
||||
name: 'llmMemoryMaxTokenLimit',
|
||||
type: 'number',
|
||||
default: '2000',
|
||||
description: 'Summarize conversations once token limit is reached. Default to 2000',
|
||||
show: {
|
||||
llmMemoryType: 'conversationSummaryBuffer'
|
||||
}
|
||||
},
|
||||
{
|
||||
label: 'Input Message',
|
||||
name: 'llmUserMessage',
|
||||
type: 'string',
|
||||
description: 'Add an input message as user message at the end of the conversation',
|
||||
rows: 4,
|
||||
optional: true,
|
||||
acceptVariable: true,
|
||||
show: {
|
||||
llmEnableMemory: true
|
||||
}
|
||||
},
|
||||
{
|
||||
label: 'Return Response As',
|
||||
name: 'llmReturnResponseAs',
|
||||
type: 'options',
|
||||
options: [
|
||||
{
|
||||
label: 'User Message',
|
||||
name: 'userMessage'
|
||||
},
|
||||
{
|
||||
label: 'Assistant Message',
|
||||
name: 'assistantMessage'
|
||||
}
|
||||
],
|
||||
default: 'userMessage'
|
||||
},
|
||||
{
|
||||
label: 'JSON Structured Output',
|
||||
name: 'llmStructuredOutput',
|
||||
description: 'Instruct the LLM to give output in a JSON structured schema',
|
||||
type: 'array',
|
||||
optional: true,
|
||||
acceptVariable: true,
|
||||
array: [
|
||||
{
|
||||
label: 'Key',
|
||||
name: 'key',
|
||||
type: 'string'
|
||||
},
|
||||
{
|
||||
label: 'Type',
|
||||
name: 'type',
|
||||
type: 'options',
|
||||
options: [
|
||||
{
|
||||
label: 'String',
|
||||
name: 'string'
|
||||
},
|
||||
{
|
||||
label: 'String Array',
|
||||
name: 'stringArray'
|
||||
},
|
||||
{
|
||||
label: 'Number',
|
||||
name: 'number'
|
||||
},
|
||||
{
|
||||
label: 'Boolean',
|
||||
name: 'boolean'
|
||||
},
|
||||
{
|
||||
label: 'Enum',
|
||||
name: 'enum'
|
||||
},
|
||||
{
|
||||
label: 'JSON Array',
|
||||
name: 'jsonArray'
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
label: 'Enum Values',
|
||||
name: 'enumValues',
|
||||
type: 'string',
|
||||
placeholder: 'value1, value2, value3',
|
||||
description: 'Enum values. Separated by comma',
|
||||
optional: true,
|
||||
show: {
|
||||
'llmStructuredOutput[$index].type': 'enum'
|
||||
}
|
||||
},
|
||||
{
|
||||
label: 'JSON Schema',
|
||||
name: 'jsonSchema',
|
||||
type: 'code',
|
||||
placeholder: `{
|
||||
"answer": {
|
||||
"type": "string",
|
||||
"description": "Value of the answer"
|
||||
},
|
||||
"reason": {
|
||||
"type": "string",
|
||||
"description": "Reason for the answer"
|
||||
},
|
||||
"optional": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"count": {
|
||||
"type": "number"
|
||||
},
|
||||
"children": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"value": {
|
||||
"type": "string",
|
||||
"description": "Value of the children's answer"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}`,
|
||||
description: 'JSON schema for the structured output',
|
||||
optional: true,
|
||||
hideCodeExecute: true,
|
||||
show: {
|
||||
'llmStructuredOutput[$index].type': 'jsonArray'
|
||||
}
|
||||
},
|
||||
{
|
||||
label: 'Description',
|
||||
name: 'description',
|
||||
type: 'string',
|
||||
placeholder: 'Description of the key'
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
label: 'Update Flow State',
|
||||
name: 'llmUpdateState',
|
||||
description: 'Update runtime state during the execution of the workflow',
|
||||
type: 'array',
|
||||
optional: true,
|
||||
acceptVariable: true,
|
||||
array: [
|
||||
{
|
||||
label: 'Key',
|
||||
name: 'key',
|
||||
type: 'asyncOptions',
|
||||
loadMethod: 'listRuntimeStateKeys'
|
||||
},
|
||||
{
|
||||
label: 'Value',
|
||||
name: 'value',
|
||||
type: 'string',
|
||||
acceptVariable: true,
|
||||
acceptNodeOutputAsVariable: true
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
//@ts-ignore
|
||||
loadMethods = {
|
||||
async listModels(_: INodeData, options: ICommonObject): Promise<INodeOptionsValue[]> {
|
||||
const componentNodes = options.componentNodes as {
|
||||
[key: string]: INode
|
||||
}
|
||||
|
||||
const returnOptions: INodeOptionsValue[] = []
|
||||
for (const nodeName in componentNodes) {
|
||||
const componentNode = componentNodes[nodeName]
|
||||
if (componentNode.category === 'Chat Models') {
|
||||
if (componentNode.tags?.includes('LlamaIndex')) {
|
||||
continue
|
||||
}
|
||||
returnOptions.push({
|
||||
label: componentNode.label,
|
||||
name: nodeName,
|
||||
imageSrc: componentNode.icon
|
||||
})
|
||||
}
|
||||
}
|
||||
return returnOptions
|
||||
},
|
||||
async listRuntimeStateKeys(_: INodeData, options: ICommonObject): Promise<INodeOptionsValue[]> {
|
||||
const previousNodes = options.previousNodes as ICommonObject[]
|
||||
const startAgentflowNode = previousNodes.find((node) => node.name === 'startAgentflow')
|
||||
const state = startAgentflowNode?.inputs?.startState as ICommonObject[]
|
||||
return state.map((item) => ({ label: item.key, name: item.key }))
|
||||
}
|
||||
}
|
||||
|
||||
async run(nodeData: INodeData, input: string | Record<string, any>, options: ICommonObject): Promise<any> {
|
||||
let llmIds: ICommonObject | undefined
|
||||
let analyticHandlers = options.analyticHandlers as AnalyticHandler
|
||||
|
||||
try {
|
||||
const abortController = options.abortController as AbortController
|
||||
|
||||
// Extract input parameters
|
||||
const model = nodeData.inputs?.llmModel as string
|
||||
const modelConfig = nodeData.inputs?.llmModelConfig as ICommonObject
|
||||
if (!model) {
|
||||
throw new Error('Model is required')
|
||||
}
|
||||
|
||||
// Extract memory and configuration options
|
||||
const enableMemory = nodeData.inputs?.llmEnableMemory as boolean
|
||||
const memoryType = nodeData.inputs?.llmMemoryType as string
|
||||
const userMessage = nodeData.inputs?.llmUserMessage as string
|
||||
const _llmUpdateState = nodeData.inputs?.llmUpdateState
|
||||
const _llmStructuredOutput = nodeData.inputs?.llmStructuredOutput
|
||||
const llmMessages = (nodeData.inputs?.llmMessages as unknown as ILLMMessage[]) ?? []
|
||||
|
||||
// Extract runtime state and history
|
||||
const state = options.agentflowRuntime?.state as ICommonObject
|
||||
const pastChatHistory = (options.pastChatHistory as BaseMessageLike[]) ?? []
|
||||
const runtimeChatHistory = (options.agentflowRuntime?.chatHistory as BaseMessageLike[]) ?? []
|
||||
const prependedChatHistory = options.prependedChatHistory as IMessage[]
|
||||
const chatId = options.chatId as string
|
||||
|
||||
// Initialize the LLM model instance
|
||||
const nodeInstanceFilePath = options.componentNodes[model].filePath as string
|
||||
const nodeModule = await import(nodeInstanceFilePath)
|
||||
const newLLMNodeInstance = new nodeModule.nodeClass()
|
||||
const newNodeData = {
|
||||
...nodeData,
|
||||
credential: modelConfig['FLOWISE_CREDENTIAL_ID'],
|
||||
inputs: {
|
||||
...nodeData.inputs,
|
||||
...modelConfig
|
||||
}
|
||||
}
|
||||
let llmNodeInstance = (await newLLMNodeInstance.init(newNodeData, '', options)) as BaseChatModel
|
||||
|
||||
// Prepare messages array
|
||||
const messages: BaseMessageLike[] = []
|
||||
// Use to store messages with image file references as we do not want to store the base64 data into database
|
||||
let runtimeImageMessagesWithFileRef: BaseMessageLike[] = []
|
||||
// Use to keep track of past messages with image file references
|
||||
let pastImageMessagesWithFileRef: BaseMessageLike[] = []
|
||||
|
||||
// Prepend history ONLY if it is the first node
|
||||
if (prependedChatHistory.length > 0 && !runtimeChatHistory.length) {
|
||||
for (const msg of prependedChatHistory) {
|
||||
const role: string = msg.role === 'apiMessage' ? 'assistant' : 'user'
|
||||
const content: string = msg.content ?? ''
|
||||
messages.push({
|
||||
role,
|
||||
content
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
for (const msg of llmMessages) {
|
||||
const role = msg.role
|
||||
const content = msg.content
|
||||
if (role && content) {
|
||||
if (role === 'system') {
|
||||
messages.unshift({ role, content })
|
||||
} else {
|
||||
messages.push({ role, content })
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Handle memory management if enabled
|
||||
if (enableMemory) {
|
||||
await this.handleMemory({
|
||||
messages,
|
||||
memoryType,
|
||||
pastChatHistory,
|
||||
runtimeChatHistory,
|
||||
llmNodeInstance,
|
||||
nodeData,
|
||||
userMessage,
|
||||
input,
|
||||
abortController,
|
||||
options,
|
||||
modelConfig,
|
||||
runtimeImageMessagesWithFileRef,
|
||||
pastImageMessagesWithFileRef
|
||||
})
|
||||
} else if (!runtimeChatHistory.length) {
|
||||
/*
|
||||
* If this is the first node:
|
||||
* - Add images to messages if exist
|
||||
* - Add user message if it does not exist in the llmMessages array
|
||||
*/
|
||||
if (options.uploads) {
|
||||
const imageContents = await getUniqueImageMessages(options, messages, modelConfig)
|
||||
if (imageContents) {
|
||||
const { imageMessageWithBase64, imageMessageWithFileRef } = imageContents
|
||||
messages.push(imageMessageWithBase64)
|
||||
runtimeImageMessagesWithFileRef.push(imageMessageWithFileRef)
|
||||
}
|
||||
}
|
||||
|
||||
if (input && typeof input === 'string' && !llmMessages.some((msg) => msg.role === 'user')) {
|
||||
messages.push({
|
||||
role: 'user',
|
||||
content: input
|
||||
})
|
||||
}
|
||||
}
|
||||
delete nodeData.inputs?.llmMessages
|
||||
|
||||
/**
|
||||
* Add image artifacts from previous assistant responses as user messages
|
||||
* Images are converted from FILE-STORAGE::<image_path> to base 64 image_url format
|
||||
*/
|
||||
await addImageArtifactsToMessages(messages, options)
|
||||
|
||||
// Configure structured output if specified
|
||||
const isStructuredOutput = _llmStructuredOutput && Array.isArray(_llmStructuredOutput) && _llmStructuredOutput.length > 0
|
||||
if (isStructuredOutput) {
|
||||
llmNodeInstance = configureStructuredOutput(llmNodeInstance, _llmStructuredOutput)
|
||||
}
|
||||
|
||||
// Initialize response and determine if streaming is possible
|
||||
let response: AIMessageChunk = new AIMessageChunk('')
|
||||
const isLastNode = options.isLastNode as boolean
|
||||
const isStreamable = isLastNode && options.sseStreamer !== undefined && modelConfig?.streaming !== false && !isStructuredOutput
|
||||
|
||||
// Start analytics
|
||||
if (analyticHandlers && options.parentTraceIds) {
|
||||
const llmLabel = options?.componentNodes?.[model]?.label || model
|
||||
llmIds = await analyticHandlers.onLLMStart(llmLabel, messages, options.parentTraceIds)
|
||||
}
|
||||
|
||||
// Track execution time
|
||||
const startTime = Date.now()
|
||||
const sseStreamer: IServerSideEventStreamer | undefined = options.sseStreamer
|
||||
|
||||
/*
|
||||
* Invoke LLM
|
||||
*/
|
||||
if (isStreamable) {
|
||||
response = await this.handleStreamingResponse(sseStreamer, llmNodeInstance, messages, chatId, abortController)
|
||||
} else {
|
||||
response = await llmNodeInstance.invoke(messages, { signal: abortController?.signal })
|
||||
|
||||
// Stream whole response back to UI if this is the last node
|
||||
if (isLastNode && options.sseStreamer) {
|
||||
const sseStreamer: IServerSideEventStreamer = options.sseStreamer as IServerSideEventStreamer
|
||||
let finalResponse = ''
|
||||
if (response.content && Array.isArray(response.content)) {
|
||||
finalResponse = response.content.map((item: any) => item.text).join('\n')
|
||||
} else if (response.content && typeof response.content === 'string') {
|
||||
finalResponse = response.content
|
||||
} else {
|
||||
finalResponse = JSON.stringify(response, null, 2)
|
||||
}
|
||||
sseStreamer.streamTokenEvent(chatId, finalResponse)
|
||||
}
|
||||
}
|
||||
|
||||
// Calculate execution time
|
||||
const endTime = Date.now()
|
||||
const timeDelta = endTime - startTime
|
||||
|
||||
// Extract artifacts and file annotations from response metadata
|
||||
let artifacts: any[] = []
|
||||
let fileAnnotations: any[] = []
|
||||
if (response.response_metadata) {
|
||||
const {
|
||||
artifacts: extractedArtifacts,
|
||||
fileAnnotations: extractedFileAnnotations,
|
||||
savedInlineImages
|
||||
} = await extractArtifactsFromResponse(response.response_metadata, newNodeData, options)
|
||||
|
||||
if (extractedArtifacts.length > 0) {
|
||||
artifacts = extractedArtifacts
|
||||
|
||||
// Stream artifacts if this is the last node
|
||||
if (isLastNode && sseStreamer) {
|
||||
sseStreamer.streamArtifactsEvent(chatId, artifacts)
|
||||
}
|
||||
}
|
||||
|
||||
if (extractedFileAnnotations.length > 0) {
|
||||
fileAnnotations = extractedFileAnnotations
|
||||
|
||||
// Stream file annotations if this is the last node
|
||||
if (isLastNode && sseStreamer) {
|
||||
sseStreamer.streamFileAnnotationsEvent(chatId, fileAnnotations)
|
||||
}
|
||||
}
|
||||
|
||||
// Replace inlineData base64 with file references in the response
|
||||
if (savedInlineImages && savedInlineImages.length > 0) {
|
||||
replaceInlineDataWithFileReferences(response, savedInlineImages)
|
||||
}
|
||||
}
|
||||
|
||||
// Update flow state if needed
|
||||
let newState = { ...state }
|
||||
if (_llmUpdateState && Array.isArray(_llmUpdateState) && _llmUpdateState.length > 0) {
|
||||
newState = updateFlowState(state, _llmUpdateState)
|
||||
}
|
||||
|
||||
// Clean up empty inputs
|
||||
for (const key in nodeData.inputs) {
|
||||
if (nodeData.inputs[key] === '') {
|
||||
delete nodeData.inputs[key]
|
||||
}
|
||||
}
|
||||
|
||||
// Prepare final response and output object
|
||||
let finalResponse = ''
|
||||
if (response.content && Array.isArray(response.content)) {
|
||||
finalResponse = response.content.map((item: any) => item.text).join('\n')
|
||||
} else if (response.content && typeof response.content === 'string') {
|
||||
finalResponse = response.content
|
||||
} else if (response.content === '') {
|
||||
// Empty response content, this could happen when there is only image data
|
||||
finalResponse = ''
|
||||
} else {
|
||||
finalResponse = JSON.stringify(response, null, 2)
|
||||
}
|
||||
const output = this.prepareOutputObject(
|
||||
response,
|
||||
finalResponse,
|
||||
startTime,
|
||||
endTime,
|
||||
timeDelta,
|
||||
isStructuredOutput,
|
||||
artifacts,
|
||||
fileAnnotations
|
||||
)
|
||||
|
||||
// End analytics tracking
|
||||
if (analyticHandlers && llmIds) {
|
||||
await analyticHandlers.onLLMEnd(llmIds, finalResponse)
|
||||
}
|
||||
|
||||
// Send additional streaming events if needed
|
||||
if (isStreamable) {
|
||||
this.sendStreamingEvents(options, chatId, response)
|
||||
}
|
||||
|
||||
// Stream file annotations if any were extracted
|
||||
if (fileAnnotations.length > 0 && isLastNode && sseStreamer) {
|
||||
sseStreamer.streamFileAnnotationsEvent(chatId, fileAnnotations)
|
||||
}
|
||||
|
||||
// Process template variables in state
|
||||
newState = processTemplateVariables(newState, finalResponse)
|
||||
|
||||
/**
|
||||
* Remove the temporarily added image artifact messages before storing
|
||||
* This is to avoid storing the actual base64 data into database
|
||||
*/
|
||||
const messagesToStore = messages.filter((msg: any) => !msg._isTemporaryImageMessage)
|
||||
|
||||
// Replace the actual messages array with one that includes the file references for images instead of base64 data
|
||||
const messagesWithFileReferences = replaceBase64ImagesWithFileReferences(
|
||||
messagesToStore,
|
||||
runtimeImageMessagesWithFileRef,
|
||||
pastImageMessagesWithFileRef
|
||||
)
|
||||
|
||||
// Only add to runtime chat history if this is the first node
|
||||
const inputMessages = []
|
||||
if (!runtimeChatHistory.length) {
|
||||
if (runtimeImageMessagesWithFileRef.length) {
|
||||
inputMessages.push(...runtimeImageMessagesWithFileRef)
|
||||
}
|
||||
if (input && typeof input === 'string') {
|
||||
if (!enableMemory) {
|
||||
if (!llmMessages.some((msg) => msg.role === 'user')) {
|
||||
inputMessages.push({ role: 'user', content: input })
|
||||
} else {
|
||||
llmMessages.map((msg) => {
|
||||
if (msg.role === 'user') {
|
||||
inputMessages.push({ role: 'user', content: msg.content })
|
||||
}
|
||||
})
|
||||
}
|
||||
} else {
|
||||
inputMessages.push({ role: 'user', content: input })
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const returnResponseAs = nodeData.inputs?.llmReturnResponseAs as string
|
||||
let returnRole = 'user'
|
||||
if (returnResponseAs === 'assistantMessage') {
|
||||
returnRole = 'assistant'
|
||||
}
|
||||
|
||||
// Prepare and return the final output
|
||||
return {
|
||||
id: nodeData.id,
|
||||
name: this.name,
|
||||
input: {
|
||||
messages: messagesWithFileReferences,
|
||||
...nodeData.inputs
|
||||
},
|
||||
output,
|
||||
state: newState,
|
||||
chatHistory: [
|
||||
...inputMessages,
|
||||
|
||||
// LLM response
|
||||
{
|
||||
role: returnRole,
|
||||
content: finalResponse,
|
||||
name: nodeData?.label ? nodeData?.label.toLowerCase().replace(/\s/g, '_').trim() : nodeData?.id,
|
||||
...(((artifacts && artifacts.length > 0) || (fileAnnotations && fileAnnotations.length > 0)) && {
|
||||
additional_kwargs: {
|
||||
...(artifacts && artifacts.length > 0 && { artifacts }),
|
||||
...(fileAnnotations && fileAnnotations.length > 0 && { fileAnnotations })
|
||||
}
|
||||
})
|
||||
}
|
||||
]
|
||||
}
|
||||
} catch (error) {
|
||||
if (options.analyticHandlers && llmIds) {
|
||||
await options.analyticHandlers.onLLMError(llmIds, error instanceof Error ? error.message : String(error))
|
||||
}
|
||||
|
||||
if (error instanceof Error && error.message === 'Aborted') {
|
||||
throw error
|
||||
}
|
||||
throw new Error(`Error in LLM node: ${error instanceof Error ? error.message : String(error)}`)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Handles memory management based on the specified memory type
|
||||
*/
|
||||
private async handleMemory({
|
||||
messages,
|
||||
memoryType,
|
||||
pastChatHistory,
|
||||
runtimeChatHistory,
|
||||
llmNodeInstance,
|
||||
nodeData,
|
||||
userMessage,
|
||||
input,
|
||||
abortController,
|
||||
options,
|
||||
modelConfig,
|
||||
runtimeImageMessagesWithFileRef,
|
||||
pastImageMessagesWithFileRef
|
||||
}: {
|
||||
messages: BaseMessageLike[]
|
||||
memoryType: string
|
||||
pastChatHistory: BaseMessageLike[]
|
||||
runtimeChatHistory: BaseMessageLike[]
|
||||
llmNodeInstance: BaseChatModel
|
||||
nodeData: INodeData
|
||||
userMessage: string
|
||||
input: string | Record<string, any>
|
||||
abortController: AbortController
|
||||
options: ICommonObject
|
||||
modelConfig: ICommonObject
|
||||
runtimeImageMessagesWithFileRef: BaseMessageLike[]
|
||||
pastImageMessagesWithFileRef: BaseMessageLike[]
|
||||
}): Promise<void> {
|
||||
const { updatedPastMessages, transformedPastMessages } = await getPastChatHistoryImageMessages(pastChatHistory, options)
|
||||
pastChatHistory = updatedPastMessages
|
||||
pastImageMessagesWithFileRef.push(...transformedPastMessages)
|
||||
|
||||
let pastMessages = [...pastChatHistory, ...runtimeChatHistory]
|
||||
if (!runtimeChatHistory.length && input && typeof input === 'string') {
|
||||
/*
|
||||
* If this is the first node:
|
||||
* - Add images to messages if exist
|
||||
* - Add user message
|
||||
*/
|
||||
if (options.uploads) {
|
||||
const imageContents = await getUniqueImageMessages(options, messages, modelConfig)
|
||||
if (imageContents) {
|
||||
const { imageMessageWithBase64, imageMessageWithFileRef } = imageContents
|
||||
pastMessages.push(imageMessageWithBase64)
|
||||
runtimeImageMessagesWithFileRef.push(imageMessageWithFileRef)
|
||||
}
|
||||
}
|
||||
pastMessages.push({
|
||||
role: 'user',
|
||||
content: input
|
||||
})
|
||||
}
|
||||
const { updatedMessages, transformedMessages } = await processMessagesWithImages(pastMessages, options)
|
||||
pastMessages = updatedMessages
|
||||
pastImageMessagesWithFileRef.push(...transformedMessages)
|
||||
|
||||
if (pastMessages.length > 0) {
|
||||
if (memoryType === 'windowSize') {
|
||||
// Window memory: Keep the last N messages
|
||||
const windowSize = nodeData.inputs?.llmMemoryWindowSize as number
|
||||
const windowedMessages = pastMessages.slice(-windowSize * 2)
|
||||
messages.push(...windowedMessages)
|
||||
} else if (memoryType === 'conversationSummary') {
|
||||
// Summary memory: Summarize all past messages
|
||||
const summary = await llmNodeInstance.invoke(
|
||||
[
|
||||
{
|
||||
role: 'user',
|
||||
content: DEFAULT_SUMMARIZER_TEMPLATE.replace(
|
||||
'{conversation}',
|
||||
pastMessages.map((msg: any) => `${msg.role}: ${msg.content}`).join('\n')
|
||||
)
|
||||
}
|
||||
],
|
||||
{ signal: abortController?.signal }
|
||||
)
|
||||
messages.push({ role: 'assistant', content: summary.content as string })
|
||||
} else if (memoryType === 'conversationSummaryBuffer') {
|
||||
// Summary buffer: Summarize messages that exceed token limit
|
||||
await this.handleSummaryBuffer(messages, pastMessages, llmNodeInstance, nodeData, abortController)
|
||||
} else {
|
||||
// Default: Use all messages
|
||||
messages.push(...pastMessages)
|
||||
}
|
||||
}
|
||||
|
||||
// Add user message
|
||||
if (userMessage) {
|
||||
messages.push({
|
||||
role: 'user',
|
||||
content: userMessage
|
||||
})
|
||||
}
|
||||
}
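// Note on the window-size branch in handleMemory above: slice(-windowSize * 2) keeps twice
// the configured window, presumably so a window of N retains roughly the last N
// user/assistant exchanges rather than the last N raw messages.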
|
||||
|
||||
/**
|
||||
* Handles conversation summary buffer memory type
|
||||
*/
|
||||
private async handleSummaryBuffer(
|
||||
messages: BaseMessageLike[],
|
||||
pastMessages: BaseMessageLike[],
|
||||
llmNodeInstance: BaseChatModel,
|
||||
nodeData: INodeData,
|
||||
abortController: AbortController
|
||||
): Promise<void> {
|
||||
const maxTokenLimit = (nodeData.inputs?.llmMemoryMaxTokenLimit as number) || 2000
|
||||
|
||||
// Convert past messages to a format suitable for token counting
|
||||
const messagesString = pastMessages.map((msg: any) => `${msg.role}: ${msg.content}`).join('\n')
|
||||
const tokenCount = await llmNodeInstance.getNumTokens(messagesString)
|
||||
|
||||
if (tokenCount > maxTokenLimit) {
|
||||
// Calculate how many messages to summarize (messages that exceed the token limit)
|
||||
let currBufferLength = tokenCount
|
||||
const messagesToSummarize = []
|
||||
const remainingMessages = [...pastMessages]
|
||||
|
||||
// Remove messages from the beginning until we're under the token limit
|
||||
while (currBufferLength > maxTokenLimit && remainingMessages.length > 0) {
|
||||
const poppedMessage = remainingMessages.shift()
|
||||
if (poppedMessage) {
|
||||
messagesToSummarize.push(poppedMessage)
|
||||
// Recalculate token count for remaining messages
|
||||
const remainingMessagesString = remainingMessages.map((msg: any) => `${msg.role}: ${msg.content}`).join('\n')
|
||||
currBufferLength = await llmNodeInstance.getNumTokens(remainingMessagesString)
|
||||
}
|
||||
}
|
||||
|
||||
// Summarize the messages that were removed
|
||||
const messagesToSummarizeString = messagesToSummarize.map((msg: any) => `${msg.role}: ${msg.content}`).join('\n')
|
||||
|
||||
const summary = await llmNodeInstance.invoke(
|
||||
[
|
||||
{
|
||||
role: 'user',
|
||||
content: DEFAULT_SUMMARIZER_TEMPLATE.replace('{conversation}', messagesToSummarizeString)
|
||||
}
|
||||
],
|
||||
{ signal: abortController?.signal }
|
||||
)
|
||||
|
||||
// Add summary as a system message at the beginning, then add remaining messages
|
||||
messages.push({ role: 'system', content: `Previous conversation summary: ${summary.content}` })
|
||||
messages.push(...remainingMessages)
|
||||
} else {
|
||||
// If under token limit, use all messages
|
||||
messages.push(...pastMessages)
|
||||
}
|
||||
}
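/*
 * Illustrative sketch (hypothetical message names and token counts): with a 2000-token limit and a
 * history that llmNodeInstance.getNumTokens() counts at roughly 2600 tokens, the buffer is trimmed
 * from the front until the remainder fits, and the removed part is summarized:
 *
 *   pastMessages:        [m1, m2, m3, m4, m5]   // ~2600 tokens serialized
 *   messagesToSummarize: [m1, m2]               // shifted off until the remainder <= 2000 tokens
 *   messages:            [{ role: 'system', content: 'Previous conversation summary: ...' }, m3, m4, m5]
 *
 * The exact split depends on the model's tokenizer.
 */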
|
||||
|
||||
/**
|
||||
* Handles streaming response from the LLM
|
||||
*/
|
||||
private async handleStreamingResponse(
|
||||
sseStreamer: IServerSideEventStreamer | undefined,
|
||||
llmNodeInstance: BaseChatModel,
|
||||
messages: BaseMessageLike[],
|
||||
chatId: string,
|
||||
abortController: AbortController
|
||||
): Promise<AIMessageChunk> {
|
||||
let response = new AIMessageChunk('')
|
||||
|
||||
try {
|
||||
for await (const chunk of await llmNodeInstance.stream(messages, { signal: abortController?.signal })) {
|
||||
if (sseStreamer) {
|
||||
let content = ''
|
||||
|
||||
if (typeof chunk === 'string') {
|
||||
content = chunk
|
||||
} else if (Array.isArray(chunk.content) && chunk.content.length > 0) {
|
||||
const contents = chunk.content as MessageContentText[]
|
||||
content = contents.map((item) => item.text).join('')
|
||||
} else if (chunk.content) {
|
||||
content = chunk.content.toString()
|
||||
}
|
||||
sseStreamer.streamTokenEvent(chatId, content)
|
||||
}
|
||||
|
||||
const messageChunk = typeof chunk === 'string' ? new AIMessageChunk(chunk) : chunk
|
||||
response = response.concat(messageChunk)
|
||||
}
|
||||
} catch (error) {
|
||||
console.error('Error during streaming:', error)
|
||||
throw error
|
||||
}
|
||||
if (Array.isArray(response.content) && response.content.length > 0) {
|
||||
const responseContents = response.content as MessageContentText[]
|
||||
response.content = responseContents.map((item) => item.text).join('')
|
||||
}
|
||||
return response
|
||||
}
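/*
 * Illustrative sketch: AIMessageChunk instances are merged with concat(), so accumulating the
 * streamed chunks yields a single chunk whose content is the concatenation of the streamed tokens
 * (the three chunks below are hypothetical).
 *
 *   let acc = new AIMessageChunk('')
 *   for (const c of [new AIMessageChunk('Hel'), new AIMessageChunk('lo'), new AIMessageChunk('!')]) {
 *       acc = acc.concat(c)
 *   }
 *   // acc.content === 'Hello!'
 */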
|
||||
|
||||
/**
|
||||
* Prepares the output object with response and metadata
|
||||
*/
|
||||
private prepareOutputObject(
|
||||
response: AIMessageChunk,
|
||||
finalResponse: string,
|
||||
startTime: number,
|
||||
endTime: number,
|
||||
timeDelta: number,
|
||||
isStructuredOutput: boolean,
|
||||
artifacts: any[] = [],
|
||||
fileAnnotations: any[] = []
|
||||
): any {
|
||||
const output: any = {
|
||||
content: finalResponse,
|
||||
timeMetadata: {
|
||||
start: startTime,
|
||||
end: endTime,
|
||||
delta: timeDelta
|
||||
}
|
||||
}
|
||||
|
||||
if (response.tool_calls) {
|
||||
output.calledTools = response.tool_calls
|
||||
}
|
||||
|
||||
if (response.usage_metadata) {
|
||||
output.usageMetadata = response.usage_metadata
|
||||
}
|
||||
|
||||
if (response.response_metadata) {
|
||||
output.responseMetadata = response.response_metadata
|
||||
}
|
||||
|
||||
if (isStructuredOutput && typeof response === 'object') {
|
||||
const structuredOutput = response as Record<string, any>
|
||||
for (const key in structuredOutput) {
|
||||
if (structuredOutput[key] !== undefined && structuredOutput[key] !== null) {
|
||||
output[key] = structuredOutput[key]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (artifacts && artifacts.length > 0) {
|
||||
output.artifacts = flatten(artifacts)
|
||||
}
|
||||
|
||||
if (fileAnnotations && fileAnnotations.length > 0) {
|
||||
output.fileAnnotations = fileAnnotations
|
||||
}
|
||||
|
||||
return output
|
||||
}
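/*
 * Illustrative sketch (hypothetical values): for a plain text response the object assembled above
 * looks roughly like the following; the optional keys appear only when the model reports them.
 *
 *   {
 *       content: 'final answer text',
 *       timeMetadata: { start: 1712000000000, end: 1712000001200, delta: 1200 },
 *       calledTools: [...],       // from response.tool_calls
 *       usageMetadata: {...},     // from response.usage_metadata
 *       responseMetadata: {...},  // from response.response_metadata
 *       artifacts: [...],
 *       fileAnnotations: [...]
 *   }
 */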
|
||||
|
||||
/**
|
||||
* Sends additional streaming events for tool calls and metadata
|
||||
*/
|
||||
private sendStreamingEvents(options: ICommonObject, chatId: string, response: AIMessageChunk): void {
|
||||
const sseStreamer: IServerSideEventStreamer = options.sseStreamer as IServerSideEventStreamer
|
||||
|
||||
if (response.tool_calls) {
|
||||
const formattedToolCalls = response.tool_calls.map((toolCall: any) => ({
|
||||
tool: toolCall.name || 'tool',
|
||||
toolInput: toolCall.args,
|
||||
toolOutput: ''
|
||||
}))
|
||||
sseStreamer.streamCalledToolsEvent(chatId, flatten(formattedToolCalls))
|
||||
}
|
||||
|
||||
if (response.usage_metadata) {
|
||||
sseStreamer.streamUsageMetadataEvent(chatId, response.usage_metadata)
|
||||
}
|
||||
|
||||
sseStreamer.streamEndEvent(chatId)
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { nodeClass: LLM_Agentflow }
|
||||
|
|
@@ -0,0 +1,154 @@
|
|||
import { ICommonObject, INode, INodeData, INodeOptionsValue, INodeParams } from '../../../src/Interface'
|
||||
import { updateFlowState } from '../utils'
|
||||
|
||||
class Loop_Agentflow implements INode {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
description: string
|
||||
type: string
|
||||
icon: string
|
||||
category: string
|
||||
color: string
|
||||
hideOutput: boolean
|
||||
hint: string
|
||||
baseClasses: string[]
|
||||
documentation?: string
|
||||
credential: INodeParams
|
||||
inputs: INodeParams[]
|
||||
|
||||
constructor() {
|
||||
this.label = 'Loop'
|
||||
this.name = 'loopAgentflow'
|
||||
this.version = 1.2
|
||||
this.type = 'Loop'
|
||||
this.category = 'Agent Flows'
|
||||
this.description = 'Loop back to a previous node'
|
||||
this.baseClasses = [this.type]
|
||||
this.color = '#FFA07A'
|
||||
this.hint = 'Make sure to have memory enabled in the LLM/Agent node to retain the chat history'
|
||||
this.hideOutput = true
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'Loop Back To',
|
||||
name: 'loopBackToNode',
|
||||
type: 'asyncOptions',
|
||||
loadMethod: 'listPreviousNodes',
|
||||
freeSolo: true
|
||||
},
|
||||
{
|
||||
label: 'Max Loop Count',
|
||||
name: 'maxLoopCount',
|
||||
type: 'number',
|
||||
default: 5
|
||||
},
|
||||
{
|
||||
label: 'Fallback Message',
|
||||
name: 'fallbackMessage',
|
||||
type: 'string',
|
||||
description: 'Message to display if the loop count is exceeded',
|
||||
placeholder: 'Enter your fallback message here',
|
||||
rows: 4,
|
||||
acceptVariable: true,
|
||||
optional: true
|
||||
},
|
||||
{
|
||||
label: 'Update Flow State',
|
||||
name: 'loopUpdateState',
|
||||
description: 'Update runtime state during the execution of the workflow',
|
||||
type: 'array',
|
||||
optional: true,
|
||||
acceptVariable: true,
|
||||
array: [
|
||||
{
|
||||
label: 'Key',
|
||||
name: 'key',
|
||||
type: 'asyncOptions',
|
||||
loadMethod: 'listRuntimeStateKeys'
|
||||
},
|
||||
{
|
||||
label: 'Value',
|
||||
name: 'value',
|
||||
type: 'string',
|
||||
acceptVariable: true,
|
||||
acceptNodeOutputAsVariable: true
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
//@ts-ignore
|
||||
loadMethods = {
|
||||
async listPreviousNodes(_: INodeData, options: ICommonObject): Promise<INodeOptionsValue[]> {
|
||||
const previousNodes = options.previousNodes as ICommonObject[]
|
||||
|
||||
const returnOptions: INodeOptionsValue[] = []
|
||||
for (const node of previousNodes) {
|
||||
returnOptions.push({
|
||||
label: node.label,
|
||||
name: `${node.id}-${node.label}`,
|
||||
description: node.id
|
||||
})
|
||||
}
|
||||
return returnOptions
|
||||
},
|
||||
async listRuntimeStateKeys(_: INodeData, options: ICommonObject): Promise<INodeOptionsValue[]> {
|
||||
const previousNodes = options.previousNodes as ICommonObject[]
|
||||
const startAgentflowNode = previousNodes.find((node) => node.name === 'startAgentflow')
|
||||
const state = startAgentflowNode?.inputs?.startState as ICommonObject[]
|
||||
return (state ?? []).map((item) => ({ label: item.key, name: item.key })) // guard against flows without a Start state
|
||||
}
|
||||
}
|
||||
|
||||
async run(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
|
||||
const loopBackToNode = nodeData.inputs?.loopBackToNode as string
|
||||
const _maxLoopCount = nodeData.inputs?.maxLoopCount as string
|
||||
const fallbackMessage = nodeData.inputs?.fallbackMessage as string
|
||||
const _loopUpdateState = nodeData.inputs?.loopUpdateState
|
||||
|
||||
const state = options.agentflowRuntime?.state as ICommonObject
|
||||
|
||||
const loopBackToNodeId = loopBackToNode.split('-')[0]
|
||||
const loopBackToNodeLabel = loopBackToNode.split('-')[1]
|
||||
|
||||
const data = {
|
||||
nodeID: loopBackToNodeId,
|
||||
maxLoopCount: _maxLoopCount ? parseInt(_maxLoopCount) : 5
|
||||
}
|
||||
|
||||
const finalOutput = `Loop back to ${loopBackToNodeLabel} (${loopBackToNodeId})`
|
||||
|
||||
// Update flow state if needed
|
||||
let newState = { ...state }
|
||||
if (_loopUpdateState && Array.isArray(_loopUpdateState) && _loopUpdateState.length > 0) {
|
||||
newState = updateFlowState(state, _loopUpdateState)
|
||||
}
|
||||
|
||||
// Process template variables in state
|
||||
if (newState && Object.keys(newState).length > 0) {
|
||||
for (const key in newState) {
|
||||
if (newState[key].toString().includes('{{ output }}')) {
|
||||
newState[key] = finalOutput
|
||||
}
|
||||
}
|
||||
}
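/*
 * Illustrative sketch (hypothetical node id and label): a state entry configured as
 * { lastStep: '{{ output }}' } is rewritten here to the loop's own output, e.g.
 * { lastStep: 'Loop back to Agent 0 (agentAgentflow_0)' }.
 */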
|
||||
|
||||
const returnOutput = {
|
||||
id: nodeData.id,
|
||||
name: this.name,
|
||||
input: data,
|
||||
output: {
|
||||
content: finalOutput,
|
||||
nodeID: loopBackToNodeId,
|
||||
maxLoopCount: _maxLoopCount ? parseInt(_maxLoopCount) : 5,
|
||||
fallbackMessage
|
||||
},
|
||||
state: newState
|
||||
}
|
||||
|
||||
return returnOutput
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { nodeClass: Loop_Agentflow }
|
||||
|
|
@@ -0,0 +1,221 @@
|
|||
import {
|
||||
ICommonObject,
|
||||
IDatabaseEntity,
|
||||
INode,
|
||||
INodeData,
|
||||
INodeOptionsValue,
|
||||
INodeParams,
|
||||
IServerSideEventStreamer
|
||||
} from '../../../src/Interface'
|
||||
import { updateFlowState } from '../utils'
|
||||
import { processTemplateVariables } from '../../../src/utils'
|
||||
import { DataSource } from 'typeorm'
|
||||
import { BaseRetriever } from '@langchain/core/retrievers'
|
||||
import { Document } from '@langchain/core/documents'
|
||||
|
||||
interface IKnowledgeBase {
|
||||
documentStore: string
|
||||
}
|
||||
|
||||
class Retriever_Agentflow implements INode {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
description: string
|
||||
type: string
|
||||
icon: string
|
||||
category: string
|
||||
color: string
|
||||
hideOutput: boolean
|
||||
hint: string
|
||||
baseClasses: string[]
|
||||
documentation?: string
|
||||
credential: INodeParams
|
||||
inputs: INodeParams[]
|
||||
|
||||
constructor() {
|
||||
this.label = 'Retriever'
|
||||
this.name = 'retrieverAgentflow'
|
||||
this.version = 1.1
|
||||
this.type = 'Retriever'
|
||||
this.category = 'Agent Flows'
|
||||
this.description = 'Retrieve information from vector database'
|
||||
this.baseClasses = [this.type]
|
||||
this.color = '#b8bedd'
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'Knowledge (Document Stores)',
|
||||
name: 'retrieverKnowledgeDocumentStores',
|
||||
type: 'array',
|
||||
description: 'Document stores to retrieve information from. Document stores must be upserted in advance.',
|
||||
array: [
|
||||
{
|
||||
label: 'Document Store',
|
||||
name: 'documentStore',
|
||||
type: 'asyncOptions',
|
||||
loadMethod: 'listStores'
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
label: 'Retriever Query',
|
||||
name: 'retrieverQuery',
|
||||
type: 'string',
|
||||
placeholder: 'Enter your query here',
|
||||
rows: 4,
|
||||
acceptVariable: true
|
||||
},
|
||||
{
|
||||
label: 'Output Format',
|
||||
name: 'outputFormat',
|
||||
type: 'options',
|
||||
options: [
|
||||
{ label: 'Text', name: 'text' },
|
||||
{ label: 'Text with Metadata', name: 'textWithMetadata' }
|
||||
],
|
||||
default: 'text'
|
||||
},
|
||||
{
|
||||
label: 'Update Flow State',
|
||||
name: 'retrieverUpdateState',
|
||||
description: 'Update runtime state during the execution of the workflow',
|
||||
type: 'array',
|
||||
optional: true,
|
||||
acceptVariable: true,
|
||||
array: [
|
||||
{
|
||||
label: 'Key',
|
||||
name: 'key',
|
||||
type: 'asyncOptions',
|
||||
loadMethod: 'listRuntimeStateKeys'
|
||||
},
|
||||
{
|
||||
label: 'Value',
|
||||
name: 'value',
|
||||
type: 'string',
|
||||
acceptVariable: true,
|
||||
acceptNodeOutputAsVariable: true
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
//@ts-ignore
|
||||
loadMethods = {
|
||||
async listRuntimeStateKeys(_: INodeData, options: ICommonObject): Promise<INodeOptionsValue[]> {
|
||||
const previousNodes = options.previousNodes as ICommonObject[]
|
||||
const startAgentflowNode = previousNodes.find((node) => node.name === 'startAgentflow')
|
||||
const state = startAgentflowNode?.inputs?.startState as ICommonObject[]
|
||||
return (state ?? []).map((item) => ({ label: item.key, name: item.key })) // guard against flows without a Start state
|
||||
},
|
||||
async listStores(_: INodeData, options: ICommonObject): Promise<INodeOptionsValue[]> {
|
||||
const returnData: INodeOptionsValue[] = []
|
||||
|
||||
const appDataSource = options.appDataSource as DataSource
|
||||
const databaseEntities = options.databaseEntities as IDatabaseEntity
|
||||
|
||||
if (!appDataSource) {
|
||||
return returnData
|
||||
}
|
||||
|
||||
const searchOptions = options.searchOptions || {}
|
||||
const stores = await appDataSource.getRepository(databaseEntities['DocumentStore']).findBy(searchOptions)
|
||||
for (const store of stores) {
|
||||
if (store.status === 'UPSERTED') {
|
||||
const obj = {
|
||||
name: `${store.id}:${store.name}`,
|
||||
label: store.name,
|
||||
description: store.description
|
||||
}
|
||||
returnData.push(obj)
|
||||
}
|
||||
}
|
||||
return returnData
|
||||
}
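/*
 * Illustrative note (hypothetical values): option names are encoded as `${store.id}:${store.name}`,
 * e.g. 'b1c2d3e4:Company Docs'; run() later recovers the store id with
 * knowledgeBase.documentStore.split(':')[0].
 */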
|
||||
}
|
||||
|
||||
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<any> {
|
||||
const retrieverQuery = nodeData.inputs?.retrieverQuery as string
|
||||
const outputFormat = nodeData.inputs?.outputFormat as string
|
||||
const _retrieverUpdateState = nodeData.inputs?.retrieverUpdateState
|
||||
|
||||
const state = options.agentflowRuntime?.state as ICommonObject
|
||||
const chatId = options.chatId as string
|
||||
const isLastNode = options.isLastNode as boolean
|
||||
const isStreamable = isLastNode && options.sseStreamer !== undefined
|
||||
|
||||
const abortController = options.abortController as AbortController
|
||||
|
||||
// Extract knowledge
|
||||
let docs: Document[] = []
|
||||
const knowledgeBases = nodeData.inputs?.retrieverKnowledgeDocumentStores as IKnowledgeBase[]
|
||||
if (knowledgeBases && knowledgeBases.length > 0) {
|
||||
for (const knowledgeBase of knowledgeBases) {
|
||||
const [storeId, _] = knowledgeBase.documentStore.split(':')
|
||||
|
||||
const docStoreVectorInstanceFilePath = options.componentNodes['documentStoreVS'].filePath as string
|
||||
const docStoreVectorModule = await import(docStoreVectorInstanceFilePath)
|
||||
const newDocStoreVectorInstance = new docStoreVectorModule.nodeClass()
|
||||
const docStoreVectorInstance = (await newDocStoreVectorInstance.init(
|
||||
{
|
||||
...nodeData,
|
||||
inputs: {
|
||||
...nodeData.inputs,
|
||||
selectedStore: storeId
|
||||
},
|
||||
outputs: {
|
||||
output: 'retriever'
|
||||
}
|
||||
},
|
||||
'',
|
||||
options
|
||||
)) as BaseRetriever
|
||||
|
||||
docs = await docStoreVectorInstance.invoke(retrieverQuery || input, { signal: abortController?.signal })
|
||||
}
|
||||
}
|
||||
|
||||
const docsText = docs.map((doc) => doc.pageContent).join('\n')
|
||||
|
||||
// Update flow state if needed
|
||||
let newState = { ...state }
|
||||
if (_retrieverUpdateState && Array.isArray(_retrieverUpdateState) && _retrieverUpdateState.length > 0) {
|
||||
newState = updateFlowState(state, _retrieverUpdateState)
|
||||
}
|
||||
|
||||
try {
|
||||
let finalOutput = ''
|
||||
if (outputFormat === 'text') {
|
||||
finalOutput = docsText
|
||||
} else if (outputFormat === 'textWithMetadata') {
|
||||
finalOutput = JSON.stringify(docs, null, 2)
|
||||
}
|
||||
|
||||
if (isStreamable) {
|
||||
const sseStreamer: IServerSideEventStreamer = options.sseStreamer
|
||||
sseStreamer.streamTokenEvent(chatId, finalOutput)
|
||||
}
|
||||
|
||||
newState = processTemplateVariables(newState, finalOutput)
|
||||
|
||||
const returnOutput = {
|
||||
id: nodeData.id,
|
||||
name: this.name,
|
||||
input: {
|
||||
question: retrieverQuery || input
|
||||
},
|
||||
output: {
|
||||
content: finalOutput
|
||||
},
|
||||
state: newState
|
||||
}
|
||||
|
||||
return returnOutput
|
||||
} catch (e) {
|
||||
throw new Error(e instanceof Error ? e.message : String(e))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { nodeClass: Retriever_Agentflow }
|
||||
|
|
@@ -0,0 +1,236 @@
|
|||
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
|
||||
|
||||
class Start_Agentflow implements INode {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
description: string
|
||||
type: string
|
||||
icon: string
|
||||
category: string
|
||||
color: string
|
||||
hideInput: boolean
|
||||
baseClasses: string[]
|
||||
documentation?: string
|
||||
credential: INodeParams
|
||||
inputs: INodeParams[]
|
||||
|
||||
constructor() {
|
||||
this.label = 'Start'
|
||||
this.name = 'startAgentflow'
|
||||
this.version = 1.1
|
||||
this.type = 'Start'
|
||||
this.category = 'Agent Flows'
|
||||
this.description = 'Starting point of the agentflow'
|
||||
this.baseClasses = [this.type]
|
||||
this.color = '#7EE787'
|
||||
this.hideInput = true
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'Input Type',
|
||||
name: 'startInputType',
|
||||
type: 'options',
|
||||
options: [
|
||||
{
|
||||
label: 'Chat Input',
|
||||
name: 'chatInput',
|
||||
description: 'Start the conversation with chat input'
|
||||
},
|
||||
{
|
||||
label: 'Form Input',
|
||||
name: 'formInput',
|
||||
description: 'Start the workflow with form inputs'
|
||||
}
|
||||
],
|
||||
default: 'chatInput'
|
||||
},
|
||||
{
|
||||
label: 'Form Title',
|
||||
name: 'formTitle',
|
||||
type: 'string',
|
||||
placeholder: 'Please Fill Out The Form',
|
||||
show: {
|
||||
startInputType: 'formInput'
|
||||
}
|
||||
},
|
||||
{
|
||||
label: 'Form Description',
|
||||
name: 'formDescription',
|
||||
type: 'string',
|
||||
placeholder: 'Complete all fields below to continue',
|
||||
show: {
|
||||
startInputType: 'formInput'
|
||||
}
|
||||
},
|
||||
{
|
||||
label: 'Form Input Types',
|
||||
name: 'formInputTypes',
|
||||
description: 'Specify the type of form input',
|
||||
type: 'array',
|
||||
show: {
|
||||
startInputType: 'formInput'
|
||||
},
|
||||
array: [
|
||||
{
|
||||
label: 'Type',
|
||||
name: 'type',
|
||||
type: 'options',
|
||||
options: [
|
||||
{
|
||||
label: 'String',
|
||||
name: 'string'
|
||||
},
|
||||
{
|
||||
label: 'Number',
|
||||
name: 'number'
|
||||
},
|
||||
{
|
||||
label: 'Boolean',
|
||||
name: 'boolean'
|
||||
},
|
||||
{
|
||||
label: 'Options',
|
||||
name: 'options'
|
||||
}
|
||||
],
|
||||
default: 'string'
|
||||
},
|
||||
{
|
||||
label: 'Label',
|
||||
name: 'label',
|
||||
type: 'string',
|
||||
placeholder: 'Label for the input'
|
||||
},
|
||||
{
|
||||
label: 'Variable Name',
|
||||
name: 'name',
|
||||
type: 'string',
|
||||
placeholder: 'Variable name for the input (must be camel case)',
|
||||
description: 'Variable name must be camel case. For example: firstName, lastName, etc.'
|
||||
},
|
||||
{
|
||||
label: 'Add Options',
|
||||
name: 'addOptions',
|
||||
type: 'array',
|
||||
show: {
|
||||
'formInputTypes[$index].type': 'options'
|
||||
},
|
||||
array: [
|
||||
{
|
||||
label: 'Option',
|
||||
name: 'option',
|
||||
type: 'string'
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
label: 'Ephemeral Memory',
|
||||
name: 'startEphemeralMemory',
|
||||
type: 'boolean',
|
||||
description: 'Start fresh for every execution without past chat history',
|
||||
optional: true
|
||||
},
|
||||
{
|
||||
label: 'Flow State',
|
||||
name: 'startState',
|
||||
description: 'Runtime state during the execution of the workflow',
|
||||
type: 'array',
|
||||
optional: true,
|
||||
array: [
|
||||
{
|
||||
label: 'Key',
|
||||
name: 'key',
|
||||
type: 'string',
|
||||
placeholder: 'Foo'
|
||||
},
|
||||
{
|
||||
label: 'Value',
|
||||
name: 'value',
|
||||
type: 'string',
|
||||
placeholder: 'Bar',
|
||||
optional: true
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
label: 'Persist State',
|
||||
name: 'startPersistState',
|
||||
type: 'boolean',
|
||||
description: 'Persist the state in the same session',
|
||||
optional: true
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
async run(nodeData: INodeData, input: string | Record<string, any>, options: ICommonObject): Promise<any> {
|
||||
const _flowState = nodeData.inputs?.startState as string
|
||||
const startInputType = nodeData.inputs?.startInputType as string
|
||||
const startEphemeralMemory = nodeData.inputs?.startEphemeralMemory as boolean
|
||||
const startPersistState = nodeData.inputs?.startPersistState as boolean
|
||||
|
||||
let flowStateArray = []
|
||||
if (_flowState) {
|
||||
try {
|
||||
flowStateArray = typeof _flowState === 'string' ? JSON.parse(_flowState) : _flowState
|
||||
} catch (error) {
|
||||
throw new Error('Invalid Flow State')
|
||||
}
|
||||
}
|
||||
|
||||
let flowState: Record<string, any> = {}
|
||||
for (const state of flowStateArray) {
|
||||
flowState[state.key] = state.value
|
||||
}
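/*
 * Illustrative sketch (hypothetical keys and values): a Start node configured with
 * startState = [{ key: 'language', value: 'en' }, { key: 'counter', value: '0' }] produces
 * flowState = { language: 'en', counter: '0' } here; values stay as the configured strings until a
 * downstream node overwrites them.
 */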
|
||||
|
||||
const runtimeState = options.agentflowRuntime?.state as ICommonObject
|
||||
if (startPersistState === true && runtimeState && Object.keys(runtimeState).length) {
|
||||
for (const state in runtimeState) {
|
||||
flowState[state] = runtimeState[state]
|
||||
}
|
||||
}
|
||||
|
||||
const inputData: ICommonObject = {}
|
||||
const outputData: ICommonObject = {}
|
||||
|
||||
if (startInputType === 'chatInput') {
|
||||
inputData.question = input
|
||||
outputData.question = input
|
||||
}
|
||||
|
||||
if (startInputType === 'formInput') {
|
||||
inputData.form = {
|
||||
title: nodeData.inputs?.formTitle,
|
||||
description: nodeData.inputs?.formDescription,
|
||||
inputs: nodeData.inputs?.formInputTypes
|
||||
}
|
||||
|
||||
let form = input
|
||||
if (options.agentflowRuntime?.form && Object.keys(options.agentflowRuntime.form).length) {
|
||||
form = options.agentflowRuntime.form
|
||||
}
|
||||
outputData.form = form
|
||||
}
|
||||
|
||||
if (startEphemeralMemory) {
|
||||
outputData.ephemeralMemory = true
|
||||
}
|
||||
|
||||
if (startPersistState) {
|
||||
outputData.persistState = true
|
||||
}
|
||||
|
||||
const returnOutput = {
|
||||
id: nodeData.id,
|
||||
name: this.name,
|
||||
input: inputData,
|
||||
output: outputData,
|
||||
state: flowState
|
||||
}
|
||||
|
||||
return returnOutput
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { nodeClass: Start_Agentflow }
|
||||
|
|
@@ -0,0 +1,42 @@
import { INode, INodeParams } from '../../../src/Interface'

class StickyNote_Agentflow implements INode {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    color: string
    tags: string[]
    baseClasses: string[]
    inputs: INodeParams[]

    constructor() {
        this.label = 'Sticky Note'
        this.name = 'stickyNoteAgentflow'
        this.version = 1.0
        this.type = 'StickyNote'
        this.color = '#fee440'
        this.category = 'Agent Flows'
        this.description = 'Add notes to the agent flow'
        this.inputs = [
            {
                label: '',
                name: 'note',
                type: 'string',
                rows: 1,
                placeholder: 'Type something here',
                optional: true
            }
        ]
        this.baseClasses = [this.type]
    }

    async run(): Promise<any> {
        return undefined
    }
}

module.exports = { nodeClass: StickyNote_Agentflow }
|
|
@@ -0,0 +1,356 @@
|
|||
import { ICommonObject, INode, INodeData, INodeOptionsValue, INodeParams, IServerSideEventStreamer } from '../../../src/Interface'
|
||||
import { updateFlowState } from '../utils'
|
||||
import { processTemplateVariables } from '../../../src/utils'
|
||||
import { Tool } from '@langchain/core/tools'
|
||||
import { ARTIFACTS_PREFIX, TOOL_ARGS_PREFIX } from '../../../src/agents'
|
||||
import zodToJsonSchema from 'zod-to-json-schema'
|
||||
|
||||
interface IToolInputArgs {
|
||||
inputArgName: string
|
||||
inputArgValue: string
|
||||
}
|
||||
|
||||
class Tool_Agentflow implements INode {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
description: string
|
||||
type: string
|
||||
icon: string
|
||||
category: string
|
||||
color: string
|
||||
hideOutput: boolean
|
||||
hint: string
|
||||
baseClasses: string[]
|
||||
documentation?: string
|
||||
credential: INodeParams
|
||||
inputs: INodeParams[]
|
||||
|
||||
constructor() {
|
||||
this.label = 'Tool'
|
||||
this.name = 'toolAgentflow'
|
||||
this.version = 1.2
|
||||
this.type = 'Tool'
|
||||
this.category = 'Agent Flows'
|
||||
this.description = 'Tools allow LLM to interact with external systems'
|
||||
this.baseClasses = [this.type]
|
||||
this.color = '#d4a373'
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'Tool',
|
||||
name: 'toolAgentflowSelectedTool',
|
||||
type: 'asyncOptions',
|
||||
loadMethod: 'listTools',
|
||||
loadConfig: true
|
||||
},
|
||||
{
|
||||
label: 'Tool Input Arguments',
|
||||
name: 'toolInputArgs',
|
||||
type: 'array',
|
||||
acceptVariable: true,
|
||||
refresh: true,
|
||||
array: [
|
||||
{
|
||||
label: 'Input Argument Name',
|
||||
name: 'inputArgName',
|
||||
type: 'asyncOptions',
|
||||
loadMethod: 'listToolInputArgs',
|
||||
refresh: true
|
||||
},
|
||||
{
|
||||
label: 'Input Argument Value',
|
||||
name: 'inputArgValue',
|
||||
type: 'string',
|
||||
acceptVariable: true
|
||||
}
|
||||
],
|
||||
show: {
|
||||
toolAgentflowSelectedTool: '.+'
|
||||
}
|
||||
},
|
||||
{
|
||||
label: 'Update Flow State',
|
||||
name: 'toolUpdateState',
|
||||
description: 'Update runtime state during the execution of the workflow',
|
||||
type: 'array',
|
||||
optional: true,
|
||||
acceptVariable: true,
|
||||
array: [
|
||||
{
|
||||
label: 'Key',
|
||||
name: 'key',
|
||||
type: 'asyncOptions',
|
||||
loadMethod: 'listRuntimeStateKeys'
|
||||
},
|
||||
{
|
||||
label: 'Value',
|
||||
name: 'value',
|
||||
type: 'string',
|
||||
acceptVariable: true,
|
||||
acceptNodeOutputAsVariable: true
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
//@ts-ignore
|
||||
loadMethods = {
|
||||
async listTools(_: INodeData, options: ICommonObject): Promise<INodeOptionsValue[]> {
|
||||
const componentNodes = options.componentNodes as {
|
||||
[key: string]: INode
|
||||
}
|
||||
|
||||
const removeTools = ['chainTool', 'retrieverTool', 'webBrowser']
|
||||
|
||||
const returnOptions: INodeOptionsValue[] = []
|
||||
for (const nodeName in componentNodes) {
|
||||
const componentNode = componentNodes[nodeName]
|
||||
if (componentNode.category === 'Tools' || componentNode.category === 'Tools (MCP)') {
|
||||
if (componentNode.tags?.includes('LlamaIndex')) {
|
||||
continue
|
||||
}
|
||||
if (removeTools.includes(nodeName)) {
|
||||
continue
|
||||
}
|
||||
returnOptions.push({
|
||||
label: componentNode.label,
|
||||
name: nodeName,
|
||||
imageSrc: componentNode.icon
|
||||
})
|
||||
}
|
||||
}
|
||||
return returnOptions
|
||||
},
|
||||
async listToolInputArgs(nodeData: INodeData, options: ICommonObject): Promise<INodeOptionsValue[]> {
|
||||
const currentNode = options.currentNode as ICommonObject
|
||||
const selectedTool = (currentNode?.inputs?.selectedTool as string) || (currentNode?.inputs?.toolAgentflowSelectedTool as string)
|
||||
const selectedToolConfig =
|
||||
(currentNode?.inputs?.selectedToolConfig as ICommonObject) ||
|
||||
(currentNode?.inputs?.toolAgentflowSelectedToolConfig as ICommonObject) ||
|
||||
{}
|
||||
|
||||
const nodeInstanceFilePath = options.componentNodes[selectedTool].filePath as string
|
||||
|
||||
const nodeModule = await import(nodeInstanceFilePath)
|
||||
const newToolNodeInstance = new nodeModule.nodeClass()
|
||||
|
||||
const newNodeData = {
|
||||
...nodeData,
|
||||
credential: selectedToolConfig['FLOWISE_CREDENTIAL_ID'],
|
||||
inputs: {
|
||||
...nodeData.inputs,
|
||||
...selectedToolConfig
|
||||
}
|
||||
}
|
||||
|
||||
try {
|
||||
const toolInstance = (await newToolNodeInstance.init(newNodeData, '', options)) as Tool
|
||||
|
||||
let toolInputArgs: ICommonObject = {}
|
||||
|
||||
if (Array.isArray(toolInstance)) {
|
||||
// Combine schemas from all tools in the array
|
||||
const allProperties = toolInstance.reduce((acc, tool) => {
|
||||
if (tool?.schema) {
|
||||
const schema: Record<string, any> = zodToJsonSchema(tool.schema)
|
||||
return { ...acc, ...(schema.properties || {}) }
|
||||
}
|
||||
return acc
|
||||
}, {})
|
||||
toolInputArgs = { properties: allProperties }
|
||||
} else {
|
||||
// Handle single tool instance
|
||||
toolInputArgs = toolInstance.schema ? zodToJsonSchema(toolInstance.schema as any) : {}
|
||||
}
|
||||
|
||||
if (toolInputArgs && Object.keys(toolInputArgs).length > 0) {
|
||||
delete toolInputArgs.$schema
|
||||
}
|
||||
|
||||
return Object.keys(toolInputArgs.properties || {}).map((item) => ({
|
||||
label: item,
|
||||
name: item,
|
||||
description: toolInputArgs.properties[item].description
|
||||
}))
|
||||
} catch (e) {
|
||||
return []
|
||||
}
|
||||
},
|
||||
async listRuntimeStateKeys(_: INodeData, options: ICommonObject): Promise<INodeOptionsValue[]> {
|
||||
const previousNodes = options.previousNodes as ICommonObject[]
|
||||
const startAgentflowNode = previousNodes.find((node) => node.name === 'startAgentflow')
|
||||
const state = startAgentflowNode?.inputs?.startState as ICommonObject[]
|
||||
return (state ?? []).map((item) => ({ label: item.key, name: item.key })) // guard against flows without a Start state
|
||||
}
|
||||
}
|
||||
|
||||
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<any> {
|
||||
const selectedTool = (nodeData.inputs?.selectedTool as string) || (nodeData.inputs?.toolAgentflowSelectedTool as string)
|
||||
const selectedToolConfig =
|
||||
(nodeData?.inputs?.selectedToolConfig as ICommonObject) ||
|
||||
(nodeData?.inputs?.toolAgentflowSelectedToolConfig as ICommonObject) ||
|
||||
{}
|
||||
|
||||
const toolInputArgs = nodeData.inputs?.toolInputArgs as IToolInputArgs[]
|
||||
const _toolUpdateState = nodeData.inputs?.toolUpdateState
|
||||
|
||||
const state = options.agentflowRuntime?.state as ICommonObject
|
||||
const chatId = options.chatId as string
|
||||
const isLastNode = options.isLastNode as boolean
|
||||
const isStreamable = isLastNode && options.sseStreamer !== undefined
|
||||
|
||||
const abortController = options.abortController as AbortController
|
||||
|
||||
// Update flow state if needed
|
||||
let newState = { ...state }
|
||||
if (_toolUpdateState && Array.isArray(_toolUpdateState) && _toolUpdateState.length > 0) {
|
||||
newState = updateFlowState(state, _toolUpdateState)
|
||||
}
|
||||
|
||||
if (!selectedTool) {
|
||||
throw new Error('Tool not selected')
|
||||
}
|
||||
|
||||
const nodeInstanceFilePath = options.componentNodes[selectedTool].filePath as string
|
||||
const nodeModule = await import(nodeInstanceFilePath)
|
||||
const newToolNodeInstance = new nodeModule.nodeClass()
|
||||
const newNodeData = {
|
||||
...nodeData,
|
||||
credential: selectedToolConfig['FLOWISE_CREDENTIAL_ID'],
|
||||
inputs: {
|
||||
...nodeData.inputs,
|
||||
...selectedToolConfig
|
||||
}
|
||||
}
|
||||
const toolInstance = (await newToolNodeInstance.init(newNodeData, '', options)) as Tool | Tool[]
|
||||
|
||||
let toolCallArgs: Record<string, any> = {}
|
||||
|
||||
const parseInputValue = (value: string): any => {
|
||||
if (typeof value !== 'string') {
|
||||
return value
|
||||
}
|
||||
|
||||
// Remove escape characters (backslashes before special characters)
|
||||
// ex: \["a", "b", "c", "d", "e"\]
|
||||
let cleanedValue = value
|
||||
.replace(/\\"/g, '"') // \" -> "
|
||||
.replace(/\\\\/g, '\\') // \\ -> \
|
||||
.replace(/\\\[/g, '[') // \[ -> [
|
||||
.replace(/\\\]/g, ']') // \] -> ]
|
||||
.replace(/\\\{/g, '{') // \{ -> {
|
||||
.replace(/\\\}/g, '}') // \} -> }
|
||||
|
||||
// Try to parse as JSON if it looks like JSON/array
|
||||
if (
|
||||
(cleanedValue.startsWith('[') && cleanedValue.endsWith(']')) ||
|
||||
(cleanedValue.startsWith('{') && cleanedValue.endsWith('}'))
|
||||
) {
|
||||
try {
|
||||
return JSON.parse(cleanedValue)
|
||||
} catch (e) {
|
||||
// If parsing fails, return the cleaned value
|
||||
return cleanedValue
|
||||
}
|
||||
}
|
||||
|
||||
return cleanedValue
|
||||
}
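/*
 * Illustrative sketch: parseInputValue unescapes UI-provided values and JSON-parses them when they
 * look like arrays or objects (escape sequences below are written as they would appear in a
 * TypeScript string literal).
 *
 *   parseInputValue('\\["a", "b"\\]')      // -> ['a', 'b']
 *   parseInputValue('{\\"limit\\": 5}')    // -> { limit: 5 }
 *   parseInputValue('plain text')          // -> 'plain text'
 */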
|
||||
|
||||
if (newToolNodeInstance.transformNodeInputsToToolArgs) {
|
||||
const defaultParams = newToolNodeInstance.transformNodeInputsToToolArgs(newNodeData)
|
||||
|
||||
toolCallArgs = {
|
||||
...defaultParams,
|
||||
...toolCallArgs
|
||||
}
|
||||
}
|
||||
|
||||
for (const item of toolInputArgs) {
|
||||
const variableName = item.inputArgName
|
||||
const variableValue = item.inputArgValue
|
||||
toolCallArgs[variableName] = parseInputValue(variableValue)
|
||||
}
|
||||
|
||||
const flowConfig = {
|
||||
chatflowId: options.chatflowid,
|
||||
sessionId: options.sessionId,
|
||||
chatId: options.chatId,
|
||||
input: input,
|
||||
state: options.agentflowRuntime?.state
|
||||
}
|
||||
|
||||
try {
|
||||
let toolOutput: string
|
||||
if (Array.isArray(toolInstance)) {
|
||||
// Execute all tools and combine their outputs
|
||||
const outputs = await Promise.all(
|
||||
toolInstance.map((tool) =>
|
||||
//@ts-ignore
|
||||
tool.call(toolCallArgs, { signal: abortController?.signal }, undefined, flowConfig)
|
||||
)
|
||||
)
|
||||
toolOutput = outputs.join('\n')
|
||||
} else {
|
||||
//@ts-ignore
|
||||
toolOutput = await toolInstance.call(toolCallArgs, { signal: abortController?.signal }, undefined, flowConfig)
|
||||
}
|
||||
|
||||
let parsedArtifacts
|
||||
|
||||
// Extract artifacts if present
|
||||
if (typeof toolOutput === 'string' && toolOutput.includes(ARTIFACTS_PREFIX)) {
|
||||
const [output, artifact] = toolOutput.split(ARTIFACTS_PREFIX)
|
||||
toolOutput = output
|
||||
try {
|
||||
parsedArtifacts = JSON.parse(artifact)
|
||||
} catch (e) {
|
||||
console.error('Error parsing artifacts from tool:', e)
|
||||
}
|
||||
}
|
||||
|
||||
let toolInput
|
||||
if (typeof toolOutput === 'string' && toolOutput.includes(TOOL_ARGS_PREFIX)) {
|
||||
const [output, args] = toolOutput.split(TOOL_ARGS_PREFIX)
|
||||
toolOutput = output
|
||||
try {
|
||||
toolInput = JSON.parse(args)
|
||||
} catch (e) {
|
||||
console.error('Error parsing tool input from tool:', e)
|
||||
}
|
||||
}
|
||||
|
||||
if (typeof toolOutput === 'object') {
|
||||
toolOutput = JSON.stringify(toolOutput, null, 2)
|
||||
}
|
||||
|
||||
if (isStreamable) {
|
||||
const sseStreamer: IServerSideEventStreamer = options.sseStreamer
|
||||
sseStreamer.streamTokenEvent(chatId, toolOutput)
|
||||
}
|
||||
|
||||
newState = processTemplateVariables(newState, toolOutput)
|
||||
|
||||
const returnOutput = {
|
||||
id: nodeData.id,
|
||||
name: this.name,
|
||||
input: {
|
||||
toolInputArgs: toolInput ?? toolInputArgs,
|
||||
selectedTool: selectedTool
|
||||
},
|
||||
output: {
|
||||
content: toolOutput,
|
||||
artifacts: parsedArtifacts
|
||||
},
|
||||
state: newState
|
||||
}
|
||||
|
||||
return returnOutput
|
||||
} catch (e) {
|
||||
throw new Error(e instanceof Error ? e.message : String(e))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { nodeClass: Tool_Agentflow }
|
||||
|
|
@@ -0,0 +1,76 @@
|
|||
export const DEFAULT_SUMMARIZER_TEMPLATE = `Progressively summarize the conversation provided and return a new summary.
|
||||
|
||||
EXAMPLE:
|
||||
Human: Why do you think artificial intelligence is a force for good?
|
||||
AI: Because artificial intelligence will help humans reach their full potential.
|
||||
|
||||
New summary:
|
||||
The human asks what the AI thinks of artificial intelligence. The AI thinks artificial intelligence is a force for good because it will help humans reach their full potential.
|
||||
END OF EXAMPLE
|
||||
|
||||
Conversation:
|
||||
{conversation}
|
||||
|
||||
New summary:`
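/*
 * Illustrative usage (hypothetical conversation text): callers substitute the serialized history
 * into the single {conversation} placeholder and send the result as one user message to the model.
 *
 *   const prompt = DEFAULT_SUMMARIZER_TEMPLATE.replace(
 *       '{conversation}',
 *       'user: What does this node do?\nassistant: It summarizes older messages.'
 *   )
 */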
|
||||
|
||||
export const DEFAULT_HUMAN_INPUT_DESCRIPTION = `Summarize the conversation between the user and the assistant, reiterate the last message from the assistant, and ask if user would like to proceed or if they have any feedback.
|
||||
- Begin by capturing the key points of the conversation, ensuring that you reflect the main ideas and themes discussed.
|
||||
- Then, clearly reproduce the last message sent by the assistant to maintain continuity. Make sure the whole message is reproduced.
|
||||
- Finally, ask the user if they would like to proceed, or provide any feedback on the last assistant message
|
||||
|
||||
## Output Format The output should be structured in three parts in text:
|
||||
|
||||
- A summary of the conversation (1-3 sentences).
|
||||
- The last assistant message (exactly as it appeared).
|
||||
- Ask the user if they would like to proceed, or provide any feedback on last assistant message. No other explanation and elaboration is needed.
|
||||
`
|
||||
|
||||
export const DEFAULT_HUMAN_INPUT_DESCRIPTION_HTML = `<p>Summarize the conversation between the user and the assistant, reiterate the last message from the assistant, and ask if user would like to proceed or if they have any feedback. </p>
|
||||
<ul>
|
||||
<li>Begin by capturing the key points of the conversation, ensuring that you reflect the main ideas and themes discussed.</li>
|
||||
<li>Then, clearly reproduce the last message sent by the assistant to maintain continuity. Make sure the whole message is reproduced.</li>
|
||||
<li>Finally, ask the user if they would like to proceed, or provide any feedback on the last assistant message</li>
|
||||
</ul>
|
||||
<h2 id="output-format-the-output-should-be-structured-in-three-parts-">Output Format The output should be structured in three parts in text:</h2>
|
||||
<ul>
|
||||
<li>A summary of the conversation (1-3 sentences).</li>
|
||||
<li>The last assistant message (exactly as it appeared).</li>
|
||||
<li>Ask the user if they would like to proceed, or provide any feedback on last assistant message. No other explanation and elaboration is needed.</li>
|
||||
</ul>
|
||||
`
|
||||
|
||||
export const CONDITION_AGENT_SYSTEM_PROMPT = `<p>You are part of a multi-agent system designed to make agent coordination and execution easy. Your task is to analyze the given input and select one matching scenario from a provided set of scenarios.</p>
|
||||
<ul>
|
||||
<li><strong>Input</strong>: A string representing the user's query, message or data.</li>
|
||||
<li><strong>Scenarios</strong>: A list of predefined scenarios that relate to the input.</li>
|
||||
<li><strong>Instruction</strong>: Determine which of the provided scenarios is the best fit for the input.</li>
|
||||
</ul>
|
||||
<h2>Steps</h2>
|
||||
<ol>
|
||||
<li><strong>Read the input string</strong> and the list of scenarios.</li>
|
||||
<li><strong>Analyze the content of the input</strong> to identify its main topic or intention.</li>
|
||||
<li><strong>Compare the input with each scenario</strong>: Evaluate how well the input's topic or intention aligns with each of the provided scenarios and select the one that is the best fit.</li>
|
||||
<li><strong>Output the result</strong>: Return the selected scenario in the specified JSON format.</li>
|
||||
</ol>
|
||||
<h2>Output Format</h2>
|
||||
<p>Output should be a JSON object that names the selected scenario, like this: <code>{"output": "<selected_scenario_name>"}</code>. No explanation is needed.</p>
|
||||
<h2>Examples</h2>
|
||||
<ol>
|
||||
<li>
|
||||
<p><strong>Input</strong>: <code>{"input": "Hello", "scenarios": ["user is asking about AI", "user is not asking about AI"], "instruction": "Your task is to check if the user is asking about AI."}</code></p>
|
||||
<p><strong>Output</strong>: <code>{"output": "user is not asking about AI"}</code></p>
|
||||
</li>
|
||||
<li>
|
||||
<p><strong>Input</strong>: <code>{"input": "What is AIGC?", "scenarios": ["user is asking about AI", "user is asking about the weather"], "instruction": "Your task is to check and see if the user is asking a topic about AI."}</code></p>
|
||||
<p><strong>Output</strong>: <code>{"output": "user is asking about AI"}</code></p>
|
||||
</li>
|
||||
<li>
|
||||
<p><strong>Input</strong>: <code>{"input": "Can you explain deep learning?", "scenarios": ["user is interested in AI topics", "user wants to order food"], "instruction": "Determine if the user is interested in learning about AI."}</code></p>
|
||||
<p><strong>Output</strong>: <code>{"output": "user is interested in AI topics"}</code></p>
|
||||
</li>
|
||||
</ol>
|
||||
<h2>Note</h2>
|
||||
<ul>
|
||||
<li>Ensure that the input scenarios align well with potential user queries for accurate matching.</li>
|
||||
<li>DO NOT include anything other than the JSON in your response.</li>
|
||||
</ul>`
|
||||
|
|
@@ -0,0 +1,907 @@
|
|||
import { BaseMessage, MessageContentImageUrl, AIMessageChunk } from '@langchain/core/messages'
|
||||
import { getImageUploads } from '../../src/multiModalUtils'
|
||||
import { addSingleFileToStorage, getFileFromStorage } from '../../src/storageUtils'
|
||||
import { ICommonObject, IFileUpload, INodeData } from '../../src/Interface'
|
||||
import { BaseMessageLike } from '@langchain/core/messages'
|
||||
import { IFlowState } from './Interface.Agentflow'
|
||||
import { getCredentialData, getCredentialParam, handleEscapeCharacters, mapMimeTypeToInputField } from '../../src/utils'
|
||||
import fetch from 'node-fetch'
|
||||
|
||||
export const addImagesToMessages = async (
|
||||
options: ICommonObject,
|
||||
allowImageUploads: boolean,
|
||||
imageResolution?: 'auto' | 'low' | 'high'
|
||||
): Promise<MessageContentImageUrl[]> => {
|
||||
const imageContent: MessageContentImageUrl[] = []
|
||||
|
||||
if (allowImageUploads && options?.uploads && options?.uploads.length > 0) {
|
||||
const imageUploads = getImageUploads(options.uploads)
|
||||
for (const upload of imageUploads) {
|
||||
let bf = upload.data
|
||||
if (upload.type === 'stored-file') {
|
||||
const fileName = upload.name.replace(/^FILE-STORAGE::/, '')
|
||||
const contents = await getFileFromStorage(fileName, options.orgId, options.chatflowid, options.chatId)
|
||||
// as the image is stored in the server, read the file and convert it to base64
|
||||
bf = 'data:' + upload.mime + ';base64,' + contents.toString('base64')
|
||||
|
||||
imageContent.push({
|
||||
type: 'image_url',
|
||||
image_url: {
|
||||
url: bf,
|
||||
detail: imageResolution ?? 'low'
|
||||
}
|
||||
})
|
||||
} else if (upload.type === 'url' && bf) {
|
||||
imageContent.push({
|
||||
type: 'image_url',
|
||||
image_url: {
|
||||
url: bf,
|
||||
detail: imageResolution ?? 'low'
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return imageContent
|
||||
}
|
||||
|
||||
/**
|
||||
* Process message array to replace stored file references with base64 image data
|
||||
* @param messages Array of messages that may contain image references
|
||||
* @param options Common options object containing chatflowid and chatId
|
||||
* @returns Object containing updated messages array and transformed original messages
|
||||
*/
|
||||
export const processMessagesWithImages = async (
|
||||
messages: BaseMessageLike[],
|
||||
options: ICommonObject
|
||||
): Promise<{
|
||||
updatedMessages: BaseMessageLike[]
|
||||
transformedMessages: BaseMessageLike[]
|
||||
}> => {
|
||||
if (!messages || !options.chatflowid || !options.chatId) {
|
||||
return {
|
||||
updatedMessages: messages,
|
||||
transformedMessages: []
|
||||
}
|
||||
}
|
||||
|
||||
// Create a deep copy of the messages to avoid mutating the original
|
||||
const updatedMessages = JSON.parse(JSON.stringify(messages))
|
||||
// Track which messages were transformed
|
||||
const transformedMessages: BaseMessageLike[] = []
|
||||
|
||||
// Scan through all messages looking for stored-file references
|
||||
for (let i = 0; i < updatedMessages.length; i++) {
|
||||
const message = updatedMessages[i]
|
||||
|
||||
// Skip non-user messages or messages without content
|
||||
if (message.role !== 'user' || !message.content) {
|
||||
continue
|
||||
}
|
||||
|
||||
// Handle array content (typically containing file references)
|
||||
if (Array.isArray(message.content)) {
|
||||
const imageContents: MessageContentImageUrl[] = []
|
||||
let hasImageReferences = false
|
||||
|
||||
// Process each content item
|
||||
for (const item of message.content) {
|
||||
// Look for stored-file type items
|
||||
if (item.type === 'stored-file' && item.name && item.mime.startsWith('image/')) {
|
||||
hasImageReferences = true
|
||||
try {
|
||||
const fileName = item.name.replace(/^FILE-STORAGE::/, '')
|
||||
// Get file contents from storage
|
||||
const contents = await getFileFromStorage(fileName, options.orgId, options.chatflowid, options.chatId)
|
||||
|
||||
// Create base64 data URL
|
||||
const base64Data = 'data:' + item.mime + ';base64,' + contents.toString('base64')
|
||||
|
||||
// Add to image content array
|
||||
imageContents.push({
|
||||
type: 'image_url',
|
||||
image_url: {
|
||||
url: base64Data,
|
||||
detail: item.imageResolution ?? 'low'
|
||||
}
|
||||
})
|
||||
} catch (error) {
|
||||
console.error(`Failed to load image ${item.name}:`, error)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Replace the content with the image content array
|
||||
if (imageContents.length > 0) {
|
||||
// Store the original message before modifying
|
||||
if (hasImageReferences) {
|
||||
transformedMessages.push(JSON.parse(JSON.stringify(messages[i])))
|
||||
}
|
||||
updatedMessages[i].content = imageContents
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
updatedMessages,
|
||||
transformedMessages
|
||||
}
|
||||
}
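/*
 * Illustrative sketch (hypothetical file name): a user message whose content is
 *   [{ type: 'stored-file', name: 'photo.png', mime: 'image/png' }]
 * comes back in updatedMessages as
 *   [{ type: 'image_url', image_url: { url: 'data:image/png;base64,...', detail: 'low' } }]
 * while the untouched original message is collected in transformedMessages so the compact file
 * reference can be restored before the history is persisted.
 */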
|
||||
|
||||
/**
|
||||
* Replace base64 image data in messages with file references
|
||||
* @param messages Array of messages that may contain base64 image data
|
||||
* @param uniqueImageMessages Array of messages with file references for new images
|
||||
* @param pastImageMessages Array of messages with file references for previous images
|
||||
* @returns Updated messages array with file references instead of base64 data
|
||||
*/
|
||||
export const replaceBase64ImagesWithFileReferences = (
|
||||
messages: BaseMessageLike[],
|
||||
uniqueImageMessages: BaseMessageLike[] = [],
|
||||
pastImageMessages: BaseMessageLike[] = []
|
||||
): BaseMessageLike[] => {
|
||||
// Create a deep copy to avoid mutating the original
|
||||
const updatedMessages = JSON.parse(JSON.stringify(messages))
|
||||
|
||||
// Track positions in replacement arrays
|
||||
let pastMessageIndex = 0
|
||||
let pastContentIndex = 0
|
||||
let uniqueMessageIndex = 0
|
||||
let uniqueContentIndex = 0
|
||||
|
||||
for (let i = 0; i < updatedMessages.length; i++) {
|
||||
const message = updatedMessages[i]
|
||||
if (message.content && Array.isArray(message.content)) {
|
||||
for (let j = 0; j < message.content.length; j++) {
|
||||
const item = message.content[j]
|
||||
if (item.type === 'image_url') {
|
||||
// Try past images first
|
||||
let replacement = null
|
||||
|
||||
if (pastMessageIndex < pastImageMessages.length) {
|
||||
const pastMessage = pastImageMessages[pastMessageIndex] as BaseMessage | undefined
|
||||
if (pastMessage && Array.isArray(pastMessage.content)) {
|
||||
if (pastContentIndex < pastMessage.content.length) {
|
||||
replacement = pastMessage.content[pastContentIndex]
|
||||
pastContentIndex++
|
||||
|
||||
// Move to next message if we've used all content in current one
|
||||
if (pastContentIndex >= pastMessage.content.length) {
|
||||
pastMessageIndex++
|
||||
pastContentIndex = 0
|
||||
}
|
||||
} else {
|
||||
// Current message has no more content, move to next
|
||||
pastMessageIndex++
|
||||
pastContentIndex = 0
|
||||
|
||||
// Try again with the next message
|
||||
if (pastMessageIndex < pastImageMessages.length) {
|
||||
const nextPastMessage = pastImageMessages[pastMessageIndex] as BaseMessage | undefined
|
||||
if (nextPastMessage && Array.isArray(nextPastMessage.content) && nextPastMessage.content.length > 0) {
|
||||
replacement = nextPastMessage.content[0]
|
||||
pastContentIndex = 1
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Try unique images if no past image replacement found
|
||||
if (!replacement && uniqueMessageIndex < uniqueImageMessages.length) {
|
||||
const uniqueMessage = uniqueImageMessages[uniqueMessageIndex] as BaseMessage | undefined
|
||||
if (uniqueMessage && Array.isArray(uniqueMessage.content)) {
|
||||
if (uniqueContentIndex < uniqueMessage.content.length) {
|
||||
replacement = uniqueMessage.content[uniqueContentIndex]
|
||||
uniqueContentIndex++
|
||||
|
||||
// Move to next message if we've used all content in current one
|
||||
if (uniqueContentIndex >= uniqueMessage.content.length) {
|
||||
uniqueMessageIndex++
|
||||
uniqueContentIndex = 0
|
||||
}
|
||||
} else {
|
||||
// Current message has no more content, move to next
|
||||
uniqueMessageIndex++
|
||||
uniqueContentIndex = 0
|
||||
|
||||
// Try again with the next message
|
||||
if (uniqueMessageIndex < uniqueImageMessages.length) {
|
||||
const nextUniqueMessage = uniqueImageMessages[uniqueMessageIndex] as BaseMessage | undefined
|
||||
if (
|
||||
nextUniqueMessage &&
|
||||
Array.isArray(nextUniqueMessage.content) &&
|
||||
nextUniqueMessage.content.length > 0
|
||||
) {
|
||||
replacement = nextUniqueMessage.content[0]
|
||||
uniqueContentIndex = 1
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Apply replacement if found
|
||||
if (replacement) {
|
||||
message.content[j] = {
|
||||
...replacement
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return updatedMessages
|
||||
}
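/*
 * This is the inverse of the image expansion above: walking the messages in order, each image_url
 * item is swapped back for the next available file-reference item, consuming pastImageMessages
 * first and uniqueImageMessages second, so the persisted history stores compact file references
 * instead of base64 payloads.
 */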
|
||||
|
||||
/**
|
||||
* Get unique image messages from uploads
|
||||
* @param options Common options object containing uploads
|
||||
* @param messages Array of messages to check for existing images
|
||||
* @param modelConfig Model configuration object containing allowImageUploads and imageResolution
|
||||
* @returns Object containing imageMessageWithFileRef and imageMessageWithBase64
|
||||
*/
|
||||
export const getUniqueImageMessages = async (
|
||||
options: ICommonObject,
|
||||
messages: BaseMessageLike[],
|
||||
modelConfig?: ICommonObject
|
||||
): Promise<{ imageMessageWithFileRef: BaseMessageLike; imageMessageWithBase64: BaseMessageLike } | undefined> => {
|
||||
if (!options.uploads) return undefined
|
||||
|
||||
// Get images from uploads
|
||||
const images = await addImagesToMessages(options, modelConfig?.allowImageUploads, modelConfig?.imageResolution)
|
||||
|
||||
// Filter out images that are already in previous messages
|
||||
const uniqueImages = images.filter((image) => {
|
||||
// Check if this image is already in any existing message
|
||||
return !messages.some((msg: any) => {
|
||||
// For multimodal content (arrays with image objects)
|
||||
if (Array.isArray(msg.content)) {
|
||||
return msg.content.some(
|
||||
(item: any) =>
|
||||
// Compare by image URL/content for image objects
|
||||
item.type === 'image_url' && image.type === 'image_url' && JSON.stringify(item) === JSON.stringify(image)
|
||||
)
|
||||
}
|
||||
// For direct comparison of simple content
|
||||
return JSON.stringify(msg.content) === JSON.stringify(image)
|
||||
})
|
||||
})
|
||||
|
||||
if (uniqueImages.length === 0) {
|
||||
return undefined
|
||||
}
|
||||
|
||||
// Create messages with the original file references for storage/display
|
||||
const imageMessageWithFileRef = {
|
||||
role: 'user',
|
||||
content: options.uploads.map((upload: IFileUpload) => ({
|
||||
type: upload.type,
|
||||
name: upload.name,
|
||||
mime: upload.mime,
|
||||
imageResolution: modelConfig?.imageResolution
|
||||
}))
|
||||
}
|
||||
|
||||
// Create messages with base64 data for the LLM
|
||||
const imageMessageWithBase64 = {
|
||||
role: 'user',
|
||||
content: uniqueImages
|
||||
}
|
||||
|
||||
return {
|
||||
imageMessageWithFileRef,
|
||||
imageMessageWithBase64
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get past chat history image messages
|
||||
* @param pastChatHistory Array of past chat history messages
|
||||
* @param options Common options object
|
||||
* @returns Object containing updatedPastMessages and transformedPastMessages
|
||||
*/
|
||||
export const getPastChatHistoryImageMessages = async (
|
||||
pastChatHistory: BaseMessageLike[],
|
||||
options: ICommonObject
|
||||
): Promise<{ updatedPastMessages: BaseMessageLike[]; transformedPastMessages: BaseMessageLike[] }> => {
|
||||
const chatHistory = []
|
||||
const transformedPastMessages = []
|
||||
|
||||
for (let i = 0; i < pastChatHistory.length; i++) {
|
||||
const message = pastChatHistory[i] as BaseMessage & { role: string }
|
||||
const messageRole = message.role || 'user'
|
||||
if (message.additional_kwargs && message.additional_kwargs.fileUploads) {
|
||||
// example: [{"type":"stored-file","name":"0_DiXc4ZklSTo3M8J4.jpg","mime":"image/jpeg"}]
|
||||
const fileUploads = message.additional_kwargs.fileUploads
|
||||
const artifacts = message.additional_kwargs.artifacts
|
||||
const fileAnnotations = message.additional_kwargs.fileAnnotations
|
||||
const usedTools = message.additional_kwargs.usedTools
|
||||
try {
|
||||
let messageWithFileUploads = ''
|
||||
const uploads: IFileUpload[] = typeof fileUploads === 'string' ? JSON.parse(fileUploads) : fileUploads
|
||||
const imageContents: MessageContentImageUrl[] = []
|
||||
for (const upload of uploads) {
|
||||
if (upload.type === 'stored-file' && upload.mime.startsWith('image/')) {
|
||||
const fileName = upload.name.replace(/^FILE-STORAGE::/, '')
|
||||
const fileData = await getFileFromStorage(fileName, options.orgId, options.chatflowid, options.chatId)
|
||||
// as the image is stored in the server, read the file and convert it to base64
|
||||
const bf = 'data:' + upload.mime + ';base64,' + fileData.toString('base64')
|
||||
|
||||
imageContents.push({
|
||||
type: 'image_url',
|
||||
image_url: {
|
||||
url: bf
|
||||
}
|
||||
})
|
||||
} else if (upload.type === 'url' && upload.mime.startsWith('image') && upload.data) {
|
||||
imageContents.push({
|
||||
type: 'image_url',
|
||||
image_url: {
|
||||
url: upload.data
|
||||
}
|
||||
})
|
||||
} else if (upload.type === 'stored-file:full') {
|
||||
const fileLoaderNodeModule = await import('../../nodes/documentloaders/File/File')
|
||||
// @ts-ignore
|
||||
const fileLoaderNodeInstance = new fileLoaderNodeModule.nodeClass()
|
||||
const nodeOptions = {
|
||||
retrieveAttachmentChatId: true,
|
||||
chatflowid: options.chatflowid,
|
||||
chatId: options.chatId,
|
||||
orgId: options.orgId
|
||||
}
|
||||
const fileInputFieldFromMimeType = mapMimeTypeToInputField(upload.mime)
|
||||
const nodeData = {
|
||||
inputs: {
|
||||
[fileInputFieldFromMimeType]: `FILE-STORAGE::${JSON.stringify([upload.name])}`
|
||||
}
|
||||
}
|
||||
const documents: string = await fileLoaderNodeInstance.init(nodeData, '', nodeOptions)
|
||||
messageWithFileUploads += `<doc name='${upload.name}'>${handleEscapeCharacters(documents, true)}</doc>\n\n`
|
||||
}
|
||||
}
|
||||
const messageContent = messageWithFileUploads ? `${messageWithFileUploads}\n\n${message.content}` : message.content
|
||||
const hasArtifacts = artifacts && Array.isArray(artifacts) && artifacts.length > 0
|
||||
const hasFileAnnotations = fileAnnotations && Array.isArray(fileAnnotations) && fileAnnotations.length > 0
|
||||
const hasUsedTools = usedTools && Array.isArray(usedTools) && usedTools.length > 0
|
||||
|
||||
if (imageContents.length > 0) {
|
||||
const imageMessage: any = {
|
||||
role: messageRole,
|
||||
content: imageContents
|
||||
}
|
||||
if (hasArtifacts || hasFileAnnotations || hasUsedTools) {
|
||||
imageMessage.additional_kwargs = {}
|
||||
if (hasArtifacts) imageMessage.additional_kwargs.artifacts = artifacts
|
||||
if (hasFileAnnotations) imageMessage.additional_kwargs.fileAnnotations = fileAnnotations
|
||||
if (hasUsedTools) imageMessage.additional_kwargs.usedTools = usedTools
|
||||
}
|
||||
chatHistory.push(imageMessage)
|
||||
transformedPastMessages.push({
|
||||
role: messageRole,
|
||||
content: [...uploads] // reuse the uploads array parsed above instead of re-parsing additional_kwargs.fileUploads
|
||||
})
|
||||
}
|
||||
|
||||
const contentMessage: any = {
|
||||
role: messageRole,
|
||||
content: messageContent
|
||||
}
|
||||
if (hasArtifacts || hasFileAnnotations || hasUsedTools) {
|
||||
contentMessage.additional_kwargs = {}
|
||||
if (hasArtifacts) contentMessage.additional_kwargs.artifacts = artifacts
|
||||
if (hasFileAnnotations) contentMessage.additional_kwargs.fileAnnotations = fileAnnotations
|
||||
if (hasUsedTools) contentMessage.additional_kwargs.usedTools = usedTools
|
||||
}
|
||||
chatHistory.push(contentMessage)
|
||||
} catch (e) {
|
||||
// failed to parse fileUploads, continue with text only
|
||||
const hasArtifacts = artifacts && Array.isArray(artifacts) && artifacts.length > 0
|
||||
const hasFileAnnotations = fileAnnotations && Array.isArray(fileAnnotations) && fileAnnotations.length > 0
|
||||
const hasUsedTools = usedTools && Array.isArray(usedTools) && usedTools.length > 0
|
||||
|
||||
const errorMessage: any = {
|
||||
role: messageRole,
|
||||
content: message.content
|
||||
}
|
||||
if (hasArtifacts || hasFileAnnotations || hasUsedTools) {
|
||||
errorMessage.additional_kwargs = {}
|
||||
if (hasArtifacts) errorMessage.additional_kwargs.artifacts = artifacts
|
||||
if (hasFileAnnotations) errorMessage.additional_kwargs.fileAnnotations = fileAnnotations
|
||||
if (hasUsedTools) errorMessage.additional_kwargs.usedTools = usedTools
|
||||
}
|
||||
chatHistory.push(errorMessage)
|
||||
}
|
||||
} else if (message.additional_kwargs) {
|
||||
const hasArtifacts =
|
||||
message.additional_kwargs.artifacts &&
|
||||
Array.isArray(message.additional_kwargs.artifacts) &&
|
||||
message.additional_kwargs.artifacts.length > 0
|
||||
const hasFileAnnotations =
|
||||
message.additional_kwargs.fileAnnotations &&
|
||||
Array.isArray(message.additional_kwargs.fileAnnotations) &&
|
||||
message.additional_kwargs.fileAnnotations.length > 0
|
||||
const hasUsedTools =
|
||||
message.additional_kwargs.usedTools &&
|
||||
Array.isArray(message.additional_kwargs.usedTools) &&
|
||||
message.additional_kwargs.usedTools.length > 0
|
||||
|
||||
if (hasArtifacts || hasFileAnnotations || hasUsedTools) {
|
||||
const messageAdditionalKwargs: any = {}
|
||||
if (hasArtifacts) messageAdditionalKwargs.artifacts = message.additional_kwargs.artifacts
|
||||
if (hasFileAnnotations) messageAdditionalKwargs.fileAnnotations = message.additional_kwargs.fileAnnotations
|
||||
if (hasUsedTools) messageAdditionalKwargs.usedTools = message.additional_kwargs.usedTools
|
||||
|
||||
chatHistory.push({
|
||||
role: messageRole,
|
||||
content: message.content,
|
||||
additional_kwargs: messageAdditionalKwargs
|
||||
})
|
||||
} else {
|
||||
chatHistory.push({
|
||||
role: messageRole,
|
||||
content: message.content
|
||||
})
|
||||
}
|
||||
} else {
|
||||
chatHistory.push({
|
||||
role: messageRole,
|
||||
content: message.content
|
||||
})
|
||||
}
|
||||
}
|
||||
return {
|
||||
updatedPastMessages: chatHistory,
|
||||
transformedPastMessages
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Gets MIME type from filename extension
 */
export const getMimeTypeFromFilename = (filename: string): string => {
    const extension = filename.toLowerCase().split('.').pop()
    const mimeTypes: { [key: string]: string } = {
        png: 'image/png',
        jpg: 'image/jpeg',
        jpeg: 'image/jpeg',
        gif: 'image/gif',
        pdf: 'application/pdf',
        txt: 'text/plain',
        csv: 'text/csv',
        json: 'application/json',
        html: 'text/html',
        xml: 'application/xml'
    }
    return mimeTypes[extension || ''] || 'application/octet-stream'
}

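A quick illustration of the lookup above, with invented filenames (not taken from the diff): the extension is lower-cased before the lookup, and anything missing from the map falls back to application/octet-stream.

// Illustrative only — hypothetical filenames
getMimeTypeFromFilename('report.PDF') // 'application/pdf'
getMimeTypeFromFilename('diagram.webp') // 'application/octet-stream' (webp is not in the map)
getMimeTypeFromFilename('notes') // 'application/octet-stream' (no extension)
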
/**
 * Gets artifact type from filename extension for UI rendering
 */
export const getArtifactTypeFromFilename = (filename: string): string => {
    const extension = filename.toLowerCase().split('.').pop()
    const artifactTypes: { [key: string]: string } = {
        png: 'png',
        jpg: 'jpeg',
        jpeg: 'jpeg',
        html: 'html',
        htm: 'html',
        md: 'markdown',
        markdown: 'markdown',
        json: 'json',
        js: 'javascript',
        javascript: 'javascript',
        tex: 'latex',
        latex: 'latex',
        txt: 'text',
        csv: 'text',
        pdf: 'text'
    }
    return artifactTypes[extension || ''] || 'text'
}

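For comparison, the same kind of lookup for UI artifact types (again with invented filenames): image and markup extensions map to dedicated renderers, while csv and pdf fall back to the plain text artifact.

// Illustrative only — hypothetical filenames
getArtifactTypeFromFilename('chart.png') // 'png'
getArtifactTypeFromFilename('summary.md') // 'markdown'
getArtifactTypeFromFilename('data.csv') // 'text'
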
/**
 * Saves base64 image data to storage and returns file information
 */
export const saveBase64Image = async (
    outputItem: any,
    options: ICommonObject
): Promise<{ filePath: string; fileName: string; totalSize: number } | null> => {
    try {
        if (!outputItem.result) {
            return null
        }

        // Extract base64 data and create buffer
        const base64Data = outputItem.result
        const imageBuffer = Buffer.from(base64Data, 'base64')

        // Determine file extension and MIME type
        const outputFormat = outputItem.output_format || 'png'
        const fileName = `generated_image_${outputItem.id || Date.now()}.${outputFormat}`
        const mimeType = outputFormat === 'png' ? 'image/png' : 'image/jpeg'

        // Save the image using the existing storage utility
        const { path, totalSize } = await addSingleFileToStorage(
            mimeType,
            imageBuffer,
            fileName,
            options.orgId,
            options.chatflowid,
            options.chatId
        )

        return { filePath: path, fileName, totalSize }
    } catch (error) {
        console.error('Error saving base64 image:', error)
        return null
    }
}

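A minimal usage sketch for saveBase64Image, assuming it is called from code that already has the node's ICommonObject options; the output item shape and the option values below are placeholders, not taken from the diff.

// Sketch only — values are placeholders
const outputItem = { id: 'img_1', result: '<base64 png data>', output_format: 'png' }
const storageOptions = { orgId: 'org-1', chatflowid: 'flow-1', chatId: 'chat-1' }
const saved = await saveBase64Image(outputItem, storageOptions)
if (saved) {
    // e.g. 'generated_image_img_1.png', plus the stored path and size in bytes
    console.log(saved.fileName, saved.filePath, saved.totalSize)
}
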
/**
 * Saves Gemini inline image data to storage and returns file information
 */
export const saveGeminiInlineImage = async (
    inlineItem: any,
    options: ICommonObject
): Promise<{ filePath: string; fileName: string; totalSize: number } | null> => {
    try {
        if (!inlineItem.data || !inlineItem.mimeType) {
            return null
        }

        // Extract base64 data and create buffer
        const base64Data = inlineItem.data
        const imageBuffer = Buffer.from(base64Data, 'base64')

        // Determine file extension from MIME type
        const mimeType = inlineItem.mimeType
        let extension = 'png'
        if (mimeType.includes('jpeg') || mimeType.includes('jpg')) {
            extension = 'jpg'
        } else if (mimeType.includes('png')) {
            extension = 'png'
        } else if (mimeType.includes('gif')) {
            extension = 'gif'
        } else if (mimeType.includes('webp')) {
            extension = 'webp'
        }

        const fileName = `gemini_generated_image_${Date.now()}.${extension}`

        // Save the image using the existing storage utility
        const { path, totalSize } = await addSingleFileToStorage(
            mimeType,
            imageBuffer,
            fileName,
            options.orgId,
            options.chatflowid,
            options.chatId
        )

        return { filePath: path, fileName, totalSize }
    } catch (error) {
        console.error('Error saving Gemini inline image:', error)
        return null
    }
}

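The Gemini variant takes the raw inline part (data plus mimeType) rather than a tool output item; a similar hedged sketch with placeholder values:

// Sketch only — the inline item mirrors Gemini's inlineData shape
const savedInline = await saveGeminiInlineImage(
    { data: '<base64 image data>', mimeType: 'image/webp' },
    { orgId: 'org-1', chatflowid: 'flow-1', chatId: 'chat-1' }
)
// With an image/webp MIME type the file is named gemini_generated_image_<timestamp>.webp
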
/**
 * Downloads file content from container file citation
 */
export const downloadContainerFile = async (
    containerId: string,
    fileId: string,
    filename: string,
    modelNodeData: INodeData,
    options: ICommonObject
): Promise<{ filePath: string; totalSize: number } | null> => {
    try {
        const credentialData = await getCredentialData(modelNodeData.credential ?? '', options)
        const openAIApiKey = getCredentialParam('openAIApiKey', credentialData, modelNodeData)

        if (!openAIApiKey) {
            console.warn('No OpenAI API key available for downloading container file')
            return null
        }

        // Download the file using OpenAI Container API
        const response = await fetch(`https://api.openai.com/v1/containers/${containerId}/files/${fileId}/content`, {
            method: 'GET',
            headers: {
                Accept: '*/*',
                Authorization: `Bearer ${openAIApiKey}`
            }
        })

        if (!response.ok) {
            console.warn(
                `Failed to download container file ${fileId} from container ${containerId}: ${response.status} ${response.statusText}`
            )
            return null
        }

        // Extract the binary data from the Response object
        const data = await response.arrayBuffer()
        const dataBuffer = Buffer.from(data)
        const mimeType = getMimeTypeFromFilename(filename)

        // Store the file using the same storage utility as OpenAIAssistant
        const { path, totalSize } = await addSingleFileToStorage(
            mimeType,
            dataBuffer,
            filename,
            options.orgId,
            options.chatflowid,
            options.chatId
        )

        return { filePath: path, totalSize }
    } catch (error) {
        console.error('Error downloading container file:', error)
        return null
    }
}

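A hedged calling sketch: the container and file IDs below are invented, and nodeData is assumed to be the chat model's INodeData so the helper can resolve an OpenAI API key from its credential.

// Sketch only — IDs are invented; nodeData/options come from the surrounding node
const download = await downloadContainerFile('cntr_abc', 'file_xyz', 'analysis.csv', nodeData, options)
if (download) {
    console.log(download.filePath, download.totalSize)
}
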
/**
 * Replace inlineData base64 with file references in the response content
 */
export const replaceInlineDataWithFileReferences = (
    response: AIMessageChunk,
    savedInlineImages: Array<{ filePath: string; fileName: string; mimeType: string }>
): void => {
    // Check if content is an array
    if (!Array.isArray(response.content)) {
        return
    }

    // Replace base64 data with file references in response content
    let savedImageIndex = 0
    for (let i = 0; i < response.content.length; i++) {
        const contentItem = response.content[i]
        if (
            typeof contentItem === 'object' &&
            contentItem.type === 'inlineData' &&
            contentItem.inlineData &&
            savedImageIndex < savedInlineImages.length
        ) {
            const savedImage = savedInlineImages[savedImageIndex]
            // Replace with file reference
            response.content[i] = {
                type: 'stored-file',
                name: savedImage.fileName,
                mime: savedImage.mimeType,
                path: savedImage.filePath
            }
            savedImageIndex++
        }
    }

    // Clear the inlineData from response_metadata to avoid duplication
    if (response.response_metadata?.inlineData) {
        delete response.response_metadata.inlineData
    }
}

/**
 * Extracts artifacts from response metadata (both annotations and built-in tools)
 */
export const extractArtifactsFromResponse = async (
    responseMetadata: any,
    modelNodeData: INodeData,
    options: ICommonObject
): Promise<{
    artifacts: any[]
    fileAnnotations: any[]
    savedInlineImages?: Array<{ filePath: string; fileName: string; mimeType: string }>
}> => {
    const artifacts: any[] = []
    const fileAnnotations: any[] = []
    const savedInlineImages: Array<{ filePath: string; fileName: string; mimeType: string }> = []

    // Handle Gemini inline data (image generation)
    if (responseMetadata?.inlineData && Array.isArray(responseMetadata.inlineData)) {
        for (const inlineItem of responseMetadata.inlineData) {
            if (inlineItem.type === 'gemini_inline_data' && inlineItem.data && inlineItem.mimeType) {
                try {
                    const savedImageResult = await saveGeminiInlineImage(inlineItem, options)
                    if (savedImageResult) {
                        // Create artifact in the same format as other image artifacts
                        const fileType = getArtifactTypeFromFilename(savedImageResult.fileName)
                        artifacts.push({
                            type: fileType,
                            data: savedImageResult.filePath
                        })

                        // Track saved image for replacing base64 data in content
                        savedInlineImages.push({
                            filePath: savedImageResult.filePath,
                            fileName: savedImageResult.fileName,
                            mimeType: inlineItem.mimeType
                        })
                    }
                } catch (error) {
                    console.error('Error processing Gemini inline image artifact:', error)
                }
            }
        }
    }

    if (!responseMetadata?.output || !Array.isArray(responseMetadata.output)) {
        return { artifacts, fileAnnotations, savedInlineImages: savedInlineImages.length > 0 ? savedInlineImages : undefined }
    }

    for (const outputItem of responseMetadata.output) {
        // Handle container file citations from annotations
        if (outputItem.type === 'message' && outputItem.content && Array.isArray(outputItem.content)) {
            for (const contentItem of outputItem.content) {
                if (contentItem.annotations && Array.isArray(contentItem.annotations)) {
                    for (const annotation of contentItem.annotations) {
                        if (annotation.type === 'container_file_citation' && annotation.file_id && annotation.filename) {
                            try {
                                // Download and store the file content
                                const downloadResult = await downloadContainerFile(
                                    annotation.container_id,
                                    annotation.file_id,
                                    annotation.filename,
                                    modelNodeData,
                                    options
                                )

                                if (downloadResult) {
                                    const fileType = getArtifactTypeFromFilename(annotation.filename)

                                    if (fileType === 'png' || fileType === 'jpeg' || fileType === 'jpg') {
                                        const artifact = {
                                            type: fileType,
                                            data: downloadResult.filePath
                                        }

                                        artifacts.push(artifact)
                                    } else {
                                        fileAnnotations.push({
                                            filePath: downloadResult.filePath,
                                            fileName: annotation.filename
                                        })
                                    }
                                }
                            } catch (error) {
                                console.error('Error processing annotation:', error)
                            }
                        }
                    }
                }
            }
        }

        // Handle built-in tool artifacts (like image generation)
        if (outputItem.type === 'image_generation_call' && outputItem.result) {
            try {
                const savedImageResult = await saveBase64Image(outputItem, options)
                if (savedImageResult) {
                    // Replace the base64 result with the file path in the response metadata
                    outputItem.result = savedImageResult.filePath

                    // Create artifact in the same format as other image artifacts
                    const fileType = getArtifactTypeFromFilename(savedImageResult.fileName)
                    artifacts.push({
                        type: fileType,
                        data: savedImageResult.filePath
                    })
                }
            } catch (error) {
                console.error('Error processing image generation artifact:', error)
            }
        }
    }

    return { artifacts, fileAnnotations, savedInlineImages: savedInlineImages.length > 0 ? savedInlineImages : undefined }
}

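A hedged sketch of how the two helpers above are typically wired together after a model call; response, nodeData and options are assumed to come from the surrounding node, so treat the names as placeholders.

// Sketch only — post-process a model response before returning it to the UI
const { artifacts, fileAnnotations, savedInlineImages } = await extractArtifactsFromResponse(
    response.response_metadata,
    nodeData,
    options
)
if (savedInlineImages?.length) {
    // Swap raw base64 inlineData entries for stored-file references to keep the payload small
    replaceInlineDataWithFileReferences(response, savedInlineImages)
}
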
/**
 * Add image artifacts from previous assistant messages as user messages
 * This allows the LLM to see and reference the generated images in the conversation
 * Messages are marked with a special flag for later removal
 */
export const addImageArtifactsToMessages = async (messages: BaseMessageLike[], options: ICommonObject): Promise<void> => {
    const imageExtensions = ['png', 'jpg', 'jpeg', 'gif', 'webp']
    const messagesToInsert: Array<{ index: number; message: any }> = []

    // Iterate through messages to find assistant messages with image artifacts
    for (let i = 0; i < messages.length; i++) {
        const message = messages[i] as any

        // Check if this is an assistant message with artifacts
        if (
            (message.role === 'assistant' || message.role === 'ai') &&
            message.additional_kwargs?.artifacts &&
            Array.isArray(message.additional_kwargs.artifacts)
        ) {
            const artifacts = message.additional_kwargs.artifacts
            const imageArtifacts: Array<{ type: string; name: string; mime: string }> = []

            // Extract image artifacts
            for (const artifact of artifacts) {
                if (artifact.type && artifact.data) {
                    // Check if this is an image artifact by file type
                    if (imageExtensions.includes(artifact.type.toLowerCase())) {
                        // Extract filename from the file path
                        const fileName = artifact.data.split('/').pop() || artifact.data
                        const mimeType = `image/${artifact.type.toLowerCase()}`

                        imageArtifacts.push({
                            type: 'stored-file',
                            name: fileName,
                            mime: mimeType
                        })
                    }
                }
            }

            // If we found image artifacts, prepare to insert a user message after this assistant message
            if (imageArtifacts.length > 0) {
                // Check if the next message already contains these image artifacts to avoid duplicates
                const nextMessage = messages[i + 1] as any
                const shouldInsert =
                    !nextMessage ||
                    nextMessage.role !== 'user' ||
                    !Array.isArray(nextMessage.content) ||
                    !nextMessage.content.some(
                        (item: any) =>
                            (item.type === 'stored-file' || item.type === 'image_url') &&
                            imageArtifacts.some((artifact) => {
                                // Compare with and without FILE-STORAGE:: prefix
                                const artifactName = artifact.name.replace('FILE-STORAGE::', '')
                                const itemName = item.name?.replace('FILE-STORAGE::', '') || ''
                                return artifactName === itemName
                            })
                    )

                if (shouldInsert) {
                    messagesToInsert.push({
                        index: i + 1,
                        message: {
                            role: 'user',
                            content: imageArtifacts,
                            _isTemporaryImageMessage: true // Mark for later removal
                        }
                    })
                }
            }
        }
    }

    // Insert messages in reverse order to maintain correct indices
    for (let i = messagesToInsert.length - 1; i >= 0; i--) {
        const { index, message } = messagesToInsert[i]
        messages.splice(index, 0, message)
    }

    // Convert stored-file references to base64 image_url format
    if (messagesToInsert.length > 0) {
        const { updatedMessages } = await processMessagesWithImages(messages, options)
        // Replace the messages array content with the updated messages
        messages.length = 0
        messages.push(...updatedMessages)
    }
}

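A hedged sketch of how this is meant to be used around a model call; messages and options are assumed to come from the calling node.

// Sketch only — mutates `messages` in place
await addImageArtifactsToMessages(messages, options)
// ...invoke the model with `messages`...
// Per the doc comment above, the injected user messages carry a marker so they can be removed afterwards, e.g.:
const withoutTemporary = messages.filter((m: any) => !m._isTemporaryImageMessage)
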
/**
 * Updates the flow state with new values
 */
export const updateFlowState = (state: ICommonObject, updateState: IFlowState[]): ICommonObject => {
    let newFlowState: Record<string, any> = {}
    for (const state of updateState) {
        newFlowState[state.key] = state.value
    }

    return {
        ...state,
        ...newFlowState
    }
}

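A small illustration with invented keys: updates are merged on top of the existing state, so values from the update list win.

// Illustrative only
const nextState = updateFlowState({ topic: 'billing', attempts: 1 }, [
    { key: 'attempts', value: 2 },
    { key: 'resolved', value: true }
])
// nextState => { topic: 'billing', attempts: 2, resolved: true }
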
@@ -128,7 +128,7 @@ class Airtable_Agents implements INode {

let base64String = Buffer.from(JSON.stringify(airtableData)).toString('base64')

- const loggerHandler = new ConsoleCallbackHandler(options.logger)
+ const loggerHandler = new ConsoleCallbackHandler(options.logger, options?.orgId)
const callbacks = await additionalCallbacks(nodeData, options)

const pyodide = await LoadPyodide()

@@ -163,7 +163,7 @@ json.dumps(my_dict)`
const chain = new LLMChain({
llm: model,
prompt: PromptTemplate.fromTemplate(systemPrompt),
- verbose: process.env.DEBUG === 'true'
+ verbose: process.env.DEBUG === 'true' ? true : false
})
const inputs = {
dict: dataframeColDict,

@@ -183,7 +183,7 @@ json.dumps(my_dict)`
// TODO: get print console output
finalResult = await pyodide.runPythonAsync(code)
} catch (error) {
- throw new Error(`Sorry, I'm unable to find answer for question: "${input}" using follwoing code: "${pythonCode}"`)
+ throw new Error(`Sorry, I'm unable to find answer for question: "${input}" using following code: "${pythonCode}"`)
}
}

@@ -192,7 +192,7 @@ json.dumps(my_dict)`
const chain = new LLMChain({
llm: model,
prompt: PromptTemplate.fromTemplate(finalSystemPrompt),
- verbose: process.env.DEBUG === 'true'
+ verbose: process.env.DEBUG === 'true' ? true : false
})
const inputs = {
question: input,

@@ -23,6 +23,7 @@ class AutoGPT_Agents implements INode {
category: string
baseClasses: string[]
inputs: INodeParams[]
+ badge: string

constructor() {
this.label = 'AutoGPT'

@@ -30,6 +31,7 @@ class AutoGPT_Agents implements INode {
this.version = 2.0
this.type = 'AutoGPT'
this.category = 'Agents'
+ this.badge = 'DEPRECATING'
this.icon = 'autogpt.svg'
this.description = 'Autonomous agent with chain of thoughts for self-guided task completion'
this.baseClasses = ['AutoGPT']

@@ -15,6 +15,7 @@ class BabyAGI_Agents implements INode {
category: string
baseClasses: string[]
inputs: INodeParams[]
+ badge: string

constructor() {
this.label = 'BabyAGI'

@@ -23,6 +24,7 @@ class BabyAGI_Agents implements INode {
this.type = 'BabyAGI'
this.category = 'Agents'
this.icon = 'babyagi.svg'
+ this.badge = 'DEPRECATING'
this.description = 'Task Driven Autonomous Agent which creates new task and reprioritizes task list based on objective'
this.baseClasses = ['BabyAGI']
this.inputs = [

@@ -97,7 +97,7 @@ class CSV_Agents implements INode {
}
}

- const loggerHandler = new ConsoleCallbackHandler(options.logger)
+ const loggerHandler = new ConsoleCallbackHandler(options.logger, options?.orgId)
const shouldStreamResponse = options.shouldStreamResponse
const sseStreamer: IServerSideEventStreamer = options.sseStreamer as IServerSideEventStreamer
const chatId = options.chatId

@@ -114,11 +114,12 @@ class CSV_Agents implements INode {
} else {
files = [fileName]
}
+ const orgId = options.orgId
const chatflowid = options.chatflowid

for (const file of files) {
if (!file) continue
- const fileData = await getFileFromStorage(file, chatflowid)
+ const fileData = await getFileFromStorage(file, orgId, chatflowid)
base64String += fileData.toString('base64')
}
} else {

@@ -170,7 +171,7 @@ json.dumps(my_dict)`
const chain = new LLMChain({
llm: model,
prompt: PromptTemplate.fromTemplate(systemPrompt),
- verbose: process.env.DEBUG === 'true'
+ verbose: process.env.DEBUG === 'true' ? true : false
})
const inputs = {
dict: dataframeColDict,

@@ -201,7 +202,7 @@ json.dumps(my_dict)`
prompt: PromptTemplate.fromTemplate(
systemMessagePrompt ? `${systemMessagePrompt}\n${finalSystemPrompt}` : finalSystemPrompt
),
- verbose: process.env.DEBUG === 'true'
+ verbose: process.env.DEBUG === 'true' ? true : false
})
const inputs = {
question: input,

@@ -132,7 +132,7 @@ class ConversationalAgent_Agents implements INode {
}
const executor = await prepareAgent(nodeData, options, { sessionId: this.sessionId, chatId: options.chatId, input })

- const loggerHandler = new ConsoleCallbackHandler(options.logger)
+ const loggerHandler = new ConsoleCallbackHandler(options.logger, options?.orgId)
const callbacks = await additionalCallbacks(nodeData, options)

let res: ChainValues = {}

@@ -5,7 +5,7 @@ import { RunnableSequence } from '@langchain/core/runnables'
import { BaseChatModel } from '@langchain/core/language_models/chat_models'
import { ChatPromptTemplate, MessagesPlaceholder, HumanMessagePromptTemplate, PromptTemplate } from '@langchain/core/prompts'
import { formatToOpenAIToolMessages } from 'langchain/agents/format_scratchpad/openai_tools'
- import { getBaseClasses, transformBracesWithColon } from '../../../src/utils'
+ import { getBaseClasses, transformBracesWithColon, convertChatHistoryToText, convertBaseMessagetoIMessage } from '../../../src/utils'
import { type ToolsAgentStep } from 'langchain/agents/openai/output_parser'
import {
FlowiseMemory,

@@ -23,8 +23,10 @@ import { Moderation, checkInputs, streamResponse } from '../../moderation/Modera
import { formatResponse } from '../../outputparsers/OutputParserHelpers'
import type { Document } from '@langchain/core/documents'
import { BaseRetriever } from '@langchain/core/retrievers'
- import { RESPONSE_TEMPLATE } from '../../chains/ConversationalRetrievalQAChain/prompts'
+ import { RESPONSE_TEMPLATE, REPHRASE_TEMPLATE } from '../../chains/ConversationalRetrievalQAChain/prompts'
import { addImagesToMessages, llmSupportsVision } from '../../../src/multiModalUtils'
+ import { StringOutputParser } from '@langchain/core/output_parsers'
+ import { Tool } from '@langchain/core/tools'

class ConversationalRetrievalToolAgent_Agents implements INode {
label: string

@@ -42,7 +44,7 @@ class ConversationalRetrievalToolAgent_Agents implements INode {
constructor(fields?: { sessionId?: string }) {
this.label = 'Conversational Retrieval Tool Agent'
this.name = 'conversationalRetrievalToolAgent'
- this.author = 'niztal(falkor)'
+ this.author = 'niztal(falkor) and nikitas-novatix'
this.version = 1.0
this.type = 'AgentExecutor'
this.category = 'Agents'

@@ -79,6 +81,26 @@ class ConversationalRetrievalToolAgent_Agents implements INode {
optional: true,
default: RESPONSE_TEMPLATE
},
+ {
+ label: 'Rephrase Prompt',
+ name: 'rephrasePrompt',
+ type: 'string',
+ description: 'Using previous chat history, rephrase question into a standalone question',
+ warning: 'Prompt must include input variables: {chat_history} and {question}',
+ rows: 4,
+ additionalParams: true,
+ optional: true,
+ default: REPHRASE_TEMPLATE
+ },
+ {
+ label: 'Rephrase Model',
+ name: 'rephraseModel',
+ type: 'BaseChatModel',
+ description:
+ 'Optional: Use a different (faster/cheaper) model for rephrasing. If not specified, uses the main Tool Calling Chat Model.',
+ optional: true,
+ additionalParams: true
+ },
{
label: 'Input Moderation',
description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',

@@ -103,8 +125,9 @@ class ConversationalRetrievalToolAgent_Agents implements INode {
this.sessionId = fields?.sessionId
}

- async init(nodeData: INodeData, input: string, options: ICommonObject): Promise<any> {
- return prepareAgent(nodeData, options, { sessionId: this.sessionId, chatId: options.chatId, input })
+ // The agent will be prepared in run() with the correct user message - it needs the actual runtime input for rephrasing
+ async init(_nodeData: INodeData, _input: string, _options: ICommonObject): Promise<any> {
+ return null
}

async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | ICommonObject> {

@@ -130,7 +153,7 @@ class ConversationalRetrievalToolAgent_Agents implements INode {

const executor = await prepareAgent(nodeData, options, { sessionId: this.sessionId, chatId: options.chatId, input })

- const loggerHandler = new ConsoleCallbackHandler(options.logger)
+ const loggerHandler = new ConsoleCallbackHandler(options.logger, options?.orgId)
const callbacks = await additionalCallbacks(nodeData, options)

let res: ChainValues = {}

@@ -148,6 +171,23 @@ class ConversationalRetrievalToolAgent_Agents implements INode {
sseStreamer.streamUsedToolsEvent(chatId, res.usedTools)
usedTools = res.usedTools
}

+ // If the tool is set to returnDirect, stream the output to the client
+ if (res.usedTools && res.usedTools.length) {
+ let inputTools = nodeData.inputs?.tools
+ inputTools = flatten(inputTools)
+ for (const tool of res.usedTools) {
+ const inputTool = inputTools.find((inputTool: Tool) => inputTool.name === tool.tool)
+ if (inputTool && (inputTool as any).returnDirect && shouldStreamResponse) {
+ sseStreamer.streamTokenEvent(chatId, tool.toolOutput)
+ // Prevent CustomChainHandler from streaming the same output again
+ if (res.output === tool.toolOutput) {
+ res.output = ''
+ }
+ }
+ }
+ }
+ // The CustomChainHandler will send the stream end event
} else {
res = await executor.invoke({ input }, { callbacks: [loggerHandler, ...callbacks] })
if (res.sourceDocuments) {

@@ -210,9 +250,11 @@ const prepareAgent = async (
flowObj: { sessionId?: string; chatId?: string; input?: string }
) => {
const model = nodeData.inputs?.model as BaseChatModel
+ const rephraseModel = (nodeData.inputs?.rephraseModel as BaseChatModel) || model // Use main model if not specified
const maxIterations = nodeData.inputs?.maxIterations as string
const memory = nodeData.inputs?.memory as FlowiseMemory
let systemMessage = nodeData.inputs?.systemMessage as string
+ let rephrasePrompt = nodeData.inputs?.rephrasePrompt as string
let tools = nodeData.inputs?.tools
tools = flatten(tools)
const memoryKey = memory.memoryKey ? memory.memoryKey : 'chat_history'

@@ -220,6 +262,9 @@ const prepareAgent = async (
const vectorStoreRetriever = nodeData.inputs?.vectorStoreRetriever as BaseRetriever

systemMessage = transformBracesWithColon(systemMessage)
+ if (rephrasePrompt) {
+ rephrasePrompt = transformBracesWithColon(rephrasePrompt)
+ }

const prompt = ChatPromptTemplate.fromMessages([
['system', systemMessage ? systemMessage : `You are a helpful AI assistant.`],

@@ -263,6 +308,37 @@ const prepareAgent = async (

const modelWithTools = model.bindTools(tools)

+ // Function to get standalone question (either rephrased or original)
+ const getStandaloneQuestion = async (input: string): Promise<string> => {
+ // If no rephrase prompt, return the original input
+ if (!rephrasePrompt) {
+ return input
+ }

+ // Get chat history (use empty string if none)
+ const messages = (await memory.getChatMessages(flowObj?.sessionId, true)) as BaseMessage[]
+ const iMessages = convertBaseMessagetoIMessage(messages)
+ const chatHistoryString = convertChatHistoryToText(iMessages)

+ // Always rephrase to normalize/expand user queries for better retrieval
+ try {
+ const CONDENSE_QUESTION_PROMPT = PromptTemplate.fromTemplate(rephrasePrompt)
+ const condenseQuestionChain = RunnableSequence.from([CONDENSE_QUESTION_PROMPT, rephraseModel, new StringOutputParser()])
+ const res = await condenseQuestionChain.invoke({
+ question: input,
+ chat_history: chatHistoryString
+ })
+ return res
+ } catch (error) {
+ console.error('Error rephrasing question:', error)
+ // On error, fall back to original input
+ return input
+ }
+ }

+ // Get standalone question before creating runnable
+ const standaloneQuestion = await getStandaloneQuestion(flowObj?.input || '')

const runnableAgent = RunnableSequence.from([
{
[inputKey]: (i: { input: string; steps: ToolsAgentStep[] }) => i.input,

@@ -272,7 +348,9 @@ const prepareAgent = async (
return messages ?? []
},
context: async (i: { input: string; chatHistory?: string }) => {
- const relevantDocs = await vectorStoreRetriever.invoke(i.input)
+ // Use the standalone question (rephrased or original) for retrieval
+ const retrievalQuery = standaloneQuestion || i.input
+ const relevantDocs = await vectorStoreRetriever.invoke(retrievalQuery)
const formattedDocs = formatDocs(relevantDocs)
return formattedDocs
}

@@ -288,11 +366,13 @@ const prepareAgent = async (
sessionId: flowObj?.sessionId,
chatId: flowObj?.chatId,
input: flowObj?.input,
- verbose: process.env.DEBUG === 'true',
+ verbose: process.env.DEBUG === 'true' ? true : false,
maxIterations: maxIterations ? parseFloat(maxIterations) : undefined
})

return executor
}

- module.exports = { nodeClass: ConversationalRetrievalToolAgent_Agents }
+ module.exports = {
+ nodeClass: ConversationalRetrievalToolAgent_Agents
+ }

@@ -2,6 +2,7 @@ import { flatten } from 'lodash'
import { MessageContentTextDetail, ChatMessage, AnthropicAgent, Anthropic } from 'llamaindex'
import { getBaseClasses } from '../../../../src/utils'
import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams, IUsedTool } from '../../../../src/Interface'
+ import { EvaluationRunTracerLlama } from '../../../../evaluation/EvaluationRunTracerLlama'

class AnthropicAgent_LlamaIndex_Agents implements INode {
label: string

@@ -96,13 +97,16 @@ class AnthropicAgent_LlamaIndex_Agents implements INode {
tools,
llm: model,
chatHistory: chatHistory,
- verbose: process.env.DEBUG === 'true'
+ verbose: process.env.DEBUG === 'true' ? true : false
})

+ // these are needed for evaluation runs
+ await EvaluationRunTracerLlama.injectEvaluationMetadata(nodeData, options, agent)

let text = ''
const usedTools: IUsedTool[] = []

- const response = await agent.chat({ message: input, chatHistory, verbose: process.env.DEBUG === 'true' })
+ const response = await agent.chat({ message: input, chatHistory, verbose: process.env.DEBUG === 'true' ? true : false })

if (response.sources.length) {
for (const sourceTool of response.sources) {

@@ -1,6 +1,7 @@
import { flatten } from 'lodash'
import { ChatMessage, OpenAI, OpenAIAgent } from 'llamaindex'
import { getBaseClasses } from '../../../../src/utils'
+ import { EvaluationRunTracerLlama } from '../../../../evaluation/EvaluationRunTracerLlama'
import {
FlowiseMemory,
ICommonObject,

@@ -107,9 +108,12 @@ class OpenAIFunctionAgent_LlamaIndex_Agents implements INode {
tools,
llm: model,
chatHistory: chatHistory,
- verbose: process.env.DEBUG === 'true'
+ verbose: process.env.DEBUG === 'true' ? true : false
})

+ // these are needed for evaluation runs
+ await EvaluationRunTracerLlama.injectEvaluationMetadata(nodeData, options, agent)

let text = ''
let isStreamingStarted = false
const usedTools: IUsedTool[] = []

@@ -119,10 +123,9 @@ class OpenAIFunctionAgent_LlamaIndex_Agents implements INode {
message: input,
chatHistory,
stream: true,
- verbose: process.env.DEBUG === 'true'
+ verbose: process.env.DEBUG === 'true' ? true : false
})
for await (const chunk of stream) {
- //console.log('chunk', chunk)
text += chunk.response.delta
if (!isStreamingStarted) {
isStreamingStarted = true

@@ -147,7 +150,7 @@ class OpenAIFunctionAgent_LlamaIndex_Agents implements INode {
}
}
} else {
- const response = await agent.chat({ message: input, chatHistory, verbose: process.env.DEBUG === 'true' })
+ const response = await agent.chat({ message: input, chatHistory, verbose: process.env.DEBUG === 'true' ? true : false })
if (response.sources.length) {
for (const sourceTool of response.sources) {
usedTools.push({

@@ -107,7 +107,11 @@ class OpenAIAssistant_Agents implements INode {
return returnData
}

- const assistants = await appDataSource.getRepository(databaseEntities['Assistant']).find()
+ const searchOptions = options.searchOptions || {}
+ const assistants = await appDataSource.getRepository(databaseEntities['Assistant']).findBy({
+ ...searchOptions,
+ type: 'OPENAI'
+ })

for (let i = 0; i < assistants.length; i += 1) {
const assistantDetails = JSON.parse(assistants[i].details)

@@ -130,13 +134,14 @@ class OpenAIAssistant_Agents implements INode {
const selectedAssistantId = nodeData.inputs?.selectedAssistant as string
const appDataSource = options.appDataSource as DataSource
const databaseEntities = options.databaseEntities as IDatabaseEntity
+ const orgId = options.orgId

const assistant = await appDataSource.getRepository(databaseEntities['Assistant']).findOneBy({
id: selectedAssistantId
})

if (!assistant) {
- options.logger.error(`Assistant ${selectedAssistantId} not found`)
+ options.logger.error(`[${orgId}]: Assistant ${selectedAssistantId} not found`)
return
}

@@ -149,7 +154,7 @@ class OpenAIAssistant_Agents implements INode {
chatId
})
if (!chatmsg) {
- options.logger.error(`Chat Message with Chat Id: ${chatId} not found`)
+ options.logger.error(`[${orgId}]: Chat Message with Chat Id: ${chatId} not found`)
return
}
sessionId = chatmsg.sessionId

@@ -160,21 +165,21 @@ class OpenAIAssistant_Agents implements INode {
const credentialData = await getCredentialData(assistant.credential ?? '', options)
const openAIApiKey = getCredentialParam('openAIApiKey', credentialData, nodeData)
if (!openAIApiKey) {
- options.logger.error(`OpenAI ApiKey not found`)
+ options.logger.error(`[${orgId}]: OpenAI ApiKey not found`)
return
}

const openai = new OpenAI({ apiKey: openAIApiKey })
- options.logger.info(`Clearing OpenAI Thread ${sessionId}`)
+ options.logger.info(`[${orgId}]: Clearing OpenAI Thread ${sessionId}`)
try {
if (sessionId && sessionId.startsWith('thread_')) {
await openai.beta.threads.del(sessionId)
- options.logger.info(`Successfully cleared OpenAI Thread ${sessionId}`)
+ options.logger.info(`[${orgId}]: Successfully cleared OpenAI Thread ${sessionId}`)
} else {
- options.logger.error(`Error clearing OpenAI Thread ${sessionId}`)
+ options.logger.error(`[${orgId}]: Error clearing OpenAI Thread ${sessionId}`)
}
} catch (e) {
- options.logger.error(`Error clearing OpenAI Thread ${sessionId}`)
+ options.logger.error(`[${orgId}]: Error clearing OpenAI Thread ${sessionId}`)
}
}

@@ -190,6 +195,17 @@ class OpenAIAssistant_Agents implements INode {
const shouldStreamResponse = options.shouldStreamResponse
const sseStreamer: IServerSideEventStreamer = options.sseStreamer as IServerSideEventStreamer
const chatId = options.chatId
+ const checkStorage = options.checkStorage
+ ? (options.checkStorage as (orgId: string, subscriptionId: string, usageCacheManager: any) => Promise<void>)
+ : undefined
+ const updateStorageUsage = options.updateStorageUsage
+ ? (options.updateStorageUsage as (
+ orgId: string,
+ workspaceId: string,
+ totalSize: number,
+ usageCacheManager: any
+ ) => Promise<void>)
+ : undefined

if (moderations && moderations.length > 0) {
try {

@@ -224,7 +240,7 @@ class OpenAIAssistant_Agents implements INode {
const openai = new OpenAI({ apiKey: openAIApiKey })

// Start analytics
- const analyticHandlers = new AnalyticHandler(nodeData, options)
+ const analyticHandlers = AnalyticHandler.getInstance(nodeData, options)
await analyticHandlers.init()
const parentIds = await analyticHandlers.onChainStart('OpenAIAssistant', input)

@@ -380,17 +396,30 @@ class OpenAIAssistant_Agents implements INode {
// eslint-disable-next-line no-useless-escape
const fileName = cited_file.filename.split(/[\/\\]/).pop() ?? cited_file.filename
if (!disableFileDownload) {
- filePath = await downloadFile(
+ if (checkStorage)
+ await checkStorage(options.orgId, options.subscriptionId, options.usageCacheManager)

+ const { path, totalSize } = await downloadFile(
openAIApiKey,
cited_file,
fileName,
+ options.orgId,
options.chatflowid,
options.chatId
)
+ filePath = path
fileAnnotations.push({
filePath,
fileName
})

+ if (updateStorageUsage)
+ await updateStorageUsage(
+ options.orgId,
+ options.workspaceId,
+ totalSize,
+ options.usageCacheManager
+ )
}
} else {
const file_path = (annotation as OpenAI.Beta.Threads.Messages.FilePathAnnotation).file_path

@@ -399,17 +428,30 @@ class OpenAIAssistant_Agents implements INode {
// eslint-disable-next-line no-useless-escape
const fileName = cited_file.filename.split(/[\/\\]/).pop() ?? cited_file.filename
if (!disableFileDownload) {
- filePath = await downloadFile(
+ if (checkStorage)
+ await checkStorage(options.orgId, options.subscriptionId, options.usageCacheManager)

+ const { path, totalSize } = await downloadFile(
openAIApiKey,
cited_file,
fileName,
+ options.orgId,
options.chatflowid,
options.chatId
)
+ filePath = path
fileAnnotations.push({
filePath,
fileName
})

+ if (updateStorageUsage)
+ await updateStorageUsage(
+ options.orgId,
+ options.workspaceId,
+ totalSize,
+ options.usageCacheManager
+ )
}
}
}

@@ -467,15 +509,21 @@ class OpenAIAssistant_Agents implements INode {
const fileId = chunk.image_file.file_id
const fileObj = await openai.files.retrieve(fileId)

- const filePath = await downloadImg(
+ if (checkStorage) await checkStorage(options.orgId, options.subscriptionId, options.usageCacheManager)

+ const { filePath, totalSize } = await downloadImg(
openai,
fileId,
`${fileObj.filename}.png`,
+ options.orgId,
options.chatflowid,
options.chatId
)
artifacts.push({ type: 'png', data: filePath })

+ if (updateStorageUsage)
+ await updateStorageUsage(options.orgId, options.workspaceId, totalSize, options.usageCacheManager)

if (!isStreamingStarted) {
isStreamingStarted = true
if (sseStreamer) {

@@ -530,7 +578,7 @@ class OpenAIAssistant_Agents implements INode {
toolOutput
})
} catch (e) {
- await analyticHandlers.onToolEnd(toolIds, e)
+ await analyticHandlers.onToolError(toolIds, e)
console.error('Error executing tool', e)
throw new Error(
`Error executing tool. Tool: ${tool.name}. Thread ID: ${threadId}. Run ID: ${runThreadId}`

@@ -655,7 +703,7 @@ class OpenAIAssistant_Agents implements INode {
toolOutput
})
} catch (e) {
- await analyticHandlers.onToolEnd(toolIds, e)
+ await analyticHandlers.onToolError(toolIds, e)
console.error('Error executing tool', e)
clearInterval(timeout)
reject(

@@ -743,7 +791,7 @@ class OpenAIAssistant_Agents implements INode {
state = await promise(threadId, newRunThread.id)
} else {
const errMsg = `Error processing thread: ${state}, Thread ID: ${threadId}`
- await analyticHandlers.onChainError(parentIds, errMsg)
+ await analyticHandlers.onChainError(parentIds, errMsg, true)
throw new Error(errMsg)
}
}

@@ -776,7 +824,21 @@ class OpenAIAssistant_Agents implements INode {
// eslint-disable-next-line no-useless-escape
const fileName = cited_file.filename.split(/[\/\\]/).pop() ?? cited_file.filename
if (!disableFileDownload) {
- filePath = await downloadFile(openAIApiKey, cited_file, fileName, options.chatflowid, options.chatId)
+ if (checkStorage) await checkStorage(options.orgId, options.subscriptionId, options.usageCacheManager)

+ const { path, totalSize } = await downloadFile(
+ openAIApiKey,
+ cited_file,
+ fileName,
+ options.orgId,
+ options.chatflowid,
+ options.chatId
+ )
+ filePath = path

+ if (updateStorageUsage)
+ await updateStorageUsage(options.orgId, options.workspaceId, totalSize, options.usageCacheManager)

fileAnnotations.push({
filePath,
fileName

|
@ -789,13 +851,27 @@ class OpenAIAssistant_Agents implements INode {
|
|||
// eslint-disable-next-line no-useless-escape
|
||||
const fileName = cited_file.filename.split(/[\/\\]/).pop() ?? cited_file.filename
|
||||
if (!disableFileDownload) {
|
||||
filePath = await downloadFile(
|
||||
if (checkStorage)
|
||||
await checkStorage(options.orgId, options.subscriptionId, options.usageCacheManager)
|
||||
|
||||
const { path, totalSize } = await downloadFile(
|
||||
openAIApiKey,
|
||||
cited_file,
|
||||
fileName,
|
||||
options.orgId,
|
||||
options.chatflowid,
|
||||
options.chatId
|
||||
)
|
||||
filePath = path
|
||||
|
||||
if (updateStorageUsage)
|
||||
await updateStorageUsage(
|
||||
options.orgId,
|
||||
options.workspaceId,
|
||||
totalSize,
|
||||
options.usageCacheManager
|
||||
)
|
||||
|
||||
fileAnnotations.push({
|
||||
filePath,
|
||||
fileName
|
||||
|
|
@ -822,7 +898,20 @@ class OpenAIAssistant_Agents implements INode {
|
|||
const fileId = content.image_file.file_id
|
||||
const fileObj = await openai.files.retrieve(fileId)
|
||||
|
||||
const filePath = await downloadImg(openai, fileId, `${fileObj.filename}.png`, options.chatflowid, options.chatId)
|
||||
if (checkStorage) await checkStorage(options.orgId, options.subscriptionId, options.usageCacheManager)
|
||||
|
||||
const { filePath, totalSize } = await downloadImg(
|
||||
openai,
|
||||
fileId,
|
||||
`${fileObj.filename}.png`,
|
||||
options.orgId,
|
||||
options.chatflowid,
|
||||
options.chatId
|
||||
)
|
||||
|
||||
if (updateStorageUsage)
|
||||
await updateStorageUsage(options.orgId, options.workspaceId, totalSize, options.usageCacheManager)
|
||||
|
||||
artifacts.push({ type: 'png', data: filePath })
|
||||
}
|
||||
}
|
||||
|
|
@ -847,7 +936,13 @@ class OpenAIAssistant_Agents implements INode {
|
|||
}
|
||||
}
|
||||
|
||||
const downloadImg = async (openai: OpenAI, fileId: string, fileName: string, ...paths: string[]) => {
|
||||
const downloadImg = async (
|
||||
openai: OpenAI,
|
||||
fileId: string,
|
||||
fileName: string,
|
||||
orgId: string,
|
||||
...paths: string[]
|
||||
): Promise<{ filePath: string; totalSize: number }> => {
|
||||
const response = await openai.files.content(fileId)
|
||||
|
||||
// Extract the binary data from the Response object
|
||||
|
|
@@ -857,12 +952,18 @@ const downloadImg = async (openai: OpenAI, fileId: string, fileName: string, ...
const image_data_buffer = Buffer.from(image_data)
const mime = 'image/png'

- const res = await addSingleFileToStorage(mime, image_data_buffer, fileName, ...paths)
+ const { path, totalSize } = await addSingleFileToStorage(mime, image_data_buffer, fileName, orgId, ...paths)

- return res
+ return { filePath: path, totalSize }
}

- const downloadFile = async (openAIApiKey: string, fileObj: any, fileName: string, ...paths: string[]) => {
+ const downloadFile = async (
+ openAIApiKey: string,
+ fileObj: any,
+ fileName: string,
+ orgId: string,
+ ...paths: string[]
+ ): Promise<{ path: string; totalSize: number }> => {
try {
const response = await fetch(`https://api.openai.com/v1/files/${fileObj.id}/content`, {
method: 'GET',

@@ -880,10 +981,12 @@ const downloadFile = async (openAIApiKey: string, fileObj: any, fileName: string
const data_buffer = Buffer.from(data)
const mime = 'application/octet-stream'

- return await addSingleFileToStorage(mime, data_buffer, fileName, ...paths)
+ const { path, totalSize } = await addSingleFileToStorage(mime, data_buffer, fileName, orgId, ...paths)

+ return { path, totalSize }
} catch (error) {
console.error('Error downloading or writing the file:', error)
- return ''
+ return { path: '', totalSize: 0 }
}
}

@@ -993,7 +1096,7 @@ async function handleToolSubmission(params: ToolSubmissionParams): Promise<ToolS
toolOutput
})
} catch (e) {
- await analyticHandlers.onToolEnd(toolIds, e)
+ await analyticHandlers.onToolError(toolIds, e)
console.error('Error executing tool', e)
throw new Error(`Error executing tool. Tool: ${tool.name}. Thread ID: ${threadId}. Run ID: ${runThreadId}`)
}

@@ -97,7 +97,7 @@ class ReActAgentLLM_Agents implements INode {
const executor = new AgentExecutor({
agent,
tools,
- verbose: process.env.DEBUG === 'true',
+ verbose: process.env.DEBUG === 'true' ? true : false,
maxIterations: maxIterations ? parseFloat(maxIterations) : undefined
})

Some files were not shown because too many files have changed in this diff.