Compare commits
1 Commits
main ... release/3.

| Author | SHA1       | Date |
| ------ | ---------- | ---- |
|        | ab10d823e7 |      |
@@ -1,72 +0,0 @@

```yaml
name: Docker Image CI - Docker Hub

on:
    workflow_dispatch:
        inputs:
            node_version:
                description: 'Node.js version to build this image with.'
                type: choice
                required: true
                default: '20'
                options:
                    - '20'
            tag_version:
                description: 'Tag version of the image to be pushed.'
                type: string
                required: true
                default: 'latest'

jobs:
    docker:
        runs-on: ubuntu-latest
        steps:
            - name: Set default values
              id: defaults
              run: |
                  echo "node_version=${{ github.event.inputs.node_version || '20' }}" >> $GITHUB_OUTPUT
                  echo "tag_version=${{ github.event.inputs.tag_version || 'latest' }}" >> $GITHUB_OUTPUT

            - name: Checkout
              uses: actions/checkout@v4.1.1

            - name: Set up QEMU
              uses: docker/setup-qemu-action@v3.0.0

            - name: Set up Docker Buildx
              uses: docker/setup-buildx-action@v3.0.0

            - name: Login to Docker Hub
              uses: docker/login-action@v3
              with:
                  username: ${{ secrets.DOCKERHUB_USERNAME }}
                  password: ${{ secrets.DOCKERHUB_TOKEN }}

            # -------------------------
            # Build and push main image
            # -------------------------
            - name: Build and push main image
              uses: docker/build-push-action@v5.3.0
              with:
                  context: .
                  file: ./docker/Dockerfile
                  build-args: |
                      NODE_VERSION=${{ steps.defaults.outputs.node_version }}
                  platforms: linux/amd64,linux/arm64
                  push: true
                  tags: |
                      flowiseai/flowise:${{ steps.defaults.outputs.tag_version }}

            # -------------------------
            # Build and push worker image
            # -------------------------
            - name: Build and push worker image
              uses: docker/build-push-action@v5.3.0
              with:
                  context: .
                  file: docker/worker/Dockerfile
                  build-args: |
                      NODE_VERSION=${{ steps.defaults.outputs.node_version }}
                  platforms: linux/amd64,linux/arm64
                  push: true
                  tags: |
                      flowiseai/flowise-worker:${{ steps.defaults.outputs.tag_version }}
```
@@ -1,73 +0,0 @@

```yaml
name: Docker Image CI - AWS ECR

on:
    workflow_dispatch:
        inputs:
            environment:
                description: 'Environment to push the image to.'
                required: true
                default: 'dev'
                type: choice
                options:
                    - dev
                    - prod
            node_version:
                description: 'Node.js version to build this image with.'
                type: choice
                required: true
                default: '20'
                options:
                    - '20'
            tag_version:
                description: 'Tag version of the image to be pushed.'
                type: string
                required: true
                default: 'latest'

jobs:
    docker:
        runs-on: ubuntu-latest
        environment: ${{ github.event.inputs.environment }}
        steps:
            - name: Set default values
              id: defaults
              run: |
                  echo "node_version=${{ github.event.inputs.node_version || '20' }}" >> $GITHUB_OUTPUT
                  echo "tag_version=${{ github.event.inputs.tag_version || 'latest' }}" >> $GITHUB_OUTPUT

            - name: Checkout
              uses: actions/checkout@v4.1.1

            - name: Set up QEMU
              uses: docker/setup-qemu-action@v3.0.0

            - name: Set up Docker Buildx
              uses: docker/setup-buildx-action@v3.0.0

            - name: Configure AWS Credentials
              uses: aws-actions/configure-aws-credentials@v3
              with:
                  aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
                  aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
                  aws-region: ${{ secrets.AWS_REGION }}

            - name: Login to Amazon ECR
              uses: aws-actions/amazon-ecr-login@v1

            # -------------------------
            # Build and push main image
            # -------------------------
            - name: Build and push main image
              uses: docker/build-push-action@v5.3.0
              with:
                  context: .
                  file: Dockerfile
                  build-args: |
                      NODE_VERSION=${{ steps.defaults.outputs.node_version }}
                  platforms: linux/amd64,linux/arm64
                  push: true
                  tags: |
                      ${{ format('{0}.dkr.ecr.{1}.amazonaws.com/flowise:{2}',
                      secrets.AWS_ACCOUNT_ID,
                      secrets.AWS_REGION,
                      steps.defaults.outputs.tag_version) }}
```
@@ -0,0 +1,114 @@

```yaml
name: Docker Image CI

on:
    workflow_dispatch:
        inputs:
            registry:
                description: 'Container Registry to push the image to.'
                type: choice
                required: true
                default: 'aws_ecr'
                options:
                    - 'docker_hub'
                    - 'aws_ecr'
            environment:
                description: 'Environment to push the image to.'
                required: true
                default: 'dev'
                type: choice
                options:
                    - dev
                    - prod
            image_type:
                description: 'Type of image to build and push.'
                type: choice
                required: true
                default: 'main'
                options:
                    - 'main'
                    - 'worker'
            node_version:
                description: 'Node.js version to build this image with.'
                type: choice
                required: true
                default: '20'
                options:
                    - '20'
            tag_version:
                description: 'Tag version of the image to be pushed.'
                type: string
                required: true
                default: 'latest'

jobs:
    docker:
        runs-on: ubuntu-latest
        environment: ${{ github.event.inputs.environment }}
        steps:
            - name: Set default values
              id: defaults
              run: |
                  echo "registry=${{ github.event.inputs.registry || 'aws_ecr' }}" >> $GITHUB_OUTPUT
                  echo "image_type=${{ github.event.inputs.image_type || 'main' }}" >> $GITHUB_OUTPUT
                  echo "node_version=${{ github.event.inputs.node_version || '20' }}" >> $GITHUB_OUTPUT
                  echo "tag_version=${{ github.event.inputs.tag_version || 'latest' }}" >> $GITHUB_OUTPUT

            - name: Checkout
              uses: actions/checkout@v4.1.1

            - name: Set up QEMU
              uses: docker/setup-qemu-action@v3.0.0

            - name: Set up Docker Buildx
              uses: docker/setup-buildx-action@v3.0.0

            # ------------------------
            # Login Steps (conditional)
            # ------------------------
            - name: Login to Docker Hub
              if: steps.defaults.outputs.registry == 'docker_hub'
              uses: docker/login-action@v3
              with:
                  username: ${{ secrets.DOCKERHUB_USERNAME }}
                  password: ${{ secrets.DOCKERHUB_TOKEN }}

            - name: Configure AWS Credentials
              if: steps.defaults.outputs.registry == 'aws_ecr'
              uses: aws-actions/configure-aws-credentials@v3
              with:
                  aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
                  aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
                  aws-region: ${{ secrets.AWS_REGION }}

            - name: Login to Amazon ECR
              if: steps.defaults.outputs.registry == 'aws_ecr'
              uses: aws-actions/amazon-ecr-login@v1

            # -------------------------
            # Build and push (conditional tags)
            # -------------------------
            - name: Build and push
              uses: docker/build-push-action@v5.3.0
              with:
                  context: .
                  file: |
                      ${{
                          steps.defaults.outputs.image_type == 'worker' && 'docker/worker/Dockerfile' ||
                          (steps.defaults.outputs.registry == 'docker_hub' && './docker/Dockerfile' || 'Dockerfile')
                      }}
                  build-args: |
                      NODE_VERSION=${{ steps.defaults.outputs.node_version }}
                  platforms: linux/amd64,linux/arm64
                  push: true
                  tags: |
                      ${{
                          steps.defaults.outputs.registry == 'docker_hub' &&
                          format('flowiseai/flowise{0}:{1}',
                              steps.defaults.outputs.image_type == 'worker' && '-worker' || '',
                              steps.defaults.outputs.tag_version) ||
                          format('{0}.dkr.ecr.{1}.amazonaws.com/flowise{2}:{3}',
                              secrets.AWS_ACCOUNT_ID,
                              secrets.AWS_REGION,
                              steps.defaults.outputs.image_type == 'worker' && '-worker' || '',
                              steps.defaults.outputs.tag_version)
                      }}
```
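The combined workflow above folds the two registry-specific workflows into a single `workflow_dispatch` entry point: the `registry` and `image_type` inputs gate the conditional login steps and select the Dockerfile and image tag through `&&`/`||` expression chains, the usual way to emulate a ternary inside `${{ }}` expressions. A minimal sketch of dispatching it from the GitHub CLI follows; the workflow file name `docker-image.yml` is an assumption, so substitute the actual file under `.github/workflows/`.

```bash
# Minimal sketch: trigger the combined workflow via workflow_dispatch from the CLI.
# Assumption: the workflow is saved as .github/workflows/docker-image.yml in this repo.
gh workflow run docker-image.yml \
  --repo FlowiseAI/Flowise \
  -f registry=docker_hub \
  -f environment=dev \
  -f image_type=worker \
  -f node_version=20 \
  -f tag_version=latest
```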
Dockerfile (39 changed lines)

@@ -5,41 +5,34 @@

```
# docker run -d -p 3000:3000 flowise

FROM node:20-alpine
RUN apk add --update libc6-compat python3 make g++
# needed for pdfjs-dist
RUN apk add --no-cache build-base cairo-dev pango-dev

# Install system dependencies and build tools
RUN apk update && \
    apk add --no-cache \
    libc6-compat \
    python3 \
    make \
    g++ \
    build-base \
    cairo-dev \
    pango-dev \
    chromium \
    curl && \
    npm install -g pnpm
# Install Chromium
RUN apk add --no-cache chromium

# Install curl for container-level health checks
# Fixes: https://github.com/FlowiseAI/Flowise/issues/4126
RUN apk add --no-cache curl

#install PNPM globaly
RUN npm install -g pnpm

ENV PUPPETEER_SKIP_DOWNLOAD=true
ENV PUPPETEER_EXECUTABLE_PATH=/usr/bin/chromium-browser

ENV NODE_OPTIONS=--max-old-space-size=8192

WORKDIR /usr/src/flowise
WORKDIR /usr/src

# Copy app source
COPY . .

# Install dependencies and build
RUN pnpm install && \
    pnpm build
RUN pnpm install

# Give the node user ownership of the application files
RUN chown -R node:node .

# Switch to non-root user (node user already exists in node:20-alpine)
USER node
RUN pnpm build

EXPOSE 3000

CMD [ "pnpm", "start" ]
CMD [ "pnpm", "start" ]
```
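Both versions of this Dockerfile build the main Flowise server image from the repository root and expose it on port 3000, as the `docker run -d -p 3000:3000 flowise` comment at the top of the file indicates. A minimal local sketch, using the `flowise` tag from that comment:

```bash
# Minimal local sketch, following the "docker run -d -p 3000:3000 flowise"
# comment at the top of the Dockerfile. Run from the repository root.
docker build -t flowise .

# Start the container in the background and expose the Flowise UI/API on port 3000.
docker run -d -p 3000:3000 flowise
```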
@@ -189,7 +189,7 @@ Deploy Flowise self-hosted in your existing infrastructure, we support various [

```
-   [Railway](https://docs.flowiseai.com/configuration/deployment/railway)

    [](https://railway.app/template/pn4G8S?referralCode=WVNPD9)

-   [Northflank](https://northflank.com/stacks/deploy-flowiseai)

    [](https://northflank.com/stacks/deploy-flowiseai)
```
SECURITY.md (58 changed lines)

@@ -1,38 +1,38 @@

```
### Responsible Disclosure Policy

At Flowise, we prioritize security and continuously work to safeguard our systems. However, vulnerabilities can still exist. If you identify a security issue, please report it to us so we can address it promptly. Your cooperation helps us better protect our platform and users.

### Out of scope vulnerabilities

-   Clickjacking on pages without sensitive actions
-   CSRF on unauthenticated/logout/login pages
-   Attacks requiring MITM (Man-in-the-Middle) or physical device access
-   Social engineering attacks
-   Activities that cause service disruption (DoS)
-   Content spoofing and text injection without a valid attack vector
-   Email spoofing
-   Absence of DNSSEC, CAA, CSP headers
-   Missing Secure or HTTP-only flag on non-sensitive cookies
-   Deadlinks
-   User enumeration

### Reporting Guidelines

-   Submit your findings to https://github.com/FlowiseAI/Flowise/security
-   Provide clear details to help us reproduce and fix the issue quickly.

### Disclosure Guidelines

-   Do not publicly disclose vulnerabilities until we have assessed, resolved, and notified affected users.
-   If you plan to present your research (e.g., at a conference or in a blog), share a draft with us at least **30 days in advance** for review.
-   Avoid including:
    -   Data from any Flowise customer projects
    -   Flowise user/customer information
    -   Details about Flowise employees, contractors, or partners

### Response to Reports

-   We will acknowledge your report within **5 business days** and provide an estimated resolution timeline.
-   Your report will be kept **confidential**, and your details will not be shared without your consent.

We appreciate your efforts in helping us maintain a secure platform and look forward to working together to resolve any issues responsibly.
```
@@ -7,7 +7,7 @@ RUN apk add --no-cache build-base cairo-dev pango-dev

```
# Install Chromium and curl for container-level health checks
RUN apk add --no-cache chromium curl

#install PNPM globally
#install PNPM globaly
RUN npm install -g pnpm

ENV PUPPETEER_SKIP_DOWNLOAD=true
```
@@ -1,6 +1,6 @@

```
{
    "name": "flowise",
    "version": "3.0.11",
    "version": "3.0.9",
    "private": true,
    "homepage": "https://flowiseai.com",
    "workspaces": [
```

@@ -51,7 +51,7 @@

```
    "eslint-plugin-react-hooks": "^4.6.0",
    "eslint-plugin-unused-imports": "^2.0.0",
    "husky": "^8.0.1",
    "kill-port": "2.0.1",
    "kill-port": "^2.0.1",
    "lint-staged": "^13.0.3",
    "prettier": "^2.7.1",
    "pretty-quick": "^3.1.3",
```
@@ -3,13 +3,6 @@

```
{
    "name": "awsChatBedrock",
    "models": [
        {
            "label": "anthropic.claude-opus-4-5-20251101-v1:0",
            "name": "anthropic.claude-opus-4-5-20251101-v1:0",
            "description": "Claude 4.5 Opus",
            "input_cost": 0.000005,
            "output_cost": 0.000025
        },
        {
            "label": "anthropic.claude-sonnet-4-5-20250929-v1:0",
            "name": "anthropic.claude-sonnet-4-5-20250929-v1:0",
```

@@ -322,12 +315,6 @@

```
{
    "name": "azureChatOpenAI",
    "models": [
        {
            "label": "gpt-5.1",
            "name": "gpt-5.1",
            "input_cost": 0.00000125,
            "output_cost": 0.00001
        },
        {
            "label": "gpt-5",
            "name": "gpt-5",
```

@@ -512,13 +499,6 @@

```
{
    "name": "chatAnthropic",
    "models": [
        {
            "label": "claude-opus-4-5",
            "name": "claude-opus-4-5",
            "description": "Claude 4.5 Opus",
            "input_cost": 0.000005,
            "output_cost": 0.000025
        },
        {
            "label": "claude-sonnet-4-5",
            "name": "claude-sonnet-4-5",
```

@@ -641,18 +621,6 @@

```
{
    "name": "chatGoogleGenerativeAI",
    "models": [
        {
            "label": "gemini-3-pro-preview",
            "name": "gemini-3-pro-preview",
            "input_cost": 0.00002,
            "output_cost": 0.00012
        },
        {
            "label": "gemini-3-pro-image-preview",
            "name": "gemini-3-pro-image-preview",
            "input_cost": 0.00002,
            "output_cost": 0.00012
        },
        {
            "label": "gemini-2.5-pro",
            "name": "gemini-2.5-pro",
```

@@ -665,12 +633,6 @@

```
            "input_cost": 1.25e-6,
            "output_cost": 0.00001
        },
        {
            "label": "gemini-2.5-flash-image",
            "name": "gemini-2.5-flash-image",
            "input_cost": 1.25e-6,
            "output_cost": 0.00001
        },
        {
            "label": "gemini-2.5-flash-lite",
            "name": "gemini-2.5-flash-lite",
```

@@ -723,12 +685,6 @@

```
{
    "name": "chatGoogleVertexAI",
    "models": [
        {
            "label": "gemini-3-pro-preview",
            "name": "gemini-3-pro-preview",
            "input_cost": 0.00002,
            "output_cost": 0.00012
        },
        {
            "label": "gemini-2.5-pro",
            "name": "gemini-2.5-pro",
```

@@ -795,13 +751,6 @@

```
            "input_cost": 1.25e-7,
            "output_cost": 3.75e-7
        },
        {
            "label": "claude-opus-4-5@20251101",
            "name": "claude-opus-4-5@20251101",
            "description": "Claude 4.5 Opus",
            "input_cost": 0.000005,
            "output_cost": 0.000025
        },
        {
            "label": "claude-sonnet-4-5@20250929",
            "name": "claude-sonnet-4-5@20250929",
```

@@ -1047,12 +996,6 @@

```
{
    "name": "chatOpenAI",
    "models": [
        {
            "label": "gpt-5.1",
            "name": "gpt-5.1",
            "input_cost": 0.00000125,
            "output_cost": 0.00001
        },
        {
            "label": "gpt-5",
            "name": "gpt-5",
```
````
@@ -22,16 +22,15 @@ import zodToJsonSchema from 'zod-to-json-schema'
import { getErrorMessage } from '../../../src/error'
import { DataSource } from 'typeorm'
import {
    addImageArtifactsToMessages,
    extractArtifactsFromResponse,
    getPastChatHistoryImageMessages,
    getUniqueImageMessages,
    processMessagesWithImages,
    replaceBase64ImagesWithFileReferences,
    replaceInlineDataWithFileReferences,
    updateFlowState
} from '../utils'
import { convertMultiOptionsToStringArray, processTemplateVariables, configureStructuredOutput } from '../../../src/utils'
import { convertMultiOptionsToStringArray, getCredentialData, getCredentialParam, processTemplateVariables } from '../../../src/utils'
import { addSingleFileToStorage } from '../../../src/storageUtils'
import fetch from 'node-fetch'

interface ITool {
    agentSelectedTool: string

@@ -82,7 +81,7 @@ class Agent_Agentflow implements INode {
constructor() {
    this.label = 'Agent'
    this.name = 'agentAgentflow'
    this.version = 3.2
    this.version = 2.2
    this.type = 'Agent'
    this.category = 'Agent Flows'
    this.description = 'Dynamically choose and utilize tools during runtime, enabling multi-step reasoning'

@@ -177,11 +176,6 @@ class Agent_Agentflow implements INode {
    label: 'Google Search',
    name: 'googleSearch',
    description: 'Search real-time web content'
},
{
    label: 'Code Execution',
    name: 'codeExecution',
    description: 'Write and run Python code in a sandboxed environment'
}
],
show: {

@@ -400,108 +394,6 @@ class Agent_Agentflow implements INode {
    ],
    default: 'userMessage'
},
{
    label: 'JSON Structured Output',
    name: 'agentStructuredOutput',
    description: 'Instruct the Agent to give output in a JSON structured schema',
    type: 'array',
    optional: true,
    acceptVariable: true,
    array: [
        {
            label: 'Key',
            name: 'key',
            type: 'string'
        },
        {
            label: 'Type',
            name: 'type',
            type: 'options',
            options: [
                {
                    label: 'String',
                    name: 'string'
                },
                {
                    label: 'String Array',
                    name: 'stringArray'
                },
                {
                    label: 'Number',
                    name: 'number'
                },
                {
                    label: 'Boolean',
                    name: 'boolean'
                },
                {
                    label: 'Enum',
                    name: 'enum'
                },
                {
                    label: 'JSON Array',
                    name: 'jsonArray'
                }
            ]
        },
        {
            label: 'Enum Values',
            name: 'enumValues',
            type: 'string',
            placeholder: 'value1, value2, value3',
            description: 'Enum values. Separated by comma',
            optional: true,
            show: {
                'agentStructuredOutput[$index].type': 'enum'
            }
        },
        {
            label: 'JSON Schema',
            name: 'jsonSchema',
            type: 'code',
            placeholder: `{
    "answer": {
        "type": "string",
        "description": "Value of the answer"
    },
    "reason": {
        "type": "string",
        "description": "Reason for the answer"
    },
    "optional": {
        "type": "boolean"
    },
    "count": {
        "type": "number"
    },
    "children": {
        "type": "array",
        "items": {
            "type": "object",
            "properties": {
                "value": {
                    "type": "string",
                    "description": "Value of the children's answer"
                }
            }
        }
    }
}`,
            description: 'JSON schema for the structured output',
            optional: true,
            hideCodeExecute: true,
            show: {
                'agentStructuredOutput[$index].type': 'jsonArray'
            }
        },
        {
            label: 'Description',
            name: 'description',
            type: 'string',
            placeholder: 'Description of the key'
        }
    ]
},
{
    label: 'Update Flow State',
    name: 'agentUpdateState',

@@ -514,7 +406,8 @@ class Agent_Agentflow implements INode {
    label: 'Key',
    name: 'key',
    type: 'asyncOptions',
    loadMethod: 'listRuntimeStateKeys'
    loadMethod: 'listRuntimeStateKeys',
    freeSolo: true
},
{
    label: 'Value',

@@ -877,7 +770,6 @@ class Agent_Agentflow implements INode {
const memoryType = nodeData.inputs?.agentMemoryType as string
const userMessage = nodeData.inputs?.agentUserMessage as string
const _agentUpdateState = nodeData.inputs?.agentUpdateState
const _agentStructuredOutput = nodeData.inputs?.agentStructuredOutput
const agentMessages = (nodeData.inputs?.agentMessages as unknown as ILLMMessage[]) ?? []

// Extract runtime state and history

@@ -903,8 +795,6 @@ class Agent_Agentflow implements INode {
const llmWithoutToolsBind = (await newLLMNodeInstance.init(newNodeData, '', options)) as BaseChatModel
let llmNodeInstance = llmWithoutToolsBind

const isStructuredOutput = _agentStructuredOutput && Array.isArray(_agentStructuredOutput) && _agentStructuredOutput.length > 0

const agentToolsBuiltInOpenAI = convertMultiOptionsToStringArray(nodeData.inputs?.agentToolsBuiltInOpenAI)
if (agentToolsBuiltInOpenAI && agentToolsBuiltInOpenAI.length > 0) {
    for (const tool of agentToolsBuiltInOpenAI) {

@@ -1063,7 +953,7 @@ class Agent_Agentflow implements INode {
// Initialize response and determine if streaming is possible
let response: AIMessageChunk = new AIMessageChunk('')
const isLastNode = options.isLastNode as boolean
const isStreamable = isLastNode && options.sseStreamer !== undefined && modelConfig?.streaming !== false && !isStructuredOutput
const isStreamable = isLastNode && options.sseStreamer !== undefined && modelConfig?.streaming !== false

// Start analytics
if (analyticHandlers && options.parentTraceIds) {

@@ -1071,6 +961,12 @@ class Agent_Agentflow implements INode {
    llmIds = await analyticHandlers.onLLMStart(llmLabel, messages, options.parentTraceIds)
}

// Track execution time
const startTime = Date.now()

// Get initial response from LLM
const sseStreamer: IServerSideEventStreamer | undefined = options.sseStreamer

// Handle tool calls with support for recursion
let usedTools: IUsedTool[] = []
let sourceDocuments: Array<any> = []

@@ -1083,24 +979,12 @@ class Agent_Agentflow implements INode {
const messagesBeforeToolCalls = [...messages]
let _toolCallMessages: BaseMessageLike[] = []

/**
 * Add image artifacts from previous assistant responses as user messages
 * Images are converted from FILE-STORAGE::<image_path> to base 64 image_url format
 */
await addImageArtifactsToMessages(messages, options)

// Check if this is hummanInput for tool calls
const _humanInput = nodeData.inputs?.humanInput
const humanInput: IHumanInput = typeof _humanInput === 'string' ? JSON.parse(_humanInput) : _humanInput
const humanInputAction = options.humanInputAction
const iterationContext = options.iterationContext

// Track execution time
const startTime = Date.now()

// Get initial response from LLM
const sseStreamer: IServerSideEventStreamer | undefined = options.sseStreamer

if (humanInput) {
    if (humanInput.type !== 'proceed' && humanInput.type !== 'reject') {
        throw new Error(`Invalid human input type. Expected 'proceed' or 'reject', but got '${humanInput.type}'`)

@@ -1118,8 +1002,7 @@ class Agent_Agentflow implements INode {
    llmWithoutToolsBind,
    isStreamable,
    isLastNode,
    iterationContext,
    isStructuredOutput
    iterationContext
})

response = result.response

@@ -1148,14 +1031,7 @@ class Agent_Agentflow implements INode {
    }
} else {
    if (isStreamable) {
        response = await this.handleStreamingResponse(
            sseStreamer,
            llmNodeInstance,
            messages,
            chatId,
            abortController,
            isStructuredOutput
        )
        response = await this.handleStreamingResponse(sseStreamer, llmNodeInstance, messages, chatId, abortController)
    } else {
        response = await llmNodeInstance.invoke(messages, { signal: abortController?.signal })
    }

@@ -1177,8 +1053,7 @@ class Agent_Agentflow implements INode {
    llmNodeInstance,
    isStreamable,
    isLastNode,
    iterationContext,
    isStructuredOutput
    iterationContext
})

response = result.response

@@ -1205,20 +1080,11 @@ class Agent_Agentflow implements INode {
            sseStreamer.streamArtifactsEvent(chatId, flatten(artifacts))
        }
    }
} else if (!humanInput && !isStreamable && isLastNode && sseStreamer && !isStructuredOutput) {
} else if (!humanInput && !isStreamable && isLastNode && sseStreamer) {
    // Stream whole response back to UI if not streaming and no tool calls
    // Skip this if structured output is enabled - it will be streamed after conversion
    let finalResponse = ''
    if (response.content && Array.isArray(response.content)) {
        finalResponse = response.content
            .map((item: any) => {
                if ((item.text && !item.type) || (item.type === 'text' && item.text)) {
                    return item.text
                }
                return ''
            })
            .filter((text: string) => text)
            .join('\n')
        finalResponse = response.content.map((item: any) => item.text).join('\n')
    } else if (response.content && typeof response.content === 'string') {
        finalResponse = response.content
    } else {

@@ -1247,53 +1113,9 @@ class Agent_Agentflow implements INode {
// Prepare final response and output object
let finalResponse = ''
if (response.content && Array.isArray(response.content)) {
    // Process items and concatenate consecutive text items
    const processedParts: string[] = []
    let currentTextBuffer = ''

    for (const item of response.content) {
        const itemAny = item as any
        const isTextItem = (itemAny.text && !itemAny.type) || (itemAny.type === 'text' && itemAny.text)

        if (isTextItem) {
            // Accumulate consecutive text items
            currentTextBuffer += itemAny.text
        } else {
            // Flush accumulated text before processing other types
            if (currentTextBuffer) {
                processedParts.push(currentTextBuffer)
                currentTextBuffer = ''
            }

            // Process non-text items
            if (itemAny.type === 'executableCode' && itemAny.executableCode) {
                // Format executable code as a code block
                const language = itemAny.executableCode.language?.toLowerCase() || 'python'
                processedParts.push(`\n\`\`\`${language}\n${itemAny.executableCode.code}\n\`\`\`\n`)
            } else if (itemAny.type === 'codeExecutionResult' && itemAny.codeExecutionResult) {
                // Format code execution result
                const outcome = itemAny.codeExecutionResult.outcome || 'OUTCOME_OK'
                const output = itemAny.codeExecutionResult.output || ''
                if (outcome === 'OUTCOME_OK' && output) {
                    processedParts.push(`**Code Output:**\n\`\`\`\n${output}\n\`\`\`\n`)
                } else if (outcome !== 'OUTCOME_OK') {
                    processedParts.push(`**Code Execution Error:**\n\`\`\`\n${output}\n\`\`\`\n`)
                }
            }
        }
    }

    // Flush any remaining text
    if (currentTextBuffer) {
        processedParts.push(currentTextBuffer)
    }

    finalResponse = processedParts.filter((text) => text).join('\n')
    finalResponse = response.content.map((item: any) => item.text).join('\n')
} else if (response.content && typeof response.content === 'string') {
    finalResponse = response.content
} else if (response.content === '') {
    // Empty response content, this could happen when there is only image data
    finalResponse = ''
} else {
    finalResponse = JSON.stringify(response, null, 2)
}

@@ -1309,13 +1131,10 @@ class Agent_Agentflow implements INode {
        }
    }

// Extract artifacts from annotations in response metadata and replace inline data
// Extract artifacts from annotations in response metadata
if (response.response_metadata) {
    const {
        artifacts: extractedArtifacts,
        fileAnnotations: extractedFileAnnotations,
        savedInlineImages
    } = await extractArtifactsFromResponse(response.response_metadata, newNodeData, options)
    const { artifacts: extractedArtifacts, fileAnnotations: extractedFileAnnotations } =
        await this.extractArtifactsFromResponse(response.response_metadata, newNodeData, options)
    if (extractedArtifacts.length > 0) {
        artifacts = [...artifacts, ...extractedArtifacts]

@@ -1333,11 +1152,6 @@ class Agent_Agentflow implements INode {
            sseStreamer.streamFileAnnotationsEvent(chatId, fileAnnotations)
        }
    }

    // Replace inlineData base64 with file references in the response
    if (savedInlineImages && savedInlineImages.length > 0) {
        replaceInlineDataWithFileReferences(response, savedInlineImages)
    }
}

// Replace sandbox links with proper download URLs. Example: [Download the script](sandbox:/mnt/data/dummy_bar_graph.py)

@@ -1345,23 +1159,6 @@ class Agent_Agentflow implements INode {
    finalResponse = await this.processSandboxLinks(finalResponse, options.baseURL, options.chatflowid, chatId)
}

// If is structured output, then invoke LLM again with structured output at the very end after all tool calls
if (isStructuredOutput) {
    llmNodeInstance = configureStructuredOutput(llmNodeInstance, _agentStructuredOutput)
    const prompt = 'Convert the following response to the structured output format: ' + finalResponse
    response = await llmNodeInstance.invoke(prompt, { signal: abortController?.signal })

    if (typeof response === 'object') {
        finalResponse = '```json\n' + JSON.stringify(response, null, 2) + '\n```'
    } else {
        finalResponse = response
    }

    if (isLastNode && sseStreamer) {
        sseStreamer.streamTokenEvent(chatId, finalResponse)
    }
}

const output = this.prepareOutputObject(
    response,
    availableTools,

@@ -1374,8 +1171,7 @@ class Agent_Agentflow implements INode {
    artifacts,
    additionalTokens,
    isWaitingForHumanInput,
    fileAnnotations,
    isStructuredOutput
    fileAnnotations
)

// End analytics tracking

@@ -1396,15 +1192,9 @@ class Agent_Agentflow implements INode {
// Process template variables in state
newState = processTemplateVariables(newState, finalResponse)

/**
 * Remove the temporarily added image artifact messages before storing
 * This is to avoid storing the actual base64 data into database
 */
const messagesToStore = messages.filter((msg: any) => !msg._isTemporaryImageMessage)

// Replace the actual messages array with one that includes the file references for images instead of base64 data
const messagesWithFileReferences = replaceBase64ImagesWithFileReferences(
    messagesToStore,
    messages,
    runtimeImageMessagesWithFileRef,
    pastImageMessagesWithFileRef
)

@@ -1543,12 +1333,7 @@ class Agent_Agentflow implements INode {
// Handle Gemini googleSearch tool
if (groundingMetadata && groundingMetadata.webSearchQueries && Array.isArray(groundingMetadata.webSearchQueries)) {
    // Check for duplicates
    const isDuplicate = builtInUsedTools.find(
        (tool) =>
            tool.tool === 'googleSearch' &&
            JSON.stringify((tool.toolInput as any)?.queries) === JSON.stringify(groundingMetadata.webSearchQueries)
    )
    if (!isDuplicate) {
    if (!builtInUsedTools.find((tool) => tool.tool === 'googleSearch')) {
        builtInUsedTools.push({
            tool: 'googleSearch',
            toolInput: {

@@ -1562,12 +1347,7 @@ class Agent_Agentflow implements INode {
// Handle Gemini urlContext tool
if (urlContextMetadata && urlContextMetadata.urlMetadata && Array.isArray(urlContextMetadata.urlMetadata)) {
    // Check for duplicates
    const isDuplicate = builtInUsedTools.find(
        (tool) =>
            tool.tool === 'urlContext' &&
            JSON.stringify((tool.toolInput as any)?.urlMetadata) === JSON.stringify(urlContextMetadata.urlMetadata)
    )
    if (!isDuplicate) {
    if (!builtInUsedTools.find((tool) => tool.tool === 'urlContext')) {
        builtInUsedTools.push({
            tool: 'urlContext',
            toolInput: {

@@ -1578,55 +1358,47 @@ class Agent_Agentflow implements INode {
        }
    }

    // Handle Gemini codeExecution tool
    if (response.content && Array.isArray(response.content)) {
        for (let i = 0; i < response.content.length; i++) {
            const item = response.content[i]

            if (item.type === 'executableCode' && item.executableCode) {
                const language = item.executableCode.language || 'PYTHON'
                const code = item.executableCode.code || ''
                let toolOutput = ''

                // Check for duplicates
                const isDuplicate = builtInUsedTools.find(
                    (tool) =>
                        tool.tool === 'codeExecution' &&
                        (tool.toolInput as any)?.language === language &&
                        (tool.toolInput as any)?.code === code
                )
                if (isDuplicate) {
                    continue
                }

                // Check the next item for the output
                const nextItem = i + 1 < response.content.length ? response.content[i + 1] : null

                if (nextItem) {
                    if (nextItem.type === 'codeExecutionResult' && nextItem.codeExecutionResult) {
                        const outcome = nextItem.codeExecutionResult.outcome
                        const output = nextItem.codeExecutionResult.output || ''
                        toolOutput = outcome === 'OUTCOME_OK' ? output : `Error: ${output}`
                    } else if (nextItem.type === 'inlineData') {
                        toolOutput = 'Generated image data'
                    }
                }

                builtInUsedTools.push({
                    tool: 'codeExecution',
                    toolInput: {
                        language,
                        code
                    },
                    toolOutput
                })
            }
        }
    }

    return builtInUsedTools
}

/**
 * Saves base64 image data to storage and returns file information
 */
private async saveBase64Image(
    outputItem: any,
    options: ICommonObject
): Promise<{ filePath: string; fileName: string; totalSize: number } | null> {
    try {
        if (!outputItem.result) {
            return null
        }

        // Extract base64 data and create buffer
        const base64Data = outputItem.result
        const imageBuffer = Buffer.from(base64Data, 'base64')

        // Determine file extension and MIME type
        const outputFormat = outputItem.output_format || 'png'
        const fileName = `generated_image_${outputItem.id || Date.now()}.${outputFormat}`
        const mimeType = outputFormat === 'png' ? 'image/png' : 'image/jpeg'

        // Save the image using the existing storage utility
        const { path, totalSize } = await addSingleFileToStorage(
            mimeType,
            imageBuffer,
            fileName,
            options.orgId,
            options.chatflowid,
            options.chatId
        )

        return { filePath: path, fileName, totalSize }
    } catch (error) {
        console.error('Error saving base64 image:', error)
        return null
    }
}

/**
 * Handles memory management based on the specified memory type
 */

@@ -1789,62 +1561,32 @@ class Agent_Agentflow implements INode {
    llmNodeInstance: BaseChatModel,
    messages: BaseMessageLike[],
    chatId: string,
    abortController: AbortController,
    isStructuredOutput: boolean = false
    abortController: AbortController
): Promise<AIMessageChunk> {
    let response = new AIMessageChunk('')

    try {
        for await (const chunk of await llmNodeInstance.stream(messages, { signal: abortController?.signal })) {
            if (sseStreamer && !isStructuredOutput) {
            if (sseStreamer) {
                let content = ''

                if (typeof chunk === 'string') {
                    content = chunk
                } else if (Array.isArray(chunk.content) && chunk.content.length > 0) {
                    content = chunk.content
                        .map((item: any) => {
                            if ((item.text && !item.type) || (item.type === 'text' && item.text)) {
                                return item.text
                            } else if (item.type === 'executableCode' && item.executableCode) {
                                const language = item.executableCode.language?.toLowerCase() || 'python'
                                return `\n\`\`\`${language}\n${item.executableCode.code}\n\`\`\`\n`
                            } else if (item.type === 'codeExecutionResult' && item.codeExecutionResult) {
                                const outcome = item.codeExecutionResult.outcome || 'OUTCOME_OK'
                                const output = item.codeExecutionResult.output || ''
                                if (outcome === 'OUTCOME_OK' && output) {
                                    return `**Code Output:**\n\`\`\`\n${output}\n\`\`\`\n`
                                } else if (outcome !== 'OUTCOME_OK') {
                                    return `**Code Execution Error:**\n\`\`\`\n${output}\n\`\`\`\n`
                                }
                            }
                            return ''
                        })
                        .filter((text: string) => text)
                        .join('')
                } else if (chunk.content) {
                if (Array.isArray(chunk.content) && chunk.content.length > 0) {
                    const contents = chunk.content as MessageContentText[]
                    content = contents.map((item) => item.text).join('')
                } else {
                    content = chunk.content.toString()
                }
                sseStreamer.streamTokenEvent(chatId, content)
            }

            const messageChunk = typeof chunk === 'string' ? new AIMessageChunk(chunk) : chunk
            response = response.concat(messageChunk)
            response = response.concat(chunk)
        }
    } catch (error) {
        console.error('Error during streaming:', error)
        throw error
    }

    // Only convert to string if all content items are text (no inlineData or other special types)
    if (Array.isArray(response.content) && response.content.length > 0) {
        const hasNonTextContent = response.content.some(
            (item: any) => item.type === 'inlineData' || item.type === 'executableCode' || item.type === 'codeExecutionResult'
        )
        if (!hasNonTextContent) {
            const responseContents = response.content as MessageContentText[]
            response.content = responseContents.map((item) => item.text).join('')
        }
        const responseContents = response.content as MessageContentText[]
        response.content = responseContents.map((item) => item.text).join('')
    }
    return response
}

@@ -1864,8 +1606,7 @@ class Agent_Agentflow implements INode {
    artifacts: any[],
    additionalTokens: number = 0,
    isWaitingForHumanInput: boolean = false,
    fileAnnotations: any[] = [],
    isStructuredOutput: boolean = false
    fileAnnotations: any[] = []
): any {
    const output: any = {
        content: finalResponse,

@@ -1900,15 +1641,6 @@ class Agent_Agentflow implements INode {
    output.responseMetadata = response.response_metadata
}

if (isStructuredOutput && typeof response === 'object') {
    const structuredOutput = response as Record<string, any>
    for (const key in structuredOutput) {
        if (structuredOutput[key] !== undefined && structuredOutput[key] !== null) {
            output[key] = structuredOutput[key]
        }
    }
}

// Add used tools, source documents and artifacts to output
if (usedTools && usedTools.length > 0) {
    output.usedTools = flatten(usedTools)

@@ -1974,8 +1706,7 @@ class Agent_Agentflow implements INode {
    llmNodeInstance,
    isStreamable,
    isLastNode,
    iterationContext,
    isStructuredOutput = false
    iterationContext
}: {
    response: AIMessageChunk
    messages: BaseMessageLike[]

@@ -1989,7 +1720,6 @@ class Agent_Agentflow implements INode {
    isStreamable: boolean
    isLastNode: boolean
    iterationContext: ICommonObject
    isStructuredOutput?: boolean
}): Promise<{
    response: AIMessageChunk
    usedTools: IUsedTool[]

@@ -2069,9 +1799,7 @@ class Agent_Agentflow implements INode {
    const toolCallDetails = '```json\n' + JSON.stringify(toolCall, null, 2) + '\n```'
    const responseContent = response.content + `\nAttempting to use tool:\n${toolCallDetails}`
    response.content = responseContent
    if (!isStructuredOutput) {
        sseStreamer?.streamTokenEvent(chatId, responseContent)
    }
    sseStreamer?.streamTokenEvent(chatId, responseContent)
    return { response, usedTools, sourceDocuments, artifacts, totalTokens, isWaitingForHumanInput: true }
}

@@ -2177,7 +1905,7 @@ class Agent_Agentflow implements INode {
const lastToolOutput = usedTools[0]?.toolOutput || ''
const lastToolOutputString = typeof lastToolOutput === 'string' ? lastToolOutput : JSON.stringify(lastToolOutput, null, 2)

if (sseStreamer && !isStructuredOutput) {
if (sseStreamer) {
    sseStreamer.streamTokenEvent(chatId, lastToolOutputString)
}

@@ -2206,19 +1934,12 @@ class Agent_Agentflow implements INode {
let newResponse: AIMessageChunk

if (isStreamable) {
    newResponse = await this.handleStreamingResponse(
        sseStreamer,
        llmNodeInstance,
        messages,
        chatId,
        abortController,
        isStructuredOutput
    )
    newResponse = await this.handleStreamingResponse(sseStreamer, llmNodeInstance, messages, chatId, abortController)
} else {
    newResponse = await llmNodeInstance.invoke(messages, { signal: abortController?.signal })

    // Stream non-streaming response if this is the last node
    if (isLastNode && sseStreamer && !isStructuredOutput) {
    if (isLastNode && sseStreamer) {
        let responseContent = JSON.stringify(newResponse, null, 2)
        if (typeof newResponse.content === 'string') {
            responseContent = newResponse.content

@@ -2253,8 +1974,7 @@ class Agent_Agentflow implements INode {
    llmNodeInstance,
    isStreamable,
    isLastNode,
    iterationContext,
    isStructuredOutput
    iterationContext
})

// Merge results from recursive tool calls

@@ -2285,8 +2005,7 @@ class Agent_Agentflow implements INode {
    llmWithoutToolsBind,
    isStreamable,
    isLastNode,
    iterationContext,
    isStructuredOutput = false
    iterationContext
}: {
    humanInput: IHumanInput
    humanInputAction: Record<string, any> | undefined

@@ -2301,7 +2020,6 @@ class Agent_Agentflow implements INode {
    isStreamable: boolean
    isLastNode: boolean
    iterationContext: ICommonObject
    isStructuredOutput?: boolean
}): Promise<{
    response: AIMessageChunk
    usedTools: IUsedTool[]

@@ -2504,7 +2222,7 @@ class Agent_Agentflow implements INode {
const lastToolOutput = usedTools[0]?.toolOutput || ''
const lastToolOutputString = typeof lastToolOutput === 'string' ? lastToolOutput : JSON.stringify(lastToolOutput, null, 2)

if (sseStreamer && !isStructuredOutput) {
if (sseStreamer) {
    sseStreamer.streamTokenEvent(chatId, lastToolOutputString)
}

@@ -2535,19 +2253,12 @@ class Agent_Agentflow implements INode {
}

if (isStreamable) {
    newResponse = await this.handleStreamingResponse(
        sseStreamer,
        llmNodeInstance,
        messages,
        chatId,
        abortController,
        isStructuredOutput
    )
    newResponse = await this.handleStreamingResponse(sseStreamer, llmNodeInstance, messages, chatId, abortController)
} else {
    newResponse = await llmNodeInstance.invoke(messages, { signal: abortController?.signal })

    // Stream non-streaming response if this is the last node
    if (isLastNode && sseStreamer && !isStructuredOutput) {
    if (isLastNode && sseStreamer) {
        let responseContent = JSON.stringify(newResponse, null, 2)
        if (typeof newResponse.content === 'string') {
            responseContent = newResponse.content

@@ -2582,8 +2293,7 @@ class Agent_Agentflow implements INode {
    llmNodeInstance,
    isStreamable,
    isLastNode,
    iterationContext,
    isStructuredOutput
    iterationContext
})

// Merge results from recursive tool calls

@@ -2598,6 +2308,190 @@ class Agent_Agentflow implements INode {
    return { response: newResponse, usedTools, sourceDocuments, artifacts, totalTokens, isWaitingForHumanInput }
}

/**
 * Extracts artifacts from response metadata (both annotations and built-in tools)
 */
private async extractArtifactsFromResponse(
    responseMetadata: any,
    modelNodeData: INodeData,
    options: ICommonObject
): Promise<{ artifacts: any[]; fileAnnotations: any[] }> {
    const artifacts: any[] = []
    const fileAnnotations: any[] = []

    if (!responseMetadata?.output || !Array.isArray(responseMetadata.output)) {
        return { artifacts, fileAnnotations }
    }

    for (const outputItem of responseMetadata.output) {
        // Handle container file citations from annotations
        if (outputItem.type === 'message' && outputItem.content && Array.isArray(outputItem.content)) {
            for (const contentItem of outputItem.content) {
                if (contentItem.annotations && Array.isArray(contentItem.annotations)) {
                    for (const annotation of contentItem.annotations) {
                        if (annotation.type === 'container_file_citation' && annotation.file_id && annotation.filename) {
                            try {
                                // Download and store the file content
                                const downloadResult = await this.downloadContainerFile(
                                    annotation.container_id,
                                    annotation.file_id,
                                    annotation.filename,
                                    modelNodeData,
                                    options
                                )

                                if (downloadResult) {
                                    const fileType = this.getArtifactTypeFromFilename(annotation.filename)

                                    if (fileType === 'png' || fileType === 'jpeg' || fileType === 'jpg') {
                                        const artifact = {
                                            type: fileType,
                                            data: downloadResult.filePath
                                        }

                                        artifacts.push(artifact)
                                    } else {
                                        fileAnnotations.push({
                                            filePath: downloadResult.filePath,
                                            fileName: annotation.filename
                                        })
                                    }
                                }
                            } catch (error) {
                                console.error('Error processing annotation:', error)
                            }
                        }
                    }
                }
            }
        }

        // Handle built-in tool artifacts (like image generation)
        if (outputItem.type === 'image_generation_call' && outputItem.result) {
            try {
                const savedImageResult = await this.saveBase64Image(outputItem, options)
                if (savedImageResult) {
                    // Replace the base64 result with the file path in the response metadata
                    outputItem.result = savedImageResult.filePath

                    // Create artifact in the same format as other image artifacts
                    const fileType = this.getArtifactTypeFromFilename(savedImageResult.fileName)
                    artifacts.push({
                        type: fileType,
                        data: savedImageResult.filePath
                    })
                }
            } catch (error) {
                console.error('Error processing image generation artifact:', error)
            }
        }
    }

    return { artifacts, fileAnnotations }
}

/**
 * Downloads file content from container file citation
 */
private async downloadContainerFile(
    containerId: string,
    fileId: string,
    filename: string,
    modelNodeData: INodeData,
    options: ICommonObject
): Promise<{ filePath: string; totalSize: number } | null> {
    try {
        const credentialData = await getCredentialData(modelNodeData.credential ?? '', options)
        const openAIApiKey = getCredentialParam('openAIApiKey', credentialData, modelNodeData)

        if (!openAIApiKey) {
            console.warn('No OpenAI API key available for downloading container file')
            return null
        }

        // Download the file using OpenAI Container API
        const response = await fetch(`https://api.openai.com/v1/containers/${containerId}/files/${fileId}/content`, {
            method: 'GET',
            headers: {
                Accept: '*/*',
                Authorization: `Bearer ${openAIApiKey}`
            }
        })

        if (!response.ok) {
            console.warn(
                `Failed to download container file ${fileId} from container ${containerId}: ${response.status} ${response.statusText}`
            )
            return null
        }

        // Extract the binary data from the Response object
        const data = await response.arrayBuffer()
        const dataBuffer = Buffer.from(data)
        const mimeType = this.getMimeTypeFromFilename(filename)

        // Store the file using the same storage utility as OpenAIAssistant
        const { path, totalSize } = await addSingleFileToStorage(
            mimeType,
            dataBuffer,
            filename,
            options.orgId,
            options.chatflowid,
            options.chatId
        )

        return { filePath: path, totalSize }
    } catch (error) {
        console.error('Error downloading container file:', error)
        return null
    }
}
````
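The `downloadContainerFile` helper above retrieves a cited file from the OpenAI Containers API and stores it with `addSingleFileToStorage`. For debugging, the same HTTP request can be reproduced directly; a rough sketch, where `CONTAINER_ID` and `FILE_ID` are hypothetical placeholders taken from a `container_file_citation` annotation:

```bash
# Rough sketch of the same request downloadContainerFile makes.
# CONTAINER_ID and FILE_ID are hypothetical placeholders from a
# container_file_citation annotation; OPENAI_API_KEY must be set.
curl -sS "https://api.openai.com/v1/containers/$CONTAINER_ID/files/$FILE_ID/content" \
  -H "Accept: */*" \
  -H "Authorization: Bearer $OPENAI_API_KEY" \
  -o downloaded_file
```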
````
/**
 * Gets MIME type from filename extension
 */
private getMimeTypeFromFilename(filename: string): string {
    const extension = filename.toLowerCase().split('.').pop()
    const mimeTypes: { [key: string]: string } = {
        png: 'image/png',
        jpg: 'image/jpeg',
        jpeg: 'image/jpeg',
        gif: 'image/gif',
        pdf: 'application/pdf',
        txt: 'text/plain',
        csv: 'text/csv',
        json: 'application/json',
        html: 'text/html',
        xml: 'application/xml'
    }
    return mimeTypes[extension || ''] || 'application/octet-stream'
}

/**
 * Gets artifact type from filename extension for UI rendering
 */
private getArtifactTypeFromFilename(filename: string): string {
    const extension = filename.toLowerCase().split('.').pop()
    const artifactTypes: { [key: string]: string } = {
        png: 'png',
        jpg: 'jpeg',
        jpeg: 'jpeg',
        html: 'html',
        htm: 'html',
        md: 'markdown',
        markdown: 'markdown',
        json: 'json',
        js: 'javascript',
        javascript: 'javascript',
        tex: 'latex',
        latex: 'latex',
        txt: 'text',
        csv: 'text',
        pdf: 'text'
    }
    return artifactTypes[extension || ''] || 'text'
}

/**
 * Processes sandbox links in the response text and converts them to file annotations
 */
````
````
@@ -317,7 +317,7 @@ class Condition_Agentflow implements INode {
        }
    }

    // If no condition is fulfilled, add isFulfilled to the ELSE condition
    // If no condition is fullfilled, add isFulfilled to the ELSE condition
    const dummyElseConditionData = {
        type: 'string',
        value1: '',
````
````
@@ -60,7 +60,7 @@ class CustomFunction_Agentflow implements INode {
constructor() {
    this.label = 'Custom Function'
    this.name = 'customFunctionAgentflow'
    this.version = 1.1
    this.version = 1.0
    this.type = 'CustomFunction'
    this.category = 'Agent Flows'
    this.description = 'Execute custom function'

@@ -107,7 +107,8 @@ class CustomFunction_Agentflow implements INode {
    label: 'Key',
    name: 'key',
    type: 'asyncOptions',
    loadMethod: 'listRuntimeStateKeys'
    loadMethod: 'listRuntimeStateKeys',
    freeSolo: true
},
{
    label: 'Value',

@@ -133,7 +134,7 @@ class CustomFunction_Agentflow implements INode {

async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<any> {
    const javascriptFunction = nodeData.inputs?.customFunctionJavascriptFunction as string
    const functionInputVariables = (nodeData.inputs?.customFunctionInputVariables as ICustomFunctionInputVariables[]) ?? []
    const functionInputVariables = nodeData.inputs?.customFunctionInputVariables as ICustomFunctionInputVariables[]
    const _customFunctionUpdateState = nodeData.inputs?.customFunctionUpdateState

    const state = options.agentflowRuntime?.state as ICommonObject

@@ -146,17 +147,11 @@ class CustomFunction_Agentflow implements INode {

    const variables = await getVars(appDataSource, databaseEntities, nodeData, options)
    const flow = {
        input,
        state,
        chatflowId: options.chatflowid,
        sessionId: options.sessionId,
        chatId: options.chatId,
        rawOutput: options.postProcessing?.rawOutput || '',
        chatHistory: options.postProcessing?.chatHistory || [],
        sourceDocuments: options.postProcessing?.sourceDocuments,
        usedTools: options.postProcessing?.usedTools,
        artifacts: options.postProcessing?.artifacts,
        fileAnnotations: options.postProcessing?.fileAnnotations
        input,
        state
    }

    // Create additional sandbox variables for custom function inputs
````
@ -30,7 +30,7 @@ class ExecuteFlow_Agentflow implements INode {
|
|||
constructor() {
|
||||
this.label = 'Execute Flow'
|
||||
this.name = 'executeFlowAgentflow'
|
||||
this.version = 1.2
|
||||
this.version = 1.1
|
||||
this.type = 'ExecuteFlow'
|
||||
this.category = 'Agent Flows'
|
||||
this.description = 'Execute another flow'
|
||||
|
|
@@ -102,7 +102,8 @@ class ExecuteFlow_Agentflow implements INode {
                label: 'Key',
                name: 'key',
                type: 'asyncOptions',
-                loadMethod: 'listRuntimeStateKeys'
+                loadMethod: 'listRuntimeStateKeys',
+                freeSolo: true
            },
            {
                label: 'Value',
@@ -241,11 +241,8 @@ class HumanInput_Agentflow implements INode {
        if (isStreamable) {
            const sseStreamer: IServerSideEventStreamer = options.sseStreamer as IServerSideEventStreamer
            for await (const chunk of await llmNodeInstance.stream(messages)) {
-                const content = typeof chunk === 'string' ? chunk : chunk.content.toString()
-                sseStreamer.streamTokenEvent(chatId, content)
-
-                const messageChunk = typeof chunk === 'string' ? new AIMessageChunk(chunk) : chunk
-                response = response.concat(messageChunk)
+                sseStreamer.streamTokenEvent(chatId, chunk.content.toString())
+                response = response.concat(chunk)
            }
            humanInputDescription = response.content as string
        } else {
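For reference, a minimal sketch (assuming the @langchain/core AIMessageChunk API) of the defensive accumulation pattern the removed lines implement, wrapping plain-string chunks so providers that stream raw strings do not break the concat:

import { AIMessageChunk } from '@langchain/core/messages'

// Accumulate a streamed response whether the provider yields AIMessageChunk objects or plain strings.
// llmNodeInstance and messages come from the surrounding node context.
let response = new AIMessageChunk('')
for await (const chunk of await llmNodeInstance.stream(messages)) {
    const messageChunk = typeof chunk === 'string' ? new AIMessageChunk(chunk) : chunk
    response = response.concat(messageChunk)
}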
@@ -2,19 +2,17 @@ import { BaseChatModel } from '@langchain/core/language_models/chat_models'
import { ICommonObject, IMessage, INode, INodeData, INodeOptionsValue, INodeParams, IServerSideEventStreamer } from '../../../src/Interface'
import { AIMessageChunk, BaseMessageLike, MessageContentText } from '@langchain/core/messages'
import { DEFAULT_SUMMARIZER_TEMPLATE } from '../prompt'
import { z } from 'zod'
import { AnalyticHandler } from '../../../src/handler'
-import { ILLMMessage } from '../Interface.Agentflow'
+import { ILLMMessage, IStructuredOutput } from '../Interface.Agentflow'
import {
-    addImageArtifactsToMessages,
-    extractArtifactsFromResponse,
    getPastChatHistoryImageMessages,
    getUniqueImageMessages,
    processMessagesWithImages,
    replaceBase64ImagesWithFileReferences,
-    replaceInlineDataWithFileReferences,
    updateFlowState
} from '../utils'
-import { processTemplateVariables, configureStructuredOutput } from '../../../src/utils'
+import { processTemplateVariables } from '../../../src/utils'
import { flatten } from 'lodash'

class LLM_Agentflow implements INode {
@@ -34,7 +32,7 @@ class LLM_Agentflow implements INode {
    constructor() {
        this.label = 'LLM'
        this.name = 'llmAgentflow'
-        this.version = 1.1
+        this.version = 1.0
        this.type = 'LLM'
        this.category = 'Agent Flows'
        this.description = 'Large language models to analyze user-provided inputs and generate responses'
@@ -290,7 +288,8 @@ class LLM_Agentflow implements INode {
                label: 'Key',
                name: 'key',
                type: 'asyncOptions',
-                loadMethod: 'listRuntimeStateKeys'
+                loadMethod: 'listRuntimeStateKeys',
+                freeSolo: true
            },
            {
                label: 'Value',
@@ -450,16 +449,10 @@ class LLM_Agentflow implements INode {
        }
        delete nodeData.inputs?.llmMessages

-        /**
-         * Add image artifacts from previous assistant responses as user messages
-         * Images are converted from FILE-STORAGE::<image_path> to base 64 image_url format
-         */
-        await addImageArtifactsToMessages(messages, options)
-
        // Configure structured output if specified
        const isStructuredOutput = _llmStructuredOutput && Array.isArray(_llmStructuredOutput) && _llmStructuredOutput.length > 0
        if (isStructuredOutput) {
-            llmNodeInstance = configureStructuredOutput(llmNodeInstance, _llmStructuredOutput)
+            llmNodeInstance = this.configureStructuredOutput(llmNodeInstance, _llmStructuredOutput)
        }

        // Initialize response and determine if streaming is possible
@ -475,11 +468,9 @@ class LLM_Agentflow implements INode {
|
|||
|
||||
// Track execution time
|
||||
const startTime = Date.now()
|
||||
|
||||
const sseStreamer: IServerSideEventStreamer | undefined = options.sseStreamer
|
||||
|
||||
/*
|
||||
* Invoke LLM
|
||||
*/
|
||||
if (isStreamable) {
|
||||
response = await this.handleStreamingResponse(sseStreamer, llmNodeInstance, messages, chatId, abortController)
|
||||
} else {
|
||||
|
|
@ -504,40 +495,6 @@ class LLM_Agentflow implements INode {
|
|||
const endTime = Date.now()
|
||||
const timeDelta = endTime - startTime
|
||||
|
||||
// Extract artifacts and file annotations from response metadata
|
||||
let artifacts: any[] = []
|
||||
let fileAnnotations: any[] = []
|
||||
if (response.response_metadata) {
|
||||
const {
|
||||
artifacts: extractedArtifacts,
|
||||
fileAnnotations: extractedFileAnnotations,
|
||||
savedInlineImages
|
||||
} = await extractArtifactsFromResponse(response.response_metadata, newNodeData, options)
|
||||
|
||||
if (extractedArtifacts.length > 0) {
|
||||
artifacts = extractedArtifacts
|
||||
|
||||
// Stream artifacts if this is the last node
|
||||
if (isLastNode && sseStreamer) {
|
||||
sseStreamer.streamArtifactsEvent(chatId, artifacts)
|
||||
}
|
||||
}
|
||||
|
||||
if (extractedFileAnnotations.length > 0) {
|
||||
fileAnnotations = extractedFileAnnotations
|
||||
|
||||
// Stream file annotations if this is the last node
|
||||
if (isLastNode && sseStreamer) {
|
||||
sseStreamer.streamFileAnnotationsEvent(chatId, fileAnnotations)
|
||||
}
|
||||
}
|
||||
|
||||
// Replace inlineData base64 with file references in the response
|
||||
if (savedInlineImages && savedInlineImages.length > 0) {
|
||||
replaceInlineDataWithFileReferences(response, savedInlineImages)
|
||||
}
|
||||
}
|
||||
|
||||
// Update flow state if needed
|
||||
let newState = { ...state }
|
||||
if (_llmUpdateState && Array.isArray(_llmUpdateState) && _llmUpdateState.length > 0) {
|
||||
|
|
@ -557,22 +514,10 @@ class LLM_Agentflow implements INode {
|
|||
finalResponse = response.content.map((item: any) => item.text).join('\n')
|
||||
} else if (response.content && typeof response.content === 'string') {
|
||||
finalResponse = response.content
|
||||
} else if (response.content === '') {
|
||||
// Empty response content, this could happen when there is only image data
|
||||
finalResponse = ''
|
||||
} else {
|
||||
finalResponse = JSON.stringify(response, null, 2)
|
||||
}
|
||||
const output = this.prepareOutputObject(
|
||||
response,
|
||||
finalResponse,
|
||||
startTime,
|
||||
endTime,
|
||||
timeDelta,
|
||||
isStructuredOutput,
|
||||
artifacts,
|
||||
fileAnnotations
|
||||
)
|
||||
const output = this.prepareOutputObject(response, finalResponse, startTime, endTime, timeDelta, isStructuredOutput)
|
||||
|
||||
// End analytics tracking
|
||||
if (analyticHandlers && llmIds) {
|
||||
|
|
@ -584,23 +529,12 @@ class LLM_Agentflow implements INode {
|
|||
this.sendStreamingEvents(options, chatId, response)
|
||||
}
|
||||
|
||||
// Stream file annotations if any were extracted
|
||||
if (fileAnnotations.length > 0 && isLastNode && sseStreamer) {
|
||||
sseStreamer.streamFileAnnotationsEvent(chatId, fileAnnotations)
|
||||
}
|
||||
|
||||
// Process template variables in state
|
||||
newState = processTemplateVariables(newState, finalResponse)
|
||||
|
||||
/**
|
||||
* Remove the temporarily added image artifact messages before storing
|
||||
* This is to avoid storing the actual base64 data into database
|
||||
*/
|
||||
const messagesToStore = messages.filter((msg: any) => !msg._isTemporaryImageMessage)
|
||||
|
||||
// Replace the actual messages array with one that includes the file references for images instead of base64 data
|
||||
const messagesWithFileReferences = replaceBase64ImagesWithFileReferences(
|
||||
messagesToStore,
|
||||
messages,
|
||||
runtimeImageMessagesWithFileRef,
|
||||
pastImageMessagesWithFileRef
|
||||
)
|
||||
|
|
@ -651,13 +585,7 @@ class LLM_Agentflow implements INode {
|
|||
{
|
||||
role: returnRole,
|
||||
content: finalResponse,
|
||||
name: nodeData?.label ? nodeData?.label.toLowerCase().replace(/\s/g, '_').trim() : nodeData?.id,
|
||||
...(((artifacts && artifacts.length > 0) || (fileAnnotations && fileAnnotations.length > 0)) && {
|
||||
additional_kwargs: {
|
||||
...(artifacts && artifacts.length > 0 && { artifacts }),
|
||||
...(fileAnnotations && fileAnnotations.length > 0 && { fileAnnotations })
|
||||
}
|
||||
})
|
||||
name: nodeData?.label ? nodeData?.label.toLowerCase().replace(/\s/g, '_').trim() : nodeData?.id
|
||||
}
|
||||
]
|
||||
}
|
||||
|
|
@ -827,6 +755,59 @@ class LLM_Agentflow implements INode {
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Configures structured output for the LLM
|
||||
*/
|
||||
private configureStructuredOutput(llmNodeInstance: BaseChatModel, llmStructuredOutput: IStructuredOutput[]): BaseChatModel {
|
||||
try {
|
||||
const zodObj: ICommonObject = {}
|
||||
for (const sch of llmStructuredOutput) {
|
||||
if (sch.type === 'string') {
|
||||
zodObj[sch.key] = z.string().describe(sch.description || '')
|
||||
} else if (sch.type === 'stringArray') {
|
||||
zodObj[sch.key] = z.array(z.string()).describe(sch.description || '')
|
||||
} else if (sch.type === 'number') {
|
||||
zodObj[sch.key] = z.number().describe(sch.description || '')
|
||||
} else if (sch.type === 'boolean') {
|
||||
zodObj[sch.key] = z.boolean().describe(sch.description || '')
|
||||
} else if (sch.type === 'enum') {
|
||||
const enumValues = sch.enumValues?.split(',').map((item: string) => item.trim()) || []
|
||||
zodObj[sch.key] = z
|
||||
.enum(enumValues.length ? (enumValues as [string, ...string[]]) : ['default'])
|
||||
.describe(sch.description || '')
|
||||
} else if (sch.type === 'jsonArray') {
|
||||
const jsonSchema = sch.jsonSchema
|
||||
if (jsonSchema) {
|
||||
try {
|
||||
// Parse the JSON schema
|
||||
const schemaObj = JSON.parse(jsonSchema)
|
||||
|
||||
// Create a Zod schema from the JSON schema
|
||||
const itemSchema = this.createZodSchemaFromJSON(schemaObj)
|
||||
|
||||
// Create an array schema of the item schema
|
||||
zodObj[sch.key] = z.array(itemSchema).describe(sch.description || '')
|
||||
} catch (err) {
|
||||
console.error(`Error parsing JSON schema for ${sch.key}:`, err)
|
||||
// Fallback to generic array of records
|
||||
zodObj[sch.key] = z.array(z.record(z.any())).describe(sch.description || '')
|
||||
}
|
||||
} else {
|
||||
// If no schema provided, use generic array of records
|
||||
zodObj[sch.key] = z.array(z.record(z.any())).describe(sch.description || '')
|
||||
}
|
||||
}
|
||||
}
|
||||
const structuredOutput = z.object(zodObj)
|
||||
|
||||
// @ts-ignore
|
||||
return llmNodeInstance.withStructuredOutput(structuredOutput)
|
||||
} catch (exception) {
|
||||
console.error(exception)
|
||||
return llmNodeInstance
|
||||
}
|
||||
}
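For orientation, a minimal hedged sketch of what the Zod object built by this method looks like once handed to withStructuredOutput; the field names are illustrative and not taken from this diff:

import { z } from 'zod'

// Equivalent of two structured-output rows: { key: 'city', type: 'string' } and { key: 'tags', type: 'stringArray' }
const structuredOutput = z.object({
    city: z.string().describe('Name of the city'),
    tags: z.array(z.string()).describe('Related tags')
})

// withStructuredOutput makes the model return a parsed object instead of raw text
const structuredLLM = llmNodeInstance.withStructuredOutput(structuredOutput)
const result = await structuredLLM.invoke('Tell me about Paris')
// result.city -> string, result.tags -> string[]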
|
||||
|
||||
/**
|
||||
* Handles streaming response from the LLM
|
||||
*/
|
||||
|
|
@@ -843,20 +824,16 @@ class LLM_Agentflow implements INode {
            for await (const chunk of await llmNodeInstance.stream(messages, { signal: abortController?.signal })) {
                if (sseStreamer) {
                    let content = ''
-
-                    if (typeof chunk === 'string') {
-                        content = chunk
-                    } else if (Array.isArray(chunk.content) && chunk.content.length > 0) {
+                    if (Array.isArray(chunk.content) && chunk.content.length > 0) {
                        const contents = chunk.content as MessageContentText[]
                        content = contents.map((item) => item.text).join('')
-                    } else if (chunk.content) {
+                    } else {
                        content = chunk.content.toString()
                    }
                    sseStreamer.streamTokenEvent(chatId, content)
                }

-                const messageChunk = typeof chunk === 'string' ? new AIMessageChunk(chunk) : chunk
-                response = response.concat(messageChunk)
+                response = response.concat(chunk)
            }
        } catch (error) {
            console.error('Error during streaming:', error)
@@ -878,9 +855,7 @@ class LLM_Agentflow implements INode {
        startTime: number,
        endTime: number,
        timeDelta: number,
-        isStructuredOutput: boolean,
-        artifacts: any[] = [],
-        fileAnnotations: any[] = []
+        isStructuredOutput: boolean
    ): any {
        const output: any = {
            content: finalResponse,
@ -899,10 +874,6 @@ class LLM_Agentflow implements INode {
|
|||
output.usageMetadata = response.usage_metadata
|
||||
}
|
||||
|
||||
if (response.response_metadata) {
|
||||
output.responseMetadata = response.response_metadata
|
||||
}
|
||||
|
||||
if (isStructuredOutput && typeof response === 'object') {
|
||||
const structuredOutput = response as Record<string, any>
|
||||
for (const key in structuredOutput) {
|
||||
|
|
@ -912,14 +883,6 @@ class LLM_Agentflow implements INode {
|
|||
}
|
||||
}
|
||||
|
||||
if (artifacts && artifacts.length > 0) {
|
||||
output.artifacts = flatten(artifacts)
|
||||
}
|
||||
|
||||
if (fileAnnotations && fileAnnotations.length > 0) {
|
||||
output.fileAnnotations = fileAnnotations
|
||||
}
|
||||
|
||||
return output
|
||||
}
|
||||
|
||||
|
|
@ -944,6 +907,107 @@ class LLM_Agentflow implements INode {
|
|||
|
||||
sseStreamer.streamEndEvent(chatId)
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a Zod schema from a JSON schema object
|
||||
* @param jsonSchema The JSON schema object
|
||||
* @returns A Zod schema
|
||||
*/
|
||||
private createZodSchemaFromJSON(jsonSchema: any): z.ZodTypeAny {
|
||||
// If the schema is an object with properties, create an object schema
|
||||
if (typeof jsonSchema === 'object' && jsonSchema !== null) {
|
||||
const schemaObj: Record<string, z.ZodTypeAny> = {}
|
||||
|
||||
// Process each property in the schema
|
||||
for (const [key, value] of Object.entries(jsonSchema)) {
|
||||
if (value === null) {
|
||||
// Handle null values
|
||||
schemaObj[key] = z.null()
|
||||
} else if (typeof value === 'object' && !Array.isArray(value)) {
|
||||
// Check if the property has a type definition
|
||||
if ('type' in value) {
|
||||
const type = value.type as string
|
||||
const description = ('description' in value ? (value.description as string) : '') || ''
|
||||
|
||||
// Create the appropriate Zod type based on the type property
|
||||
if (type === 'string') {
|
||||
schemaObj[key] = z.string().describe(description)
|
||||
} else if (type === 'number') {
|
||||
schemaObj[key] = z.number().describe(description)
|
||||
} else if (type === 'boolean') {
|
||||
schemaObj[key] = z.boolean().describe(description)
|
||||
} else if (type === 'array') {
|
||||
// If it's an array type, check if items is defined
|
||||
if ('items' in value && value.items) {
|
||||
const itemSchema = this.createZodSchemaFromJSON(value.items)
|
||||
schemaObj[key] = z.array(itemSchema).describe(description)
|
||||
} else {
|
||||
// Default to array of any if items not specified
|
||||
schemaObj[key] = z.array(z.any()).describe(description)
|
||||
}
|
||||
} else if (type === 'object') {
|
||||
// If it's an object type, check if properties is defined
|
||||
if ('properties' in value && value.properties) {
|
||||
const nestedSchema = this.createZodSchemaFromJSON(value.properties)
|
||||
schemaObj[key] = nestedSchema.describe(description)
|
||||
} else {
|
||||
// Default to record of any if properties not specified
|
||||
schemaObj[key] = z.record(z.any()).describe(description)
|
||||
}
|
||||
} else {
|
||||
// Default to any for unknown types
|
||||
schemaObj[key] = z.any().describe(description)
|
||||
}
|
||||
|
||||
// Check if the property is optional
|
||||
if ('optional' in value && value.optional === true) {
|
||||
schemaObj[key] = schemaObj[key].optional()
|
||||
}
|
||||
} else if (Array.isArray(value)) {
|
||||
// Array values without a type property
|
||||
if (value.length > 0) {
|
||||
// If the array has items, recursively create a schema for the first item
|
||||
const itemSchema = this.createZodSchemaFromJSON(value[0])
|
||||
schemaObj[key] = z.array(itemSchema)
|
||||
} else {
|
||||
// Empty array, allow any array
|
||||
schemaObj[key] = z.array(z.any())
|
||||
}
|
||||
} else {
|
||||
// It's a nested object without a type property, recursively create schema
|
||||
schemaObj[key] = this.createZodSchemaFromJSON(value)
|
||||
}
|
||||
} else if (Array.isArray(value)) {
|
||||
// Array values
|
||||
if (value.length > 0) {
|
||||
// If the array has items, recursively create a schema for the first item
|
||||
const itemSchema = this.createZodSchemaFromJSON(value[0])
|
||||
schemaObj[key] = z.array(itemSchema)
|
||||
} else {
|
||||
// Empty array, allow any array
|
||||
schemaObj[key] = z.array(z.any())
|
||||
}
|
||||
} else {
|
||||
// For primitive values (which shouldn't be in the schema directly)
|
||||
// Use the corresponding Zod type
|
||||
if (typeof value === 'string') {
|
||||
schemaObj[key] = z.string()
|
||||
} else if (typeof value === 'number') {
|
||||
schemaObj[key] = z.number()
|
||||
} else if (typeof value === 'boolean') {
|
||||
schemaObj[key] = z.boolean()
|
||||
} else {
|
||||
schemaObj[key] = z.any()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return z.object(schemaObj)
|
||||
}
|
||||
|
||||
// Fallback to any for unknown types
|
||||
return z.any()
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { nodeClass: LLM_Agentflow }
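A brief sketch of the JSON-to-Zod mapping createZodSchemaFromJSON implements above; the input object is illustrative:

// Input: a plain JSON-schema-like object, e.g. parsed from the node's jsonSchema field
const jsonSchema = {
    name: { type: 'string', description: 'Product name' },
    price: { type: 'number', description: 'Unit price', optional: true },
    tags: { type: 'array', items: { label: { type: 'string', description: 'Tag label' } } }
}

// createZodSchemaFromJSON(jsonSchema) yields roughly:
// z.object({
//     name: z.string().describe('Product name'),
//     price: z.number().describe('Unit price').optional(),
//     tags: z.array(z.object({ label: z.string().describe('Tag label') }))
// })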
@ -20,7 +20,7 @@ class Loop_Agentflow implements INode {
|
|||
constructor() {
|
||||
this.label = 'Loop'
|
||||
this.name = 'loopAgentflow'
|
||||
this.version = 1.2
|
||||
this.version = 1.1
|
||||
this.type = 'Loop'
|
||||
this.category = 'Agent Flows'
|
||||
this.description = 'Loop back to a previous node'
|
||||
|
|
@ -64,7 +64,8 @@ class Loop_Agentflow implements INode {
|
|||
label: 'Key',
|
||||
name: 'key',
|
||||
type: 'asyncOptions',
|
||||
loadMethod: 'listRuntimeStateKeys'
|
||||
loadMethod: 'listRuntimeStateKeys',
|
||||
freeSolo: true
|
||||
},
|
||||
{
|
||||
label: 'Value',
|
||||
|
|
|
|||
|
|
@ -36,7 +36,7 @@ class Retriever_Agentflow implements INode {
|
|||
constructor() {
|
||||
this.label = 'Retriever'
|
||||
this.name = 'retrieverAgentflow'
|
||||
this.version = 1.1
|
||||
this.version = 1.0
|
||||
this.type = 'Retriever'
|
||||
this.category = 'Agent Flows'
|
||||
this.description = 'Retrieve information from vector database'
|
||||
|
|
@ -87,7 +87,8 @@ class Retriever_Agentflow implements INode {
|
|||
label: 'Key',
|
||||
name: 'key',
|
||||
type: 'asyncOptions',
|
||||
loadMethod: 'listRuntimeStateKeys'
|
||||
loadMethod: 'listRuntimeStateKeys',
|
||||
freeSolo: true
|
||||
},
|
||||
{
|
||||
label: 'Value',
|
||||
|
|
|
|||
|
|
@ -29,7 +29,7 @@ class Tool_Agentflow implements INode {
|
|||
constructor() {
|
||||
this.label = 'Tool'
|
||||
this.name = 'toolAgentflow'
|
||||
this.version = 1.2
|
||||
this.version = 1.1
|
||||
this.type = 'Tool'
|
||||
this.category = 'Agent Flows'
|
||||
this.description = 'Tools allow LLM to interact with external systems'
|
||||
|
|
@ -80,7 +80,8 @@ class Tool_Agentflow implements INode {
|
|||
label: 'Key',
|
||||
name: 'key',
|
||||
type: 'asyncOptions',
|
||||
loadMethod: 'listRuntimeStateKeys'
|
||||
loadMethod: 'listRuntimeStateKeys',
|
||||
freeSolo: true
|
||||
},
|
||||
{
|
||||
label: 'Value',
|
||||
|
|
|
|||
|
|
@@ -1,11 +1,10 @@
-import { BaseMessage, MessageContentImageUrl, AIMessageChunk } from '@langchain/core/messages'
+import { BaseMessage, MessageContentImageUrl } from '@langchain/core/messages'
import { getImageUploads } from '../../src/multiModalUtils'
-import { addSingleFileToStorage, getFileFromStorage } from '../../src/storageUtils'
-import { ICommonObject, IFileUpload, INodeData } from '../../src/Interface'
+import { getFileFromStorage } from '../../src/storageUtils'
+import { ICommonObject, IFileUpload } from '../../src/Interface'
import { BaseMessageLike } from '@langchain/core/messages'
import { IFlowState } from './Interface.Agentflow'
-import { getCredentialData, getCredentialParam, handleEscapeCharacters, mapMimeTypeToInputField } from '../../src/utils'
-import fetch from 'node-fetch'
+import { handleEscapeCharacters, mapMimeTypeToInputField } from '../../src/utils'

export const addImagesToMessages = async (
    options: ICommonObject,
@@ -19,8 +18,7 @@ export const addImagesToMessages = async (
    for (const upload of imageUploads) {
        let bf = upload.data
        if (upload.type == 'stored-file') {
-            const fileName = upload.name.replace(/^FILE-STORAGE::/, '')
-            const contents = await getFileFromStorage(fileName, options.orgId, options.chatflowid, options.chatId)
+            const contents = await getFileFromStorage(upload.name, options.orgId, options.chatflowid, options.chatId)
            // as the image is stored in the server, read the file and convert it to base64
            bf = 'data:' + upload.mime + ';base64,' + contents.toString('base64')

@ -91,9 +89,8 @@ export const processMessagesWithImages = async (
|
|||
if (item.type === 'stored-file' && item.name && item.mime.startsWith('image/')) {
|
||||
hasImageReferences = true
|
||||
try {
|
||||
const fileName = item.name.replace(/^FILE-STORAGE::/, '')
|
||||
// Get file contents from storage
|
||||
const contents = await getFileFromStorage(fileName, options.orgId, options.chatflowid, options.chatId)
|
||||
const contents = await getFileFromStorage(item.name, options.orgId, options.chatflowid, options.chatId)
|
||||
|
||||
// Create base64 data URL
|
||||
const base64Data = 'data:' + item.mime + ';base64,' + contents.toString('base64')
|
||||
|
|
@ -325,8 +322,7 @@ export const getPastChatHistoryImageMessages = async (
|
|||
const imageContents: MessageContentImageUrl[] = []
|
||||
for (const upload of uploads) {
|
||||
if (upload.type === 'stored-file' && upload.mime.startsWith('image/')) {
|
||||
const fileName = upload.name.replace(/^FILE-STORAGE::/, '')
|
||||
const fileData = await getFileFromStorage(fileName, options.orgId, options.chatflowid, options.chatId)
|
||||
const fileData = await getFileFromStorage(upload.name, options.orgId, options.chatflowid, options.chatId)
|
||||
// as the image is stored in the server, read the file and convert it to base64
|
||||
const bf = 'data:' + upload.mime + ';base64,' + fileData.toString('base64')
|
||||
|
||||
|
|
@ -460,437 +456,6 @@ export const getPastChatHistoryImageMessages = async (
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets MIME type from filename extension
|
||||
*/
|
||||
export const getMimeTypeFromFilename = (filename: string): string => {
|
||||
const extension = filename.toLowerCase().split('.').pop()
|
||||
const mimeTypes: { [key: string]: string } = {
|
||||
png: 'image/png',
|
||||
jpg: 'image/jpeg',
|
||||
jpeg: 'image/jpeg',
|
||||
gif: 'image/gif',
|
||||
pdf: 'application/pdf',
|
||||
txt: 'text/plain',
|
||||
csv: 'text/csv',
|
||||
json: 'application/json',
|
||||
html: 'text/html',
|
||||
xml: 'application/xml'
|
||||
}
|
||||
return mimeTypes[extension || ''] || 'application/octet-stream'
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets artifact type from filename extension for UI rendering
|
||||
*/
|
||||
export const getArtifactTypeFromFilename = (filename: string): string => {
|
||||
const extension = filename.toLowerCase().split('.').pop()
|
||||
const artifactTypes: { [key: string]: string } = {
|
||||
png: 'png',
|
||||
jpg: 'jpeg',
|
||||
jpeg: 'jpeg',
|
||||
html: 'html',
|
||||
htm: 'html',
|
||||
md: 'markdown',
|
||||
markdown: 'markdown',
|
||||
json: 'json',
|
||||
js: 'javascript',
|
||||
javascript: 'javascript',
|
||||
tex: 'latex',
|
||||
latex: 'latex',
|
||||
txt: 'text',
|
||||
csv: 'text',
|
||||
pdf: 'text'
|
||||
}
|
||||
return artifactTypes[extension || ''] || 'text'
|
||||
}
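A quick illustration of what these two filename helpers return; the values follow directly from the lookup tables above:

getMimeTypeFromFilename('report.PDF')      // 'application/pdf' (extension is lower-cased first)
getMimeTypeFromFilename('archive.tar.gz')  // 'application/octet-stream' (unknown extension falls back)

getArtifactTypeFromFilename('chart.png')   // 'png' -> rendered as an image artifact
getArtifactTypeFromFilename('notes.md')    // 'markdown'
getArtifactTypeFromFilename('data.csv')    // 'text' (csv and pdf are treated as text for UI rendering)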
|
||||
|
||||
/**
|
||||
* Saves base64 image data to storage and returns file information
|
||||
*/
|
||||
export const saveBase64Image = async (
|
||||
outputItem: any,
|
||||
options: ICommonObject
|
||||
): Promise<{ filePath: string; fileName: string; totalSize: number } | null> => {
|
||||
try {
|
||||
if (!outputItem.result) {
|
||||
return null
|
||||
}
|
||||
|
||||
// Extract base64 data and create buffer
|
||||
const base64Data = outputItem.result
|
||||
const imageBuffer = Buffer.from(base64Data, 'base64')
|
||||
|
||||
// Determine file extension and MIME type
|
||||
const outputFormat = outputItem.output_format || 'png'
|
||||
const fileName = `generated_image_${outputItem.id || Date.now()}.${outputFormat}`
|
||||
const mimeType = outputFormat === 'png' ? 'image/png' : 'image/jpeg'
|
||||
|
||||
// Save the image using the existing storage utility
|
||||
const { path, totalSize } = await addSingleFileToStorage(
|
||||
mimeType,
|
||||
imageBuffer,
|
||||
fileName,
|
||||
options.orgId,
|
||||
options.chatflowid,
|
||||
options.chatId
|
||||
)
|
||||
|
||||
return { filePath: path, fileName, totalSize }
|
||||
} catch (error) {
|
||||
console.error('Error saving base64 image:', error)
|
||||
return null
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Saves Gemini inline image data to storage and returns file information
|
||||
*/
|
||||
export const saveGeminiInlineImage = async (
|
||||
inlineItem: any,
|
||||
options: ICommonObject
|
||||
): Promise<{ filePath: string; fileName: string; totalSize: number } | null> => {
|
||||
try {
|
||||
if (!inlineItem.data || !inlineItem.mimeType) {
|
||||
return null
|
||||
}
|
||||
|
||||
// Extract base64 data and create buffer
|
||||
const base64Data = inlineItem.data
|
||||
const imageBuffer = Buffer.from(base64Data, 'base64')
|
||||
|
||||
// Determine file extension from MIME type
|
||||
const mimeType = inlineItem.mimeType
|
||||
let extension = 'png'
|
||||
if (mimeType.includes('jpeg') || mimeType.includes('jpg')) {
|
||||
extension = 'jpg'
|
||||
} else if (mimeType.includes('png')) {
|
||||
extension = 'png'
|
||||
} else if (mimeType.includes('gif')) {
|
||||
extension = 'gif'
|
||||
} else if (mimeType.includes('webp')) {
|
||||
extension = 'webp'
|
||||
}
|
||||
|
||||
const fileName = `gemini_generated_image_${Date.now()}.${extension}`
|
||||
|
||||
// Save the image using the existing storage utility
|
||||
const { path, totalSize } = await addSingleFileToStorage(
|
||||
mimeType,
|
||||
imageBuffer,
|
||||
fileName,
|
||||
options.orgId,
|
||||
options.chatflowid,
|
||||
options.chatId
|
||||
)
|
||||
|
||||
return { filePath: path, fileName, totalSize }
|
||||
} catch (error) {
|
||||
console.error('Error saving Gemini inline image:', error)
|
||||
return null
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Downloads file content from container file citation
|
||||
*/
|
||||
export const downloadContainerFile = async (
|
||||
containerId: string,
|
||||
fileId: string,
|
||||
filename: string,
|
||||
modelNodeData: INodeData,
|
||||
options: ICommonObject
|
||||
): Promise<{ filePath: string; totalSize: number } | null> => {
|
||||
try {
|
||||
const credentialData = await getCredentialData(modelNodeData.credential ?? '', options)
|
||||
const openAIApiKey = getCredentialParam('openAIApiKey', credentialData, modelNodeData)
|
||||
|
||||
if (!openAIApiKey) {
|
||||
console.warn('No OpenAI API key available for downloading container file')
|
||||
return null
|
||||
}
|
||||
|
||||
// Download the file using OpenAI Container API
|
||||
const response = await fetch(`https://api.openai.com/v1/containers/${containerId}/files/${fileId}/content`, {
|
||||
method: 'GET',
|
||||
headers: {
|
||||
Accept: '*/*',
|
||||
Authorization: `Bearer ${openAIApiKey}`
|
||||
}
|
||||
})
|
||||
|
||||
if (!response.ok) {
|
||||
console.warn(
|
||||
`Failed to download container file ${fileId} from container ${containerId}: ${response.status} ${response.statusText}`
|
||||
)
|
||||
return null
|
||||
}
|
||||
|
||||
// Extract the binary data from the Response object
|
||||
const data = await response.arrayBuffer()
|
||||
const dataBuffer = Buffer.from(data)
|
||||
const mimeType = getMimeTypeFromFilename(filename)
|
||||
|
||||
// Store the file using the same storage utility as OpenAIAssistant
|
||||
const { path, totalSize } = await addSingleFileToStorage(
|
||||
mimeType,
|
||||
dataBuffer,
|
||||
filename,
|
||||
options.orgId,
|
||||
options.chatflowid,
|
||||
options.chatId
|
||||
)
|
||||
|
||||
return { filePath: path, totalSize }
|
||||
} catch (error) {
|
||||
console.error('Error downloading container file:', error)
|
||||
return null
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Replace inlineData base64 with file references in the response content
|
||||
*/
|
||||
export const replaceInlineDataWithFileReferences = (
|
||||
response: AIMessageChunk,
|
||||
savedInlineImages: Array<{ filePath: string; fileName: string; mimeType: string }>
|
||||
): void => {
|
||||
// Check if content is an array
|
||||
if (!Array.isArray(response.content)) {
|
||||
return
|
||||
}
|
||||
|
||||
// Replace base64 data with file references in response content
|
||||
let savedImageIndex = 0
|
||||
for (let i = 0; i < response.content.length; i++) {
|
||||
const contentItem = response.content[i]
|
||||
if (
|
||||
typeof contentItem === 'object' &&
|
||||
contentItem.type === 'inlineData' &&
|
||||
contentItem.inlineData &&
|
||||
savedImageIndex < savedInlineImages.length
|
||||
) {
|
||||
const savedImage = savedInlineImages[savedImageIndex]
|
||||
// Replace with file reference
|
||||
response.content[i] = {
|
||||
type: 'stored-file',
|
||||
name: savedImage.fileName,
|
||||
mime: savedImage.mimeType,
|
||||
path: savedImage.filePath
|
||||
}
|
||||
savedImageIndex++
|
||||
}
|
||||
}
|
||||
|
||||
// Clear the inlineData from response_metadata to avoid duplication
|
||||
if (response.response_metadata?.inlineData) {
|
||||
delete response.response_metadata.inlineData
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Extracts artifacts from response metadata (both annotations and built-in tools)
|
||||
*/
|
||||
export const extractArtifactsFromResponse = async (
|
||||
responseMetadata: any,
|
||||
modelNodeData: INodeData,
|
||||
options: ICommonObject
|
||||
): Promise<{
|
||||
artifacts: any[]
|
||||
fileAnnotations: any[]
|
||||
savedInlineImages?: Array<{ filePath: string; fileName: string; mimeType: string }>
|
||||
}> => {
|
||||
const artifacts: any[] = []
|
||||
const fileAnnotations: any[] = []
|
||||
const savedInlineImages: Array<{ filePath: string; fileName: string; mimeType: string }> = []
|
||||
|
||||
// Handle Gemini inline data (image generation)
|
||||
if (responseMetadata?.inlineData && Array.isArray(responseMetadata.inlineData)) {
|
||||
for (const inlineItem of responseMetadata.inlineData) {
|
||||
if (inlineItem.type === 'gemini_inline_data' && inlineItem.data && inlineItem.mimeType) {
|
||||
try {
|
||||
const savedImageResult = await saveGeminiInlineImage(inlineItem, options)
|
||||
if (savedImageResult) {
|
||||
// Create artifact in the same format as other image artifacts
|
||||
const fileType = getArtifactTypeFromFilename(savedImageResult.fileName)
|
||||
artifacts.push({
|
||||
type: fileType,
|
||||
data: savedImageResult.filePath
|
||||
})
|
||||
|
||||
// Track saved image for replacing base64 data in content
|
||||
savedInlineImages.push({
|
||||
filePath: savedImageResult.filePath,
|
||||
fileName: savedImageResult.fileName,
|
||||
mimeType: inlineItem.mimeType
|
||||
})
|
||||
}
|
||||
} catch (error) {
|
||||
console.error('Error processing Gemini inline image artifact:', error)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (!responseMetadata?.output || !Array.isArray(responseMetadata.output)) {
|
||||
return { artifacts, fileAnnotations, savedInlineImages: savedInlineImages.length > 0 ? savedInlineImages : undefined }
|
||||
}
|
||||
|
||||
for (const outputItem of responseMetadata.output) {
|
||||
// Handle container file citations from annotations
|
||||
if (outputItem.type === 'message' && outputItem.content && Array.isArray(outputItem.content)) {
|
||||
for (const contentItem of outputItem.content) {
|
||||
if (contentItem.annotations && Array.isArray(contentItem.annotations)) {
|
||||
for (const annotation of contentItem.annotations) {
|
||||
if (annotation.type === 'container_file_citation' && annotation.file_id && annotation.filename) {
|
||||
try {
|
||||
// Download and store the file content
|
||||
const downloadResult = await downloadContainerFile(
|
||||
annotation.container_id,
|
||||
annotation.file_id,
|
||||
annotation.filename,
|
||||
modelNodeData,
|
||||
options
|
||||
)
|
||||
|
||||
if (downloadResult) {
|
||||
const fileType = getArtifactTypeFromFilename(annotation.filename)
|
||||
|
||||
if (fileType === 'png' || fileType === 'jpeg' || fileType === 'jpg') {
|
||||
const artifact = {
|
||||
type: fileType,
|
||||
data: downloadResult.filePath
|
||||
}
|
||||
|
||||
artifacts.push(artifact)
|
||||
} else {
|
||||
fileAnnotations.push({
|
||||
filePath: downloadResult.filePath,
|
||||
fileName: annotation.filename
|
||||
})
|
||||
}
|
||||
}
|
||||
} catch (error) {
|
||||
console.error('Error processing annotation:', error)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Handle built-in tool artifacts (like image generation)
|
||||
if (outputItem.type === 'image_generation_call' && outputItem.result) {
|
||||
try {
|
||||
const savedImageResult = await saveBase64Image(outputItem, options)
|
||||
if (savedImageResult) {
|
||||
// Replace the base64 result with the file path in the response metadata
|
||||
outputItem.result = savedImageResult.filePath
|
||||
|
||||
// Create artifact in the same format as other image artifacts
|
||||
const fileType = getArtifactTypeFromFilename(savedImageResult.fileName)
|
||||
artifacts.push({
|
||||
type: fileType,
|
||||
data: savedImageResult.filePath
|
||||
})
|
||||
}
|
||||
} catch (error) {
|
||||
console.error('Error processing image generation artifact:', error)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return { artifacts, fileAnnotations, savedInlineImages: savedInlineImages.length > 0 ? savedInlineImages : undefined }
|
||||
}
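A short sketch of how a caller consumes this helper, mirroring the LLM node usage shown earlier in this diff; variable names are illustrative:

// After invoking the model, pull artifacts and file annotations out of the provider metadata
const { artifacts, fileAnnotations, savedInlineImages } = await extractArtifactsFromResponse(
    response.response_metadata,
    nodeData,
    options
)

if (artifacts.length > 0 && isLastNode && sseStreamer) {
    sseStreamer.streamArtifactsEvent(chatId, artifacts)
}
if (fileAnnotations.length > 0 && isLastNode && sseStreamer) {
    sseStreamer.streamFileAnnotationsEvent(chatId, fileAnnotations)
}
if (savedInlineImages && savedInlineImages.length > 0) {
    replaceInlineDataWithFileReferences(response, savedInlineImages)
}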
|
||||
|
||||
/**
|
||||
* Add image artifacts from previous assistant messages as user messages
|
||||
* This allows the LLM to see and reference the generated images in the conversation
|
||||
* Messages are marked with a special flag for later removal
|
||||
*/
|
||||
export const addImageArtifactsToMessages = async (messages: BaseMessageLike[], options: ICommonObject): Promise<void> => {
|
||||
const imageExtensions = ['png', 'jpg', 'jpeg', 'gif', 'webp']
|
||||
const messagesToInsert: Array<{ index: number; message: any }> = []
|
||||
|
||||
// Iterate through messages to find assistant messages with image artifacts
|
||||
for (let i = 0; i < messages.length; i++) {
|
||||
const message = messages[i] as any
|
||||
|
||||
// Check if this is an assistant message with artifacts
|
||||
if (
|
||||
(message.role === 'assistant' || message.role === 'ai') &&
|
||||
message.additional_kwargs?.artifacts &&
|
||||
Array.isArray(message.additional_kwargs.artifacts)
|
||||
) {
|
||||
const artifacts = message.additional_kwargs.artifacts
|
||||
const imageArtifacts: Array<{ type: string; name: string; mime: string }> = []
|
||||
|
||||
// Extract image artifacts
|
||||
for (const artifact of artifacts) {
|
||||
if (artifact.type && artifact.data) {
|
||||
// Check if this is an image artifact by file type
|
||||
if (imageExtensions.includes(artifact.type.toLowerCase())) {
|
||||
// Extract filename from the file path
|
||||
const fileName = artifact.data.split('/').pop() || artifact.data
|
||||
const mimeType = `image/${artifact.type.toLowerCase()}`
|
||||
|
||||
imageArtifacts.push({
|
||||
type: 'stored-file',
|
||||
name: fileName,
|
||||
mime: mimeType
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// If we found image artifacts, prepare to insert a user message after this assistant message
|
||||
if (imageArtifacts.length > 0) {
|
||||
// Check if the next message already contains these image artifacts to avoid duplicates
|
||||
const nextMessage = messages[i + 1] as any
|
||||
const shouldInsert =
|
||||
!nextMessage ||
|
||||
nextMessage.role !== 'user' ||
|
||||
!Array.isArray(nextMessage.content) ||
|
||||
!nextMessage.content.some(
|
||||
(item: any) =>
|
||||
(item.type === 'stored-file' || item.type === 'image_url') &&
|
||||
imageArtifacts.some((artifact) => {
|
||||
// Compare with and without FILE-STORAGE:: prefix
|
||||
const artifactName = artifact.name.replace('FILE-STORAGE::', '')
|
||||
const itemName = item.name?.replace('FILE-STORAGE::', '') || ''
|
||||
return artifactName === itemName
|
||||
})
|
||||
)
|
||||
|
||||
if (shouldInsert) {
|
||||
messagesToInsert.push({
|
||||
index: i + 1,
|
||||
message: {
|
||||
role: 'user',
|
||||
content: imageArtifacts,
|
||||
_isTemporaryImageMessage: true // Mark for later removal
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Insert messages in reverse order to maintain correct indices
|
||||
for (let i = messagesToInsert.length - 1; i >= 0; i--) {
|
||||
const { index, message } = messagesToInsert[i]
|
||||
messages.splice(index, 0, message)
|
||||
}
|
||||
|
||||
// Convert stored-file references to base64 image_url format
|
||||
if (messagesToInsert.length > 0) {
|
||||
const { updatedMessages } = await processMessagesWithImages(messages, options)
|
||||
// Replace the messages array content with the updated messages
|
||||
messages.length = 0
|
||||
messages.push(...updatedMessages)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Updates the flow state with new values
|
||||
*/
|
||||
|
|
|
|||
|
|
@ -5,7 +5,7 @@ import { RunnableSequence } from '@langchain/core/runnables'
|
|||
import { BaseChatModel } from '@langchain/core/language_models/chat_models'
|
||||
import { ChatPromptTemplate, MessagesPlaceholder, HumanMessagePromptTemplate, PromptTemplate } from '@langchain/core/prompts'
|
||||
import { formatToOpenAIToolMessages } from 'langchain/agents/format_scratchpad/openai_tools'
|
||||
import { getBaseClasses, transformBracesWithColon, convertChatHistoryToText, convertBaseMessagetoIMessage } from '../../../src/utils'
|
||||
import { getBaseClasses, transformBracesWithColon } from '../../../src/utils'
|
||||
import { type ToolsAgentStep } from 'langchain/agents/openai/output_parser'
|
||||
import {
|
||||
FlowiseMemory,
|
||||
|
|
@ -23,10 +23,8 @@ import { Moderation, checkInputs, streamResponse } from '../../moderation/Modera
|
|||
import { formatResponse } from '../../outputparsers/OutputParserHelpers'
|
||||
import type { Document } from '@langchain/core/documents'
|
||||
import { BaseRetriever } from '@langchain/core/retrievers'
|
||||
import { RESPONSE_TEMPLATE, REPHRASE_TEMPLATE } from '../../chains/ConversationalRetrievalQAChain/prompts'
|
||||
import { RESPONSE_TEMPLATE } from '../../chains/ConversationalRetrievalQAChain/prompts'
|
||||
import { addImagesToMessages, llmSupportsVision } from '../../../src/multiModalUtils'
|
||||
import { StringOutputParser } from '@langchain/core/output_parsers'
|
||||
import { Tool } from '@langchain/core/tools'
|
||||
|
||||
class ConversationalRetrievalToolAgent_Agents implements INode {
|
||||
label: string
|
||||
|
|
@ -44,7 +42,7 @@ class ConversationalRetrievalToolAgent_Agents implements INode {
|
|||
constructor(fields?: { sessionId?: string }) {
|
||||
this.label = 'Conversational Retrieval Tool Agent'
|
||||
this.name = 'conversationalRetrievalToolAgent'
|
||||
this.author = 'niztal(falkor) and nikitas-novatix'
|
||||
this.author = 'niztal(falkor)'
|
||||
this.version = 1.0
|
||||
this.type = 'AgentExecutor'
|
||||
this.category = 'Agents'
|
||||
|
|
@ -81,26 +79,6 @@ class ConversationalRetrievalToolAgent_Agents implements INode {
|
|||
optional: true,
|
||||
default: RESPONSE_TEMPLATE
|
||||
},
|
||||
{
|
||||
label: 'Rephrase Prompt',
|
||||
name: 'rephrasePrompt',
|
||||
type: 'string',
|
||||
description: 'Using previous chat history, rephrase question into a standalone question',
|
||||
warning: 'Prompt must include input variables: {chat_history} and {question}',
|
||||
rows: 4,
|
||||
additionalParams: true,
|
||||
optional: true,
|
||||
default: REPHRASE_TEMPLATE
|
||||
},
|
||||
{
|
||||
label: 'Rephrase Model',
|
||||
name: 'rephraseModel',
|
||||
type: 'BaseChatModel',
|
||||
description:
|
||||
'Optional: Use a different (faster/cheaper) model for rephrasing. If not specified, uses the main Tool Calling Chat Model.',
|
||||
optional: true,
|
||||
additionalParams: true
|
||||
},
|
||||
{
|
||||
label: 'Input Moderation',
|
||||
description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
|
||||
|
|
@ -125,9 +103,8 @@ class ConversationalRetrievalToolAgent_Agents implements INode {
|
|||
this.sessionId = fields?.sessionId
|
||||
}
|
||||
|
||||
// The agent will be prepared in run() with the correct user message - it needs the actual runtime input for rephrasing
|
||||
async init(_nodeData: INodeData, _input: string, _options: ICommonObject): Promise<any> {
|
||||
return null
|
||||
async init(nodeData: INodeData, input: string, options: ICommonObject): Promise<any> {
|
||||
return prepareAgent(nodeData, options, { sessionId: this.sessionId, chatId: options.chatId, input })
|
||||
}
|
||||
|
||||
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | ICommonObject> {
|
||||
|
|
@ -171,23 +148,6 @@ class ConversationalRetrievalToolAgent_Agents implements INode {
|
|||
sseStreamer.streamUsedToolsEvent(chatId, res.usedTools)
|
||||
usedTools = res.usedTools
|
||||
}
|
||||
|
||||
// If the tool is set to returnDirect, stream the output to the client
|
||||
if (res.usedTools && res.usedTools.length) {
|
||||
let inputTools = nodeData.inputs?.tools
|
||||
inputTools = flatten(inputTools)
|
||||
for (const tool of res.usedTools) {
|
||||
const inputTool = inputTools.find((inputTool: Tool) => inputTool.name === tool.tool)
|
||||
if (inputTool && (inputTool as any).returnDirect && shouldStreamResponse) {
|
||||
sseStreamer.streamTokenEvent(chatId, tool.toolOutput)
|
||||
// Prevent CustomChainHandler from streaming the same output again
|
||||
if (res.output === tool.toolOutput) {
|
||||
res.output = ''
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
// The CustomChainHandler will send the stream end event
|
||||
} else {
|
||||
res = await executor.invoke({ input }, { callbacks: [loggerHandler, ...callbacks] })
|
||||
if (res.sourceDocuments) {
|
||||
|
|
@ -250,11 +210,9 @@ const prepareAgent = async (
|
|||
flowObj: { sessionId?: string; chatId?: string; input?: string }
|
||||
) => {
|
||||
const model = nodeData.inputs?.model as BaseChatModel
|
||||
const rephraseModel = (nodeData.inputs?.rephraseModel as BaseChatModel) || model // Use main model if not specified
|
||||
const maxIterations = nodeData.inputs?.maxIterations as string
|
||||
const memory = nodeData.inputs?.memory as FlowiseMemory
|
||||
let systemMessage = nodeData.inputs?.systemMessage as string
|
||||
let rephrasePrompt = nodeData.inputs?.rephrasePrompt as string
|
||||
let tools = nodeData.inputs?.tools
|
||||
tools = flatten(tools)
|
||||
const memoryKey = memory.memoryKey ? memory.memoryKey : 'chat_history'
|
||||
|
|
@ -262,9 +220,6 @@ const prepareAgent = async (
|
|||
const vectorStoreRetriever = nodeData.inputs?.vectorStoreRetriever as BaseRetriever
|
||||
|
||||
systemMessage = transformBracesWithColon(systemMessage)
|
||||
if (rephrasePrompt) {
|
||||
rephrasePrompt = transformBracesWithColon(rephrasePrompt)
|
||||
}
|
||||
|
||||
const prompt = ChatPromptTemplate.fromMessages([
|
||||
['system', systemMessage ? systemMessage : `You are a helpful AI assistant.`],
|
||||
|
|
@ -308,37 +263,6 @@ const prepareAgent = async (
|
|||
|
||||
const modelWithTools = model.bindTools(tools)
|
||||
|
||||
// Function to get standalone question (either rephrased or original)
|
||||
const getStandaloneQuestion = async (input: string): Promise<string> => {
|
||||
// If no rephrase prompt, return the original input
|
||||
if (!rephrasePrompt) {
|
||||
return input
|
||||
}
|
||||
|
||||
// Get chat history (use empty string if none)
|
||||
const messages = (await memory.getChatMessages(flowObj?.sessionId, true)) as BaseMessage[]
|
||||
const iMessages = convertBaseMessagetoIMessage(messages)
|
||||
const chatHistoryString = convertChatHistoryToText(iMessages)
|
||||
|
||||
// Always rephrase to normalize/expand user queries for better retrieval
|
||||
try {
|
||||
const CONDENSE_QUESTION_PROMPT = PromptTemplate.fromTemplate(rephrasePrompt)
|
||||
const condenseQuestionChain = RunnableSequence.from([CONDENSE_QUESTION_PROMPT, rephraseModel, new StringOutputParser()])
|
||||
const res = await condenseQuestionChain.invoke({
|
||||
question: input,
|
||||
chat_history: chatHistoryString
|
||||
})
|
||||
return res
|
||||
} catch (error) {
|
||||
console.error('Error rephrasing question:', error)
|
||||
// On error, fall back to original input
|
||||
return input
|
||||
}
|
||||
}
|
||||
|
||||
// Get standalone question before creating runnable
|
||||
const standaloneQuestion = await getStandaloneQuestion(flowObj?.input || '')
|
||||
|
||||
const runnableAgent = RunnableSequence.from([
|
||||
{
|
||||
[inputKey]: (i: { input: string; steps: ToolsAgentStep[] }) => i.input,
|
||||
|
|
@ -348,9 +272,7 @@ const prepareAgent = async (
|
|||
return messages ?? []
|
||||
},
|
||||
context: async (i: { input: string; chatHistory?: string }) => {
|
||||
// Use the standalone question (rephrased or original) for retrieval
|
||||
const retrievalQuery = standaloneQuestion || i.input
|
||||
const relevantDocs = await vectorStoreRetriever.invoke(retrievalQuery)
|
||||
const relevantDocs = await vectorStoreRetriever.invoke(i.input)
|
||||
const formattedDocs = formatDocs(relevantDocs)
|
||||
return formattedDocs
|
||||
}
|
||||
|
|
@ -373,6 +295,4 @@ const prepareAgent = async (
|
|||
return executor
|
||||
}
|
||||
|
||||
module.exports = {
|
||||
nodeClass: ConversationalRetrievalToolAgent_Agents
|
||||
}
|
||||
module.exports = { nodeClass: ConversationalRetrievalToolAgent_Agents }
|
||||
|
|
|
|||
|
|
@ -578,7 +578,7 @@ class OpenAIAssistant_Agents implements INode {
|
|||
toolOutput
|
||||
})
|
||||
} catch (e) {
|
||||
await analyticHandlers.onToolError(toolIds, e)
|
||||
await analyticHandlers.onToolEnd(toolIds, e)
|
||||
console.error('Error executing tool', e)
|
||||
throw new Error(
|
||||
`Error executing tool. Tool: ${tool.name}. Thread ID: ${threadId}. Run ID: ${runThreadId}`
|
||||
|
|
@ -703,7 +703,7 @@ class OpenAIAssistant_Agents implements INode {
|
|||
toolOutput
|
||||
})
|
||||
} catch (e) {
|
||||
await analyticHandlers.onToolError(toolIds, e)
|
||||
await analyticHandlers.onToolEnd(toolIds, e)
|
||||
console.error('Error executing tool', e)
|
||||
clearInterval(timeout)
|
||||
reject(
|
||||
|
|
@ -1096,7 +1096,7 @@ async function handleToolSubmission(params: ToolSubmissionParams): Promise<ToolS
|
|||
toolOutput
|
||||
})
|
||||
} catch (e) {
|
||||
await analyticHandlers.onToolError(toolIds, e)
|
||||
await analyticHandlers.onToolEnd(toolIds, e)
|
||||
console.error('Error executing tool', e)
|
||||
throw new Error(`Error executing tool. Tool: ${tool.name}. Thread ID: ${threadId}. Run ID: ${runThreadId}`)
|
||||
}
|
||||
@@ -607,12 +607,7 @@ export class LangchainChatGoogleGenerativeAI
    private client: GenerativeModel

    get _isMultimodalModel() {
-        return (
-            this.model.includes('vision') ||
-            this.model.startsWith('gemini-1.5') ||
-            this.model.startsWith('gemini-2') ||
-            this.model.startsWith('gemini-3')
-        )
+        return this.model.includes('vision') || this.model.startsWith('gemini-1.5') || this.model.startsWith('gemini-2')
    }

    constructor(fields: GoogleGenerativeAIChatInput) {
@ -452,7 +452,6 @@ export function mapGenerateContentResultToChatResult(
|
|||
const [candidate] = response.candidates
|
||||
const { content: candidateContent, ...generationInfo } = candidate
|
||||
let content: MessageContent | undefined
|
||||
const inlineDataItems: any[] = []
|
||||
|
||||
if (Array.isArray(candidateContent?.parts) && candidateContent.parts.length === 1 && candidateContent.parts[0].text) {
|
||||
content = candidateContent.parts[0].text
|
||||
|
|
@ -473,18 +472,6 @@ export function mapGenerateContentResultToChatResult(
|
|||
type: 'codeExecutionResult',
|
||||
codeExecutionResult: p.codeExecutionResult
|
||||
}
|
||||
} else if ('inlineData' in p && p.inlineData) {
|
||||
// Extract inline image data for processing by Agent
|
||||
inlineDataItems.push({
|
||||
type: 'gemini_inline_data',
|
||||
mimeType: p.inlineData.mimeType,
|
||||
data: p.inlineData.data
|
||||
})
|
||||
// Return the inline data as part of the content structure
|
||||
return {
|
||||
type: 'inlineData',
|
||||
inlineData: p.inlineData
|
||||
}
|
||||
}
|
||||
return p
|
||||
})
|
||||
|
|
@ -501,12 +488,6 @@ export function mapGenerateContentResultToChatResult(
|
|||
text = block?.text ?? text
|
||||
}
|
||||
|
||||
// Build response_metadata with inline data if present
|
||||
const response_metadata: any = {}
|
||||
if (inlineDataItems.length > 0) {
|
||||
response_metadata.inlineData = inlineDataItems
|
||||
}
|
||||
|
||||
const generation: ChatGeneration = {
|
||||
text,
|
||||
message: new AIMessage({
|
||||
|
|
@ -521,8 +502,7 @@ export function mapGenerateContentResultToChatResult(
|
|||
additional_kwargs: {
|
||||
...generationInfo
|
||||
},
|
||||
usage_metadata: extra?.usageMetadata,
|
||||
response_metadata: Object.keys(response_metadata).length > 0 ? response_metadata : undefined
|
||||
usage_metadata: extra?.usageMetadata
|
||||
}),
|
||||
generationInfo
|
||||
}
|
||||
|
|
@ -553,8 +533,6 @@ export function convertResponseContentToChatGenerationChunk(
|
|||
const [candidate] = response.candidates
|
||||
const { content: candidateContent, ...generationInfo } = candidate
|
||||
let content: MessageContent | undefined
|
||||
const inlineDataItems: any[] = []
|
||||
|
||||
// Checks if some parts do not have text. If false, it means that the content is a string.
|
||||
if (Array.isArray(candidateContent?.parts) && candidateContent.parts.every((p) => 'text' in p)) {
|
||||
content = candidateContent.parts.map((p) => p.text).join('')
|
||||
|
|
@ -575,18 +553,6 @@ export function convertResponseContentToChatGenerationChunk(
|
|||
type: 'codeExecutionResult',
|
||||
codeExecutionResult: p.codeExecutionResult
|
||||
}
|
||||
} else if ('inlineData' in p && p.inlineData) {
|
||||
// Extract inline image data for processing by Agent
|
||||
inlineDataItems.push({
|
||||
type: 'gemini_inline_data',
|
||||
mimeType: p.inlineData.mimeType,
|
||||
data: p.inlineData.data
|
||||
})
|
||||
// Return the inline data as part of the content structure
|
||||
return {
|
||||
type: 'inlineData',
|
||||
inlineData: p.inlineData
|
||||
}
|
||||
}
|
||||
return p
|
||||
})
|
||||
|
|
@ -616,12 +582,6 @@ export function convertResponseContentToChatGenerationChunk(
|
|||
)
|
||||
}
|
||||
|
||||
// Build response_metadata with inline data if present
|
||||
const response_metadata: any = {}
|
||||
if (inlineDataItems.length > 0) {
|
||||
response_metadata.inlineData = inlineDataItems
|
||||
}
|
||||
|
||||
return new ChatGenerationChunk({
|
||||
text,
|
||||
message: new AIMessageChunk({
|
||||
|
|
@ -631,8 +591,7 @@ export function convertResponseContentToChatGenerationChunk(
|
|||
// Each chunk can have unique "generationInfo", and merging strategy is unclear,
|
||||
// so leave blank for now.
|
||||
additional_kwargs: {},
|
||||
usage_metadata: extra.usageMetadata,
|
||||
response_metadata: Object.keys(response_metadata).length > 0 ? response_metadata : undefined
|
||||
usage_metadata: extra.usageMetadata
|
||||
}),
|
||||
generationInfo
|
||||
})
|
||||
|
|
|
|||
|
|
@@ -41,17 +41,15 @@ class ChatHuggingFace_ChatModels implements INode {
                label: 'Model',
                name: 'model',
                type: 'string',
-                description:
-                    'Model name (e.g., deepseek-ai/DeepSeek-V3.2-Exp:novita). If model includes provider (:) or using router endpoint, leave Endpoint blank.',
-                placeholder: 'deepseek-ai/DeepSeek-V3.2-Exp:novita'
+                description: 'If using own inference endpoint, leave this blank',
+                placeholder: 'gpt2'
            },
            {
                label: 'Endpoint',
                name: 'endpoint',
                type: 'string',
                placeholder: 'https://xyz.eu-west-1.aws.endpoints.huggingface.cloud/gpt2',
-                description:
-                    'Custom inference endpoint (optional). Not needed for models with providers (:) or router endpoints. Leave blank to use Inference Providers.',
+                description: 'Using your own inference endpoint',
                optional: true
            },
            {
@@ -105,7 +103,7 @@ class ChatHuggingFace_ChatModels implements INode {
                type: 'string',
                rows: 4,
                placeholder: 'AI assistant:',
-                description: 'Sets the stop sequences to use. Use comma to separate different sequences.',
+                description: 'Sets the stop sequences to use. Use comma to seperate different sequences.',
                optional: true,
                additionalParams: true
            }
@ -126,15 +124,6 @@ class ChatHuggingFace_ChatModels implements INode {
|
|||
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
|
||||
const huggingFaceApiKey = getCredentialParam('huggingFaceApiKey', credentialData, nodeData)
|
||||
|
||||
if (!huggingFaceApiKey) {
|
||||
console.error('[ChatHuggingFace] API key validation failed: No API key found')
|
||||
throw new Error('HuggingFace API key is required. Please configure it in the credential settings.')
|
||||
}
|
||||
|
||||
if (!huggingFaceApiKey.startsWith('hf_')) {
|
||||
console.warn('[ChatHuggingFace] API key format warning: Key does not start with "hf_"')
|
||||
}
|
||||
|
||||
const obj: Partial<HFInput> = {
|
||||
model,
|
||||
apiKey: huggingFaceApiKey
|
||||
|
|
|
|||
|
|
@@ -56,9 +56,9 @@ export class HuggingFaceInference extends LLM implements HFInput {
        this.apiKey = fields?.apiKey ?? getEnvironmentVariable('HUGGINGFACEHUB_API_KEY')
        this.endpointUrl = fields?.endpointUrl
        this.includeCredentials = fields?.includeCredentials
-        if (!this.apiKey || this.apiKey.trim() === '') {
+        if (!this.apiKey) {
            throw new Error(
-                'Please set an API key for HuggingFace Hub. Either configure it in the credential settings in the UI, or set the environment variable HUGGINGFACEHUB_API_KEY.'
+                'Please set an API key for HuggingFace Hub in the environment variable HUGGINGFACEHUB_API_KEY or in the apiKey field of the HuggingFaceInference constructor.'
            )
        }
    }
@ -68,21 +68,19 @@ export class HuggingFaceInference extends LLM implements HFInput {
|
|||
}
|
||||
|
||||
invocationParams(options?: this['ParsedCallOptions']) {
|
||||
// Return parameters compatible with chatCompletion API (OpenAI-compatible format)
|
||||
const params: any = {
|
||||
temperature: this.temperature,
|
||||
max_tokens: this.maxTokens,
|
||||
stop: options?.stop ?? this.stopSequences,
|
||||
top_p: this.topP
|
||||
return {
|
||||
model: this.model,
|
||||
parameters: {
|
||||
// make it behave similar to openai, returning only the generated text
|
||||
return_full_text: false,
|
||||
temperature: this.temperature,
|
||||
max_new_tokens: this.maxTokens,
|
||||
stop: options?.stop ?? this.stopSequences,
|
||||
top_p: this.topP,
|
||||
top_k: this.topK,
|
||||
repetition_penalty: this.frequencyPenalty
|
||||
}
|
||||
}
|
||||
// Include optional parameters if they are defined
|
||||
if (this.topK !== undefined) {
|
||||
params.top_k = this.topK
|
||||
}
|
||||
if (this.frequencyPenalty !== undefined) {
|
||||
params.frequency_penalty = this.frequencyPenalty
|
||||
}
|
||||
return params
|
||||
}
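A minimal sketch, assuming the @huggingface/inference InferenceClient API used below, of how these OpenAI-compatible parameters are consumed; the model id is the provider-suffixed example from the node description:

import { InferenceClient } from '@huggingface/inference'

// Illustrative only: a direct chatCompletion call with the same parameter shape
const client = new InferenceClient(process.env.HUGGINGFACEHUB_API_KEY)
const res = await client.chatCompletion({
    model: 'deepseek-ai/DeepSeek-V3.2-Exp:novita',
    messages: [{ role: 'user', content: 'Hello' }],
    temperature: 0.7,
    max_tokens: 256
})
console.log(res.choices[0]?.message?.content)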
|
||||
|
||||
async *_streamResponseChunks(
|
||||
|
|
@ -90,109 +88,51 @@ export class HuggingFaceInference extends LLM implements HFInput {
|
|||
options: this['ParsedCallOptions'],
|
||||
runManager?: CallbackManagerForLLMRun
|
||||
): AsyncGenerator<GenerationChunk> {
|
||||
try {
|
||||
const client = await this._prepareHFInference()
|
||||
const stream = await this.caller.call(async () =>
|
||||
client.chatCompletionStream({
|
||||
model: this.model,
|
||||
messages: [{ role: 'user', content: prompt }],
|
||||
...this.invocationParams(options)
|
||||
const hfi = await this._prepareHFInference()
|
||||
const stream = await this.caller.call(async () =>
|
||||
hfi.textGenerationStream({
|
||||
...this.invocationParams(options),
|
||||
inputs: prompt
|
||||
})
|
||||
)
|
||||
for await (const chunk of stream) {
|
||||
const token = chunk.token.text
|
||||
yield new GenerationChunk({ text: token, generationInfo: chunk })
|
||||
await runManager?.handleLLMNewToken(token ?? '')
|
||||
|
||||
// stream is done
|
||||
if (chunk.generated_text)
|
||||
yield new GenerationChunk({
|
||||
text: '',
|
||||
generationInfo: { finished: true }
|
||||
})
|
||||
)
|
||||
for await (const chunk of stream) {
|
||||
const token = chunk.choices[0]?.delta?.content || ''
|
||||
if (token) {
|
||||
yield new GenerationChunk({ text: token, generationInfo: chunk })
|
||||
await runManager?.handleLLMNewToken(token)
|
||||
}
|
||||
// stream is done when finish_reason is set
|
||||
if (chunk.choices[0]?.finish_reason) {
|
||||
yield new GenerationChunk({
|
||||
text: '',
|
||||
generationInfo: { finished: true }
|
||||
})
|
||||
break
|
||||
}
|
||||
}
|
||||
} catch (error: any) {
|
||||
console.error('[ChatHuggingFace] Error in _streamResponseChunks:', error)
|
||||
// Provide more helpful error messages
|
||||
if (error?.message?.includes('endpointUrl') || error?.message?.includes('third-party provider')) {
|
||||
throw new Error(
|
||||
`Cannot use custom endpoint with model "${this.model}" that includes a provider. Please leave the Endpoint field blank in the UI. Original error: ${error.message}`
|
||||
)
|
||||
}
|
||||
throw error
|
||||
}
|
||||
}
|
||||
|
||||
/** @ignore */
|
||||
async _call(prompt: string, options: this['ParsedCallOptions']): Promise<string> {
|
||||
try {
|
||||
const client = await this._prepareHFInference()
|
||||
// Use chatCompletion for chat models (v4 supports conversational models via Inference Providers)
|
||||
const args = {
|
||||
model: this.model,
|
||||
messages: [{ role: 'user', content: prompt }],
|
||||
...this.invocationParams(options)
|
||||
}
|
||||
const res = await this.caller.callWithOptions({ signal: options.signal }, client.chatCompletion.bind(client), args)
|
||||
const content = res.choices[0]?.message?.content || ''
|
||||
if (!content) {
|
||||
console.error('[ChatHuggingFace] No content in response:', JSON.stringify(res))
|
||||
throw new Error(`No content received from HuggingFace API. Response: ${JSON.stringify(res)}`)
|
||||
}
|
||||
return content
|
||||
} catch (error: any) {
|
||||
console.error('[ChatHuggingFace] Error in _call:', error.message)
|
||||
// Provide more helpful error messages
|
||||
if (error?.message?.includes('endpointUrl') || error?.message?.includes('third-party provider')) {
|
||||
throw new Error(
|
||||
`Cannot use custom endpoint with model "${this.model}" that includes a provider. Please leave the Endpoint field blank in the UI. Original error: ${error.message}`
|
||||
)
|
||||
}
|
||||
if (error?.message?.includes('Invalid username or password') || error?.message?.includes('authentication')) {
|
||||
throw new Error(
|
||||
`HuggingFace API authentication failed. Please verify your API key is correct and starts with "hf_". Original error: ${error.message}`
|
||||
)
|
||||
}
|
||||
throw error
|
||||
}
|
||||
const hfi = await this._prepareHFInference()
|
||||
const args = { ...this.invocationParams(options), inputs: prompt }
|
||||
const res = await this.caller.callWithOptions({ signal: options.signal }, hfi.textGeneration.bind(hfi), args)
|
||||
return res.generated_text
|
||||
}
|
||||
|
||||
/** @ignore */
|
||||
private async _prepareHFInference() {
|
||||
if (!this.apiKey || this.apiKey.trim() === '') {
|
||||
console.error('[ChatHuggingFace] API key validation failed: Empty or undefined')
|
||||
throw new Error('HuggingFace API key is required. Please configure it in the credential settings.')
|
||||
}
|
||||
|
||||
const { InferenceClient } = await HuggingFaceInference.imports()
|
||||
// Use InferenceClient for chat models (works better with Inference Providers)
|
||||
const client = new InferenceClient(this.apiKey)
|
||||
|
||||
// Don't override endpoint if model uses a provider (contains ':') or if endpoint is router-based
|
||||
// When using Inference Providers, endpoint should be left blank - InferenceClient handles routing automatically
|
||||
if (
|
||||
this.endpointUrl &&
|
||||
!this.model.includes(':') &&
|
||||
!this.endpointUrl.includes('/v1/chat/completions') &&
|
||||
!this.endpointUrl.includes('router.huggingface.co')
|
||||
) {
|
||||
return client.endpoint(this.endpointUrl)
|
||||
}
|
||||
|
||||
// Return client without endpoint override - InferenceClient will use Inference Providers automatically
|
||||
return client
|
||||
const { HfInference } = await HuggingFaceInference.imports()
|
||||
const hfi = new HfInference(this.apiKey, {
|
||||
includeCredentials: this.includeCredentials
|
||||
})
|
||||
return this.endpointUrl ? hfi.endpoint(this.endpointUrl) : hfi
|
||||
}
|
||||
|
||||
/** @ignore */
|
||||
static async imports(): Promise<{
|
||||
InferenceClient: typeof import('@huggingface/inference').InferenceClient
|
||||
HfInference: typeof import('@huggingface/inference').HfInference
|
||||
}> {
|
||||
try {
|
||||
const { InferenceClient } = await import('@huggingface/inference')
|
||||
return { InferenceClient }
|
||||
const { HfInference } = await import('@huggingface/inference')
|
||||
return { HfInference }
|
||||
} catch (e) {
|
||||
throw new Error('Please install @huggingface/inference as a dependency, e.g. `pnpm install @huggingface/inference`')
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,8 +1,7 @@
|
|||
import { ChatOpenAI as LangchainChatOpenAI, ChatOpenAIFields } from '@langchain/openai'
|
||||
import { ChatOpenAI, ChatOpenAIFields } from '@langchain/openai'
|
||||
import { BaseCache } from '@langchain/core/caches'
|
||||
import { ICommonObject, IMultiModalOption, INode, INodeData, INodeParams } from '../../../src/Interface'
|
||||
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
|
||||
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
|
||||
import { ChatOpenRouter } from './FlowiseChatOpenRouter'
|
||||
|
||||
class ChatOpenRouter_ChatModels implements INode {
|
||||
label: string
|
||||
|
|
@ -24,7 +23,7 @@ class ChatOpenRouter_ChatModels implements INode {
|
|||
this.icon = 'openRouter.svg'
|
||||
this.category = 'Chat Models'
|
||||
this.description = 'Wrapper around Open Router Inference API'
|
||||
this.baseClasses = [this.type, ...getBaseClasses(LangchainChatOpenAI)]
|
||||
this.baseClasses = [this.type, ...getBaseClasses(ChatOpenAI)]
|
||||
this.credential = {
|
||||
label: 'Connect Credential',
|
||||
name: 'credential',
|
||||
|
|
@ -115,40 +114,6 @@ class ChatOpenRouter_ChatModels implements INode {
|
|||
type: 'json',
|
||||
optional: true,
|
||||
additionalParams: true
|
||||
},
|
||||
{
|
||||
label: 'Allow Image Uploads',
|
||||
name: 'allowImageUploads',
|
||||
type: 'boolean',
|
||||
description:
|
||||
'Allow image input. Refer to the <a href="https://docs.flowiseai.com/using-flowise/uploads#image" target="_blank">docs</a> for more details.',
|
||||
default: false,
|
||||
optional: true
|
||||
},
|
||||
{
|
||||
label: 'Image Resolution',
|
||||
description: 'This parameter controls the resolution at which the model views the image.',
|
||||
name: 'imageResolution',
|
||||
type: 'options',
|
||||
options: [
|
||||
{
|
||||
label: 'Low',
|
||||
name: 'low'
|
||||
},
|
||||
{
|
||||
label: 'High',
|
||||
name: 'high'
|
||||
},
|
||||
{
|
||||
label: 'Auto',
|
||||
name: 'auto'
|
||||
}
|
||||
],
|
||||
default: 'low',
|
||||
optional: false,
|
||||
show: {
|
||||
allowImageUploads: true
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
|
|
@ -165,8 +130,6 @@ class ChatOpenRouter_ChatModels implements INode {
|
|||
const basePath = (nodeData.inputs?.basepath as string) || 'https://openrouter.ai/api/v1'
|
||||
const baseOptions = nodeData.inputs?.baseOptions
|
||||
const cache = nodeData.inputs?.cache as BaseCache
|
||||
const allowImageUploads = nodeData.inputs?.allowImageUploads as boolean
|
||||
const imageResolution = nodeData.inputs?.imageResolution as string
|
||||
|
||||
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
|
||||
const openRouterApiKey = getCredentialParam('openRouterApiKey', credentialData, nodeData)
|
||||
|
|
@ -192,7 +155,7 @@ class ChatOpenRouter_ChatModels implements INode {
|
|||
try {
|
||||
parsedBaseOptions = typeof baseOptions === 'object' ? baseOptions : JSON.parse(baseOptions)
|
||||
} catch (exception) {
|
||||
throw new Error("Invalid JSON in the ChatOpenRouter's BaseOptions: " + exception)
|
||||
throw new Error("Invalid JSON in the ChatCerebras's BaseOptions: " + exception)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -203,15 +166,7 @@ class ChatOpenRouter_ChatModels implements INode {
|
|||
}
|
||||
}
|
||||
|
||||
const multiModalOption: IMultiModalOption = {
|
||||
image: {
|
||||
allowImageUploads: allowImageUploads ?? false,
|
||||
imageResolution
|
||||
}
|
||||
}
|
||||
|
||||
const model = new ChatOpenRouter(nodeData.id, obj)
|
||||
model.setMultiModalOption(multiModalOption)
|
||||
const model = new ChatOpenAI(obj)
|
||||
return model
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,29 +0,0 @@
|
|||
import { ChatOpenAI as LangchainChatOpenAI, ChatOpenAIFields } from '@langchain/openai'
|
||||
import { IMultiModalOption, IVisionChatModal } from '../../../src'
|
||||
|
||||
export class ChatOpenRouter extends LangchainChatOpenAI implements IVisionChatModal {
|
||||
configuredModel: string
|
||||
configuredMaxToken?: number
|
||||
multiModalOption: IMultiModalOption
|
||||
id: string
|
||||
|
||||
constructor(id: string, fields?: ChatOpenAIFields) {
|
||||
super(fields)
|
||||
this.id = id
|
||||
this.configuredModel = fields?.modelName ?? ''
|
||||
this.configuredMaxToken = fields?.maxTokens
|
||||
}
|
||||
|
||||
revertToOriginalModel(): void {
|
||||
this.model = this.configuredModel
|
||||
this.maxTokens = this.configuredMaxToken
|
||||
}
|
||||
|
||||
setMultiModalOption(multiModalOption: IMultiModalOption): void {
|
||||
this.multiModalOption = multiModalOption
|
||||
}
|
||||
|
||||
setVisionModel(): void {
|
||||
// pass - OpenRouter models don't need model switching
|
||||
}
|
||||
}
|
||||
|
|
@ -47,7 +47,7 @@ class Json_DocumentLoaders implements INode {
|
|||
constructor() {
|
||||
this.label = 'Json File'
|
||||
this.name = 'jsonFile'
|
||||
this.version = 3.1
|
||||
this.version = 3.0
|
||||
this.type = 'Document'
|
||||
this.icon = 'json.svg'
|
||||
this.category = 'Document Loaders'
|
||||
|
|
@ -66,14 +66,6 @@ class Json_DocumentLoaders implements INode {
|
|||
type: 'TextSplitter',
|
||||
optional: true
|
||||
},
|
||||
{
|
||||
label: 'Separate by JSON Object (JSON Array)',
|
||||
name: 'separateByObject',
|
||||
type: 'boolean',
|
||||
description: 'If enabled and the file is a JSON Array, each JSON object will be extracted as a chunk',
|
||||
optional: true,
|
||||
additionalParams: true
|
||||
},
|
||||
{
|
||||
label: 'Pointers Extraction (separated by commas)',
|
||||
name: 'pointersName',
|
||||
|
|
@ -81,10 +73,7 @@ class Json_DocumentLoaders implements INode {
|
|||
description:
|
||||
'Ex: { "key": "value" }, Pointer Extraction = "key", "value" will be extracted as pageContent of the chunk. Use comma to separate multiple pointers',
|
||||
placeholder: 'key1, key2',
|
||||
optional: true,
|
||||
hide: {
|
||||
separateByObject: true
|
||||
}
|
||||
optional: true
|
||||
},
|
||||
{
|
||||
label: 'Additional Metadata',
|
||||
|
|
@ -133,7 +122,6 @@ class Json_DocumentLoaders implements INode {
|
|||
const pointersName = nodeData.inputs?.pointersName as string
|
||||
const metadata = nodeData.inputs?.metadata
|
||||
const _omitMetadataKeys = nodeData.inputs?.omitMetadataKeys as string
|
||||
const separateByObject = nodeData.inputs?.separateByObject as boolean
|
||||
const output = nodeData.outputs?.output as string
|
||||
|
||||
let omitMetadataKeys: string[] = []
|
||||
|
|
@ -165,7 +153,7 @@ class Json_DocumentLoaders implements INode {
|
|||
if (!file) continue
|
||||
const fileData = await getFileFromStorage(file, orgId, chatflowid)
|
||||
const blob = new Blob([fileData])
|
||||
const loader = new JSONLoader(blob, pointers.length != 0 ? pointers : undefined, metadata, separateByObject)
|
||||
const loader = new JSONLoader(blob, pointers.length != 0 ? pointers : undefined, metadata)
|
||||
|
||||
if (textSplitter) {
|
||||
let splittedDocs = await loader.load()
|
||||
|
|
@ -188,7 +176,7 @@ class Json_DocumentLoaders implements INode {
|
|||
splitDataURI.pop()
|
||||
const bf = Buffer.from(splitDataURI.pop() || '', 'base64')
|
||||
const blob = new Blob([bf])
|
||||
const loader = new JSONLoader(blob, pointers.length != 0 ? pointers : undefined, metadata, separateByObject)
|
||||
const loader = new JSONLoader(blob, pointers.length != 0 ? pointers : undefined, metadata)
|
||||
|
||||
if (textSplitter) {
|
||||
let splittedDocs = await loader.load()
|
||||
|
|
@ -318,20 +306,13 @@ class TextLoader extends BaseDocumentLoader {
|
|||
class JSONLoader extends TextLoader {
|
||||
public pointers: string[]
|
||||
private metadataMapping: Record<string, string>
|
||||
private separateByObject: boolean
|
||||
|
||||
constructor(
|
||||
filePathOrBlob: string | Blob,
|
||||
pointers: string | string[] = [],
|
||||
metadataMapping: Record<string, string> = {},
|
||||
separateByObject: boolean = false
|
||||
) {
|
||||
constructor(filePathOrBlob: string | Blob, pointers: string | string[] = [], metadataMapping: Record<string, string> = {}) {
|
||||
super(filePathOrBlob)
|
||||
this.pointers = Array.isArray(pointers) ? pointers : [pointers]
|
||||
if (metadataMapping) {
|
||||
this.metadataMapping = typeof metadataMapping === 'object' ? metadataMapping : JSON.parse(metadataMapping)
|
||||
}
|
||||
this.separateByObject = separateByObject
|
||||
}
|
||||
|
||||
protected async parse(raw: string): Promise<Document[]> {
|
||||
|
|
@ -342,24 +323,14 @@ class JSONLoader extends TextLoader {
|
|||
const jsonArray = Array.isArray(json) ? json : [json]
|
||||
|
||||
for (const item of jsonArray) {
|
||||
if (this.separateByObject) {
|
||||
if (typeof item === 'object' && item !== null && !Array.isArray(item)) {
|
||||
const metadata = this.extractMetadata(item)
|
||||
const pageContent = this.formatObjectAsKeyValue(item)
|
||||
documents.push({
|
||||
pageContent,
|
||||
metadata
|
||||
})
|
||||
}
|
||||
} else {
|
||||
const content = this.extractContent(item)
|
||||
const metadata = this.extractMetadata(item)
|
||||
for (const pageContent of content) {
|
||||
documents.push({
|
||||
pageContent,
|
||||
metadata
|
||||
})
|
||||
}
|
||||
const content = this.extractContent(item)
|
||||
const metadata = this.extractMetadata(item)
|
||||
|
||||
for (const pageContent of content) {
|
||||
documents.push({
|
||||
pageContent,
|
||||
metadata
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -399,30 +370,6 @@ class JSONLoader extends TextLoader {
|
|||
return metadata
|
||||
}
|
||||
|
||||
/**
|
||||
* Formats a JSON object as readable key-value pairs
|
||||
*/
|
||||
private formatObjectAsKeyValue(obj: any, prefix: string = ''): string {
|
||||
const lines: string[] = []
|
||||
|
||||
for (const [key, value] of Object.entries(obj)) {
|
||||
const fullKey = prefix ? `${prefix}.${key}` : key
|
||||
|
||||
if (value === null || value === undefined) {
|
||||
lines.push(`${fullKey}: ${value}`)
|
||||
} else if (Array.isArray(value)) {
|
||||
lines.push(`${fullKey}: ${JSON.stringify(value)}`)
|
||||
} else if (typeof value === 'object') {
|
||||
// Recursively format nested objects
|
||||
lines.push(this.formatObjectAsKeyValue(value, fullKey))
|
||||
} else {
|
||||
lines.push(`${fullKey}: ${value}`)
|
||||
}
|
||||
}
|
||||
|
||||
return lines.join('\n')
|
||||
}
|
||||
|
||||
/**
|
||||
* If JSON pointers are specified, return all strings below any of them
|
||||
* and exclude all other nodes except if they match a JSON pointer.
|
||||
|
|
|
|||
|
|
@ -190,14 +190,11 @@ class Playwright_DocumentLoaders implements INode {
|
|||
async function playwrightLoader(url: string): Promise<Document[] | undefined> {
|
||||
try {
|
||||
let docs = []
|
||||
|
||||
const executablePath = process.env.PLAYWRIGHT_EXECUTABLE_PATH
|
||||
|
||||
const config: PlaywrightWebBaseLoaderOptions = {
|
||||
launchOptions: {
|
||||
args: ['--no-sandbox'],
|
||||
headless: true,
|
||||
executablePath: executablePath
|
||||
executablePath: process.env.PLAYWRIGHT_EXECUTABLE_FILE_PATH
|
||||
}
|
||||
}
|
||||
if (waitUntilGoToOption) {
|
||||
|
|
|
|||
|
|
@ -181,14 +181,11 @@ class Puppeteer_DocumentLoaders implements INode {
|
|||
async function puppeteerLoader(url: string): Promise<Document[] | undefined> {
|
||||
try {
|
||||
let docs: Document[] = []
|
||||
|
||||
const executablePath = process.env.PUPPETEER_EXECUTABLE_PATH
|
||||
|
||||
const config: PuppeteerWebBaseLoaderOptions = {
|
||||
launchOptions: {
|
||||
args: ['--no-sandbox'],
|
||||
headless: 'new',
|
||||
executablePath: executablePath
|
||||
executablePath: process.env.PUPPETEER_EXECUTABLE_FILE_PATH
|
||||
}
|
||||
}
|
||||
if (waitUntilGoToOption) {
|
||||
|
|
|
|||
|
|
@ -27,6 +27,8 @@ type Element = {
|
|||
}
|
||||
|
||||
export class UnstructuredLoader extends BaseDocumentLoader {
|
||||
public filePath: string
|
||||
|
||||
private apiUrl = process.env.UNSTRUCTURED_API_URL || 'https://api.unstructuredapp.io/general/v0/general'
|
||||
|
||||
private apiKey: string | undefined = process.env.UNSTRUCTURED_API_KEY
|
||||
|
|
@ -136,7 +138,7 @@ export class UnstructuredLoader extends BaseDocumentLoader {
|
|||
})
|
||||
|
||||
if (!response.ok) {
|
||||
throw new Error(`Failed to partition file with error ${response.status} and message ${await response.text()}`)
|
||||
throw new Error(`Failed to partition file ${this.filePath} with error ${response.status} and message ${await response.text()}`)
|
||||
}
|
||||
|
||||
const elements = await response.json()
|
||||
|
|
|
|||
|
|
@ -4,11 +4,15 @@ import {
|
|||
UnstructuredLoaderOptions,
|
||||
UnstructuredLoaderStrategy,
|
||||
SkipInferTableTypes,
|
||||
HiResModelName
|
||||
HiResModelName,
|
||||
UnstructuredLoader as LCUnstructuredLoader
|
||||
} from '@langchain/community/document_loaders/fs/unstructured'
|
||||
import { getCredentialData, getCredentialParam, handleEscapeCharacters } from '../../../src/utils'
|
||||
import { getFileFromStorage, INodeOutputsValue } from '../../../src'
|
||||
import { UnstructuredLoader } from './Unstructured'
|
||||
import { isPathTraversal } from '../../../src/validator'
|
||||
import sanitize from 'sanitize-filename'
|
||||
import path from 'path'
|
||||
|
||||
class UnstructuredFile_DocumentLoaders implements INode {
|
||||
label: string
|
||||
|
|
@ -40,6 +44,17 @@ class UnstructuredFile_DocumentLoaders implements INode {
|
|||
optional: true
|
||||
}
|
||||
this.inputs = [
|
||||
/** Deprecated
|
||||
{
|
||||
label: 'File Path',
|
||||
name: 'filePath',
|
||||
type: 'string',
|
||||
placeholder: '',
|
||||
optional: true,
|
||||
warning:
|
||||
'Use the File Upload instead of File path. If file is uploaded, this path is ignored. Path will be deprecated in future releases.'
|
||||
},
|
||||
*/
|
||||
{
|
||||
label: 'Files Upload',
|
||||
name: 'fileObject',
|
||||
|
|
@ -440,6 +455,7 @@ class UnstructuredFile_DocumentLoaders implements INode {
|
|||
}
|
||||
|
||||
async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
|
||||
const filePath = nodeData.inputs?.filePath as string
|
||||
const unstructuredAPIUrl = nodeData.inputs?.unstructuredAPIUrl as string
|
||||
const strategy = nodeData.inputs?.strategy as UnstructuredLoaderStrategy
|
||||
const encoding = nodeData.inputs?.encoding as string
|
||||
|
|
@ -544,8 +560,37 @@ class UnstructuredFile_DocumentLoaders implements INode {
|
|||
docs.push(...loaderDocs)
|
||||
}
|
||||
}
|
||||
} else if (filePath) {
|
||||
if (!filePath || typeof filePath !== 'string') {
|
||||
throw new Error('Invalid file path format')
|
||||
}
|
||||
|
||||
if (isPathTraversal(filePath)) {
|
||||
throw new Error('Invalid path characters detected in filePath - path traversal not allowed')
|
||||
}
|
||||
|
||||
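// Sanitize only the base file name, then rejoin it with the original directory so the path stays absolute while unsafe characters are stripped from the name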
const parsedPath = path.parse(filePath)
|
||||
const sanitizedFilename = sanitize(parsedPath.base)
|
||||
|
||||
if (!sanitizedFilename || sanitizedFilename.trim() === '') {
|
||||
throw new Error('Invalid filename after sanitization')
|
||||
}
|
||||
|
||||
const sanitizedFilePath = path.join(parsedPath.dir, sanitizedFilename)
|
||||
|
||||
if (!path.isAbsolute(sanitizedFilePath)) {
|
||||
throw new Error('File path must be absolute')
|
||||
}
|
||||
|
||||
if (sanitizedFilePath.includes('..')) {
|
||||
throw new Error('Invalid file path - directory traversal not allowed')
|
||||
}
|
||||
|
||||
const loader = new LCUnstructuredLoader(sanitizedFilePath, obj)
|
||||
const loaderDocs = await loader.load()
|
||||
docs.push(...loaderDocs)
|
||||
} else {
|
||||
throw new Error('File upload is required')
|
||||
throw new Error('File path or File upload is required')
|
||||
}
|
||||
|
||||
if (metadata) {
|
||||
|
|
|
|||
|
|
@ -1,6 +1,3 @@
|
|||
/*
|
||||
* Uncomment this if you want to use the UnstructuredFolder to load a folder from the file system
|
||||
|
||||
import { omit } from 'lodash'
|
||||
import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
|
||||
import {
|
||||
|
|
@ -519,4 +516,3 @@ class UnstructuredFolder_DocumentLoaders implements INode {
|
|||
}
|
||||
|
||||
module.exports = { nodeClass: UnstructuredFolder_DocumentLoaders }
|
||||
*/
|
||||
|
|
|
|||
|
|
@ -96,7 +96,7 @@ class AWSBedrockEmbedding_Embeddings implements INode {
|
|||
{
|
||||
label: 'Max AWS API retries',
|
||||
name: 'maxRetries',
|
||||
description: 'This will limit the number of AWS API for Titan model embeddings call retries. Used to avoid throttling.',
|
||||
description: 'This will limit the nubmer of AWS API for Titan model embeddings call retries. Used to avoid throttling.',
|
||||
type: 'number',
|
||||
optional: true,
|
||||
default: 5,
|
||||
|
|
|
|||
|
|
@ -23,22 +23,24 @@ export class HuggingFaceInferenceEmbeddings extends Embeddings implements Huggin
|
|||
this.model = fields?.model ?? 'sentence-transformers/distilbert-base-nli-mean-tokens'
|
||||
this.apiKey = fields?.apiKey ?? getEnvironmentVariable('HUGGINGFACEHUB_API_KEY')
|
||||
this.endpoint = fields?.endpoint ?? ''
|
||||
const hf = new HfInference(this.apiKey)
|
||||
// v4 uses Inference Providers by default; only override if custom endpoint provided
|
||||
this.client = this.endpoint ? hf.endpoint(this.endpoint) : hf
|
||||
this.client = new HfInference(this.apiKey)
|
||||
if (this.endpoint) this.client.endpoint(this.endpoint)
|
||||
}
|
||||
|
||||
async _embed(texts: string[]): Promise<number[][]> {
|
||||
// replace newlines, which can negatively affect performance.
|
||||
const clean = texts.map((text) => text.replace(/\n/g, ' '))
|
||||
const hf = new HfInference(this.apiKey)
|
||||
const obj: any = {
|
||||
inputs: clean
|
||||
}
|
||||
if (!this.endpoint) {
|
||||
if (this.endpoint) {
|
||||
hf.endpoint(this.endpoint)
|
||||
} else {
|
||||
obj.model = this.model
|
||||
}
|
||||
|
||||
const res = await this.caller.callWithOptions({}, this.client.featureExtraction.bind(this.client), obj)
|
||||
const res = await this.caller.callWithOptions({}, hf.featureExtraction.bind(hf), obj)
|
||||
return res as number[][]
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -39,7 +39,7 @@ class SubQuestionQueryEngine_LlamaIndex implements INode {
|
|||
this.icon = 'subQueryEngine.svg'
|
||||
this.category = 'Engine'
|
||||
this.description =
|
||||
'Breaks complex query into sub questions for each relevant data source, then gather all the intermediate responses and synthesizes a final response'
|
||||
'Breaks complex query into sub questions for each relevant data source, then gather all the intermediate reponses and synthesizes a final response'
|
||||
this.baseClasses = [this.type, 'BaseQueryEngine']
|
||||
this.tags = ['LlamaIndex']
|
||||
this.inputs = [
|
||||
|
|
|
|||
|
|
@ -78,8 +78,6 @@ export class HuggingFaceInference extends LLM implements HFInput {
|
|||
async _call(prompt: string, options: this['ParsedCallOptions']): Promise<string> {
|
||||
const { HfInference } = await HuggingFaceInference.imports()
|
||||
const hf = new HfInference(this.apiKey)
|
||||
// v4 uses Inference Providers by default; only override if custom endpoint provided
|
||||
const hfClient = this.endpoint ? hf.endpoint(this.endpoint) : hf
|
||||
const obj: any = {
|
||||
parameters: {
|
||||
// make it behave similar to openai, returning only the generated text
|
||||
|
|
@ -92,10 +90,12 @@ export class HuggingFaceInference extends LLM implements HFInput {
|
|||
},
|
||||
inputs: prompt
|
||||
}
|
||||
if (!this.endpoint) {
|
||||
if (this.endpoint) {
|
||||
hf.endpoint(this.endpoint)
|
||||
} else {
|
||||
obj.model = this.model
|
||||
}
|
||||
const res = await this.caller.callWithOptions({ signal: options.signal }, hfClient.textGeneration.bind(hfClient), obj)
|
||||
const res = await this.caller.callWithOptions({ signal: options.signal }, hf.textGeneration.bind(hf), obj)
|
||||
return res.generated_text
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -21,7 +21,6 @@ import { ChatOpenAI } from '../../chatmodels/ChatOpenAI/FlowiseChatOpenAI'
|
|||
import { ChatAnthropic } from '../../chatmodels/ChatAnthropic/FlowiseChatAnthropic'
|
||||
import { addImagesToMessages, llmSupportsVision } from '../../../src/multiModalUtils'
|
||||
import { ChatGoogleGenerativeAI } from '../../chatmodels/ChatGoogleGenerativeAI/FlowiseChatGoogleGenerativeAI'
|
||||
import { AzureChatOpenAI } from '../../chatmodels/AzureChatOpenAI/FlowiseAzureChatOpenAI'
|
||||
|
||||
const sysPrompt = `You are a supervisor tasked with managing a conversation between the following workers: {team_members}.
|
||||
Given the following user request, respond with the worker to act next.
|
||||
|
|
@ -243,7 +242,7 @@ class Supervisor_MultiAgents implements INode {
|
|||
}
|
||||
}
|
||||
})
|
||||
} else if (llm instanceof ChatOpenAI || llm instanceof AzureChatOpenAI) {
|
||||
} else if (llm instanceof ChatOpenAI) {
|
||||
let prompt = ChatPromptTemplate.fromMessages([
|
||||
['system', systemPrompt],
|
||||
new MessagesPlaceholder('messages'),
|
||||
|
|
|
|||
|
|
@ -11,7 +11,7 @@ return [
|
|||
tool_calls: [
|
||||
{
|
||||
id: "12345",
|
||||
name: "calculator",
|
||||
name: "calulator",
|
||||
args: {
|
||||
number1: 333382,
|
||||
number2: 1932,
|
||||
|
|
|
|||
|
|
@ -62,6 +62,7 @@ class MySQLRecordManager_RecordManager implements INode {
|
|||
label: 'Namespace',
|
||||
name: 'namespace',
|
||||
type: 'string',
|
||||
description: 'If not specified, chatflowid will be used',
|
||||
additionalParams: true,
|
||||
optional: true
|
||||
},
|
||||
|
|
@ -218,16 +219,7 @@ class MySQLRecordManager implements RecordManagerInterface {
|
|||
unique key \`unique_key_namespace\` (\`key\`,
|
||||
\`namespace\`));`)
|
||||
|
||||
// Add doc_id column if it doesn't exist (migration for existing tables)
|
||||
const checkColumn = await queryRunner.manager.query(
|
||||
`SELECT COUNT(1) ColumnExists FROM INFORMATION_SCHEMA.COLUMNS
|
||||
WHERE table_schema=DATABASE() AND table_name='${tableName}' AND column_name='doc_id';`
|
||||
)
|
||||
if (checkColumn[0].ColumnExists === 0) {
|
||||
await queryRunner.manager.query(`ALTER TABLE \`${tableName}\` ADD COLUMN \`doc_id\` longtext;`)
|
||||
}
|
||||
|
||||
const columns = [`updated_at`, `key`, `namespace`, `group_id`, `doc_id`]
|
||||
const columns = [`updated_at`, `key`, `namespace`, `group_id`]
|
||||
for (const column of columns) {
|
||||
// MySQL does not support 'IF NOT EXISTS' function for Index
|
||||
const Check = await queryRunner.manager.query(
|
||||
|
|
@ -269,7 +261,7 @@ class MySQLRecordManager implements RecordManagerInterface {
|
|||
}
|
||||
}
|
||||
|
||||
async update(keys: Array<{ uid: string; docId: string }> | string[], updateOptions?: UpdateOptions): Promise<void> {
|
||||
async update(keys: string[], updateOptions?: UpdateOptions): Promise<void> {
|
||||
if (keys.length === 0) {
|
||||
return
|
||||
}
|
||||
|
|
@ -285,23 +277,23 @@ class MySQLRecordManager implements RecordManagerInterface {
|
|||
throw new Error(`Time sync issue with database ${updatedAt} < ${timeAtLeast}`)
|
||||
}
|
||||
|
||||
// Handle both new format (objects with uid and docId) and old format (strings)
|
||||
const isNewFormat = keys.length > 0 && typeof keys[0] === 'object' && 'uid' in keys[0]
|
||||
const keyStrings = isNewFormat ? (keys as Array<{ uid: string; docId: string }>).map((k) => k.uid) : (keys as string[])
|
||||
const docIds = isNewFormat ? (keys as Array<{ uid: string; docId: string }>).map((k) => k.docId) : keys.map(() => null)
|
||||
const groupIds = _groupIds ?? keys.map(() => null)
|
||||
|
||||
const groupIds = _groupIds ?? keyStrings.map(() => null)
|
||||
|
||||
if (groupIds.length !== keyStrings.length) {
|
||||
throw new Error(`Number of keys (${keyStrings.length}) does not match number of group_ids (${groupIds.length})`)
|
||||
if (groupIds.length !== keys.length) {
|
||||
throw new Error(`Number of keys (${keys.length}) does not match number of group_ids (${groupIds.length})`)
|
||||
}
|
||||
|
||||
const recordsToUpsert = keyStrings.map((key, i) => [key, this.namespace, updatedAt, groupIds[i] ?? null, docIds[i] ?? null])
|
||||
const recordsToUpsert = keys.map((key, i) => [
|
||||
key,
|
||||
this.namespace,
|
||||
updatedAt,
|
||||
groupIds[i] ?? null // Ensure groupIds[i] is null if undefined
|
||||
])
|
||||
|
||||
const query = `
|
||||
INSERT INTO \`${tableName}\` (\`key\`, \`namespace\`, \`updated_at\`, \`group_id\`, \`doc_id\`)
|
||||
VALUES (?, ?, ?, ?, ?)
|
||||
ON DUPLICATE KEY UPDATE \`updated_at\` = VALUES(\`updated_at\`), \`doc_id\` = VALUES(\`doc_id\`)`
|
||||
INSERT INTO \`${tableName}\` (\`key\`, \`namespace\`, \`updated_at\`, \`group_id\`)
|
||||
VALUES (?, ?, ?, ?)
|
||||
ON DUPLICATE KEY UPDATE \`updated_at\` = VALUES(\`updated_at\`)`
|
||||
|
||||
// To handle multiple files upsert
|
||||
try {
|
||||
|
|
@ -357,13 +349,13 @@ class MySQLRecordManager implements RecordManagerInterface {
|
|||
}
|
||||
}
|
||||
|
||||
async listKeys(options?: ListKeyOptions & { docId?: string }): Promise<string[]> {
|
||||
async listKeys(options?: ListKeyOptions): Promise<string[]> {
|
||||
const dataSource = await this.getDataSource()
|
||||
const queryRunner = dataSource.createQueryRunner()
|
||||
const tableName = this.sanitizeTableName(this.tableName)
|
||||
|
||||
try {
|
||||
const { before, after, limit, groupIds, docId } = options ?? {}
|
||||
const { before, after, limit, groupIds } = options ?? {}
|
||||
let query = `SELECT \`key\` FROM \`${tableName}\` WHERE \`namespace\` = ?`
|
||||
const values: (string | number | string[])[] = [this.namespace]
|
||||
|
||||
|
|
@ -390,11 +382,6 @@ class MySQLRecordManager implements RecordManagerInterface {
|
|||
values.push(...groupIds.filter((gid): gid is string => gid !== null))
|
||||
}
|
||||
|
||||
if (docId) {
|
||||
query += ` AND \`doc_id\` = ?`
|
||||
values.push(docId)
|
||||
}
|
||||
|
||||
query += ';'
|
||||
|
||||
// Directly using try/catch with async/await for cleaner flow
|
||||
|
|
|
|||
|
|
@ -78,6 +78,7 @@ class PostgresRecordManager_RecordManager implements INode {
|
|||
label: 'Namespace',
|
||||
name: 'namespace',
|
||||
type: 'string',
|
||||
description: 'If not specified, chatflowid will be used',
|
||||
additionalParams: true,
|
||||
optional: true
|
||||
},
|
||||
|
|
@ -240,19 +241,6 @@ class PostgresRecordManager implements RecordManagerInterface {
|
|||
CREATE INDEX IF NOT EXISTS namespace_index ON "${tableName}" (namespace);
|
||||
CREATE INDEX IF NOT EXISTS group_id_index ON "${tableName}" (group_id);`)
|
||||
|
||||
// Add doc_id column if it doesn't exist (migration for existing tables)
|
||||
await queryRunner.manager.query(`
|
||||
DO $$
|
||||
BEGIN
|
||||
IF NOT EXISTS (
|
||||
SELECT 1 FROM information_schema.columns
|
||||
WHERE table_name = '${tableName}' AND column_name = 'doc_id'
|
||||
) THEN
|
||||
ALTER TABLE "${tableName}" ADD COLUMN doc_id TEXT;
|
||||
CREATE INDEX IF NOT EXISTS doc_id_index ON "${tableName}" (doc_id);
|
||||
END IF;
|
||||
END $$;`)
|
||||
|
||||
await queryRunner.release()
|
||||
} catch (e: any) {
|
||||
// This error indicates that the table already exists
|
||||
|
|
@ -298,7 +286,7 @@ class PostgresRecordManager implements RecordManagerInterface {
|
|||
return `(${placeholders.join(', ')})`
|
||||
}
|
||||
|
||||
async update(keys: Array<{ uid: string; docId: string }> | string[], updateOptions?: UpdateOptions): Promise<void> {
|
||||
async update(keys: string[], updateOptions?: UpdateOptions): Promise<void> {
|
||||
if (keys.length === 0) {
|
||||
return
|
||||
}
|
||||
|
|
@ -314,22 +302,17 @@ class PostgresRecordManager implements RecordManagerInterface {
|
|||
throw new Error(`Time sync issue with database ${updatedAt} < ${timeAtLeast}`)
|
||||
}
|
||||
|
||||
// Handle both new format (objects with uid and docId) and old format (strings)
|
||||
const isNewFormat = keys.length > 0 && typeof keys[0] === 'object' && 'uid' in keys[0]
|
||||
const keyStrings = isNewFormat ? (keys as Array<{ uid: string; docId: string }>).map((k) => k.uid) : (keys as string[])
|
||||
const docIds = isNewFormat ? (keys as Array<{ uid: string; docId: string }>).map((k) => k.docId) : keys.map(() => null)
|
||||
const groupIds = _groupIds ?? keys.map(() => null)
|
||||
|
||||
const groupIds = _groupIds ?? keyStrings.map(() => null)
|
||||
|
||||
if (groupIds.length !== keyStrings.length) {
|
||||
throw new Error(`Number of keys (${keyStrings.length}) does not match number of group_ids (${groupIds.length})`)
|
||||
if (groupIds.length !== keys.length) {
|
||||
throw new Error(`Number of keys (${keys.length}) does not match number of group_ids (${groupIds.length})`)
|
||||
}
|
||||
|
||||
const recordsToUpsert = keyStrings.map((key, i) => [key, this.namespace, updatedAt, groupIds[i], docIds[i]])
|
||||
const recordsToUpsert = keys.map((key, i) => [key, this.namespace, updatedAt, groupIds[i]])
|
||||
|
||||
const valuesPlaceholders = recordsToUpsert.map((_, j) => this.generatePlaceholderForRowAt(j, recordsToUpsert[0].length)).join(', ')
|
||||
|
||||
const query = `INSERT INTO "${tableName}" (key, namespace, updated_at, group_id, doc_id) VALUES ${valuesPlaceholders} ON CONFLICT (key, namespace) DO UPDATE SET updated_at = EXCLUDED.updated_at, doc_id = EXCLUDED.doc_id;`
|
||||
const query = `INSERT INTO "${tableName}" (key, namespace, updated_at, group_id) VALUES ${valuesPlaceholders} ON CONFLICT (key, namespace) DO UPDATE SET updated_at = EXCLUDED.updated_at;`
|
||||
try {
|
||||
await queryRunner.manager.query(query, recordsToUpsert.flat())
|
||||
await queryRunner.release()
|
||||
|
|
@ -368,8 +351,8 @@ class PostgresRecordManager implements RecordManagerInterface {
|
|||
}
|
||||
}
|
||||
|
||||
async listKeys(options?: ListKeyOptions & { docId?: string }): Promise<string[]> {
|
||||
const { before, after, limit, groupIds, docId } = options ?? {}
|
||||
async listKeys(options?: ListKeyOptions): Promise<string[]> {
|
||||
const { before, after, limit, groupIds } = options ?? {}
|
||||
const tableName = this.sanitizeTableName(this.tableName)
|
||||
|
||||
let query = `SELECT key FROM "${tableName}" WHERE namespace = $1`
|
||||
|
|
@ -400,12 +383,6 @@ class PostgresRecordManager implements RecordManagerInterface {
|
|||
index += 1
|
||||
}
|
||||
|
||||
if (docId) {
|
||||
values.push(docId)
|
||||
query += ` AND doc_id = $${index}`
|
||||
index += 1
|
||||
}
|
||||
|
||||
query += ';'
|
||||
|
||||
const dataSource = await this.getDataSource()
|
||||
|
|
|
|||
|
|
@ -51,6 +51,7 @@ class SQLiteRecordManager_RecordManager implements INode {
|
|||
label: 'Namespace',
|
||||
name: 'namespace',
|
||||
type: 'string',
|
||||
description: 'If not specified, chatflowid will be used',
|
||||
additionalParams: true,
|
||||
optional: true
|
||||
},
|
||||
|
|
@ -197,15 +198,6 @@ CREATE INDEX IF NOT EXISTS key_index ON "${tableName}" (key);
|
|||
CREATE INDEX IF NOT EXISTS namespace_index ON "${tableName}" (namespace);
|
||||
CREATE INDEX IF NOT EXISTS group_id_index ON "${tableName}" (group_id);`)
|
||||
|
||||
// Add doc_id column if it doesn't exist (migration for existing tables)
|
||||
const checkColumn = await queryRunner.manager.query(
|
||||
`SELECT COUNT(*) as count FROM pragma_table_info('${tableName}') WHERE name='doc_id';`
|
||||
)
|
||||
if (checkColumn[0].count === 0) {
|
||||
await queryRunner.manager.query(`ALTER TABLE "${tableName}" ADD COLUMN doc_id TEXT;`)
|
||||
await queryRunner.manager.query(`CREATE INDEX IF NOT EXISTS doc_id_index ON "${tableName}" (doc_id);`)
|
||||
}
|
||||
|
||||
await queryRunner.release()
|
||||
} catch (e: any) {
|
||||
// This error indicates that the table already exists
|
||||
|
|
@ -236,7 +228,7 @@ CREATE INDEX IF NOT EXISTS group_id_index ON "${tableName}" (group_id);`)
|
|||
}
|
||||
}
|
||||
|
||||
async update(keys: Array<{ uid: string; docId: string }> | string[], updateOptions?: UpdateOptions): Promise<void> {
|
||||
async update(keys: string[], updateOptions?: UpdateOptions): Promise<void> {
|
||||
if (keys.length === 0) {
|
||||
return
|
||||
}
|
||||
|
|
@ -251,23 +243,23 @@ CREATE INDEX IF NOT EXISTS group_id_index ON "${tableName}" (group_id);`)
|
|||
throw new Error(`Time sync issue with database ${updatedAt} < ${timeAtLeast}`)
|
||||
}
|
||||
|
||||
// Handle both new format (objects with uid and docId) and old format (strings)
|
||||
const isNewFormat = keys.length > 0 && typeof keys[0] === 'object' && 'uid' in keys[0]
|
||||
const keyStrings = isNewFormat ? (keys as Array<{ uid: string; docId: string }>).map((k) => k.uid) : (keys as string[])
|
||||
const docIds = isNewFormat ? (keys as Array<{ uid: string; docId: string }>).map((k) => k.docId) : keys.map(() => null)
|
||||
const groupIds = _groupIds ?? keys.map(() => null)
|
||||
|
||||
const groupIds = _groupIds ?? keyStrings.map(() => null)
|
||||
|
||||
if (groupIds.length !== keyStrings.length) {
|
||||
throw new Error(`Number of keys (${keyStrings.length}) does not match number of group_ids (${groupIds.length})`)
|
||||
if (groupIds.length !== keys.length) {
|
||||
throw new Error(`Number of keys (${keys.length}) does not match number of group_ids (${groupIds.length})`)
|
||||
}
|
||||
|
||||
const recordsToUpsert = keyStrings.map((key, i) => [key, this.namespace, updatedAt, groupIds[i] ?? null, docIds[i] ?? null])
|
||||
const recordsToUpsert = keys.map((key, i) => [
|
||||
key,
|
||||
this.namespace,
|
||||
updatedAt,
|
||||
groupIds[i] ?? null // Ensure groupIds[i] is null if undefined
|
||||
])
|
||||
|
||||
const query = `
|
||||
INSERT INTO "${tableName}" (key, namespace, updated_at, group_id, doc_id)
|
||||
VALUES (?, ?, ?, ?, ?)
|
||||
ON CONFLICT (key, namespace) DO UPDATE SET updated_at = excluded.updated_at, doc_id = excluded.doc_id`
|
||||
INSERT INTO "${tableName}" (key, namespace, updated_at, group_id)
|
||||
VALUES (?, ?, ?, ?)
|
||||
ON CONFLICT (key, namespace) DO UPDATE SET updated_at = excluded.updated_at`
|
||||
|
||||
try {
|
||||
// To handle multiple files upsert
|
||||
|
|
@ -322,8 +314,8 @@ CREATE INDEX IF NOT EXISTS group_id_index ON "${tableName}" (group_id);`)
|
|||
}
|
||||
}
|
||||
|
||||
async listKeys(options?: ListKeyOptions & { docId?: string }): Promise<string[]> {
|
||||
const { before, after, limit, groupIds, docId } = options ?? {}
|
||||
async listKeys(options?: ListKeyOptions): Promise<string[]> {
|
||||
const { before, after, limit, groupIds } = options ?? {}
|
||||
const tableName = this.sanitizeTableName(this.tableName)
|
||||
|
||||
let query = `SELECT key FROM "${tableName}" WHERE namespace = ?`
|
||||
|
|
@ -352,11 +344,6 @@ CREATE INDEX IF NOT EXISTS group_id_index ON "${tableName}" (group_id);`)
|
|||
values.push(...groupIds.filter((gid): gid is string => gid !== null))
|
||||
}
|
||||
|
||||
if (docId) {
|
||||
query += ` AND doc_id = ?`
|
||||
values.push(docId)
|
||||
}
|
||||
|
||||
query += ';'
|
||||
|
||||
const dataSource = await this.getDataSource()
|
||||
|
|
|
|||
|
|
@ -136,17 +136,17 @@ class Custom_MCP implements INode {
|
|||
}
|
||||
|
||||
let sandbox: ICommonObject = {}
|
||||
const workspaceId = options?.searchOptions?.workspaceId?._value || options?.workspaceId
|
||||
|
||||
if (mcpServerConfig.includes('$vars')) {
|
||||
const appDataSource = options.appDataSource as DataSource
|
||||
const databaseEntities = options.databaseEntities as IDatabaseEntity
|
||||
// If options.workspaceId is not set, create a new options object with the workspaceId for getVars.
|
||||
const optionsWithWorkspaceId = options.workspaceId ? options : { ...options, workspaceId }
|
||||
const variables = await getVars(appDataSource, databaseEntities, nodeData, optionsWithWorkspaceId)
|
||||
|
||||
const variables = await getVars(appDataSource, databaseEntities, nodeData, options)
|
||||
sandbox['$vars'] = prepareSandboxVars(variables)
|
||||
}
|
||||
|
||||
const workspaceId = options?.searchOptions?.workspaceId?._value || options?.workspaceId
|
||||
|
||||
let canonicalConfig
|
||||
try {
|
||||
canonicalConfig = JSON.parse(mcpServerConfig)
|
||||
|
|
|
|||
|
|
@ -0,0 +1,147 @@
|
|||
import { z } from 'zod'
|
||||
import path from 'path'
|
||||
import { StructuredTool, ToolParams } from '@langchain/core/tools'
|
||||
import { Serializable } from '@langchain/core/load/serializable'
|
||||
import { INode, INodeData, INodeParams } from '../../../src/Interface'
|
||||
import { getBaseClasses, getUserHome } from '../../../src/utils'
|
||||
import { SecureFileStore, FileSecurityConfig } from '../../../src/SecureFileStore'
|
||||
|
||||
abstract class BaseFileStore extends Serializable {
|
||||
abstract readFile(path: string): Promise<string>
|
||||
abstract writeFile(path: string, contents: string): Promise<void>
|
||||
}
|
||||
|
||||
class ReadFile_Tools implements INode {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
description: string
|
||||
type: string
|
||||
icon: string
|
||||
category: string
|
||||
baseClasses: string[]
|
||||
inputs: INodeParams[]
|
||||
warning: string
|
||||
|
||||
constructor() {
|
||||
this.label = 'Read File'
|
||||
this.name = 'readFile'
|
||||
this.version = 2.0
|
||||
this.type = 'ReadFile'
|
||||
this.icon = 'readfile.svg'
|
||||
this.category = 'Tools'
|
||||
this.warning = 'This tool can be used to read files from the disk. It is recommended to use this tool with caution.'
|
||||
this.description = 'Read file from disk'
|
||||
this.baseClasses = [this.type, 'Tool', ...getBaseClasses(ReadFileTool)]
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'Workspace Path',
|
||||
name: 'workspacePath',
|
||||
placeholder: `C:\\Users\\User\\MyProject`,
|
||||
type: 'string',
|
||||
description: 'Base workspace directory for file operations. All file paths will be relative to this directory.',
|
||||
optional: true
|
||||
},
|
||||
{
|
||||
label: 'Enforce Workspace Boundaries',
|
||||
name: 'enforceWorkspaceBoundaries',
|
||||
type: 'boolean',
|
||||
description: 'When enabled, restricts file access to the workspace directory for security. Recommended: true',
|
||||
default: true,
|
||||
optional: true
|
||||
},
|
||||
{
|
||||
label: 'Max File Size (MB)',
|
||||
name: 'maxFileSize',
|
||||
type: 'number',
|
||||
description: 'Maximum file size in megabytes that can be read',
|
||||
default: 10,
|
||||
optional: true
|
||||
},
|
||||
{
|
||||
label: 'Allowed Extensions',
|
||||
name: 'allowedExtensions',
|
||||
type: 'string',
|
||||
description: 'Comma-separated list of allowed file extensions (e.g., .txt,.json,.md). Leave empty to allow all.',
|
||||
placeholder: '.txt,.json,.md,.py,.js',
|
||||
optional: true
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
async init(nodeData: INodeData): Promise<any> {
|
||||
const workspacePath = nodeData.inputs?.workspacePath as string
|
||||
const enforceWorkspaceBoundaries = nodeData.inputs?.enforceWorkspaceBoundaries !== false // Default to true
|
||||
const maxFileSize = nodeData.inputs?.maxFileSize as number
|
||||
const allowedExtensions = nodeData.inputs?.allowedExtensions as string
|
||||
|
||||
// Parse allowed extensions
|
||||
const allowedExtensionsList = allowedExtensions ? allowedExtensions.split(',').map((ext) => ext.trim().toLowerCase()) : []
|
||||
|
||||
let store: BaseFileStore
|
||||
|
||||
if (workspacePath) {
|
||||
// Create secure file store with workspace boundaries
|
||||
const config: FileSecurityConfig = {
|
||||
workspacePath,
|
||||
enforceWorkspaceBoundaries,
|
||||
maxFileSize: maxFileSize ? maxFileSize * 1024 * 1024 : undefined, // Convert MB to bytes
|
||||
allowedExtensions: allowedExtensionsList.length > 0 ? allowedExtensionsList : undefined
|
||||
}
|
||||
store = new SecureFileStore(config)
|
||||
} else {
|
||||
// No workspace path given: fall back to a restricted ~/.flowise workspace, or to unrestricted access if boundaries are disabled
|
||||
if (enforceWorkspaceBoundaries) {
|
||||
const fallbackWorkspacePath = path.join(getUserHome(), '.flowise')
|
||||
console.warn(`[ReadFile] No workspace path specified, using ${fallbackWorkspacePath} with security restrictions`)
|
||||
store = new SecureFileStore({
|
||||
workspacePath: fallbackWorkspacePath,
|
||||
enforceWorkspaceBoundaries: true,
|
||||
maxFileSize: maxFileSize ? maxFileSize * 1024 * 1024 : undefined,
|
||||
allowedExtensions: allowedExtensionsList.length > 0 ? allowedExtensionsList : undefined
|
||||
})
|
||||
} else {
|
||||
console.warn('[ReadFile] SECURITY WARNING: Workspace boundaries disabled - unrestricted file access enabled')
|
||||
store = SecureFileStore.createUnsecure()
|
||||
}
|
||||
}
|
||||
|
||||
return new ReadFileTool({ store })
|
||||
}
|
||||
}
|
||||
|
||||
interface ReadFileParams extends ToolParams {
|
||||
store: BaseFileStore
|
||||
}
|
||||
|
||||
/**
|
||||
* Class for reading files from the disk. Extends the StructuredTool
|
||||
* class.
|
||||
*/
|
||||
export class ReadFileTool extends StructuredTool {
|
||||
static lc_name() {
|
||||
return 'ReadFileTool'
|
||||
}
|
||||
|
||||
schema = z.object({
|
||||
file_path: z.string().describe('name of file')
|
||||
}) as any
|
||||
|
||||
name = 'read_file'
|
||||
|
||||
description = 'Read file from disk'
|
||||
|
||||
store: BaseFileStore
|
||||
|
||||
constructor({ store, ...rest }: ReadFileParams) {
|
||||
super(rest)
|
||||
|
||||
this.store = store
|
||||
}
|
||||
|
||||
async _call({ file_path }: z.infer<typeof this.schema>) {
|
||||
return await this.store.readFile(file_path)
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { nodeClass: ReadFile_Tools }
|
||||
|
|
@ -0,0 +1,4 @@
|
|||
<svg width="32" height="32" viewBox="0 0 32 32" fill="none" xmlns="http://www.w3.org/2000/svg">
|
||||
<path d="M18 5H9C7.89543 5 7 5.89543 7 7V25C7 26.1046 7.89543 27 9 27H12M18 5L25 12M18 5V12H25M25 12V25C25 26.1046 24.1046 27 23 27H20" stroke="black" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/>
|
||||
<path d="M16 17V29M16 17L13 20.1361M16 17L19 20.1361" stroke="black" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/>
|
||||
</svg>
|
||||
|
After Width: | Height: | Size: 455 B |
|
|
@ -0,0 +1,149 @@
|
|||
import { z } from 'zod'
|
||||
import path from 'path'
|
||||
import { StructuredTool, ToolParams } from '@langchain/core/tools'
|
||||
import { Serializable } from '@langchain/core/load/serializable'
|
||||
import { INode, INodeData, INodeParams } from '../../../src/Interface'
|
||||
import { getBaseClasses, getUserHome } from '../../../src/utils'
|
||||
import { SecureFileStore, FileSecurityConfig } from '../../../src/SecureFileStore'
|
||||
|
||||
abstract class BaseFileStore extends Serializable {
|
||||
abstract readFile(path: string): Promise<string>
|
||||
abstract writeFile(path: string, contents: string): Promise<void>
|
||||
}
|
||||
|
||||
class WriteFile_Tools implements INode {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
description: string
|
||||
type: string
|
||||
icon: string
|
||||
category: string
|
||||
baseClasses: string[]
|
||||
inputs: INodeParams[]
|
||||
warning: string
|
||||
|
||||
constructor() {
|
||||
this.label = 'Write File'
|
||||
this.name = 'writeFile'
|
||||
this.version = 2.0
|
||||
this.type = 'WriteFile'
|
||||
this.icon = 'writefile.svg'
|
||||
this.category = 'Tools'
|
||||
this.warning = 'This tool can be used to write files to the disk. It is recommended to use this tool with caution.'
|
||||
this.description = 'Write file to disk'
|
||||
this.baseClasses = [this.type, 'Tool', ...getBaseClasses(WriteFileTool)]
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'Workspace Path',
|
||||
name: 'workspacePath',
|
||||
placeholder: `C:\\Users\\User\\MyProject`,
|
||||
type: 'string',
|
||||
description: 'Base workspace directory for file operations. All file paths will be relative to this directory.',
|
||||
optional: true
|
||||
},
|
||||
{
|
||||
label: 'Enforce Workspace Boundaries',
|
||||
name: 'enforceWorkspaceBoundaries',
|
||||
type: 'boolean',
|
||||
description: 'When enabled, restricts file access to the workspace directory for security. Recommended: true',
|
||||
default: true,
|
||||
optional: true
|
||||
},
|
||||
{
|
||||
label: 'Max File Size (MB)',
|
||||
name: 'maxFileSize',
|
||||
type: 'number',
|
||||
description: 'Maximum file size in megabytes that can be written',
|
||||
default: 10,
|
||||
optional: true
|
||||
},
|
||||
{
|
||||
label: 'Allowed Extensions',
|
||||
name: 'allowedExtensions',
|
||||
type: 'string',
|
||||
description: 'Comma-separated list of allowed file extensions (e.g., .txt,.json,.md). Leave empty to allow all.',
|
||||
placeholder: '.txt,.json,.md,.py,.js',
|
||||
optional: true
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
async init(nodeData: INodeData): Promise<any> {
|
||||
const workspacePath = nodeData.inputs?.workspacePath as string
|
||||
const enforceWorkspaceBoundaries = nodeData.inputs?.enforceWorkspaceBoundaries !== false // Default to true
|
||||
const maxFileSize = nodeData.inputs?.maxFileSize as number
|
||||
const allowedExtensions = nodeData.inputs?.allowedExtensions as string
|
||||
|
||||
// Parse allowed extensions
|
||||
const allowedExtensionsList = allowedExtensions ? allowedExtensions.split(',').map((ext) => ext.trim().toLowerCase()) : []
|
||||
|
||||
let store: BaseFileStore
|
||||
|
||||
if (workspacePath) {
|
||||
// Create secure file store with workspace boundaries
|
||||
const config: FileSecurityConfig = {
|
||||
workspacePath,
|
||||
enforceWorkspaceBoundaries,
|
||||
maxFileSize: maxFileSize ? maxFileSize * 1024 * 1024 : undefined, // Convert MB to bytes
|
||||
allowedExtensions: allowedExtensionsList.length > 0 ? allowedExtensionsList : undefined
|
||||
}
|
||||
store = new SecureFileStore(config)
|
||||
} else {
|
||||
// No workspace path given: fall back to a restricted ~/.flowise workspace, or to unrestricted access if boundaries are disabled
|
||||
if (enforceWorkspaceBoundaries) {
|
||||
const fallbackWorkspacePath = path.join(getUserHome(), '.flowise')
|
||||
console.warn(`[WriteFile] No workspace path specified, using ${fallbackWorkspacePath} with security restrictions`)
|
||||
store = new SecureFileStore({
|
||||
workspacePath: fallbackWorkspacePath,
|
||||
enforceWorkspaceBoundaries: true,
|
||||
maxFileSize: maxFileSize ? maxFileSize * 1024 * 1024 : undefined,
|
||||
allowedExtensions: allowedExtensionsList.length > 0 ? allowedExtensionsList : undefined
|
||||
})
|
||||
} else {
|
||||
console.warn('[WriteFile] SECURITY WARNING: Workspace boundaries disabled - unrestricted file access enabled')
|
||||
store = SecureFileStore.createUnsecure()
|
||||
}
|
||||
}
|
||||
|
||||
return new WriteFileTool({ store })
|
||||
}
|
||||
}
|
||||
|
||||
interface WriteFileParams extends ToolParams {
|
||||
store: BaseFileStore
|
||||
}
|
||||
|
||||
/**
|
||||
* Class for writing data to files on the disk. Extends the StructuredTool
|
||||
* class.
|
||||
*/
|
||||
export class WriteFileTool extends StructuredTool {
|
||||
static lc_name() {
|
||||
return 'WriteFileTool'
|
||||
}
|
||||
|
||||
schema = z.object({
|
||||
file_path: z.string().describe('name of file'),
|
||||
text: z.string().describe('text to write to file')
|
||||
}) as any
|
||||
|
||||
name = 'write_file'
|
||||
|
||||
description = 'Write file to disk'
|
||||
|
||||
store: BaseFileStore
|
||||
|
||||
constructor({ store, ...rest }: WriteFileParams) {
|
||||
super(rest)
|
||||
|
||||
this.store = store
|
||||
}
|
||||
|
||||
async _call({ file_path, text }: z.infer<typeof this.schema>) {
|
||||
await this.store.writeFile(file_path, text)
|
||||
return `File written to ${file_path} successfully.`
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { nodeClass: WriteFile_Tools }
|
||||
|
|
@ -0,0 +1,4 @@
|
|||
<svg width="32" height="32" viewBox="0 0 32 32" fill="none" xmlns="http://www.w3.org/2000/svg">
|
||||
<path d="M25 18V25C25 26.1046 24.1046 27 23 27H9C7.89543 27 7 26.1046 7 25V7C7 5.89543 7.89543 5 9 5H18L19 6" stroke="black" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/>
|
||||
<path d="M12 19.3284V22H14.6716C15.202 22 15.7107 21.7893 16.0858 21.4142L24.5858 12.9142C25.3668 12.1332 25.3668 10.8668 24.5858 10.0858L23.9142 9.41421C23.1332 8.63316 21.8668 8.63317 21.0858 9.41421L12.5858 17.9142C12.2107 18.2893 12 18.798 12 19.3284Z" stroke="black" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/>
|
||||
</svg>
|
||||
|
After Width: | Height: | Size: 632 B |
|
|
@ -84,16 +84,11 @@ class CustomFunction_Utilities implements INode {
|
|||
|
||||
const variables = await getVars(appDataSource, databaseEntities, nodeData, options)
|
||||
const flow = {
|
||||
input,
|
||||
chatflowId: options.chatflowid,
|
||||
sessionId: options.sessionId,
|
||||
chatId: options.chatId,
|
||||
rawOutput: options.postProcessing?.rawOutput || '',
|
||||
chatHistory: options.postProcessing?.chatHistory || [],
|
||||
sourceDocuments: options.postProcessing?.sourceDocuments,
|
||||
usedTools: options.postProcessing?.usedTools,
|
||||
artifacts: options.postProcessing?.artifacts,
|
||||
fileAnnotations: options.postProcessing?.fileAnnotations
|
||||
rawOutput: options.rawOutput || '',
|
||||
input
|
||||
}
|
||||
|
||||
let inputVars: ICommonObject = {}
|
||||
|
|
|
|||
|
|
@ -186,11 +186,7 @@ class Chroma_VectorStores implements INode {
|
|||
const vectorStoreName = collectionName
|
||||
await recordManager.createSchema()
|
||||
;(recordManager as any).namespace = (recordManager as any).namespace + '_' + vectorStoreName
|
||||
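// When a docId is supplied, limit the listed keys to that document's records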
const filterKeys: ICommonObject = {}
|
||||
if (options.docId) {
|
||||
filterKeys.docId = options.docId
|
||||
}
|
||||
const keys: string[] = await recordManager.listKeys(filterKeys)
|
||||
const keys: string[] = await recordManager.listKeys({})
|
||||
|
||||
const chromaStore = new ChromaExtended(embeddings, obj)
|
||||
|
||||
|
|
|
|||
|
|
@ -198,11 +198,7 @@ class Elasticsearch_VectorStores implements INode {
|
|||
const vectorStoreName = indexName
|
||||
await recordManager.createSchema()
|
||||
;(recordManager as any).namespace = (recordManager as any).namespace + '_' + vectorStoreName
|
||||
const filterKeys: ICommonObject = {}
|
||||
if (options.docId) {
|
||||
filterKeys.docId = options.docId
|
||||
}
|
||||
const keys: string[] = await recordManager.listKeys(filterKeys)
|
||||
const keys: string[] = await recordManager.listKeys({})
|
||||
|
||||
await vectorStore.delete({ ids: keys })
|
||||
await recordManager.deleteKeys(keys)
|
||||
|
|
|
|||
|
|
@ -212,11 +212,7 @@ class Pinecone_VectorStores implements INode {
|
|||
const vectorStoreName = pineconeNamespace
|
||||
await recordManager.createSchema()
|
||||
;(recordManager as any).namespace = (recordManager as any).namespace + '_' + vectorStoreName
|
||||
const filterKeys: ICommonObject = {}
|
||||
if (options.docId) {
|
||||
filterKeys.docId = options.docId
|
||||
}
|
||||
const keys: string[] = await recordManager.listKeys(filterKeys)
|
||||
const keys: string[] = await recordManager.listKeys({})
|
||||
|
||||
await pineconeStore.delete({ ids: keys })
|
||||
await recordManager.deleteKeys(keys)
|
||||
|
|
|
|||
|
|
@ -49,7 +49,7 @@ class Postgres_VectorStores implements INode {
|
|||
constructor() {
|
||||
this.label = 'Postgres'
|
||||
this.name = 'postgres'
|
||||
this.version = 7.1
|
||||
this.version = 7.0
|
||||
this.type = 'Postgres'
|
||||
this.icon = 'postgres.svg'
|
||||
this.category = 'Vector Stores'
|
||||
|
|
@ -173,15 +173,6 @@ class Postgres_VectorStores implements INode {
|
|||
additionalParams: true,
|
||||
optional: true
|
||||
},
|
||||
{
|
||||
label: 'Upsert Batch Size',
|
||||
name: 'batchSize',
|
||||
type: 'number',
|
||||
step: 1,
|
||||
description: 'Upsert in batches of size N',
|
||||
additionalParams: true,
|
||||
optional: true
|
||||
},
|
||||
{
|
||||
label: 'Additional Configuration',
|
||||
name: 'additionalConfig',
|
||||
|
|
@@ -241,7 +232,6 @@ class Postgres_VectorStores implements INode {
            const docs = nodeData.inputs?.document as Document[]
            const recordManager = nodeData.inputs?.recordManager
            const isFileUploadEnabled = nodeData.inputs?.fileUpload as boolean
            const _batchSize = nodeData.inputs?.batchSize
            const vectorStoreDriver: VectorStoreDriver = Postgres_VectorStores.getDriverFromConfig(nodeData, options)

            const flattenDocs = docs && docs.length ? flatten(docs) : []

@@ -275,15 +265,7 @@ class Postgres_VectorStores implements INode {

                return res
            } else {
                if (_batchSize) {
                    const batchSize = parseInt(_batchSize, 10)
                    for (let i = 0; i < finalDocs.length; i += batchSize) {
                        const batch = finalDocs.slice(i, i + batchSize)
                        await vectorStoreDriver.fromDocuments(batch)
                    }
                } else {
                    await vectorStoreDriver.fromDocuments(finalDocs)
                }
                await vectorStoreDriver.fromDocuments(finalDocs)

                return { numAdded: finalDocs.length, addedDocs: finalDocs }
            }

@@ -303,11 +285,7 @@ class Postgres_VectorStores implements INode {
        const vectorStoreName = tableName
        await recordManager.createSchema()
        ;(recordManager as any).namespace = (recordManager as any).namespace + '_' + vectorStoreName
        const filterKeys: ICommonObject = {}
        if (options.docId) {
            filterKeys.docId = options.docId
        }
        const keys: string[] = await recordManager.listKeys(filterKeys)
        const keys: string[] = await recordManager.listKeys({})

        await vectorStore.delete({ ids: keys })
        await recordManager.deleteKeys(keys)

@@ -5,11 +5,6 @@ import { TypeORMVectorStore, TypeORMVectorStoreArgs, TypeORMVectorStoreDocument
import { VectorStore } from '@langchain/core/vectorstores'
import { Document } from '@langchain/core/documents'
import { Pool } from 'pg'
import { v4 as uuid } from 'uuid'

type TypeORMAddDocumentOptions = {
    ids?: string[]
}

export class TypeORMDriver extends VectorStoreDriver {
    protected _postgresConnectionOptions: DataSourceOptions

@@ -100,45 +95,15 @@ export class TypeORMDriver extends VectorStoreDriver {
                try {
                    instance.appDataSource.getRepository(instance.documentEntity).delete(ids)
                } catch (e) {
                    console.error('Failed to delete', e)
                    console.error('Failed to delete')
                }
            }
        }

        instance.addVectors = async (
            vectors: number[][],
            documents: Document[],
            documentOptions?: TypeORMAddDocumentOptions
        ): Promise<void> => {
            const rows = vectors.map((embedding, idx) => {
                const embeddingString = `[${embedding.join(',')}]`
                const documentRow = {
                    id: documentOptions?.ids?.length ? documentOptions.ids[idx] : uuid(),
                    pageContent: documents[idx].pageContent,
                    embedding: embeddingString,
                    metadata: documents[idx].metadata
                }
                return documentRow
            })
        const baseAddVectorsFn = instance.addVectors.bind(instance)

            const documentRepository = instance.appDataSource.getRepository(instance.documentEntity)
            const _batchSize = this.nodeData.inputs?.batchSize
            const chunkSize = _batchSize ? parseInt(_batchSize, 10) : 500

            for (let i = 0; i < rows.length; i += chunkSize) {
                const chunk = rows.slice(i, i + chunkSize)
                try {
                    await documentRepository.save(chunk)
                } catch (e) {
                    console.error(e)
                    throw new Error(`Error inserting: ${chunk[0].pageContent}`)
                }
            }
        }

        instance.addDocuments = async (documents: Document[], options?: { ids?: string[] }): Promise<void> => {
            const texts = documents.map(({ pageContent }) => pageContent)
            return (instance.addVectors as any)(await this.getEmbeddings().embedDocuments(texts), documents, options)
        instance.addVectors = async (vectors, documents) => {
            return baseAddVectorsFn(vectors, this.sanitizeDocuments(documents))
        }

        return instance

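A condensed sketch of the insert path in the TypeORM driver hunk above: embeddings are serialized as pgvector-style text literals and saved through the repository in fixed-size chunks. The row type and repository shape below are illustrative; the real entity and repository come from TypeORMVectorStore.

```typescript
import { v4 as uuid } from 'uuid'

// Illustrative row shape for the embeddings table.
interface DocumentRow {
    id: string
    pageContent: string
    embedding: string // pgvector text literal, e.g. '[0.1,0.2,0.3]'
    metadata: Record<string, any>
}

function toRows(vectors: number[][], docs: { pageContent: string; metadata: Record<string, any> }[]): DocumentRow[] {
    return vectors.map((embedding, idx) => ({
        id: uuid(),
        pageContent: docs[idx].pageContent,
        embedding: `[${embedding.join(',')}]`, // serialize the vector as a pgvector text literal
        metadata: docs[idx].metadata
    }))
}

// Save in chunks so a single INSERT never carries thousands of rows (the default mirrors the 500 above).
async function saveInChunks(
    repo: { save(rows: DocumentRow[]): Promise<unknown> },
    rows: DocumentRow[],
    chunkSize = 500
): Promise<void> {
    for (let i = 0; i < rows.length; i += chunkSize) {
        await repo.save(rows.slice(i, i + chunkSize))
    }
}
```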
@@ -385,11 +385,7 @@ class Qdrant_VectorStores implements INode {
        const vectorStoreName = collectionName
        await recordManager.createSchema()
        ;(recordManager as any).namespace = (recordManager as any).namespace + '_' + vectorStoreName
        const filterKeys: ICommonObject = {}
        if (options.docId) {
            filterKeys.docId = options.docId
        }
        const keys: string[] = await recordManager.listKeys(filterKeys)
        const keys: string[] = await recordManager.listKeys({})

        await vectorStore.delete({ ids: keys })
        await recordManager.deleteKeys(keys)

@@ -197,11 +197,7 @@ class Supabase_VectorStores implements INode {
        const vectorStoreName = tableName + '_' + queryName
        await recordManager.createSchema()
        ;(recordManager as any).namespace = (recordManager as any).namespace + '_' + vectorStoreName
        const filterKeys: ICommonObject = {}
        if (options.docId) {
            filterKeys.docId = options.docId
        }
        const keys: string[] = await recordManager.listKeys(filterKeys)
        const keys: string[] = await recordManager.listKeys({})

        await supabaseStore.delete({ ids: keys })
        await recordManager.deleteKeys(keys)

@@ -0,0 +1,816 @@
import { Document } from '@langchain/core/documents'
import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams, IndexingResult } from '../../../src/Interface'
import { Embeddings } from '@langchain/core/embeddings'
import * as teradatasql from 'teradatasql'
import { getCredentialData, getCredentialParam } from '../../../src/utils'

class Teradata_VectorStores implements INode {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    badge: string
    baseClasses: string[]
    credential: INodeParams
    inputs: INodeParams[]
    outputs: INodeOutputsValue[]

    constructor() {
        this.label = 'teradata'
        this.name = 'teradata'
        this.version = 1.0
        this.type = 'teradata'
        this.icon = 'teradata.svg'
        this.category = 'Vector Stores'
        this.description = 'Upsert embedded data and perform similarity search upon query using Teradata Enterprise Vector Store'
        this.baseClasses = [this.type, 'BaseRetriever']
        this.credential = {
            label: 'Connect Credential',
            name: 'credential',
            type: 'credential',
            credentialNames: ['teradataVectorStoreApiCredentials']
        }
        this.inputs = [
            {
                label: 'Document',
                name: 'document',
                type: 'Document',
                list: true
            },
            {
                label: 'Embeddings',
                name: 'embeddings',
                type: 'Embeddings'
            },
            {
                label: 'Vector_Store_Name',
                name: 'vectorStoreName',
                description: 'Teradata Vector Store Name',
                placeholder: `Vector_Store_Name`,
                type: 'string'
            },
            {
                label: 'Database',
                name: 'database',
                description: 'Database for Teradata Vector Store',
                placeholder: 'Database',
                type: 'string'
            },
            {
                label: 'Embeddings_Table_Name',
                name: 'embeddingsTableName',
                description: 'Table name for storing embeddings',
                placeholder: 'Embeddings_Table_Name',
                type: 'string'
            },
            {
                label: 'Vector_Store_Description',
                name: 'vectorStoreDescription',
                description: 'Teradata Vector Store Description',
                placeholder: `Vector_Store_Description`,
                type: 'string',
                additionalParams: true,
                optional: true
            },
            {
                label: 'Search_Algorithm',
                name: 'searchAlgorithm',
                description: 'Search Algorithm for Vector Store',
                placeholder: 'Search_Algorithm',
                type: 'string',
                additionalParams: true,
                optional: true
            },
            {
                label: 'Distance_Metric',
                name: 'distanceMetric',
                description: 'Distance Metric to be used for distance calculation between vectors',
                placeholder: 'Distance_Metric',
                type: 'string',
                additionalParams: true,
                optional: true
            },
            {
                label: 'Initial_Centroids_Method',
                name: 'initialCentroidsMethod',
                description: 'Algorithm to be used for initializing the cluster centroids for Search Algorithm KMEANS',
                placeholder: 'Initial_Centroids_Method',
                type: 'string',
                additionalParams: true,
                optional: true
            },
            {
                label: 'Train_NumCluster',
                name: 'trainNumCluster',
                description: 'Number of clusters to be trained for Search Algorithm KMEANS',
                placeholder: 'Train_NumCluster',
                type: 'string',
                additionalParams: true,
                optional: true
            },
            {
                label: 'MaxIterNum',
                name: 'maxIterNum',
                description: 'Maximum number of iterations to be run during training for Search Algorithm KMEANS',
                placeholder: 'MaxIterNum',
                type: 'string',
                additionalParams: true,
                optional: true
            },
            {
                label: 'Stop_Threshold',
                name: 'stopThreshold',
                description: 'Threshold value at which training should be stopped for Search Algorithm KMEANS',
                placeholder: 'Stop_Threshold',
                type: 'string',
                additionalParams: true,
                optional: true
            },
            {
                label: 'Seed',
                name: 'seed',
                description: 'Seed value to be used for random number generation for Search Algorithm KMEANS',
                placeholder: 'Seed',
                type: 'string',
                additionalParams: true,
                optional: true
            },
            {
                label: 'Num_Init',
                name: 'numInit',
                description:
                    'number of times the k-means algorithm should run with different initial centroid seeds for Search Algorithm KMEANS',
                placeholder: 'Num_Init',
                type: 'string',
                additionalParams: true,
                optional: true
            },
            {
                label: 'Top_K',
                name: 'topK',
                description: 'Number of top results to fetch. Default to 10',
                placeholder: 'Top_K',
                type: 'string',
                additionalParams: true,
                optional: true
            },
            {
                label: 'Search_Threshold',
                name: 'searchThreshold',
                description: 'Threshold value to consider for matching tables/views while searching',
                placeholder: 'Search_Threshold',
                type: 'string',
                additionalParams: true,
                optional: true
            },
            {
                label: 'Search_NumCluster',
                name: 'searchNumCluster',
                description: 'Number of clusters to be considered while searching for Search Algorithm KMEANS',
                placeholder: 'Search_NumCluster',
                type: 'string',
                additionalParams: true,
                optional: true
            },
            {
                label: 'Ef_Search',
                name: 'efSearch',
                description: 'Number of neighbors to be considered during search in HNSW graph for Search Algorithm HNSW',
                placeholder: 'Ef_Search',
                type: 'string',
                additionalParams: true,
                optional: true
            },
            {
                label: 'Num_Layer',
                name: 'numLayer',
                description: 'Number of layers in the HNSW graph for Search Algorithm HNSW',
                placeholder: 'Num_Layer',
                type: 'string',
                additionalParams: true,
                optional: true
            },
            {
                label: 'Ef_Construction',
                name: 'efConstruction',
                description: 'Number of neighbors to be considered during construction of the HNSW graph for Search Algorithm HNSW',
                placeholder: 'Ef_Construction',
                type: 'string',
                additionalParams: true,
                optional: true
            },
            {
                label: 'Num_ConnPerNode',
                name: 'numConnPerNode',
                description: 'Number of connections per node in the HNSW graph during construction for Search Algorithm HNSW',
                placeholder: 'Num_ConnPerNode',
                type: 'string',
                additionalParams: true,
                optional: true
            },
            {
                label: 'MaxNum_ConnPerNode',
                name: 'maxNumConnPerNode',
                description: 'Maximum number of connections per node in the HNSW graph during construction for Search Algorithm HNSW',
                placeholder: 'MaxNum_ConnPerNode',
                type: 'string',
                additionalParams: true,
                optional: true
            },
            {
                label: 'Apply_Heuristics',
                name: 'applyHeuristics',
                description:
                    'Specifies whether to apply heuristics optimizations during construction of the HNSW graph for Search Algorithm HNSW',
                placeholder: 'Apply_Heuristics',
                type: 'string',
                additionalParams: true,
                optional: true
            },
            {
                label: 'Rerank_Weight',
                name: 'rerankWeight',
                description: 'Weight to be used for reranking the search results',
                placeholder: 'Rerank_Weight',
                type: 'string',
                additionalParams: true,
                optional: true
            },
            {
                label: 'Relevance_Top_K',
                name: 'relevanceTopK',
                description: 'Number of top similarity matches to be considered for reranking',
                placeholder: 'Relevance_Top_K',
                type: 'string',
                additionalParams: true,
                optional: true
            },
            {
                label: 'Relevance_Search_Threshold',
                name: 'relevanceSearchThreshold',
                description: 'Threshold value to consider for matching tables/views while reranking',
                placeholder: 'Relevance_Search_Threshold',
                type: 'string',
                additionalParams: true,
                optional: true
            }
        ]
        this.outputs = [
            {
                label: 'Teradata Retriever',
                name: 'retriever',
                baseClasses: this.baseClasses
            },
            {
                label: 'Teradata Vector Store',
                name: 'vectorStore',
                baseClasses: [this.type, ...this.baseClasses]
            }
        ]
    }

    //@ts-ignore
    vectorStoreMethods = {
        async upsert(nodeData: INodeData, options: ICommonObject): Promise<Partial<IndexingResult>> {
            const docs = nodeData.inputs?.document as Document[]
            const embeddings = nodeData.inputs?.embeddings as Embeddings
            const embeddingsTableName = nodeData.inputs?.embeddingsTableName as string
            const vectorStoreName = nodeData.inputs?.vectorStoreName as string
            const database = nodeData.inputs?.database as string

            const vectorStoreDescription = (nodeData.inputs?.vectorStoreDescription as string) || null
            const searchAlgorithm = (nodeData.inputs?.searchAlgorithm as string) || null
            const distanceMetric = (nodeData.inputs?.distanceMetric as string) || null
            const initialCentroidsMethod = (nodeData.inputs?.initialCentroidsMethod as string) || null
            const trainNumCluster = parseInt(nodeData.inputs?.trainNumCluster as string) || null
            const maxIterNum = parseInt(nodeData.inputs?.maxIterNum as string) || null
            const stopThreshold = parseFloat(nodeData.inputs?.stopThreshold as string) || null
            const seed = parseInt(nodeData.inputs?.seed as string) || null
            const numInit = parseInt(nodeData.inputs?.numInit as string) || null
            const topK = parseInt(nodeData.inputs?.topK as string) || 10
            const searchThreshold = parseFloat(nodeData.inputs?.searchThreshold as string) || null
            const searchNumCluster = parseInt(nodeData.inputs?.searchNumCluster as string) || null
            const efSearch = parseInt(nodeData.inputs?.efSearch as string) || null
            const numLayer = parseInt(nodeData.inputs?.numLayer as string) || null
            const efConstruction = parseInt(nodeData.inputs?.efConstruction as string) || null
            const numConnPerNode = parseInt(nodeData.inputs?.numConnPerNode as string) || null
            const maxNumConnPerNode = parseInt(nodeData.inputs?.maxNumConnPerNode as string) || null
            const applyHeuristics = (nodeData.inputs?.applyHeuristics as string)?.toLowerCase() === 'true' || null
            const rerankWeight = parseFloat(nodeData.inputs?.rerankWeight as string) || null
            const relevanceTopK = parseInt(nodeData.inputs?.relevanceTopK as string) || null
            const relevanceSearchThreshold = parseFloat(nodeData.inputs?.relevanceSearchThreshold as string) || null
            const credentialData = await getCredentialData(nodeData.credential ?? '', options)

            // Get authentication parameters with fallback to direct inputs
            const user = getCredentialParam('tdUsername', credentialData, nodeData) || null
            const password = getCredentialParam('tdPassword', credentialData, nodeData) || null
            const host = getCredentialParam('tdHostIp', credentialData, nodeData) || null
            const baseURL = getCredentialParam('baseURL', credentialData, nodeData) || null

            // JWT authentication parameters - prioritize credential store
            const providedJwtToken = getCredentialParam('jwtToken', credentialData, nodeData) || null

            if (!docs || docs.length === 0) {
                throw new Error('No documents provided for upsert operation')
            }

            if (!embeddings) {
                throw new Error('Embeddings are required for upsert operation')
            }

            let jwtToken = null
            if (providedJwtToken) {
                jwtToken = providedJwtToken
            }

            // Generate embeddings
            const embedded_vectors = await embeddings.embedDocuments(docs.map((doc) => doc.pageContent))
            if (embedded_vectors.length !== docs.length) {
                throw new Error('The number of embedded vectors does not match the number of documents.')
            }

            const embeddings_dims = embedded_vectors[0].length

            // Create Teradata connection
            const connection = new teradatasql.TeradataConnection()
            let cur = null
            let tempTableName = ''
            let embeddingsTableCreated = false

            try {
                // Connect to Teradata
                connection.connect({
                    host: host,
                    user: user,
                    password: password,
                    database: database
                })

                cur = connection.cursor()

                // Start transaction
                connection.autocommit = false

                // Create temporary embeddings table with VARBYTE first
                tempTableName = `${embeddingsTableName}_temp_${Date.now()}`
                const createTempTableSql = `
                    CREATE MULTISET TABLE ${tempTableName}
                    (
                        element_id INTEGER,
                        chunks VARCHAR(32000) CHARACTER SET UNICODE,
                        embedding VARBYTE(64000)
                    );
                `

                try {
                    cur.execute(createTempTableSql)
                    // Commit the DDL statement
                    connection.commit()
                } catch (error: any) {
                    throw new Error(`Failed to create temporary table ${tempTableName}: ${error.message}`)
                }

                // Insert documents and embeddings into the temporary table using FastLoad
                const insertSql = `
                    {fn teradata_require_fastload}INSERT INTO ${tempTableName} (?, ?, ?)`

                const insertDataArr: any[][] = []
                for (let i = 0; i < docs.length; i++) {
                    const doc = docs[i]
                    const embedding = embedded_vectors[i]
                    const elementId = i

                    // Convert embedding array of doubles to byte array for VARBYTE column
                    const embeddingBuffer = Buffer.alloc(embedding.length * 8) // 8 bytes per double
                    for (let j = 0; j < embedding.length; j++) {
                        embeddingBuffer.writeDoubleLE(embedding[j], j * 8)
                    }

                    insertDataArr.push([elementId, doc.pageContent, embeddingBuffer])
                }

                try {
                    cur.execute(insertSql, insertDataArr)
                    // Commit the insert operation
                    connection.commit()
                } catch (error: any) {
                    console.error(`Failed to insert documents into temporary table: ${error.message}`)
                    throw error
                }

                // Create the final table with VECTOR datatype using the original embeddings table name
                const createFinalTableSql = `
                    CREATE MULTISET TABLE ${embeddingsTableName}
                    (
                        element_id INTEGER,
                        chunks VARCHAR(32000) CHARACTER SET UNICODE,
                        embedding VECTOR
                    ) no primary index;
                `

                try {
                    cur.execute(createFinalTableSql)
                    embeddingsTableCreated = true
                    // Commit the DDL statement
                    connection.commit()
                } catch (error: any) {
                    throw new Error(`Failed to create final embeddings table ${embeddingsTableName}: ${error.message}`)
                }

                // Load data from temporary VARBYTE table to final VECTOR table with casting
                const loadFinalTableSql = `
                    INSERT INTO ${embeddingsTableName} (element_id, chunks, embedding)
                    SELECT
                        element_id,
                        chunks,
                        CAST(embedding AS VECTOR)
                    FROM ${tempTableName};
                `

                try {
                    cur.execute(loadFinalTableSql)
                } catch (error: any) {
                    console.error(`Failed to load data into final table: ${error.message}`)
                    throw new Error(`Failed to load data into final table: ${error.message}`)
                }

                // Drop the temporary table
                try {
                    cur.execute(`DROP TABLE ${tempTableName}`)
                    tempTableName = '' // Clear the temp table name since it's been dropped
                } catch (error: any) {
                    console.error(`Failed to drop temporary table: ${error.message}`)
                    throw new Error(`Failed to drop temporary table: ${error.message}`)
                }

                // Commit the transaction
                connection.commit()
                connection.autocommit = true // Re-enable autocommit

                // Continue with the original API-based vector store upload for compatibility
                const data = {
                    database_name: database
                }

                // Determine authentication method and headers
                let authHeaders: Record<string, string> = {}
                if (jwtToken) {
                    authHeaders = {
                        Authorization: `Bearer ${jwtToken}`,
                        'Content-Type': 'application/json'
                    }
                } else {
                    // Encode the credentials string using Base64
                    const credentials: string = `${user}:${password}`
                    const encodedCredentials: string = Buffer.from(credentials).toString('base64')
                    authHeaders = {
                        Authorization: `Basic ${encodedCredentials}`,
                        'Content-Type': 'application/json'
                    }
                }

                const sessionUrl = baseURL + (baseURL.endsWith('/') ? '' : '/') + 'data-insights/api/v1/session'
                const response = await fetch(sessionUrl, {
                    method: 'POST',
                    headers: authHeaders,
                    body: JSON.stringify(data)
                })

                if (!response.ok) {
                    throw new Error(`Failed to create session: ${response.status}`)
                }

                // Extract session_id from Set-Cookie header
                const setCookie = response.headers.get('set-cookie')
                let session_id = ''
                if (setCookie) {
                    const match = setCookie.match(/session_id=([^;]+)/)
                    if (match) {
                        session_id = match[1]
                    }
                }

                // Utility function to filter out null/undefined values
                const filterNullValues = (obj: Record<string, any>): Record<string, any> => {
                    return Object.fromEntries(Object.entries(obj).filter(([_, value]) => value !== null && value !== undefined))
                }

                const vsParameters = filterNullValues({
                    search_algorithm: searchAlgorithm,
                    top_k: topK,
                    embeddings_dims: embeddings_dims,
                    metric: distanceMetric,
                    initial_centroids_method: initialCentroidsMethod,
                    train_numcluster: trainNumCluster,
                    max_iternum: maxIterNum,
                    stop_threshold: stopThreshold,
                    seed: seed,
                    num_init: numInit,
                    search_threshold: searchThreshold,
                    search_num_cluster: searchNumCluster,
                    ef_search: efSearch,
                    num_layer: numLayer,
                    ef_construction: efConstruction,
                    num_connpernode: numConnPerNode,
                    maxnum_connpernode: maxNumConnPerNode,
                    apply_heuristics: applyHeuristics,
                    rerank_weight: rerankWeight,
                    relevance_top_k: relevanceTopK,
                    relevance_search_threshold: relevanceSearchThreshold,
                    description: vectorStoreDescription
                })

                const vsIndex = filterNullValues({
                    target_database: database,
                    object_names: [embeddingsTableName],
                    key_columns: ['element_id'],
                    data_columns: ['embedding'],
                    vector_column: 'vector_index',
                    is_embedded: true,
                    is_normalized: false,
                    metadata_columns: ['chunks'],
                    metadata_descriptions: ['Content or Chunk of the document']
                })

                const formData = new FormData()
                formData.append('vs_parameters', JSON.stringify(vsParameters))
                formData.append('vs_index', JSON.stringify(vsIndex))

                const vectorstoresUrl =
                    baseURL + (baseURL.endsWith('/') ? '' : '/') + 'data-insights/api/v1/vectorstores/' + vectorStoreName

                // Prepare headers for vectorstores API call
                let vectorstoreHeaders: Record<string, string> = {}
                if (jwtToken) {
                    vectorstoreHeaders = {
                        Authorization: `Bearer ${jwtToken}`,
                        Cookie: `session_id=${session_id}`
                    }
                } else {
                    const credentials: string = `${user}:${password}`
                    const encodedCredentials: string = Buffer.from(credentials).toString('base64')
                    vectorstoreHeaders = {
                        Authorization: `Basic ${encodedCredentials}`,
                        Cookie: `session_id=${session_id}`
                    }
                }

                const upsertResponse = await fetch(vectorstoresUrl, {
                    method: 'POST',
                    headers: vectorstoreHeaders,
                    body: formData,
                    credentials: 'include'
                })

                if (!upsertResponse.ok) {
                    throw new Error(`Failed to upsert documents: ${upsertResponse.statusText}`)
                }

                return { numAdded: docs.length, addedDocs: docs as Document<Record<string, any>>[] }
            } catch (e: any) {
                // Rollback transaction on any error
                try {
                    if (connection && !connection.autocommit) {
                        connection.rollback()
                        connection.autocommit = true
                    }

                    // Clean up temporary table if it exists
                    if (tempTableName && cur) {
                        try {
                            cur.execute(`DROP TABLE ${tempTableName}`)
                        } catch (cleanupError: any) {
                            console.warn(`Failed to clean up temporary table: ${cleanupError.message}`)
                        }
                    }

                    // Clean up embeddings table if it was created during this transaction
                    if (embeddingsTableCreated && cur) {
                        try {
                            cur.execute(`DROP TABLE ${embeddingsTableName}`)
                        } catch (cleanupError: any) {
                            console.warn(`Failed to clean up embeddings table: ${cleanupError.message}`)
                        }
                    }
                } catch (rollbackError: any) {
                    console.error(`Failed to rollback transaction: ${rollbackError.message}`)
                }

                throw new Error(e.message || e)
            } finally {
                if (cur) {
                    cur.close()
                }
                // Close the connection
                if (connection) {
                    connection.close()
                }
            }
        }
    }

    async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
        const log_level = 0
        const embeddings = nodeData.inputs?.embeddings as Embeddings
        const vectorStoreName = nodeData.inputs?.vectorStoreName as string
        const database = nodeData.inputs?.database as string

        // Optional parameters for vector store configuration
        const topK = parseInt(nodeData.inputs?.topK as string) || 10
        const credentialData = await getCredentialData(nodeData.credential ?? '', options)

        // Get authentication parameters with fallback to direct inputs
        const user = getCredentialParam('tdUsername', credentialData, nodeData) || null
        const password = getCredentialParam('tdPassword', credentialData, nodeData) || null
        const baseURL = getCredentialParam('baseURL', credentialData, nodeData) || null

        // JWT authentication parameters - prioritize credential store
        const providedJwtToken = getCredentialParam('jwtToken', credentialData, nodeData) || null

        // Check if JWT authentication should be used
        let jwtToken = null
        if (providedJwtToken) {
            jwtToken = providedJwtToken
        }

        // Determine authentication headers
        let authHeaders: Record<string, string> = {}
        if (jwtToken) {
            authHeaders = {
                Authorization: `Bearer ${jwtToken}`,
                'Content-Type': 'application/json'
            }
        } else {
            const credentials = `${user}:${password}`
            const encodedCredentials = Buffer.from(credentials).toString('base64')
            authHeaders = {
                Authorization: `Basic ${encodedCredentials}`,
                'Content-Type': 'application/json'
            }
        }

        const sessionData = {
            database_name: database
        }

        const sessionUrl = baseURL + (baseURL.endsWith('/') ? '' : '/') + 'data-insights/api/v1/session'
        const sessionResponse = await fetch(sessionUrl, {
            method: 'POST',
            headers: authHeaders,
            body: JSON.stringify(sessionData)
        })

        if (!sessionResponse.ok) {
            throw new Error(`Failed to create session: ${sessionResponse.status}`)
        }

        // Extract session_id from Set-Cookie header
        const setCookie = sessionResponse.headers.get('set-cookie')
        let session_id = ''
        if (setCookie) {
            const match = setCookie.match(/session_id=([^;]+)/)
            if (match) {
                session_id = match[1]
            }
        }

        // Helper function for similarity search
        const performSimilaritySearch = async (query: string): Promise<Document[]> => {
            try {
                // Generate embeddings for the query
                const queryEmbedding = await embeddings.embedQuery(query)
                if (!queryEmbedding || queryEmbedding.length === 0) {
                    throw new Error('Failed to generate query embedding')
                }
                const queryEmbeddingString = queryEmbedding.join(',')
                // Prepare the search request
                const searchData = {
                    question_vector: queryEmbeddingString
                }

                // Prepare headers for search API call
                let searchHeaders: Record<string, string> = {}
                if (jwtToken) {
                    searchHeaders = {
                        'Content-Type': 'application/json',
                        Authorization: `Bearer ${jwtToken}`,
                        Cookie: `session_id=${session_id}`
                    }
                } else {
                    const credentials = `${user}:${password}`
                    const encodedCredentials = Buffer.from(credentials).toString('base64')
                    searchHeaders = {
                        'Content-Type': 'application/json',
                        Authorization: `Basic ${encodedCredentials}`,
                        Cookie: `session_id=${session_id}`
                    }
                }

                const searchUrl = `${baseURL}${
                    baseURL.endsWith('/') ? '' : '/'
                }data-insights/api/v1/vectorstores/${vectorStoreName}/similarity-search?log_level=${log_level}`
                const searchResponse = await fetch(searchUrl, {
                    method: 'POST',
                    headers: searchHeaders,
                    body: JSON.stringify(searchData),
                    credentials: 'include'
                })

                if (!searchResponse.ok) {
                    throw new Error(`Search failed: ${searchResponse.statusText}`)
                }

                const searchResults = await searchResponse.json()

                return (
                    searchResults.similar_objects_list?.map(
                        (result: any) =>
                            new Document({
                                pageContent: result.chunks || '',
                                metadata: {
                                    score: result.score || 0,
                                    source: vectorStoreName,
                                    database: result.DataBaseName,
                                    table: result.TableName,
                                    id: result.element_id
                                }
                            })
                    ) || []
                )
            } catch (error) {
                console.error('Error during similarity search:', error)
                throw error
            }
        }

        // Create vector store object following Flowise pattern
        const vectorStore = {
            async similaritySearch(query: string): Promise<Document[]> {
                return performSimilaritySearch(query)
            },

            async similaritySearchWithScore(query: string): Promise<[Document, number][]> {
                const docs = await performSimilaritySearch(query)
                return docs.map((doc) => [doc, doc.metadata.score || 0])
            },

            // Add invoke method directly to vectorStore
            async invoke(query: string): Promise<Document[]> {
                return performSimilaritySearch(query)
            },

            async getRelevantDocuments(query: string): Promise<Document[]> {
                return performSimilaritySearch(query)
            },

            async _getRelevantDocuments(query: string): Promise<Document[]> {
                return performSimilaritySearch(query)
            },

            asRetriever() {
                return {
                    async getRelevantDocuments(query: string): Promise<Document[]> {
                        return performSimilaritySearch(query)
                    },

                    async invoke(query: string): Promise<Document[]> {
                        return performSimilaritySearch(query)
                    },

                    async _getRelevantDocuments(query: string): Promise<Document[]> {
                        return performSimilaritySearch(query)
                    }
                }
            }
        }

        // Create retriever using the vectorStore methods
        const retriever = {
            async getRelevantDocuments(query: string): Promise<Document[]> {
                return vectorStore.getRelevantDocuments(query)
            },

            async invoke(query: string): Promise<Document[]> {
                return vectorStore.invoke(query)
            },

            async _getRelevantDocuments(query: string): Promise<Document[]> {
                return vectorStore._getRelevantDocuments(query)
            }
        }

        if (nodeData.outputs?.output === 'retriever') {
            return retriever
        } else if (nodeData.outputs?.output === 'vectorStore') {
            ;(vectorStore as any).k = topK
            return vectorStore
        }

        return vectorStore
    }
}

module.exports = { nodeClass: Teradata_VectorStores }

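A small sketch of the staging format used in the Teradata upsert above: each embedding is packed as little-endian 64-bit doubles into a Buffer for the VARBYTE column, then CAST to VECTOR in SQL. The unpack helper is illustrative only and is not part of the node.

```typescript
// Pack an embedding for the VARBYTE staging column (8 bytes per float64, little-endian).
function packEmbedding(embedding: number[]): Buffer {
    const buf = Buffer.alloc(embedding.length * 8)
    embedding.forEach((v, i) => buf.writeDoubleLE(v, i * 8))
    return buf
}

// Inverse operation, shown for clarity: recover the doubles from the packed bytes.
function unpackEmbedding(buf: Buffer): number[] {
    const out: number[] = []
    for (let offset = 0; offset < buf.length; offset += 8) {
        out.push(buf.readDoubleLE(offset))
    }
    return out
}
```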
@@ -0,0 +1,19 @@
<svg width="64" height="64" viewBox="0 0 64 64" fill="none" xmlns="http://www.w3.org/2000/svg">
    <g filter="url(#filter0_d_15769_12621)">
        <path d="M49.3232 8H14.6768C13.1984 8 12 9.19843 12 10.6768V45.3232C12 46.8016 13.1984 48 14.6768 48H49.3232C50.8016 48 52 46.8016 52 45.3232V10.6768C52 9.19843 50.8016 8 49.3232 8Z" fill="#FF5F02"/>
        <path d="M25.098 32.467V15.5882H30.1292V20.2286H35.7465V24.6834H30.1292V32.467C30.1292 34.4794 31.1745 35.1364 32.6447 35.1364H35.7391V39.5863H32.6447C27.4915 39.5814 25.098 37.3369 25.098 32.467Z" fill="white"/>
        <path d="M37.8688 37.376C37.8688 36.668 38.1501 35.989 38.6507 35.4884C39.1513 34.9878 39.8303 34.7066 40.5383 34.7066C41.2462 34.7066 41.9252 34.9878 42.4258 35.4884C42.9265 35.989 43.2077 36.668 43.2077 37.376C43.2077 38.084 42.9265 38.7629 42.4258 39.2636C41.9252 39.7642 41.2462 40.0454 40.5383 40.0454C39.8303 40.0454 39.1513 39.7642 38.6507 39.2636C38.1501 38.7629 37.8688 38.084 37.8688 37.376Z" fill="white"/>
    </g>
    <defs>
        <filter id="filter0_d_15769_12621" x="0" y="0" width="64" height="64" filterUnits="userSpaceOnUse" color-interpolation-filters="sRGB">
            <feFlood flood-opacity="0" result="BackgroundImageFix"/>
            <feColorMatrix in="SourceAlpha" type="matrix" values="0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 127 0" result="hardAlpha"/>
            <feOffset dy="4"/>
            <feGaussianBlur stdDeviation="6"/>
            <feComposite in2="hardAlpha" operator="out"/>
            <feColorMatrix type="matrix" values="0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0.12 0"/>
            <feBlend mode="normal" in2="BackgroundImageFix" result="effect1_dropShadow_15769_12621"/>
            <feBlend mode="normal" in="SourceGraphic" in2="effect1_dropShadow_15769_12621" result="shape"/>
        </filter>
    </defs>
</svg>
After Width: | Height: | Size: 1.6 KiB

@@ -187,11 +187,7 @@ class Upstash_VectorStores implements INode {
        const vectorStoreName = UPSTASH_VECTOR_REST_URL
        await recordManager.createSchema()
        ;(recordManager as any).namespace = (recordManager as any).namespace + '_' + vectorStoreName
        const filterKeys: ICommonObject = {}
        if (options.docId) {
            filterKeys.docId = options.docId
        }
        const keys: string[] = await recordManager.listKeys(filterKeys)
        const keys: string[] = await recordManager.listKeys({})

        await upstashStore.delete({ ids: keys })
        await recordManager.deleteKeys(keys)

@@ -252,11 +252,7 @@ class Weaviate_VectorStores implements INode {
        const vectorStoreName = weaviateTextKey ? weaviateIndex + '_' + weaviateTextKey : weaviateIndex
        await recordManager.createSchema()
        ;(recordManager as any).namespace = (recordManager as any).namespace + '_' + vectorStoreName
        const filterKeys: ICommonObject = {}
        if (options.docId) {
            filterKeys.docId = options.docId
        }
        const keys: string[] = await recordManager.listKeys(filterKeys)
        const keys: string[] = await recordManager.listKeys({})

        await weaviateStore.delete({ ids: keys })
        await recordManager.deleteKeys(keys)

@@ -1,6 +1,6 @@
{
    "name": "flowise-components",
    "version": "3.0.11",
    "version": "3.0.9",
    "description": "Flowiseai Components",
    "main": "dist/src/index",
    "types": "dist/src/index.d.ts",

@@ -42,8 +42,7 @@
    "@google-ai/generativelanguage": "^2.5.0",
    "@google-cloud/storage": "^7.15.2",
    "@google/generative-ai": "^0.24.0",
    "@grpc/grpc-js": "^1.10.10",
    "@huggingface/inference": "^4.13.2",
    "@huggingface/inference": "^2.6.1",
    "@langchain/anthropic": "0.3.33",
    "@langchain/aws": "^0.1.11",
    "@langchain/baidu-qianfan": "^0.1.0",

@@ -74,20 +73,6 @@
    "@modelcontextprotocol/server-slack": "^2025.1.17",
    "@notionhq/client": "^2.2.8",
    "@opensearch-project/opensearch": "^1.2.0",
    "@opentelemetry/api": "1.9.0",
    "@opentelemetry/auto-instrumentations-node": "^0.52.0",
    "@opentelemetry/core": "1.27.0",
    "@opentelemetry/exporter-metrics-otlp-grpc": "0.54.0",
    "@opentelemetry/exporter-metrics-otlp-http": "0.54.0",
    "@opentelemetry/exporter-metrics-otlp-proto": "0.54.0",
    "@opentelemetry/exporter-trace-otlp-grpc": "0.54.0",
    "@opentelemetry/exporter-trace-otlp-http": "0.54.0",
    "@opentelemetry/exporter-trace-otlp-proto": "0.54.0",
    "@opentelemetry/resources": "1.27.0",
    "@opentelemetry/sdk-metrics": "1.27.0",
    "@opentelemetry/sdk-node": "^0.54.0",
    "@opentelemetry/sdk-trace-base": "1.27.0",
    "@opentelemetry/semantic-conventions": "1.27.0",
    "@pinecone-database/pinecone": "4.0.0",
    "@qdrant/js-client-rest": "^1.9.0",
    "@stripe/agent-toolkit": "^0.1.20",

@@ -159,6 +144,7 @@
    "sanitize-filename": "^1.6.3",
    "srt-parser-2": "^1.2.3",
    "supergateway": "3.0.1",
    "teradatasql": "^20.0.40",
    "typeorm": "^0.3.6",
    "weaviate-ts-client": "^1.1.0",
    "winston": "^3.9.0",

@@ -0,0 +1,167 @@
import { Serializable } from '@langchain/core/load/serializable'
import { NodeFileStore } from 'langchain/stores/file/node'
import { isUnsafeFilePath, isWithinWorkspace } from './validator'
import * as path from 'path'
import * as fs from 'fs'

/**
 * Security configuration for file operations
 */
export interface FileSecurityConfig {
    /** Base workspace path - all file operations are restricted to this directory */
    workspacePath: string
    /** Whether to enforce workspace boundaries (default: true) */
    enforceWorkspaceBoundaries?: boolean
    /** Maximum file size in bytes (default: 10MB) */
    maxFileSize?: number
    /** Allowed file extensions (if empty, all extensions allowed) */
    allowedExtensions?: string[]
    /** Blocked file extensions */
    blockedExtensions?: string[]
}

/**
 * Secure file store that enforces workspace boundaries and validates file operations
 */
export class SecureFileStore extends Serializable {
    lc_namespace = ['flowise', 'components', 'stores', 'file']

    private config: Required<FileSecurityConfig>
    private nodeFileStore: NodeFileStore

    constructor(config: FileSecurityConfig) {
        super()

        // Set default configuration
        this.config = {
            workspacePath: config.workspacePath,
            enforceWorkspaceBoundaries: config.enforceWorkspaceBoundaries ?? true,
            maxFileSize: config.maxFileSize ?? 10 * 1024 * 1024, // 10MB default
            allowedExtensions: config.allowedExtensions ?? [],
            blockedExtensions: config.blockedExtensions ?? [
                '.exe',
                '.bat',
                '.cmd',
                '.sh',
                '.ps1',
                '.vbs',
                '.scr',
                '.com',
                '.pif',
                '.dll',
                '.sys',
                '.msi',
                '.jar'
            ]
        }

        // Validate workspace path
        if (!this.config.workspacePath || !path.isAbsolute(this.config.workspacePath)) {
            throw new Error('Workspace path must be an absolute path')
        }

        // Ensure workspace directory exists
        if (!fs.existsSync(this.config.workspacePath)) {
            throw new Error(`Workspace directory does not exist: ${this.config.workspacePath}`)
        }

        // Initialize the underlying NodeFileStore with workspace path
        this.nodeFileStore = new NodeFileStore(this.config.workspacePath)
    }

    /**
     * Validates a file path against security policies
     */
    private validateFilePath(filePath: string): void {
        // Check for unsafe path patterns
        if (isUnsafeFilePath(filePath)) {
            throw new Error(`Unsafe file path detected: ${filePath}`)
        }

        // Enforce workspace boundaries if enabled
        if (this.config.enforceWorkspaceBoundaries) {
            if (!isWithinWorkspace(filePath, this.config.workspacePath)) {
                throw new Error(`File path outside workspace boundaries: ${filePath}`)
            }
        }

        // Check file extension
        const ext = path.extname(filePath).toLowerCase()

        // Check blocked extensions
        if (this.config.blockedExtensions.includes(ext)) {
            throw new Error(`File extension not allowed: ${ext}`)
        }

        // Check allowed extensions (if specified)
        if (this.config.allowedExtensions.length > 0 && !this.config.allowedExtensions.includes(ext)) {
            throw new Error(`File extension not in allowed list: ${ext}`)
        }
    }

    /**
     * Validates file size
     */
    private validateFileSize(content: string): void {
        const sizeInBytes = Buffer.byteLength(content, 'utf8')
        if (sizeInBytes > this.config.maxFileSize) {
            throw new Error(`File size exceeds maximum allowed size: ${sizeInBytes} > ${this.config.maxFileSize}`)
        }
    }

    /**
     * Reads a file with security validation
     */
    async readFile(filePath: string): Promise<string> {
        this.validateFilePath(filePath)

        try {
            return await this.nodeFileStore.readFile(filePath)
        } catch (error) {
            // Provide generic error message to avoid information leakage
            throw new Error(`Failed to read file: ${path.basename(filePath)}`)
        }
    }

    /**
     * Writes a file with security validation
     */
    async writeFile(filePath: string, contents: string): Promise<void> {
        this.validateFilePath(filePath)
        this.validateFileSize(contents)

        try {
            // Ensure the directory exists
            const dir = path.dirname(path.resolve(this.config.workspacePath, filePath))
            if (!fs.existsSync(dir)) {
                fs.mkdirSync(dir, { recursive: true })
            }

            await this.nodeFileStore.writeFile(filePath, contents)
        } catch (error) {
            // Provide generic error message to avoid information leakage
            throw new Error(`Failed to write file: ${path.basename(filePath)}`)
        }
    }

    /**
     * Gets the workspace configuration
     */
    getConfig(): Readonly<Required<FileSecurityConfig>> {
        return { ...this.config }
    }

    /**
     * Creates a secure file store with workspace enforcement disabled (for backward compatibility)
     * WARNING: This should only be used when absolutely necessary and with proper user consent
     */
    static createUnsecure(basePath?: string): SecureFileStore {
        const workspacePath = basePath || process.cwd()
        return new SecureFileStore({
            workspacePath,
            enforceWorkspaceBoundaries: false,
            maxFileSize: 50 * 1024 * 1024, // 50MB for insecure mode
            blockedExtensions: [] // No extension restrictions in insecure mode
        })
    }
}

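A brief usage sketch for the SecureFileStore above; the import path, workspace path, and limits are illustrative assumptions, not part of the diff:

```typescript
import { SecureFileStore } from './storageUtils' // illustrative import path

async function demo(): Promise<string> {
    const store = new SecureFileStore({
        workspacePath: '/srv/flowise/workspace', // must already exist and be absolute
        allowedExtensions: ['.txt', '.md', '.json'],
        maxFileSize: 1024 * 1024 // 1 MB cap for this example
    })

    // Paths are validated: traversal such as '../../etc/passwd' or a blocked extension throws.
    await store.writeFile('notes/today.md', '# Daily notes')
    return store.readFile('notes/today.md')
}
```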
@@ -1774,7 +1774,7 @@ export class AnalyticHandler {
        }

        if (Object.prototype.hasOwnProperty.call(this.handlers, 'lunary')) {
            const toolEventId: string = this.handlers['lunary'].toolEvent[returnIds['lunary'].toolEvent]
            const toolEventId: string = this.handlers['lunary'].llmEvent[returnIds['lunary'].toolEvent]
            const monitor = this.handlers['lunary'].client

            if (monitor && toolEventId) {

@@ -8,10 +8,6 @@ import { IndexingResult } from './Interface'

type Metadata = Record<string, unknown>

export interface ExtendedRecordManagerInterface extends RecordManagerInterface {
    update(keys: Array<{ uid: string; docId: string }> | string[], updateOptions?: Record<string, any>): Promise<void>
}

type StringOrDocFunc = string | ((doc: DocumentInterface) => string)

export interface HashedDocumentInterface extends DocumentInterface {

@@ -211,7 +207,7 @@ export const _isBaseDocumentLoader = (arg: any): arg is BaseDocumentLoader => {

interface IndexArgs {
    docsSource: BaseDocumentLoader | DocumentInterface[]
    recordManager: ExtendedRecordManagerInterface
    recordManager: RecordManagerInterface
    vectorStore: VectorStore
    options?: IndexOptions
}

@@ -279,7 +275,7 @@ export async function index(args: IndexArgs): Promise<IndexingResult> {

    const uids: string[] = []
    const docsToIndex: DocumentInterface[] = []
    const docsToUpdate: Array<{ uid: string; docId: string }> = []
    const docsToUpdate: string[] = []
    const seenDocs = new Set<string>()
    hashedDocs.forEach((hashedDoc, i) => {
        const docExists = batchExists[i]

@@ -287,7 +283,7 @@ export async function index(args: IndexArgs): Promise<IndexingResult> {
            if (forceUpdate) {
                seenDocs.add(hashedDoc.uid)
            } else {
                docsToUpdate.push({ uid: hashedDoc.uid, docId: hashedDoc.metadata.docId as string })
                docsToUpdate.push(hashedDoc.uid)
                return
            }
        }

@@ -312,7 +308,7 @@ export async function index(args: IndexArgs): Promise<IndexingResult> {
    }

    await recordManager.update(
        hashedDocs.map((doc) => ({ uid: doc.uid, docId: doc.metadata.docId as string })),
        hashedDocs.map((doc) => doc.uid),
        { timeAtLeast: indexStartDt, groupIds: sourceIds }
    )

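One side of the hunks above carries a docId next to each record uid so later cleanup can be scoped per document. A sketch of that extended update call, with types simplified and the interface shape taken from the diff:

```typescript
// Sketch only: the key shape passed to update() on the branch with ExtendedRecordManagerInterface.
interface ExtendedRecordManagerLike {
    update(keys: Array<{ uid: string; docId: string }> | string[], updateOptions?: Record<string, any>): Promise<void>
}

async function recordUpserts(
    recordManager: ExtendedRecordManagerLike,
    hashedDocs: Array<{ uid: string; metadata: { docId?: string } }>,
    indexStartDt: number,
    sourceIds: Array<string | null>
): Promise<void> {
    await recordManager.update(
        hashedDocs.map((doc) => ({ uid: doc.uid, docId: doc.metadata.docId as string })),
        { timeAtLeast: indexStartDt, groupIds: sourceIds }
    )
}
```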
@@ -8,7 +8,6 @@ import { cloneDeep, omit, get } from 'lodash'
import TurndownService from 'turndown'
import { DataSource, Equal } from 'typeorm'
import { ICommonObject, IDatabaseEntity, IFileUpload, IMessage, INodeData, IVariable, MessageContentImageUrl } from './Interface'
import { BaseChatModel } from '@langchain/core/language_models/chat_models'
import { AES, enc } from 'crypto-js'
import { AIMessage, HumanMessage, BaseMessage } from '@langchain/core/messages'
import { Document } from '@langchain/core/documents'

@@ -1501,29 +1500,9 @@ export const executeJavaScriptCode = async (

    const sbx = await Sandbox.create({ apiKey: process.env.E2B_APIKEY, timeoutMs })

    // Determine which libraries to install
    const librariesToInstall = new Set<string>(libraries)

    // Auto-detect required libraries from code
    // Extract required modules from import/require statements
    const importRegex = /(?:import\s+.*?\s+from\s+['"]([^'"]+)['"]|require\s*\(\s*['"]([^'"]+)['"]\s*\))/g
    let match
    while ((match = importRegex.exec(code)) !== null) {
        const moduleName = match[1] || match[2]
        // Extract base module name (e.g., 'typeorm' from 'typeorm/something')
        const baseModuleName = moduleName.split('/')[0]
        librariesToInstall.add(baseModuleName)
    }

    // Install libraries
    for (const library of librariesToInstall) {
        // Validate library name to prevent command injection.
        const validPackageNameRegex = /^(@[a-z0-9-~][a-z0-9-._~]*\/)?[a-z0-9-~][a-z0-9-._~]*$/
        if (validPackageNameRegex.test(library)) {
            await sbx.commands.run(`npm install ${library}`)
        } else {
            console.warn(`[Sandbox] Skipping installation of invalid module: ${library}`)
        }
    for (const library of libraries) {
        await sbx.commands.run(`npm install ${library}`)
    }

    // Separate imports from the rest of the code for proper ES6 module structure

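A self-contained sketch of the detection-plus-validation step added above, factored into a helper (the helper name is illustrative):

```typescript
// Illustrative helper: collect npm packages referenced by a code string and keep only
// names that look like valid npm package identifiers (guards against command injection).
function collectInstallablePackages(code: string, declared: string[] = []): string[] {
    const wanted = new Set<string>(declared)
    const importRegex = /(?:import\s+.*?\s+from\s+['"]([^'"]+)['"]|require\s*\(\s*['"]([^'"]+)['"]\s*\))/g
    let match: RegExpExecArray | null
    while ((match = importRegex.exec(code)) !== null) {
        const moduleName = match[1] || match[2]
        wanted.add(moduleName.split('/')[0]) // 'lodash/get' -> 'lodash'
    }
    const validPackageNameRegex = /^(@[a-z0-9-~][a-z0-9-._~]*\/)?[a-z0-9-~][a-z0-9-._~]*$/
    return [...wanted].filter((name) => validPackageNameRegex.test(name))
}

// Example: returns ['axios', 'lodash']
collectInstallablePackages(`const axios = require('axios'); import get from 'lodash/get'`)
```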
@@ -1942,160 +1921,3 @@ export async function parseWithTypeConversion<T extends z.ZodTypeAny>(schema: T,
        throw e
    }
}

/**
 * Configures structured output for the LLM using Zod schema
 * @param {BaseChatModel} llmNodeInstance - The LLM instance to configure
 * @param {any[]} structuredOutput - Array of structured output schema definitions
 * @returns {BaseChatModel} - The configured LLM instance
 */
export const configureStructuredOutput = (llmNodeInstance: BaseChatModel, structuredOutput: any[]): BaseChatModel => {
    try {
        const zodObj: ICommonObject = {}
        for (const sch of structuredOutput) {
            if (sch.type === 'string') {
                zodObj[sch.key] = z.string().describe(sch.description || '')
            } else if (sch.type === 'stringArray') {
                zodObj[sch.key] = z.array(z.string()).describe(sch.description || '')
            } else if (sch.type === 'number') {
                zodObj[sch.key] = z.number().describe(sch.description || '')
            } else if (sch.type === 'boolean') {
                zodObj[sch.key] = z.boolean().describe(sch.description || '')
            } else if (sch.type === 'enum') {
                const enumValues = sch.enumValues?.split(',').map((item: string) => item.trim()) || []
                zodObj[sch.key] = z
                    .enum(enumValues.length ? (enumValues as [string, ...string[]]) : ['default'])
                    .describe(sch.description || '')
            } else if (sch.type === 'jsonArray') {
                const jsonSchema = sch.jsonSchema
                if (jsonSchema) {
                    try {
                        // Parse the JSON schema
                        const schemaObj = JSON.parse(jsonSchema)

                        // Create a Zod schema from the JSON schema
                        const itemSchema = createZodSchemaFromJSON(schemaObj)

                        // Create an array schema of the item schema
                        zodObj[sch.key] = z.array(itemSchema).describe(sch.description || '')
                    } catch (err) {
                        console.error(`Error parsing JSON schema for ${sch.key}:`, err)
                        // Fallback to generic array of records
                        zodObj[sch.key] = z.array(z.record(z.any())).describe(sch.description || '')
                    }
                } else {
                    // If no schema provided, use generic array of records
                    zodObj[sch.key] = z.array(z.record(z.any())).describe(sch.description || '')
                }
            }
        }
        const structuredOutputSchema = z.object(zodObj)

        // @ts-ignore
        return llmNodeInstance.withStructuredOutput(structuredOutputSchema)
    } catch (exception) {
        console.error(exception)
        return llmNodeInstance
    }
}

/**
 * Creates a Zod schema from a JSON schema object
 * @param {any} jsonSchema - The JSON schema object
 * @returns {z.ZodTypeAny} - A Zod schema
 */
export const createZodSchemaFromJSON = (jsonSchema: any): z.ZodTypeAny => {
    // If the schema is an object with properties, create an object schema
    if (typeof jsonSchema === 'object' && jsonSchema !== null) {
        const schemaObj: Record<string, z.ZodTypeAny> = {}

        // Process each property in the schema
        for (const [key, value] of Object.entries(jsonSchema)) {
            if (value === null) {
                // Handle null values
                schemaObj[key] = z.null()
            } else if (typeof value === 'object' && !Array.isArray(value)) {
                // Check if the property has a type definition
                if ('type' in value) {
                    const type = value.type as string
                    const description = ('description' in value ? (value.description as string) : '') || ''

                    // Create the appropriate Zod type based on the type property
                    if (type === 'string') {
                        schemaObj[key] = z.string().describe(description)
                    } else if (type === 'number') {
                        schemaObj[key] = z.number().describe(description)
                    } else if (type === 'boolean') {
                        schemaObj[key] = z.boolean().describe(description)
                    } else if (type === 'array') {
                        // If it's an array type, check if items is defined
                        if ('items' in value && value.items) {
                            const itemSchema = createZodSchemaFromJSON(value.items)
                            schemaObj[key] = z.array(itemSchema).describe(description)
                        } else {
                            // Default to array of any if items not specified
                            schemaObj[key] = z.array(z.any()).describe(description)
                        }
                    } else if (type === 'object') {
                        // If it's an object type, check if properties is defined
                        if ('properties' in value && value.properties) {
                            const nestedSchema = createZodSchemaFromJSON(value.properties)
                            schemaObj[key] = nestedSchema.describe(description)
                        } else {
                            // Default to record of any if properties not specified
                            schemaObj[key] = z.record(z.any()).describe(description)
                        }
                    } else {
                        // Default to any for unknown types
                        schemaObj[key] = z.any().describe(description)
                    }

                    // Check if the property is optional
                    if ('optional' in value && value.optional === true) {
                        schemaObj[key] = schemaObj[key].optional()
                    }
                } else if (Array.isArray(value)) {
                    // Array values without a type property
                    if (value.length > 0) {
                        // If the array has items, recursively create a schema for the first item
                        const itemSchema = createZodSchemaFromJSON(value[0])
                        schemaObj[key] = z.array(itemSchema)
                    } else {
                        // Empty array, allow any array
                        schemaObj[key] = z.array(z.any())
                    }
                } else {
                    // It's a nested object without a type property, recursively create schema
                    schemaObj[key] = createZodSchemaFromJSON(value)
                }
            } else if (Array.isArray(value)) {
                // Array values
                if (value.length > 0) {
                    // If the array has items, recursively create a schema for the first item
                    const itemSchema = createZodSchemaFromJSON(value[0])
                    schemaObj[key] = z.array(itemSchema)
                } else {
                    // Empty array, allow any array
                    schemaObj[key] = z.array(z.any())
                }
            } else {
                // For primitive values (which shouldn't be in the schema directly)
                // Use the corresponding Zod type
                if (typeof value === 'string') {
                    schemaObj[key] = z.string()
                } else if (typeof value === 'number') {
                    schemaObj[key] = z.number()
                } else if (typeof value === 'boolean') {
                    schemaObj[key] = z.boolean()
                } else {
                    schemaObj[key] = z.any()
                }
            }
        }

        return z.object(schemaObj)
    }

    // Fallback to any for unknown types
    return z.any()
}

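For reference, a hedged example of the input createZodSchemaFromJSON accepts on the branch where it exists, and a parse call against the resulting schema (the schema content here is illustrative):

```typescript
import { z } from 'zod'

// Illustrative node-style schema definition: each property carries a type and, optionally,
// a description, an optional flag, or an items map for arrays of objects.
const jsonSchema = {
    title: { type: 'string', description: 'Short title' },
    authors: { type: 'array', items: { name: { type: 'string' } } },
    rating: { type: 'number', optional: true }
}

const zodSchema = createZodSchemaFromJSON(jsonSchema)

// Valid objects parse; missing required keys or wrong types raise a ZodError.
zodSchema.parse({ title: 'Release notes', authors: [{ name: 'Jane' }], rating: 5 })
```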
@@ -69,3 +69,36 @@ export const isUnsafeFilePath = (filePath: string): boolean => {
return dangerousPatterns.some((pattern) => pattern.test(filePath))
}

/**
* Validates if a file path is within the allowed workspace boundaries
* @param {string} filePath The file path to validate
* @param {string} workspacePath The workspace base path
* @returns {boolean} True if path is within workspace, false otherwise
*/
export const isWithinWorkspace = (filePath: string, workspacePath: string): boolean => {
if (!filePath || !workspacePath) {
return false
}

try {
const path = require('path')

// Resolve both paths to absolute paths
const resolvedFilePath = path.resolve(workspacePath, filePath)
const resolvedWorkspacePath = path.resolve(workspacePath)

// Normalize paths to handle different separators
const normalizedFilePath = path.normalize(resolvedFilePath)
const normalizedWorkspacePath = path.normalize(resolvedWorkspacePath)

// Check if the file path starts with the workspace path
const relativePath = path.relative(normalizedWorkspacePath, normalizedFilePath)

// If relative path starts with '..' or is absolute, it's outside workspace
return !relativePath.startsWith('..') && !path.isAbsolute(relativePath)
} catch (error) {
// If any error occurs during path resolution, deny access
return false
}
}
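A quick illustration of how the new helper behaves; the paths below are made up, and the logic is restated standalone so it can be run directly:

```typescript
import * as path from 'path'

const isWithinWorkspace = (filePath: string, workspacePath: string): boolean => {
    if (!filePath || !workspacePath) return false
    // Resolve against the workspace, then check the relative path does not escape it
    const relativePath = path.relative(path.resolve(workspacePath), path.resolve(workspacePath, filePath))
    return !relativePath.startsWith('..') && !path.isAbsolute(relativePath)
}

console.log(isWithinWorkspace('notes/todo.txt', '/srv/workspace')) // true
console.log(isWithinWorkspace('../etc/passwd', '/srv/workspace')) // false - escapes via '..'
console.log(isWithinWorkspace('/etc/passwd', '/srv/workspace')) // false - resolves outside the workspace
```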
@@ -41,7 +41,7 @@ cd Flowise/packages/server
pnpm install
./node_modules/.bin/cypress install
pnpm build
#Only for writing new tests on local dev -> pnpm run cypress:open
#Only for writting new tests on local dev -> pnpm run cypress:open
pnpm run e2e
```
@@ -284,7 +284,7 @@
"inputAnchors": [],
"inputs": {
"customFunctionInputVariables": "",
"customFunctionJavascriptFunction": "const { DataSource } = require('typeorm');\nconst { Pool } = require('pg');\n\nconst HOST = 'localhost';\nconst USER = 'testuser';\nconst PASSWORD = 'testpwd';\nconst DATABASE = 'abudhabi';\nconst PORT = 5555;\n\nlet sqlSchemaPrompt = '';\n\nconst AppDataSource = new DataSource({\n type: 'postgres',\n host: HOST,\n port: PORT,\n username: USER,\n password: PASSWORD,\n database: DATABASE,\n synchronize: false,\n logging: false,\n});\n\nasync function getSQLPrompt() {\n try {\n await AppDataSource.initialize();\n const queryRunner = AppDataSource.createQueryRunner();\n\n // Get all user-defined tables (excluding system tables)\n const tablesResult = await queryRunner.query(`\n SELECT table_name\n FROM information_schema.tables\n WHERE table_schema = 'public' AND table_type = 'BASE TABLE'\n `);\n\n for (const tableRow of tablesResult) {\n const tableName = tableRow.table_name;\n\n const schemaInfo = await queryRunner.query(`\n SELECT column_name, data_type, is_nullable\n FROM information_schema.columns\n WHERE table_name = '${tableName}'\n `);\n\n const createColumns = [];\n const columnNames = [];\n\n for (const column of schemaInfo) {\n const name = column.column_name;\n const type = column.data_type.toUpperCase();\n const notNull = column.is_nullable === 'NO' ? 'NOT NULL' : '';\n columnNames.push(name);\n createColumns.push(`${name} ${type} ${notNull}`);\n }\n\n const sqlCreateTableQuery = `CREATE TABLE ${tableName} (${createColumns.join(', ')})`;\n const sqlSelectTableQuery = `SELECT * FROM ${tableName} LIMIT 3`;\n\n let allValues = [];\n try {\n const rows = await queryRunner.query(sqlSelectTableQuery);\n\n allValues = rows.map(row =>\n columnNames.map(col => row[col]).join(' ')\n );\n } catch (err) {\n allValues.push('[ERROR FETCHING ROWS]');\n }\n\n sqlSchemaPrompt +=\n sqlCreateTableQuery +\n '\\n' +\n sqlSelectTableQuery +\n '\\n' +\n columnNames.join(' ') +\n '\\n' +\n allValues.join('\\n') +\n '\\n\\n';\n }\n\n await queryRunner.release();\n } catch (err) {\n console.error(err);\n throw err;\n }\n}\n\nasync function main() {\n await getSQLPrompt();\n}\n\nawait main();\n\nreturn sqlSchemaPrompt;\n",
"customFunctionJavascriptFunction": "const { DataSource } = require('typeorm');\n\nconst HOST = 'localhost';\nconst USER = 'testuser';\nconst PASSWORD = 'testpwd';\nconst DATABASE = 'abudhabi';\nconst PORT = 5555;\n\nlet sqlSchemaPrompt = '';\n\nconst AppDataSource = new DataSource({\n type: 'postgres',\n host: HOST,\n port: PORT,\n username: USER,\n password: PASSWORD,\n database: DATABASE,\n synchronize: false,\n logging: false,\n});\n\nasync function getSQLPrompt() {\n try {\n await AppDataSource.initialize();\n const queryRunner = AppDataSource.createQueryRunner();\n\n // Get all user-defined tables (excluding system tables)\n const tablesResult = await queryRunner.query(`\n SELECT table_name\n FROM information_schema.tables\n WHERE table_schema = 'public' AND table_type = 'BASE TABLE'\n `);\n\n for (const tableRow of tablesResult) {\n const tableName = tableRow.table_name;\n\n const schemaInfo = await queryRunner.query(`\n SELECT column_name, data_type, is_nullable\n FROM information_schema.columns\n WHERE table_name = '${tableName}'\n `);\n\n const createColumns = [];\n const columnNames = [];\n\n for (const column of schemaInfo) {\n const name = column.column_name;\n const type = column.data_type.toUpperCase();\n const notNull = column.is_nullable === 'NO' ? 'NOT NULL' : '';\n columnNames.push(name);\n createColumns.push(`${name} ${type} ${notNull}`);\n }\n\n const sqlCreateTableQuery = `CREATE TABLE ${tableName} (${createColumns.join(', ')})`;\n const sqlSelectTableQuery = `SELECT * FROM ${tableName} LIMIT 3`;\n\n let allValues = [];\n try {\n const rows = await queryRunner.query(sqlSelectTableQuery);\n\n allValues = rows.map(row =>\n columnNames.map(col => row[col]).join(' ')\n );\n } catch (err) {\n allValues.push('[ERROR FETCHING ROWS]');\n }\n\n sqlSchemaPrompt +=\n sqlCreateTableQuery +\n '\\n' +\n sqlSelectTableQuery +\n '\\n' +\n columnNames.join(' ') +\n '\\n' +\n allValues.join('\\n') +\n '\\n\\n';\n }\n\n await queryRunner.release();\n } catch (err) {\n console.error(err);\n throw err;\n }\n}\n\nasync function main() {\n await getSQLPrompt();\n}\n\nawait main();\n\nreturn sqlSchemaPrompt;\n",
|
||||
"customFunctionUpdateState": ""
|
||||
},
|
||||
"outputAnchors": [
|
||||
|
|
@ -913,7 +913,7 @@
|
|||
"variableValue": "<p><span class=\"variable\" data-type=\"mention\" data-id=\"$flow.state.sqlQuery\" data-label=\"$flow.state.sqlQuery\">{{ $flow.state.sqlQuery }}</span> </p>"
|
||||
}
|
||||
],
|
||||
"customFunctionJavascriptFunction": "const { DataSource } = require('typeorm');\nconst { Pool } = require('pg');\n\n// Configuration\nconst HOST = 'localhost';\nconst USER = 'testuser';\nconst PASSWORD = 'testpwd';\nconst DATABASE = 'abudhabi';\nconst PORT = 5555;\n\nconst sqlQuery = $sqlQuery;\n\nconst AppDataSource = new DataSource({\n type: 'postgres',\n host: HOST,\n port: PORT,\n username: USER,\n password: PASSWORD,\n database: DATABASE,\n synchronize: false,\n logging: false,\n});\n\nlet formattedResult = '';\n\nasync function runSQLQuery(query) {\n try {\n await AppDataSource.initialize();\n const queryRunner = AppDataSource.createQueryRunner();\n\n const rows = await queryRunner.query(query);\n console.log('rows =', rows);\n\n if (rows.length === 0) {\n formattedResult = '[No results returned]';\n } else {\n const columnNames = Object.keys(rows[0]);\n const header = columnNames.join(' ');\n const values = rows.map(row =>\n columnNames.map(col => row[col]).join(' ')\n );\n\n formattedResult = query + '\\n' + header + '\\n' + values.join('\\n');\n }\n\n await queryRunner.release();\n } catch (err) {\n console.error('[ERROR]', err);\n formattedResult = `[Error executing query]: ${err}`;\n }\n\n return formattedResult;\n}\n\nasync function main() {\n formattedResult = await runSQLQuery(sqlQuery);\n}\n\nawait main();\n\nreturn formattedResult;\n",
"customFunctionJavascriptFunction": "const { DataSource } = require('typeorm');\n\n// Configuration\nconst HOST = 'localhost';\nconst USER = 'testuser';\nconst PASSWORD = 'testpwd';\nconst DATABASE = 'abudhabi';\nconst PORT = 5555;\n\nconst sqlQuery = $sqlQuery;\n\nconst AppDataSource = new DataSource({\n type: 'postgres',\n host: HOST,\n port: PORT,\n username: USER,\n password: PASSWORD,\n database: DATABASE,\n synchronize: false,\n logging: false,\n});\n\nlet formattedResult = '';\n\nasync function runSQLQuery(query) {\n try {\n await AppDataSource.initialize();\n const queryRunner = AppDataSource.createQueryRunner();\n\n const rows = await queryRunner.query(query);\n console.log('rows =', rows);\n\n if (rows.length === 0) {\n formattedResult = '[No results returned]';\n } else {\n const columnNames = Object.keys(rows[0]);\n const header = columnNames.join(' ');\n const values = rows.map(row =>\n columnNames.map(col => row[col]).join(' ')\n );\n\n formattedResult = query + '\\n' + header + '\\n' + values.join('\\n');\n }\n\n await queryRunner.release();\n } catch (err) {\n console.error('[ERROR]', err);\n formattedResult = `[Error executing query]: ${err}`;\n }\n\n return formattedResult;\n}\n\nasync function main() {\n formattedResult = await runSQLQuery(sqlQuery);\n}\n\nawait main();\n\nreturn formattedResult;\n",
"customFunctionUpdateState": ""
},
"outputAnchors": [
@@ -1,6 +1,6 @@
{
"name": "flowise",
"version": "3.0.11",
"version": "3.0.9",
"description": "Flowiseai Server",
"main": "dist/index",
"types": "dist/index.d.ts",

@@ -66,7 +66,7 @@
"@google-cloud/logging-winston": "^6.0.0",
"@keyv/redis": "^4.2.0",
"@oclif/core": "4.0.7",
"@opentelemetry/api": "1.9.0",
"@opentelemetry/api": "^1.3.0",
"@opentelemetry/auto-instrumentations-node": "^0.52.0",
"@opentelemetry/core": "1.27.0",
"@opentelemetry/exporter-metrics-otlp-grpc": "0.54.0",

@@ -119,12 +119,12 @@
"lodash": "^4.17.21",
"moment": "^2.29.3",
"moment-timezone": "^0.5.34",
"multer": "^2.0.2",
"multer": "^1.4.5-lts.1",
"multer-cloud-storage": "^4.0.0",
"multer-s3": "^3.0.1",
"mysql2": "^3.11.3",
"nanoid": "3",
"nodemailer": "^7.0.7",
"nodemailer": "^6.9.14",
"openai": "^4.96.0",
"passport": "^0.7.0",
"passport-auth0": "^1.4.4",
@@ -37,19 +37,7 @@ export class UsageCacheManager {
if (process.env.MODE === MODE.QUEUE) {
let redisConfig: string | Record<string, any>
if (process.env.REDIS_URL) {
redisConfig = {
url: process.env.REDIS_URL,
socket: {
keepAlive:
process.env.REDIS_KEEP_ALIVE && !isNaN(parseInt(process.env.REDIS_KEEP_ALIVE, 10))
? parseInt(process.env.REDIS_KEEP_ALIVE, 10)
: undefined
},
pingInterval:
process.env.REDIS_KEEP_ALIVE && !isNaN(parseInt(process.env.REDIS_KEEP_ALIVE, 10))
? parseInt(process.env.REDIS_KEEP_ALIVE, 10)
: undefined
}
redisConfig = process.env.REDIS_URL
} else {
redisConfig = {
username: process.env.REDIS_USERNAME || undefined,

@@ -60,16 +48,8 @@ export class UsageCacheManager {
tls: process.env.REDIS_TLS === 'true',
cert: process.env.REDIS_CERT ? Buffer.from(process.env.REDIS_CERT, 'base64') : undefined,
key: process.env.REDIS_KEY ? Buffer.from(process.env.REDIS_KEY, 'base64') : undefined,
ca: process.env.REDIS_CA ? Buffer.from(process.env.REDIS_CA, 'base64') : undefined,
keepAlive:
process.env.REDIS_KEEP_ALIVE && !isNaN(parseInt(process.env.REDIS_KEEP_ALIVE, 10))
? parseInt(process.env.REDIS_KEEP_ALIVE, 10)
: undefined
},
pingInterval:
process.env.REDIS_KEEP_ALIVE && !isNaN(parseInt(process.env.REDIS_KEEP_ALIVE, 10))
? parseInt(process.env.REDIS_KEEP_ALIVE, 10)
: undefined
ca: process.env.REDIS_CA ? Buffer.from(process.env.REDIS_CA, 'base64') : undefined
}
}
}
this.cache = createCache({
@@ -465,10 +465,9 @@ const insertIntoVectorStore = async (req: Request, res: Response, next: NextFunc
}
const subscriptionId = req.user?.activeOrganizationSubscriptionId || ''
const body = req.body
const isStrictSave = body.isStrictSave ?? false
const apiResponse = await documentStoreService.insertIntoVectorStoreMiddleware(
body,
isStrictSave,
false,
orgId,
workspaceId,
subscriptionId,

@@ -514,11 +513,7 @@ const deleteVectorStoreFromStore = async (req: Request, res: Response, next: Nex
`Error: documentStoreController.deleteVectorStoreFromStore - workspaceId not provided!`
)
}
const apiResponse = await documentStoreService.deleteVectorStoreFromStore(
req.params.storeId,
workspaceId,
(req.query.docId as string) || undefined
)
const apiResponse = await documentStoreService.deleteVectorStoreFromStore(req.params.storeId, workspaceId)
return res.json(apiResponse)
} catch (error) {
next(error)
@@ -1,14 +0,0 @@
import { MigrationInterface, QueryRunner } from 'typeorm'

export class FixDocumentStoreFileChunkLongText1765000000000 implements MigrationInterface {
public async up(queryRunner: QueryRunner): Promise<void> {
await queryRunner.query(`ALTER TABLE \`document_store_file_chunk\` MODIFY \`pageContent\` LONGTEXT NOT NULL;`)
await queryRunner.query(`ALTER TABLE \`document_store_file_chunk\` MODIFY \`metadata\` LONGTEXT NULL;`)
}

public async down(queryRunner: QueryRunner): Promise<void> {
// WARNING: Reverting to TEXT may cause data loss if content exceeds the 64KB limit.
await queryRunner.query(`ALTER TABLE \`document_store_file_chunk\` MODIFY \`pageContent\` TEXT NOT NULL;`)
await queryRunner.query(`ALTER TABLE \`document_store_file_chunk\` MODIFY \`metadata\` TEXT NULL;`)
}
}

@@ -40,7 +40,6 @@ import { AddTextToSpeechToChatFlow1754986457485 } from './1754986457485-AddTextT
import { ModifyChatflowType1755066758601 } from './1755066758601-ModifyChatflowType'
import { AddTextToSpeechToChatFlow1759419231100 } from './1759419231100-AddTextToSpeechToChatFlow'
import { AddChatFlowNameIndex1759424809984 } from './1759424809984-AddChatFlowNameIndex'
import { FixDocumentStoreFileChunkLongText1765000000000 } from './1765000000000-FixDocumentStoreFileChunkLongText'

import { AddAuthTables1720230151482 } from '../../../enterprise/database/migrations/mariadb/1720230151482-AddAuthTables'
import { AddWorkspace1725437498242 } from '../../../enterprise/database/migrations/mariadb/1725437498242-AddWorkspace'

@@ -107,6 +106,5 @@ export const mariadbMigrations = [
AddTextToSpeechToChatFlow1754986457485,
ModifyChatflowType1755066758601,
AddTextToSpeechToChatFlow1759419231100,
AddChatFlowNameIndex1759424809984,
FixDocumentStoreFileChunkLongText1765000000000
AddChatFlowNameIndex1759424809984
]
@@ -1,14 +0,0 @@
import { MigrationInterface, QueryRunner } from 'typeorm'

export class FixDocumentStoreFileChunkLongText1765000000000 implements MigrationInterface {
public async up(queryRunner: QueryRunner): Promise<void> {
await queryRunner.query(`ALTER TABLE \`document_store_file_chunk\` MODIFY \`pageContent\` LONGTEXT NOT NULL;`)
await queryRunner.query(`ALTER TABLE \`document_store_file_chunk\` MODIFY \`metadata\` LONGTEXT NULL;`)
}

public async down(queryRunner: QueryRunner): Promise<void> {
// WARNING: Reverting to TEXT may cause data loss if content exceeds the 64KB limit.
await queryRunner.query(`ALTER TABLE \`document_store_file_chunk\` MODIFY \`pageContent\` TEXT NOT NULL;`)
await queryRunner.query(`ALTER TABLE \`document_store_file_chunk\` MODIFY \`metadata\` TEXT NULL;`)
}
}

@@ -41,7 +41,6 @@ import { AddTextToSpeechToChatFlow1754986468397 } from './1754986468397-AddTextT
import { ModifyChatflowType1755066758601 } from './1755066758601-ModifyChatflowType'
import { AddTextToSpeechToChatFlow1759419216034 } from './1759419216034-AddTextToSpeechToChatFlow'
import { AddChatFlowNameIndex1759424828558 } from './1759424828558-AddChatFlowNameIndex'
import { FixDocumentStoreFileChunkLongText1765000000000 } from './1765000000000-FixDocumentStoreFileChunkLongText'

import { AddAuthTables1720230151482 } from '../../../enterprise/database/migrations/mysql/1720230151482-AddAuthTables'
import { AddWorkspace1720230151484 } from '../../../enterprise/database/migrations/mysql/1720230151484-AddWorkspace'

@@ -109,6 +108,5 @@ export const mysqlMigrations = [
AddTextToSpeechToChatFlow1754986468397,
ModifyChatflowType1755066758601,
AddTextToSpeechToChatFlow1759419216034,
AddChatFlowNameIndex1759424828558,
FixDocumentStoreFileChunkLongText1765000000000
AddChatFlowNameIndex1759424828558
]
@@ -391,7 +391,7 @@ const deleteDocumentStoreFileChunk = async (storeId: string, docId: string, chun
}
}

const deleteVectorStoreFromStore = async (storeId: string, workspaceId: string, docId?: string) => {
const deleteVectorStoreFromStore = async (storeId: string, workspaceId: string) => {
try {
const appServer = getRunningExpressApp()
const componentNodes = appServer.nodesPool.componentNodes

@@ -461,7 +461,7 @@ const deleteVectorStoreFromStore = async (storeId: string, workspaceId: string,

// Call the delete method of the vector store
if (vectorStoreObj.vectorStoreMethods.delete) {
await vectorStoreObj.vectorStoreMethods.delete(vStoreNodeData, idsToDelete, { ...options, docId })
await vectorStoreObj.vectorStoreMethods.delete(vStoreNodeData, idsToDelete, options)
}
} catch (error) {
throw new InternalFlowiseError(

@@ -1157,18 +1157,6 @@ const updateVectorStoreConfigOnly = async (data: ICommonObject, workspaceId: str
)
}
}
/**
* Saves vector store configuration to the document store entity.
* Handles embedding, vector store, and record manager configurations.
*
* @example
* // Strict mode: Only save what's provided, clear the rest
* await saveVectorStoreConfig(ds, { storeId, embeddingName, embeddingConfig }, true, wsId)
*
* @example
* // Lenient mode: Reuse existing configs if not provided
* await saveVectorStoreConfig(ds, { storeId, vectorStoreName, vectorStoreConfig }, false, wsId)
*/
const saveVectorStoreConfig = async (appDataSource: DataSource, data: ICommonObject, isStrictSave = true, workspaceId: string) => {
try {
const entity = await appDataSource.getRepository(DocumentStore).findOneBy({

@@ -1233,15 +1221,6 @@ const saveVectorStoreConfig = async (appDataSource: DataSource, data: ICommonObj
}
}

/**
* Inserts documents from document store into the configured vector store.
*
* Process:
* 1. Saves vector store configuration (embedding, vector store, record manager)
* 2. Sets document store status to UPSERTING
* 3. Performs the actual vector store upsert operation
* 4. Updates status to UPSERTED upon completion
*/
export const insertIntoVectorStore = async ({
appDataSource,
componentNodes,

@@ -1252,16 +1231,19 @@ export const insertIntoVectorStore = async ({
workspaceId
}: IExecuteVectorStoreInsert) => {
try {
// Step 1: Save configuration based on isStrictSave mode
const entity = await saveVectorStoreConfig(appDataSource, data, isStrictSave, workspaceId)

// Step 2: Mark as UPSERTING before starting the operation
entity.status = DocumentStoreStatus.UPSERTING
await appDataSource.getRepository(DocumentStore).save(entity)

// Step 3: Perform the actual vector store upsert
// Note: Configuration already saved above, worker thread just retrieves and uses it
const indexResult = await _insertIntoVectorStoreWorkerThread(appDataSource, componentNodes, telemetry, data, orgId, workspaceId)
const indexResult = await _insertIntoVectorStoreWorkerThread(
appDataSource,
componentNodes,
telemetry,
data,
isStrictSave,
orgId,
workspaceId
)
return indexResult
} catch (error) {
throw new InternalFlowiseError(

@@ -1326,18 +1308,12 @@ const _insertIntoVectorStoreWorkerThread = async (
componentNodes: IComponentNodes,
telemetry: Telemetry,
data: ICommonObject,
isStrictSave = true,
orgId: string,
workspaceId: string
) => {
try {
// Configuration already saved by insertIntoVectorStore, just retrieve the entity
const entity = await appDataSource.getRepository(DocumentStore).findOneBy({
id: data.storeId,
workspaceId: workspaceId
})
if (!entity) {
throw new InternalFlowiseError(StatusCodes.NOT_FOUND, `Document store ${data.storeId} not found`)
}
const entity = await saveVectorStoreConfig(appDataSource, data, isStrictSave, workspaceId)
let upsertHistory: Record<string, any> = {}
const chatflowid = data.storeId // fake chatflowid because this is not tied to any chatflow

@@ -1374,10 +1350,7 @@ const _insertIntoVectorStoreWorkerThread = async (
const docs: Document[] = chunks.map((chunk: DocumentStoreFileChunk) => {
return new Document({
pageContent: chunk.pageContent,
metadata: {
...JSON.parse(chunk.metadata),
docId: chunk.docId
}
metadata: JSON.parse(chunk.metadata)
})
})
vStoreNodeData.inputs.document = docs

@@ -1938,8 +1911,6 @@ const upsertDocStore = async (
recordManagerConfig
}

// Use isStrictSave: false to preserve existing configurations during upsert
// This allows the operation to reuse existing embedding/vector store/record manager configs
const res = await insertIntoVectorStore({
appDataSource,
componentNodes,
@@ -2122,62 +2122,7 @@ export const executeAgentFlow = async ({

// check if last agentFlowExecutedData.data.output contains the key "content"
const lastNodeOutput = agentFlowExecutedData[agentFlowExecutedData.length - 1].data?.output as ICommonObject | undefined
let content = (lastNodeOutput?.content as string) ?? ' '

/* Check for post-processing settings */
let chatflowConfig: ICommonObject = {}
try {
if (chatflow.chatbotConfig) {
chatflowConfig = typeof chatflow.chatbotConfig === 'string' ? JSON.parse(chatflow.chatbotConfig) : chatflow.chatbotConfig
}
} catch (e) {
logger.error('[server]: Error parsing chatflow config:', e)
}

if (chatflowConfig?.postProcessing?.enabled === true && content) {
try {
const postProcessingFunction = JSON.parse(chatflowConfig?.postProcessing?.customFunction)
const nodeInstanceFilePath = componentNodes['customFunctionAgentflow'].filePath as string
const nodeModule = await import(nodeInstanceFilePath)
//set the outputs.output to EndingNode to prevent json escaping of content...
const nodeData = {
inputs: { customFunctionJavascriptFunction: postProcessingFunction }
}
const runtimeChatHistory = agentflowRuntime.chatHistory || []
const chatHistory = [...pastChatHistory, ...runtimeChatHistory]
const options: ICommonObject = {
chatflowid: chatflow.id,
sessionId,
chatId,
input: question || form,
postProcessing: {
rawOutput: content,
chatHistory: cloneDeep(chatHistory),
sourceDocuments: lastNodeOutput?.sourceDocuments ? cloneDeep(lastNodeOutput.sourceDocuments) : undefined,
usedTools: lastNodeOutput?.usedTools ? cloneDeep(lastNodeOutput.usedTools) : undefined,
artifacts: lastNodeOutput?.artifacts ? cloneDeep(lastNodeOutput.artifacts) : undefined,
fileAnnotations: lastNodeOutput?.fileAnnotations ? cloneDeep(lastNodeOutput.fileAnnotations) : undefined
},
appDataSource,
databaseEntities,
workspaceId,
orgId,
logger
}
const customFuncNodeInstance = new nodeModule.nodeClass()
const customFunctionResponse = await customFuncNodeInstance.run(nodeData, question || form, options)
const moderatedResponse = customFunctionResponse.output.content
if (typeof moderatedResponse === 'string') {
content = moderatedResponse
} else if (typeof moderatedResponse === 'object') {
content = '```json\n' + JSON.stringify(moderatedResponse, null, 2) + '\n```'
} else {
content = moderatedResponse
}
} catch (e) {
logger.error('[server]: Post Processing Error:', e)
}
}
const content = (lastNodeOutput?.content as string) ?? ' '

// remove credentialId from agentFlowExecutedData
agentFlowExecutedData = agentFlowExecutedData.map((data) => _removeCredentialId(data))
@@ -2,7 +2,7 @@ import { Request } from 'express'
import * as path from 'path'
import { DataSource } from 'typeorm'
import { v4 as uuidv4 } from 'uuid'
import { omit, cloneDeep } from 'lodash'
import { omit } from 'lodash'
import {
IFileUpload,
convertSpeechToText,

@@ -817,14 +817,7 @@ export const executeFlow = async ({
sessionId,
chatId,
input: question,
postProcessing: {
rawOutput: resultText,
chatHistory: cloneDeep(chatHistory),
sourceDocuments: result?.sourceDocuments ? cloneDeep(result.sourceDocuments) : undefined,
usedTools: result?.usedTools ? cloneDeep(result.usedTools) : undefined,
artifacts: result?.artifacts ? cloneDeep(result.artifacts) : undefined,
fileAnnotations: result?.fileAnnotations ? cloneDeep(result.fileAnnotations) : undefined
},
rawOutput: resultText,
appDataSource,
databaseEntities,
workspaceId,
@@ -27,15 +27,15 @@ export const createFileAttachment = async (req: Request) => {
const appServer = getRunningExpressApp()

const chatflowid = req.params.chatflowId
const chatId = req.params.chatId

if (!chatflowid || !isValidUUID(chatflowid)) {
throw new InternalFlowiseError(StatusCodes.BAD_REQUEST, 'Invalid chatflowId format - must be a valid UUID')
}
if (isPathTraversal(chatflowid) || (chatId && isPathTraversal(chatId))) {
if (isPathTraversal(chatflowid)) {
throw new InternalFlowiseError(StatusCodes.BAD_REQUEST, 'Invalid path characters detected')
}

const chatId = req.params.chatId

// Validate chatflow exists and check API key
const chatflow = await appServer.AppDataSource.getRepository(ChatFlow).findOneBy({
id: chatflowid
@@ -70,7 +70,7 @@ export const checkUsageLimit = async (
if (limit === -1) return

if (currentUsage > limit) {
throw new InternalFlowiseError(StatusCodes.PAYMENT_REQUIRED, `Limit exceeded: ${type}`)
throw new InternalFlowiseError(StatusCodes.TOO_MANY_REQUESTS, `Limit exceeded: ${type}`)
}
}

@@ -135,7 +135,7 @@ export const checkPredictions = async (orgId: string, subscriptionId: string, us
if (predictionsLimit === -1) return

if (currentPredictions >= predictionsLimit) {
throw new InternalFlowiseError(StatusCodes.PAYMENT_REQUIRED, 'Predictions limit exceeded')
throw new InternalFlowiseError(StatusCodes.TOO_MANY_REQUESTS, 'Predictions limit exceeded')
}

return {

@@ -161,7 +161,7 @@ export const checkStorage = async (orgId: string, subscriptionId: string, usageC
if (storageLimit === -1) return

if (currentStorageUsage >= storageLimit) {
throw new InternalFlowiseError(StatusCodes.PAYMENT_REQUIRED, 'Storage limit exceeded')
throw new InternalFlowiseError(StatusCodes.TOO_MANY_REQUESTS, 'Storage limit exceeded')
}

return {
@@ -1,6 +1,6 @@
{
"name": "flowise-ui",
"version": "3.0.11",
"version": "3.0.9",
"license": "SEE LICENSE IN LICENSE.md",
"homepage": "https://flowiseai.com",
"author": {
@@ -22,10 +22,7 @@ const refreshLoader = (storeId) => client.post(`/document-store/refresh/${storeI
const insertIntoVectorStore = (body) => client.post(`/document-store/vectorstore/insert`, body)
const saveVectorStoreConfig = (body) => client.post(`/document-store/vectorstore/save`, body)
const updateVectorStoreConfig = (body) => client.post(`/document-store/vectorstore/update`, body)
const deleteVectorStoreDataFromStore = (storeId, docId) => {
const url = docId ? `/document-store/vectorstore/${storeId}?docId=${docId}` : `/document-store/vectorstore/${storeId}`
return client.delete(url)
}
const deleteVectorStoreDataFromStore = (storeId) => client.delete(`/document-store/vectorstore/${storeId}`)
const queryVectorStore = (body) => client.post(`/document-store/vectorstore/query`, body)
const getVectorStoreProviders = () => client.get('/document-store/components/vectorstore')
const getEmbeddingProviders = () => client.get('/document-store/components/embeddings')
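Restated standalone to show what the optional docId changes (the axios client, base URL, and IDs here are placeholders, not values from this diff):

```typescript
import axios from 'axios'

const client = axios.create({ baseURL: '/api/v1' }) // placeholder base URL

const deleteVectorStoreDataFromStore = (storeId: string, docId?: string) => {
    // With docId: delete only that document's vectors; without it: clear the whole store
    const url = docId ? `/document-store/vectorstore/${storeId}?docId=${docId}` : `/document-store/vectorstore/${storeId}`
    return client.delete(url)
}

deleteVectorStoreDataFromStore('example-store-id', 'example-doc-id') // one document
deleteVectorStoreDataFromStore('example-store-id') // entire store
```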
@@ -58,7 +58,7 @@ const NavGroup = ({ item }) => {

const renderNonPrimaryGroups = () => {
let nonprimaryGroups = item.children.filter((child) => child.id !== 'primary')
// Display children based on permission and display
// Display chilren based on permission and display
nonprimaryGroups = nonprimaryGroups.map((group) => {
const children = group.children.filter((menu) => shouldDisplayMenu(menu))
return { ...group, children }
@@ -10,7 +10,6 @@ const VerifyEmailPage = Loadable(lazy(() => import('@/views/auth/verify-email'))
const ForgotPasswordPage = Loadable(lazy(() => import('@/views/auth/forgotPassword')))
const ResetPasswordPage = Loadable(lazy(() => import('@/views/auth/resetPassword')))
const UnauthorizedPage = Loadable(lazy(() => import('@/views/auth/unauthorized')))
const RateLimitedPage = Loadable(lazy(() => import('@/views/auth/rateLimited')))
const OrganizationSetupPage = Loadable(lazy(() => import('@/views/organization/index')))
const LicenseExpiredPage = Loadable(lazy(() => import('@/views/auth/expired')))

@@ -46,10 +45,6 @@ const AuthRoutes = {
path: '/unauthorized',
element: <UnauthorizedPage />
},
{
path: '/rate-limited',
element: <RateLimitedPage />
},
{
path: '/organization-setup',
element: <OrganizationSetupPage />
@@ -10,29 +10,11 @@ const ErrorContext = createContext()

export const ErrorProvider = ({ children }) => {
const [error, setError] = useState(null)
const [authRateLimitError, setAuthRateLimitError] = useState(null)
const navigate = useNavigate()

const handleError = async (err) => {
console.error(err)
if (err?.response?.status === 429 && err?.response?.data?.type === 'authentication_rate_limit') {
setAuthRateLimitError("You're making a lot of requests. Please wait and try again later.")
} else if (err?.response?.status === 429 && err?.response?.data?.type !== 'authentication_rate_limit') {
const retryAfterHeader = err?.response?.headers?.['retry-after']
let retryAfter = 60 // Default in seconds
if (retryAfterHeader) {
const parsedSeconds = parseInt(retryAfterHeader, 10)
if (Number.isNaN(parsedSeconds)) {
const retryDate = new Date(retryAfterHeader)
if (!Number.isNaN(retryDate.getTime())) {
retryAfter = Math.max(0, Math.ceil((retryDate.getTime() - Date.now()) / 1000))
}
} else {
retryAfter = parsedSeconds
}
}
navigate('/rate-limited', { state: { retryAfter } })
} else if (err?.response?.status === 403) {
if (err?.response?.status === 403) {
navigate('/unauthorized')
} else if (err?.response?.status === 401) {
if (ErrorMessage.INVALID_MISSING_TOKEN === err?.response?.data?.message) {

@@ -62,9 +44,7 @@ export const ErrorProvider = ({ children }) => {
value={{
error,
setError,
handleError,
authRateLimitError,
setAuthRateLimitError
handleError
}}
>
{children}
@@ -74,7 +74,7 @@ const StyledMenu = styled((props) => (
}
}))

export default function FlowListMenu({ chatflow, isAgentCanvas, isAgentflowV2, setError, updateFlowsApi, currentPage, pageLimit }) {
export default function FlowListMenu({ chatflow, isAgentCanvas, isAgentflowV2, setError, updateFlowsApi }) {
const { confirm } = useConfirm()
const dispatch = useDispatch()
const updateChatflowApi = useApi(chatflowsApi.updateChatflow)

@@ -166,16 +166,10 @@ export default function FlowListMenu({ chatflow, isAgentCanvas, isAgentflowV2, s
}
try {
await updateChatflowApi.request(chatflow.id, updateBody)
const params = {
page: currentPage,
limit: pageLimit
}
if (isAgentCanvas && isAgentflowV2) {
await updateFlowsApi.request('AGENTFLOW', params)
} else if (isAgentCanvas) {
await updateFlowsApi.request('MULTIAGENT', params)
await updateFlowsApi.request('AGENTFLOW')
} else {
await updateFlowsApi.request(params)
await updateFlowsApi.request(isAgentCanvas ? 'MULTIAGENT' : undefined)
}
} catch (error) {
if (setError) setError(error)

@@ -215,15 +209,7 @@ export default function FlowListMenu({ chatflow, isAgentCanvas, isAgentflowV2, s
}
try {
await updateChatflowApi.request(chatflow.id, updateBody)
const params = {
page: currentPage,
limit: pageLimit
}
if (isAgentCanvas) {
await updateFlowsApi.request('AGENTFLOW', params)
} else {
await updateFlowsApi.request(params)
}
await updateFlowsApi.request(isAgentCanvas ? 'AGENTFLOW' : undefined)
} catch (error) {
if (setError) setError(error)
enqueueSnackbar({

@@ -255,16 +241,10 @@ export default function FlowListMenu({ chatflow, isAgentCanvas, isAgentflowV2, s
if (isConfirmed) {
try {
await chatflowsApi.deleteChatflow(chatflow.id)
const params = {
page: currentPage,
limit: pageLimit
}
if (isAgentCanvas && isAgentflowV2) {
await updateFlowsApi.request('AGENTFLOW', params)
} else if (isAgentCanvas) {
await updateFlowsApi.request('MULTIAGENT', params)
await updateFlowsApi.request('AGENTFLOW')
} else {
await updateFlowsApi.request(params)
await updateFlowsApi.request(isAgentCanvas ? 'MULTIAGENT' : undefined)
}
} catch (error) {
if (setError) setError(error)

@@ -474,7 +454,5 @@ FlowListMenu.propTypes = {
isAgentCanvas: PropTypes.bool,
isAgentflowV2: PropTypes.bool,
setError: PropTypes.func,
updateFlowsApi: PropTypes.object,
currentPage: PropTypes.number,
pageLimit: PropTypes.number
updateFlowsApi: PropTypes.object
}
@@ -53,7 +53,8 @@ const CHATFLOW_CONFIGURATION_TABS = [
},
{
label: 'Post Processing',
id: 'postProcessing'
id: 'postProcessing',
hideInAgentFlow: true
}
]
@@ -16,11 +16,11 @@ import { useEditor, EditorContent } from '@tiptap/react'
import Placeholder from '@tiptap/extension-placeholder'
import { mergeAttributes } from '@tiptap/core'
import StarterKit from '@tiptap/starter-kit'
import Mention from '@tiptap/extension-mention'
import CodeBlockLowlight from '@tiptap/extension-code-block-lowlight'
import { common, createLowlight } from 'lowlight'
import { suggestionOptions } from '@/ui-component/input/suggestionOption'
import { getAvailableNodesForVariable } from '@/utils/genericHelper'
import { CustomMention } from '@/utils/customMention'

const lowlight = createLowlight(common)

@@ -78,7 +78,7 @@ const extensions = (availableNodesForVariable, availableState, acceptNodeOutputA
StarterKit.configure({
codeBlock: false
}),
CustomMention.configure({
Mention.configure({
HTMLAttributes: {
class: 'variable'
},
@@ -4,25 +4,8 @@ import PropTypes from 'prop-types'
import { useSelector } from 'react-redux'

// material-ui
import {
IconButton,
Button,
Box,
Typography,
TableContainer,
Table,
TableHead,
TableBody,
TableRow,
TableCell,
Paper,
Accordion,
AccordionSummary,
AccordionDetails,
Card
} from '@mui/material'
import { IconArrowsMaximize, IconX } from '@tabler/icons-react'
import ExpandMoreIcon from '@mui/icons-material/ExpandMore'
import { IconButton, Button, Box, Typography } from '@mui/material'
import { IconArrowsMaximize, IconBulb, IconX } from '@tabler/icons-react'
import { useTheme } from '@mui/material/styles'

// Project import

@@ -38,11 +21,7 @@ import useNotifier from '@/utils/useNotifier'
// API
import chatflowsApi from '@/api/chatflows'

const sampleFunction = `// Access chat history as a string
const chatHistory = JSON.stringify($flow.chatHistory, null, 2);

// Return a modified response
return $flow.rawOutput + " This is a post processed response!";`
const sampleFunction = `return $flow.rawOutput + " This is a post processed response!";`

const PostProcessing = ({ dialogProps }) => {
const dispatch = useDispatch()

@@ -196,105 +175,31 @@ const PostProcessing = ({ dialogProps }) => {
/>
</div>
</Box>
<Card sx={{ borderColor: theme.palette.primary[200] + 75, mt: 2, mb: 2 }} variant='outlined'>
<Accordion
disableGutters
sx={{
'&:before': {
display: 'none'
}
<div
style={{
display: 'flex',
flexDirection: 'column',
borderRadius: 10,
background: '#d8f3dc',
padding: 10,
marginTop: 10
}}
>
<div
style={{
display: 'flex',
flexDirection: 'row',
alignItems: 'center',
paddingTop: 10
}}
>
<AccordionSummary expandIcon={<ExpandMoreIcon />}>
<Typography>Available Variables</Typography>
</AccordionSummary>
<AccordionDetails sx={{ p: 0 }}>
<TableContainer component={Paper}>
<Table aria-label='available variables table'>
<TableHead>
<TableRow>
<TableCell sx={{ width: '30%' }}>Variable</TableCell>
<TableCell sx={{ width: '15%' }}>Type</TableCell>
<TableCell sx={{ width: '55%' }}>Description</TableCell>
</TableRow>
</TableHead>
<TableBody>
<TableRow>
<TableCell>
<code>$flow.rawOutput</code>
</TableCell>
<TableCell>string</TableCell>
<TableCell>The raw output response from the flow</TableCell>
</TableRow>
<TableRow>
<TableCell>
<code>$flow.input</code>
</TableCell>
<TableCell>string</TableCell>
<TableCell>The user input message</TableCell>
</TableRow>
<TableRow>
<TableCell>
<code>$flow.chatHistory</code>
</TableCell>
<TableCell>array</TableCell>
<TableCell>Array of previous messages in the conversation</TableCell>
</TableRow>
<TableRow>
<TableCell>
<code>$flow.chatflowId</code>
</TableCell>
<TableCell>string</TableCell>
<TableCell>Unique identifier for the chatflow</TableCell>
</TableRow>
<TableRow>
<TableCell>
<code>$flow.sessionId</code>
</TableCell>
<TableCell>string</TableCell>
<TableCell>Current session identifier</TableCell>
</TableRow>
<TableRow>
<TableCell>
<code>$flow.chatId</code>
</TableCell>
<TableCell>string</TableCell>
<TableCell>Current chat identifier</TableCell>
</TableRow>
<TableRow>
<TableCell>
<code>$flow.sourceDocuments</code>
</TableCell>
<TableCell>array</TableCell>
<TableCell>Source documents used in retrieval (if applicable)</TableCell>
</TableRow>
<TableRow>
<TableCell>
<code>$flow.usedTools</code>
</TableCell>
<TableCell>array</TableCell>
<TableCell>List of tools used during execution</TableCell>
</TableRow>
<TableRow>
<TableCell>
<code>$flow.artifacts</code>
</TableCell>
<TableCell>array</TableCell>
<TableCell>List of artifacts generated during execution</TableCell>
</TableRow>
<TableRow>
<TableCell sx={{ borderBottom: 'none' }}>
<code>$flow.fileAnnotations</code>
</TableCell>
<TableCell sx={{ borderBottom: 'none' }}>array</TableCell>
<TableCell sx={{ borderBottom: 'none' }}>File annotations associated with the response</TableCell>
</TableRow>
</TableBody>
</Table>
</TableContainer>
</AccordionDetails>
</Accordion>
</Card>
<IconBulb size={30} color='#2d6a4f' />
<span style={{ color: '#2d6a4f', marginLeft: 10, fontWeight: 500 }}>
The following variables are available to use in the custom function:{' '}
<pre>$flow.rawOutput, $flow.input, $flow.chatflowId, $flow.sessionId, $flow.chatId</pre>
</span>
</div>
</div>
<StyledButton
style={{ marginBottom: 10, marginTop: 10 }}
variant='contained'
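To make the variable list above concrete, here is one possible post-processing function a user could paste into this dialog; it is illustrative only and relies solely on the $flow variables documented above (the metadata.source field on source documents is an assumption, not guaranteed by the diff):

```javascript
// Append a short citation footer built from the retrieved sources, if any
const sources = ($flow.sourceDocuments || [])
    .map((doc, i) => `[${i + 1}] ${doc?.metadata?.source || 'unknown source'}`) // metadata.source is assumed
    .join('\n')

const turns = ($flow.chatHistory || []).length

return sources
    ? `${$flow.rawOutput}\n\nSources (after ${turns} prior messages in session ${$flow.sessionId}):\n${sources}`
    : $flow.rawOutput
```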
@@ -7,11 +7,11 @@ import { mergeAttributes } from '@tiptap/core'
import StarterKit from '@tiptap/starter-kit'
import { styled } from '@mui/material/styles'
import { Box } from '@mui/material'
import Mention from '@tiptap/extension-mention'
import CodeBlockLowlight from '@tiptap/extension-code-block-lowlight'
import { common, createLowlight } from 'lowlight'
import { suggestionOptions } from './suggestionOption'
import { getAvailableNodesForVariable } from '@/utils/genericHelper'
import { CustomMention } from '@/utils/customMention'

const lowlight = createLowlight(common)

@@ -20,7 +20,7 @@ const extensions = (availableNodesForVariable, availableState, acceptNodeOutputA
StarterKit.configure({
codeBlock: false
}),
CustomMention.configure({
Mention.configure({
HTMLAttributes: {
class: 'variable'
},

@@ -112,7 +112,7 @@ export const suggestionOptions = (
category: 'Node Outputs'
})

const structuredOutputs = nodeData?.inputs?.llmStructuredOutput ?? nodeData?.inputs?.agentStructuredOutput ?? []
const structuredOutputs = nodeData?.inputs?.llmStructuredOutput ?? []
if (structuredOutputs && structuredOutputs.length > 0) {
structuredOutputs.forEach((item) => {
defaultItems.unshift({
@@ -59,9 +59,7 @@ export const FlowListTable = ({
updateFlowsApi,
setError,
isAgentCanvas,
isAgentflowV2,
currentPage,
pageLimit
isAgentflowV2
}) => {
const { hasPermission } = useAuth()
const isActionsAvailable = isAgentCanvas

@@ -333,8 +331,6 @@ export const FlowListTable = ({
chatflow={row}
setError={setError}
updateFlowsApi={updateFlowsApi}
currentPage={currentPage}
pageLimit={pageLimit}
/>
</Stack>
</StyledTableCell>

@@ -359,7 +355,5 @@ FlowListTable.propTypes = {
updateFlowsApi: PropTypes.object,
setError: PropTypes.func,
isAgentCanvas: PropTypes.bool,
isAgentflowV2: PropTypes.bool,
currentPage: PropTypes.number,
pageLimit: PropTypes.number
isAgentflowV2: PropTypes.bool
}
@@ -1,26 +0,0 @@
import Mention from '@tiptap/extension-mention'
import { PasteRule } from '@tiptap/core'

export const CustomMention = Mention.extend({
renderText({ node }) {
return `{{${node.attrs.label ?? node.attrs.id}}}`
},
addPasteRules() {
return [
new PasteRule({
find: /\{\{([^{}]+)\}\}/g,
handler: ({ match, chain, range }) => {
const label = match[1].trim()
if (label) {
chain()
.deleteRange(range)
.insertContentAt(range.from, {
type: this.name,
attrs: { id: label, label: label }
})
}
}
})
]
}
})
@@ -325,8 +325,6 @@ const Agentflows = () => {
filterFunction={filterFlows}
updateFlowsApi={getAllAgentflows}
setError={setError}
currentPage={currentPage}
pageLimit={pageLimit}
/>
)}
{/* Pagination and Page Size Controls */}
@@ -150,8 +150,6 @@ const AgentFlowNode = ({ data }) => {
return <IconWorldWww size={14} color={'white'} />
case 'googleSearch':
return <IconBrandGoogle size={14} color={'white'} />
case 'codeExecution':
return <IconCode size={14} color={'white'} />
default:
return null
}
@@ -16,7 +16,6 @@ import accountApi from '@/api/account.api'
// Hooks
import useApi from '@/hooks/useApi'
import { useConfig } from '@/store/context/ConfigContext'
import { useError } from '@/store/context/ErrorContext'

// utils
import useNotifier from '@/utils/useNotifier'

@@ -42,13 +41,10 @@ const ForgotPasswordPage = () => {
const [isLoading, setLoading] = useState(false)
const [responseMsg, setResponseMsg] = useState(undefined)

const { authRateLimitError, setAuthRateLimitError } = useError()

const forgotPasswordApi = useApi(accountApi.forgotPassword)

const sendResetRequest = async (event) => {
event.preventDefault()
setAuthRateLimitError(null)
const body = {
user: {
email: usernameVal

@@ -58,11 +54,6 @@ const ForgotPasswordPage = () => {
await forgotPasswordApi.request(body)
}

useEffect(() => {
setAuthRateLimitError(null)
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [setAuthRateLimitError])

useEffect(() => {
if (forgotPasswordApi.error) {
const errMessage =

@@ -98,11 +89,6 @@ const ForgotPasswordPage = () => {
{responseMsg.msg}
</Alert>
)}
{authRateLimitError && (
<Alert icon={<IconExclamationCircle />} variant='filled' severity='error'>
{authRateLimitError}
</Alert>
)}
{responseMsg && responseMsg?.type !== 'error' && (
<Alert icon={<IconCircleCheck />} variant='filled' severity='success'>
{responseMsg.msg}
@@ -1,51 +0,0 @@
import { Box, Button, Stack, Typography } from '@mui/material'
import { Link, useLocation } from 'react-router-dom'
import unauthorizedSVG from '@/assets/images/unauthorized.svg'
import MainCard from '@/ui-component/cards/MainCard'

// ==============================|| RateLimitedPage ||============================== //

const RateLimitedPage = () => {
const location = useLocation()

const retryAfter = location.state?.retryAfter || 60

return (
<MainCard>
<Box
sx={{
display: 'flex',
justifyContent: 'center',
alignItems: 'center',
height: 'calc(100vh - 210px)'
}}
>
<Stack
sx={{
alignItems: 'center',
justifyContent: 'center',
maxWidth: '500px'
}}
flexDirection='column'
>
<Box sx={{ p: 2, height: 'auto' }}>
<img style={{ objectFit: 'cover', height: '20vh', width: 'auto' }} src={unauthorizedSVG} alt='rateLimitedSVG' />
</Box>
<Typography sx={{ mb: 2 }} variant='h4' component='div' fontWeight='bold'>
429 Too Many Requests
</Typography>
<Typography variant='body1' component='div' sx={{ mb: 2, textAlign: 'center' }}>
{`You have made too many requests in a short period of time. Please wait ${retryAfter}s before trying again.`}
</Typography>
<Link to='/'>
<Button variant='contained' color='primary'>
Back to Home
</Button>
</Link>
</Stack>
</Box>
</MainCard>
)
}

export default RateLimitedPage
@@ -18,7 +18,6 @@ import ssoApi from '@/api/sso'
// Hooks
import useApi from '@/hooks/useApi'
import { useConfig } from '@/store/context/ConfigContext'
import { useError } from '@/store/context/ErrorContext'

// utils
import useNotifier from '@/utils/useNotifier'

@@ -112,9 +111,7 @@ const RegisterPage = () => {

const [loading, setLoading] = useState(false)
const [authError, setAuthError] = useState('')
const [successMsg, setSuccessMsg] = useState('')

const { authRateLimitError, setAuthRateLimitError } = useError()
const [successMsg, setSuccessMsg] = useState(undefined)

const registerApi = useApi(accountApi.registerAccount)
const ssoLoginApi = useApi(ssoApi.ssoLogin)

@@ -123,7 +120,6 @@ const RegisterPage = () => {

const register = async (event) => {
event.preventDefault()
setAuthRateLimitError(null)
if (isEnterpriseLicensed) {
const result = RegisterEnterpriseUserSchema.safeParse({
username,

@@ -196,7 +192,6 @@
}, [registerApi.error])

useEffect(() => {
setAuthRateLimitError(null)
if (!isOpenSource) {
getDefaultProvidersApi.request()
}

@@ -279,11 +274,6 @@
)}
</Alert>
)}
{authRateLimitError && (
<Alert icon={<IconExclamationCircle />} variant='filled' severity='error'>
{authRateLimitError}
</Alert>
)}
{successMsg && (
<Alert icon={<IconCircleCheck />} variant='filled' severity='success'>
{successMsg}
@@ -1,4 +1,4 @@
import { useEffect, useState } from 'react'
import { useState } from 'react'
import { useDispatch } from 'react-redux'
import { Link, useNavigate, useSearchParams } from 'react-router-dom'

@@ -19,9 +19,6 @@ import accountApi from '@/api/account.api'
import useNotifier from '@/utils/useNotifier'
import { validatePassword } from '@/utils/validation'

// Hooks
import { useError } from '@/store/context/ErrorContext'

// Icons
import { IconExclamationCircle, IconX } from '@tabler/icons-react'

@@ -73,8 +70,6 @@ const ResetPasswordPage = () => {
const [loading, setLoading] = useState(false)
const [authErrors, setAuthErrors] = useState([])

const { authRateLimitError, setAuthRateLimitError } = useError()

const goLogin = () => {
navigate('/signin', { replace: true })
}

@@ -83,7 +78,6 @@ const ResetPasswordPage = () => {
event.preventDefault()
const validationErrors = []
setAuthErrors([])
setAuthRateLimitError(null)
if (!tokenVal) {
validationErrors.push('Token cannot be left blank!')
}

@@ -148,11 +142,6 @@ const ResetPasswordPage = () => {
}
}

useEffect(() => {
setAuthRateLimitError(null)
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [])

return (
<>
<MainCard>

@@ -166,11 +155,6 @@ const ResetPasswordPage = () => {
</ul>
</Alert>
)}
{authRateLimitError && (
<Alert icon={<IconExclamationCircle />} variant='filled' severity='error'>
{authRateLimitError}
</Alert>
)}
<Stack sx={{ gap: 1 }}>
<Typography variant='h1'>Reset Password</Typography>
<Typography variant='body2' sx={{ color: theme.palette.grey[600] }}>
@@ -14,7 +14,6 @@ import { Input } from '@/ui-component/input/Input'
// Hooks
import useApi from '@/hooks/useApi'
import { useConfig } from '@/store/context/ConfigContext'
import { useError } from '@/store/context/ErrorContext'

// API
import authApi from '@/api/auth'

@@ -63,8 +62,6 @@ const SignInPage = () => {
const [showResendButton, setShowResendButton] = useState(false)
const [successMessage, setSuccessMessage] = useState('')

const { authRateLimitError, setAuthRateLimitError } = useError()

const loginApi = useApi(authApi.login)
const ssoLoginApi = useApi(ssoApi.ssoLogin)
const getDefaultProvidersApi = useApi(loginMethodApi.getDefaultLoginMethods)

@@ -74,7 +71,6 @@ const SignInPage = () => {

const doLogin = (event) => {
event.preventDefault()
setAuthRateLimitError(null)
setLoading(true)
const body = {
email: usernameVal,

@@ -96,12 +92,11 @@ const SignInPage = () => {

useEffect(() => {
store.dispatch(logoutSuccess())
setAuthRateLimitError(null)
if (!isOpenSource) {
getDefaultProvidersApi.request()
}
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [setAuthRateLimitError, isOpenSource])
}, [])

useEffect(() => {
// Parse the "user" query parameter from the URL

@@ -184,11 +179,6 @@ const SignInPage = () => {
{successMessage}
</Alert>
)}
{authRateLimitError && (
<Alert icon={<IconExclamationCircle />} variant='filled' severity='error'>
{authRateLimitError}
</Alert>
)}
{authError && (
<Alert icon={<IconExclamationCircle />} variant='filled' severity='error'>
{authError}
@@ -208,8 +208,6 @@ const Chatflows = () => {
filterFunction={filterFlows}
updateFlowsApi={getAllChatflowsApi}
setError={setError}
currentPage={currentPage}
pageLimit={pageLimit}
/>
)}
{/* Pagination and Page Size Controls */}