Merge branch 'main' into bugfix/Concurrent-Chat-Session

# Conflicts:
#	packages/components/nodes/agents/OpenAIFunctionAgent/OpenAIFunctionAgent.ts
Commit 7ba4a063fb by Henry, 2024-01-15 13:22:59 +00:00
20 changed files with 460 additions and 82 deletions

View File

@@ -145,25 +145,40 @@ Flowise 支持不同的环境变量来配置您的实例。您可以在 `package
 ## 🌐 自托管
-### [Railway](https://docs.flowiseai.com/deployment/railway)
-
-[![在 Railway 上部署](https://railway.app/button.svg)](https://railway.app/template/pn4G8S?referralCode=WVNPD9)
-
-### [Render](https://docs.flowiseai.com/deployment/render)
-
-[![部署到 Render](https://render.com/images/deploy-to-render-button.svg)](https://docs.flowiseai.com/deployment/render)
-
-### [HuggingFace Spaces](https://docs.flowiseai.com/deployment/hugging-face)
-
-<a href="https://huggingface.co/spaces/FlowiseAI/Flowise"><img src="https://huggingface.co/datasets/huggingface/badges/raw/main/open-in-hf-spaces-sm.svg" alt="HuggingFace Spaces"></a>
-
-### [AWS](https://docs.flowiseai.com/deployment/aws)
-### [Azure](https://docs.flowiseai.com/deployment/azure)
-### [DigitalOcean](https://docs.flowiseai.com/deployment/digital-ocean)
-### [GCP](https://docs.flowiseai.com/deployment/gcp)
+在您现有的基础设施中部署自托管的 Flowise,我们支持各种[部署](https://docs.flowiseai.com/configuration/deployment)
+
+-   [AWS](https://docs.flowiseai.com/deployment/aws)
+-   [Azure](https://docs.flowiseai.com/deployment/azure)
+-   [Digital Ocean](https://docs.flowiseai.com/deployment/digital-ocean)
+-   [GCP](https://docs.flowiseai.com/deployment/gcp)
+-   <details>
+    <summary>其他</summary>
+
+    -   [Railway](https://docs.flowiseai.com/deployment/railway)
+
+        [![在 Railway 上部署](https://railway.app/button.svg)](https://railway.app/template/pn4G8S?referralCode=WVNPD9)
+
+    -   [Render](https://docs.flowiseai.com/deployment/render)
+
+        [![部署到 Render](https://render.com/images/deploy-to-render-button.svg)](https://docs.flowiseai.com/deployment/render)
+
+    -   [HuggingFace Spaces](https://docs.flowiseai.com/deployment/hugging-face)
+
+        <a href="https://huggingface.co/spaces/FlowiseAI/Flowise"><img src="https://huggingface.co/datasets/huggingface/badges/raw/main/open-in-hf-spaces-sm.svg" alt="HuggingFace Spaces"></a>
+
+    -   [Elestio](https://elest.io/open-source/flowiseai)
+
+        [![Deploy](https://pub-da36157c854648669813f3f76c526c2b.r2.dev/deploy-on-elestio-black.png)](https://elest.io/open-source/flowiseai)
+
+    -   [Sealos](https://cloud.sealos.io/?openapp=system-template%3FtemplateName%3Dflowise)
+
+        [![部署到 Sealos](https://raw.githubusercontent.com/labring-actions/templates/main/Deploy-on-Sealos.svg)](https://cloud.sealos.io/?openapp=system-template%3FtemplateName%3Dflowise)
+
+    -   [RepoCloud](https://repocloud.io/details/?app_id=29)
+
+        [![部署到 RepoCloud](https://d16t0pc4846x52.cloudfront.net/deploy.png)](https://repocloud.io/details/?app_id=29)
+
+    </details>

 ## 💻 云托管

View File

@@ -145,29 +145,40 @@ Flowise support different environment variables to configure your instance. You
 ## 🌐 Self Host
-### [Railway](https://docs.flowiseai.com/deployment/railway)
-
-[![Deploy on Railway](https://railway.app/button.svg)](https://railway.app/template/pn4G8S?referralCode=WVNPD9)
-
-### [Render](https://docs.flowiseai.com/deployment/render)
-
-[![Deploy to Render](https://render.com/images/deploy-to-render-button.svg)](https://docs.flowiseai.com/deployment/render)
-
-### [Elestio](https://elest.io/open-source/flowiseai)
-
-[![Deploy](https://pub-da36157c854648669813f3f76c526c2b.r2.dev/deploy-on-elestio-black.png)](https://elest.io/open-source/flowiseai)
-
-### [HuggingFace Spaces](https://docs.flowiseai.com/deployment/hugging-face)
-
-<a href="https://huggingface.co/spaces/FlowiseAI/Flowise"><img src="https://huggingface.co/datasets/huggingface/badges/raw/main/open-in-hf-spaces-sm.svg" alt="HuggingFace Spaces"></a>
-
-### [AWS](https://docs.flowiseai.com/deployment/aws)
-### [Azure](https://docs.flowiseai.com/deployment/azure)
-### [DigitalOcean](https://docs.flowiseai.com/deployment/digital-ocean)
-### [GCP](https://docs.flowiseai.com/deployment/gcp)
+Deploy Flowise self-hosted in your existing infrastructure, we support various [deployments](https://docs.flowiseai.com/configuration/deployment)
+
+-   [AWS](https://docs.flowiseai.com/deployment/aws)
+-   [Azure](https://docs.flowiseai.com/deployment/azure)
+-   [Digital Ocean](https://docs.flowiseai.com/deployment/digital-ocean)
+-   [GCP](https://docs.flowiseai.com/deployment/gcp)
+-   <details>
+    <summary>Others</summary>
+
+    -   [Railway](https://docs.flowiseai.com/deployment/railway)
+
+        [![Deploy on Railway](https://railway.app/button.svg)](https://railway.app/template/pn4G8S?referralCode=WVNPD9)
+
+    -   [Render](https://docs.flowiseai.com/deployment/render)
+
+        [![Deploy to Render](https://render.com/images/deploy-to-render-button.svg)](https://docs.flowiseai.com/deployment/render)
+
+    -   [HuggingFace Spaces](https://docs.flowiseai.com/deployment/hugging-face)
+
+        <a href="https://huggingface.co/spaces/FlowiseAI/Flowise"><img src="https://huggingface.co/datasets/huggingface/badges/raw/main/open-in-hf-spaces-sm.svg" alt="HuggingFace Spaces"></a>
+
+    -   [Elestio](https://elest.io/open-source/flowiseai)
+
+        [![Deploy](https://pub-da36157c854648669813f3f76c526c2b.r2.dev/deploy-on-elestio-black.png)](https://elest.io/open-source/flowiseai)
+
+    -   [Sealos](https://cloud.sealos.io/?openapp=system-template%3FtemplateName%3Dflowise)
+
+        [![](https://raw.githubusercontent.com/labring-actions/templates/main/Deploy-on-Sealos.svg)](https://cloud.sealos.io/?openapp=system-template%3FtemplateName%3Dflowise)
+
+    -   [RepoCloud](https://repocloud.io/details/?app_id=29)
+
+        [![Deploy on RepoCloud](https://d16t0pc4846x52.cloudfront.net/deploy.png)](https://repocloud.io/details/?app_id=29)
+
+    </details>

 ## 💻 Cloud Hosted

View File

@@ -1,6 +1,6 @@
 # Flowise Docker Hub Image

-Starts Flowise from [DockerHub Image](https://hub.docker.com/repository/docker/flowiseai/flowise/general)
+Starts Flowise from [DockerHub Image](https://hub.docker.com/r/flowiseai/flowise)

 ## Usage

View File

@@ -0,0 +1,34 @@
import { INodeParams, INodeCredential } from '../src/Interface'

class AstraDBApi implements INodeCredential {
    label: string
    name: string
    version: number
    description: string
    inputs: INodeParams[]

    constructor() {
        this.label = 'Astra DB API'
        this.name = 'AstraDBApi'
        this.version = 1.0
        this.inputs = [
            {
                label: 'Astra DB Collection Name',
                name: 'collectionName',
                type: 'string'
            },
            {
                label: 'Astra DB Application Token',
                name: 'applicationToken',
                type: 'password'
            },
            {
                label: 'Astra DB Api Endpoint',
                name: 'dbEndPoint',
                type: 'string'
            }
        ]
    }
}

module.exports = { credClass: AstraDBApi }

View File

@@ -1,24 +1,23 @@
 import { INodeParams, INodeCredential } from '../src/Interface'

-class ZapierNLAApi implements INodeCredential {
+class LocalAIApi implements INodeCredential {
     label: string
     name: string
     version: number
-    description: string
     inputs: INodeParams[]

     constructor() {
-        this.label = 'Zapier NLA API'
-        this.name = 'zapierNLAApi'
+        this.label = 'LocalAI API'
+        this.name = 'localAIApi'
         this.version = 1.0
         this.inputs = [
             {
-                label: 'Zapier NLA Api Key',
-                name: 'zapierNLAApiKey',
+                label: 'LocalAI Api Key',
+                name: 'localAIApiKey',
                 type: 'password'
             }
         ]
     }
 }

-module.exports = { credClass: ZapierNLAApi }
+module.exports = { credClass: LocalAIApi }

View File

@@ -112,7 +112,7 @@ const prepareAgent = (
     const inputKey = memory.inputKey ? memory.inputKey : 'input'
     const prompt = ChatPromptTemplate.fromMessages([
-        ['ai', systemMessage ? systemMessage : `You are a helpful AI assistant.`],
+        ['system', systemMessage ? systemMessage : `You are a helpful AI assistant.`],
         new MessagesPlaceholder(memoryKey),
         ['human', `{${inputKey}}`],
         new MessagesPlaceholder('agent_scratchpad')
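
This hunk touches the file named in the commit's conflict note. The default instruction now rides in a 'system' turn rather than an 'ai' turn, so chat models treat it as an instruction instead of prior assistant output. A minimal sketch of the assembled prompt, assuming LangChain's prompt API as used in this file (the memoryKey and inputKey values are illustrative defaults):

```ts
import { ChatPromptTemplate, MessagesPlaceholder } from 'langchain/prompts'

// Illustrative defaults; in the node these come from the attached memory
const memoryKey = 'chat_history'
const inputKey = 'input'

const prompt = ChatPromptTemplate.fromMessages([
    ['system', 'You are a helpful AI assistant.'], // was an 'ai' turn before this fix
    new MessagesPlaceholder(memoryKey), // prior turns injected by memory
    ['human', `{${inputKey}}`], // current user input
    new MessagesPlaceholder('agent_scratchpad') // intermediate tool-call steps
])
```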

View File

@@ -69,22 +69,23 @@ class VectaraChain_Chains implements INode {
             options: [
                 {
                     label: 'vectara-summary-ext-v1.2.0 (gpt-3.5-turbo)',
-                    name: 'vectara-summary-ext-v1.2.0'
+                    name: 'vectara-summary-ext-v1.2.0',
+                    description: 'base summarizer, available to all Vectara users'
                 },
                 {
                     label: 'vectara-experimental-summary-ext-2023-10-23-small (gpt-3.5-turbo)',
                     name: 'vectara-experimental-summary-ext-2023-10-23-small',
-                    description: 'In beta, available to both Growth and Scale Vectara users'
+                    description: `In beta, available to both Growth and <a target="_blank" href="https://vectara.com/pricing/">Scale</a> Vectara users`
                 },
                 {
                     label: 'vectara-summary-ext-v1.3.0 (gpt-4.0)',
                     name: 'vectara-summary-ext-v1.3.0',
-                    description: 'Only available to paying Scale Vectara users'
+                    description: 'Only available to <a target="_blank" href="https://vectara.com/pricing/">Scale</a> Vectara users'
                 },
                 {
                     label: 'vectara-experimental-summary-ext-2023-10-23-med (gpt-4.0)',
                     name: 'vectara-experimental-summary-ext-2023-10-23-med',
-                    description: 'In beta, only available to paying Scale Vectara users'
+                    description: `In beta, only available to <a target="_blank" href="https://vectara.com/pricing/">Scale</a> Vectara users`
                 }
             ],
             default: 'vectara-summary-ext-v1.2.0'
@@ -228,7 +229,7 @@ class VectaraChain_Chains implements INode {
     async run(nodeData: INodeData, input: string): Promise<object> {
         const vectorStore = nodeData.inputs?.vectaraStore as VectaraStore
-        const responseLang = (nodeData.inputs?.responseLang as string) ?? 'auto'
+        const responseLang = (nodeData.inputs?.responseLang as string) ?? 'eng'
         const summarizerPromptName = nodeData.inputs?.summarizerPromptName as string
         const maxSummarizedResultsStr = nodeData.inputs?.maxSummarizedResults as string
         const maxSummarizedResults = maxSummarizedResultsStr ? parseInt(maxSummarizedResultsStr, 10) : 7
@@ -247,17 +248,31 @@ class VectaraChain_Chains implements INode {
             lexicalInterpolationConfig: { lambda: vectaraFilter?.lambda ?? 0.025 }
         }))

+        // Vectara reranker ID for MMR (https://docs.vectara.com/docs/api-reference/search-apis/reranking#maximal-marginal-relevance-mmr-reranker)
+        const mmrRerankerId = 272725718
+        const mmrEnabled = vectaraFilter?.mmrConfig?.enabled
+
         const data = {
             query: [
                 {
                     query: input,
                     start: 0,
-                    numResults: topK,
+                    numResults: mmrEnabled ? vectaraFilter?.mmrTopK : topK,
+                    corpusKey: corpusKeys,
                     contextConfig: {
                         sentencesAfter: vectaraFilter?.contextConfig?.sentencesAfter ?? 2,
                         sentencesBefore: vectaraFilter?.contextConfig?.sentencesBefore ?? 2
                     },
-                    corpusKey: corpusKeys,
+                    ...(mmrEnabled
+                        ? {
+                              rerankingConfig: {
+                                  rerankerId: mmrRerankerId,
+                                  mmrConfig: {
+                                      diversityBias: vectaraFilter?.mmrConfig.diversityBias
+                                  }
+                              }
+                          }
+                        : {}),
                     summary: [
                         {
                             summarizerPromptName,
@@ -285,6 +300,14 @@ class VectaraChain_Chains implements INode {
         const documents = result.responseSet[0].document
         let rawSummarizedText = ''

+        // remove responses that are not in the topK (in case of MMR)
+        // Note that this does not really matter functionally due to the reorder citations, but it is more efficient
+        const maxResponses = mmrEnabled ? Math.min(responses.length, topK) : responses.length
+        if (responses.length > maxResponses) {
+            responses.splice(maxResponses)
+        }
+
+        // Add metadata to each text response given its corresponding document metadata
         for (let i = 0; i < responses.length; i += 1) {
             const responseMetadata = responses[i].metadata
             const documentMetadata = documents[responses[i].documentIndex].metadata
@@ -301,13 +324,13 @@ class VectaraChain_Chains implements INode {
             responses[i].metadata = combinedMetadata
         }

+        // Create the summarization response
         const summaryStatus = result.responseSet[0].summary[0].status
         if (summaryStatus.length > 0 && summaryStatus[0].code === 'BAD_REQUEST') {
             throw new Error(
                 `BAD REQUEST: Too much text for the summarizer to summarize. Please try reducing the number of search results to summarize, or the context of each result by adjusting the 'summary_num_sentences', and 'summary_num_results' parameters respectively.`
             )
         }
-
         if (
             summaryStatus.length > 0 &&
             summaryStatus[0].code === 'NOT_FOUND' &&
@@ -316,8 +339,8 @@ class VectaraChain_Chains implements INode {
             throw new Error(`BAD REQUEST: summarizer ${summarizerPromptName} is invalid for this account.`)
         }

+        // Reorder citations in summary and create the list of returned source documents
         rawSummarizedText = result.responseSet[0].summary[0]?.text
         let summarizedText = reorderCitations(rawSummarizedText)
         let summaryResponses = applyCitationOrder(responses, rawSummarizedText)
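
For orientation, this is roughly the Vectara query body that run() now assembles when MMR is enabled, a sketch pieced together from the hunks above with illustrative values for the user-supplied fields:

```ts
// Hypothetical payload shape when vectaraFilter.mmrConfig.enabled is true (values illustrative)
const exampleBody = {
    query: [
        {
            query: 'What is Flowise?',
            start: 0,
            numResults: 50, // mmrTopK when MMR is enabled, otherwise topK
            corpusKey: [{ corpusId: 1, lexicalInterpolationConfig: { lambda: 0.025 } }],
            contextConfig: { sentencesAfter: 2, sentencesBefore: 2 },
            rerankingConfig: {
                rerankerId: 272725718, // Vectara's fixed MMR reranker ID
                mmrConfig: { diversityBias: 0.3 }
            },
            summary: [{ summarizerPromptName: 'vectara-summary-ext-v1.2.0', responseLang: 'eng', maxSummarizedResults: 7 }]
        }
    ]
}
```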

View File

@@ -1,5 +1,5 @@
-import { INode, INodeData, INodeParams } from '../../../src/Interface'
-import { getBaseClasses } from '../../../src/utils'
+import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
+import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
 import { OpenAIChat } from 'langchain/llms/openai'
 import { OpenAIChatInput } from 'langchain/chat_models/openai'
 import { BaseCache } from 'langchain/schema'
@@ -14,6 +14,7 @@ class ChatLocalAI_ChatModels implements INode {
     category: string
     description: string
     baseClasses: string[]
+    credential: INodeParams
     inputs: INodeParams[]

     constructor() {
@@ -25,6 +26,13 @@ class ChatLocalAI_ChatModels implements INode {
         this.category = 'Chat Models'
         this.description = 'Use local LLMs like llama.cpp, gpt4all using LocalAI'
         this.baseClasses = [this.type, 'BaseChatModel', ...getBaseClasses(OpenAIChat)]
+        this.credential = {
+            label: 'Connect Credential',
+            name: 'credential',
+            type: 'credential',
+            credentialNames: ['localAIApi'],
+            optional: true
+        }
         this.inputs = [
             {
                 label: 'Cache',
@@ -79,13 +87,16 @@ class ChatLocalAI_ChatModels implements INode {
         ]
     }

-    async init(nodeData: INodeData): Promise<any> {
+    async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
         const temperature = nodeData.inputs?.temperature as string
         const modelName = nodeData.inputs?.modelName as string
         const maxTokens = nodeData.inputs?.maxTokens as string
         const topP = nodeData.inputs?.topP as string
         const timeout = nodeData.inputs?.timeout as string
         const basePath = nodeData.inputs?.basePath as string
+        const credentialData = await getCredentialData(nodeData.credential ?? '', options)
+        const localAIApiKey = getCredentialParam('localAIApiKey', credentialData, nodeData)
+
         const cache = nodeData.inputs?.cache as BaseCache

         const obj: Partial<OpenAIChatInput> & BaseLLMParams & { openAIApiKey?: string } = {
@@ -98,6 +109,7 @@ class ChatLocalAI_ChatModels implements INode {
         if (topP) obj.topP = parseFloat(topP)
         if (timeout) obj.timeout = parseInt(timeout, 10)
         if (cache) obj.cache = cache
+        if (localAIApiKey) obj.openAIApiKey = localAIApiKey

         const model = new OpenAIChat(obj, { basePath })

View File

@@ -1,4 +1,5 @@
-import { INode, INodeData, INodeParams } from '../../../src/Interface'
+import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
+import { getCredentialData, getCredentialParam } from '../../../src/utils'
 import { OpenAIEmbeddings, OpenAIEmbeddingsParams } from 'langchain/embeddings/openai'

 class LocalAIEmbedding_Embeddings implements INode {
@@ -10,6 +11,7 @@ class LocalAIEmbedding_Embeddings implements INode {
     category: string
     description: string
     baseClasses: string[]
+    credential: INodeParams
     inputs: INodeParams[]

     constructor() {
@@ -21,6 +23,13 @@ class LocalAIEmbedding_Embeddings implements INode {
         this.category = 'Embeddings'
         this.description = 'Use local embeddings models like llama.cpp'
         this.baseClasses = [this.type, 'Embeddings']
+        this.credential = {
+            label: 'Connect Credential',
+            name: 'credential',
+            type: 'credential',
+            credentialNames: ['localAIApi'],
+            optional: true
+        }
         this.inputs = [
             {
                 label: 'Base Path',
@@ -37,15 +46,20 @@ class LocalAIEmbedding_Embeddings implements INode {
         ]
     }

-    async init(nodeData: INodeData): Promise<any> {
+    async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
         const modelName = nodeData.inputs?.modelName as string
         const basePath = nodeData.inputs?.basePath as string
+        const credentialData = await getCredentialData(nodeData.credential ?? '', options)
+        const localAIApiKey = getCredentialParam('localAIApiKey', credentialData, nodeData)

         const obj: Partial<OpenAIEmbeddingsParams> & { openAIApiKey?: string } = {
             modelName,
             openAIApiKey: 'sk-'
         }

+        if (localAIApiKey) obj.openAIApiKey = localAIApiKey
+
         const model = new OpenAIEmbeddings(obj, { basePath })

         return model

View File

@@ -65,7 +65,7 @@ class CustomFunction_Utilities implements INode {
             inputVars =
                 typeof functionInputVariablesRaw === 'object' ? functionInputVariablesRaw : JSON.parse(functionInputVariablesRaw)
         } catch (exception) {
-            throw new Error("Invalid JSON in the PromptTemplate's promptValues: " + exception)
+            throw new Error('Invalid JSON in the Custom Function Input Variables: ' + exception)
         }
     }

View File

@@ -0,0 +1,190 @@
import { flatten } from 'lodash'
import { Embeddings } from 'langchain/embeddings/base'
import { Document } from 'langchain/document'
import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData } from '../../../src/utils'
import { AstraDBVectorStore, AstraLibArgs } from '@langchain/community/vectorstores/astradb'

class Astra_VectorStores implements INode {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    badge: string
    baseClasses: string[]
    inputs: INodeParams[]
    credential: INodeParams
    outputs: INodeOutputsValue[]

    constructor() {
        this.label = 'Astra'
        this.name = 'Astra'
        this.version = 1.0
        this.type = 'Astra'
        this.icon = 'astra.svg'
        this.category = 'Vector Stores'
        this.description = `Upsert embedded data and perform similarity search upon query using DataStax Astra DB, a serverless vector database that's perfect for managing mission-critical AI workloads`
        this.baseClasses = [this.type, 'VectorStoreRetriever', 'BaseRetriever']
        this.badge = 'NEW'
        this.credential = {
            label: 'Connect Credential',
            name: 'credential',
            type: 'credential',
            credentialNames: ['AstraDBApi']
        }
        this.inputs = [
            {
                label: 'Document',
                name: 'document',
                type: 'Document',
                list: true,
                optional: true
            },
            {
                label: 'Embeddings',
                name: 'embeddings',
                type: 'Embeddings'
            },
            {
                label: 'Vector Dimension',
                name: 'vectorDimension',
                type: 'number',
                placeholder: '1536',
                optional: true,
                description: 'Dimension used for storing vector embedding'
            },
            {
                label: 'Similarity Metric',
                name: 'similarityMetric',
                type: 'string',
                placeholder: 'cosine',
                optional: true,
                description: 'cosine | euclidean | dot_product'
            },
            {
                label: 'Top K',
                name: 'topK',
                description: 'Number of top results to fetch. Default to 4',
                placeholder: '4',
                type: 'number',
                additionalParams: true,
                optional: true
            }
        ]
        this.outputs = [
            {
                label: 'Astra Retriever',
                name: 'retriever',
                baseClasses: this.baseClasses
            },
            {
                label: 'Astra Vector Store',
                name: 'vectorStore',
                baseClasses: [this.type, ...getBaseClasses(AstraDBVectorStore)]
            }
        ]
    }

    //@ts-ignore
    vectorStoreMethods = {
        async upsert(nodeData: INodeData, options: ICommonObject): Promise<void> {
            const docs = nodeData.inputs?.document as Document[]
            const embeddings = nodeData.inputs?.embeddings as Embeddings
            const vectorDimension = nodeData.inputs?.vectorDimension as number
            const similarityMetric = nodeData.inputs?.similarityMetric as 'cosine' | 'euclidean' | 'dot_product' | undefined
            const credentialData = await getCredentialData(nodeData.credential ?? '', options)

            const expectedSimilarityMetric = ['cosine', 'euclidean', 'dot_product']
            if (similarityMetric && !expectedSimilarityMetric.includes(similarityMetric)) {
                throw new Error(`Invalid Similarity Metric should be one of 'cosine' | 'euclidean' | 'dot_product'`)
            }

            const clientConfig = {
                token: credentialData?.applicationToken,
                endpoint: credentialData?.dbEndPoint
            }

            const astraConfig: AstraLibArgs = {
                ...clientConfig,
                collection: credentialData.collectionName ?? 'flowise_test',
                collectionOptions: {
                    vector: {
                        dimension: vectorDimension ?? 1536,
                        metric: similarityMetric ?? 'cosine'
                    }
                }
            }

            const flattenDocs = docs && docs.length ? flatten(docs) : []
            const finalDocs = []
            for (let i = 0; i < flattenDocs.length; i += 1) {
                if (flattenDocs[i] && flattenDocs[i].pageContent) {
                    finalDocs.push(new Document(flattenDocs[i]))
                }
            }

            try {
                await AstraDBVectorStore.fromDocuments(finalDocs, embeddings, astraConfig)
            } catch (e) {
                throw new Error(e)
            }
        }
    }

    async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
        const docs = nodeData.inputs?.document as Document[]
        const embeddings = nodeData.inputs?.embeddings as Embeddings
        const vectorDimension = nodeData.inputs?.vectorDimension as number
        const similarityMetric = nodeData.inputs?.similarityMetric as 'cosine' | 'euclidean' | 'dot_product' | undefined
        const output = nodeData.outputs?.output as string
        const topK = nodeData.inputs?.topK as string
        const k = topK ? parseFloat(topK) : 4
        const credentialData = await getCredentialData(nodeData.credential ?? '', options)

        const expectedSimilarityMetric = ['cosine', 'euclidean', 'dot_product']
        if (similarityMetric && !expectedSimilarityMetric.includes(similarityMetric)) {
            throw new Error(`Invalid Similarity Metric should be one of 'cosine' | 'euclidean' | 'dot_product'`)
        }

        const clientConfig = {
            token: credentialData?.applicationToken,
            endpoint: credentialData?.dbEndPoint
        }

        const astraConfig: AstraLibArgs = {
            ...clientConfig,
            collection: credentialData.collectionName ?? 'flowise_test',
            collectionOptions: {
                vector: {
                    dimension: vectorDimension ?? 1536,
                    metric: similarityMetric ?? 'cosine'
                }
            }
        }

        const flattenDocs = docs && docs.length ? flatten(docs) : []
        const finalDocs = []
        for (let i = 0; i < flattenDocs.length; i += 1) {
            if (flattenDocs[i] && flattenDocs[i].pageContent) {
                finalDocs.push(new Document(flattenDocs[i]))
            }
        }

        const vectorStore = await AstraDBVectorStore.fromExistingIndex(embeddings, astraConfig)

        if (output === 'retriever') {
            const retriever = vectorStore.asRetriever(k)
            return retriever
        } else if (output === 'vectorStore') {
            ;(vectorStore as any).k = k
            return vectorStore
        }
        return vectorStore
    }
}

module.exports = { nodeClass: Astra_VectorStores }
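
Two entry points matter in this new node: vectorStoreMethods.upsert writes documents, init reconnects for querying. A minimal sketch of the difference, assuming embeddings, finalDocs, and astraConfig as built above and the @langchain/community package added to package.json in this commit:

```ts
import { AstraDBVectorStore } from '@langchain/community/vectorstores/astradb'

// Upsert path: embed and write the prepared documents into the Astra collection
await AstraDBVectorStore.fromDocuments(finalDocs, embeddings, astraConfig)

// Query path: attach to the existing collection, then expose it as a retriever
const store = await AstraDBVectorStore.fromExistingIndex(embeddings, astraConfig)
const retriever = store.asRetriever(4) // k comes from the Top K input, default 4
```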

View File

@@ -0,0 +1,12 @@
<svg width="1200" height="1200" viewBox="0 0 1200 1200" fill="none" xmlns="http://www.w3.org/2000/svg">
<rect width="1200" height="1200" fill="black"/>
<g clip-path="url(#clip0_102_1968)">
<path d="M508.819 464.97H267.001V737.697H508.819L569.566 690.526V512.14L508.819 464.97ZM313.864 512.14H522.703V690.575H313.864V512.14Z" fill="white"/>
<path d="M917.531 514.121V468H696.425L636.389 514.121V577.447L696.425 623.568H889.124V688.545H648.348V734.667H875.409L935.444 688.545V623.568L875.409 577.447H682.709V514.121H917.531Z" fill="white"/>
</g>
<defs>
<clipPath id="clip0_102_1968">
<rect width="668.444" height="266.667" fill="white" transform="translate(267 468)"/>
</clipPath>
</defs>
</svg>


View File

@@ -65,6 +65,14 @@ class Milvus_VectorStores implements INode {
                 name: 'milvusCollection',
                 type: 'string'
             },
+            {
+                label: 'Milvus Text Field',
+                name: 'milvusTextField',
+                type: 'string',
+                placeholder: 'langchain_text',
+                optional: true,
+                additionalParams: true
+            },
             {
                 label: 'Milvus Filter',
                 name: 'milvusFilter',
@@ -150,6 +158,7 @@ class Milvus_VectorStores implements INode {
         const address = nodeData.inputs?.milvusServerUrl as string
         const collectionName = nodeData.inputs?.milvusCollection as string
         const milvusFilter = nodeData.inputs?.milvusFilter as string
+        const textField = nodeData.inputs?.milvusTextField as string

         // embeddings
         const embeddings = nodeData.inputs?.embeddings as Embeddings
@@ -169,7 +178,8 @@ class Milvus_VectorStores implements INode {
         // init MilvusLibArgs
         const milVusArgs: MilvusLibArgs = {
             url: address,
-            collectionName: collectionName
+            collectionName: collectionName,
+            textField: textField
         }
         if (milvusUser) milVusArgs.username = milvusUser
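
With the new input wired through, collections whose documents were stored under a custom text field can be read back. A sketch of the resulting args (values illustrative):

```ts
import { MilvusLibArgs } from 'langchain/vectorstores/milvus'

const milVusArgs: MilvusLibArgs = {
    url: 'localhost:19530', // illustrative server address
    collectionName: 'my_collection', // illustrative
    textField: 'langchain_text' // must match the field name used when the data was inserted
}
```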

View File

@@ -1,5 +1,5 @@
 import { flatten } from 'lodash'
-import { VectaraStore, VectaraLibArgs, VectaraFilter, VectaraContextConfig, VectaraFile } from 'langchain/vectorstores/vectara'
+import { VectaraStore, VectaraLibArgs, VectaraFilter, VectaraContextConfig, VectaraFile, MMRConfig } from 'langchain/vectorstores/vectara'
 import { Document } from 'langchain/document'
 import { Embeddings } from 'langchain/embeddings/base'
 import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
@@ -22,7 +22,7 @@ class Vectara_VectorStores implements INode {
     constructor() {
         this.label = 'Vectara'
         this.name = 'vectara'
-        this.version = 1.0
+        this.version = 2.0
         this.type = 'Vectara'
         this.icon = 'vectara.png'
         this.category = 'Vector Stores'
@@ -82,7 +82,9 @@ class Vectara_VectorStores implements INode {
                 label: 'Lambda',
                 name: 'lambda',
                 description:
-                    'Improves retrieval accuracy by adjusting the balance (from 0 to 1) between neural search and keyword-based search factors.',
+                    'Enable hybrid search to improve retrieval accuracy by adjusting the balance (from 0 to 1) between neural search and keyword-based search factors. ' +
+                    'A value of 0.0 means that only neural search is used, while a value of 1.0 means that only keyword-based search is used. Defaults to 0.0 (neural only).',
+                default: 0.0,
                 type: 'number',
                 additionalParams: true,
                 optional: true
@@ -90,8 +92,30 @@ class Vectara_VectorStores implements INode {
             {
                 label: 'Top K',
                 name: 'topK',
-                description: 'Number of top results to fetch. Defaults to 4',
-                placeholder: '4',
+                description: 'Number of top results to fetch. Defaults to 5',
+                placeholder: '5',
                 type: 'number',
                 additionalParams: true,
                 optional: true
             },
+            {
+                label: 'MMR K',
+                name: 'mmrK',
+                description: 'Number of top results to fetch for MMR. Defaults to 50',
+                placeholder: '50',
+                type: 'number',
+                additionalParams: true,
+                optional: true
+            },
+            {
+                label: 'MMR diversity bias',
+                name: 'mmrDiversityBias',
+                step: 0.1,
+                description:
+                    'The diversity bias to use for MMR. This is a value between 0.0 and 1.0. ' +
+                    'Values closer to 1.0 optimize for the most diverse results. ' +
+                    'Defaults to 0 (MMR disabled)',
+                placeholder: '0.0',
+                type: 'number',
+                additionalParams: true,
+                optional: true
@@ -191,7 +215,9 @@ class Vectara_VectorStores implements INode {
         const lambda = nodeData.inputs?.lambda as number
         const output = nodeData.outputs?.output as string
         const topK = nodeData.inputs?.topK as string
-        const k = topK ? parseFloat(topK) : 4
+        const k = topK ? parseFloat(topK) : 5
+        const mmrK = nodeData.inputs?.mmrK as number
+        const mmrDiversityBias = nodeData.inputs?.mmrDiversityBias as number

         const vectaraArgs: VectaraLibArgs = {
             apiKey: apiKey,
@@ -208,6 +234,11 @@ class Vectara_VectorStores implements INode {
         if (sentencesBefore) vectaraContextConfig.sentencesBefore = sentencesBefore
         if (sentencesAfter) vectaraContextConfig.sentencesAfter = sentencesAfter
         vectaraFilter.contextConfig = vectaraContextConfig

+        const mmrConfig: MMRConfig = {}
+        mmrConfig.enabled = mmrDiversityBias > 0
+        mmrConfig.mmrTopK = mmrK
+        mmrConfig.diversityBias = mmrDiversityBias
+        vectaraFilter.mmrConfig = mmrConfig

         const vectorStore = new VectaraStore(vectaraArgs)
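
Note how MMR is toggled in this node: there is no separate on/off input, a positive diversity bias enables it. A sketch of the config a typical setup produces (numbers illustrative):

```ts
import { MMRConfig } from 'langchain/vectorstores/vectara'

const diversityBias = 0.3 // illustrative; 0 = pure relevance, values near 1 favor maximum diversity
const mmrConfig: MMRConfig = {
    enabled: diversityBias > 0, // MMR turns on only when the bias is positive
    mmrTopK: 50, // candidates fetched before diversity reranking
    diversityBias
}
```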

View File

@@ -19,6 +19,7 @@
         "@aws-sdk/client-bedrock-runtime": "3.422.0",
         "@aws-sdk/client-dynamodb": "^3.360.0",
         "@aws-sdk/client-s3": "^3.427.0",
+        "@datastax/astra-db-ts": "^0.1.2",
         "@dqbd/tiktoken": "^1.0.7",
         "@elastic/elasticsearch": "^8.9.0",
         "@getzep/zep-js": "^0.9.0",
@@ -26,6 +27,7 @@
         "@gomomento/sdk-core": "^1.51.1",
         "@google-ai/generativelanguage": "^0.2.1",
         "@huggingface/inference": "^2.6.1",
+        "@langchain/community": "^0.0.16",
         "@langchain/google-genai": "^0.0.6",
         "@langchain/mistralai": "^0.0.6",
         "@notionhq/client": "^2.2.8",
@@ -48,7 +50,7 @@
         "faiss-node": "^0.2.2",
         "fast-json-patch": "^3.1.1",
         "form-data": "^4.0.0",
-        "google-auth-library": "^9.0.0",
+        "google-auth-library": "^9.4.0",
         "graphql": "^16.6.0",
         "html-to-text": "^9.0.5",
         "husky": "^8.0.3",

View File

@@ -606,9 +606,18 @@ class ExceptionTool extends Tool {
 export const formatAgentSteps = (steps: AgentStep[]): BaseMessage[] =>
     steps.flatMap(({ action, observation }) => {
+        const create_function_message = (observation: string, action: AgentAction) => {
+            let content: string
+            if (typeof observation !== 'string') {
+                content = JSON.stringify(observation)
+            } else {
+                content = observation
+            }
+            return new FunctionMessage(content, action.tool)
+        }
         if ('messageLog' in action && action.messageLog !== undefined) {
             const log = action.messageLog as BaseMessage[]
-            return log.concat(new FunctionMessage(observation, action.tool))
+            return log.concat(create_function_message(observation, action))
         } else {
             return [new AIMessage(action.log)]
         }

View File

@@ -350,12 +350,33 @@
                 {
                     "label": "Top K",
                     "name": "topK",
-                    "description": "Number of top results to fetch. Defaults to 4",
-                    "placeholder": "4",
+                    "description": "Number of top results to fetch. Defaults to 5",
+                    "placeholder": "5",
                     "type": "number",
                     "additionalParams": true,
                     "optional": true,
                     "id": "vectara_0-input-topK-number"
+                },
+                {
+                    "label": "MMR K",
+                    "name": "mmrK",
+                    "description": "The number of results to rerank if MMR is enabled.",
+                    "placeholder": "50",
+                    "type": "number",
+                    "additionalParams": true,
+                    "optional": true,
+                    "id": "vectara_0-input-mmrK-number"
+                },
+                {
+                    "label": "MMR Diversity Bias",
+                    "name": "mmrDiversityBias",
+                    "step": 0.1,
+                    "description": "Diversity Bias parameter for MMR, if enabled. 0.0 means no diversity bias, 1.0 means maximum diversity bias. Defaults to 0.0 (MMR disabled).",
+                    "placeholder": "0.0",
+                    "type": "number",
+                    "additionalParams": true,
+                    "optional": true,
+                    "id": "vectara_0-input-mmrDiversityBias-number"
                 }
             ],
             "inputAnchors": [
@@ -374,7 +395,9 @@
                 "sentencesBefore": "",
                 "sentencesAfter": "",
                 "lambda": "",
-                "topK": ""
+                "topK": "",
+                "mmrK": "",
+                "mmrDiversityBias": ""
             },
             "outputAnchors": [
                 {

View File

@@ -361,7 +361,8 @@ export class App {
             const chatflow = await this.AppDataSource.getRepository(ChatFlow).findOneBy({
                 id: req.params.id
             })
-            if (chatflow && chatflow.chatbotConfig) {
+            if (!chatflow) return res.status(404).send(`Chatflow ${req.params.id} not found`)
+            if (chatflow.chatbotConfig) {
                 try {
                     const parsedConfig = JSON.parse(chatflow.chatbotConfig)
                     return res.json(parsedConfig)
@@ -369,7 +370,7 @@ export class App {
                     return res.status(500).send(`Error parsing Chatbot Config for Chatflow ${req.params.id}`)
                 }
             }
-            return res.status(404).send(`Chatbot Config for Chatflow ${req.params.id} not found`)
+            return res.status(200).send('OK')
         })

         // Save chatflow
@@ -521,7 +522,7 @@ export class App {
                 res.status(404).send(`Chatflow ${chatflowid} not found`)
                 return
             }
-            const chatId = (req.query?.chatId as string) ?? (await getChatId(chatflowid))
+            const chatId = req.query?.chatId as string
             const memoryType = req.query?.memoryType as string | undefined
             const sessionId = req.query?.sessionId as string | undefined
             const chatType = req.query?.chatType as string | undefined
@@ -545,7 +546,8 @@ export class App {
                 return res.status(500).send('Error clearing chat messages')
             }

-            const deleteOptions: FindOptionsWhere<ChatMessage> = { chatflowid, chatId }
+            const deleteOptions: FindOptionsWhere<ChatMessage> = { chatflowid }
+            if (chatId) deleteOptions.chatId = chatId
             if (memoryType) deleteOptions.memoryType = memoryType
             if (sessionId) deleteOptions.sessionId = sessionId
             if (chatType) deleteOptions.chatType = chatType
@@ -633,7 +635,7 @@ export class App {
             return res.json(result)
         })

-        // Delete all chatmessages from chatflowid
+        // Delete all credentials from chatflowid
         this.app.delete('/api/v1/credentials/:id', async (req: Request, res: Response) => {
             const results = await this.AppDataSource.getRepository(Credential).delete({ id: req.params.id })
             return res.json(results)
@@ -1790,23 +1792,6 @@ export class App {
     }
 }

-/**
- * Get first chat message id
- * @param {string} chatflowid
- * @returns {string}
- */
-export async function getChatId(chatflowid: string): Promise<string> {
-    // first chatmessage id as the unique chat id
-    const firstChatMessage = await getDataSource()
-        .getRepository(ChatMessage)
-        .createQueryBuilder('cm')
-        .select('cm.id')
-        .where('chatflowid = :chatflowid', { chatflowid })
-        .orderBy('cm.createdDate', 'ASC')
-        .getOne()
-    return firstChatMessage ? firstChatMessage.id : ''
-}

 let serverApp: App | undefined

 export async function getAllChatFlow(): Promise<IChatFlow[]> {
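
This file carries the heart of the concurrent-chat-session fix: callers without an explicit chatId used to fall back to getChatId, the id of the chatflow's first stored message, so parallel sessions collapsed into one chat. A minimal simulation of the old and new resolution (ids illustrative):

```ts
// What getChatId() used to return for every caller of the same chatflow
const firstMessageId = 'msg-001'

const resolveChatIdBefore = (queryChatId?: string) => queryChatId ?? firstMessageId
const resolveChatIdAfter = (queryChatId?: string) => queryChatId

// Two concurrent sessions without explicit ids:
resolveChatIdBefore(undefined) === resolveChatIdBefore(undefined) // true: both collapse onto 'msg-001'
resolveChatIdAfter(undefined) // undefined: chatId is simply omitted from the delete filter
```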

View File

@@ -547,7 +547,11 @@ export const getVariableValue = (
     variablePaths.forEach((path) => {
         const variableValue = variableDict[path]
         // Replace all occurrence
-        returnVal = returnVal.split(path).join(variableValue)
+        if (typeof variableValue === 'object') {
+            returnVal = returnVal.split(path).join(JSON.stringify(variableValue).replace(/"/g, '\\"'))
+        } else {
+            returnVal = returnVal.split(path).join(variableValue)
+        }
     })
     return returnVal
 }
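
A sketch of the failure this guards against: when a resolved prompt variable is an object, naive string joining yielded '[object Object]'; it is now embedded as JSON with escaped quotes so the surrounding template string stays parseable (values illustrative):

```ts
const variableValue: unknown = { city: 'Paris', days: 3 }

// old: implicit toString() -> '[object Object]'
// new: escaped JSON that survives being spliced into an already-quoted string
const embedded = JSON.stringify(variableValue).replace(/"/g, '\\"')
// embedded now reads: {\"city\":\"Paris\",\"days\":3}
```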

View File

@@ -67,7 +67,11 @@ const ExpandTextDialog = ({ show, dialogProps, onCancel, onConfirm }) => {
     useEffect(() => {
         if (executeCustomFunctionNodeApi.data) {
-            setCodeExecutedResult(executeCustomFunctionNodeApi.data)
+            if (typeof executeCustomFunctionNodeApi.data === 'object') {
+                setCodeExecutedResult(JSON.stringify(executeCustomFunctionNodeApi.data, null, 2))
+            } else {
+                setCodeExecutedResult(executeCustomFunctionNodeApi.data)
+            }
         }
     }, [executeCustomFunctionNodeApi.data])
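
The dialog gets the same object-versus-string guard: structured results are pretty-printed instead of rendering as '[object Object]'. A sketch (payload illustrative):

```ts
// Pretty-print structured results from the custom function before display
const data: unknown = { status: 'ok', rows: 3 }
const display = typeof data === 'object' ? JSON.stringify(data, null, 2) : String(data)
```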