Merge branch 'main' into bugfix/Concurrent-Chat-Session

# Conflicts:
#	packages/components/nodes/agents/OpenAIFunctionAgent/OpenAIFunctionAgent.ts

commit 7ba4a063fb
README-ZH.md (29 changed lines)

@@ -145,25 +145,40 @@ Flowise 支持不同的环境变量来配置您的实例。您可以在 `package
 ## 🌐 自托管
 
-### [Railway](https://docs.flowiseai.com/deployment/railway)
+在您现有的基础设施中部署自托管的 Flowise,我们支持各种[部署](https://docs.flowiseai.com/configuration/deployment)
 
-[](https://railway.app/template/pn4G8S?referralCode=WVNPD9)
-
-### [Render](https://docs.flowiseai.com/deployment/render)
-
-[](https://docs.flowiseai.com/deployment/render)
-
-### [HuggingFace Spaces](https://docs.flowiseai.com/deployment/hugging-face)
-
-<a href="https://huggingface.co/spaces/FlowiseAI/Flowise"><img src="https://huggingface.co/datasets/huggingface/badges/raw/main/open-in-hf-spaces-sm.svg" alt="HuggingFace Spaces"></a>
-
-### [AWS](https://docs.flowiseai.com/deployment/aws)
-
-### [Azure](https://docs.flowiseai.com/deployment/azure)
-
-### [DigitalOcean](https://docs.flowiseai.com/deployment/digital-ocean)
-
-### [GCP](https://docs.flowiseai.com/deployment/gcp)
+- [AWS](https://docs.flowiseai.com/deployment/aws)
+- [Azure](https://docs.flowiseai.com/deployment/azure)
+- [Digital Ocean](https://docs.flowiseai.com/deployment/digital-ocean)
+- [GCP](https://docs.flowiseai.com/deployment/gcp)
+- <details>
+  <summary>其他</summary>
+
+  - [Railway](https://docs.flowiseai.com/deployment/railway)
+
+    [](https://railway.app/template/pn4G8S?referralCode=WVNPD9)
+
+  - [Render](https://docs.flowiseai.com/deployment/render)
+
+    [](https://docs.flowiseai.com/deployment/render)
+
+  - [HuggingFace Spaces](https://docs.flowiseai.com/deployment/hugging-face)
+
+    <a href="https://huggingface.co/spaces/FlowiseAI/Flowise"><img src="https://huggingface.co/datasets/huggingface/badges/raw/main/open-in-hf-spaces-sm.svg" alt="HuggingFace Spaces"></a>
+
+  - [Elestio](https://elest.io/open-source/flowiseai)
+
+    [](https://elest.io/open-source/flowiseai)
+
+  - [Sealos](https://cloud.sealos.io/?openapp=system-template%3FtemplateName%3Dflowise)
+
+    [](https://cloud.sealos.io/?openapp=system-template%3FtemplateName%3Dflowise)
+
+  - [RepoCloud](https://repocloud.io/details/?app_id=29)
+
+    [](https://repocloud.io/details/?app_id=29)
+
+  </details>
 
 ## 💻 云托管
README.md (33 changed lines)

@@ -145,29 +145,40 @@ Flowise support different environment variables to configure your instance. You
 ## 🌐 Self Host
 
-### [Railway](https://docs.flowiseai.com/deployment/railway)
+Deploy Flowise self-hosted in your existing infrastructure, we support various [deployments](https://docs.flowiseai.com/configuration/deployment)
 
-[](https://railway.app/template/pn4G8S?referralCode=WVNPD9)
-
-### [Render](https://docs.flowiseai.com/deployment/render)
-
-[](https://docs.flowiseai.com/deployment/render)
-
-### [Elestio](https://elest.io/open-source/flowiseai)
-
-[](https://elest.io/open-source/flowiseai)
-
-### [HuggingFace Spaces](https://docs.flowiseai.com/deployment/hugging-face)
-
-<a href="https://huggingface.co/spaces/FlowiseAI/Flowise"><img src="https://huggingface.co/datasets/huggingface/badges/raw/main/open-in-hf-spaces-sm.svg" alt="HuggingFace Spaces"></a>
-
-### [AWS](https://docs.flowiseai.com/deployment/aws)
-
-### [Azure](https://docs.flowiseai.com/deployment/azure)
-
-### [DigitalOcean](https://docs.flowiseai.com/deployment/digital-ocean)
-
-### [GCP](https://docs.flowiseai.com/deployment/gcp)
+- [AWS](https://docs.flowiseai.com/deployment/aws)
+- [Azure](https://docs.flowiseai.com/deployment/azure)
+- [Digital Ocean](https://docs.flowiseai.com/deployment/digital-ocean)
+- [GCP](https://docs.flowiseai.com/deployment/gcp)
+- <details>
+  <summary>Others</summary>
+
+  - [Railway](https://docs.flowiseai.com/deployment/railway)
+
+    [](https://railway.app/template/pn4G8S?referralCode=WVNPD9)
+
+  - [Render](https://docs.flowiseai.com/deployment/render)
+
+    [](https://docs.flowiseai.com/deployment/render)
+
+  - [HuggingFace Spaces](https://docs.flowiseai.com/deployment/hugging-face)
+
+    <a href="https://huggingface.co/spaces/FlowiseAI/Flowise"><img src="https://huggingface.co/datasets/huggingface/badges/raw/main/open-in-hf-spaces-sm.svg" alt="HuggingFace Spaces"></a>
+
+  - [Elestio](https://elest.io/open-source/flowiseai)
+
+    [](https://elest.io/open-source/flowiseai)
+
+  - [Sealos](https://cloud.sealos.io/?openapp=system-template%3FtemplateName%3Dflowise)
+
+    [](https://cloud.sealos.io/?openapp=system-template%3FtemplateName%3Dflowise)
+
+  - [RepoCloud](https://repocloud.io/details/?app_id=29)
+
+    [](https://repocloud.io/details/?app_id=29)
+
+  </details>
 
 ## 💻 Cloud Hosted

@@ -1,6 +1,6 @@
 # Flowise Docker Hub Image
 
-Starts Flowise from [DockerHub Image](https://hub.docker.com/repository/docker/flowiseai/flowise/general)
+Starts Flowise from [DockerHub Image](https://hub.docker.com/r/flowiseai/flowise)
 
 ## Usage

@@ -0,0 +1,34 @@
+import { INodeParams, INodeCredential } from '../src/Interface'
+
+class AstraDBApi implements INodeCredential {
+    label: string
+    name: string
+    version: number
+    description: string
+    inputs: INodeParams[]
+
+    constructor() {
+        this.label = 'Astra DB API'
+        this.name = 'AstraDBApi'
+        this.version = 1.0
+        this.inputs = [
+            {
+                label: 'Astra DB Collection Name',
+                name: 'collectionName',
+                type: 'string'
+            },
+            {
+                label: 'Astra DB Application Token',
+                name: 'applicationToken',
+                type: 'password'
+            },
+            {
+                label: 'Astra DB Api Endpoint',
+                name: 'dbEndPoint',
+                type: 'string'
+            }
+        ]
+    }
+}
+
+module.exports = { credClass: AstraDBApi }

@@ -1,24 +1,23 @@
 import { INodeParams, INodeCredential } from '../src/Interface'
 
-class ZapierNLAApi implements INodeCredential {
+class LocalAIApi implements INodeCredential {
     label: string
     name: string
     version: number
     description: string
     inputs: INodeParams[]
 
     constructor() {
-        this.label = 'Zapier NLA API'
-        this.name = 'zapierNLAApi'
+        this.label = 'LocalAI API'
+        this.name = 'localAIApi'
         this.version = 1.0
         this.inputs = [
             {
-                label: 'Zapier NLA Api Key',
-                name: 'zapierNLAApiKey',
+                label: 'LocalAI Api Key',
+                name: 'localAIApiKey',
                 type: 'password'
             }
         ]
     }
 }
 
-module.exports = { credClass: ZapierNLAApi }
+module.exports = { credClass: LocalAIApi }

@@ -112,7 +112,7 @@ const prepareAgent = (
     const inputKey = memory.inputKey ? memory.inputKey : 'input'
 
     const prompt = ChatPromptTemplate.fromMessages([
-        ['ai', systemMessage ? systemMessage : `You are a helpful AI assistant.`],
+        ['system', systemMessage ? systemMessage : `You are a helpful AI assistant.`],
         new MessagesPlaceholder(memoryKey),
         ['human', `{${inputKey}}`],
         new MessagesPlaceholder('agent_scratchpad')
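
Note: the hunk above swaps the role of the leading prompt message from 'ai' to 'system'. A minimal standalone sketch of why the tuple's first element matters; the `chat_history`/`input` names here are placeholders, not the node's actual keys:

```ts
import { ChatPromptTemplate, MessagesPlaceholder } from 'langchain/prompts'

const prompt = ChatPromptTemplate.fromMessages([
    ['system', 'You are a helpful AI assistant.'], // 'ai' would emit an AIMessage instead
    new MessagesPlaceholder('chat_history'),
    ['human', '{input}']
])

prompt.formatMessages({ chat_history: [], input: 'Hello' }).then((messages) => {
    // With 'system', OpenAI receives the instruction with role "system",
    // so it is treated as an instruction rather than prior assistant output.
    console.log(messages.map((m) => m._getType())) // ['system', 'human']
})
```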

@@ -69,22 +69,23 @@ class VectaraChain_Chains implements INode {
             options: [
                 {
                     label: 'vectara-summary-ext-v1.2.0 (gpt-3.5-turbo)',
-                    name: 'vectara-summary-ext-v1.2.0'
+                    name: 'vectara-summary-ext-v1.2.0',
+                    description: 'base summarizer, available to all Vectara users'
                 },
                 {
                     label: 'vectara-experimental-summary-ext-2023-10-23-small (gpt-3.5-turbo)',
                     name: 'vectara-experimental-summary-ext-2023-10-23-small',
-                    description: 'In beta, available to both Growth and Scale Vectara users'
+                    description: `In beta, available to both Growth and <a target="_blank" href="https://vectara.com/pricing/">Scale</a> Vectara users`
                 },
                 {
                     label: 'vectara-summary-ext-v1.3.0 (gpt-4.0)',
                     name: 'vectara-summary-ext-v1.3.0',
-                    description: 'Only available to paying Scale Vectara users'
+                    description: 'Only available to <a target="_blank" href="https://vectara.com/pricing/">Scale</a> Vectara users'
                 },
                 {
                     label: 'vectara-experimental-summary-ext-2023-10-23-med (gpt-4.0)',
                     name: 'vectara-experimental-summary-ext-2023-10-23-med',
-                    description: 'In beta, only available to paying Scale Vectara users'
+                    description: `In beta, only available to <a target="_blank" href="https://vectara.com/pricing/">Scale</a> Vectara users`
                 }
             ],
             default: 'vectara-summary-ext-v1.2.0'
@@ -228,7 +229,7 @@ class VectaraChain_Chains implements INode {
 
     async run(nodeData: INodeData, input: string): Promise<object> {
         const vectorStore = nodeData.inputs?.vectaraStore as VectaraStore
-        const responseLang = (nodeData.inputs?.responseLang as string) ?? 'auto'
+        const responseLang = (nodeData.inputs?.responseLang as string) ?? 'eng'
         const summarizerPromptName = nodeData.inputs?.summarizerPromptName as string
         const maxSummarizedResultsStr = nodeData.inputs?.maxSummarizedResults as string
         const maxSummarizedResults = maxSummarizedResultsStr ? parseInt(maxSummarizedResultsStr, 10) : 7
@@ -247,17 +248,31 @@ class VectaraChain_Chains implements INode {
             lexicalInterpolationConfig: { lambda: vectaraFilter?.lambda ?? 0.025 }
         }))
 
+        // Vectara reranker ID for MMR (https://docs.vectara.com/docs/api-reference/search-apis/reranking#maximal-marginal-relevance-mmr-reranker)
+        const mmrRerankerId = 272725718
+        const mmrEnabled = vectaraFilter?.mmrConfig?.enabled
+
         const data = {
             query: [
                 {
                     query: input,
                     start: 0,
-                    numResults: topK,
+                    numResults: mmrEnabled ? vectaraFilter?.mmrTopK : topK,
+                    corpusKey: corpusKeys,
                     contextConfig: {
                         sentencesAfter: vectaraFilter?.contextConfig?.sentencesAfter ?? 2,
                         sentencesBefore: vectaraFilter?.contextConfig?.sentencesBefore ?? 2
                     },
-                    corpusKey: corpusKeys,
+                    ...(mmrEnabled
+                        ? {
+                              rerankingConfig: {
+                                  rerankerId: mmrRerankerId,
+                                  mmrConfig: {
+                                      diversityBias: vectaraFilter?.mmrConfig.diversityBias
+                                  }
+                              }
+                          }
+                        : {}),
                     summary: [
                         {
                             summarizerPromptName,
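
Note: the `rerankingConfig` block is attached with a conditional spread, so the Vectara query body only carries MMR fields when MMR is on. A self-contained sketch of the pattern (values are illustrative; the reranker ID is the one from the hunk above):

```ts
const mmrEnabled = true
const topK = 4
const mmrTopK = 50

const body = {
    query: 'example query',
    numResults: mmrEnabled ? mmrTopK : topK, // fetch a larger pool when reranking
    ...(mmrEnabled ? { rerankingConfig: { rerankerId: 272725718, mmrConfig: { diversityBias: 0.3 } } } : {})
}

// rerankingConfig is present only when mmrEnabled is true
console.log(JSON.stringify(body, null, 2))
```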
@@ -285,6 +300,14 @@ class VectaraChain_Chains implements INode {
         const documents = result.responseSet[0].document
         let rawSummarizedText = ''
 
+        // remove responses that are not in the topK (in case of MMR)
+        // Note that this does not really matter functionally due to the reorder citations, but it is more efficient
+        const maxResponses = mmrEnabled ? Math.min(responses.length, topK) : responses.length
+        if (responses.length > maxResponses) {
+            responses.splice(0, maxResponses)
+        }
+
         // Add metadata to each text response given its corresponding document metadata
         for (let i = 0; i < responses.length; i += 1) {
             const responseMetadata = responses[i].metadata
             const documentMetadata = documents[responses[i].documentIndex].metadata
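
Note: `Array.prototype.splice(0, n)` removes the first `n` elements; if the intent of the hunk above is to keep only the top-ranked `maxResponses` entries, the conventional call would be `splice(maxResponses)`. A quick standalone sketch of the difference:

```ts
const keepTop = ['r1', 'r2', 'r3', 'r4', 'r5']
keepTop.splice(2) // deletes everything FROM index 2
console.log(keepTop) // ['r1', 'r2'] — the top-ranked results survive

const dropTop = ['r1', 'r2', 'r3', 'r4', 'r5']
dropTop.splice(0, 2) // deletes the FIRST 2 elements
console.log(dropTop) // ['r3', 'r4', 'r5'] — the top-ranked results are gone
```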
@@ -301,13 +324,13 @@ class VectaraChain_Chains implements INode {
             responses[i].metadata = combinedMetadata
         }
 
         // Create the summarization response
         const summaryStatus = result.responseSet[0].summary[0].status
         if (summaryStatus.length > 0 && summaryStatus[0].code === 'BAD_REQUEST') {
             throw new Error(
                 `BAD REQUEST: Too much text for the summarizer to summarize. Please try reducing the number of search results to summarize, or the context of each result by adjusting the 'summary_num_sentences', and 'summary_num_results' parameters respectively.`
             )
         }
 
         if (
             summaryStatus.length > 0 &&
             summaryStatus[0].code === 'NOT_FOUND' &&
@@ -316,8 +339,8 @@ class VectaraChain_Chains implements INode {
             throw new Error(`BAD REQUEST: summarizer ${summarizerPromptName} is invalid for this account.`)
         }
 
         // Reorder citations in summary and create the list of returned source documents
         rawSummarizedText = result.responseSet[0].summary[0]?.text
 
         let summarizedText = reorderCitations(rawSummarizedText)
         let summaryResponses = applyCitationOrder(responses, rawSummarizedText)

@@ -1,5 +1,5 @@
-import { INode, INodeData, INodeParams } from '../../../src/Interface'
-import { getBaseClasses } from '../../../src/utils'
+import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
+import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
 import { OpenAIChat } from 'langchain/llms/openai'
 import { OpenAIChatInput } from 'langchain/chat_models/openai'
 import { BaseCache } from 'langchain/schema'
@@ -14,6 +14,7 @@ class ChatLocalAI_ChatModels implements INode {
     category: string
     description: string
     baseClasses: string[]
+    credential: INodeParams
     inputs: INodeParams[]
 
     constructor() {
@@ -25,6 +26,13 @@ class ChatLocalAI_ChatModels implements INode {
         this.category = 'Chat Models'
         this.description = 'Use local LLMs like llama.cpp, gpt4all using LocalAI'
         this.baseClasses = [this.type, 'BaseChatModel', ...getBaseClasses(OpenAIChat)]
+        this.credential = {
+            label: 'Connect Credential',
+            name: 'credential',
+            type: 'credential',
+            credentialNames: ['localAIApi'],
+            optional: true
+        }
         this.inputs = [
             {
                 label: 'Cache',
@@ -79,13 +87,16 @@ class ChatLocalAI_ChatModels implements INode {
         ]
     }
 
-    async init(nodeData: INodeData): Promise<any> {
+    async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
         const temperature = nodeData.inputs?.temperature as string
         const modelName = nodeData.inputs?.modelName as string
         const maxTokens = nodeData.inputs?.maxTokens as string
         const topP = nodeData.inputs?.topP as string
         const timeout = nodeData.inputs?.timeout as string
         const basePath = nodeData.inputs?.basePath as string
+        const credentialData = await getCredentialData(nodeData.credential ?? '', options)
+        const localAIApiKey = getCredentialParam('localAIApiKey', credentialData, nodeData)
 
         const cache = nodeData.inputs?.cache as BaseCache
 
         const obj: Partial<OpenAIChatInput> & BaseLLMParams & { openAIApiKey?: string } = {
@@ -98,6 +109,7 @@ class ChatLocalAI_ChatModels implements INode {
         if (topP) obj.topP = parseFloat(topP)
         if (timeout) obj.timeout = parseInt(timeout, 10)
         if (cache) obj.cache = cache
+        if (localAIApiKey) obj.openAIApiKey = localAIApiKey
 
         const model = new OpenAIChat(obj, { basePath })
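
Note: with the optional credential wired through, the key ends up as `openAIApiKey` on an OpenAI-compatible client pointed at the LocalAI base path. A hedged usage sketch — the localhost URL and model name are assumptions, and LocalAI only enforces a key if configured to:

```ts
import { OpenAIChat } from 'langchain/llms/openai'

const model = new OpenAIChat(
    { modelName: 'ggml-gpt4all-j', temperature: 0.9, openAIApiKey: 'my-local-key' },
    { basePath: 'http://localhost:8080/v1' } // LocalAI speaks the OpenAI wire format
)

model.call('What is a large language model?').then(console.log)
```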

@@ -1,4 +1,5 @@
-import { INode, INodeData, INodeParams } from '../../../src/Interface'
+import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
+import { getCredentialData, getCredentialParam } from '../../../src/utils'
 import { OpenAIEmbeddings, OpenAIEmbeddingsParams } from 'langchain/embeddings/openai'
 
 class LocalAIEmbedding_Embeddings implements INode {
@@ -10,6 +11,7 @@ class LocalAIEmbedding_Embeddings implements INode {
     category: string
     description: string
     baseClasses: string[]
+    credential: INodeParams
     inputs: INodeParams[]
 
     constructor() {
@@ -21,6 +23,13 @@ class LocalAIEmbedding_Embeddings implements INode {
         this.category = 'Embeddings'
         this.description = 'Use local embeddings models like llama.cpp'
         this.baseClasses = [this.type, 'Embeddings']
+        this.credential = {
+            label: 'Connect Credential',
+            name: 'credential',
+            type: 'credential',
+            credentialNames: ['localAIApi'],
+            optional: true
+        }
         this.inputs = [
             {
                 label: 'Base Path',
@@ -37,15 +46,20 @@ class LocalAIEmbedding_Embeddings implements INode {
         ]
     }
 
-    async init(nodeData: INodeData): Promise<any> {
+    async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
         const modelName = nodeData.inputs?.modelName as string
         const basePath = nodeData.inputs?.basePath as string
 
+        const credentialData = await getCredentialData(nodeData.credential ?? '', options)
+        const localAIApiKey = getCredentialParam('localAIApiKey', credentialData, nodeData)
+
         const obj: Partial<OpenAIEmbeddingsParams> & { openAIApiKey?: string } = {
             modelName,
             openAIApiKey: 'sk-'
         }
 
+        if (localAIApiKey) obj.openAIApiKey = localAIApiKey
+
         const model = new OpenAIEmbeddings(obj, { basePath })
 
         return model
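
Note: same pattern on the embeddings side — 'sk-' stays as a placeholder key unless a LocalAI credential is connected. A hedged sketch (URL and model name are assumptions):

```ts
import { OpenAIEmbeddings } from 'langchain/embeddings/openai'

const embeddings = new OpenAIEmbeddings(
    { modelName: 'text-embedding-ada-002', openAIApiKey: 'sk-' }, // placeholder, as in the node
    { basePath: 'http://localhost:8080/v1' }
)

embeddings.embedQuery('hello world').then((vector) => console.log(vector.length))
```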

@@ -65,7 +65,7 @@ class CustomFunction_Utilities implements INode {
                 inputVars =
                     typeof functionInputVariablesRaw === 'object' ? functionInputVariablesRaw : JSON.parse(functionInputVariablesRaw)
             } catch (exception) {
-                throw new Error("Invalid JSON in the PromptTemplate's promptValues: " + exception)
+                throw new Error('Invalid JSON in the Custom Function Input Variables: ' + exception)
             }
         }

@@ -0,0 +1,190 @@
+import { flatten } from 'lodash'
+import { Embeddings } from 'langchain/embeddings/base'
+import { Document } from 'langchain/document'
+import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
+import { getBaseClasses, getCredentialData } from '../../../src/utils'
+import { AstraDBVectorStore, AstraLibArgs } from '@langchain/community/vectorstores/astradb'
+
+class Astra_VectorStores implements INode {
+    label: string
+    name: string
+    version: number
+    description: string
+    type: string
+    icon: string
+    category: string
+    badge: string
+    baseClasses: string[]
+    inputs: INodeParams[]
+    credential: INodeParams
+    outputs: INodeOutputsValue[]
+
+    constructor() {
+        this.label = 'Astra'
+        this.name = 'Astra'
+        this.version = 1.0
+        this.type = 'Astra'
+        this.icon = 'astra.svg'
+        this.category = 'Vector Stores'
+        this.description = `Upsert embedded data and perform similarity search upon query using DataStax Astra DB, a serverless vector database that’s perfect for managing mission-critical AI workloads`
+        this.baseClasses = [this.type, 'VectorStoreRetriever', 'BaseRetriever']
+        this.badge = 'NEW'
+        this.credential = {
+            label: 'Connect Credential',
+            name: 'credential',
+            type: 'credential',
+            credentialNames: ['AstraDBApi']
+        }
+        this.inputs = [
+            {
+                label: 'Document',
+                name: 'document',
+                type: 'Document',
+                list: true,
+                optional: true
+            },
+            {
+                label: 'Embeddings',
+                name: 'embeddings',
+                type: 'Embeddings'
+            },
+            {
+                label: 'Vector Dimension',
+                name: 'vectorDimension',
+                type: 'number',
+                placeholder: '1536',
+                optional: true,
+                description: 'Dimension used for storing vector embedding'
+            },
+            {
+                label: 'Similarity Metric',
+                name: 'similarityMetric',
+                type: 'string',
+                placeholder: 'cosine',
+                optional: true,
+                description: 'cosine | euclidean | dot_product'
+            },
+            {
+                label: 'Top K',
+                name: 'topK',
+                description: 'Number of top results to fetch. Default to 4',
+                placeholder: '4',
+                type: 'number',
+                additionalParams: true,
+                optional: true
+            }
+        ]
+        this.outputs = [
+            {
+                label: 'Astra Retriever',
+                name: 'retriever',
+                baseClasses: this.baseClasses
+            },
+            {
+                label: 'Astra Vector Store',
+                name: 'vectorStore',
+                baseClasses: [this.type, ...getBaseClasses(AstraDBVectorStore)]
+            }
+        ]
+    }
+
+    //@ts-ignore
+    vectorStoreMethods = {
+        async upsert(nodeData: INodeData, options: ICommonObject): Promise<void> {
+            const docs = nodeData.inputs?.document as Document[]
+            const embeddings = nodeData.inputs?.embeddings as Embeddings
+            const vectorDimension = nodeData.inputs?.vectorDimension as number
+            const similarityMetric = nodeData.inputs?.similarityMetric as 'cosine' | 'euclidean' | 'dot_product' | undefined
+            const credentialData = await getCredentialData(nodeData.credential ?? '', options)
+
+            const expectedSimilarityMetric = ['cosine', 'euclidean', 'dot_product']
+            if (similarityMetric && !expectedSimilarityMetric.includes(similarityMetric)) {
+                throw new Error(`Invalid Similarity Metric should be one of 'cosine' | 'euclidean' | 'dot_product'`)
+            }
+
+            const clientConfig = {
+                token: credentialData?.applicationToken,
+                endpoint: credentialData?.dbEndPoint
+            }
+
+            const astraConfig: AstraLibArgs = {
+                ...clientConfig,
+                collection: credentialData.collectionName ?? 'flowise_test',
+                collectionOptions: {
+                    vector: {
+                        dimension: vectorDimension ?? 1536,
+                        metric: similarityMetric ?? 'cosine'
+                    }
+                }
+            }
+
+            const flattenDocs = docs && docs.length ? flatten(docs) : []
+            const finalDocs = []
+            for (let i = 0; i < flattenDocs.length; i += 1) {
+                if (flattenDocs[i] && flattenDocs[i].pageContent) {
+                    finalDocs.push(new Document(flattenDocs[i]))
+                }
+            }
+
+            try {
+                await AstraDBVectorStore.fromDocuments(finalDocs, embeddings, astraConfig)
+            } catch (e) {
+                throw new Error(e)
+            }
+        }
+    }
+
+    async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
+        const docs = nodeData.inputs?.document as Document[]
+        const embeddings = nodeData.inputs?.embeddings as Embeddings
+        const vectorDimension = nodeData.inputs?.vectorDimension as number
+        const similarityMetric = nodeData.inputs?.similarityMetric as 'cosine' | 'euclidean' | 'dot_product' | undefined
+        const output = nodeData.outputs?.output as string
+        const topK = nodeData.inputs?.topK as string
+        const k = topK ? parseFloat(topK) : 4
+
+        const credentialData = await getCredentialData(nodeData.credential ?? '', options)
+
+        const expectedSimilarityMetric = ['cosine', 'euclidean', 'dot_product']
+        if (similarityMetric && !expectedSimilarityMetric.includes(similarityMetric)) {
+            throw new Error(`Invalid Similarity Metric should be one of 'cosine' | 'euclidean' | 'dot_product'`)
+        }
+
+        const clientConfig = {
+            token: credentialData?.applicationToken,
+            endpoint: credentialData?.dbEndPoint
+        }
+
+        const astraConfig: AstraLibArgs = {
+            ...clientConfig,
+            collection: credentialData.collectionName ?? 'flowise_test',
+            collectionOptions: {
+                vector: {
+                    dimension: vectorDimension ?? 1536,
+                    metric: similarityMetric ?? 'cosine'
+                }
+            }
+        }
+
+        const flattenDocs = docs && docs.length ? flatten(docs) : []
+        const finalDocs = []
+        for (let i = 0; i < flattenDocs.length; i += 1) {
+            if (flattenDocs[i] && flattenDocs[i].pageContent) {
+                finalDocs.push(new Document(flattenDocs[i]))
+            }
+        }
+
+        const vectorStore = await AstraDBVectorStore.fromExistingIndex(embeddings, astraConfig)
+
+        if (output === 'retriever') {
+            const retriever = vectorStore.asRetriever(k)
+            return retriever
+        } else if (output === 'vectorStore') {
+            ;(vectorStore as any).k = k
+            return vectorStore
+        }
+        return vectorStore
+    }
+}
+
+module.exports = { nodeClass: Astra_VectorStores }
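
Note: the node is a thin wrapper over `AstraDBVectorStore` from `@langchain/community`, with the token, endpoint, and collection coming from the `AstraDBApi` credential defined earlier in this commit. A hedged standalone sketch of the underlying calls — the endpoint and token values are placeholders:

```ts
import { AstraDBVectorStore, AstraLibArgs } from '@langchain/community/vectorstores/astradb'
import { OpenAIEmbeddings } from 'langchain/embeddings/openai'
import { Document } from 'langchain/document'

const astraConfig: AstraLibArgs = {
    token: 'AstraCS:placeholder-token',
    endpoint: 'https://<db-id>-<region>.apps.astra.datastax.com',
    collection: 'flowise_test',
    collectionOptions: { vector: { dimension: 1536, metric: 'cosine' } }
}

async function demo() {
    // Upsert path: embed documents and write them into the collection
    const store = await AstraDBVectorStore.fromDocuments(
        [new Document({ pageContent: 'Flowise lets you build LLM flows visually' })],
        new OpenAIEmbeddings(),
        astraConfig
    )
    // Query path: similarity search over the stored vectors
    const hits = await store.similaritySearch('build LLM flows', 1)
    console.log(hits[0].pageContent)
}

demo()
```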

@@ -0,0 +1,12 @@
+<svg width="1200" height="1200" viewBox="0 0 1200 1200" fill="none" xmlns="http://www.w3.org/2000/svg">
+<rect width="1200" height="1200" fill="black"/>
+<g clip-path="url(#clip0_102_1968)">
+<path d="M508.819 464.97H267.001V737.697H508.819L569.566 690.526V512.14L508.819 464.97ZM313.864 512.14H522.703V690.575H313.864V512.14Z" fill="white"/>
+<path d="M917.531 514.121V468H696.425L636.389 514.121V577.447L696.425 623.568H889.124V688.545H648.348V734.667H875.409L935.444 688.545V623.568L875.409 577.447H682.709V514.121H917.531Z" fill="white"/>
+</g>
+<defs>
+<clipPath id="clip0_102_1968">
+<rect width="668.444" height="266.667" fill="white" transform="translate(267 468)"/>
+</clipPath>
+</defs>
+</svg>

@@ -65,6 +65,14 @@ class Milvus_VectorStores implements INode {
                 name: 'milvusCollection',
                 type: 'string'
             },
+            {
+                label: 'Milvus Text Field',
+                name: 'milvusTextField',
+                type: 'string',
+                placeholder: 'langchain_text',
+                optional: true,
+                additionalParams: true
+            },
             {
                 label: 'Milvus Filter',
                 name: 'milvusFilter',
@@ -150,6 +158,7 @@ class Milvus_VectorStores implements INode {
         const address = nodeData.inputs?.milvusServerUrl as string
         const collectionName = nodeData.inputs?.milvusCollection as string
         const milvusFilter = nodeData.inputs?.milvusFilter as string
+        const textField = nodeData.inputs?.milvusTextField as string
 
         // embeddings
         const embeddings = nodeData.inputs?.embeddings as Embeddings
@@ -169,7 +178,8 @@ class Milvus_VectorStores implements INode {
         // init MilvusLibArgs
         const milVusArgs: MilvusLibArgs = {
             url: address,
-            collectionName: collectionName
+            collectionName: collectionName,
+            textField: textField
         }
 
         if (milvusUser) milVusArgs.username = milvusUser
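
Note: `textField` lets the store read page content from a custom collection field instead of langchain's default `langchain_text`. A hedged sketch of the equivalent direct call — the URL, collection, and field names are placeholders:

```ts
import { Milvus } from 'langchain/vectorstores/milvus'
import { OpenAIEmbeddings } from 'langchain/embeddings/openai'

async function demo() {
    // Connect to an existing collection whose text lives in a custom field
    const store = await Milvus.fromExistingCollection(new OpenAIEmbeddings(), {
        url: 'localhost:19530',
        collectionName: 'my_docs',
        textField: 'my_text_field' // matches the new 'Milvus Text Field' input
    })
    console.log(await store.similaritySearch('hello', 2))
}

demo()
```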

@@ -1,5 +1,5 @@
 import { flatten } from 'lodash'
-import { VectaraStore, VectaraLibArgs, VectaraFilter, VectaraContextConfig, VectaraFile } from 'langchain/vectorstores/vectara'
+import { VectaraStore, VectaraLibArgs, VectaraFilter, VectaraContextConfig, VectaraFile, MMRConfig } from 'langchain/vectorstores/vectara'
 import { Document } from 'langchain/document'
 import { Embeddings } from 'langchain/embeddings/base'
 import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
@@ -22,7 +22,7 @@ class Vectara_VectorStores implements INode {
     constructor() {
         this.label = 'Vectara'
         this.name = 'vectara'
-        this.version = 1.0
+        this.version = 2.0
         this.type = 'Vectara'
         this.icon = 'vectara.png'
         this.category = 'Vector Stores'
@@ -82,7 +82,9 @@ class Vectara_VectorStores implements INode {
                 label: 'Lambda',
                 name: 'lambda',
                 description:
-                    'Improves retrieval accuracy by adjusting the balance (from 0 to 1) between neural search and keyword-based search factors.',
+                    'Enable hybrid search to improve retrieval accuracy by adjusting the balance (from 0 to 1) between neural search and keyword-based search factors.' +
+                    'A value of 0.0 means that only neural search is used, while a value of 1.0 means that only keyword-based search is used. Defaults to 0.0 (neural only).',
                 default: 0.0,
                 type: 'number',
                 additionalParams: true,
                 optional: true
@@ -90,8 +92,30 @@ class Vectara_VectorStores implements INode {
             {
                 label: 'Top K',
                 name: 'topK',
-                description: 'Number of top results to fetch. Defaults to 4',
-                placeholder: '4',
+                description: 'Number of top results to fetch. Defaults to 5',
+                placeholder: '5',
                 type: 'number',
                 additionalParams: true,
                 optional: true
             },
+            {
+                label: 'MMR K',
+                name: 'mmrK',
+                description: 'Number of top results to fetch for MMR. Defaults to 50',
+                placeholder: '50',
+                type: 'number',
+                additionalParams: true,
+                optional: true
+            },
+            {
+                label: 'MMR diversity bias',
+                name: 'mmrDiversityBias',
+                step: 0.1,
+                description:
+                    'The diversity bias to use for MMR. This is a value between 0.0 and 1.0' +
+                    'Values closer to 1.0 optimize for the most diverse results.' +
+                    'Defaults to 0 (MMR disabled)',
+                placeholder: '0.0',
+                type: 'number',
+                additionalParams: true,
+                optional: true
@@ -191,7 +215,9 @@ class Vectara_VectorStores implements INode {
         const lambda = nodeData.inputs?.lambda as number
         const output = nodeData.outputs?.output as string
         const topK = nodeData.inputs?.topK as string
-        const k = topK ? parseFloat(topK) : 4
+        const k = topK ? parseFloat(topK) : 5
+        const mmrK = nodeData.inputs?.mmrK as number
+        const mmrDiversityBias = nodeData.inputs?.mmrDiversityBias as number
 
         const vectaraArgs: VectaraLibArgs = {
             apiKey: apiKey,
@@ -208,6 +234,11 @@ class Vectara_VectorStores implements INode {
         if (sentencesBefore) vectaraContextConfig.sentencesBefore = sentencesBefore
         if (sentencesAfter) vectaraContextConfig.sentencesAfter = sentencesAfter
         vectaraFilter.contextConfig = vectaraContextConfig
+        const mmrConfig: MMRConfig = {}
+        mmrConfig.enabled = mmrDiversityBias > 0
+        mmrConfig.mmrTopK = mmrK
+        mmrConfig.diversityBias = mmrDiversityBias
+        vectaraFilter.mmrConfig = mmrConfig
 
         const vectorStore = new VectaraStore(vectaraArgs)
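
Note: there is no explicit MMR on/off switch in the UI — a diversity bias above 0 is the toggle, and `mmrK` sizes the candidate pool that gets reranked down to `topK`. A standalone sketch of the toggle logic:

```ts
const mmrDiversityBias = 0.4 // 'MMR diversity bias' input; 0 leaves MMR disabled
const mmrK = 50 // 'MMR K' input: candidates fetched before reranking

const mmrConfig = {
    enabled: mmrDiversityBias > 0,
    mmrTopK: mmrK,
    diversityBias: mmrDiversityBias
}

console.log(mmrConfig.enabled ? `rerank ${mmrConfig.mmrTopK} candidates` : 'MMR disabled')
```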

@@ -19,6 +19,7 @@
         "@aws-sdk/client-bedrock-runtime": "3.422.0",
         "@aws-sdk/client-dynamodb": "^3.360.0",
         "@aws-sdk/client-s3": "^3.427.0",
+        "@datastax/astra-db-ts": "^0.1.2",
         "@dqbd/tiktoken": "^1.0.7",
         "@elastic/elasticsearch": "^8.9.0",
         "@getzep/zep-js": "^0.9.0",
@@ -26,6 +27,7 @@
         "@gomomento/sdk-core": "^1.51.1",
         "@google-ai/generativelanguage": "^0.2.1",
         "@huggingface/inference": "^2.6.1",
+        "@langchain/community": "^0.0.16",
         "@langchain/google-genai": "^0.0.6",
         "@langchain/mistralai": "^0.0.6",
         "@notionhq/client": "^2.2.8",
@@ -48,7 +50,7 @@
         "faiss-node": "^0.2.2",
         "fast-json-patch": "^3.1.1",
         "form-data": "^4.0.0",
-        "google-auth-library": "^9.0.0",
+        "google-auth-library": "^9.4.0",
         "graphql": "^16.6.0",
         "html-to-text": "^9.0.5",
         "husky": "^8.0.3",
@@ -606,9 +606,18 @@ class ExceptionTool extends Tool {
 
 export const formatAgentSteps = (steps: AgentStep[]): BaseMessage[] =>
     steps.flatMap(({ action, observation }) => {
+        const create_function_message = (observation: string, action: AgentAction) => {
+            let content: string
+            if (typeof observation !== 'string') {
+                content = JSON.stringify(observation)
+            } else {
+                content = observation
+            }
+            return new FunctionMessage(content, action.tool)
+        }
         if ('messageLog' in action && action.messageLog !== undefined) {
             const log = action.messageLog as BaseMessage[]
-            return log.concat(new FunctionMessage(observation, action.tool))
+            return log.concat(create_function_message(observation, action))
         } else {
             return [new AIMessage(action.log)]
         }
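
Note: `FunctionMessage` content must be a string, but a tool's observation can arrive as already-parsed JSON at runtime, which is what the new helper guards against. The check in isolation:

```ts
const toFunctionContent = (observation: unknown): string =>
    typeof observation === 'string' ? observation : JSON.stringify(observation)

console.log(toFunctionContent('plain text')) // plain text
console.log(toFunctionContent({ temperature: 21.5 })) // {"temperature":21.5}
```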

@@ -350,12 +350,33 @@
                 {
                     "label": "Top K",
                     "name": "topK",
-                    "description": "Number of top results to fetch. Defaults to 4",
-                    "placeholder": "4",
+                    "description": "Number of top results to fetch. Defaults to 5",
+                    "placeholder": "5",
                     "type": "number",
                     "additionalParams": true,
                     "optional": true,
                     "id": "vectara_0-input-topK-number"
                 },
+                {
+                    "label": "MMR K",
+                    "name": "mmrK",
+                    "description": "The number of results to rerank if MMR is enabled.",
+                    "placeholder": "50",
+                    "type": "number",
+                    "additionalParams": true,
+                    "optional": true,
+                    "id": "vectara_0-input-mmrK-number"
+                },
+                {
+                    "label": "MMR Diversity Bias",
+                    "name": "mmrDiversityBias",
+                    "step": 0.1,
+                    "description": "Diversity Bias parameter for MMR, if enabled. 0.0 means no diversiry bias, 1.0 means maximum diversity bias. Defaults to 0.0 (MMR disabled).",
+                    "placeholder": "0.0",
+                    "type": "number",
+                    "additionalParams": true,
+                    "optional": true,
+                    "id": "vectara_0-input-mmrDiversityBias-number"
+                }
             ],
             "inputAnchors": [
@@ -374,7 +395,9 @@
                 "sentencesBefore": "",
                 "sentencesAfter": "",
                 "lambda": "",
-                "topK": ""
+                "topK": "",
+                "mmrK": "",
+                "mmrDiversityBias": ""
             },
             "outputAnchors": [
                 {

@@ -361,7 +361,8 @@ export class App {
             const chatflow = await this.AppDataSource.getRepository(ChatFlow).findOneBy({
                 id: req.params.id
             })
-            if (chatflow && chatflow.chatbotConfig) {
+            if (!chatflow) return res.status(404).send(`Chatflow ${req.params.id} not found`)
+            if (chatflow.chatbotConfig) {
                 try {
                     const parsedConfig = JSON.parse(chatflow.chatbotConfig)
                     return res.json(parsedConfig)
@@ -369,7 +370,7 @@
                     return res.status(500).send(`Error parsing Chatbot Config for Chatflow ${req.params.id}`)
                 }
             }
-            return res.status(404).send(`Chatbot Config for Chatflow ${req.params.id} not found`)
+            return res.status(200).send('OK')
         })
 
         // Save chatflow
@@ -521,7 +522,7 @@
                 res.status(404).send(`Chatflow ${chatflowid} not found`)
                 return
             }
-            const chatId = (req.query?.chatId as string) ?? (await getChatId(chatflowid))
+            const chatId = req.query?.chatId as string
             const memoryType = req.query?.memoryType as string | undefined
             const sessionId = req.query?.sessionId as string | undefined
             const chatType = req.query?.chatType as string | undefined
@@ -545,7 +546,8 @@
                 return res.status(500).send('Error clearing chat messages')
             }
 
-            const deleteOptions: FindOptionsWhere<ChatMessage> = { chatflowid, chatId }
+            const deleteOptions: FindOptionsWhere<ChatMessage> = { chatflowid }
+            if (chatId) deleteOptions.chatId = chatId
             if (memoryType) deleteOptions.memoryType = memoryType
             if (sessionId) deleteOptions.sessionId = sessionId
             if (chatType) deleteOptions.chatType = chatType
@@ -633,7 +635,7 @@
             return res.json(result)
         })
 
-        // Delete all chatmessages from chatflowid
+        // Delete all credentials from chatflowid
         this.app.delete('/api/v1/credentials/:id', async (req: Request, res: Response) => {
             const results = await this.AppDataSource.getRepository(Credential).delete({ id: req.params.id })
             return res.json(results)
@@ -1790,23 +1792,6 @@
     }
 }
 
-/**
- * Get first chat message id
- * @param {string} chatflowid
- * @returns {string}
- */
-export async function getChatId(chatflowid: string): Promise<string> {
-    // first chatmessage id as the unique chat id
-    const firstChatMessage = await getDataSource()
-        .getRepository(ChatMessage)
-        .createQueryBuilder('cm')
-        .select('cm.id')
-        .where('chatflowid = :chatflowid', { chatflowid })
-        .orderBy('cm.createdDate', 'ASC')
-        .getOne()
-    return firstChatMessage ? firstChatMessage.id : ''
-}
-
 let serverApp: App | undefined
 
 export async function getAllChatFlow(): Promise<IChatFlow[]> {
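
Note: this is the core of the concurrent-session fix — `chatId` is no longer backfilled from the first stored message (`getChatId` is deleted outright), so the delete filter only narrows by the identifiers the client actually sent. A sketch of the filter construction; the entity shape is abbreviated:

```ts
import { FindOptionsWhere } from 'typeorm'

interface ChatMessage {
    chatflowid: string
    chatId?: string
    memoryType?: string
    sessionId?: string
    chatType?: string
}

function buildDeleteOptions(chatflowid: string, q: Partial<ChatMessage>): FindOptionsWhere<ChatMessage> {
    // Always scope by flow; only narrow by fields the request actually provided
    const deleteOptions: FindOptionsWhere<ChatMessage> = { chatflowid }
    if (q.chatId) deleteOptions.chatId = q.chatId
    if (q.memoryType) deleteOptions.memoryType = q.memoryType
    if (q.sessionId) deleteOptions.sessionId = q.sessionId
    if (q.chatType) deleteOptions.chatType = q.chatType
    return deleteOptions
}

// Without a chatId, all sessions of the flow are targeted; with one, only that session.
console.log(buildDeleteOptions('flow-1', { chatId: 'chat-42' }))
```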

@@ -547,7 +547,11 @@ export const getVariableValue = (
         variablePaths.forEach((path) => {
             const variableValue = variableDict[path]
             // Replace all occurrence
-            returnVal = returnVal.split(path).join(variableValue)
+            if (typeof variableValue === 'object') {
+                returnVal = returnVal.split(path).join(JSON.stringify(variableValue).replace(/"/g, '\\"'))
+            } else {
+                returnVal = returnVal.split(path).join(variableValue)
+            }
         })
         return returnVal
     }
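
Note: object-valued variables are serialized and their quotes escaped so they can be substituted into a string that itself lives inside JSON. A standalone sketch of the branch (`{{vars}}` is a placeholder token for illustration, not Flowise's actual variable syntax):

```ts
function substitute(template: string, token: string, variableValue: unknown): string {
    if (typeof variableValue === 'object') {
        // Escape inner quotes so the serialized object survives inside a quoted string
        return template.split(token).join(JSON.stringify(variableValue).replace(/"/g, '\\"'))
    }
    return template.split(token).join(String(variableValue))
}

console.log(substitute('{"context": "{{vars}}"}', '{{vars}}', { city: 'Paris' }))
// {"context": "{\"city\":\"Paris\"}"} — still parseable JSON
```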

@@ -67,8 +67,12 @@ const ExpandTextDialog = ({ show, dialogProps, onCancel, onConfirm }) => {
 
     useEffect(() => {
         if (executeCustomFunctionNodeApi.data) {
-            setCodeExecutedResult(executeCustomFunctionNodeApi.data)
+            if (typeof executeCustomFunctionNodeApi.data === 'object') {
+                setCodeExecutedResult(JSON.stringify(executeCustomFunctionNodeApi.data, null, 2))
+            } else {
+                setCodeExecutedResult(executeCustomFunctionNodeApi.data)
+            }
         }
     }, [executeCustomFunctionNodeApi.data])
 
     useEffect(() => {