diff --git a/package.json b/package.json
index 5a9bfcbf3..561694d0e 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
{
"name": "flowise",
- "version": "1.4.9",
+ "version": "1.4.10",
"private": true,
"homepage": "https://flowiseai.com",
"workspaces": [
@@ -48,7 +48,7 @@
"pretty-quick": "^3.1.3",
"rimraf": "^3.0.2",
"run-script-os": "^1.1.6",
- "turbo": "1.7.4",
+ "turbo": "^1.7.4",
"typescript": "^4.8.4"
},
"engines": {
diff --git a/packages/components/nodes/chatmodels/ChatGoogleGenerativeAI/ChatGoogleGenerativeAI.ts b/packages/components/nodes/chatmodels/ChatGoogleGenerativeAI/ChatGoogleGenerativeAI.ts
index 546fa224c..9a4b8891b 100644
--- a/packages/components/nodes/chatmodels/ChatGoogleGenerativeAI/ChatGoogleGenerativeAI.ts
+++ b/packages/components/nodes/chatmodels/ChatGoogleGenerativeAI/ChatGoogleGenerativeAI.ts
@@ -1,7 +1,9 @@
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
-import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
+import { convertMultiOptionsToStringArray, getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { BaseCache } from 'langchain/schema'
-import { ChatGoogleGenerativeAI } from '@langchain/google-genai'
+import { ChatGoogleGenerativeAI, GoogleGenerativeAIChatInput } from '@langchain/google-genai'
+import { HarmBlockThreshold, HarmCategory } from '@google/generative-ai'
+import type { SafetySetting } from '@google/generative-ai'
class GoogleGenerativeAI_ChatModels implements INode {
label: string
@@ -74,6 +76,73 @@ class GoogleGenerativeAI_ChatModels implements INode {
step: 0.1,
optional: true,
additionalParams: true
+ },
+ {
+ label: 'Top Next Highest Probability Tokens',
+ name: 'topK',
+ type: 'number',
+ description: `Decode using top-k sampling: consider the set of top_k most probable tokens. Must be positive`,
+ step: 1,
+ optional: true,
+ additionalParams: true
+ },
+ {
+ label: 'Harm Category',
+ name: 'harmCategory',
+ type: 'multiOptions',
+ description:
+ 'Refer to official guide on how to use Harm Category',
+ options: [
+ {
+ label: 'Dangerous',
+ name: HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT
+ },
+ {
+ label: 'Harassment',
+ name: HarmCategory.HARM_CATEGORY_HARASSMENT
+ },
+ {
+ label: 'Hate Speech',
+ name: HarmCategory.HARM_CATEGORY_HATE_SPEECH
+ },
+ {
+ label: 'Sexually Explicit',
+ name: HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT
+ }
+ ],
+ optional: true,
+ additionalParams: true
+ },
+ {
+ label: 'Harm Block Threshold',
+ name: 'harmBlockThreshold',
+ type: 'multiOptions',
+ description:
+ 'Refer to official guide on how to use Harm Block Threshold',
+ options: [
+ {
+ label: 'Low and Above',
+ name: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE
+ },
+ {
+ label: 'Medium and Above',
+ name: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE
+ },
+ {
+ label: 'None',
+ name: HarmBlockThreshold.BLOCK_NONE
+ },
+ {
+ label: 'Only High',
+ name: HarmBlockThreshold.BLOCK_ONLY_HIGH
+ },
+ {
+ label: 'Threshold Unspecified',
+ name: HarmBlockThreshold.HARM_BLOCK_THRESHOLD_UNSPECIFIED
+ }
+ ],
+ optional: true,
+ additionalParams: true
}
]
}
@@ -86,9 +155,12 @@ class GoogleGenerativeAI_ChatModels implements INode {
const modelName = nodeData.inputs?.modelName as string
const maxOutputTokens = nodeData.inputs?.maxOutputTokens as string
const topP = nodeData.inputs?.topP as string
+ const topK = nodeData.inputs?.topK as string
+ const harmCategory = nodeData.inputs?.harmCategory as string
+ const harmBlockThreshold = nodeData.inputs?.harmBlockThreshold as string
const cache = nodeData.inputs?.cache as BaseCache
- const obj = {
+ const obj: Partial<GoogleGenerativeAIChatInput> = {
apiKey: apiKey,
modelName: modelName,
maxOutputTokens: 2048
@@ -98,8 +170,23 @@ class GoogleGenerativeAI_ChatModels implements INode {
const model = new ChatGoogleGenerativeAI(obj)
if (topP) model.topP = parseFloat(topP)
+ if (topK) model.topK = parseFloat(topK)
if (cache) model.cache = cache
if (temperature) model.temperature = parseFloat(temperature)
+
+ // Safety Settings
+ let harmCategories: string[] = convertMultiOptionsToStringArray(harmCategory)
+ let harmBlockThresholds: string[] = convertMultiOptionsToStringArray(harmBlockThreshold)
+ if (harmCategories.length != harmBlockThresholds.length)
+ throw new Error(`Harm Category & Harm Block Threshold are not the same length`)
+ const safetySettings: SafetySetting[] = harmCategories.map((harmCategory, index) => {
+ return {
+ category: harmCategory as HarmCategory,
+ threshold: harmBlockThresholds[index] as HarmBlockThreshold
+ }
+ })
+ if (safetySettings.length > 0) model.safetySettings = safetySettings
+
return model
}
}
diff --git a/packages/components/nodes/retrievers/CohereRerankRetriever/Cohere.svg b/packages/components/nodes/retrievers/CohereRerankRetriever/Cohere.svg
new file mode 100644
index 000000000..88bcabe34
--- /dev/null
+++ b/packages/components/nodes/retrievers/CohereRerankRetriever/Cohere.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/packages/components/nodes/retrievers/CohereRerankRetriever/CohereRerankRetriever.ts b/packages/components/nodes/retrievers/CohereRerankRetriever/CohereRerankRetriever.ts
index ca89ca771..442fdc7a6 100644
--- a/packages/components/nodes/retrievers/CohereRerankRetriever/CohereRerankRetriever.ts
+++ b/packages/components/nodes/retrievers/CohereRerankRetriever/CohereRerankRetriever.ts
@@ -1,7 +1,7 @@
import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
import { BaseRetriever } from 'langchain/schema/retriever'
import { ContextualCompressionRetriever } from 'langchain/retrievers/contextual_compression'
-import { getCredentialData, getCredentialParam } from '../../../src'
+import { getCredentialData, getCredentialParam, handleEscapeCharacters } from '../../../src'
import { CohereRerank } from './CohereRerank'
import { VectorStoreRetriever } from 'langchain/vectorstores/base'
@@ -15,16 +15,16 @@ class CohereRerankRetriever_Retrievers implements INode {
category: string
baseClasses: string[]
inputs: INodeParams[]
- outputs: INodeOutputsValue[]
credential: INodeParams
badge: string
+ outputs: INodeOutputsValue[]
constructor() {
this.label = 'Cohere Rerank Retriever'
this.name = 'cohereRerankRetriever'
this.version = 1.0
this.type = 'Cohere Rerank Retriever'
- this.icon = 'compressionRetriever.svg'
+ this.icon = 'Cohere.svg'
this.category = 'Retrievers'
this.badge = 'NEW'
this.description = 'Cohere Rerank indexes the documents from most to least semantically relevant to the query.'
@@ -37,7 +37,7 @@ class CohereRerankRetriever_Retrievers implements INode {
}
this.inputs = [
{
- label: 'Base Retriever',
+ label: 'Vector Store Retriever',
name: 'baseRetriever',
type: 'VectorStoreRetriever'
},
@@ -58,47 +58,84 @@ class CohereRerankRetriever_Retrievers implements INode {
default: 'rerank-english-v2.0',
optional: true
},
+ {
+ label: 'Query',
+ name: 'query',
+ type: 'string',
+ description: 'Query to retrieve documents from retriever. If not specified, user question will be used',
+ optional: true,
+ acceptVariable: true
+ },
{
label: 'Top K',
name: 'topK',
description: 'Number of top results to fetch. Default to the TopK of the Base Retriever',
- placeholder: '0',
+ placeholder: '4',
type: 'number',
- default: 0,
additionalParams: true,
optional: true
},
{
- label: 'Max Chunks Per Document',
+ label: 'Max Chunks Per Doc',
name: 'maxChunksPerDoc',
+ description: 'The maximum number of chunks to produce internally from a document. Default to 10',
placeholder: '10',
type: 'number',
- default: 10,
additionalParams: true,
optional: true
}
]
+ this.outputs = [
+ {
+ label: 'Cohere Rerank Retriever',
+ name: 'retriever',
+ baseClasses: this.baseClasses
+ },
+ {
+ label: 'Document',
+ name: 'document',
+ baseClasses: ['Document']
+ },
+ {
+ label: 'Text',
+ name: 'text',
+ baseClasses: ['string', 'json']
+ }
+ ]
}
- async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
+ async init(nodeData: INodeData, input: string, options: ICommonObject): Promise<any> {
const baseRetriever = nodeData.inputs?.baseRetriever as BaseRetriever
const model = nodeData.inputs?.model as string
+ const query = nodeData.inputs?.query as string
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const cohereApiKey = getCredentialParam('cohereApiKey', credentialData, nodeData)
const topK = nodeData.inputs?.topK as string
- let k = topK ? parseFloat(topK) : 4
- const maxChunks = nodeData.inputs?.maxChunksPerDoc as string
- let max = maxChunks ? parseInt(maxChunks) : 10
+ const k = topK ? parseFloat(topK) : (baseRetriever as VectorStoreRetriever).k ?? 4
+ const maxChunksPerDoc = nodeData.inputs?.maxChunksPerDoc as string
+ const max_chunks_per_doc = maxChunksPerDoc ? parseFloat(maxChunksPerDoc) : 10
+ const output = nodeData.outputs?.output as string
- if (k <= 0) {
- k = (baseRetriever as VectorStoreRetriever).k
- }
+ const cohereCompressor = new CohereRerank(cohereApiKey, model, k, max_chunks_per_doc)
- const cohereCompressor = new CohereRerank(cohereApiKey, model, k, max)
- return new ContextualCompressionRetriever({
+ const retriever = new ContextualCompressionRetriever({
baseCompressor: cohereCompressor,
baseRetriever: baseRetriever
})
+
+ if (output === 'retriever') return retriever
+ else if (output === 'document') return await retriever.getRelevantDocuments(query ? query : input)
+ else if (output === 'text') {
+ let finaltext = ''
+
+ const docs = await retriever.getRelevantDocuments(query ? query : input)
+
+ for (const doc of docs) finaltext += `${doc.pageContent}\n`
+
+ return handleEscapeCharacters(finaltext, false)
+ }
+
+ return retriever
}
}
diff --git a/packages/components/nodes/retrievers/CohereRerankRetriever/compressionRetriever.svg b/packages/components/nodes/retrievers/CohereRerankRetriever/compressionRetriever.svg
deleted file mode 100644
index 23c52d25e..000000000
--- a/packages/components/nodes/retrievers/CohereRerankRetriever/compressionRetriever.svg
+++ /dev/null
@@ -1,7 +0,0 @@
-
\ No newline at end of file
diff --git a/packages/components/nodes/retrievers/EmbeddingsFilterRetriever/EmbeddingsFilterRetriever.ts b/packages/components/nodes/retrievers/EmbeddingsFilterRetriever/EmbeddingsFilterRetriever.ts
index d373704c0..d1049fa49 100644
--- a/packages/components/nodes/retrievers/EmbeddingsFilterRetriever/EmbeddingsFilterRetriever.ts
+++ b/packages/components/nodes/retrievers/EmbeddingsFilterRetriever/EmbeddingsFilterRetriever.ts
@@ -3,6 +3,7 @@ import { BaseRetriever } from 'langchain/schema/retriever'
import { Embeddings } from 'langchain/embeddings/base'
import { ContextualCompressionRetriever } from 'langchain/retrievers/contextual_compression'
import { EmbeddingsFilter } from 'langchain/retrievers/document_compressors/embeddings_filter'
+import { handleEscapeCharacters } from '../../../src/utils'
class EmbeddingsFilterRetriever_Retrievers implements INode {
label: string
@@ -29,15 +30,22 @@ class EmbeddingsFilterRetriever_Retrievers implements INode {
this.baseClasses = [this.type, 'BaseRetriever']
this.inputs = [
{
- label: 'Base Retriever',
+ label: 'Vector Store Retriever',
name: 'baseRetriever',
type: 'VectorStoreRetriever'
},
{
label: 'Embeddings',
name: 'embeddings',
- type: 'Embeddings',
- optional: false
+ type: 'Embeddings'
+ },
+ {
+ label: 'Query',
+ name: 'query',
+ type: 'string',
+ description: 'Query to retrieve documents from retriever. If not specified, user question will be used',
+ optional: true,
+ acceptVariable: true
},
{
label: 'Similarity Threshold',
@@ -61,36 +69,64 @@ class EmbeddingsFilterRetriever_Retrievers implements INode {
additionalParams: true
}
]
+ this.outputs = [
+ {
+ label: 'Embeddings Filter Retriever',
+ name: 'retriever',
+ baseClasses: this.baseClasses
+ },
+ {
+ label: 'Document',
+ name: 'document',
+ baseClasses: ['Document']
+ },
+ {
+ label: 'Text',
+ name: 'text',
+ baseClasses: ['string', 'json']
+ }
+ ]
}
- async init(nodeData: INodeData): Promise<any> {
+ async init(nodeData: INodeData, input: string): Promise<any> {
const baseRetriever = nodeData.inputs?.baseRetriever as BaseRetriever
const embeddings = nodeData.inputs?.embeddings as Embeddings
+ const query = nodeData.inputs?.query as string
const similarityThreshold = nodeData.inputs?.similarityThreshold as string
const k = nodeData.inputs?.k as string
+ const output = nodeData.outputs?.output as string
if (k === undefined && similarityThreshold === undefined) {
throw new Error(`Must specify one of "k" or "similarity_threshold".`)
}
- let similarityThresholdNumber = 0.8
- if (similarityThreshold) {
- similarityThresholdNumber = parseFloat(similarityThreshold)
- }
- let kNumber = 0.8
- if (k) {
- kNumber = parseFloat(k)
- }
+ const similarityThresholdNumber = similarityThreshold ? parseFloat(similarityThreshold) : 0.8
+ const kNumber = k ? parseFloat(k) : undefined
+
const baseCompressor = new EmbeddingsFilter({
embeddings: embeddings,
similarityThreshold: similarityThresholdNumber,
k: kNumber
})
- return new ContextualCompressionRetriever({
+ const retriever = new ContextualCompressionRetriever({
baseCompressor,
baseRetriever: baseRetriever
})
+
+ if (output === 'retriever') return retriever
+ else if (output === 'document') return await retriever.getRelevantDocuments(query ? query : input)
+ else if (output === 'text') {
+ let finaltext = ''
+
+ const docs = await retriever.getRelevantDocuments(query ? query : input)
+
+ for (const doc of docs) finaltext += `${doc.pageContent}\n`
+
+ return handleEscapeCharacters(finaltext, false)
+ }
+
+ return retriever
}
}
diff --git a/packages/components/nodes/retrievers/HydeRetriever/HydeRetriever.ts b/packages/components/nodes/retrievers/HydeRetriever/HydeRetriever.ts
index 10d9a6e7a..10fff7646 100644
--- a/packages/components/nodes/retrievers/HydeRetriever/HydeRetriever.ts
+++ b/packages/components/nodes/retrievers/HydeRetriever/HydeRetriever.ts
@@ -1,8 +1,9 @@
import { VectorStore } from 'langchain/vectorstores/base'
-import { INode, INodeData, INodeParams } from '../../../src/Interface'
+import { INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
import { HydeRetriever, HydeRetrieverOptions, PromptKey } from 'langchain/retrievers/hyde'
import { BaseLanguageModel } from 'langchain/base_language'
import { PromptTemplate } from 'langchain/prompts'
+import { handleEscapeCharacters } from '../../../src/utils'
class HydeRetriever_Retrievers implements INode {
label: string
@@ -14,11 +15,12 @@ class HydeRetriever_Retrievers implements INode {
category: string
baseClasses: string[]
inputs: INodeParams[]
+ outputs: INodeOutputsValue[]
constructor() {
- this.label = 'Hyde Retriever'
+ this.label = 'HyDE Retriever'
this.name = 'HydeRetriever'
- this.version = 2.0
+ this.version = 3.0
this.type = 'HydeRetriever'
this.icon = 'hyderetriever.svg'
this.category = 'Retrievers'
@@ -35,6 +37,14 @@ class HydeRetriever_Retrievers implements INode {
name: 'vectorStore',
type: 'VectorStore'
},
+ {
+ label: 'Query',
+ name: 'query',
+ type: 'string',
+ description: 'Query to retrieve documents from retriever. If not specified, user question will be used',
+ optional: true,
+ acceptVariable: true
+ },
{
label: 'Select Defined Prompt',
name: 'promptKey',
@@ -121,15 +131,34 @@ Passage:`
optional: true
}
]
+ this.outputs = [
+ {
+ label: 'HyDE Retriever',
+ name: 'retriever',
+ baseClasses: this.baseClasses
+ },
+ {
+ label: 'Document',
+ name: 'document',
+ baseClasses: ['Document']
+ },
+ {
+ label: 'Text',
+ name: 'text',
+ baseClasses: ['string', 'json']
+ }
+ ]
}
- async init(nodeData: INodeData): Promise<any> {
+ async init(nodeData: INodeData, input: string): Promise<any> {
const llm = nodeData.inputs?.model as BaseLanguageModel
const vectorStore = nodeData.inputs?.vectorStore as VectorStore
const promptKey = nodeData.inputs?.promptKey as PromptKey
const customPrompt = nodeData.inputs?.customPrompt as string
+ const query = nodeData.inputs?.query as string
const topK = nodeData.inputs?.topK as string
const k = topK ? parseFloat(topK) : 4
+ const output = nodeData.outputs?.output as string
const obj: HydeRetrieverOptions = {
llm,
@@ -141,6 +170,19 @@ Passage:`
else if (promptKey) obj.promptTemplate = promptKey
const retriever = new HydeRetriever(obj)
+
+ if (output === 'retriever') return retriever
+ else if (output === 'document') return await retriever.getRelevantDocuments(query ? query : input)
+ else if (output === 'text') {
+ let finaltext = ''
+
+ const docs = await retriever.getRelevantDocuments(query ? query : input)
+
+ for (const doc of docs) finaltext += `${doc.pageContent}\n`
+
+ return handleEscapeCharacters(finaltext, false)
+ }
+
return retriever
}
}
diff --git a/packages/components/nodes/retrievers/LLMFilterRetriever/LLMFilterCompressionRetriever.ts b/packages/components/nodes/retrievers/LLMFilterRetriever/LLMFilterCompressionRetriever.ts
index e044468f4..6b710cf30 100644
--- a/packages/components/nodes/retrievers/LLMFilterRetriever/LLMFilterCompressionRetriever.ts
+++ b/packages/components/nodes/retrievers/LLMFilterRetriever/LLMFilterCompressionRetriever.ts
@@ -3,6 +3,7 @@ import { BaseRetriever } from 'langchain/schema/retriever'
import { ContextualCompressionRetriever } from 'langchain/retrievers/contextual_compression'
import { BaseLanguageModel } from 'langchain/base_language'
import { LLMChainExtractor } from 'langchain/retrievers/document_compressors/chain_extract'
+import { handleEscapeCharacters } from '../../../src/utils'
class LLMFilterCompressionRetriever_Retrievers implements INode {
label: string
@@ -22,7 +23,7 @@ class LLMFilterCompressionRetriever_Retrievers implements INode {
this.name = 'llmFilterRetriever'
this.version = 1.0
this.type = 'LLMFilterRetriever'
- this.icon = 'compressionRetriever.svg'
+ this.icon = 'llmFilterRetriever.svg'
this.category = 'Retrievers'
this.badge = 'NEW'
this.description =
@@ -30,30 +31,69 @@ class LLMFilterCompressionRetriever_Retrievers implements INode {
this.baseClasses = [this.type, 'BaseRetriever']
this.inputs = [
{
- label: 'Base Retriever',
+ label: 'Vector Store Retriever',
name: 'baseRetriever',
type: 'VectorStoreRetriever'
},
{
label: 'Language Model',
name: 'model',
- type: 'BaseLanguageModel',
- optional: true
+ type: 'BaseLanguageModel'
+ },
+ {
+ label: 'Query',
+ name: 'query',
+ type: 'string',
+ description: 'Query to retrieve documents from retriever. If not specified, user question will be used',
+ optional: true,
+ acceptVariable: true
+ }
+ ]
+ this.outputs = [
+ {
+ label: 'LLM Filter Retriever',
+ name: 'retriever',
+ baseClasses: this.baseClasses
+ },
+ {
+ label: 'Document',
+ name: 'document',
+ baseClasses: ['Document']
+ },
+ {
+ label: 'Text',
+ name: 'text',
+ baseClasses: ['string', 'json']
}
]
}
- async init(nodeData: INodeData): Promise<any> {
+ async init(nodeData: INodeData, input: string): Promise<any> {
const baseRetriever = nodeData.inputs?.baseRetriever as BaseRetriever
const model = nodeData.inputs?.model as BaseLanguageModel
+ const query = nodeData.inputs?.query as string
+ const output = nodeData.outputs?.output as string
- if (model) {
- return new ContextualCompressionRetriever({
- baseCompressor: LLMChainExtractor.fromLLM(model),
- baseRetriever: baseRetriever
- })
+ if (!model) throw new Error('There must be a LLM model connected to LLM Filter Retriever')
+
+ const retriever = new ContextualCompressionRetriever({
+ baseCompressor: LLMChainExtractor.fromLLM(model),
+ baseRetriever: baseRetriever
+ })
+
+ if (output === 'retriever') return retriever
+ else if (output === 'document') return await retriever.getRelevantDocuments(query ? query : input)
+ else if (output === 'text') {
+ let finaltext = ''
+
+ const docs = await retriever.getRelevantDocuments(query ? query : input)
+
+ for (const doc of docs) finaltext += `${doc.pageContent}\n`
+
+ return handleEscapeCharacters(finaltext, false)
}
- return {}
+
+ return retriever
}
}
diff --git a/packages/components/nodes/retrievers/LLMFilterRetriever/compressionRetriever.svg b/packages/components/nodes/retrievers/LLMFilterRetriever/compressionRetriever.svg
deleted file mode 100644
index 23c52d25e..000000000
--- a/packages/components/nodes/retrievers/LLMFilterRetriever/compressionRetriever.svg
+++ /dev/null
@@ -1,7 +0,0 @@
-
\ No newline at end of file
diff --git a/packages/components/nodes/retrievers/LLMFilterRetriever/llmFilterRetriever.svg b/packages/components/nodes/retrievers/LLMFilterRetriever/llmFilterRetriever.svg
new file mode 100644
index 000000000..d3f4d15f4
--- /dev/null
+++ b/packages/components/nodes/retrievers/LLMFilterRetriever/llmFilterRetriever.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/packages/components/nodes/retrievers/RRFRetriever/RRFRetriever.ts b/packages/components/nodes/retrievers/RRFRetriever/RRFRetriever.ts
index 3229b3a8f..ed15ed243 100644
--- a/packages/components/nodes/retrievers/RRFRetriever/RRFRetriever.ts
+++ b/packages/components/nodes/retrievers/RRFRetriever/RRFRetriever.ts
@@ -1,9 +1,10 @@
-import { INode, INodeData, INodeParams } from '../../../src/Interface'
+import { INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
import { BaseLanguageModel } from 'langchain/base_language'
import { ContextualCompressionRetriever } from 'langchain/retrievers/contextual_compression'
import { BaseRetriever } from 'langchain/schema/retriever'
import { ReciprocalRankFusion } from './ReciprocalRankFusion'
import { VectorStoreRetriever } from 'langchain/vectorstores/base'
+import { handleEscapeCharacters } from '../../../src/utils'
class RRFRetriever_Retrievers implements INode {
label: string
@@ -16,20 +17,21 @@ class RRFRetriever_Retrievers implements INode {
baseClasses: string[]
inputs: INodeParams[]
badge: string
+ outputs: INodeOutputsValue[]
constructor() {
this.label = 'Reciprocal Rank Fusion Retriever'
this.name = 'RRFRetriever'
- this.version = 2.0
+ this.version = 1.0
this.type = 'RRFRetriever'
this.badge = 'NEW'
- this.icon = 'compressionRetriever.svg'
+ this.icon = 'rrfRetriever.svg'
this.category = 'Retrievers'
this.description = 'Reciprocal Rank Fusion to re-rank search results by multiple query generation.'
this.baseClasses = [this.type, 'BaseRetriever']
this.inputs = [
{
- label: 'Base Retriever',
+ label: 'Vector Store Retriever',
name: 'baseRetriever',
type: 'VectorStoreRetriever'
},
@@ -38,6 +40,14 @@ class RRFRetriever_Retrievers implements INode {
name: 'model',
type: 'BaseLanguageModel'
},
+ {
+ label: 'Query',
+ name: 'query',
+ type: 'string',
+ description: 'Query to retrieve documents from retriever. If not specified, user question will be used',
+ optional: true,
+ acceptVariable: true
+ },
{
label: 'Query Count',
name: 'queryCount',
@@ -54,7 +64,6 @@ class RRFRetriever_Retrievers implements INode {
description: 'Number of top results to fetch. Default to the TopK of the Base Retriever',
placeholder: '0',
type: 'number',
- default: 0,
additionalParams: true,
optional: true
},
@@ -71,27 +80,56 @@ class RRFRetriever_Retrievers implements INode {
optional: true
}
]
+ this.outputs = [
+ {
+ label: 'Reciprocal Rank Fusion Retriever',
+ name: 'retriever',
+ baseClasses: this.baseClasses
+ },
+ {
+ label: 'Document',
+ name: 'document',
+ baseClasses: ['Document']
+ },
+ {
+ label: 'Text',
+ name: 'text',
+ baseClasses: ['string', 'json']
+ }
+ ]
}
- async init(nodeData: INodeData): Promise<any> {
+ async init(nodeData: INodeData, input: string): Promise<any> {
const llm = nodeData.inputs?.model as BaseLanguageModel
const baseRetriever = nodeData.inputs?.baseRetriever as BaseRetriever
+ const query = nodeData.inputs?.query as string
const queryCount = nodeData.inputs?.queryCount as string
const q = queryCount ? parseFloat(queryCount) : 4
const topK = nodeData.inputs?.topK as string
- let k = topK ? parseFloat(topK) : 4
+ const k = topK ? parseFloat(topK) : (baseRetriever as VectorStoreRetriever).k ?? 4
const constantC = nodeData.inputs?.c as string
- let c = topK ? parseFloat(constantC) : 60
-
- if (k <= 0) {
- k = (baseRetriever as VectorStoreRetriever).k
- }
+ const c = constantC ? parseFloat(constantC) : 60
+ const output = nodeData.outputs?.output as string
const ragFusion = new ReciprocalRankFusion(llm, baseRetriever as VectorStoreRetriever, q, k, c)
- return new ContextualCompressionRetriever({
+ const retriever = new ContextualCompressionRetriever({
baseCompressor: ragFusion,
baseRetriever: baseRetriever
})
+
+ if (output === 'retriever') return retriever
+ else if (output === 'document') return await retriever.getRelevantDocuments(query ? query : input)
+ else if (output === 'text') {
+ let finaltext = ''
+
+ const docs = await retriever.getRelevantDocuments(query ? query : input)
+
+ for (const doc of docs) finaltext += `${doc.pageContent}\n`
+
+ return handleEscapeCharacters(finaltext, false)
+ }
+
+ return retriever
}
}
diff --git a/packages/components/nodes/retrievers/RRFRetriever/compressionRetriever.svg b/packages/components/nodes/retrievers/RRFRetriever/compressionRetriever.svg
deleted file mode 100644
index 23c52d25e..000000000
--- a/packages/components/nodes/retrievers/RRFRetriever/compressionRetriever.svg
+++ /dev/null
@@ -1,7 +0,0 @@
-
\ No newline at end of file
diff --git a/packages/components/nodes/retrievers/RRFRetriever/rrfRetriever.svg b/packages/components/nodes/retrievers/RRFRetriever/rrfRetriever.svg
new file mode 100644
index 000000000..56fbcc5a1
--- /dev/null
+++ b/packages/components/nodes/retrievers/RRFRetriever/rrfRetriever.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/packages/components/nodes/retrievers/SimilarityThresholdRetriever/SimilarityThresholdRetriever.ts b/packages/components/nodes/retrievers/SimilarityThresholdRetriever/SimilarityThresholdRetriever.ts
index a9f4b3d87..5f5a9ed0d 100644
--- a/packages/components/nodes/retrievers/SimilarityThresholdRetriever/SimilarityThresholdRetriever.ts
+++ b/packages/components/nodes/retrievers/SimilarityThresholdRetriever/SimilarityThresholdRetriever.ts
@@ -18,7 +18,7 @@ class SimilarityThresholdRetriever_Retrievers implements INode {
constructor() {
this.label = 'Similarity Score Threshold Retriever'
this.name = 'similarityThresholdRetriever'
- this.version = 1.0
+ this.version = 2.0
this.type = 'SimilarityThresholdRetriever'
this.icon = 'similaritythreshold.svg'
this.category = 'Retrievers'
@@ -30,6 +30,14 @@ class SimilarityThresholdRetriever_Retrievers implements INode {
name: 'vectorStore',
type: 'VectorStore'
},
+ {
+ label: 'Query',
+ name: 'query',
+ type: 'string',
+ description: 'Query to retrieve documents from retriever. If not specified, user question will be used',
+ optional: true,
+ acceptVariable: true
+ },
{
label: 'Minimum Similarity Score (%)',
name: 'minSimilarityScore',
@@ -44,7 +52,8 @@ class SimilarityThresholdRetriever_Retrievers implements INode {
description: `The maximum number of results to fetch`,
type: 'number',
default: 20,
- step: 1
+ step: 1,
+ additionalParams: true
},
{
label: 'K Increment',
@@ -52,7 +61,8 @@ class SimilarityThresholdRetriever_Retrievers implements INode {
description: `How much to increase K by each time. It'll fetch N results, then N + kIncrement, then N + kIncrement * 2, etc.`,
type: 'number',
default: 2,
- step: 1
+ step: 1,
+ additionalParams: true
}
]
this.outputs = [
@@ -77,6 +87,7 @@ class SimilarityThresholdRetriever_Retrievers implements INode {
async init(nodeData: INodeData, input: string): Promise<any> {
const vectorStore = nodeData.inputs?.vectorStore as VectorStore
const minSimilarityScore = nodeData.inputs?.minSimilarityScore as number
+ const query = nodeData.inputs?.query as string
const maxK = nodeData.inputs?.maxK as string
const kIncrement = nodeData.inputs?.kIncrement as string
@@ -89,11 +100,11 @@ class SimilarityThresholdRetriever_Retrievers implements INode {
})
if (output === 'retriever') return retriever
- else if (output === 'document') return await retriever.getRelevantDocuments(input)
+ else if (output === 'document') return await retriever.getRelevantDocuments(query ? query : input)
else if (output === 'text') {
let finaltext = ''
- const docs = await retriever.getRelevantDocuments(input)
+ const docs = await retriever.getRelevantDocuments(query ? query : input)
for (const doc of docs) finaltext += `${doc.pageContent}\n`
diff --git a/packages/components/nodes/utilities/CustomFunction/CustomFunction.ts b/packages/components/nodes/utilities/CustomFunction/CustomFunction.ts
index 37511e476..749c3a862 100644
--- a/packages/components/nodes/utilities/CustomFunction/CustomFunction.ts
+++ b/packages/components/nodes/utilities/CustomFunction/CustomFunction.ts
@@ -73,7 +73,11 @@ class CustomFunction_Utilities implements INode {
if (Object.keys(inputVars).length) {
for (const item in inputVars) {
- sandbox[`$${item}`] = inputVars[item]
+ let value = inputVars[item]
+ if (typeof value === 'string') {
+ value = handleEscapeCharacters(value, true)
+ }
+ sandbox[`$${item}`] = value
}
}
diff --git a/packages/components/nodes/vectorstores/Astra/Astra.ts b/packages/components/nodes/vectorstores/Astra/Astra.ts
index 865f10446..edaadc9c0 100644
--- a/packages/components/nodes/vectorstores/Astra/Astra.ts
+++ b/packages/components/nodes/vectorstores/Astra/Astra.ts
@@ -4,6 +4,7 @@ import { Document } from 'langchain/document'
import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData } from '../../../src/utils'
import { AstraDBVectorStore, AstraLibArgs } from '@langchain/community/vectorstores/astradb'
+import { addMMRInputParams, resolveVectorStoreOrRetriever } from '../VectorStoreUtils'
class Astra_VectorStores implements INode {
label: string
@@ -26,7 +27,7 @@ class Astra_VectorStores implements INode {
this.type = 'Astra'
this.icon = 'astra.svg'
this.category = 'Vector Stores'
- this.description = `Upsert embedded data and perform similarity search upon query using DataStax Astra DB, a serverless vector database that’s perfect for managing mission-critical AI workloads`
+ this.description = `Upsert embedded data and perform similarity or mmr search upon query using DataStax Astra DB, a serverless vector database that’s perfect for managing mission-critical AI workloads`
this.baseClasses = [this.type, 'VectorStoreRetriever', 'BaseRetriever']
this.badge = 'NEW'
this.credential = {
@@ -74,6 +75,7 @@ class Astra_VectorStores implements INode {
optional: true
}
]
+ addMMRInputParams(this.inputs)
this.outputs = [
{
label: 'Astra Retriever',
@@ -139,9 +141,6 @@ class Astra_VectorStores implements INode {
const embeddings = nodeData.inputs?.embeddings as Embeddings
const vectorDimension = nodeData.inputs?.vectorDimension as number
const similarityMetric = nodeData.inputs?.similarityMetric as 'cosine' | 'euclidean' | 'dot_product' | undefined
- const output = nodeData.outputs?.output as string
- const topK = nodeData.inputs?.topK as string
- const k = topK ? parseFloat(topK) : 4
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
@@ -176,14 +175,7 @@ class Astra_VectorStores implements INode {
const vectorStore = await AstraDBVectorStore.fromExistingIndex(embeddings, astraConfig)
- if (output === 'retriever') {
- const retriever = vectorStore.asRetriever(k)
- return retriever
- } else if (output === 'vectorStore') {
- ;(vectorStore as any).k = k
- return vectorStore
- }
- return vectorStore
+ return resolveVectorStoreOrRetriever(nodeData, vectorStore)
}
}
diff --git a/packages/components/nodes/vectorstores/MongoDBAtlas/MongoDBAtlas.ts b/packages/components/nodes/vectorstores/MongoDBAtlas/MongoDBAtlas.ts
index 9bc23f104..6ba7199f0 100644
--- a/packages/components/nodes/vectorstores/MongoDBAtlas/MongoDBAtlas.ts
+++ b/packages/components/nodes/vectorstores/MongoDBAtlas/MongoDBAtlas.ts
@@ -5,6 +5,7 @@ import { Embeddings } from 'langchain/embeddings/base'
import { Document } from 'langchain/document'
import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
+import { addMMRInputParams, resolveVectorStoreOrRetriever } from '../VectorStoreUtils'
class MongoDBAtlas_VectorStores implements INode {
label: string
@@ -24,7 +25,7 @@ class MongoDBAtlas_VectorStores implements INode {
this.label = 'MongoDB Atlas'
this.name = 'mongoDBAtlas'
this.version = 1.0
- this.description = `Upsert embedded data and perform similarity search upon query using MongoDB Atlas, a managed cloud mongodb database`
+ this.description = `Upsert embedded data and perform similarity or mmr search upon query using MongoDB Atlas, a managed cloud mongodb database`
this.type = 'MongoDB Atlas'
this.icon = 'mongodb.svg'
this.category = 'Vector Stores'
@@ -95,6 +96,7 @@ class MongoDBAtlas_VectorStores implements INode {
optional: true
}
]
+ addMMRInputParams(this.inputs)
this.outputs = [
{
label: 'MongoDB Retriever',
@@ -162,9 +164,6 @@ class MongoDBAtlas_VectorStores implements INode {
let textKey = nodeData.inputs?.textKey as string
let embeddingKey = nodeData.inputs?.embeddingKey as string
const embeddings = nodeData.inputs?.embeddings as Embeddings
- const topK = nodeData.inputs?.topK as string
- const k = topK ? parseFloat(topK) : 4
- const output = nodeData.outputs?.output as string
let mongoDBConnectUrl = getCredentialParam('mongoDBConnectUrl', credentialData, nodeData)
@@ -181,13 +180,7 @@ class MongoDBAtlas_VectorStores implements INode {
embeddingKey
})
- if (output === 'retriever') {
- return vectorStore.asRetriever(k)
- } else if (output === 'vectorStore') {
- ;(vectorStore as any).k = k
- return vectorStore
- }
- return vectorStore
+ return resolveVectorStoreOrRetriever(nodeData, vectorStore)
}
}
diff --git a/packages/components/nodes/vectorstores/Pinecone/Pinecone.ts b/packages/components/nodes/vectorstores/Pinecone/Pinecone.ts
index 4b91a9b54..6623b1a26 100644
--- a/packages/components/nodes/vectorstores/Pinecone/Pinecone.ts
+++ b/packages/components/nodes/vectorstores/Pinecone/Pinecone.ts
@@ -24,7 +24,7 @@ class Pinecone_VectorStores implements INode {
constructor() {
this.label = 'Pinecone'
this.name = 'pinecone'
- this.version = 3.0
+ this.version = 2.0
this.type = 'Pinecone'
this.icon = 'pinecone.svg'
this.category = 'Vector Stores'
diff --git a/packages/components/package.json b/packages/components/package.json
index c90ea5ccd..894014d42 100644
--- a/packages/components/package.json
+++ b/packages/components/package.json
@@ -1,6 +1,6 @@
{
"name": "flowise-components",
- "version": "1.5.0",
+ "version": "1.5.1",
"description": "Flowiseai Components",
"main": "dist/src/index",
"types": "dist/src/index.d.ts",
@@ -26,6 +26,7 @@
"@gomomento/sdk": "^1.51.1",
"@gomomento/sdk-core": "^1.51.1",
"@google-ai/generativelanguage": "^0.2.1",
+ "@google/generative-ai": "^0.1.3",
"@huggingface/inference": "^2.6.1",
"@langchain/community": "^0.0.16",
"@langchain/google-genai": "^0.0.6",
@@ -37,7 +38,7 @@
"@supabase/supabase-js": "^2.29.0",
"@types/js-yaml": "^4.0.5",
"@types/jsdom": "^21.1.1",
- "@upstash/redis": "^1.22.1",
+ "@upstash/redis": "1.22.1",
"@zilliz/milvus2-sdk-node": "^2.2.24",
"apify-client": "^2.7.1",
"axios": "1.6.2",
diff --git a/packages/components/src/utils.ts b/packages/components/src/utils.ts
index 22fa6f4a9..2215eb418 100644
--- a/packages/components/src/utils.ts
+++ b/packages/components/src/utils.ts
@@ -673,3 +673,18 @@ export const convertBaseMessagetoIMessage = (messages: BaseMessage[]): IMessage[
}
return formatmessages
}
+
+/**
+ * Convert MultiOptions String to String Array
+ * @param {string} inputString
+ * @returns {string[]}
+ */
+export const convertMultiOptionsToStringArray = (inputString: string): string[] => {
+ let ArrayString: string[] = []
+ try {
+ ArrayString = JSON.parse(inputString)
+ } catch (e) {
+ ArrayString = []
+ }
+ return ArrayString
+}
diff --git a/packages/server/marketplaces/chatflows/AutoGPT.json b/packages/server/marketplaces/chatflows/AutoGPT.json
index 150fe17eb..0062cd43f 100644
--- a/packages/server/marketplaces/chatflows/AutoGPT.json
+++ b/packages/server/marketplaces/chatflows/AutoGPT.json
@@ -511,7 +511,7 @@
"type": "Pinecone",
"baseClasses": ["Pinecone", "VectorStoreRetriever", "BaseRetriever"],
"category": "Vector Stores",
- "description": "Upsert embedded data and perform similarity search upon query using Pinecone, a leading fully managed hosted vector database",
+ "description": "Upsert embedded data and perform similarity or mmr search using Pinecone, a leading fully managed hosted vector database",
"inputParams": [
{
"label": "Connect Credential",
@@ -552,6 +552,45 @@
"additionalParams": true,
"optional": true,
"id": "pinecone_0-input-topK-number"
+ },
+ {
+ "label": "Search Type",
+ "name": "searchType",
+ "type": "options",
+ "default": "similarity",
+ "options": [
+ {
+ "label": "Similarity",
+ "name": "similarity"
+ },
+ {
+ "label": "Max Marginal Relevance",
+ "name": "mmr"
+ }
+ ],
+ "additionalParams": true,
+ "optional": true,
+ "id": "pinecone_0-input-searchType-options"
+ },
+ {
+ "label": "Fetch K (for MMR Search)",
+ "name": "fetchK",
+ "description": "Number of initial documents to fetch for MMR reranking. Default to 20. Used only when the search type is MMR",
+ "placeholder": "20",
+ "type": "number",
+ "additionalParams": true,
+ "optional": true,
+ "id": "pinecone_0-input-fetchK-number"
+ },
+ {
+ "label": "Lambda (for MMR Search)",
+ "name": "lambda",
+ "description": "Number between 0 and 1 that determines the degree of diversity among the results, where 0 corresponds to maximum diversity and 1 to minimum diversity. Used only when the search type is MMR",
+ "placeholder": "0.5",
+ "type": "number",
+ "additionalParams": true,
+ "optional": true,
+ "id": "pinecone_0-input-lambda-number"
}
],
"inputAnchors": [
@@ -576,7 +615,10 @@
"pineconeIndex": "",
"pineconeNamespace": "",
"pineconeMetadataFilter": "",
- "topK": ""
+ "topK": "",
+ "searchType": "similarity",
+ "fetchK": "",
+ "lambda": ""
},
"outputAnchors": [
{
diff --git a/packages/server/marketplaces/chatflows/BabyAGI.json b/packages/server/marketplaces/chatflows/BabyAGI.json
index ab387205e..81e3f2307 100644
--- a/packages/server/marketplaces/chatflows/BabyAGI.json
+++ b/packages/server/marketplaces/chatflows/BabyAGI.json
@@ -166,7 +166,7 @@
"type": "Pinecone",
"baseClasses": ["Pinecone", "VectorStoreRetriever", "BaseRetriever"],
"category": "Vector Stores",
- "description": "Upsert embedded data and perform similarity search upon query using Pinecone, a leading fully managed hosted vector database",
+ "description": "Upsert embedded data and perform similarity or mmr search using Pinecone, a leading fully managed hosted vector database",
"inputParams": [
{
"label": "Connect Credential",
@@ -207,6 +207,45 @@
"additionalParams": true,
"optional": true,
"id": "pinecone_0-input-topK-number"
+ },
+ {
+ "label": "Search Type",
+ "name": "searchType",
+ "type": "options",
+ "default": "similarity",
+ "options": [
+ {
+ "label": "Similarity",
+ "name": "similarity"
+ },
+ {
+ "label": "Max Marginal Relevance",
+ "name": "mmr"
+ }
+ ],
+ "additionalParams": true,
+ "optional": true,
+ "id": "pinecone_0-input-searchType-options"
+ },
+ {
+ "label": "Fetch K (for MMR Search)",
+ "name": "fetchK",
+ "description": "Number of initial documents to fetch for MMR reranking. Default to 20. Used only when the search type is MMR",
+ "placeholder": "20",
+ "type": "number",
+ "additionalParams": true,
+ "optional": true,
+ "id": "pinecone_0-input-fetchK-number"
+ },
+ {
+ "label": "Lambda (for MMR Search)",
+ "name": "lambda",
+ "description": "Number between 0 and 1 that determines the degree of diversity among the results, where 0 corresponds to maximum diversity and 1 to minimum diversity. Used only when the search type is MMR",
+ "placeholder": "0.5",
+ "type": "number",
+ "additionalParams": true,
+ "optional": true,
+ "id": "pinecone_0-input-lambda-number"
}
],
"inputAnchors": [
@@ -231,7 +270,10 @@
"pineconeIndex": "",
"pineconeNamespace": "",
"pineconeMetadataFilter": "",
- "topK": ""
+ "topK": "",
+ "searchType": "similarity",
+ "fetchK": "",
+ "lambda": ""
},
"outputAnchors": [
{
diff --git a/packages/server/marketplaces/chatflows/Conversational Retrieval Agent.json b/packages/server/marketplaces/chatflows/Conversational Retrieval Agent.json
index 0e9e41bdd..4378a47d6 100644
--- a/packages/server/marketplaces/chatflows/Conversational Retrieval Agent.json
+++ b/packages/server/marketplaces/chatflows/Conversational Retrieval Agent.json
@@ -301,7 +301,7 @@
"type": "Pinecone",
"baseClasses": ["Pinecone", "VectorStoreRetriever", "BaseRetriever"],
"category": "Vector Stores",
- "description": "Upsert embedded data and perform similarity search upon query using Pinecone, a leading fully managed hosted vector database",
+ "description": "Upsert embedded data and perform similarity or mmr search using Pinecone, a leading fully managed hosted vector database",
"inputParams": [
{
"label": "Connect Credential",
@@ -342,6 +342,45 @@
"additionalParams": true,
"optional": true,
"id": "pinecone_0-input-topK-number"
+ },
+ {
+ "label": "Search Type",
+ "name": "searchType",
+ "type": "options",
+ "default": "similarity",
+ "options": [
+ {
+ "label": "Similarity",
+ "name": "similarity"
+ },
+ {
+ "label": "Max Marginal Relevance",
+ "name": "mmr"
+ }
+ ],
+ "additionalParams": true,
+ "optional": true,
+ "id": "pinecone_0-input-searchType-options"
+ },
+ {
+ "label": "Fetch K (for MMR Search)",
+ "name": "fetchK",
+ "description": "Number of initial documents to fetch for MMR reranking. Default to 20. Used only when the search type is MMR",
+ "placeholder": "20",
+ "type": "number",
+ "additionalParams": true,
+ "optional": true,
+ "id": "pinecone_0-input-fetchK-number"
+ },
+ {
+ "label": "Lambda (for MMR Search)",
+ "name": "lambda",
+ "description": "Number between 0 and 1 that determines the degree of diversity among the results, where 0 corresponds to maximum diversity and 1 to minimum diversity. Used only when the search type is MMR",
+ "placeholder": "0.5",
+ "type": "number",
+ "additionalParams": true,
+ "optional": true,
+ "id": "pinecone_0-input-lambda-number"
}
],
"inputAnchors": [
@@ -366,7 +405,10 @@
"pineconeIndex": "",
"pineconeNamespace": "",
"pineconeMetadataFilter": "",
- "topK": ""
+ "topK": "",
+ "searchType": "similarity",
+ "fetchK": "",
+ "lambda": ""
},
"outputAnchors": [
{
diff --git a/packages/server/marketplaces/chatflows/Conversational Retrieval QA Chain.json b/packages/server/marketplaces/chatflows/Conversational Retrieval QA Chain.json
index e2fd64210..253a1dfc7 100644
--- a/packages/server/marketplaces/chatflows/Conversational Retrieval QA Chain.json
+++ b/packages/server/marketplaces/chatflows/Conversational Retrieval QA Chain.json
@@ -541,7 +541,7 @@
"type": "Pinecone",
"baseClasses": ["Pinecone", "VectorStoreRetriever", "BaseRetriever"],
"category": "Vector Stores",
- "description": "Upsert embedded data and perform similarity search upon query using Pinecone, a leading fully managed hosted vector database",
+ "description": "Upsert embedded data and perform similarity or mmr search using Pinecone, a leading fully managed hosted vector database",
"inputParams": [
{
"label": "Connect Credential",
@@ -582,6 +582,45 @@
"additionalParams": true,
"optional": true,
"id": "pinecone_0-input-topK-number"
+ },
+ {
+ "label": "Search Type",
+ "name": "searchType",
+ "type": "options",
+ "default": "similarity",
+ "options": [
+ {
+ "label": "Similarity",
+ "name": "similarity"
+ },
+ {
+ "label": "Max Marginal Relevance",
+ "name": "mmr"
+ }
+ ],
+ "additionalParams": true,
+ "optional": true,
+ "id": "pinecone_0-input-searchType-options"
+ },
+ {
+ "label": "Fetch K (for MMR Search)",
+ "name": "fetchK",
+ "description": "Number of initial documents to fetch for MMR reranking. Default to 20. Used only when the search type is MMR",
+ "placeholder": "20",
+ "type": "number",
+ "additionalParams": true,
+ "optional": true,
+ "id": "pinecone_0-input-fetchK-number"
+ },
+ {
+ "label": "Lambda (for MMR Search)",
+ "name": "lambda",
+ "description": "Number between 0 and 1 that determines the degree of diversity among the results, where 0 corresponds to maximum diversity and 1 to minimum diversity. Used only when the search type is MMR",
+ "placeholder": "0.5",
+ "type": "number",
+ "additionalParams": true,
+ "optional": true,
+ "id": "pinecone_0-input-lambda-number"
}
],
"inputAnchors": [
@@ -606,7 +645,10 @@
"pineconeIndex": "",
"pineconeNamespace": "",
"pineconeMetadataFilter": "",
- "topK": ""
+ "topK": "",
+ "searchType": "similarity",
+ "fetchK": "",
+ "lambda": ""
},
"outputAnchors": [
{
diff --git a/packages/server/marketplaces/chatflows/Metadata Filter.json b/packages/server/marketplaces/chatflows/Metadata Filter.json
index abd85d366..f7b2fbfb1 100644
--- a/packages/server/marketplaces/chatflows/Metadata Filter.json
+++ b/packages/server/marketplaces/chatflows/Metadata Filter.json
@@ -625,7 +625,7 @@
"type": "Pinecone",
"baseClasses": ["Pinecone", "VectorStoreRetriever", "BaseRetriever"],
"category": "Vector Stores",
- "description": "Upsert embedded data and perform similarity search upon query using Pinecone, a leading fully managed hosted vector database",
+ "description": "Upsert embedded data and perform similarity or mmr search using Pinecone, a leading fully managed hosted vector database",
"inputParams": [
{
"label": "Connect Credential",
@@ -666,6 +666,45 @@
"additionalParams": true,
"optional": true,
"id": "pinecone_0-input-topK-number"
+ },
+ {
+ "label": "Search Type",
+ "name": "searchType",
+ "type": "options",
+ "default": "similarity",
+ "options": [
+ {
+ "label": "Similarity",
+ "name": "similarity"
+ },
+ {
+ "label": "Max Marginal Relevance",
+ "name": "mmr"
+ }
+ ],
+ "additionalParams": true,
+ "optional": true,
+ "id": "pinecone_0-input-searchType-options"
+ },
+ {
+ "label": "Fetch K (for MMR Search)",
+ "name": "fetchK",
+ "description": "Number of initial documents to fetch for MMR reranking. Default to 20. Used only when the search type is MMR",
+ "placeholder": "20",
+ "type": "number",
+ "additionalParams": true,
+ "optional": true,
+ "id": "pinecone_0-input-fetchK-number"
+ },
+ {
+ "label": "Lambda (for MMR Search)",
+ "name": "lambda",
+ "description": "Number between 0 and 1 that determines the degree of diversity among the results, where 0 corresponds to maximum diversity and 1 to minimum diversity. Used only when the search type is MMR",
+ "placeholder": "0.5",
+ "type": "number",
+ "additionalParams": true,
+ "optional": true,
+ "id": "pinecone_0-input-lambda-number"
}
],
"inputAnchors": [
@@ -690,7 +729,10 @@
"pineconeIndex": "",
"pineconeNamespace": "",
"pineconeMetadataFilter": "{\"id\":{\"$in\":[\"doc1\",\"doc2\"]}}",
- "topK": ""
+ "topK": "",
+ "searchType": "similarity",
+ "fetchK": "",
+ "lambda": ""
},
"outputAnchors": [
{
diff --git a/packages/server/marketplaces/chatflows/Multi Retrieval QA Chain.json b/packages/server/marketplaces/chatflows/Multi Retrieval QA Chain.json
index 5388d9657..e86b28c93 100644
--- a/packages/server/marketplaces/chatflows/Multi Retrieval QA Chain.json
+++ b/packages/server/marketplaces/chatflows/Multi Retrieval QA Chain.json
@@ -560,7 +560,7 @@
"type": "Pinecone",
"baseClasses": ["Pinecone", "VectorStoreRetriever", "BaseRetriever"],
"category": "Vector Stores",
- "description": "Upsert embedded data and perform similarity search upon query using Pinecone, a leading fully managed hosted vector database",
+ "description": "Upsert embedded data and perform similarity or mmr search using Pinecone, a leading fully managed hosted vector database",
"inputParams": [
{
"label": "Connect Credential",
@@ -601,6 +601,45 @@
"additionalParams": true,
"optional": true,
"id": "pinecone_0-input-topK-number"
+ },
+ {
+ "label": "Search Type",
+ "name": "searchType",
+ "type": "options",
+ "default": "similarity",
+ "options": [
+ {
+ "label": "Similarity",
+ "name": "similarity"
+ },
+ {
+ "label": "Max Marginal Relevance",
+ "name": "mmr"
+ }
+ ],
+ "additionalParams": true,
+ "optional": true,
+ "id": "pinecone_0-input-searchType-options"
+ },
+ {
+ "label": "Fetch K (for MMR Search)",
+ "name": "fetchK",
+ "description": "Number of initial documents to fetch for MMR reranking. Default to 20. Used only when the search type is MMR",
+ "placeholder": "20",
+ "type": "number",
+ "additionalParams": true,
+ "optional": true,
+ "id": "pinecone_0-input-fetchK-number"
+ },
+ {
+ "label": "Lambda (for MMR Search)",
+ "name": "lambda",
+ "description": "Number between 0 and 1 that determines the degree of diversity among the results, where 0 corresponds to maximum diversity and 1 to minimum diversity. Used only when the search type is MMR",
+ "placeholder": "0.5",
+ "type": "number",
+ "additionalParams": true,
+ "optional": true,
+ "id": "pinecone_0-input-lambda-number"
}
],
"inputAnchors": [
@@ -625,7 +664,10 @@
"pineconeIndex": "",
"pineconeNamespace": "",
"pineconeMetadataFilter": "",
- "topK": ""
+ "topK": "",
+ "searchType": "similarity",
+ "fetchK": "",
+ "lambda": ""
},
"outputAnchors": [
{
@@ -840,6 +882,45 @@
"additionalParams": true,
"optional": true,
"id": "supabase_0-input-topK-number"
+ },
+ {
+ "label": "Search Type",
+ "name": "searchType",
+ "type": "options",
+ "default": "similarity",
+ "options": [
+ {
+ "label": "Similarity",
+ "name": "similarity"
+ },
+ {
+ "label": "Max Marginal Relevance",
+ "name": "mmr"
+ }
+ ],
+ "additionalParams": true,
+ "optional": true,
+ "id": "supabase_0-input-searchType-options"
+ },
+ {
+ "label": "Fetch K (for MMR Search)",
+ "name": "fetchK",
+ "description": "Number of initial documents to fetch for MMR reranking. Default to 20. Used only when the search type is MMR",
+ "placeholder": "20",
+ "type": "number",
+ "additionalParams": true,
+ "optional": true,
+ "id": "supabase_0-input-fetchK-number"
+ },
+ {
+ "label": "Lambda (for MMR Search)",
+ "name": "lambda",
+ "description": "Number between 0 and 1 that determines the degree of diversity among the results, where 0 corresponds to maximum diversity and 1 to minimum diversity. Used only when the search type is MMR",
+ "placeholder": "0.5",
+ "type": "number",
+ "additionalParams": true,
+ "optional": true,
+ "id": "supabase_0-input-lambda-number"
}
],
"inputAnchors": [
@@ -865,7 +946,10 @@
"tableName": "",
"queryName": "",
"supabaseMetadataFilter": "",
- "topK": ""
+ "topK": "",
+ "searchType": "similarity",
+ "fetchK": "",
+ "lambda": ""
},
"outputAnchors": [
{
diff --git a/packages/server/marketplaces/chatflows/WebPage QnA.json b/packages/server/marketplaces/chatflows/WebPage QnA.json
index 1b1d8de66..df05feef4 100644
--- a/packages/server/marketplaces/chatflows/WebPage QnA.json
+++ b/packages/server/marketplaces/chatflows/WebPage QnA.json
@@ -643,7 +643,7 @@
"type": "Pinecone",
"baseClasses": ["Pinecone", "VectorStoreRetriever", "BaseRetriever"],
"category": "Vector Stores",
- "description": "Upsert embedded data and perform similarity search upon query using Pinecone, a leading fully managed hosted vector database",
+ "description": "Upsert embedded data and perform similarity or mmr search using Pinecone, a leading fully managed hosted vector database",
"inputParams": [
{
"label": "Connect Credential",
@@ -684,6 +684,45 @@
"additionalParams": true,
"optional": true,
"id": "pinecone_0-input-topK-number"
+ },
+ {
+ "label": "Search Type",
+ "name": "searchType",
+ "type": "options",
+ "default": "similarity",
+ "options": [
+ {
+ "label": "Similarity",
+ "name": "similarity"
+ },
+ {
+ "label": "Max Marginal Relevance",
+ "name": "mmr"
+ }
+ ],
+ "additionalParams": true,
+ "optional": true,
+ "id": "pinecone_0-input-searchType-options"
+ },
+ {
+ "label": "Fetch K (for MMR Search)",
+ "name": "fetchK",
+ "description": "Number of initial documents to fetch for MMR reranking. Default to 20. Used only when the search type is MMR",
+ "placeholder": "20",
+ "type": "number",
+ "additionalParams": true,
+ "optional": true,
+ "id": "pinecone_0-input-fetchK-number"
+ },
+ {
+ "label": "Lambda (for MMR Search)",
+ "name": "lambda",
+ "description": "Number between 0 and 1 that determines the degree of diversity among the results, where 0 corresponds to maximum diversity and 1 to minimum diversity. Used only when the search type is MMR",
+ "placeholder": "0.5",
+ "type": "number",
+ "additionalParams": true,
+ "optional": true,
+ "id": "pinecone_0-input-lambda-number"
}
],
"inputAnchors": [
@@ -708,7 +747,10 @@
"pineconeIndex": "",
"pineconeNamespace": "",
"pineconeMetadataFilter": "",
- "topK": ""
+ "topK": "",
+ "searchType": "similarity",
+ "fetchK": "",
+ "lambda": ""
},
"outputAnchors": [
{
diff --git a/packages/server/package.json b/packages/server/package.json
index f1c0b7f79..79ff49616 100644
--- a/packages/server/package.json
+++ b/packages/server/package.json
@@ -1,6 +1,6 @@
{
"name": "flowise",
- "version": "1.4.9",
+ "version": "1.4.10",
"description": "Flowiseai Server",
"main": "dist/index",
"types": "dist/index.d.ts",
diff --git a/packages/ui/package.json b/packages/ui/package.json
index c5549b23c..fef08851c 100644
--- a/packages/ui/package.json
+++ b/packages/ui/package.json
@@ -1,6 +1,6 @@
{
"name": "flowise-ui",
- "version": "1.4.6",
+ "version": "1.4.7",
"license": "SEE LICENSE IN LICENSE.md",
"homepage": "https://flowiseai.com",
"author": {