From c15250f28be3995ac3147c1a57e5b2811aae4d2e Mon Sep 17 00:00:00 2001 From: chungyau97 Date: Sat, 13 Jan 2024 16:18:35 +0800 Subject: [PATCH 01/13] add TopK and safetySettings --- .../ChatGoogleGenerativeAI.ts | 87 ++++++++++++++++++- packages/components/src/utils.ts | 17 ++++ 2 files changed, 103 insertions(+), 1 deletion(-) diff --git a/packages/components/nodes/chatmodels/ChatGoogleGenerativeAI/ChatGoogleGenerativeAI.ts b/packages/components/nodes/chatmodels/ChatGoogleGenerativeAI/ChatGoogleGenerativeAI.ts index 546fa224c..311b4e8db 100644 --- a/packages/components/nodes/chatmodels/ChatGoogleGenerativeAI/ChatGoogleGenerativeAI.ts +++ b/packages/components/nodes/chatmodels/ChatGoogleGenerativeAI/ChatGoogleGenerativeAI.ts @@ -1,7 +1,8 @@ import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface' -import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils' +import { convertStringToArrayString, getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils' import { BaseCache } from 'langchain/schema' import { ChatGoogleGenerativeAI } from '@langchain/google-genai' +import { HarmBlockThreshold, HarmCategory } from '@google/generative-ai' class GoogleGenerativeAI_ChatModels implements INode { label: string @@ -74,6 +75,72 @@ class GoogleGenerativeAI_ChatModels implements INode { step: 0.1, optional: true, additionalParams: true + }, + { + label: 'topK', + name: 'topK', + type: 'number', + step: 0.1, + optional: true, + additionalParams: true + }, + { + label: 'Harm Category', + name: 'harmCategory', + type: 'multiOptions', + description: + 'Refer to official guide on how to use Harm Category', + options: [ + { + label: 'Dangerous', + name: HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT + }, + { + label: 'Harassment', + name: HarmCategory.HARM_CATEGORY_HARASSMENT + }, + { + label: 'Hate Speech', + name: HarmCategory.HARM_CATEGORY_HATE_SPEECH + }, + { + label: 'Sexually Explicit', + name: 
HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT + } + ], + optional: true, + additionalParams: true + }, + { + label: 'Harm Block Threshold', + name: 'harmBlockThreshold', + type: 'multiOptions', + description: + 'Refer to official guide on how to use Harm Block Threshold', + options: [ + { + label: 'Low and Above', + name: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE + }, + { + label: 'Medium and Above', + name: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE + }, + { + label: 'None', + name: HarmBlockThreshold.BLOCK_NONE + }, + { + label: 'Only High', + name: HarmBlockThreshold.BLOCK_ONLY_HIGH + }, + { + label: 'Threshold Unspecified', + name: HarmBlockThreshold.HARM_BLOCK_THRESHOLD_UNSPECIFIED + } + ], + optional: true, + additionalParams: true } ] } @@ -86,6 +153,9 @@ class GoogleGenerativeAI_ChatModels implements INode { const modelName = nodeData.inputs?.modelName as string const maxOutputTokens = nodeData.inputs?.maxOutputTokens as string const topP = nodeData.inputs?.topP as string + const topK = nodeData.inputs?.topK as string + const harmCategory = nodeData.inputs?.harmCategory as string + const harmBlockThreshold = nodeData.inputs?.harmBlockThreshold as string const cache = nodeData.inputs?.cache as BaseCache const obj = { @@ -98,8 +168,23 @@ class GoogleGenerativeAI_ChatModels implements INode { const model = new ChatGoogleGenerativeAI(obj) if (topP) model.topP = parseFloat(topP) + if (topK) model.topP = parseFloat(topK) if (cache) model.cache = cache if (temperature) model.temperature = parseFloat(temperature) + + // safetySettings + let harmCategories: string[] = convertStringToArrayString(harmCategory) + let harmBlockThresholds: string[] = convertStringToArrayString(harmBlockThreshold) + if (harmCategories.length != harmBlockThresholds.length) + throw new Error(`Harm Category & Harm Block Threshold are not the same length`) + const safetySettings = harmCategories.map((value, index) => { + return { + category: value, + threshold: harmBlockThresholds[index] + } + }) 
+ if (safetySettings) model.safetySettings = safetySettings + return model } } diff --git a/packages/components/src/utils.ts b/packages/components/src/utils.ts index 22fa6f4a9..88c553cfc 100644 --- a/packages/components/src/utils.ts +++ b/packages/components/src/utils.ts @@ -673,3 +673,20 @@ export const convertBaseMessagetoIMessage = (messages: BaseMessage[]): IMessage[ } return formatmessages } + +/** + * Convert String to Array String + * @param {string} inputString + * @returns {string[]} + */ +export const convertStringToArrayString = (inputString: string): string[] => { + let ArrayString: string[] = [] + if (inputString) { + try { + ArrayString = JSON.parse(inputString) + } catch (e) { + ArrayString = [] + } + } + return ArrayString +} From bdc892df2b36917df5586742c6eaa2fb1528e9fd Mon Sep 17 00:00:00 2001 From: chungyau97 Date: Sun, 14 Jan 2024 00:51:28 +0800 Subject: [PATCH 02/13] fix error TS2322: Type '{ category: string; threshold: string; }[]' is not assignable to type 'SafetySetting[] --- .../ChatGoogleGenerativeAI/ChatGoogleGenerativeAI.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/components/nodes/chatmodels/ChatGoogleGenerativeAI/ChatGoogleGenerativeAI.ts b/packages/components/nodes/chatmodels/ChatGoogleGenerativeAI/ChatGoogleGenerativeAI.ts index 311b4e8db..ddae6780b 100644 --- a/packages/components/nodes/chatmodels/ChatGoogleGenerativeAI/ChatGoogleGenerativeAI.ts +++ b/packages/components/nodes/chatmodels/ChatGoogleGenerativeAI/ChatGoogleGenerativeAI.ts @@ -177,13 +177,13 @@ class GoogleGenerativeAI_ChatModels implements INode { let harmBlockThresholds: string[] = convertStringToArrayString(harmBlockThreshold) if (harmCategories.length != harmBlockThresholds.length) throw new Error(`Harm Category & Harm Block Threshold are not the same length`) - const safetySettings = harmCategories.map((value, index) => { + const safetySettings: typeof model.safetySettings = harmCategories.map((value, index) => { return { 
category: value, threshold: harmBlockThresholds[index] } }) - if (safetySettings) model.safetySettings = safetySettings + if (safetySettings.length > 0) model.safetySettings = safetySettings return model } From 203b182cd54154d451369edc26291cc1f2f0b1cf Mon Sep 17 00:00:00 2001 From: chungyau97 Date: Sun, 14 Jan 2024 01:35:39 +0800 Subject: [PATCH 03/13] fix error TS2322 part 2 --- .../ChatGoogleGenerativeAI.ts | 32 +++++++++---------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/packages/components/nodes/chatmodels/ChatGoogleGenerativeAI/ChatGoogleGenerativeAI.ts b/packages/components/nodes/chatmodels/ChatGoogleGenerativeAI/ChatGoogleGenerativeAI.ts index ddae6780b..737b0bf27 100644 --- a/packages/components/nodes/chatmodels/ChatGoogleGenerativeAI/ChatGoogleGenerativeAI.ts +++ b/packages/components/nodes/chatmodels/ChatGoogleGenerativeAI/ChatGoogleGenerativeAI.ts @@ -1,7 +1,7 @@ import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface' import { convertStringToArrayString, getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils' import { BaseCache } from 'langchain/schema' -import { ChatGoogleGenerativeAI } from '@langchain/google-genai' +import { ChatGoogleGenerativeAI, GoogleGenerativeAIChatInput } from '@langchain/google-genai' import { HarmBlockThreshold, HarmCategory } from '@google/generative-ai' class GoogleGenerativeAI_ChatModels implements INode { @@ -158,10 +158,23 @@ class GoogleGenerativeAI_ChatModels implements INode { const harmBlockThreshold = nodeData.inputs?.harmBlockThreshold as string const cache = nodeData.inputs?.cache as BaseCache - const obj = { + // safetySettings + let harmCategories: string[] = convertStringToArrayString(harmCategory) + let harmBlockThresholds: string[] = convertStringToArrayString(harmBlockThreshold) + if (harmCategories.length != harmBlockThresholds.length) + throw new Error(`Harm Category & Harm Block Threshold are not the same length`) + const 
safetySettings = harmCategories.map((value, index) => { + return { + category: value, + threshold: harmBlockThresholds[index] + } + }) + + const obj: Partial = { apiKey: apiKey, modelName: modelName, - maxOutputTokens: 2048 + maxOutputTokens: 2048, + safetySettings: safetySettings.length > 0 ? safetySettings : undefined } if (maxOutputTokens) obj.maxOutputTokens = parseInt(maxOutputTokens, 10) @@ -172,19 +185,6 @@ class GoogleGenerativeAI_ChatModels implements INode { if (cache) model.cache = cache if (temperature) model.temperature = parseFloat(temperature) - // safetySettings - let harmCategories: string[] = convertStringToArrayString(harmCategory) - let harmBlockThresholds: string[] = convertStringToArrayString(harmBlockThreshold) - if (harmCategories.length != harmBlockThresholds.length) - throw new Error(`Harm Category & Harm Block Threshold are not the same length`) - const safetySettings: typeof model.safetySettings = harmCategories.map((value, index) => { - return { - category: value, - threshold: harmBlockThresholds[index] - } - }) - if (safetySettings.length > 0) model.safetySettings = safetySettings - return model } } From 4d15e886fe156e3d89c728f406aee0fb1cd91253 Mon Sep 17 00:00:00 2001 From: chungyau97 Date: Sun, 14 Jan 2024 03:38:14 +0800 Subject: [PATCH 04/13] fix error TS2322 part 3 --- .../ChatGoogleGenerativeAI.ts | 73 +++++++++++++++---- packages/components/package.json | 1 + packages/components/src/utils.ts | 4 +- 3 files changed, 61 insertions(+), 17 deletions(-) diff --git a/packages/components/nodes/chatmodels/ChatGoogleGenerativeAI/ChatGoogleGenerativeAI.ts b/packages/components/nodes/chatmodels/ChatGoogleGenerativeAI/ChatGoogleGenerativeAI.ts index 737b0bf27..bd660b474 100644 --- a/packages/components/nodes/chatmodels/ChatGoogleGenerativeAI/ChatGoogleGenerativeAI.ts +++ b/packages/components/nodes/chatmodels/ChatGoogleGenerativeAI/ChatGoogleGenerativeAI.ts @@ -1,8 +1,9 @@ import { ICommonObject, INode, INodeData, INodeParams } from 
'../../../src/Interface' -import { convertStringToArrayString, getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils' +import { convertMultiOptionsToStringArray, getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils' import { BaseCache } from 'langchain/schema' import { ChatGoogleGenerativeAI, GoogleGenerativeAIChatInput } from '@langchain/google-genai' import { HarmBlockThreshold, HarmCategory } from '@google/generative-ai' +import type { SafetySetting } from '@google/generative-ai' class GoogleGenerativeAI_ChatModels implements INode { label: string @@ -158,23 +159,10 @@ class GoogleGenerativeAI_ChatModels implements INode { const harmBlockThreshold = nodeData.inputs?.harmBlockThreshold as string const cache = nodeData.inputs?.cache as BaseCache - // safetySettings - let harmCategories: string[] = convertStringToArrayString(harmCategory) - let harmBlockThresholds: string[] = convertStringToArrayString(harmBlockThreshold) - if (harmCategories.length != harmBlockThresholds.length) - throw new Error(`Harm Category & Harm Block Threshold are not the same length`) - const safetySettings = harmCategories.map((value, index) => { - return { - category: value, - threshold: harmBlockThresholds[index] - } - }) - const obj: Partial = { apiKey: apiKey, modelName: modelName, - maxOutputTokens: 2048, - safetySettings: safetySettings.length > 0 ? 
safetySettings : undefined + maxOutputTokens: 2048 } if (maxOutputTokens) obj.maxOutputTokens = parseInt(maxOutputTokens, 10) @@ -185,8 +173,63 @@ class GoogleGenerativeAI_ChatModels implements INode { if (cache) model.cache = cache if (temperature) model.temperature = parseFloat(temperature) + // Safety Settings + let harmCategories: string[] = convertMultiOptionsToStringArray(harmCategory) + let harmBlockThresholds: string[] = convertMultiOptionsToStringArray(harmBlockThreshold) + if (harmCategories.length != harmBlockThresholds.length) + throw new Error(`Harm Category & Harm Block Threshold are not the same length`) + const safetySettings: SafetySetting[] = harmCategories.map((value, index) => { + return { + category: categoryInput(value), + threshold: thresholdInput(harmBlockThresholds[index]) + } + }) + if (safetySettings.length > 0) model.safetySettings = safetySettings + return model } } +const categoryInput = (categoryInput: string): HarmCategory => { + let categoryOutput: HarmCategory + switch (categoryInput) { + case HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: + categoryOutput = HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT + break + case HarmCategory.HARM_CATEGORY_HATE_SPEECH: + categoryOutput = HarmCategory.HARM_CATEGORY_HATE_SPEECH + break + case HarmCategory.HARM_CATEGORY_HARASSMENT: + categoryOutput = HarmCategory.HARM_CATEGORY_HARASSMENT + break + case HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: + categoryOutput = HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT + break + default: + categoryOutput = HarmCategory.HARM_CATEGORY_UNSPECIFIED + } + return categoryOutput +} + +const thresholdInput = (thresholdInput: string): HarmBlockThreshold => { + let thresholdOutput: HarmBlockThreshold + switch (thresholdInput) { + case HarmBlockThreshold.BLOCK_LOW_AND_ABOVE: + thresholdOutput = HarmBlockThreshold.BLOCK_LOW_AND_ABOVE + break + case HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE: + thresholdOutput = HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE + break + case 
HarmBlockThreshold.BLOCK_NONE: + thresholdOutput = HarmBlockThreshold.BLOCK_NONE + break + case HarmBlockThreshold.BLOCK_ONLY_HIGH: + thresholdOutput = HarmBlockThreshold.BLOCK_ONLY_HIGH + break + default: + thresholdOutput = HarmBlockThreshold.HARM_BLOCK_THRESHOLD_UNSPECIFIED + } + return thresholdOutput +} + module.exports = { nodeClass: GoogleGenerativeAI_ChatModels } diff --git a/packages/components/package.json b/packages/components/package.json index a2565430b..55e84074d 100644 --- a/packages/components/package.json +++ b/packages/components/package.json @@ -25,6 +25,7 @@ "@gomomento/sdk": "^1.51.1", "@gomomento/sdk-core": "^1.51.1", "@google-ai/generativelanguage": "^0.2.1", + "@google/generative-ai": "^0.1.3", "@huggingface/inference": "^2.6.1", "@langchain/google-genai": "^0.0.6", "@langchain/mistralai": "^0.0.6", diff --git a/packages/components/src/utils.ts b/packages/components/src/utils.ts index 88c553cfc..2d983562e 100644 --- a/packages/components/src/utils.ts +++ b/packages/components/src/utils.ts @@ -675,11 +675,11 @@ export const convertBaseMessagetoIMessage = (messages: BaseMessage[]): IMessage[ } /** - * Convert String to Array String + * Convert MultiOptions String to String Array * @param {string} inputString * @returns {string[]} */ -export const convertStringToArrayString = (inputString: string): string[] => { +export const convertMultiOptionsToStringArray = (inputString: string): string[] => { let ArrayString: string[] = [] if (inputString) { try { From d7a998f26649c9878bfbc5140f1c65323e6f480a Mon Sep 17 00:00:00 2001 From: automaton82 Date: Mon, 15 Jan 2024 23:27:33 -0500 Subject: [PATCH 05/13] Update package.json Updating turbo to the latest compatible version to fix a bug with github actions not able to build arm64 docker images. 
--- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index 5a9bfcbf3..12fbe20f1 100644 --- a/package.json +++ b/package.json @@ -48,7 +48,7 @@ "pretty-quick": "^3.1.3", "rimraf": "^3.0.2", "run-script-os": "^1.1.6", - "turbo": "1.7.4", + "turbo": "^1.7.4", "typescript": "^4.8.4" }, "engines": { From c77e23b0d15989f78f310145ae8614e53049d887 Mon Sep 17 00:00:00 2001 From: YISH Date: Tue, 16 Jan 2024 15:32:51 +0800 Subject: [PATCH 06/13] Fix CustomFunction receiving excaped inputs --- .../nodes/utilities/CustomFunction/CustomFunction.ts | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/packages/components/nodes/utilities/CustomFunction/CustomFunction.ts b/packages/components/nodes/utilities/CustomFunction/CustomFunction.ts index 37511e476..749c3a862 100644 --- a/packages/components/nodes/utilities/CustomFunction/CustomFunction.ts +++ b/packages/components/nodes/utilities/CustomFunction/CustomFunction.ts @@ -73,7 +73,11 @@ class CustomFunction_Utilities implements INode { if (Object.keys(inputVars).length) { for (const item in inputVars) { - sandbox[`$${item}`] = inputVars[item] + let value = inputVars[item] + if (typeof value === 'string') { + value = handleEscapeCharacters(value, true) + } + sandbox[`$${item}`] = value } } From f3a244a93c497ab5c84baa475a673ea48896a0de Mon Sep 17 00:00:00 2001 From: chungyau97 Date: Tue, 16 Jan 2024 16:19:21 +0800 Subject: [PATCH 07/13] modify google gemini based on request changes --- .../ChatGoogleGenerativeAI.ts | 59 +++---------------- packages/components/src/utils.ts | 9 +-- 2 files changed, 11 insertions(+), 57 deletions(-) diff --git a/packages/components/nodes/chatmodels/ChatGoogleGenerativeAI/ChatGoogleGenerativeAI.ts b/packages/components/nodes/chatmodels/ChatGoogleGenerativeAI/ChatGoogleGenerativeAI.ts index bd660b474..9a4b8891b 100644 --- a/packages/components/nodes/chatmodels/ChatGoogleGenerativeAI/ChatGoogleGenerativeAI.ts +++ 
b/packages/components/nodes/chatmodels/ChatGoogleGenerativeAI/ChatGoogleGenerativeAI.ts @@ -78,10 +78,11 @@ class GoogleGenerativeAI_ChatModels implements INode { additionalParams: true }, { - label: 'topK', + label: 'Top Next Highest Probability Tokens', name: 'topK', type: 'number', - step: 0.1, + description: `Decode using top-k sampling: consider the set of top_k most probable tokens. Must be positive`, + step: 1, optional: true, additionalParams: true }, @@ -90,7 +91,7 @@ class GoogleGenerativeAI_ChatModels implements INode { name: 'harmCategory', type: 'multiOptions', description: - 'Refer to official guide on how to use Harm Category', + 'Refer to official guide on how to use Harm Category', options: [ { label: 'Dangerous', @@ -117,7 +118,7 @@ class GoogleGenerativeAI_ChatModels implements INode { name: 'harmBlockThreshold', type: 'multiOptions', description: - 'Refer to official guide on how to use Harm Block Threshold', + 'Refer to official guide on how to use Harm Block Threshold', options: [ { label: 'Low and Above', @@ -169,7 +170,7 @@ class GoogleGenerativeAI_ChatModels implements INode { const model = new ChatGoogleGenerativeAI(obj) if (topP) model.topP = parseFloat(topP) - if (topK) model.topP = parseFloat(topK) + if (topK) model.topK = parseFloat(topK) if (cache) model.cache = cache if (temperature) model.temperature = parseFloat(temperature) @@ -178,10 +179,10 @@ class GoogleGenerativeAI_ChatModels implements INode { let harmBlockThresholds: string[] = convertMultiOptionsToStringArray(harmBlockThreshold) if (harmCategories.length != harmBlockThresholds.length) throw new Error(`Harm Category & Harm Block Threshold are not the same length`) - const safetySettings: SafetySetting[] = harmCategories.map((value, index) => { + const safetySettings: SafetySetting[] = harmCategories.map((harmCategory, index) => { return { - category: categoryInput(value), - threshold: thresholdInput(harmBlockThresholds[index]) + category: harmCategory as HarmCategory, + 
threshold: harmBlockThresholds[index] as HarmBlockThreshold } }) if (safetySettings.length > 0) model.safetySettings = safetySettings @@ -190,46 +191,4 @@ class GoogleGenerativeAI_ChatModels implements INode { } } -const categoryInput = (categoryInput: string): HarmCategory => { - let categoryOutput: HarmCategory - switch (categoryInput) { - case HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT: - categoryOutput = HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT - break - case HarmCategory.HARM_CATEGORY_HATE_SPEECH: - categoryOutput = HarmCategory.HARM_CATEGORY_HATE_SPEECH - break - case HarmCategory.HARM_CATEGORY_HARASSMENT: - categoryOutput = HarmCategory.HARM_CATEGORY_HARASSMENT - break - case HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT: - categoryOutput = HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT - break - default: - categoryOutput = HarmCategory.HARM_CATEGORY_UNSPECIFIED - } - return categoryOutput -} - -const thresholdInput = (thresholdInput: string): HarmBlockThreshold => { - let thresholdOutput: HarmBlockThreshold - switch (thresholdInput) { - case HarmBlockThreshold.BLOCK_LOW_AND_ABOVE: - thresholdOutput = HarmBlockThreshold.BLOCK_LOW_AND_ABOVE - break - case HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE: - thresholdOutput = HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE - break - case HarmBlockThreshold.BLOCK_NONE: - thresholdOutput = HarmBlockThreshold.BLOCK_NONE - break - case HarmBlockThreshold.BLOCK_ONLY_HIGH: - thresholdOutput = HarmBlockThreshold.BLOCK_ONLY_HIGH - break - default: - thresholdOutput = HarmBlockThreshold.HARM_BLOCK_THRESHOLD_UNSPECIFIED - } - return thresholdOutput -} - module.exports = { nodeClass: GoogleGenerativeAI_ChatModels } diff --git a/packages/components/src/utils.ts b/packages/components/src/utils.ts index 2d983562e..eacfa4a0b 100644 --- a/packages/components/src/utils.ts +++ b/packages/components/src/utils.ts @@ -681,12 +681,7 @@ export const convertBaseMessagetoIMessage = (messages: BaseMessage[]): IMessage[ */ export const 
convertMultiOptionsToStringArray = (inputString: string): string[] => { let ArrayString: string[] = [] - if (inputString) { - try { - ArrayString = JSON.parse(inputString) - } catch (e) { - ArrayString = [] - } - } + if (inputString) ArrayString = JSON.parse(inputString) + return ArrayString } From 2661a42a136b5b546e26f39c53e6a381a5c1bf85 Mon Sep 17 00:00:00 2001 From: chungyau97 Date: Tue, 16 Jan 2024 22:13:43 +0800 Subject: [PATCH 08/13] modify google gemini based on requested changes --- packages/components/src/utils.ts | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/packages/components/src/utils.ts b/packages/components/src/utils.ts index eacfa4a0b..2215eb418 100644 --- a/packages/components/src/utils.ts +++ b/packages/components/src/utils.ts @@ -681,7 +681,10 @@ export const convertBaseMessagetoIMessage = (messages: BaseMessage[]): IMessage[ */ export const convertMultiOptionsToStringArray = (inputString: string): string[] => { let ArrayString: string[] = [] - if (inputString) ArrayString = JSON.parse(inputString) - + try { + ArrayString = JSON.parse(inputString) + } catch (e) { + ArrayString = [] + } return ArrayString } From 0e7df3a5b51d2cc6f30c3faadcb0b711ee863f62 Mon Sep 17 00:00:00 2001 From: Ilango Date: Tue, 16 Jan 2024 20:52:25 +0530 Subject: [PATCH 09/13] Lock upstash redis version which will fix the credentials and deployment issues --- packages/components/package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/components/package.json b/packages/components/package.json index c90ea5ccd..c35419bc1 100644 --- a/packages/components/package.json +++ b/packages/components/package.json @@ -37,7 +37,7 @@ "@supabase/supabase-js": "^2.29.0", "@types/js-yaml": "^4.0.5", "@types/jsdom": "^21.1.1", - "@upstash/redis": "^1.22.1", + "@upstash/redis": "1.22.1", "@zilliz/milvus2-sdk-node": "^2.2.24", "apify-client": "^2.7.1", "axios": "1.6.2", From 019e7caac36dbc85fcdbf17db5b8f5a3664d53bc Mon Sep 17 00:00:00 
2001 From: Henry Date: Tue, 16 Jan 2024 16:07:28 +0000 Subject: [PATCH 10/13] =?UTF-8?q?=F0=9F=A5=B3=20flowise-components@1.5.1?= =?UTF-8?q?=20release?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- packages/components/package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/components/package.json b/packages/components/package.json index c35419bc1..ddf093998 100644 --- a/packages/components/package.json +++ b/packages/components/package.json @@ -1,6 +1,6 @@ { "name": "flowise-components", - "version": "1.5.0", + "version": "1.5.1", "description": "Flowiseai Components", "main": "dist/src/index", "types": "dist/src/index.d.ts", From f16b29503d58568f00301abdecdf8d046f70f4f7 Mon Sep 17 00:00:00 2001 From: Henry Date: Tue, 16 Jan 2024 16:08:25 +0000 Subject: [PATCH 11/13] =?UTF-8?q?=F0=9F=A5=B3=20flowise-ui@1.4.7=20release?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- packages/ui/package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/ui/package.json b/packages/ui/package.json index c5549b23c..fef08851c 100644 --- a/packages/ui/package.json +++ b/packages/ui/package.json @@ -1,6 +1,6 @@ { "name": "flowise-ui", - "version": "1.4.6", + "version": "1.4.7", "license": "SEE LICENSE IN LICENSE.md", "homepage": "https://flowiseai.com", "author": { From 8c1e62be425810b0d7cc7725e707a8415a670a4d Mon Sep 17 00:00:00 2001 From: Henry Date: Tue, 16 Jan 2024 16:09:13 +0000 Subject: [PATCH 12/13] =?UTF-8?q?=F0=9F=A5=B3=20flowise@1.4.10=20release?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- package.json | 2 +- packages/server/package.json | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/package.json b/package.json index 12fbe20f1..561694d0e 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "flowise", - "version": "1.4.9", + "version": 
"1.4.10", "private": true, "homepage": "https://flowiseai.com", "workspaces": [ diff --git a/packages/server/package.json b/packages/server/package.json index f1c0b7f79..79ff49616 100644 --- a/packages/server/package.json +++ b/packages/server/package.json @@ -1,6 +1,6 @@ { "name": "flowise", - "version": "1.4.9", + "version": "1.4.10", "description": "Flowiseai Server", "main": "dist/index", "types": "dist/index.d.ts", From 1bf7944776c83ceb3903fe9d33dba82f2acb1fdf Mon Sep 17 00:00:00 2001 From: Henry Date: Wed, 17 Jan 2024 15:55:56 +0000 Subject: [PATCH 13/13] update retrievers and add mmr to other vector stores --- .../CohereRerankRetriever/Cohere.svg | 1 + .../CohereRerankRetriever/CohereRerank.ts | 7 +- .../CohereRerankRetriever.ts | 74 ++++++++++++--- .../compressionRetriever.svg | 7 -- .../EmbeddingsFilterRetriever.ts | 62 ++++++++++--- .../retrievers/HydeRetriever/HydeRetriever.ts | 50 ++++++++++- .../LLMFilterCompressionRetriever.ts | 62 ++++++++++--- .../compressionRetriever.svg | 7 -- .../LLMFilterRetriever/llmFilterRetriever.svg | 1 + .../retrievers/RRFRetriever/RRFRetriever.ts | 64 ++++++++++--- .../RRFRetriever/compressionRetriever.svg | 7 -- .../retrievers/RRFRetriever/rrfRetriever.svg | 1 + .../SimilarityThresholdRetriever.ts | 21 +++-- .../nodes/vectorstores/Astra/Astra.ts | 16 +--- .../vectorstores/MongoDBAtlas/MongoDBAtlas.ts | 15 +--- .../nodes/vectorstores/Pinecone/Pinecone.ts | 2 +- .../marketplaces/chatflows/AutoGPT.json | 46 +++++++++- .../marketplaces/chatflows/BabyAGI.json | 46 +++++++++- .../Conversational Retrieval Agent.json | 46 +++++++++- .../Conversational Retrieval QA Chain.json | 46 +++++++++- .../chatflows/Metadata Filter.json | 46 +++++++++- .../chatflows/Multi Retrieval QA Chain.json | 90 ++++++++++++++++++- .../marketplaces/chatflows/WebPage QnA.json | 46 +++++++++- 23 files changed, 642 insertions(+), 121 deletions(-) create mode 100644 packages/components/nodes/retrievers/CohereRerankRetriever/Cohere.svg delete mode 100644 
packages/components/nodes/retrievers/CohereRerankRetriever/compressionRetriever.svg delete mode 100644 packages/components/nodes/retrievers/LLMFilterRetriever/compressionRetriever.svg create mode 100644 packages/components/nodes/retrievers/LLMFilterRetriever/llmFilterRetriever.svg delete mode 100644 packages/components/nodes/retrievers/RRFRetriever/compressionRetriever.svg create mode 100644 packages/components/nodes/retrievers/RRFRetriever/rrfRetriever.svg diff --git a/packages/components/nodes/retrievers/CohereRerankRetriever/Cohere.svg b/packages/components/nodes/retrievers/CohereRerankRetriever/Cohere.svg new file mode 100644 index 000000000..88bcabe34 --- /dev/null +++ b/packages/components/nodes/retrievers/CohereRerankRetriever/Cohere.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/packages/components/nodes/retrievers/CohereRerankRetriever/CohereRerank.ts b/packages/components/nodes/retrievers/CohereRerankRetriever/CohereRerank.ts index 55f3c4aad..e70c044f6 100644 --- a/packages/components/nodes/retrievers/CohereRerankRetriever/CohereRerank.ts +++ b/packages/components/nodes/retrievers/CohereRerankRetriever/CohereRerank.ts @@ -7,11 +7,14 @@ export class CohereRerank extends BaseDocumentCompressor { private COHERE_API_URL = 'https://api.cohere.ai/v1/rerank' private readonly model: string private readonly k: number - constructor(cohereAPIKey: string, model: string, k: number) { + private readonly max_chunks_per_doc: number + + constructor(cohereAPIKey: string, model: string, k: number, max_chunks_per_doc: number) { super() this.cohereAPIKey = cohereAPIKey this.model = model this.k = k + this.max_chunks_per_doc = max_chunks_per_doc } async compressDocuments( documents: Document>[], @@ -32,8 +35,8 @@ export class CohereRerank extends BaseDocumentCompressor { const data = { model: this.model, topN: this.k, - max_chunks_per_doc: 10, query: query, + max_chunks_per_doc: this.max_chunks_per_doc, return_documents: false, documents: documents.map((doc) => 
doc.pageContent) } diff --git a/packages/components/nodes/retrievers/CohereRerankRetriever/CohereRerankRetriever.ts b/packages/components/nodes/retrievers/CohereRerankRetriever/CohereRerankRetriever.ts index 3c1872b3f..442fdc7a6 100644 --- a/packages/components/nodes/retrievers/CohereRerankRetriever/CohereRerankRetriever.ts +++ b/packages/components/nodes/retrievers/CohereRerankRetriever/CohereRerankRetriever.ts @@ -1,7 +1,7 @@ import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface' import { BaseRetriever } from 'langchain/schema/retriever' import { ContextualCompressionRetriever } from 'langchain/retrievers/contextual_compression' -import { getCredentialData, getCredentialParam } from '../../../src' +import { getCredentialData, getCredentialParam, handleEscapeCharacters } from '../../../src' import { CohereRerank } from './CohereRerank' import { VectorStoreRetriever } from 'langchain/vectorstores/base' @@ -15,16 +15,16 @@ class CohereRerankRetriever_Retrievers implements INode { category: string baseClasses: string[] inputs: INodeParams[] - outputs: INodeOutputsValue[] credential: INodeParams badge: string + outputs: INodeOutputsValue[] constructor() { this.label = 'Cohere Rerank Retriever' this.name = 'cohereRerankRetriever' this.version = 1.0 this.type = 'Cohere Rerank Retriever' - this.icon = 'compressionRetriever.svg' + this.icon = 'Cohere.svg' this.category = 'Retrievers' this.badge = 'NEW' this.description = 'Cohere Rerank indexes the documents from most to least semantically relevant to the query.' 
@@ -37,7 +37,7 @@ class CohereRerankRetriever_Retrievers implements INode { } this.inputs = [ { - label: 'Base Retriever', + label: 'Vector Store Retriever', name: 'baseRetriever', type: 'VectorStoreRetriever' }, @@ -58,36 +58,84 @@ class CohereRerankRetriever_Retrievers implements INode { default: 'rerank-english-v2.0', optional: true }, + { + label: 'Query', + name: 'query', + type: 'string', + description: 'Query to retrieve documents from retriever. If not specified, user question will be used', + optional: true, + acceptVariable: true + }, { label: 'Top K', name: 'topK', description: 'Number of top results to fetch. Default to the TopK of the Base Retriever', - placeholder: '0', + placeholder: '4', + type: 'number', + additionalParams: true, + optional: true + }, + { + label: 'Max Chunks Per Doc', + name: 'maxChunksPerDoc', + description: 'The maximum number of chunks to produce internally from a document. Default to 10', + placeholder: '10', type: 'number', - default: 0, additionalParams: true, optional: true } ] + this.outputs = [ + { + label: 'Cohere Rerank Retriever', + name: 'retriever', + baseClasses: this.baseClasses + }, + { + label: 'Document', + name: 'document', + baseClasses: ['Document'] + }, + { + label: 'Text', + name: 'text', + baseClasses: ['string', 'json'] + } + ] } - async init(nodeData: INodeData, _: string, options: ICommonObject): Promise { + async init(nodeData: INodeData, input: string, options: ICommonObject): Promise { const baseRetriever = nodeData.inputs?.baseRetriever as BaseRetriever const model = nodeData.inputs?.model as string + const query = nodeData.inputs?.query as string const credentialData = await getCredentialData(nodeData.credential ?? '', options) const cohereApiKey = getCredentialParam('cohereApiKey', credentialData, nodeData) const topK = nodeData.inputs?.topK as string - let k = topK ? parseFloat(topK) : 4 + const k = topK ? parseFloat(topK) : (baseRetriever as VectorStoreRetriever).k ?? 
4 + const maxChunksPerDoc = nodeData.inputs?.maxChunksPerDoc as string + const max_chunks_per_doc = maxChunksPerDoc ? parseFloat(maxChunksPerDoc) : 10 + const output = nodeData.outputs?.output as string - if (k <= 0) { - k = (baseRetriever as VectorStoreRetriever).k - } + const cohereCompressor = new CohereRerank(cohereApiKey, model, k, max_chunks_per_doc) - const cohereCompressor = new CohereRerank(cohereApiKey, model, k) - return new ContextualCompressionRetriever({ + const retriever = new ContextualCompressionRetriever({ baseCompressor: cohereCompressor, baseRetriever: baseRetriever }) + + if (output === 'retriever') return retriever + else if (output === 'document') return await retriever.getRelevantDocuments(query ? query : input) + else if (output === 'text') { + let finaltext = '' + + const docs = await retriever.getRelevantDocuments(query ? query : input) + + for (const doc of docs) finaltext += `${doc.pageContent}\n` + + return handleEscapeCharacters(finaltext, false) + } + + return retriever } } diff --git a/packages/components/nodes/retrievers/CohereRerankRetriever/compressionRetriever.svg b/packages/components/nodes/retrievers/CohereRerankRetriever/compressionRetriever.svg deleted file mode 100644 index 23c52d25e..000000000 --- a/packages/components/nodes/retrievers/CohereRerankRetriever/compressionRetriever.svg +++ /dev/null @@ -1,7 +0,0 @@ - - - - - - - \ No newline at end of file diff --git a/packages/components/nodes/retrievers/EmbeddingsFilterRetriever/EmbeddingsFilterRetriever.ts b/packages/components/nodes/retrievers/EmbeddingsFilterRetriever/EmbeddingsFilterRetriever.ts index d373704c0..d1049fa49 100644 --- a/packages/components/nodes/retrievers/EmbeddingsFilterRetriever/EmbeddingsFilterRetriever.ts +++ b/packages/components/nodes/retrievers/EmbeddingsFilterRetriever/EmbeddingsFilterRetriever.ts @@ -3,6 +3,7 @@ import { BaseRetriever } from 'langchain/schema/retriever' import { Embeddings } from 'langchain/embeddings/base' import { 
ContextualCompressionRetriever } from 'langchain/retrievers/contextual_compression' import { EmbeddingsFilter } from 'langchain/retrievers/document_compressors/embeddings_filter' +import { handleEscapeCharacters } from '../../../src/utils' class EmbeddingsFilterRetriever_Retrievers implements INode { label: string @@ -29,15 +30,22 @@ class EmbeddingsFilterRetriever_Retrievers implements INode { this.baseClasses = [this.type, 'BaseRetriever'] this.inputs = [ { - label: 'Base Retriever', + label: 'Vector Store Retriever', name: 'baseRetriever', type: 'VectorStoreRetriever' }, { label: 'Embeddings', name: 'embeddings', - type: 'Embeddings', - optional: false + type: 'Embeddings' + }, + { + label: 'Query', + name: 'query', + type: 'string', + description: 'Query to retrieve documents from retriever. If not specified, user question will be used', + optional: true, + acceptVariable: true }, { label: 'Similarity Threshold', @@ -61,36 +69,64 @@ class EmbeddingsFilterRetriever_Retrievers implements INode { additionalParams: true } ] + this.outputs = [ + { + label: 'Embeddings Filter Retriever', + name: 'retriever', + baseClasses: this.baseClasses + }, + { + label: 'Document', + name: 'document', + baseClasses: ['Document'] + }, + { + label: 'Text', + name: 'text', + baseClasses: ['string', 'json'] + } + ] } - async init(nodeData: INodeData): Promise { + async init(nodeData: INodeData, input: string): Promise { const baseRetriever = nodeData.inputs?.baseRetriever as BaseRetriever const embeddings = nodeData.inputs?.embeddings as Embeddings + const query = nodeData.inputs?.query as string const similarityThreshold = nodeData.inputs?.similarityThreshold as string const k = nodeData.inputs?.k as string + const output = nodeData.outputs?.output as string if (k === undefined && similarityThreshold === undefined) { throw new Error(`Must specify one of "k" or "similarity_threshold".`) } - let similarityThresholdNumber = 0.8 - if (similarityThreshold) { - similarityThresholdNumber = 
parseFloat(similarityThreshold) - } - let kNumber = 0.8 - if (k) { - kNumber = parseFloat(k) - } + const similarityThresholdNumber = similarityThreshold ? parseFloat(similarityThreshold) : 0.8 + const kNumber = k ? parseFloat(k) : undefined + const baseCompressor = new EmbeddingsFilter({ embeddings: embeddings, similarityThreshold: similarityThresholdNumber, k: kNumber }) - return new ContextualCompressionRetriever({ + const retriever = new ContextualCompressionRetriever({ baseCompressor, baseRetriever: baseRetriever }) + + if (output === 'retriever') return retriever + else if (output === 'document') return await retriever.getRelevantDocuments(query ? query : input) + else if (output === 'text') { + let finaltext = '' + + const docs = await retriever.getRelevantDocuments(query ? query : input) + + for (const doc of docs) finaltext += `${doc.pageContent}\n` + + return handleEscapeCharacters(finaltext, false) + } + + return retriever } } diff --git a/packages/components/nodes/retrievers/HydeRetriever/HydeRetriever.ts b/packages/components/nodes/retrievers/HydeRetriever/HydeRetriever.ts index 10d9a6e7a..10fff7646 100644 --- a/packages/components/nodes/retrievers/HydeRetriever/HydeRetriever.ts +++ b/packages/components/nodes/retrievers/HydeRetriever/HydeRetriever.ts @@ -1,8 +1,9 @@ import { VectorStore } from 'langchain/vectorstores/base' -import { INode, INodeData, INodeParams } from '../../../src/Interface' +import { INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface' import { HydeRetriever, HydeRetrieverOptions, PromptKey } from 'langchain/retrievers/hyde' import { BaseLanguageModel } from 'langchain/base_language' import { PromptTemplate } from 'langchain/prompts' +import { handleEscapeCharacters } from '../../../src/utils' class HydeRetriever_Retrievers implements INode { label: string @@ -14,11 +15,12 @@ class HydeRetriever_Retrievers implements INode { category: string baseClasses: string[] inputs: INodeParams[] + outputs: 
INodeOutputsValue[] constructor() { - this.label = 'Hyde Retriever' + this.label = 'HyDE Retriever' this.name = 'HydeRetriever' - this.version = 2.0 + this.version = 3.0 this.type = 'HydeRetriever' this.icon = 'hyderetriever.svg' this.category = 'Retrievers' @@ -35,6 +37,14 @@ class HydeRetriever_Retrievers implements INode { name: 'vectorStore', type: 'VectorStore' }, + { + label: 'Query', + name: 'query', + type: 'string', + description: 'Query to retrieve documents from retriever. If not specified, user question will be used', + optional: true, + acceptVariable: true + }, { label: 'Select Defined Prompt', name: 'promptKey', @@ -121,15 +131,34 @@ Passage:` optional: true } ] + this.outputs = [ + { + label: 'HyDE Retriever', + name: 'retriever', + baseClasses: this.baseClasses + }, + { + label: 'Document', + name: 'document', + baseClasses: ['Document'] + }, + { + label: 'Text', + name: 'text', + baseClasses: ['string', 'json'] + } + ] } - async init(nodeData: INodeData): Promise { + async init(nodeData: INodeData, input: string): Promise { const llm = nodeData.inputs?.model as BaseLanguageModel const vectorStore = nodeData.inputs?.vectorStore as VectorStore const promptKey = nodeData.inputs?.promptKey as PromptKey const customPrompt = nodeData.inputs?.customPrompt as string + const query = nodeData.inputs?.query as string const topK = nodeData.inputs?.topK as string const k = topK ? parseFloat(topK) : 4 + const output = nodeData.outputs?.output as string const obj: HydeRetrieverOptions = { llm, @@ -141,6 +170,19 @@ Passage:` else if (promptKey) obj.promptTemplate = promptKey const retriever = new HydeRetriever(obj) + + if (output === 'retriever') return retriever + else if (output === 'document') return await retriever.getRelevantDocuments(query ? query : input) + else if (output === 'text') { + let finaltext = '' + + const docs = await retriever.getRelevantDocuments(query ? 
query : input) + + for (const doc of docs) finaltext += `${doc.pageContent}\n` + + return handleEscapeCharacters(finaltext, false) + } + return retriever } } diff --git a/packages/components/nodes/retrievers/LLMFilterRetriever/LLMFilterCompressionRetriever.ts b/packages/components/nodes/retrievers/LLMFilterRetriever/LLMFilterCompressionRetriever.ts index e044468f4..6b710cf30 100644 --- a/packages/components/nodes/retrievers/LLMFilterRetriever/LLMFilterCompressionRetriever.ts +++ b/packages/components/nodes/retrievers/LLMFilterRetriever/LLMFilterCompressionRetriever.ts @@ -3,6 +3,7 @@ import { BaseRetriever } from 'langchain/schema/retriever' import { ContextualCompressionRetriever } from 'langchain/retrievers/contextual_compression' import { BaseLanguageModel } from 'langchain/base_language' import { LLMChainExtractor } from 'langchain/retrievers/document_compressors/chain_extract' +import { handleEscapeCharacters } from '../../../src/utils' class LLMFilterCompressionRetriever_Retrievers implements INode { label: string @@ -22,7 +23,7 @@ class LLMFilterCompressionRetriever_Retrievers implements INode { this.name = 'llmFilterRetriever' this.version = 1.0 this.type = 'LLMFilterRetriever' - this.icon = 'compressionRetriever.svg' + this.icon = 'llmFilterRetriever.svg' this.category = 'Retrievers' this.badge = 'NEW' this.description = @@ -30,30 +31,69 @@ class LLMFilterCompressionRetriever_Retrievers implements INode { this.baseClasses = [this.type, 'BaseRetriever'] this.inputs = [ { - label: 'Base Retriever', + label: 'Vector Store Retriever', name: 'baseRetriever', type: 'VectorStoreRetriever' }, { label: 'Language Model', name: 'model', - type: 'BaseLanguageModel', - optional: true + type: 'BaseLanguageModel' + }, + { + label: 'Query', + name: 'query', + type: 'string', + description: 'Query to retrieve documents from retriever. 
If not specified, user question will be used', + optional: true, + acceptVariable: true + } + ] + this.outputs = [ + { + label: 'LLM Filter Retriever', + name: 'retriever', + baseClasses: this.baseClasses + }, + { + label: 'Document', + name: 'document', + baseClasses: ['Document'] + }, + { + label: 'Text', + name: 'text', + baseClasses: ['string', 'json'] } ] } - async init(nodeData: INodeData): Promise { + async init(nodeData: INodeData, input: string): Promise { const baseRetriever = nodeData.inputs?.baseRetriever as BaseRetriever const model = nodeData.inputs?.model as BaseLanguageModel + const query = nodeData.inputs?.query as string + const output = nodeData.outputs?.output as string - if (model) { - return new ContextualCompressionRetriever({ - baseCompressor: LLMChainExtractor.fromLLM(model), - baseRetriever: baseRetriever - }) + if (!model) throw new Error('There must be a LLM model connected to LLM Filter Retriever') + + const retriever = new ContextualCompressionRetriever({ + baseCompressor: LLMChainExtractor.fromLLM(model), + baseRetriever: baseRetriever + }) + + if (output === 'retriever') return retriever + else if (output === 'document') return await retriever.getRelevantDocuments(query ? query : input) + else if (output === 'text') { + let finaltext = '' + + const docs = await retriever.getRelevantDocuments(query ? 
query : input) + + for (const doc of docs) finaltext += `${doc.pageContent}\n` + + return handleEscapeCharacters(finaltext, false) } - return {} + + return retriever } } diff --git a/packages/components/nodes/retrievers/LLMFilterRetriever/compressionRetriever.svg b/packages/components/nodes/retrievers/LLMFilterRetriever/compressionRetriever.svg deleted file mode 100644 index 23c52d25e..000000000 --- a/packages/components/nodes/retrievers/LLMFilterRetriever/compressionRetriever.svg +++ /dev/null @@ -1,7 +0,0 @@ - - - - - - - \ No newline at end of file diff --git a/packages/components/nodes/retrievers/LLMFilterRetriever/llmFilterRetriever.svg b/packages/components/nodes/retrievers/LLMFilterRetriever/llmFilterRetriever.svg new file mode 100644 index 000000000..d3f4d15f4 --- /dev/null +++ b/packages/components/nodes/retrievers/LLMFilterRetriever/llmFilterRetriever.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/packages/components/nodes/retrievers/RRFRetriever/RRFRetriever.ts b/packages/components/nodes/retrievers/RRFRetriever/RRFRetriever.ts index 3229b3a8f..ed15ed243 100644 --- a/packages/components/nodes/retrievers/RRFRetriever/RRFRetriever.ts +++ b/packages/components/nodes/retrievers/RRFRetriever/RRFRetriever.ts @@ -1,9 +1,10 @@ -import { INode, INodeData, INodeParams } from '../../../src/Interface' +import { INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface' import { BaseLanguageModel } from 'langchain/base_language' import { ContextualCompressionRetriever } from 'langchain/retrievers/contextual_compression' import { BaseRetriever } from 'langchain/schema/retriever' import { ReciprocalRankFusion } from './ReciprocalRankFusion' import { VectorStoreRetriever } from 'langchain/vectorstores/base' +import { handleEscapeCharacters } from '../../../src/utils' class RRFRetriever_Retrievers implements INode { label: string @@ -16,20 +17,21 @@ class RRFRetriever_Retrievers implements INode { baseClasses: string[] inputs: 
INodeParams[] badge: string + outputs: INodeOutputsValue[] constructor() { this.label = 'Reciprocal Rank Fusion Retriever' this.name = 'RRFRetriever' - this.version = 2.0 + this.version = 1.0 this.type = 'RRFRetriever' this.badge = 'NEW' - this.icon = 'compressionRetriever.svg' + this.icon = 'rrfRetriever.svg' this.category = 'Retrievers' this.description = 'Reciprocal Rank Fusion to re-rank search results by multiple query generation.' this.baseClasses = [this.type, 'BaseRetriever'] this.inputs = [ { - label: 'Base Retriever', + label: 'Vector Store Retriever', name: 'baseRetriever', type: 'VectorStoreRetriever' }, @@ -38,6 +40,14 @@ class RRFRetriever_Retrievers implements INode { name: 'model', type: 'BaseLanguageModel' }, + { + label: 'Query', + name: 'query', + type: 'string', + description: 'Query to retrieve documents from retriever. If not specified, user question will be used', + optional: true, + acceptVariable: true + }, { label: 'Query Count', name: 'queryCount', @@ -54,7 +64,6 @@ class RRFRetriever_Retrievers implements INode { description: 'Number of top results to fetch. Default to the TopK of the Base Retriever', placeholder: '0', type: 'number', - default: 0, additionalParams: true, optional: true }, @@ -71,27 +80,56 @@ class RRFRetriever_Retrievers implements INode { optional: true } ] + this.outputs = [ + { + label: 'Reciprocal Rank Fusion Retriever', + name: 'retriever', + baseClasses: this.baseClasses + }, + { + label: 'Document', + name: 'document', + baseClasses: ['Document'] + }, + { + label: 'Text', + name: 'text', + baseClasses: ['string', 'json'] + } + ] } - async init(nodeData: INodeData): Promise { + async init(nodeData: INodeData, input: string): Promise { const llm = nodeData.inputs?.model as BaseLanguageModel const baseRetriever = nodeData.inputs?.baseRetriever as BaseRetriever + const query = nodeData.inputs?.query as string const queryCount = nodeData.inputs?.queryCount as string const q = queryCount ? 
parseFloat(queryCount) : 4 const topK = nodeData.inputs?.topK as string - let k = topK ? parseFloat(topK) : 4 + const k = topK ? parseFloat(topK) : (baseRetriever as VectorStoreRetriever).k ?? 4 const constantC = nodeData.inputs?.c as string - let c = topK ? parseFloat(constantC) : 60 - - if (k <= 0) { - k = (baseRetriever as VectorStoreRetriever).k - } + const c = topK ? parseFloat(constantC) : 60 + const output = nodeData.outputs?.output as string const ragFusion = new ReciprocalRankFusion(llm, baseRetriever as VectorStoreRetriever, q, k, c) - return new ContextualCompressionRetriever({ + const retriever = new ContextualCompressionRetriever({ baseCompressor: ragFusion, baseRetriever: baseRetriever }) + + if (output === 'retriever') return retriever + else if (output === 'document') return await retriever.getRelevantDocuments(query ? query : input) + else if (output === 'text') { + let finaltext = '' + + const docs = await retriever.getRelevantDocuments(query ? query : input) + + for (const doc of docs) finaltext += `${doc.pageContent}\n` + + return handleEscapeCharacters(finaltext, false) + } + + return retriever } } diff --git a/packages/components/nodes/retrievers/RRFRetriever/compressionRetriever.svg b/packages/components/nodes/retrievers/RRFRetriever/compressionRetriever.svg deleted file mode 100644 index 23c52d25e..000000000 --- a/packages/components/nodes/retrievers/RRFRetriever/compressionRetriever.svg +++ /dev/null @@ -1,7 +0,0 @@ - - - - - - - \ No newline at end of file diff --git a/packages/components/nodes/retrievers/RRFRetriever/rrfRetriever.svg b/packages/components/nodes/retrievers/RRFRetriever/rrfRetriever.svg new file mode 100644 index 000000000..56fbcc5a1 --- /dev/null +++ b/packages/components/nodes/retrievers/RRFRetriever/rrfRetriever.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/packages/components/nodes/retrievers/SimilarityThresholdRetriever/SimilarityThresholdRetriever.ts 
b/packages/components/nodes/retrievers/SimilarityThresholdRetriever/SimilarityThresholdRetriever.ts index a9f4b3d87..5f5a9ed0d 100644 --- a/packages/components/nodes/retrievers/SimilarityThresholdRetriever/SimilarityThresholdRetriever.ts +++ b/packages/components/nodes/retrievers/SimilarityThresholdRetriever/SimilarityThresholdRetriever.ts @@ -18,7 +18,7 @@ class SimilarityThresholdRetriever_Retrievers implements INode { constructor() { this.label = 'Similarity Score Threshold Retriever' this.name = 'similarityThresholdRetriever' - this.version = 1.0 + this.version = 2.0 this.type = 'SimilarityThresholdRetriever' this.icon = 'similaritythreshold.svg' this.category = 'Retrievers' @@ -30,6 +30,14 @@ class SimilarityThresholdRetriever_Retrievers implements INode { name: 'vectorStore', type: 'VectorStore' }, + { + label: 'Query', + name: 'query', + type: 'string', + description: 'Query to retrieve documents from retriever. If not specified, user question will be used', + optional: true, + acceptVariable: true + }, { label: 'Minimum Similarity Score (%)', name: 'minSimilarityScore', @@ -44,7 +52,8 @@ class SimilarityThresholdRetriever_Retrievers implements INode { description: `The maximum number of results to fetch`, type: 'number', default: 20, - step: 1 + step: 1, + additionalParams: true }, { label: 'K Increment', @@ -52,7 +61,8 @@ class SimilarityThresholdRetriever_Retrievers implements INode { description: `How much to increase K by each time. 
It'll fetch N results, then N + kIncrement, then N + kIncrement * 2, etc.`, type: 'number', default: 2, - step: 1 + step: 1, + additionalParams: true } ] this.outputs = [ @@ -77,6 +87,7 @@ class SimilarityThresholdRetriever_Retrievers implements INode { async init(nodeData: INodeData, input: string): Promise { const vectorStore = nodeData.inputs?.vectorStore as VectorStore const minSimilarityScore = nodeData.inputs?.minSimilarityScore as number + const query = nodeData.inputs?.query as string const maxK = nodeData.inputs?.maxK as string const kIncrement = nodeData.inputs?.kIncrement as string @@ -89,11 +100,11 @@ class SimilarityThresholdRetriever_Retrievers implements INode { }) if (output === 'retriever') return retriever - else if (output === 'document') return await retriever.getRelevantDocuments(input) + else if (output === 'document') return await retriever.getRelevantDocuments(query ? query : input) else if (output === 'text') { let finaltext = '' - const docs = await retriever.getRelevantDocuments(input) + const docs = await retriever.getRelevantDocuments(query ? 
query : input) for (const doc of docs) finaltext += `${doc.pageContent}\n` diff --git a/packages/components/nodes/vectorstores/Astra/Astra.ts b/packages/components/nodes/vectorstores/Astra/Astra.ts index 865f10446..edaadc9c0 100644 --- a/packages/components/nodes/vectorstores/Astra/Astra.ts +++ b/packages/components/nodes/vectorstores/Astra/Astra.ts @@ -4,6 +4,7 @@ import { Document } from 'langchain/document' import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface' import { getBaseClasses, getCredentialData } from '../../../src/utils' import { AstraDBVectorStore, AstraLibArgs } from '@langchain/community/vectorstores/astradb' +import { addMMRInputParams, resolveVectorStoreOrRetriever } from '../VectorStoreUtils' class Astra_VectorStores implements INode { label: string @@ -26,7 +27,7 @@ class Astra_VectorStores implements INode { this.type = 'Astra' this.icon = 'astra.svg' this.category = 'Vector Stores' - this.description = `Upsert embedded data and perform similarity search upon query using DataStax Astra DB, a serverless vector database that’s perfect for managing mission-critical AI workloads` + this.description = `Upsert embedded data and perform similarity or mmr search upon query using DataStax Astra DB, a serverless vector database that’s perfect for managing mission-critical AI workloads` this.baseClasses = [this.type, 'VectorStoreRetriever', 'BaseRetriever'] this.badge = 'NEW' this.credential = { @@ -74,6 +75,7 @@ class Astra_VectorStores implements INode { optional: true } ] + addMMRInputParams(this.inputs) this.outputs = [ { label: 'Astra Retriever', @@ -139,9 +141,6 @@ class Astra_VectorStores implements INode { const embeddings = nodeData.inputs?.embeddings as Embeddings const vectorDimension = nodeData.inputs?.vectorDimension as number const similarityMetric = nodeData.inputs?.similarityMetric as 'cosine' | 'euclidean' | 'dot_product' | undefined - const output = nodeData.outputs?.output as string - 
const topK = nodeData.inputs?.topK as string - const k = topK ? parseFloat(topK) : 4 const credentialData = await getCredentialData(nodeData.credential ?? '', options) @@ -176,14 +175,7 @@ class Astra_VectorStores implements INode { const vectorStore = await AstraDBVectorStore.fromExistingIndex(embeddings, astraConfig) - if (output === 'retriever') { - const retriever = vectorStore.asRetriever(k) - return retriever - } else if (output === 'vectorStore') { - ;(vectorStore as any).k = k - return vectorStore - } - return vectorStore + return resolveVectorStoreOrRetriever(nodeData, vectorStore) } } diff --git a/packages/components/nodes/vectorstores/MongoDBAtlas/MongoDBAtlas.ts b/packages/components/nodes/vectorstores/MongoDBAtlas/MongoDBAtlas.ts index 9bc23f104..6ba7199f0 100644 --- a/packages/components/nodes/vectorstores/MongoDBAtlas/MongoDBAtlas.ts +++ b/packages/components/nodes/vectorstores/MongoDBAtlas/MongoDBAtlas.ts @@ -5,6 +5,7 @@ import { Embeddings } from 'langchain/embeddings/base' import { Document } from 'langchain/document' import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface' import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils' +import { addMMRInputParams, resolveVectorStoreOrRetriever } from '../VectorStoreUtils' class MongoDBAtlas_VectorStores implements INode { label: string @@ -24,7 +25,7 @@ class MongoDBAtlas_VectorStores implements INode { this.label = 'MongoDB Atlas' this.name = 'mongoDBAtlas' this.version = 1.0 - this.description = `Upsert embedded data and perform similarity search upon query using MongoDB Atlas, a managed cloud mongodb database` + this.description = `Upsert embedded data and perform similarity or mmr search upon query using MongoDB Atlas, a managed cloud mongodb database` this.type = 'MongoDB Atlas' this.icon = 'mongodb.svg' this.category = 'Vector Stores' @@ -95,6 +96,7 @@ class MongoDBAtlas_VectorStores implements INode { optional: true 
} ] + addMMRInputParams(this.inputs) this.outputs = [ { label: 'MongoDB Retriever', @@ -162,9 +164,6 @@ class MongoDBAtlas_VectorStores implements INode { let textKey = nodeData.inputs?.textKey as string let embeddingKey = nodeData.inputs?.embeddingKey as string const embeddings = nodeData.inputs?.embeddings as Embeddings - const topK = nodeData.inputs?.topK as string - const k = topK ? parseFloat(topK) : 4 - const output = nodeData.outputs?.output as string let mongoDBConnectUrl = getCredentialParam('mongoDBConnectUrl', credentialData, nodeData) @@ -181,13 +180,7 @@ class MongoDBAtlas_VectorStores implements INode { embeddingKey }) - if (output === 'retriever') { - return vectorStore.asRetriever(k) - } else if (output === 'vectorStore') { - ;(vectorStore as any).k = k - return vectorStore - } - return vectorStore + return resolveVectorStoreOrRetriever(nodeData, vectorStore) } } diff --git a/packages/components/nodes/vectorstores/Pinecone/Pinecone.ts b/packages/components/nodes/vectorstores/Pinecone/Pinecone.ts index 4b91a9b54..6623b1a26 100644 --- a/packages/components/nodes/vectorstores/Pinecone/Pinecone.ts +++ b/packages/components/nodes/vectorstores/Pinecone/Pinecone.ts @@ -24,7 +24,7 @@ class Pinecone_VectorStores implements INode { constructor() { this.label = 'Pinecone' this.name = 'pinecone' - this.version = 3.0 + this.version = 2.0 this.type = 'Pinecone' this.icon = 'pinecone.svg' this.category = 'Vector Stores' diff --git a/packages/server/marketplaces/chatflows/AutoGPT.json b/packages/server/marketplaces/chatflows/AutoGPT.json index 150fe17eb..0062cd43f 100644 --- a/packages/server/marketplaces/chatflows/AutoGPT.json +++ b/packages/server/marketplaces/chatflows/AutoGPT.json @@ -511,7 +511,7 @@ "type": "Pinecone", "baseClasses": ["Pinecone", "VectorStoreRetriever", "BaseRetriever"], "category": "Vector Stores", - "description": "Upsert embedded data and perform similarity search upon query using Pinecone, a leading fully managed hosted vector database", + 
"description": "Upsert embedded data and perform similarity or mmr search using Pinecone, a leading fully managed hosted vector database", "inputParams": [ { "label": "Connect Credential", @@ -552,6 +552,45 @@ "additionalParams": true, "optional": true, "id": "pinecone_0-input-topK-number" + }, + { + "label": "Search Type", + "name": "searchType", + "type": "options", + "default": "similarity", + "options": [ + { + "label": "Similarity", + "name": "similarity" + }, + { + "label": "Max Marginal Relevance", + "name": "mmr" + } + ], + "additionalParams": true, + "optional": true, + "id": "pinecone_0-input-searchType-options" + }, + { + "label": "Fetch K (for MMR Search)", + "name": "fetchK", + "description": "Number of initial documents to fetch for MMR reranking. Default to 20. Used only when the search type is MMR", + "placeholder": "20", + "type": "number", + "additionalParams": true, + "optional": true, + "id": "pinecone_0-input-fetchK-number" + }, + { + "label": "Lambda (for MMR Search)", + "name": "lambda", + "description": "Number between 0 and 1 that determines the degree of diversity among the results, where 0 corresponds to maximum diversity and 1 to minimum diversity. 
Used only when the search type is MMR", + "placeholder": "0.5", + "type": "number", + "additionalParams": true, + "optional": true, + "id": "pinecone_0-input-lambda-number" } ], "inputAnchors": [ @@ -576,7 +615,10 @@ "pineconeIndex": "", "pineconeNamespace": "", "pineconeMetadataFilter": "", - "topK": "" + "topK": "", + "searchType": "similarity", + "fetchK": "", + "lambda": "" }, "outputAnchors": [ { diff --git a/packages/server/marketplaces/chatflows/BabyAGI.json b/packages/server/marketplaces/chatflows/BabyAGI.json index ab387205e..81e3f2307 100644 --- a/packages/server/marketplaces/chatflows/BabyAGI.json +++ b/packages/server/marketplaces/chatflows/BabyAGI.json @@ -166,7 +166,7 @@ "type": "Pinecone", "baseClasses": ["Pinecone", "VectorStoreRetriever", "BaseRetriever"], "category": "Vector Stores", - "description": "Upsert embedded data and perform similarity search upon query using Pinecone, a leading fully managed hosted vector database", + "description": "Upsert embedded data and perform similarity or mmr search using Pinecone, a leading fully managed hosted vector database", "inputParams": [ { "label": "Connect Credential", @@ -207,6 +207,45 @@ "additionalParams": true, "optional": true, "id": "pinecone_0-input-topK-number" + }, + { + "label": "Search Type", + "name": "searchType", + "type": "options", + "default": "similarity", + "options": [ + { + "label": "Similarity", + "name": "similarity" + }, + { + "label": "Max Marginal Relevance", + "name": "mmr" + } + ], + "additionalParams": true, + "optional": true, + "id": "pinecone_0-input-searchType-options" + }, + { + "label": "Fetch K (for MMR Search)", + "name": "fetchK", + "description": "Number of initial documents to fetch for MMR reranking. Default to 20. 
Used only when the search type is MMR", + "placeholder": "20", + "type": "number", + "additionalParams": true, + "optional": true, + "id": "pinecone_0-input-fetchK-number" + }, + { + "label": "Lambda (for MMR Search)", + "name": "lambda", + "description": "Number between 0 and 1 that determines the degree of diversity among the results, where 0 corresponds to maximum diversity and 1 to minimum diversity. Used only when the search type is MMR", + "placeholder": "0.5", + "type": "number", + "additionalParams": true, + "optional": true, + "id": "pinecone_0-input-lambda-number" } ], "inputAnchors": [ @@ -231,7 +270,10 @@ "pineconeIndex": "", "pineconeNamespace": "", "pineconeMetadataFilter": "", - "topK": "" + "topK": "", + "searchType": "similarity", + "fetchK": "", + "lambda": "" }, "outputAnchors": [ { diff --git a/packages/server/marketplaces/chatflows/Conversational Retrieval Agent.json b/packages/server/marketplaces/chatflows/Conversational Retrieval Agent.json index 0e9e41bdd..4378a47d6 100644 --- a/packages/server/marketplaces/chatflows/Conversational Retrieval Agent.json +++ b/packages/server/marketplaces/chatflows/Conversational Retrieval Agent.json @@ -301,7 +301,7 @@ "type": "Pinecone", "baseClasses": ["Pinecone", "VectorStoreRetriever", "BaseRetriever"], "category": "Vector Stores", - "description": "Upsert embedded data and perform similarity search upon query using Pinecone, a leading fully managed hosted vector database", + "description": "Upsert embedded data and perform similarity or mmr search using Pinecone, a leading fully managed hosted vector database", "inputParams": [ { "label": "Connect Credential", @@ -342,6 +342,45 @@ "additionalParams": true, "optional": true, "id": "pinecone_0-input-topK-number" + }, + { + "label": "Search Type", + "name": "searchType", + "type": "options", + "default": "similarity", + "options": [ + { + "label": "Similarity", + "name": "similarity" + }, + { + "label": "Max Marginal Relevance", + "name": "mmr" + } + ], + 
"additionalParams": true, + "optional": true, + "id": "pinecone_0-input-searchType-options" + }, + { + "label": "Fetch K (for MMR Search)", + "name": "fetchK", + "description": "Number of initial documents to fetch for MMR reranking. Default to 20. Used only when the search type is MMR", + "placeholder": "20", + "type": "number", + "additionalParams": true, + "optional": true, + "id": "pinecone_0-input-fetchK-number" + }, + { + "label": "Lambda (for MMR Search)", + "name": "lambda", + "description": "Number between 0 and 1 that determines the degree of diversity among the results, where 0 corresponds to maximum diversity and 1 to minimum diversity. Used only when the search type is MMR", + "placeholder": "0.5", + "type": "number", + "additionalParams": true, + "optional": true, + "id": "pinecone_0-input-lambda-number" } ], "inputAnchors": [ @@ -366,7 +405,10 @@ "pineconeIndex": "", "pineconeNamespace": "", "pineconeMetadataFilter": "", - "topK": "" + "topK": "", + "searchType": "similarity", + "fetchK": "", + "lambda": "" }, "outputAnchors": [ { diff --git a/packages/server/marketplaces/chatflows/Conversational Retrieval QA Chain.json b/packages/server/marketplaces/chatflows/Conversational Retrieval QA Chain.json index e2fd64210..253a1dfc7 100644 --- a/packages/server/marketplaces/chatflows/Conversational Retrieval QA Chain.json +++ b/packages/server/marketplaces/chatflows/Conversational Retrieval QA Chain.json @@ -541,7 +541,7 @@ "type": "Pinecone", "baseClasses": ["Pinecone", "VectorStoreRetriever", "BaseRetriever"], "category": "Vector Stores", - "description": "Upsert embedded data and perform similarity search upon query using Pinecone, a leading fully managed hosted vector database", + "description": "Upsert embedded data and perform similarity or mmr search using Pinecone, a leading fully managed hosted vector database", "inputParams": [ { "label": "Connect Credential", @@ -582,6 +582,45 @@ "additionalParams": true, "optional": true, "id": 
"pinecone_0-input-topK-number" + }, + { + "label": "Search Type", + "name": "searchType", + "type": "options", + "default": "similarity", + "options": [ + { + "label": "Similarity", + "name": "similarity" + }, + { + "label": "Max Marginal Relevance", + "name": "mmr" + } + ], + "additionalParams": true, + "optional": true, + "id": "pinecone_0-input-searchType-options" + }, + { + "label": "Fetch K (for MMR Search)", + "name": "fetchK", + "description": "Number of initial documents to fetch for MMR reranking. Default to 20. Used only when the search type is MMR", + "placeholder": "20", + "type": "number", + "additionalParams": true, + "optional": true, + "id": "pinecone_0-input-fetchK-number" + }, + { + "label": "Lambda (for MMR Search)", + "name": "lambda", + "description": "Number between 0 and 1 that determines the degree of diversity among the results, where 0 corresponds to maximum diversity and 1 to minimum diversity. Used only when the search type is MMR", + "placeholder": "0.5", + "type": "number", + "additionalParams": true, + "optional": true, + "id": "pinecone_0-input-lambda-number" } ], "inputAnchors": [ @@ -606,7 +645,10 @@ "pineconeIndex": "", "pineconeNamespace": "", "pineconeMetadataFilter": "", - "topK": "" + "topK": "", + "searchType": "similarity", + "fetchK": "", + "lambda": "" }, "outputAnchors": [ { diff --git a/packages/server/marketplaces/chatflows/Metadata Filter.json b/packages/server/marketplaces/chatflows/Metadata Filter.json index abd85d366..f7b2fbfb1 100644 --- a/packages/server/marketplaces/chatflows/Metadata Filter.json +++ b/packages/server/marketplaces/chatflows/Metadata Filter.json @@ -625,7 +625,7 @@ "type": "Pinecone", "baseClasses": ["Pinecone", "VectorStoreRetriever", "BaseRetriever"], "category": "Vector Stores", - "description": "Upsert embedded data and perform similarity search upon query using Pinecone, a leading fully managed hosted vector database", + "description": "Upsert embedded data and perform similarity or mmr 
search using Pinecone, a leading fully managed hosted vector database", "inputParams": [ { "label": "Connect Credential", @@ -666,6 +666,45 @@ "additionalParams": true, "optional": true, "id": "pinecone_0-input-topK-number" + }, + { + "label": "Search Type", + "name": "searchType", + "type": "options", + "default": "similarity", + "options": [ + { + "label": "Similarity", + "name": "similarity" + }, + { + "label": "Max Marginal Relevance", + "name": "mmr" + } + ], + "additionalParams": true, + "optional": true, + "id": "pinecone_0-input-searchType-options" + }, + { + "label": "Fetch K (for MMR Search)", + "name": "fetchK", + "description": "Number of initial documents to fetch for MMR reranking. Default to 20. Used only when the search type is MMR", + "placeholder": "20", + "type": "number", + "additionalParams": true, + "optional": true, + "id": "pinecone_0-input-fetchK-number" + }, + { + "label": "Lambda (for MMR Search)", + "name": "lambda", + "description": "Number between 0 and 1 that determines the degree of diversity among the results, where 0 corresponds to maximum diversity and 1 to minimum diversity. 
Used only when the search type is MMR", + "placeholder": "0.5", + "type": "number", + "additionalParams": true, + "optional": true, + "id": "pinecone_0-input-lambda-number" } ], "inputAnchors": [ @@ -690,7 +729,10 @@ "pineconeIndex": "", "pineconeNamespace": "", "pineconeMetadataFilter": "{\"id\":{\"$in\":[\"doc1\",\"doc2\"]}}", - "topK": "" + "topK": "", + "searchType": "similarity", + "fetchK": "", + "lambda": "" }, "outputAnchors": [ { diff --git a/packages/server/marketplaces/chatflows/Multi Retrieval QA Chain.json b/packages/server/marketplaces/chatflows/Multi Retrieval QA Chain.json index 5388d9657..e86b28c93 100644 --- a/packages/server/marketplaces/chatflows/Multi Retrieval QA Chain.json +++ b/packages/server/marketplaces/chatflows/Multi Retrieval QA Chain.json @@ -560,7 +560,7 @@ "type": "Pinecone", "baseClasses": ["Pinecone", "VectorStoreRetriever", "BaseRetriever"], "category": "Vector Stores", - "description": "Upsert embedded data and perform similarity search upon query using Pinecone, a leading fully managed hosted vector database", + "description": "Upsert embedded data and perform similarity or mmr search using Pinecone, a leading fully managed hosted vector database", "inputParams": [ { "label": "Connect Credential", @@ -601,6 +601,45 @@ "additionalParams": true, "optional": true, "id": "pinecone_0-input-topK-number" + }, + { + "label": "Search Type", + "name": "searchType", + "type": "options", + "default": "similarity", + "options": [ + { + "label": "Similarity", + "name": "similarity" + }, + { + "label": "Max Marginal Relevance", + "name": "mmr" + } + ], + "additionalParams": true, + "optional": true, + "id": "pinecone_0-input-searchType-options" + }, + { + "label": "Fetch K (for MMR Search)", + "name": "fetchK", + "description": "Number of initial documents to fetch for MMR reranking. Default to 20. 
Used only when the search type is MMR", + "placeholder": "20", + "type": "number", + "additionalParams": true, + "optional": true, + "id": "pinecone_0-input-fetchK-number" + }, + { + "label": "Lambda (for MMR Search)", + "name": "lambda", + "description": "Number between 0 and 1 that determines the degree of diversity among the results, where 0 corresponds to maximum diversity and 1 to minimum diversity. Used only when the search type is MMR", + "placeholder": "0.5", + "type": "number", + "additionalParams": true, + "optional": true, + "id": "pinecone_0-input-lambda-number" + } ], "inputAnchors": [ @@ -625,7 +664,10 @@ "pineconeIndex": "", "pineconeNamespace": "", "pineconeMetadataFilter": "", - "topK": "" + "topK": "", + "searchType": "similarity", + "fetchK": "", + "lambda": "" }, "outputAnchors": [ { @@ -840,6 +882,45 @@ "additionalParams": true, "optional": true, "id": "supabase_0-input-topK-number" + }, + { + "label": "Search Type", + "name": "searchType", + "type": "options", + "default": "similarity", + "options": [ + { + "label": "Similarity", + "name": "similarity" + }, + { + "label": "Max Marginal Relevance", + "name": "mmr" + } + ], + "additionalParams": true, + "optional": true, + "id": "supabase_0-input-searchType-options" + }, + { + "label": "Fetch K (for MMR Search)", + "name": "fetchK", + "description": "Number of initial documents to fetch for MMR reranking. Default to 20. Used only when the search type is MMR", + "placeholder": "20", + "type": "number", + "additionalParams": true, + "optional": true, + "id": "supabase_0-input-fetchK-number" + }, + { + "label": "Lambda (for MMR Search)", + "name": "lambda", + "description": "Number between 0 and 1 that determines the degree of diversity among the results, where 0 corresponds to maximum diversity and 1 to minimum diversity. 
Used only when the search type is MMR", + "placeholder": "0.5", + "type": "number", + "additionalParams": true, + "optional": true, + "id": "supabase_0-input-lambda-number" } ], "inputAnchors": [ @@ -865,7 +946,10 @@ "tableName": "", "queryName": "", "supabaseMetadataFilter": "", - "topK": "" + "topK": "", + "searchType": "similarity", + "fetchK": "", + "lambda": "" }, "outputAnchors": [ { diff --git a/packages/server/marketplaces/chatflows/WebPage QnA.json b/packages/server/marketplaces/chatflows/WebPage QnA.json index 1b1d8de66..df05feef4 100644 --- a/packages/server/marketplaces/chatflows/WebPage QnA.json +++ b/packages/server/marketplaces/chatflows/WebPage QnA.json @@ -643,7 +643,7 @@ "type": "Pinecone", "baseClasses": ["Pinecone", "VectorStoreRetriever", "BaseRetriever"], "category": "Vector Stores", - "description": "Upsert embedded data and perform similarity search upon query using Pinecone, a leading fully managed hosted vector database", + "description": "Upsert embedded data and perform similarity or mmr search using Pinecone, a leading fully managed hosted vector database", "inputParams": [ { "label": "Connect Credential", @@ -684,6 +684,45 @@ "additionalParams": true, "optional": true, "id": "pinecone_0-input-topK-number" + }, + { + "label": "Search Type", + "name": "searchType", + "type": "options", + "default": "similarity", + "options": [ + { + "label": "Similarity", + "name": "similarity" + }, + { + "label": "Max Marginal Relevance", + "name": "mmr" + } + ], + "additionalParams": true, + "optional": true, + "id": "pinecone_0-input-searchType-options" + }, + { + "label": "Fetch K (for MMR Search)", + "name": "fetchK", + "description": "Number of initial documents to fetch for MMR reranking. Default to 20. 
Used only when the search type is MMR", + "placeholder": "20", + "type": "number", + "additionalParams": true, + "optional": true, + "id": "pinecone_0-input-fetchK-number" + }, + { + "label": "Lambda (for MMR Search)", + "name": "lambda", + "description": "Number between 0 and 1 that determines the degree of diversity among the results, where 0 corresponds to maximum diversity and 1 to minimum diversity. Used only when the search type is MMR", + "placeholder": "0.5", + "type": "number", + "additionalParams": true, + "optional": true, + "id": "pinecone_0-input-lambda-number" } ], "inputAnchors": [ @@ -708,7 +747,10 @@ "pineconeIndex": "", "pineconeNamespace": "", "pineconeMetadataFilter": "", - "topK": "" + "topK": "", + "searchType": "similarity", + "fetchK": "", + "lambda": "" }, "outputAnchors": [ {