Merge pull request #1426 from vinodkiran/FEATURE/RAG-VectorStores-Updates
FEATURE Addition : MMR and Document Compressors
This commit is contained in:
commit
207a3f3dd7
|
|
@ -0,0 +1 @@
|
|||
<svg width="32" height="32" fill="none" xmlns="http://www.w3.org/2000/svg"><path fill-rule="evenodd" clip-rule="evenodd" d="M11.776 18.304c.64 0 1.92-.032 3.712-.768 2.08-.864 6.176-2.4 9.152-4 2.08-1.12 2.976-2.592 2.976-4.576 0-2.72-2.208-4.96-4.96-4.96h-11.52A7.143 7.143 0 0 0 4 11.136c0 3.936 3.008 7.168 7.776 7.168Z" fill="#39594D"/><path fill-rule="evenodd" clip-rule="evenodd" d="M13.728 23.2c0-1.92 1.152-3.68 2.944-4.416l3.616-1.504C23.968 15.776 28 18.464 28 22.432A5.572 5.572 0 0 1 22.432 28h-3.936c-2.624 0-4.768-2.144-4.768-4.8Z" fill="#D18EE2"/><path d="M8.128 19.232A4.138 4.138 0 0 0 4 23.36v.544C4 26.144 5.856 28 8.128 28a4.138 4.138 0 0 0 4.128-4.128v-.544c-.032-2.24-1.856-4.096-4.128-4.096Z" fill="#FF7759"/></svg>
|
||||
|
After Width: | Height: | Size: 738 B |
|
|
@ -0,0 +1,55 @@
|
|||
import { Callbacks } from 'langchain/callbacks'
|
||||
import { Document } from 'langchain/document'
|
||||
import { BaseDocumentCompressor } from 'langchain/retrievers/document_compressors'
|
||||
import axios from 'axios'
|
||||
export class CohereRerank extends BaseDocumentCompressor {
|
||||
private cohereAPIKey: any
|
||||
private COHERE_API_URL = 'https://api.cohere.ai/v1/rerank'
|
||||
private readonly model: string
|
||||
private readonly k: number
|
||||
private readonly maxChunksPerDoc: number
|
||||
constructor(cohereAPIKey: string, model: string, k: number, maxChunksPerDoc: number) {
|
||||
super()
|
||||
this.cohereAPIKey = cohereAPIKey
|
||||
this.model = model
|
||||
this.k = k
|
||||
this.maxChunksPerDoc = maxChunksPerDoc
|
||||
}
|
||||
async compressDocuments(
|
||||
documents: Document<Record<string, any>>[],
|
||||
query: string,
|
||||
_?: Callbacks | undefined
|
||||
): Promise<Document<Record<string, any>>[]> {
|
||||
// avoid empty api call
|
||||
if (documents.length === 0) {
|
||||
return []
|
||||
}
|
||||
const config = {
|
||||
headers: {
|
||||
Authorization: `Bearer ${this.cohereAPIKey}`,
|
||||
'Content-Type': 'application/json',
|
||||
Accept: 'application/json'
|
||||
}
|
||||
}
|
||||
const data = {
|
||||
model: this.model,
|
||||
topN: this.k,
|
||||
max_chunks_per_doc: this.maxChunksPerDoc,
|
||||
query: query,
|
||||
return_documents: false,
|
||||
documents: documents.map((doc) => doc.pageContent)
|
||||
}
|
||||
try {
|
||||
let returnedDocs = await axios.post(this.COHERE_API_URL, data, config)
|
||||
const finalResults: Document<Record<string, any>>[] = []
|
||||
returnedDocs.data.results.forEach((result: any) => {
|
||||
const doc = documents[result.index]
|
||||
doc.metadata.relevance_score = result.relevance_score
|
||||
finalResults.push(doc)
|
||||
})
|
||||
return finalResults
|
||||
} catch (error) {
|
||||
return documents
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,142 @@
|
|||
import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
|
||||
import { BaseRetriever } from 'langchain/schema/retriever'
|
||||
import { ContextualCompressionRetriever } from 'langchain/retrievers/contextual_compression'
|
||||
import { getCredentialData, getCredentialParam, handleEscapeCharacters } from '../../../src'
|
||||
import { CohereRerank } from './CohereRerank'
|
||||
import { VectorStoreRetriever } from 'langchain/vectorstores/base'
|
||||
|
||||
class CohereRerankRetriever_Retrievers implements INode {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
description: string
|
||||
type: string
|
||||
icon: string
|
||||
category: string
|
||||
baseClasses: string[]
|
||||
inputs: INodeParams[]
|
||||
credential: INodeParams
|
||||
badge: string
|
||||
outputs: INodeOutputsValue[]
|
||||
|
||||
constructor() {
|
||||
this.label = 'Cohere Rerank Retriever'
|
||||
this.name = 'cohereRerankRetriever'
|
||||
this.version = 1.0
|
||||
this.type = 'Cohere Rerank Retriever'
|
||||
this.icon = 'Cohere.svg'
|
||||
this.category = 'Retrievers'
|
||||
this.badge = 'NEW'
|
||||
this.description = 'Cohere Rerank indexes the documents from most to least semantically relevant to the query.'
|
||||
this.baseClasses = [this.type, 'BaseRetriever']
|
||||
this.credential = {
|
||||
label: 'Connect Credential',
|
||||
name: 'credential',
|
||||
type: 'credential',
|
||||
credentialNames: ['cohereApi']
|
||||
}
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'Vector Store Retriever',
|
||||
name: 'baseRetriever',
|
||||
type: 'VectorStoreRetriever'
|
||||
},
|
||||
{
|
||||
label: 'Model Name',
|
||||
name: 'model',
|
||||
type: 'options',
|
||||
options: [
|
||||
{
|
||||
label: 'rerank-english-v2.0',
|
||||
name: 'rerank-english-v2.0'
|
||||
},
|
||||
{
|
||||
label: 'rerank-multilingual-v2.0',
|
||||
name: 'rerank-multilingual-v2.0'
|
||||
}
|
||||
],
|
||||
default: 'rerank-english-v2.0',
|
||||
optional: true
|
||||
},
|
||||
{
|
||||
label: 'Query',
|
||||
name: 'query',
|
||||
type: 'string',
|
||||
description: 'Query to retrieve documents from retriever. If not specified, user question will be used',
|
||||
optional: true,
|
||||
acceptVariable: true
|
||||
},
|
||||
{
|
||||
label: 'Top K',
|
||||
name: 'topK',
|
||||
description: 'Number of top results to fetch. Default to the TopK of the Base Retriever',
|
||||
placeholder: '4',
|
||||
type: 'number',
|
||||
additionalParams: true,
|
||||
optional: true
|
||||
},
|
||||
{
|
||||
label: 'Max Chunks Per Doc',
|
||||
name: 'maxChunksPerDoc',
|
||||
description: 'The maximum number of chunks to produce internally from a document. Default to 10',
|
||||
placeholder: '10',
|
||||
type: 'number',
|
||||
additionalParams: true,
|
||||
optional: true
|
||||
}
|
||||
]
|
||||
this.outputs = [
|
||||
{
|
||||
label: 'Cohere Rerank Retriever',
|
||||
name: 'retriever',
|
||||
baseClasses: this.baseClasses
|
||||
},
|
||||
{
|
||||
label: 'Document',
|
||||
name: 'document',
|
||||
baseClasses: ['Document']
|
||||
},
|
||||
{
|
||||
label: 'Text',
|
||||
name: 'text',
|
||||
baseClasses: ['string', 'json']
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
async init(nodeData: INodeData, input: string, options: ICommonObject): Promise<any> {
|
||||
const baseRetriever = nodeData.inputs?.baseRetriever as BaseRetriever
|
||||
const model = nodeData.inputs?.model as string
|
||||
const query = nodeData.inputs?.query as string
|
||||
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
|
||||
const cohereApiKey = getCredentialParam('cohereApiKey', credentialData, nodeData)
|
||||
const topK = nodeData.inputs?.topK as string
|
||||
const k = topK ? parseFloat(topK) : (baseRetriever as VectorStoreRetriever).k ?? 4
|
||||
const maxChunksPerDoc = nodeData.inputs?.maxChunksPerDoc as string
|
||||
const max_chunks_per_doc = maxChunksPerDoc ? parseFloat(maxChunksPerDoc) : 10
|
||||
const output = nodeData.outputs?.output as string
|
||||
|
||||
const cohereCompressor = new CohereRerank(cohereApiKey, model, k, max_chunks_per_doc)
|
||||
|
||||
const retriever = new ContextualCompressionRetriever({
|
||||
baseCompressor: cohereCompressor,
|
||||
baseRetriever: baseRetriever
|
||||
})
|
||||
|
||||
if (output === 'retriever') return retriever
|
||||
else if (output === 'document') return await retriever.getRelevantDocuments(query ? query : input)
|
||||
else if (output === 'text') {
|
||||
let finaltext = ''
|
||||
|
||||
const docs = await retriever.getRelevantDocuments(query ? query : input)
|
||||
|
||||
for (const doc of docs) finaltext += `${doc.pageContent}\n`
|
||||
|
||||
return handleEscapeCharacters(finaltext, false)
|
||||
}
|
||||
|
||||
return retriever
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { nodeClass: CohereRerankRetriever_Retrievers }
|
||||
|
|
@ -0,0 +1,133 @@
|
|||
import { INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
|
||||
import { BaseRetriever } from 'langchain/schema/retriever'
|
||||
import { Embeddings } from 'langchain/embeddings/base'
|
||||
import { ContextualCompressionRetriever } from 'langchain/retrievers/contextual_compression'
|
||||
import { EmbeddingsFilter } from 'langchain/retrievers/document_compressors/embeddings_filter'
|
||||
import { handleEscapeCharacters } from '../../../src/utils'
|
||||
|
||||
class EmbeddingsFilterRetriever_Retrievers implements INode {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
description: string
|
||||
type: string
|
||||
icon: string
|
||||
category: string
|
||||
baseClasses: string[]
|
||||
inputs: INodeParams[]
|
||||
outputs: INodeOutputsValue[]
|
||||
badge: string
|
||||
|
||||
constructor() {
|
||||
this.label = 'Embeddings Filter Retriever'
|
||||
this.name = 'embeddingsFilterRetriever'
|
||||
this.version = 1.0
|
||||
this.type = 'EmbeddingsFilterRetriever'
|
||||
this.icon = 'compressionRetriever.svg'
|
||||
this.category = 'Retrievers'
|
||||
this.badge = 'NEW'
|
||||
this.description = 'A document compressor that uses embeddings to drop documents unrelated to the query'
|
||||
this.baseClasses = [this.type, 'BaseRetriever']
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'Vector Store Retriever',
|
||||
name: 'baseRetriever',
|
||||
type: 'VectorStoreRetriever'
|
||||
},
|
||||
{
|
||||
label: 'Embeddings',
|
||||
name: 'embeddings',
|
||||
type: 'Embeddings'
|
||||
},
|
||||
{
|
||||
label: 'Query',
|
||||
name: 'query',
|
||||
type: 'string',
|
||||
description: 'Query to retrieve documents from retriever. If not specified, user question will be used',
|
||||
optional: true,
|
||||
acceptVariable: true
|
||||
},
|
||||
{
|
||||
label: 'Similarity Threshold',
|
||||
name: 'similarityThreshold',
|
||||
description:
|
||||
'Threshold for determining when two documents are similar enough to be considered redundant. Must be specified if `k` is not set',
|
||||
type: 'number',
|
||||
default: 0.8,
|
||||
step: 0.1,
|
||||
optional: true
|
||||
},
|
||||
{
|
||||
label: 'K',
|
||||
name: 'k',
|
||||
description:
|
||||
'The number of relevant documents to return. Can be explicitly set to undefined, in which case similarity_threshold must be specified. Defaults to 20',
|
||||
type: 'number',
|
||||
default: 20,
|
||||
step: 1,
|
||||
optional: true,
|
||||
additionalParams: true
|
||||
}
|
||||
]
|
||||
this.outputs = [
|
||||
{
|
||||
label: 'Embeddings Filter Retriever',
|
||||
name: 'retriever',
|
||||
baseClasses: this.baseClasses
|
||||
},
|
||||
{
|
||||
label: 'Document',
|
||||
name: 'document',
|
||||
baseClasses: ['Document']
|
||||
},
|
||||
{
|
||||
label: 'Text',
|
||||
name: 'text',
|
||||
baseClasses: ['string', 'json']
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
async init(nodeData: INodeData, input: string): Promise<any> {
|
||||
const baseRetriever = nodeData.inputs?.baseRetriever as BaseRetriever
|
||||
const embeddings = nodeData.inputs?.embeddings as Embeddings
|
||||
const query = nodeData.inputs?.query as string
|
||||
const similarityThreshold = nodeData.inputs?.similarityThreshold as string
|
||||
const k = nodeData.inputs?.k as string
|
||||
const output = nodeData.outputs?.output as string
|
||||
|
||||
if (k === undefined && similarityThreshold === undefined) {
|
||||
throw new Error(`Must specify one of "k" or "similarity_threshold".`)
|
||||
}
|
||||
|
||||
const similarityThresholdNumber = similarityThreshold ? parseFloat(similarityThreshold) : 0.8
|
||||
const kNumber = k ? parseFloat(k) : undefined
|
||||
|
||||
const baseCompressor = new EmbeddingsFilter({
|
||||
embeddings: embeddings,
|
||||
similarityThreshold: similarityThresholdNumber,
|
||||
k: kNumber
|
||||
})
|
||||
|
||||
const retriever = new ContextualCompressionRetriever({
|
||||
baseCompressor,
|
||||
baseRetriever: baseRetriever
|
||||
})
|
||||
|
||||
if (output === 'retriever') return retriever
|
||||
else if (output === 'document') return await retriever.getRelevantDocuments(query ? query : input)
|
||||
else if (output === 'text') {
|
||||
let finaltext = ''
|
||||
|
||||
const docs = await retriever.getRelevantDocuments(query ? query : input)
|
||||
|
||||
for (const doc of docs) finaltext += `${doc.pageContent}\n`
|
||||
|
||||
return handleEscapeCharacters(finaltext, false)
|
||||
}
|
||||
|
||||
return retriever
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { nodeClass: EmbeddingsFilterRetriever_Retrievers }
|
||||
|
|
@ -0,0 +1,7 @@
|
|||
<svg xmlns="http://www.w3.org/2000/svg" class="icon icon-tabler icon-tabler-chart-bar" width="24" height="24" viewBox="0 0 24 24" stroke-width="2" stroke="currentColor" fill="none" stroke-linecap="round" stroke-linejoin="round">
|
||||
<path stroke="none" d="M0 0h24v24H0z" fill="none"/>
|
||||
<path d="M3 12m0 1a1 1 0 0 1 1 -1h4a1 1 0 0 1 1 1v6a1 1 0 0 1 -1 1h-4a1 1 0 0 1 -1 -1z" />
|
||||
<path d="M9 8m0 1a1 1 0 0 1 1 -1h4a1 1 0 0 1 1 1v10a1 1 0 0 1 -1 1h-4a1 1 0 0 1 -1 -1z" />
|
||||
<path d="M15 4m0 1a1 1 0 0 1 1 -1h4a1 1 0 0 1 1 1v14a1 1 0 0 1 -1 1h-4a1 1 0 0 1 -1 -1z" />
|
||||
<path d="M4 20l14 0" />
|
||||
</svg>
|
||||
|
After Width: | Height: | Size: 600 B |
|
|
@ -1,8 +1,9 @@
|
|||
import { VectorStore } from 'langchain/vectorstores/base'
|
||||
import { INode, INodeData, INodeParams } from '../../../src/Interface'
|
||||
import { INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
|
||||
import { HydeRetriever, HydeRetrieverOptions, PromptKey } from 'langchain/retrievers/hyde'
|
||||
import { BaseLanguageModel } from 'langchain/base_language'
|
||||
import { PromptTemplate } from 'langchain/prompts'
|
||||
import { handleEscapeCharacters } from '../../../src/utils'
|
||||
|
||||
class HydeRetriever_Retrievers implements INode {
|
||||
label: string
|
||||
|
|
@ -14,11 +15,12 @@ class HydeRetriever_Retrievers implements INode {
|
|||
category: string
|
||||
baseClasses: string[]
|
||||
inputs: INodeParams[]
|
||||
outputs: INodeOutputsValue[]
|
||||
|
||||
constructor() {
|
||||
this.label = 'Hyde Retriever'
|
||||
this.label = 'HyDE Retriever'
|
||||
this.name = 'HydeRetriever'
|
||||
this.version = 2.0
|
||||
this.version = 3.0
|
||||
this.type = 'HydeRetriever'
|
||||
this.icon = 'hyderetriever.svg'
|
||||
this.category = 'Retrievers'
|
||||
|
|
@ -35,6 +37,14 @@ class HydeRetriever_Retrievers implements INode {
|
|||
name: 'vectorStore',
|
||||
type: 'VectorStore'
|
||||
},
|
||||
{
|
||||
label: 'Query',
|
||||
name: 'query',
|
||||
type: 'string',
|
||||
description: 'Query to retrieve documents from retriever. If not specified, user question will be used',
|
||||
optional: true,
|
||||
acceptVariable: true
|
||||
},
|
||||
{
|
||||
label: 'Select Defined Prompt',
|
||||
name: 'promptKey',
|
||||
|
|
@ -121,15 +131,34 @@ Passage:`
|
|||
optional: true
|
||||
}
|
||||
]
|
||||
this.outputs = [
|
||||
{
|
||||
label: 'HyDE Retriever',
|
||||
name: 'retriever',
|
||||
baseClasses: this.baseClasses
|
||||
},
|
||||
{
|
||||
label: 'Document',
|
||||
name: 'document',
|
||||
baseClasses: ['Document']
|
||||
},
|
||||
{
|
||||
label: 'Text',
|
||||
name: 'text',
|
||||
baseClasses: ['string', 'json']
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
async init(nodeData: INodeData): Promise<any> {
|
||||
async init(nodeData: INodeData, input: string): Promise<any> {
|
||||
const llm = nodeData.inputs?.model as BaseLanguageModel
|
||||
const vectorStore = nodeData.inputs?.vectorStore as VectorStore
|
||||
const promptKey = nodeData.inputs?.promptKey as PromptKey
|
||||
const customPrompt = nodeData.inputs?.customPrompt as string
|
||||
const query = nodeData.inputs?.query as string
|
||||
const topK = nodeData.inputs?.topK as string
|
||||
const k = topK ? parseFloat(topK) : 4
|
||||
const output = nodeData.outputs?.output as string
|
||||
|
||||
const obj: HydeRetrieverOptions<any> = {
|
||||
llm,
|
||||
|
|
@ -141,6 +170,19 @@ Passage:`
|
|||
else if (promptKey) obj.promptTemplate = promptKey
|
||||
|
||||
const retriever = new HydeRetriever(obj)
|
||||
|
||||
if (output === 'retriever') return retriever
|
||||
else if (output === 'document') return await retriever.getRelevantDocuments(query ? query : input)
|
||||
else if (output === 'text') {
|
||||
let finaltext = ''
|
||||
|
||||
const docs = await retriever.getRelevantDocuments(query ? query : input)
|
||||
|
||||
for (const doc of docs) finaltext += `${doc.pageContent}\n`
|
||||
|
||||
return handleEscapeCharacters(finaltext, false)
|
||||
}
|
||||
|
||||
return retriever
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -0,0 +1,100 @@
|
|||
import { INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
|
||||
import { BaseRetriever } from 'langchain/schema/retriever'
|
||||
import { ContextualCompressionRetriever } from 'langchain/retrievers/contextual_compression'
|
||||
import { BaseLanguageModel } from 'langchain/base_language'
|
||||
import { LLMChainExtractor } from 'langchain/retrievers/document_compressors/chain_extract'
|
||||
import { handleEscapeCharacters } from '../../../src/utils'
|
||||
|
||||
class LLMFilterCompressionRetriever_Retrievers implements INode {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
description: string
|
||||
type: string
|
||||
icon: string
|
||||
category: string
|
||||
baseClasses: string[]
|
||||
inputs: INodeParams[]
|
||||
outputs: INodeOutputsValue[]
|
||||
badge: string
|
||||
|
||||
constructor() {
|
||||
this.label = 'LLM Filter Retriever'
|
||||
this.name = 'llmFilterRetriever'
|
||||
this.version = 1.0
|
||||
this.type = 'LLMFilterRetriever'
|
||||
this.icon = 'llmFilterRetriever.svg'
|
||||
this.category = 'Retrievers'
|
||||
this.badge = 'NEW'
|
||||
this.description =
|
||||
'Iterate over the initially returned documents and extract, from each, only the content that is relevant to the query'
|
||||
this.baseClasses = [this.type, 'BaseRetriever']
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'Vector Store Retriever',
|
||||
name: 'baseRetriever',
|
||||
type: 'VectorStoreRetriever'
|
||||
},
|
||||
{
|
||||
label: 'Language Model',
|
||||
name: 'model',
|
||||
type: 'BaseLanguageModel'
|
||||
},
|
||||
{
|
||||
label: 'Query',
|
||||
name: 'query',
|
||||
type: 'string',
|
||||
description: 'Query to retrieve documents from retriever. If not specified, user question will be used',
|
||||
optional: true,
|
||||
acceptVariable: true
|
||||
}
|
||||
]
|
||||
this.outputs = [
|
||||
{
|
||||
label: 'LLM Filter Retriever',
|
||||
name: 'retriever',
|
||||
baseClasses: this.baseClasses
|
||||
},
|
||||
{
|
||||
label: 'Document',
|
||||
name: 'document',
|
||||
baseClasses: ['Document']
|
||||
},
|
||||
{
|
||||
label: 'Text',
|
||||
name: 'text',
|
||||
baseClasses: ['string', 'json']
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
async init(nodeData: INodeData, input: string): Promise<any> {
|
||||
const baseRetriever = nodeData.inputs?.baseRetriever as BaseRetriever
|
||||
const model = nodeData.inputs?.model as BaseLanguageModel
|
||||
const query = nodeData.inputs?.query as string
|
||||
const output = nodeData.outputs?.output as string
|
||||
|
||||
if (!model) throw new Error('There must be a LLM model connected to LLM Filter Retriever')
|
||||
|
||||
const retriever = new ContextualCompressionRetriever({
|
||||
baseCompressor: LLMChainExtractor.fromLLM(model),
|
||||
baseRetriever: baseRetriever
|
||||
})
|
||||
|
||||
if (output === 'retriever') return retriever
|
||||
else if (output === 'document') return await retriever.getRelevantDocuments(query ? query : input)
|
||||
else if (output === 'text') {
|
||||
let finaltext = ''
|
||||
|
||||
const docs = await retriever.getRelevantDocuments(query ? query : input)
|
||||
|
||||
for (const doc of docs) finaltext += `${doc.pageContent}\n`
|
||||
|
||||
return handleEscapeCharacters(finaltext, false)
|
||||
}
|
||||
|
||||
return retriever
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { nodeClass: LLMFilterCompressionRetriever_Retrievers }
|
||||
|
|
@ -0,0 +1 @@
|
|||
<svg xmlns="http://www.w3.org/2000/svg" class="icon icon-tabler icon-tabler-filter-check" width="24" height="24" viewBox="0 0 24 24" stroke-width="2" stroke="currentColor" fill="none" stroke-linecap="round" stroke-linejoin="round"><path stroke="none" d="M0 0h24v24H0z" fill="none"/><path d="M11.18 20.274l-2.18 .726v-8.5l-4.48 -4.928a2 2 0 0 1 -.52 -1.345v-2.227h16v2.172a2 2 0 0 1 -.586 1.414l-4.414 4.414v3" /><path d="M15 19l2 2l4 -4" /></svg>
|
||||
|
After Width: | Height: | Size: 446 B |
|
|
@ -0,0 +1,136 @@
|
|||
import { INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
|
||||
import { BaseLanguageModel } from 'langchain/base_language'
|
||||
import { ContextualCompressionRetriever } from 'langchain/retrievers/contextual_compression'
|
||||
import { BaseRetriever } from 'langchain/schema/retriever'
|
||||
import { ReciprocalRankFusion } from './ReciprocalRankFusion'
|
||||
import { VectorStoreRetriever } from 'langchain/vectorstores/base'
|
||||
import { handleEscapeCharacters } from '../../../src/utils'
|
||||
|
||||
class RRFRetriever_Retrievers implements INode {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
description: string
|
||||
type: string
|
||||
icon: string
|
||||
category: string
|
||||
baseClasses: string[]
|
||||
inputs: INodeParams[]
|
||||
badge: string
|
||||
outputs: INodeOutputsValue[]
|
||||
|
||||
constructor() {
|
||||
this.label = 'Reciprocal Rank Fusion Retriever'
|
||||
this.name = 'RRFRetriever'
|
||||
this.version = 1.0
|
||||
this.type = 'RRFRetriever'
|
||||
this.badge = 'NEW'
|
||||
this.icon = 'rrfRetriever.svg'
|
||||
this.category = 'Retrievers'
|
||||
this.description = 'Reciprocal Rank Fusion to re-rank search results by multiple query generation.'
|
||||
this.baseClasses = [this.type, 'BaseRetriever']
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'Vector Store Retriever',
|
||||
name: 'baseRetriever',
|
||||
type: 'VectorStoreRetriever'
|
||||
},
|
||||
{
|
||||
label: 'Language Model',
|
||||
name: 'model',
|
||||
type: 'BaseLanguageModel'
|
||||
},
|
||||
{
|
||||
label: 'Query',
|
||||
name: 'query',
|
||||
type: 'string',
|
||||
description: 'Query to retrieve documents from retriever. If not specified, user question will be used',
|
||||
optional: true,
|
||||
acceptVariable: true
|
||||
},
|
||||
{
|
||||
label: 'Query Count',
|
||||
name: 'queryCount',
|
||||
description: 'Number of synthetic queries to generate. Default to 4',
|
||||
placeholder: '4',
|
||||
type: 'number',
|
||||
default: 4,
|
||||
additionalParams: true,
|
||||
optional: true
|
||||
},
|
||||
{
|
||||
label: 'Top K',
|
||||
name: 'topK',
|
||||
description: 'Number of top results to fetch. Default to the TopK of the Base Retriever',
|
||||
placeholder: '0',
|
||||
type: 'number',
|
||||
additionalParams: true,
|
||||
optional: true
|
||||
},
|
||||
{
|
||||
label: 'Constant',
|
||||
name: 'c',
|
||||
description:
|
||||
'A constant added to the rank, controlling the balance between the importance of high-ranked items and the consideration given to lower-ranked items.\n' +
|
||||
'Default is 60',
|
||||
placeholder: '60',
|
||||
type: 'number',
|
||||
default: 60,
|
||||
additionalParams: true,
|
||||
optional: true
|
||||
}
|
||||
]
|
||||
this.outputs = [
|
||||
{
|
||||
label: 'Reciprocal Rank Fusion Retriever',
|
||||
name: 'retriever',
|
||||
baseClasses: this.baseClasses
|
||||
},
|
||||
{
|
||||
label: 'Document',
|
||||
name: 'document',
|
||||
baseClasses: ['Document']
|
||||
},
|
||||
{
|
||||
label: 'Text',
|
||||
name: 'text',
|
||||
baseClasses: ['string', 'json']
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
async init(nodeData: INodeData, input: string): Promise<any> {
|
||||
const llm = nodeData.inputs?.model as BaseLanguageModel
|
||||
const baseRetriever = nodeData.inputs?.baseRetriever as BaseRetriever
|
||||
const query = nodeData.inputs?.query as string
|
||||
const queryCount = nodeData.inputs?.queryCount as string
|
||||
const q = queryCount ? parseFloat(queryCount) : 4
|
||||
const topK = nodeData.inputs?.topK as string
|
||||
const k = topK ? parseFloat(topK) : (baseRetriever as VectorStoreRetriever).k ?? 4
|
||||
const constantC = nodeData.inputs?.c as string
|
||||
const c = topK ? parseFloat(constantC) : 60
|
||||
const output = nodeData.outputs?.output as string
|
||||
|
||||
const ragFusion = new ReciprocalRankFusion(llm, baseRetriever as VectorStoreRetriever, q, k, c)
|
||||
const retriever = new ContextualCompressionRetriever({
|
||||
baseCompressor: ragFusion,
|
||||
baseRetriever: baseRetriever
|
||||
})
|
||||
|
||||
if (output === 'retriever') return retriever
|
||||
else if (output === 'document') return await retriever.getRelevantDocuments(query ? query : input)
|
||||
else if (output === 'text') {
|
||||
let finaltext = ''
|
||||
|
||||
const docs = await retriever.getRelevantDocuments(query ? query : input)
|
||||
|
||||
for (const doc of docs) finaltext += `${doc.pageContent}\n`
|
||||
|
||||
return handleEscapeCharacters(finaltext, false)
|
||||
}
|
||||
|
||||
return retriever
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { nodeClass: RRFRetriever_Retrievers }
|
||||
|
|
@ -0,0 +1,96 @@
|
|||
import { BaseDocumentCompressor } from 'langchain/retrievers/document_compressors'
|
||||
import { Document } from 'langchain/document'
|
||||
import { Callbacks } from 'langchain/callbacks'
|
||||
import { BaseLanguageModel } from 'langchain/base_language'
|
||||
import { ChatPromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate } from 'langchain/prompts'
|
||||
import { LLMChain } from 'langchain/chains'
|
||||
import { VectorStoreRetriever } from 'langchain/vectorstores/base'
|
||||
|
||||
export class ReciprocalRankFusion extends BaseDocumentCompressor {
|
||||
private readonly llm: BaseLanguageModel
|
||||
private readonly queryCount: number
|
||||
private readonly topK: number
|
||||
private readonly c: number
|
||||
private baseRetriever: VectorStoreRetriever
|
||||
constructor(llm: BaseLanguageModel, baseRetriever: VectorStoreRetriever, queryCount: number, topK: number, c: number) {
|
||||
super()
|
||||
this.queryCount = queryCount
|
||||
this.llm = llm
|
||||
this.baseRetriever = baseRetriever
|
||||
this.topK = topK
|
||||
this.c = c
|
||||
}
|
||||
async compressDocuments(
|
||||
documents: Document<Record<string, any>>[],
|
||||
query: string,
|
||||
_?: Callbacks | undefined
|
||||
): Promise<Document<Record<string, any>>[]> {
|
||||
// avoid empty api call
|
||||
if (documents.length === 0) {
|
||||
return []
|
||||
}
|
||||
const chatPrompt = ChatPromptTemplate.fromMessages([
|
||||
SystemMessagePromptTemplate.fromTemplate(
|
||||
'You are a helpful assistant that generates multiple search queries based on a single input query.'
|
||||
),
|
||||
HumanMessagePromptTemplate.fromTemplate(
|
||||
'Generate multiple search queries related to: {input}. Provide these alternative questions separated by newlines, do not add any numbers.'
|
||||
),
|
||||
HumanMessagePromptTemplate.fromTemplate('OUTPUT (' + this.queryCount + ' queries):')
|
||||
])
|
||||
const llmChain = new LLMChain({
|
||||
llm: this.llm,
|
||||
prompt: chatPrompt
|
||||
})
|
||||
const multipleQueries = await llmChain.call({ input: query })
|
||||
const queries = []
|
||||
queries.push(query)
|
||||
multipleQueries.text.split('\n').map((q: string) => {
|
||||
queries.push(q)
|
||||
})
|
||||
const docList: Document<Record<string, any>>[][] = []
|
||||
for (let i = 0; i < queries.length; i++) {
|
||||
const resultOne = await this.baseRetriever.vectorStore.similaritySearch(queries[i], 5)
|
||||
const docs: any[] = []
|
||||
resultOne.forEach((doc) => {
|
||||
docs.push(doc)
|
||||
})
|
||||
docList.push(docs)
|
||||
}
|
||||
|
||||
return this.reciprocalRankFunction(docList, this.c)
|
||||
}
|
||||
|
||||
reciprocalRankFunction(docList: Document<Record<string, any>>[][], k: number): Document<Record<string, any>>[] {
|
||||
docList.forEach((docs: Document<Record<string, any>>[]) => {
|
||||
docs.forEach((doc: any, index: number) => {
|
||||
let rank = index + 1
|
||||
if (doc.metadata.relevancy_score) {
|
||||
doc.metadata.relevancy_score += 1 / (rank + k)
|
||||
} else {
|
||||
doc.metadata.relevancy_score = 1 / (rank + k)
|
||||
}
|
||||
})
|
||||
})
|
||||
const scoreArray: any[] = []
|
||||
docList.forEach((docs: Document<Record<string, any>>[]) => {
|
||||
docs.forEach((doc: any) => {
|
||||
scoreArray.push(doc.metadata.relevancy_score)
|
||||
})
|
||||
})
|
||||
scoreArray.sort((a, b) => b - a)
|
||||
const rerankedDocuments: Document<Record<string, any>>[] = []
|
||||
const seenScores: any[] = []
|
||||
scoreArray.forEach((score) => {
|
||||
docList.forEach((docs) => {
|
||||
docs.forEach((doc: any) => {
|
||||
if (doc.metadata.relevancy_score === score && seenScores.indexOf(score) === -1) {
|
||||
rerankedDocuments.push(doc)
|
||||
seenScores.push(doc.metadata.relevancy_score)
|
||||
}
|
||||
})
|
||||
})
|
||||
})
|
||||
return rerankedDocuments.splice(0, this.topK)
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1 @@
|
|||
<svg xmlns="http://www.w3.org/2000/svg" class="icon icon-tabler icon-tabler-math-x-divide-y-2" width="24" height="24" viewBox="0 0 24 24" stroke-width="2" stroke="currentColor" fill="none" stroke-linecap="round" stroke-linejoin="round"><path stroke="none" d="M0 0h24v24H0z" fill="none"/><path d="M3 21l18 -18" /><path d="M15 14l3 4.5" /><path d="M21 14l-4.5 7" /><path d="M3 4l6 6" /><path d="M3 10l6 -6" /></svg>
|
||||
|
After Width: | Height: | Size: 413 B |
|
|
@ -18,7 +18,7 @@ class SimilarityThresholdRetriever_Retrievers implements INode {
|
|||
constructor() {
|
||||
this.label = 'Similarity Score Threshold Retriever'
|
||||
this.name = 'similarityThresholdRetriever'
|
||||
this.version = 1.0
|
||||
this.version = 2.0
|
||||
this.type = 'SimilarityThresholdRetriever'
|
||||
this.icon = 'similaritythreshold.svg'
|
||||
this.category = 'Retrievers'
|
||||
|
|
@ -30,6 +30,14 @@ class SimilarityThresholdRetriever_Retrievers implements INode {
|
|||
name: 'vectorStore',
|
||||
type: 'VectorStore'
|
||||
},
|
||||
{
|
||||
label: 'Query',
|
||||
name: 'query',
|
||||
type: 'string',
|
||||
description: 'Query to retrieve documents from retriever. If not specified, user question will be used',
|
||||
optional: true,
|
||||
acceptVariable: true
|
||||
},
|
||||
{
|
||||
label: 'Minimum Similarity Score (%)',
|
||||
name: 'minSimilarityScore',
|
||||
|
|
@ -44,7 +52,8 @@ class SimilarityThresholdRetriever_Retrievers implements INode {
|
|||
description: `The maximum number of results to fetch`,
|
||||
type: 'number',
|
||||
default: 20,
|
||||
step: 1
|
||||
step: 1,
|
||||
additionalParams: true
|
||||
},
|
||||
{
|
||||
label: 'K Increment',
|
||||
|
|
@ -52,7 +61,8 @@ class SimilarityThresholdRetriever_Retrievers implements INode {
|
|||
description: `How much to increase K by each time. It'll fetch N results, then N + kIncrement, then N + kIncrement * 2, etc.`,
|
||||
type: 'number',
|
||||
default: 2,
|
||||
step: 1
|
||||
step: 1,
|
||||
additionalParams: true
|
||||
}
|
||||
]
|
||||
this.outputs = [
|
||||
|
|
@ -77,6 +87,7 @@ class SimilarityThresholdRetriever_Retrievers implements INode {
|
|||
async init(nodeData: INodeData, input: string): Promise<any> {
|
||||
const vectorStore = nodeData.inputs?.vectorStore as VectorStore
|
||||
const minSimilarityScore = nodeData.inputs?.minSimilarityScore as number
|
||||
const query = nodeData.inputs?.query as string
|
||||
const maxK = nodeData.inputs?.maxK as string
|
||||
const kIncrement = nodeData.inputs?.kIncrement as string
|
||||
|
||||
|
|
@ -89,11 +100,11 @@ class SimilarityThresholdRetriever_Retrievers implements INode {
|
|||
})
|
||||
|
||||
if (output === 'retriever') return retriever
|
||||
else if (output === 'document') return await retriever.getRelevantDocuments(input)
|
||||
else if (output === 'document') return await retriever.getRelevantDocuments(query ? query : input)
|
||||
else if (output === 'text') {
|
||||
let finaltext = ''
|
||||
|
||||
const docs = await retriever.getRelevantDocuments(input)
|
||||
const docs = await retriever.getRelevantDocuments(query ? query : input)
|
||||
|
||||
for (const doc of docs) finaltext += `${doc.pageContent}\n`
|
||||
|
||||
|
|
|
|||
|
|
@ -4,6 +4,7 @@ import { Document } from 'langchain/document'
|
|||
import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
|
||||
import { getBaseClasses, getCredentialData } from '../../../src/utils'
|
||||
import { AstraDBVectorStore, AstraLibArgs } from '@langchain/community/vectorstores/astradb'
|
||||
import { addMMRInputParams, resolveVectorStoreOrRetriever } from '../VectorStoreUtils'
|
||||
|
||||
class Astra_VectorStores implements INode {
|
||||
label: string
|
||||
|
|
@ -26,7 +27,7 @@ class Astra_VectorStores implements INode {
|
|||
this.type = 'Astra'
|
||||
this.icon = 'astra.svg'
|
||||
this.category = 'Vector Stores'
|
||||
this.description = `Upsert embedded data and perform similarity search upon query using DataStax Astra DB, a serverless vector database that’s perfect for managing mission-critical AI workloads`
|
||||
this.description = `Upsert embedded data and perform similarity or mmr search upon query using DataStax Astra DB, a serverless vector database that’s perfect for managing mission-critical AI workloads`
|
||||
this.baseClasses = [this.type, 'VectorStoreRetriever', 'BaseRetriever']
|
||||
this.badge = 'NEW'
|
||||
this.credential = {
|
||||
|
|
@ -74,6 +75,7 @@ class Astra_VectorStores implements INode {
|
|||
optional: true
|
||||
}
|
||||
]
|
||||
addMMRInputParams(this.inputs)
|
||||
this.outputs = [
|
||||
{
|
||||
label: 'Astra Retriever',
|
||||
|
|
@ -139,9 +141,6 @@ class Astra_VectorStores implements INode {
|
|||
const embeddings = nodeData.inputs?.embeddings as Embeddings
|
||||
const vectorDimension = nodeData.inputs?.vectorDimension as number
|
||||
const similarityMetric = nodeData.inputs?.similarityMetric as 'cosine' | 'euclidean' | 'dot_product' | undefined
|
||||
const output = nodeData.outputs?.output as string
|
||||
const topK = nodeData.inputs?.topK as string
|
||||
const k = topK ? parseFloat(topK) : 4
|
||||
|
||||
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
|
||||
|
||||
|
|
@ -176,14 +175,7 @@ class Astra_VectorStores implements INode {
|
|||
|
||||
const vectorStore = await AstraDBVectorStore.fromExistingIndex(embeddings, astraConfig)
|
||||
|
||||
if (output === 'retriever') {
|
||||
const retriever = vectorStore.asRetriever(k)
|
||||
return retriever
|
||||
} else if (output === 'vectorStore') {
|
||||
;(vectorStore as any).k = k
|
||||
return vectorStore
|
||||
}
|
||||
return vectorStore
|
||||
return resolveVectorStoreOrRetriever(nodeData, vectorStore)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -5,6 +5,7 @@ import { Embeddings } from 'langchain/embeddings/base'
|
|||
import { Document } from 'langchain/document'
|
||||
import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
|
||||
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
|
||||
import { addMMRInputParams, resolveVectorStoreOrRetriever } from '../VectorStoreUtils'
|
||||
|
||||
class MongoDBAtlas_VectorStores implements INode {
|
||||
label: string
|
||||
|
|
@ -24,7 +25,7 @@ class MongoDBAtlas_VectorStores implements INode {
|
|||
this.label = 'MongoDB Atlas'
|
||||
this.name = 'mongoDBAtlas'
|
||||
this.version = 1.0
|
||||
this.description = `Upsert embedded data and perform similarity search upon query using MongoDB Atlas, a managed cloud mongodb database`
|
||||
this.description = `Upsert embedded data and perform similarity or mmr search upon query using MongoDB Atlas, a managed cloud mongodb database`
|
||||
this.type = 'MongoDB Atlas'
|
||||
this.icon = 'mongodb.svg'
|
||||
this.category = 'Vector Stores'
|
||||
|
|
@ -95,6 +96,7 @@ class MongoDBAtlas_VectorStores implements INode {
|
|||
optional: true
|
||||
}
|
||||
]
|
||||
addMMRInputParams(this.inputs)
|
||||
this.outputs = [
|
||||
{
|
||||
label: 'MongoDB Retriever',
|
||||
|
|
@ -162,9 +164,6 @@ class MongoDBAtlas_VectorStores implements INode {
|
|||
let textKey = nodeData.inputs?.textKey as string
|
||||
let embeddingKey = nodeData.inputs?.embeddingKey as string
|
||||
const embeddings = nodeData.inputs?.embeddings as Embeddings
|
||||
const topK = nodeData.inputs?.topK as string
|
||||
const k = topK ? parseFloat(topK) : 4
|
||||
const output = nodeData.outputs?.output as string
|
||||
|
||||
let mongoDBConnectUrl = getCredentialParam('mongoDBConnectUrl', credentialData, nodeData)
|
||||
|
||||
|
|
@ -181,13 +180,7 @@ class MongoDBAtlas_VectorStores implements INode {
|
|||
embeddingKey
|
||||
})
|
||||
|
||||
if (output === 'retriever') {
|
||||
return vectorStore.asRetriever(k)
|
||||
} else if (output === 'vectorStore') {
|
||||
;(vectorStore as any).k = k
|
||||
return vectorStore
|
||||
}
|
||||
return vectorStore
|
||||
return resolveVectorStoreOrRetriever(nodeData, vectorStore)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -5,6 +5,7 @@ import { Embeddings } from 'langchain/embeddings/base'
|
|||
import { Document } from 'langchain/document'
|
||||
import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
|
||||
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
|
||||
import { addMMRInputParams, resolveVectorStoreOrRetriever } from '../VectorStoreUtils'
|
||||
|
||||
class Pinecone_VectorStores implements INode {
|
||||
label: string
|
||||
|
|
@ -23,11 +24,11 @@ class Pinecone_VectorStores implements INode {
|
|||
constructor() {
|
||||
this.label = 'Pinecone'
|
||||
this.name = 'pinecone'
|
||||
this.version = 1.0
|
||||
this.version = 2.0
|
||||
this.type = 'Pinecone'
|
||||
this.icon = 'pinecone.svg'
|
||||
this.category = 'Vector Stores'
|
||||
this.description = `Upsert embedded data and perform similarity search upon query using Pinecone, a leading fully managed hosted vector database`
|
||||
this.description = `Upsert embedded data and perform similarity or mmr search using Pinecone, a leading fully managed hosted vector database`
|
||||
this.baseClasses = [this.type, 'VectorStoreRetriever', 'BaseRetriever']
|
||||
this.badge = 'NEW'
|
||||
this.credential = {
|
||||
|
|
@ -79,6 +80,7 @@ class Pinecone_VectorStores implements INode {
|
|||
optional: true
|
||||
}
|
||||
]
|
||||
addMMRInputParams(this.inputs)
|
||||
this.outputs = [
|
||||
{
|
||||
label: 'Pinecone Retriever',
|
||||
|
|
@ -140,9 +142,6 @@ class Pinecone_VectorStores implements INode {
|
|||
const pineconeMetadataFilter = nodeData.inputs?.pineconeMetadataFilter
|
||||
const docs = nodeData.inputs?.document as Document[]
|
||||
const embeddings = nodeData.inputs?.embeddings as Embeddings
|
||||
const output = nodeData.outputs?.output as string
|
||||
const topK = nodeData.inputs?.topK as string
|
||||
const k = topK ? parseFloat(topK) : 4
|
||||
|
||||
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
|
||||
const pineconeApiKey = getCredentialParam('pineconeApiKey', credentialData, nodeData)
|
||||
|
|
@ -175,14 +174,7 @@ class Pinecone_VectorStores implements INode {
|
|||
|
||||
const vectorStore = await PineconeStore.fromExistingIndex(embeddings, obj)
|
||||
|
||||
if (output === 'retriever') {
|
||||
const retriever = vectorStore.asRetriever(k)
|
||||
return retriever
|
||||
} else if (output === 'vectorStore') {
|
||||
;(vectorStore as any).k = k
|
||||
return vectorStore
|
||||
}
|
||||
return vectorStore
|
||||
return resolveVectorStoreOrRetriever(nodeData, vectorStore)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -5,6 +5,7 @@ import { Embeddings } from 'langchain/embeddings/base'
|
|||
import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
|
||||
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
|
||||
import { SupabaseLibArgs, SupabaseVectorStore } from 'langchain/vectorstores/supabase'
|
||||
import { addMMRInputParams, resolveVectorStoreOrRetriever } from '../VectorStoreUtils'
|
||||
|
||||
class Supabase_VectorStores implements INode {
|
||||
label: string
|
||||
|
|
@ -23,11 +24,11 @@ class Supabase_VectorStores implements INode {
|
|||
constructor() {
|
||||
this.label = 'Supabase'
|
||||
this.name = 'supabase'
|
||||
this.version = 1.0
|
||||
this.version = 2.0
|
||||
this.type = 'Supabase'
|
||||
this.icon = 'supabase.svg'
|
||||
this.category = 'Vector Stores'
|
||||
this.description = 'Upsert embedded data and perform similarity search upon query using Supabase via pgvector extension'
|
||||
this.description = 'Upsert embedded data and perform similarity or mmr search upon query using Supabase via pgvector extension'
|
||||
this.baseClasses = [this.type, 'VectorStoreRetriever', 'BaseRetriever']
|
||||
this.badge = 'NEW'
|
||||
this.credential = {
|
||||
|
|
@ -81,6 +82,7 @@ class Supabase_VectorStores implements INode {
|
|||
optional: true
|
||||
}
|
||||
]
|
||||
addMMRInputParams(this.inputs)
|
||||
this.outputs = [
|
||||
{
|
||||
label: 'Supabase Retriever',
|
||||
|
|
@ -135,9 +137,6 @@ class Supabase_VectorStores implements INode {
|
|||
const queryName = nodeData.inputs?.queryName as string
|
||||
const embeddings = nodeData.inputs?.embeddings as Embeddings
|
||||
const supabaseMetadataFilter = nodeData.inputs?.supabaseMetadataFilter
|
||||
const output = nodeData.outputs?.output as string
|
||||
const topK = nodeData.inputs?.topK as string
|
||||
const k = topK ? parseFloat(topK) : 4
|
||||
|
||||
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
|
||||
const supabaseApiKey = getCredentialParam('supabaseApiKey', credentialData, nodeData)
|
||||
|
|
@ -157,14 +156,7 @@ class Supabase_VectorStores implements INode {
|
|||
|
||||
const vectorStore = await SupabaseVectorStore.fromExistingIndex(embeddings, obj)
|
||||
|
||||
if (output === 'retriever') {
|
||||
const retriever = vectorStore.asRetriever(k)
|
||||
return retriever
|
||||
} else if (output === 'vectorStore') {
|
||||
;(vectorStore as any).k = k
|
||||
return vectorStore
|
||||
}
|
||||
return vectorStore
|
||||
return resolveVectorStoreOrRetriever(nodeData, vectorStore)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -0,0 +1,75 @@
|
|||
import { INodeData } from '../../src'
|
||||
|
||||
export const resolveVectorStoreOrRetriever = (nodeData: INodeData, vectorStore: any) => {
|
||||
const output = nodeData.outputs?.output as string
|
||||
const searchType = nodeData.outputs?.searchType as string
|
||||
const topK = nodeData.inputs?.topK as string
|
||||
const k = topK ? parseFloat(topK) : 4
|
||||
|
||||
if (output === 'retriever') {
|
||||
if ('mmr' === searchType) {
|
||||
const fetchK = nodeData.inputs?.fetchK as string
|
||||
const lambda = nodeData.inputs?.lambda as string
|
||||
const f = fetchK ? parseInt(fetchK) : 20
|
||||
const l = lambda ? parseFloat(lambda) : 0.5
|
||||
return vectorStore.asRetriever({
|
||||
searchType: 'mmr',
|
||||
k: k,
|
||||
searchKwargs: {
|
||||
fetchK: f,
|
||||
lambda: l
|
||||
}
|
||||
})
|
||||
} else {
|
||||
// "searchType" is "similarity"
|
||||
return vectorStore.asRetriever(k)
|
||||
}
|
||||
} else if (output === 'vectorStore') {
|
||||
;(vectorStore as any).k = k
|
||||
return vectorStore
|
||||
}
|
||||
}
|
||||
|
||||
export const addMMRInputParams = (inputs: any[]) => {
|
||||
const mmrInputParams = [
|
||||
{
|
||||
label: 'Search Type',
|
||||
name: 'searchType',
|
||||
type: 'options',
|
||||
default: 'similarity',
|
||||
options: [
|
||||
{
|
||||
label: 'Similarity',
|
||||
name: 'similarity'
|
||||
},
|
||||
{
|
||||
label: 'Max Marginal Relevance',
|
||||
name: 'mmr'
|
||||
}
|
||||
],
|
||||
additionalParams: true,
|
||||
optional: true
|
||||
},
|
||||
{
|
||||
label: 'Fetch K (for MMR Search)',
|
||||
name: 'fetchK',
|
||||
description: 'Number of initial documents to fetch for MMR reranking. Default to 20. Used only when the search type is MMR',
|
||||
placeholder: '20',
|
||||
type: 'number',
|
||||
additionalParams: true,
|
||||
optional: true
|
||||
},
|
||||
{
|
||||
label: 'Lambda (for MMR Search)',
|
||||
name: 'lambda',
|
||||
description:
|
||||
'Number between 0 and 1 that determines the degree of diversity among the results, where 0 corresponds to maximum diversity and 1 to minimum diversity. Used only when the search type is MMR',
|
||||
placeholder: '0.5',
|
||||
type: 'number',
|
||||
additionalParams: true,
|
||||
optional: true
|
||||
}
|
||||
]
|
||||
|
||||
inputs.push(...mmrInputParams)
|
||||
}
|
||||
|
|
@ -5,6 +5,7 @@ import { Document } from 'langchain/document'
|
|||
import { Embeddings } from 'langchain/embeddings/base'
|
||||
import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
|
||||
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
|
||||
import { addMMRInputParams, resolveVectorStoreOrRetriever } from '../VectorStoreUtils'
|
||||
|
||||
class Weaviate_VectorStores implements INode {
|
||||
label: string
|
||||
|
|
@ -23,12 +24,12 @@ class Weaviate_VectorStores implements INode {
|
|||
constructor() {
|
||||
this.label = 'Weaviate'
|
||||
this.name = 'weaviate'
|
||||
this.version = 1.0
|
||||
this.version = 2.0
|
||||
this.type = 'Weaviate'
|
||||
this.icon = 'weaviate.png'
|
||||
this.category = 'Vector Stores'
|
||||
this.description =
|
||||
'Upsert embedded data and perform similarity search upon query using Weaviate, a scalable open-source vector database'
|
||||
'Upsert embedded data and perform similarity or mmr search using Weaviate, a scalable open-source vector database'
|
||||
this.baseClasses = [this.type, 'VectorStoreRetriever', 'BaseRetriever']
|
||||
this.badge = 'NEW'
|
||||
this.credential = {
|
||||
|
|
@ -107,6 +108,7 @@ class Weaviate_VectorStores implements INode {
|
|||
optional: true
|
||||
}
|
||||
]
|
||||
addMMRInputParams(this.inputs)
|
||||
this.outputs = [
|
||||
{
|
||||
label: 'Weaviate Retriever',
|
||||
|
|
@ -174,9 +176,6 @@ class Weaviate_VectorStores implements INode {
|
|||
const weaviateTextKey = nodeData.inputs?.weaviateTextKey as string
|
||||
const weaviateMetadataKeys = nodeData.inputs?.weaviateMetadataKeys as string
|
||||
const embeddings = nodeData.inputs?.embeddings as Embeddings
|
||||
const output = nodeData.outputs?.output as string
|
||||
const topK = nodeData.inputs?.topK as string
|
||||
const k = topK ? parseFloat(topK) : 4
|
||||
|
||||
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
|
||||
const weaviateApiKey = getCredentialParam('weaviateApiKey', credentialData, nodeData)
|
||||
|
|
@ -199,14 +198,7 @@ class Weaviate_VectorStores implements INode {
|
|||
|
||||
const vectorStore = await WeaviateStore.fromExistingIndex(embeddings, obj)
|
||||
|
||||
if (output === 'retriever') {
|
||||
const retriever = vectorStore.asRetriever(k)
|
||||
return retriever
|
||||
} else if (output === 'vectorStore') {
|
||||
;(vectorStore as any).k = k
|
||||
return vectorStore
|
||||
}
|
||||
return vectorStore
|
||||
return resolveVectorStoreOrRetriever(nodeData, vectorStore)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -5,6 +5,7 @@ import { Embeddings } from 'langchain/embeddings/base'
|
|||
import { Document } from 'langchain/document'
|
||||
import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
|
||||
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
|
||||
import { addMMRInputParams, resolveVectorStoreOrRetriever } from '../VectorStoreUtils'
|
||||
|
||||
class Zep_VectorStores implements INode {
|
||||
label: string
|
||||
|
|
@ -23,12 +24,12 @@ class Zep_VectorStores implements INode {
|
|||
constructor() {
|
||||
this.label = 'Zep'
|
||||
this.name = 'zep'
|
||||
this.version = 1.0
|
||||
this.version = 2.0
|
||||
this.type = 'Zep'
|
||||
this.icon = 'zep.svg'
|
||||
this.category = 'Vector Stores'
|
||||
this.description =
|
||||
'Upsert embedded data and perform similarity search upon query using Zep, a fast and scalable building block for LLM apps'
|
||||
'Upsert embedded data and perform similarity or mmr search upon query using Zep, a fast and scalable building block for LLM apps'
|
||||
this.baseClasses = [this.type, 'VectorStoreRetriever', 'BaseRetriever']
|
||||
this.badge = 'NEW'
|
||||
this.credential = {
|
||||
|
|
@ -88,6 +89,7 @@ class Zep_VectorStores implements INode {
|
|||
optional: true
|
||||
}
|
||||
]
|
||||
addMMRInputParams(this.inputs)
|
||||
this.outputs = [
|
||||
{
|
||||
label: 'Zep Retriever',
|
||||
|
|
@ -144,9 +146,6 @@ class Zep_VectorStores implements INode {
|
|||
const zepMetadataFilter = nodeData.inputs?.zepMetadataFilter
|
||||
const dimension = nodeData.inputs?.dimension as number
|
||||
const embeddings = nodeData.inputs?.embeddings as Embeddings
|
||||
const output = nodeData.outputs?.output as string
|
||||
const topK = nodeData.inputs?.topK as string
|
||||
const k = topK ? parseFloat(topK) : 4
|
||||
|
||||
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
|
||||
const apiKey = getCredentialParam('apiKey', credentialData, nodeData)
|
||||
|
|
@ -165,14 +164,7 @@ class Zep_VectorStores implements INode {
|
|||
|
||||
const vectorStore = await ZepExistingVS.fromExistingIndex(embeddings, zepConfig)
|
||||
|
||||
if (output === 'retriever') {
|
||||
const retriever = vectorStore.asRetriever(k)
|
||||
return retriever
|
||||
} else if (output === 'vectorStore') {
|
||||
;(vectorStore as any).k = k
|
||||
return vectorStore
|
||||
}
|
||||
return vectorStore
|
||||
return resolveVectorStoreOrRetriever(nodeData, vectorStore)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -210,7 +202,7 @@ class ZepExistingVS extends ZepVectorStore {
|
|||
this.args = args
|
||||
}
|
||||
|
||||
async initalizeCollection(args: IZepConfig & Partial<ZepFilter>) {
|
||||
async initializeCollection(args: IZepConfig & Partial<ZepFilter>) {
|
||||
this.client = await ZepClient.init(args.apiUrl, args.apiKey)
|
||||
try {
|
||||
this.collection = await this.client.document.getCollection(args.collectionName)
|
||||
|
|
@ -259,7 +251,7 @@ class ZepExistingVS extends ZepVectorStore {
|
|||
const newfilter = {
|
||||
where: { and: ANDFilters }
|
||||
}
|
||||
await this.initalizeCollection(this.args!).catch((err) => {
|
||||
await this.initializeCollection(this.args!).catch((err) => {
|
||||
console.error('Error initializing collection:', err)
|
||||
throw err
|
||||
})
|
||||
|
|
|
|||
|
|
@ -511,7 +511,7 @@
|
|||
"type": "Pinecone",
|
||||
"baseClasses": ["Pinecone", "VectorStoreRetriever", "BaseRetriever"],
|
||||
"category": "Vector Stores",
|
||||
"description": "Upsert embedded data and perform similarity search upon query using Pinecone, a leading fully managed hosted vector database",
|
||||
"description": "Upsert embedded data and perform similarity or mmr search using Pinecone, a leading fully managed hosted vector database",
|
||||
"inputParams": [
|
||||
{
|
||||
"label": "Connect Credential",
|
||||
|
|
@ -552,6 +552,45 @@
|
|||
"additionalParams": true,
|
||||
"optional": true,
|
||||
"id": "pinecone_0-input-topK-number"
|
||||
},
|
||||
{
|
||||
"label": "Search Type",
|
||||
"name": "searchType",
|
||||
"type": "options",
|
||||
"default": "similarity",
|
||||
"options": [
|
||||
{
|
||||
"label": "Similarity",
|
||||
"name": "similarity"
|
||||
},
|
||||
{
|
||||
"label": "Max Marginal Relevance",
|
||||
"name": "mmr"
|
||||
}
|
||||
],
|
||||
"additionalParams": true,
|
||||
"optional": true,
|
||||
"id": "pinecone_0-input-searchType-options"
|
||||
},
|
||||
{
|
||||
"label": "Fetch K (for MMR Search)",
|
||||
"name": "fetchK",
|
||||
"description": "Number of initial documents to fetch for MMR reranking. Default to 20. Used only when the search type is MMR",
|
||||
"placeholder": "20",
|
||||
"type": "number",
|
||||
"additionalParams": true,
|
||||
"optional": true,
|
||||
"id": "pinecone_0-input-fetchK-number"
|
||||
},
|
||||
{
|
||||
"label": "Lambda (for MMR Search)",
|
||||
"name": "lambda",
|
||||
"description": "Number between 0 and 1 that determines the degree of diversity among the results, where 0 corresponds to maximum diversity and 1 to minimum diversity. Used only when the search type is MMR",
|
||||
"placeholder": "0.5",
|
||||
"type": "number",
|
||||
"additionalParams": true,
|
||||
"optional": true,
|
||||
"id": "pinecone_0-input-lambda-number"
|
||||
}
|
||||
],
|
||||
"inputAnchors": [
|
||||
|
|
@ -576,7 +615,10 @@
|
|||
"pineconeIndex": "",
|
||||
"pineconeNamespace": "",
|
||||
"pineconeMetadataFilter": "",
|
||||
"topK": ""
|
||||
"topK": "",
|
||||
"searchType": "similarity",
|
||||
"fetchK": "",
|
||||
"lambda": ""
|
||||
},
|
||||
"outputAnchors": [
|
||||
{
|
||||
|
|
|
|||
|
|
@ -166,7 +166,7 @@
|
|||
"type": "Pinecone",
|
||||
"baseClasses": ["Pinecone", "VectorStoreRetriever", "BaseRetriever"],
|
||||
"category": "Vector Stores",
|
||||
"description": "Upsert embedded data and perform similarity search upon query using Pinecone, a leading fully managed hosted vector database",
|
||||
"description": "Upsert embedded data and perform similarity or mmr search using Pinecone, a leading fully managed hosted vector database",
|
||||
"inputParams": [
|
||||
{
|
||||
"label": "Connect Credential",
|
||||
|
|
@ -207,6 +207,45 @@
|
|||
"additionalParams": true,
|
||||
"optional": true,
|
||||
"id": "pinecone_0-input-topK-number"
|
||||
},
|
||||
{
|
||||
"label": "Search Type",
|
||||
"name": "searchType",
|
||||
"type": "options",
|
||||
"default": "similarity",
|
||||
"options": [
|
||||
{
|
||||
"label": "Similarity",
|
||||
"name": "similarity"
|
||||
},
|
||||
{
|
||||
"label": "Max Marginal Relevance",
|
||||
"name": "mmr"
|
||||
}
|
||||
],
|
||||
"additionalParams": true,
|
||||
"optional": true,
|
||||
"id": "pinecone_0-input-searchType-options"
|
||||
},
|
||||
{
|
||||
"label": "Fetch K (for MMR Search)",
|
||||
"name": "fetchK",
|
||||
"description": "Number of initial documents to fetch for MMR reranking. Default to 20. Used only when the search type is MMR",
|
||||
"placeholder": "20",
|
||||
"type": "number",
|
||||
"additionalParams": true,
|
||||
"optional": true,
|
||||
"id": "pinecone_0-input-fetchK-number"
|
||||
},
|
||||
{
|
||||
"label": "Lambda (for MMR Search)",
|
||||
"name": "lambda",
|
||||
"description": "Number between 0 and 1 that determines the degree of diversity among the results, where 0 corresponds to maximum diversity and 1 to minimum diversity. Used only when the search type is MMR",
|
||||
"placeholder": "0.5",
|
||||
"type": "number",
|
||||
"additionalParams": true,
|
||||
"optional": true,
|
||||
"id": "pinecone_0-input-lambda-number"
|
||||
}
|
||||
],
|
||||
"inputAnchors": [
|
||||
|
|
@ -231,7 +270,10 @@
|
|||
"pineconeIndex": "",
|
||||
"pineconeNamespace": "",
|
||||
"pineconeMetadataFilter": "",
|
||||
"topK": ""
|
||||
"topK": "",
|
||||
"searchType": "similarity",
|
||||
"fetchK": "",
|
||||
"lambda": ""
|
||||
},
|
||||
"outputAnchors": [
|
||||
{
|
||||
|
|
|
|||
|
|
@ -301,7 +301,7 @@
|
|||
"type": "Pinecone",
|
||||
"baseClasses": ["Pinecone", "VectorStoreRetriever", "BaseRetriever"],
|
||||
"category": "Vector Stores",
|
||||
"description": "Upsert embedded data and perform similarity search upon query using Pinecone, a leading fully managed hosted vector database",
|
||||
"description": "Upsert embedded data and perform similarity or mmr search using Pinecone, a leading fully managed hosted vector database",
|
||||
"inputParams": [
|
||||
{
|
||||
"label": "Connect Credential",
|
||||
|
|
@ -342,6 +342,45 @@
|
|||
"additionalParams": true,
|
||||
"optional": true,
|
||||
"id": "pinecone_0-input-topK-number"
|
||||
},
|
||||
{
|
||||
"label": "Search Type",
|
||||
"name": "searchType",
|
||||
"type": "options",
|
||||
"default": "similarity",
|
||||
"options": [
|
||||
{
|
||||
"label": "Similarity",
|
||||
"name": "similarity"
|
||||
},
|
||||
{
|
||||
"label": "Max Marginal Relevance",
|
||||
"name": "mmr"
|
||||
}
|
||||
],
|
||||
"additionalParams": true,
|
||||
"optional": true,
|
||||
"id": "pinecone_0-input-searchType-options"
|
||||
},
|
||||
{
|
||||
"label": "Fetch K (for MMR Search)",
|
||||
"name": "fetchK",
|
||||
"description": "Number of initial documents to fetch for MMR reranking. Default to 20. Used only when the search type is MMR",
|
||||
"placeholder": "20",
|
||||
"type": "number",
|
||||
"additionalParams": true,
|
||||
"optional": true,
|
||||
"id": "pinecone_0-input-fetchK-number"
|
||||
},
|
||||
{
|
||||
"label": "Lambda (for MMR Search)",
|
||||
"name": "lambda",
|
||||
"description": "Number between 0 and 1 that determines the degree of diversity among the results, where 0 corresponds to maximum diversity and 1 to minimum diversity. Used only when the search type is MMR",
|
||||
"placeholder": "0.5",
|
||||
"type": "number",
|
||||
"additionalParams": true,
|
||||
"optional": true,
|
||||
"id": "pinecone_0-input-lambda-number"
|
||||
}
|
||||
],
|
||||
"inputAnchors": [
|
||||
|
|
@ -366,7 +405,10 @@
|
|||
"pineconeIndex": "",
|
||||
"pineconeNamespace": "",
|
||||
"pineconeMetadataFilter": "",
|
||||
"topK": ""
|
||||
"topK": "",
|
||||
"searchType": "similarity",
|
||||
"fetchK": "",
|
||||
"lambda": ""
|
||||
},
|
||||
"outputAnchors": [
|
||||
{
|
||||
|
|
|
|||
|
|
@ -541,7 +541,7 @@
|
|||
"type": "Pinecone",
|
||||
"baseClasses": ["Pinecone", "VectorStoreRetriever", "BaseRetriever"],
|
||||
"category": "Vector Stores",
|
||||
"description": "Upsert embedded data and perform similarity search upon query using Pinecone, a leading fully managed hosted vector database",
|
||||
"description": "Upsert embedded data and perform similarity or mmr search using Pinecone, a leading fully managed hosted vector database",
|
||||
"inputParams": [
|
||||
{
|
||||
"label": "Connect Credential",
|
||||
|
|
@ -582,6 +582,45 @@
|
|||
"additionalParams": true,
|
||||
"optional": true,
|
||||
"id": "pinecone_0-input-topK-number"
|
||||
},
|
||||
{
|
||||
"label": "Search Type",
|
||||
"name": "searchType",
|
||||
"type": "options",
|
||||
"default": "similarity",
|
||||
"options": [
|
||||
{
|
||||
"label": "Similarity",
|
||||
"name": "similarity"
|
||||
},
|
||||
{
|
||||
"label": "Max Marginal Relevance",
|
||||
"name": "mmr"
|
||||
}
|
||||
],
|
||||
"additionalParams": true,
|
||||
"optional": true,
|
||||
"id": "pinecone_0-input-searchType-options"
|
||||
},
|
||||
{
|
||||
"label": "Fetch K (for MMR Search)",
|
||||
"name": "fetchK",
|
||||
"description": "Number of initial documents to fetch for MMR reranking. Default to 20. Used only when the search type is MMR",
|
||||
"placeholder": "20",
|
||||
"type": "number",
|
||||
"additionalParams": true,
|
||||
"optional": true,
|
||||
"id": "pinecone_0-input-fetchK-number"
|
||||
},
|
||||
{
|
||||
"label": "Lambda (for MMR Search)",
|
||||
"name": "lambda",
|
||||
"description": "Number between 0 and 1 that determines the degree of diversity among the results, where 0 corresponds to maximum diversity and 1 to minimum diversity. Used only when the search type is MMR",
|
||||
"placeholder": "0.5",
|
||||
"type": "number",
|
||||
"additionalParams": true,
|
||||
"optional": true,
|
||||
"id": "pinecone_0-input-lambda-number"
|
||||
}
|
||||
],
|
||||
"inputAnchors": [
|
||||
|
|
@ -606,7 +645,10 @@
|
|||
"pineconeIndex": "",
|
||||
"pineconeNamespace": "",
|
||||
"pineconeMetadataFilter": "",
|
||||
"topK": ""
|
||||
"topK": "",
|
||||
"searchType": "similarity",
|
||||
"fetchK": "",
|
||||
"lambda": ""
|
||||
},
|
||||
"outputAnchors": [
|
||||
{
|
||||
|
|
|
|||
|
|
@ -625,7 +625,7 @@
|
|||
"type": "Pinecone",
|
||||
"baseClasses": ["Pinecone", "VectorStoreRetriever", "BaseRetriever"],
|
||||
"category": "Vector Stores",
|
||||
"description": "Upsert embedded data and perform similarity search upon query using Pinecone, a leading fully managed hosted vector database",
|
||||
"description": "Upsert embedded data and perform similarity or mmr search using Pinecone, a leading fully managed hosted vector database",
|
||||
"inputParams": [
|
||||
{
|
||||
"label": "Connect Credential",
|
||||
|
|
@ -666,6 +666,45 @@
|
|||
"additionalParams": true,
|
||||
"optional": true,
|
||||
"id": "pinecone_0-input-topK-number"
|
||||
},
|
||||
{
|
||||
"label": "Search Type",
|
||||
"name": "searchType",
|
||||
"type": "options",
|
||||
"default": "similarity",
|
||||
"options": [
|
||||
{
|
||||
"label": "Similarity",
|
||||
"name": "similarity"
|
||||
},
|
||||
{
|
||||
"label": "Max Marginal Relevance",
|
||||
"name": "mmr"
|
||||
}
|
||||
],
|
||||
"additionalParams": true,
|
||||
"optional": true,
|
||||
"id": "pinecone_0-input-searchType-options"
|
||||
},
|
||||
{
|
||||
"label": "Fetch K (for MMR Search)",
|
||||
"name": "fetchK",
|
||||
"description": "Number of initial documents to fetch for MMR reranking. Default to 20. Used only when the search type is MMR",
|
||||
"placeholder": "20",
|
||||
"type": "number",
|
||||
"additionalParams": true,
|
||||
"optional": true,
|
||||
"id": "pinecone_0-input-fetchK-number"
|
||||
},
|
||||
{
|
||||
"label": "Lambda (for MMR Search)",
|
||||
"name": "lambda",
|
||||
"description": "Number between 0 and 1 that determines the degree of diversity among the results, where 0 corresponds to maximum diversity and 1 to minimum diversity. Used only when the search type is MMR",
|
||||
"placeholder": "0.5",
|
||||
"type": "number",
|
||||
"additionalParams": true,
|
||||
"optional": true,
|
||||
"id": "pinecone_0-input-lambda-number"
|
||||
}
|
||||
],
|
||||
"inputAnchors": [
|
||||
|
|
@ -690,7 +729,10 @@
|
|||
"pineconeIndex": "",
|
||||
"pineconeNamespace": "",
|
||||
"pineconeMetadataFilter": "{\"id\":{\"$in\":[\"doc1\",\"doc2\"]}}",
|
||||
"topK": ""
|
||||
"topK": "",
|
||||
"searchType": "similarity",
|
||||
"fetchK": "",
|
||||
"lambda": ""
|
||||
},
|
||||
"outputAnchors": [
|
||||
{
|
||||
|
|
|
|||
|
|
@ -560,7 +560,7 @@
|
|||
"type": "Pinecone",
|
||||
"baseClasses": ["Pinecone", "VectorStoreRetriever", "BaseRetriever"],
|
||||
"category": "Vector Stores",
|
||||
"description": "Upsert embedded data and perform similarity search upon query using Pinecone, a leading fully managed hosted vector database",
|
||||
"description": "Upsert embedded data and perform similarity or mmr search using Pinecone, a leading fully managed hosted vector database",
|
||||
"inputParams": [
|
||||
{
|
||||
"label": "Connect Credential",
|
||||
|
|
@ -601,6 +601,45 @@
|
|||
"additionalParams": true,
|
||||
"optional": true,
|
||||
"id": "pinecone_0-input-topK-number"
|
||||
},
|
||||
{
|
||||
"label": "Search Type",
|
||||
"name": "searchType",
|
||||
"type": "options",
|
||||
"default": "similarity",
|
||||
"options": [
|
||||
{
|
||||
"label": "Similarity",
|
||||
"name": "similarity"
|
||||
},
|
||||
{
|
||||
"label": "Max Marginal Relevance",
|
||||
"name": "mmr"
|
||||
}
|
||||
],
|
||||
"additionalParams": true,
|
||||
"optional": true,
|
||||
"id": "pinecone_0-input-searchType-options"
|
||||
},
|
||||
{
|
||||
"label": "Fetch K (for MMR Search)",
|
||||
"name": "fetchK",
|
||||
"description": "Number of initial documents to fetch for MMR reranking. Default to 20. Used only when the search type is MMR",
|
||||
"placeholder": "20",
|
||||
"type": "number",
|
||||
"additionalParams": true,
|
||||
"optional": true,
|
||||
"id": "pinecone_0-input-fetchK-number"
|
||||
},
|
||||
{
|
||||
"label": "Lambda (for MMR Search)",
|
||||
"name": "lambda",
|
||||
"description": "Number between 0 and 1 that determines the degree of diversity among the results, where 0 corresponds to maximum diversity and 1 to minimum diversity. Used only when the search type is MMR",
|
||||
"placeholder": "0.5",
|
||||
"type": "number",
|
||||
"additionalParams": true,
|
||||
"optional": true,
|
||||
"id": "pinecone_0-input-lambda-number"
|
||||
}
|
||||
],
|
||||
"inputAnchors": [
|
||||
|
|
@ -625,7 +664,10 @@
|
|||
"pineconeIndex": "",
|
||||
"pineconeNamespace": "",
|
||||
"pineconeMetadataFilter": "",
|
||||
"topK": ""
|
||||
"topK": "",
|
||||
"searchType": "similarity",
|
||||
"fetchK": "",
|
||||
"lambda": ""
|
||||
},
|
||||
"outputAnchors": [
|
||||
{
|
||||
|
|
@ -840,6 +882,45 @@
|
|||
"additionalParams": true,
|
||||
"optional": true,
|
||||
"id": "supabase_0-input-topK-number"
|
||||
},
|
||||
{
|
||||
"label": "Search Type",
|
||||
"name": "searchType",
|
||||
"type": "options",
|
||||
"default": "similarity",
|
||||
"options": [
|
||||
{
|
||||
"label": "Similarity",
|
||||
"name": "similarity"
|
||||
},
|
||||
{
|
||||
"label": "Max Marginal Relevance",
|
||||
"name": "mmr"
|
||||
}
|
||||
],
|
||||
"additionalParams": true,
|
||||
"optional": true,
|
||||
"id": "supabase_0-input-searchType-options"
|
||||
},
|
||||
{
|
||||
"label": "Fetch K (for MMR Search)",
|
||||
"name": "fetchK",
|
||||
"description": "Number of initial documents to fetch for MMR reranking. Default to 20. Used only when the search type is MMR",
|
||||
"placeholder": "20",
|
||||
"type": "number",
|
||||
"additionalParams": true,
|
||||
"optional": true,
|
||||
"id": "supabase_0-input-fetchK-number"
|
||||
},
|
||||
{
|
||||
"label": "Lambda (for MMR Search)",
|
||||
"name": "lambda",
|
||||
"description": "Number between 0 and 1 that determines the degree of diversity among the results, where 0 corresponds to maximum diversity and 1 to minimum diversity. Used only when the search type is MMR",
|
||||
"placeholder": "0.5",
|
||||
"type": "number",
|
||||
"additionalParams": true,
|
||||
"optional": true,
|
||||
"id": "supabase_0-input-lambda-number"
|
||||
}
|
||||
],
|
||||
"inputAnchors": [
|
||||
|
|
@ -865,7 +946,10 @@
|
|||
"tableName": "",
|
||||
"queryName": "",
|
||||
"supabaseMetadataFilter": "",
|
||||
"topK": ""
|
||||
"topK": "",
|
||||
"searchType": "similarity",
|
||||
"fetchK": "",
|
||||
"lambda": ""
|
||||
},
|
||||
"outputAnchors": [
|
||||
{
|
||||
|
|
|
|||
|
|
@ -643,7 +643,7 @@
|
|||
"type": "Pinecone",
|
||||
"baseClasses": ["Pinecone", "VectorStoreRetriever", "BaseRetriever"],
|
||||
"category": "Vector Stores",
|
||||
"description": "Upsert embedded data and perform similarity search upon query using Pinecone, a leading fully managed hosted vector database",
|
||||
"description": "Upsert embedded data and perform similarity or mmr search using Pinecone, a leading fully managed hosted vector database",
|
||||
"inputParams": [
|
||||
{
|
||||
"label": "Connect Credential",
|
||||
|
|
@ -684,6 +684,45 @@
|
|||
"additionalParams": true,
|
||||
"optional": true,
|
||||
"id": "pinecone_0-input-topK-number"
|
||||
},
|
||||
{
|
||||
"label": "Search Type",
|
||||
"name": "searchType",
|
||||
"type": "options",
|
||||
"default": "similarity",
|
||||
"options": [
|
||||
{
|
||||
"label": "Similarity",
|
||||
"name": "similarity"
|
||||
},
|
||||
{
|
||||
"label": "Max Marginal Relevance",
|
||||
"name": "mmr"
|
||||
}
|
||||
],
|
||||
"additionalParams": true,
|
||||
"optional": true,
|
||||
"id": "pinecone_0-input-searchType-options"
|
||||
},
|
||||
{
|
||||
"label": "Fetch K (for MMR Search)",
|
||||
"name": "fetchK",
|
||||
"description": "Number of initial documents to fetch for MMR reranking. Default to 20. Used only when the search type is MMR",
|
||||
"placeholder": "20",
|
||||
"type": "number",
|
||||
"additionalParams": true,
|
||||
"optional": true,
|
||||
"id": "pinecone_0-input-fetchK-number"
|
||||
},
|
||||
{
|
||||
"label": "Lambda (for MMR Search)",
|
||||
"name": "lambda",
|
||||
"description": "Number between 0 and 1 that determines the degree of diversity among the results, where 0 corresponds to maximum diversity and 1 to minimum diversity. Used only when the search type is MMR",
|
||||
"placeholder": "0.5",
|
||||
"type": "number",
|
||||
"additionalParams": true,
|
||||
"optional": true,
|
||||
"id": "pinecone_0-input-lambda-number"
|
||||
}
|
||||
],
|
||||
"inputAnchors": [
|
||||
|
|
@ -708,7 +747,10 @@
|
|||
"pineconeIndex": "",
|
||||
"pineconeNamespace": "",
|
||||
"pineconeMetadataFilter": "",
|
||||
"topK": ""
|
||||
"topK": "",
|
||||
"searchType": "similarity",
|
||||
"fetchK": "",
|
||||
"lambda": ""
|
||||
},
|
||||
"outputAnchors": [
|
||||
{
|
||||
|
|
|
|||
Loading…
Reference in New Issue