Merge branch 'main' into add_mmr_vectara

This commit is contained in:
Ofer Mendelevitch 2024-01-05 11:14:22 -08:00
commit dd07c3d778
16 changed files with 106 additions and 53 deletions

View File

@@ -2,22 +2,6 @@
Version 2.0, January 2004
http://www.apache.org/licenses/
-Flowise is governed by the Apache License 2.0, with additional terms and conditions outlined below:
-Flowise can be used for commercial purposes for "backend-as-a-service" for your applications or as a development platform for enterprises. However, under specific conditions, you must reach out to the project's administrators to secure a commercial license:
-a. Multi-tenant SaaS service: Unless you have explicit written authorization from Flowise, you may not utilize the Flowise source code to operate a multi-tenant SaaS service that closely resembles the Flowise cloud-based services.
-b. Logo and copyright information: While using Flowise in commercial application, you are prohibited from removing or altering the LOGO or copyright information displayed in the Flowise console and UI.
-For inquiries regarding licensing matters, please contact hello@flowiseai.com via email.
-Contributors are required to consent to the following terms related to their contributed code:
-a. The project maintainers have the authority to modify the open-source agreement to be more stringent or lenient.
-b. Contributed code can be used for commercial purposes, including Flowise's cloud-based services.
-All other rights and restrictions are in accordance with the Apache License 2.0.
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.

View File

@@ -1,6 +1,6 @@
{
"name": "flowise",
"version": "1.4.8",
"version": "1.4.9",
"private": true,
"homepage": "https://flowiseai.com",
"workspaces": [

View File

@@ -1,7 +1,6 @@
-import { OpenAIBaseInput } from 'langchain/dist/types/openai-types'
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
-import { AzureOpenAIInput, ChatOpenAI } from 'langchain/chat_models/openai'
+import { AzureOpenAIInput, ChatOpenAI, OpenAIChatInput } from 'langchain/chat_models/openai'
import { BaseCache } from 'langchain/schema'
import { BaseLLMParams } from 'langchain/llms/base'
@@ -123,7 +122,7 @@ class AzureChatOpenAI_ChatModels implements INode {
const azureOpenAIApiDeploymentName = getCredentialParam('azureOpenAIApiDeploymentName', credentialData, nodeData)
const azureOpenAIApiVersion = getCredentialParam('azureOpenAIApiVersion', credentialData, nodeData)
-const obj: Partial<AzureOpenAIInput> & BaseLLMParams & Partial<OpenAIBaseInput> = {
+const obj: Partial<AzureOpenAIInput> & BaseLLMParams & Partial<OpenAIChatInput> = {
temperature: parseFloat(temperature),
modelName,
azureOpenAIApiKey,

View File

@@ -124,13 +124,13 @@ class ChatMistral_ChatModels implements INode {
const safeMode = nodeData.inputs?.safeMode as boolean
const randomSeed = nodeData.inputs?.randomSeed as string
const overrideEndpoint = nodeData.inputs?.overrideEndpoint as string
-// Waiting fix from langchain + mistral to enable streaming - https://github.com/mistralai/client-js/issues/18
+const streaming = nodeData.inputs?.streaming as boolean
const cache = nodeData.inputs?.cache as BaseCache
const obj: ChatMistralAIInput = {
apiKey: apiKey,
-modelName: modelName
+modelName: modelName,
+streaming: streaming ?? true
}
if (maxOutputTokens) obj.maxTokens = parseInt(maxOutputTokens, 10)
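
With streaming exposed as a node input and defaulting to true via "streaming ?? true", the Mistral model can stream tokens like the other chat models. A minimal sketch of building the model from these fields, assuming @langchain/mistralai ^0.0.6 as pinned in this commit (the key and model name are placeholders):

import { ChatMistralAI, ChatMistralAIInput } from '@langchain/mistralai'

// Mirrors the obj construction above: streaming stays on unless explicitly disabled.
const fields: ChatMistralAIInput = {
    apiKey: process.env.MISTRAL_API_KEY,
    modelName: 'mistral-tiny',
    streaming: true
}
const model = new ChatMistralAI(fields)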

View File

@@ -1,8 +1,7 @@
import { INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses } from '../../../src/utils'
-import { ChatOllama } from 'langchain/chat_models/ollama'
+import { ChatOllama, ChatOllamaInput } from 'langchain/chat_models/ollama'
import { BaseCache } from 'langchain/schema'
-import { OllamaInput } from 'langchain/dist/util/ollama'
import { BaseLLMParams } from 'langchain/llms/base'
class ChatOllama_ChatModels implements INode {
@@ -209,7 +208,7 @@ class ChatOllama_ChatModels implements INode {
const cache = nodeData.inputs?.cache as BaseCache
-const obj: OllamaInput & BaseLLMParams = {
+const obj: ChatOllamaInput & BaseLLMParams = {
baseUrl,
temperature: parseFloat(temperature),
model: modelName

View File

@@ -20,7 +20,7 @@ class Airtable_DocumentLoaders implements INode {
constructor() {
this.label = 'Airtable'
this.name = 'airtable'
-this.version = 1.0
+this.version = 2.0
this.type = 'Document'
this.icon = 'airtable.svg'
this.category = 'Document Loaders'
@@ -55,6 +55,15 @@ class Airtable_DocumentLoaders implements INode {
description:
'If your table URL looks like: https://airtable.com/app11RobdGoX0YNsC/tblJdmvbrgizbYICO/viw9UrP77Id0CE4ee, tblJdmvbrgizbYICO is the table id'
},
+            {
+                label: 'View Id',
+                name: 'viewId',
+                type: 'string',
+                placeholder: 'viw9UrP77Id0CE4ee',
+                description:
+                    'If your view URL looks like: https://airtable.com/app11RobdGoX0YNsC/tblJdmvbrgizbYICO/viw9UrP77Id0CE4ee, viw9UrP77Id0CE4ee is the view id',
+                optional: true
+            },
{
label: 'Return All',
name: 'returnAll',
@@ -83,6 +92,7 @@ class Airtable_DocumentLoaders implements INode {
async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
const baseId = nodeData.inputs?.baseId as string
const tableId = nodeData.inputs?.tableId as string
+const viewId = nodeData.inputs?.viewId as string
const returnAll = nodeData.inputs?.returnAll as boolean
const limit = nodeData.inputs?.limit as string
const textSplitter = nodeData.inputs?.textSplitter as TextSplitter
@@ -94,6 +104,7 @@ class Airtable_DocumentLoaders implements INode {
const airtableOptions: AirtableLoaderParams = {
baseId,
tableId,
+viewId,
returnAll,
accessToken,
limit: limit ? parseInt(limit, 10) : 100
@@ -133,6 +144,7 @@ interface AirtableLoaderParams {
baseId: string
tableId: string
accessToken: string
+viewId?: string
limit?: number
returnAll?: boolean
}
@@ -153,16 +165,19 @@ class AirtableLoader extends BaseDocumentLoader {
public readonly tableId: string
+public readonly viewId?: string
public readonly accessToken: string
public readonly limit: number
public readonly returnAll: boolean
-constructor({ baseId, tableId, accessToken, limit = 100, returnAll = false }: AirtableLoaderParams) {
+constructor({ baseId, tableId, viewId, accessToken, limit = 100, returnAll = false }: AirtableLoaderParams) {
super()
this.baseId = baseId
this.tableId = tableId
+this.viewId = viewId
this.accessToken = accessToken
this.limit = limit
this.returnAll = returnAll
@@ -203,7 +218,7 @@
}
private async loadLimit(): Promise<Document[]> {
-const params = { maxRecords: this.limit }
+const params = { maxRecords: this.limit, view: this.viewId }
const data = await this.fetchAirtableData(`https://api.airtable.com/v0/${this.baseId}/${this.tableId}`, params)
if (data.records.length === 0) {
return []
@@ -212,7 +227,7 @@
}
private async loadAll(): Promise<Document[]> {
-const params: ICommonObject = { pageSize: 100 }
+const params: ICommonObject = { pageSize: 100, view: this.viewId }
let data: AirtableLoaderResponse
let returnPages: AirtableLoaderPage[] = []
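
For context, the new viewId travels to Airtable as the standard "view" query parameter, so both loadLimit() and loadAll() now scope their requests to a single view. A minimal standalone sketch of the request fetchAirtableData() ends up sending (listRecords is a hypothetical helper, the ids come from the placeholder examples above, and global fetch assumes Node 18+):

// "view" restricts results to the records (and order) visible in that view.
async function listRecords(baseId: string, tableId: string, viewId: string, accessToken: string) {
    const params = new URLSearchParams({ maxRecords: '100', view: viewId })
    const response = await fetch(`https://api.airtable.com/v0/${baseId}/${tableId}?${params}`, {
        headers: { Authorization: `Bearer ${accessToken}` }
    })
    return response.json() // shape: { records: [...], offset?: string }
}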

View File

@@ -1,7 +1,7 @@
import { INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses } from '../../../src/utils'
+import { OllamaInput } from 'langchain/llms/ollama'
import { OllamaEmbeddings } from 'langchain/embeddings/ollama'
-import { OllamaInput } from 'langchain/dist/util/ollama'
class OllamaEmbedding_Embeddings implements INode {
label: string

View File

@@ -1,8 +1,7 @@
import { INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses } from '../../../src/utils'
-import { Ollama } from 'langchain/llms/ollama'
+import { Ollama, OllamaInput } from 'langchain/llms/ollama'
import { BaseCache } from 'langchain/schema'
-import { OllamaInput } from 'langchain/dist/util/ollama'
import { BaseLLMParams } from 'langchain/llms/base'
class Ollama_LLMs implements INode {

View File

@@ -24,7 +24,7 @@ class Postgres_VectorStores implements INode {
constructor() {
this.label = 'Postgres'
this.name = 'postgres'
-this.version = 1.0
+this.version = 2.0
this.type = 'Postgres'
this.icon = 'postgres.svg'
this.category = 'Vector Stores'
@@ -60,6 +60,13 @@ class Postgres_VectorStores implements INode {
name: 'database',
type: 'string'
},
+            {
+                label: 'SSL Connection',
+                name: 'sslConnection',
+                type: 'boolean',
+                default: false,
+                optional: false
+            },
{
label: 'Port',
name: 'port',
@@ -117,6 +124,7 @@ class Postgres_VectorStores implements INode {
const docs = nodeData.inputs?.document as Document[]
const embeddings = nodeData.inputs?.embeddings as Embeddings
const additionalConfig = nodeData.inputs?.additionalConfig as string
+const sslConnection = nodeData.inputs?.sslConnection as boolean
let additionalConfiguration = {}
if (additionalConfig) {
@@ -134,7 +142,8 @@ class Postgres_VectorStores implements INode {
port: nodeData.inputs?.port as number,
username: user,
password: password,
-database: nodeData.inputs?.database as string
+database: nodeData.inputs?.database as string,
+ssl: sslConnection
}
const args = {
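
The sslConnection boolean flows straight into the connection options' ssl field, and the same toggle is added to the two Postgres nodes that follow. A minimal sketch of the assembled options with SSL on (host, credentials, and database are placeholders; the real node reads them from its inputs):

// Hypothetical result of building postgresConnectionOptions with SSL enabled.
const postgresConnectionOptions = {
    type: 'postgres' as const,
    host: 'db.example.com',
    port: 5432,
    username: 'flowise',
    password: 'secret',
    database: 'vectors',
    ssl: true // driven by the new SSL Connection input (default false)
}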

View File

@@ -23,7 +23,7 @@ class Postgres_Existing_VectorStores implements INode {
constructor() {
this.label = 'Postgres Load Existing Index'
this.name = 'postgresExistingIndex'
-this.version = 1.0
+this.version = 2.0
this.type = 'Postgres'
this.icon = 'postgres.svg'
this.category = 'Vector Stores'
@@ -52,6 +52,13 @@ class Postgres_Existing_VectorStores implements INode {
name: 'database',
type: 'string'
},
+            {
+                label: 'SSL Connection',
+                name: 'sslConnection',
+                type: 'boolean',
+                default: false,
+                optional: false
+            },
{
label: 'Port',
name: 'port',
@@ -109,6 +116,7 @@ class Postgres_Existing_VectorStores implements INode {
const output = nodeData.outputs?.output as string
const topK = nodeData.inputs?.topK as string
const k = topK ? parseFloat(topK) : 4
+const sslConnection = nodeData.inputs?.sslConnection as boolean
let additionalConfiguration = {}
if (additionalConfig) {
@@ -126,7 +134,8 @@ class Postgres_Existing_VectorStores implements INode {
port: nodeData.inputs?.port as number,
username: user,
password: password,
-database: nodeData.inputs?.database as string
+database: nodeData.inputs?.database as string,
+ssl: sslConnection
}
const args = {

View File

@@ -24,7 +24,7 @@ class PostgresUpsert_VectorStores implements INode {
constructor() {
this.label = 'Postgres Upsert Document'
this.name = 'postgresUpsert'
-this.version = 1.0
+this.version = 2.0
this.type = 'Postgres'
this.icon = 'postgres.svg'
this.category = 'Vector Stores'
@@ -59,6 +59,13 @@ class PostgresUpsert_VectorStores implements INode {
name: 'database',
type: 'string'
},
+            {
+                label: 'SSL Connection',
+                name: 'sslConnection',
+                type: 'boolean',
+                default: false,
+                optional: false
+            },
{
label: 'Port',
name: 'port',
@@ -117,6 +124,7 @@ class PostgresUpsert_VectorStores implements INode {
const output = nodeData.outputs?.output as string
const topK = nodeData.inputs?.topK as string
const k = topK ? parseFloat(topK) : 4
+const sslConnection = nodeData.inputs?.sslConnection as boolean
let additionalConfiguration = {}
if (additionalConfig) {
@@ -134,7 +142,8 @@ class PostgresUpsert_VectorStores implements INode {
port: nodeData.inputs?.port as number,
username: user,
password: password,
-database: nodeData.inputs?.database as string
+database: nodeData.inputs?.database as string,
+ssl: sslConnection
}
const args = {

View File

@@ -149,9 +149,12 @@
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const qdrantApiKey = getCredentialParam('qdrantApiKey', credentialData, nodeData)
+const port = Qdrant_VectorStores.determinePortByUrl(qdrantServerUrl)
const client = new QdrantClient({
url: qdrantServerUrl,
-apiKey: qdrantApiKey
+apiKey: qdrantApiKey,
+port: port
})
const flattenDocs = docs && docs.length ? flatten(docs) : []
@@ -198,9 +201,12 @@
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const qdrantApiKey = getCredentialParam('qdrantApiKey', credentialData, nodeData)
+const port = Qdrant_VectorStores.determinePortByUrl(qdrantServerUrl)
const client = new QdrantClient({
url: qdrantServerUrl,
-apiKey: qdrantApiKey
+apiKey: qdrantApiKey,
+port: port
})
const dbConfig: QdrantLibArgs = {
@@ -242,6 +248,28 @@
}
return vectorStore
}
+    /**
+     * Determine the port number from the given URL.
+     *
+     * Without this, qdrant-client.js falls back to port 6663 when the URL specifies port 443 or 80,
+     * because Node's URL parser returns an empty string for a protocol's default port.
+     * See: https://stackoverflow.com/questions/59104197/nodejs-new-url-urlhttps-myurl-com80-lists-the-port-as-empty
+     * @param qdrantServerUrl the url to get the port from
+     */
+    static determinePortByUrl(qdrantServerUrl: string): number {
+        const parsedUrl = new URL(qdrantServerUrl)
+        let port = parsedUrl.port ? parseInt(parsedUrl.port) : 6663
+        if (parsedUrl.protocol === 'https:' && parsedUrl.port === '') {
+            port = 443
+        }
+        if (parsedUrl.protocol === 'http:' && parsedUrl.port === '') {
+            port = 80
+        }
+        return port
+    }
}
module.exports = { nodeClass: Qdrant_VectorStores }
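
The behavior determinePortByUrl works around is easy to reproduce: the WHATWG URL parser used by Node normalizes a protocol's default port to an empty string. A minimal sketch (hostnames are placeholders):

// parsedUrl.port is '' whenever the port matches the protocol default.
console.log(new URL('https://qdrant.example.com:443').port)  // ''
console.log(new URL('http://qdrant.example.com:80').port)    // ''
console.log(new URL('https://qdrant.example.com:6333').port) // '6333'

// determinePortByUrl therefore yields 443, 80, and 6333 for these URLs
// instead of letting the client substitute its own fallback port.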

View File

@@ -26,8 +26,8 @@
"@gomomento/sdk-core": "^1.51.1",
"@google-ai/generativelanguage": "^0.2.1",
"@huggingface/inference": "^2.6.1",
"@langchain/google-genai": "^0.0.3",
"@langchain/mistralai": "^0.0.3",
"@langchain/google-genai": "^0.0.6",
"@langchain/mistralai": "^0.0.6",
"@notionhq/client": "^2.2.8",
"@opensearch-project/opensearch": "^1.2.0",
"@pinecone-database/pinecone": "^1.1.1",
@@ -52,10 +52,10 @@
"html-to-text": "^9.0.5",
"husky": "^8.0.3",
"ioredis": "^5.3.2",
"langchain": "^0.0.213",
"langfuse": "^1.2.0",
"langfuse-langchain": "^1.0.31",
"langsmith": "^0.0.49",
"langchain": "^0.0.214",
"langfuse": "2.0.2",
"langfuse-langchain": "2.0.2",
"langsmith": "0.0.53",
"linkifyjs": "^4.1.1",
"llmonitor": "^0.5.5",
"mammoth": "^1.5.1",

View File

@@ -538,7 +538,7 @@ export class AnalyticHandler {
if (trace) {
const generation = trace.generation({
name,
-prompt: input
+input: input
})
this.handlers['langFuse'].generation = { [generation.id]: generation }
returnIds['langFuse'].generation = generation.id
@@ -583,7 +583,7 @@
const generation: LangfuseGenerationClient | undefined = this.handlers['langFuse'].generation[returnIds['langFuse'].generation]
if (generation) {
generation.end({
-completion: output
+output: output
})
}
}
@@ -618,7 +618,7 @@
const generation: LangfuseGenerationClient | undefined = this.handlers['langFuse'].generation[returnIds['langFuse'].generation]
if (generation) {
generation.end({
-completion: error
+output: error
})
}
}
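
These renames track the langfuse 2.x SDK pinned above (2.0.2), where a generation takes input/output in place of the 1.x prompt/completion fields. A minimal sketch of the lifecycle outside Flowise (keys and values are placeholders):

import { Langfuse } from 'langfuse'

const langfuse = new Langfuse({ publicKey: 'pk-...', secretKey: 'sk-...' })
const trace = langfuse.trace({ name: 'chat-flow' })
const generation = trace.generation({ name: 'llm-call', input: 'Hello' }) // was prompt in 1.x
generation.end({ output: 'Hi there!' }) // was completion in 1.x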

View File

@@ -1,6 +1,6 @@
{
"name": "flowise",
"version": "1.4.8",
"version": "1.4.9",
"description": "Flowiseai Server",
"main": "dist/index",
"types": "dist/index.d.ts",

View File

@@ -818,7 +818,7 @@ export const findAvailableConfigs = (reactFlowNodes: IReactFlowNode[], component
*/
export const isFlowValidForStream = (reactFlowNodes: IReactFlowNode[], endingNodeData: INodeData) => {
const streamAvailableLLMs = {
-'Chat Models': ['azureChatOpenAI', 'chatOpenAI', 'chatAnthropic', 'chatOllama', 'awsChatBedrock'],
+'Chat Models': ['azureChatOpenAI', 'chatOpenAI', 'chatAnthropic', 'chatOllama', 'awsChatBedrock', 'chatMistralAI'],
LLMs: ['azureOpenAI', 'openAI', 'ollama']
}
@@ -875,7 +875,9 @@ export const getEncryptionKey = async (): Promise<string> => {
return await fs.promises.readFile(getEncryptionKeyPath(), 'utf8')
} catch (error) {
const encryptKey = generateEncryptKey()
-const defaultLocation = path.join(getUserHome(), '.flowise', 'encryption.key')
+const defaultLocation = process.env.SECRETKEY_PATH
+    ? path.join(process.env.SECRETKEY_PATH, 'encryption.key')
+    : path.join(getUserHome(), '.flowise', 'encryption.key')
await fs.promises.writeFile(defaultLocation, encryptKey)
return encryptKey
}
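
With this fix, the fallback write location honors SECRETKEY_PATH instead of always writing under the home directory, presumably matching where getEncryptionKeyPath() reads from. A minimal sketch of the resolution, using os.homedir() in place of Flowise's getUserHome() helper:

import path from 'path'
import os from 'os'

// Where encryption.key is written when no existing key can be read.
function encryptionKeyWritePath(): string {
    return process.env.SECRETKEY_PATH
        ? path.join(process.env.SECRETKEY_PATH, 'encryption.key') // e.g. SECRETKEY_PATH=/run/secrets/flowise
        : path.join(os.homedir(), '.flowise', 'encryption.key')
}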