Merge branch 'main' into maintenance/pnpm-vite-jsx
# Conflicts:
#	packages/components/package.json
commit 98bdda52d7
@@ -1,6 +1,6 @@
 {
     "name": "flowise",
-    "version": "1.4.2",
+    "version": "1.4.3",
     "private": true,
     "homepage": "https://flowiseai.com",
     "workspaces": [

@@ -12,7 +12,7 @@ class LangfuseApi implements INodeCredential {
         this.name = 'langfuseApi'
         this.version = 1.0
         this.description =
-            'Refer to <a target="_blank" href="https://langfuse.com/docs/get-started/">official guide</a> on how to get API key on Langfuse'
+            'Refer to <a target="_blank" href="https://langfuse.com/docs/flowise">integration guide</a> on how to get API keys on Langfuse'
         this.inputs = [
             {
                 label: 'Secret Key',

@@ -21,7 +21,7 @@ class ConversationalRetrievalAgent_Agents implements INode {
     constructor() {
         this.label = 'Conversational Retrieval Agent'
         this.name = 'conversationalRetrievalAgent'
-        this.version = 1.0
+        this.version = 2.0
         this.type = 'AgentExecutor'
         this.category = 'Agents'
         this.icon = 'agent.svg'

@@ -40,9 +40,9 @@ class ConversationalRetrievalAgent_Agents implements INode {
                 type: 'BaseChatMemory'
             },
             {
-                label: 'OpenAI Chat Model',
+                label: 'OpenAI/Azure Chat Model',
                 name: 'model',
-                type: 'ChatOpenAI'
+                type: 'ChatOpenAI | AzureChatOpenAI'
             },
             {
                 label: 'System Message',

@@ -20,11 +20,11 @@ class OpenAIFunctionAgent_Agents implements INode {
     constructor() {
         this.label = 'OpenAI Function Agent'
         this.name = 'openAIFunctionAgent'
-        this.version = 1.0
+        this.version = 2.0
         this.type = 'AgentExecutor'
         this.category = 'Agents'
         this.icon = 'openai.png'
-        this.description = `An agent that uses OpenAI's Function Calling functionality to pick the tool and args to call`
+        this.description = `An agent that uses Function Calling to pick the tool and args to call`
         this.baseClasses = [this.type, ...getBaseClasses(AgentExecutor)]
         this.inputs = [
             {

@@ -39,11 +39,9 @@ class OpenAIFunctionAgent_Agents implements INode {
                 type: 'BaseChatMemory'
             },
             {
-                label: 'OpenAI Chat Model',
+                label: 'OpenAI/Azure Chat Model',
                 name: 'model',
-                description:
-                    'Only works with gpt-3.5-turbo-0613 and gpt-4-0613. Refer <a target="_blank" href="https://platform.openai.com/docs/guides/gpt/function-calling">docs</a> for more info',
-                type: 'BaseChatModel'
+                type: 'ChatOpenAI | AzureChatOpenAI'
             },
             {
                 label: 'System Message',

@@ -1,7 +1,7 @@
 import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
 import { getBaseClasses, handleEscapeCharacters } from '../../../src/utils'
 import { LLMChain } from 'langchain/chains'
-import { BaseLanguageModel } from 'langchain/base_language'
+import { BaseLanguageModel, BaseLanguageModelCallOptions } from 'langchain/base_language'
 import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
 import { BaseOutputParser } from 'langchain/schema/output_parser'
 import { formatResponse, injectOutputParser } from '../../outputparsers/OutputParserHelpers'

@@ -141,7 +141,7 @@ class LLMChain_Chains implements INode {
 
 const runPrediction = async (
     inputVariables: string[],
-    chain: LLMChain<string | object>,
+    chain: LLMChain<string | object | BaseLanguageModel<any, BaseLanguageModelCallOptions>>,
     input: string,
     promptValuesRaw: ICommonObject | undefined,
     options: ICommonObject,

@@ -164,7 +164,7 @@ const runPrediction = async (
     if (moderations && moderations.length > 0) {
         try {
             // Use the output of the moderation chain as input for the LLM chain
-            input = await checkInputs(moderations, chain.llm, input)
+            input = await checkInputs(moderations, input)
         } catch (e) {
             await new Promise((resolve) => setTimeout(resolve, 500))
             streamResponse(isStreaming, e.message, socketIO, socketIOClientId)

@@ -27,7 +27,7 @@ class AWSChatBedrock_ChatModels implements INode {
     constructor() {
         this.label = 'AWS Bedrock'
        this.name = 'awsChatBedrock'
-        this.version = 2.0
+        this.version = 3.0
         this.type = 'AWSChatBedrock'
         this.icon = 'awsBedrock.png'
         this.category = 'Chat Models'

@@ -97,7 +97,8 @@ class AWSChatBedrock_ChatModels implements INode {
                 options: [
                     { label: 'anthropic.claude-instant-v1', name: 'anthropic.claude-instant-v1' },
                     { label: 'anthropic.claude-v1', name: 'anthropic.claude-v1' },
-                    { label: 'anthropic.claude-v2', name: 'anthropic.claude-v2' }
+                    { label: 'anthropic.claude-v2', name: 'anthropic.claude-v2' },
+                    { label: 'meta.llama2-13b-chat-v1', name: 'meta.llama2-13b-chat-v1' }
                 ],
                 default: 'anthropic.claude-v2'
             },

@@ -128,12 +129,14 @@ class AWSChatBedrock_ChatModels implements INode {
         const iTemperature = nodeData.inputs?.temperature as string
         const iMax_tokens_to_sample = nodeData.inputs?.max_tokens_to_sample as string
         const cache = nodeData.inputs?.cache as BaseCache
+        const streaming = nodeData.inputs?.streaming as boolean
 
         const obj: BaseBedrockInput & BaseLLMParams = {
             region: iRegion,
             model: iModel,
             maxTokens: parseInt(iMax_tokens_to_sample, 10),
-            temperature: parseFloat(iTemperature)
+            temperature: parseFloat(iTemperature),
+            streaming: streaming ?? true
         }
 
         /**

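Note: the new `streaming` input is folded into the Bedrock config as `streaming ?? true`, so leaving the toggle unset still defaults to streaming on. A minimal sketch of that defaulting, with the Flowise types reduced to a local interface (all names below are illustrative, not the package's own):

// Sketch of the `streaming ?? true` defaulting added above.
// BedrockConfig stands in for BaseBedrockInput & BaseLLMParams.
interface BedrockConfig {
    maxTokens: number
    temperature: number
    streaming: boolean
}

function buildConfig(inputs: { temperature: string; max_tokens_to_sample: string; streaming?: boolean }): BedrockConfig {
    return {
        maxTokens: parseInt(inputs.max_tokens_to_sample, 10),
        temperature: parseFloat(inputs.temperature),
        // `??` falls back only on null/undefined, so an explicit `false` survives
        streaming: inputs.streaming ?? true
    }
}

console.log(buildConfig({ temperature: '0.7', max_tokens_to_sample: '200' }).streaming) // true
console.log(buildConfig({ temperature: '0.7', max_tokens_to_sample: '200', streaming: false }).streaming) // false
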
@@ -18,7 +18,7 @@ class AWSBedrockEmbedding_Embeddings implements INode {
     constructor() {
         this.label = 'AWS Bedrock Embeddings'
         this.name = 'AWSBedrockEmbeddings'
-        this.version = 1.0
+        this.version = 2.0
         this.type = 'AWSBedrockEmbeddings'
         this.icon = 'awsBedrock.png'
         this.category = 'Embeddings'

@@ -81,7 +81,9 @@ class AWSBedrockEmbedding_Embeddings implements INode {
                 type: 'options',
                 options: [
                     { label: 'amazon.titan-embed-text-v1', name: 'amazon.titan-embed-text-v1' },
-                    { label: 'amazon.titan-embed-g1-text-02', name: 'amazon.titan-embed-g1-text-02' }
+                    { label: 'amazon.titan-embed-g1-text-02', name: 'amazon.titan-embed-g1-text-02' },
+                    { label: 'cohere.embed-english-v3', name: 'cohere.embed-english-v3' },
+                    { label: 'cohere.embed-multilingual-v3', name: 'cohere.embed-multilingual-v3' }
                 ],
                 default: 'amazon.titan-embed-text-v1'
             }

@@ -27,7 +27,7 @@ class AWSBedrock_LLMs implements INode {
     constructor() {
         this.label = 'AWS Bedrock'
         this.name = 'awsBedrock'
-        this.version = 1.2
+        this.version = 2.0
         this.type = 'AWSBedrock'
         this.icon = 'awsBedrock.png'
         this.category = 'LLMs'

@@ -98,6 +98,7 @@ class AWSBedrock_LLMs implements INode {
                     { label: 'amazon.titan-tg1-large', name: 'amazon.titan-tg1-large' },
                     { label: 'amazon.titan-e1t-medium', name: 'amazon.titan-e1t-medium' },
                     { label: 'cohere.command-text-v14', name: 'cohere.command-text-v14' },
+                    { label: 'cohere.command-light-text-v14', name: 'cohere.command-light-text-v14' },
                     { label: 'ai21.j2-grande-instruct', name: 'ai21.j2-grande-instruct' },
                     { label: 'ai21.j2-jumbo-instruct', name: 'ai21.j2-jumbo-instruct' },
                     { label: 'ai21.j2-mid', name: 'ai21.j2-mid' },

@@ -1,13 +1,12 @@
-import { BaseLanguageModel } from 'langchain/base_language'
 import { Server } from 'socket.io'
 
 export abstract class Moderation {
-    abstract checkForViolations(llm: BaseLanguageModel, input: string): Promise<string>
+    abstract checkForViolations(input: string): Promise<string>
 }
 
-export const checkInputs = async (inputModerations: Moderation[], llm: BaseLanguageModel, input: string): Promise<string> => {
+export const checkInputs = async (inputModerations: Moderation[], input: string): Promise<string> => {
     for (const moderation of inputModerations) {
-        input = await moderation.checkForViolations(llm, input)
+        input = await moderation.checkForViolations(input)
     }
     return input
 }

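Note: with the `llm` parameter gone, every runner must carry its own state, and `checkInputs` simply threads the input through each runner in order. A self-contained sketch of the new contract; `DenyWordModeration` is a hypothetical runner used only to exercise the signature:

// Restated locally for the sketch; in the package these live in Moderation.ts
abstract class ModerationSketch {
    abstract checkForViolations(input: string): Promise<string>
}

const checkInputsSketch = async (moderations: ModerationSketch[], input: string): Promise<string> => {
    for (const moderation of moderations) {
        // Each runner returns the (possibly transformed) input, or throws on a violation
        input = await moderation.checkForViolations(input)
    }
    return input
}

// Hypothetical runner, for illustration only
class DenyWordModeration extends ModerationSketch {
    constructor(private denyWord: string) {
        super()
    }
    async checkForViolations(input: string): Promise<string> {
        if (input.includes(this.denyWord)) throw Error(`Input contains "${this.denyWord}"`)
        return input
    }
}

checkInputsSketch([new DenyWordModeration('secret')], 'hello world')
    .then(console.log) // 'hello world'
    .catch((e) => console.error(e.message))
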
@@ -1,5 +1,5 @@
-import { INode, INodeData, INodeParams } from '../../../src/Interface'
-import { getBaseClasses } from '../../../src'
+import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
+import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src'
 import { Moderation } from '../Moderation'
 import { OpenAIModerationRunner } from './OpenAIModerationRunner'
 

@@ -12,6 +12,7 @@ class OpenAIModeration implements INode {
     icon: string
     category: string
     baseClasses: string[]
+    credential: INodeParams
     inputs: INodeParams[]
 
     constructor() {

@@ -23,6 +24,12 @@ class OpenAIModeration implements INode {
         this.category = 'Moderation'
         this.description = 'Check whether content complies with OpenAI usage policies.'
         this.baseClasses = [this.type, ...getBaseClasses(Moderation)]
+        this.credential = {
+            label: 'Connect Credential',
+            name: 'credential',
+            type: 'credential',
+            credentialNames: ['openAIApi']
+        }
         this.inputs = [
             {
                 label: 'Error Message',

@@ -35,8 +42,11 @@ class OpenAIModeration implements INode {
         ]
     }
 
-    async init(nodeData: INodeData): Promise<any> {
-        const runner = new OpenAIModerationRunner()
+    async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
+        const credentialData = await getCredentialData(nodeData.credential ?? '', options)
+        const openAIApiKey = getCredentialParam('openAIApiKey', credentialData, nodeData)
+
+        const runner = new OpenAIModerationRunner(openAIApiKey)
         const moderationErrorMessage = nodeData.inputs?.moderationErrorMessage as string
         if (moderationErrorMessage) runner.setErrorMessage(moderationErrorMessage)
         return runner

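Note: `init` now resolves the OpenAI key from the node's attached credential rather than reading it off a connected LLM. A rough sketch of that lookup with the two credential helpers stubbed, since their real implementations are package internals (the override order shown, node inputs before stored credential, is an assumption):

type Obj = Record<string, any>

// Stub: in Flowise, getCredentialData decrypts the stored credential record
async function getCredentialDataStub(credentialId: string, options: Obj): Promise<Obj> {
    return options.credentialStore?.[credentialId] ?? {}
}

// Stub: assumed to prefer a node-level override, then the stored credential
function getCredentialParamStub(name: string, credentialData: Obj, nodeData: Obj): string {
    return nodeData.inputs?.[name] ?? credentialData[name] ?? ''
}

async function resolveOpenAIKey(nodeData: Obj, options: Obj): Promise<string> {
    const credentialData = await getCredentialDataStub(nodeData.credential ?? '', options)
    return getCredentialParamStub('openAIApiKey', credentialData, nodeData)
}

resolveOpenAIKey(
    { credential: 'cred-1', inputs: {} },
    { credentialStore: { 'cred-1': { openAIApiKey: 'sk-...' } } }
).then(console.log) // 'sk-...'
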
@@ -1,18 +1,21 @@
 import { Moderation } from '../Moderation'
-import { BaseLanguageModel } from 'langchain/base_language'
 import { OpenAIModerationChain } from 'langchain/chains'
 
 export class OpenAIModerationRunner implements Moderation {
+    private openAIApiKey = ''
     private moderationErrorMessage: string = "Text was found that violates OpenAI's content policy."
 
-    async checkForViolations(llm: BaseLanguageModel, input: string): Promise<string> {
-        const openAIApiKey = (llm as any).openAIApiKey
-        if (!openAIApiKey) {
+    constructor(openAIApiKey: string) {
+        this.openAIApiKey = openAIApiKey
+    }
+
+    async checkForViolations(input: string): Promise<string> {
+        if (!this.openAIApiKey) {
             throw Error('OpenAI API key not found')
         }
         // Create a new instance of the OpenAIModerationChain
         const moderation = new OpenAIModerationChain({
-            openAIApiKey: openAIApiKey,
+            openAIApiKey: this.openAIApiKey,
             throwError: false // If set to true, the call will throw an error when the moderation chain detects violating content. If set to false, violating content will return "Text was found that violates OpenAI's content policy.".
         })
         // Send the user's input to the moderation chain and wait for the result

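Note: because the key now arrives through the constructor, the moderation node no longer needs a ChatOpenAI connection at all. A usage sketch under the new shape, assuming the runner is imported from the package's moderation module; the env-var key and custom message are illustrative:

import { OpenAIModerationRunner } from './OpenAIModerationRunner'

async function demo() {
    // In the app the key comes from a Flowise credential; env var used here for illustration
    const runner = new OpenAIModerationRunner(process.env.OPENAI_API_KEY ?? '')
    runner.setErrorMessage('Your message was blocked by moderation.')
    try {
        const ok = await runner.checkForViolations('hello there')
        console.log('passed moderation:', ok)
    } catch (e: any) {
        console.error(e.message)
    }
}

demo()
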
@@ -1,5 +1,4 @@
 import { Moderation } from '../Moderation'
-import { BaseLanguageModel } from 'langchain/base_language'
 
 export class SimplePromptModerationRunner implements Moderation {
     private readonly denyList: string = ''

@@ -13,7 +12,7 @@ export class SimplePromptModerationRunner implements Moderation {
         this.moderationErrorMessage = moderationErrorMessage
     }
 
-    async checkForViolations(_: BaseLanguageModel, input: string): Promise<string> {
+    async checkForViolations(input: string): Promise<string> {
         this.denyList.split('\n').forEach((denyListItem) => {
             if (denyListItem && denyListItem !== '' && input.includes(denyListItem)) {
                 throw Error(this.moderationErrorMessage)

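Note: the deny-list runner keeps its behavior and just drops the unused model parameter. A standalone sketch of the same check (constructor shape inferred from the surrounding hunks, so treat it as illustrative):

class SimplePromptModerationSketch {
    constructor(
        private readonly denyList: string,
        private readonly moderationErrorMessage: string = 'Cannot process input!'
    ) {}

    async checkForViolations(input: string): Promise<string> {
        // Throws on the first deny-list entry found in the input, otherwise passes it through
        this.denyList.split('\n').forEach((denyListItem) => {
            if (denyListItem && denyListItem !== '' && input.includes(denyListItem)) {
                throw Error(this.moderationErrorMessage)
            }
        })
        return input
    }
}

new SimplePromptModerationSketch('ignore previous instructions\nsystem prompt')
    .checkForViolations('what is the system prompt?')
    .then(console.log)
    .catch((e) => console.error(e.message)) // 'Cannot process input!'
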
@@ -1,6 +1,6 @@
 import { BaseOutputParser } from 'langchain/schema/output_parser'
 import { LLMChain } from 'langchain/chains'
-import { BaseLanguageModel } from 'langchain/base_language'
+import { BaseLanguageModel, BaseLanguageModelCallOptions } from 'langchain/base_language'
 import { ICommonObject } from '../../src'
 import { ChatPromptTemplate, FewShotPromptTemplate, PromptTemplate, SystemMessagePromptTemplate } from 'langchain/prompts'
 

@@ -15,7 +15,7 @@ export const formatResponse = (response: string | object): string | object => {
 
 export const injectOutputParser = (
     outputParser: BaseOutputParser<unknown>,
-    chain: LLMChain<string, BaseLanguageModel>,
+    chain: LLMChain<string | object | BaseLanguageModel<any, BaseLanguageModelCallOptions>>,
     promptValues: ICommonObject | undefined = undefined
 ) => {
     if (outputParser && chain.prompt) {

@@ -31,7 +31,8 @@ class InMemoryVectorStore_VectorStores implements INode {
                 label: 'Document',
                 name: 'document',
                 type: 'Document',
-                list: true
+                list: true,
+                optional: true
             },
             {
                 label: 'Embeddings',

@@ -1,6 +1,6 @@
 {
     "name": "flowise-components",
-    "version": "1.4.2",
+    "version": "1.4.3",
     "description": "Flowiseai Components",
     "main": "dist/src/index",
     "types": "dist/src/index.d.ts",

@@ -26,7 +26,8 @@
         "@dqbd/tiktoken": "^1.0.7",
         "@elastic/elasticsearch": "^8.9.0",
         "@getzep/zep-js": "^0.6.3",
-        "@gomomento/sdk": "^1.40.2",
+        "@gomomento/sdk": "^1.51.1",
+        "@gomomento/sdk-core": "^1.51.1",
         "@google-ai/generativelanguage": "^0.2.1",
         "@huggingface/inference": "^2.6.1",
         "@notionhq/client": "^2.2.8",

@@ -55,7 +56,7 @@
         "html-to-text": "^9.0.5",
         "ioredis": "^5.3.2",
         "jsdom": "^22.1.0",
-        "langchain": "^0.0.165",
+        "langchain": "^0.0.196",
         "langfuse-langchain": "^1.0.31",
         "langsmith": "^0.0.32",
         "linkifyjs": "^4.1.1",

@@ -334,7 +334,7 @@
             "id": "openAIFunctionAgent_0",
             "label": "OpenAI Function Agent",
             "name": "openAIFunctionAgent",
-            "version": 1,
+            "version": 2,
             "type": "AgentExecutor",
             "baseClasses": ["AgentExecutor", "BaseChain"],
             "category": "Agents",

@@ -365,11 +365,10 @@
                     "id": "openAIFunctionAgent_0-input-memory-BaseChatMemory"
                 },
                 {
-                    "label": "OpenAI Chat Model",
+                    "label": "OpenAI/Azure Chat Model",
                     "name": "model",
-                    "description": "Only works with gpt-3.5-turbo-0613 and gpt-4-0613. Refer <a target=\"_blank\" href=\"https://platform.openai.com/docs/guides/gpt/function-calling\">docs</a> for more info",
-                    "type": "BaseChatModel",
-                    "id": "openAIFunctionAgent_0-input-model-BaseChatModel"
+                    "type": "ChatOpenAI | AzureChatOpenAI",
+                    "id": "openAIFunctionAgent_0-input-model-ChatOpenAI | AzureChatOpenAI"
                 }
             ],
             "inputs": {

|||
|
|
@ -98,7 +98,7 @@
|
|||
"data": {
|
||||
"id": "conversationalRetrievalAgent_0",
|
||||
"label": "Conversational Retrieval Agent",
|
||||
"version": 1,
|
||||
"version": 2,
|
||||
"name": "conversationalRetrievalAgent",
|
||||
"type": "AgentExecutor",
|
||||
"baseClasses": ["AgentExecutor", "BaseChain", "Runnable"],
|
||||
|
|
@@ -130,10 +130,10 @@
                     "id": "conversationalRetrievalAgent_0-input-memory-BaseChatMemory"
                 },
                 {
-                    "label": "OpenAI Chat Model",
+                    "label": "OpenAI/Azure Chat Model",
                     "name": "model",
-                    "type": "ChatOpenAI",
-                    "id": "conversationalRetrievalAgent_0-input-model-ChatOpenAI"
+                    "type": "ChatOpenAI | AzureChatOpenAI",
+                    "id": "conversationalRetrievalAgent_0-input-model-ChatOpenAI | AzureChatOpenAI"
                 }
             ],
             "inputs": {

|||
|
|
@ -206,7 +206,7 @@
|
|||
"id": "openAIFunctionAgent_0",
|
||||
"label": "OpenAI Function Agent",
|
||||
"name": "openAIFunctionAgent",
|
||||
"version": 1,
|
||||
"version": 2,
|
||||
"type": "AgentExecutor",
|
||||
"baseClasses": ["AgentExecutor", "BaseChain"],
|
||||
"category": "Agents",
|
||||
|
|
@@ -237,11 +237,10 @@
                     "id": "openAIFunctionAgent_0-input-memory-BaseChatMemory"
                 },
                 {
-                    "label": "OpenAI Chat Model",
+                    "label": "OpenAI/Azure Chat Model",
                     "name": "model",
-                    "description": "Only works with gpt-3.5-turbo-0613 and gpt-4-0613. Refer <a target=\"_blank\" href=\"https://platform.openai.com/docs/guides/gpt/function-calling\">docs</a> for more info",
-                    "type": "BaseChatModel",
-                    "id": "openAIFunctionAgent_0-input-model-BaseChatModel"
+                    "type": "ChatOpenAI | AzureChatOpenAI",
+                    "id": "openAIFunctionAgent_0-input-model-ChatOpenAI | AzureChatOpenAI"
                 }
             ],
             "inputs": {

|||
|
|
@ -1,6 +1,6 @@
|
|||
{
|
||||
"name": "flowise",
|
||||
"version": "1.4.2",
|
||||
"version": "1.4.3",
|
||||
"description": "Flowiseai Server",
|
||||
"main": "dist/index",
|
||||
"types": "dist/index.d.ts",
|
||||
|
|
|
|||
|
|
@@ -844,7 +844,7 @@ export const findAvailableConfigs = (reactFlowNodes: IReactFlowNode[], component
 */
 export const isFlowValidForStream = (reactFlowNodes: IReactFlowNode[], endingNodeData: INodeData) => {
     const streamAvailableLLMs = {
-        'Chat Models': ['azureChatOpenAI', 'chatOpenAI', 'chatAnthropic', 'chatOllama'],
+        'Chat Models': ['azureChatOpenAI', 'chatOpenAI', 'chatAnthropic', 'chatOllama', 'awsChatBedrock'],
         LLMs: ['azureOpenAI', 'openAI', 'ollama']
     }
 

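Note: adding 'awsChatBedrock' to this allow-list is what lets the node's new streaming flag reach the client. A reduced sketch of the lookup the validator performs; the real isFlowValidForStream also walks the flow graph, which is omitted here:

const streamAvailableLLMs: Record<string, string[]> = {
    'Chat Models': ['azureChatOpenAI', 'chatOpenAI', 'chatAnthropic', 'chatOllama', 'awsChatBedrock'],
    LLMs: ['azureOpenAI', 'openAI', 'ollama']
}

function isStreamCapable(category: string, nodeName: string): boolean {
    return streamAvailableLLMs[category]?.includes(nodeName) ?? false
}

console.log(isStreamCapable('Chat Models', 'awsChatBedrock')) // true — new in this change
console.log(isStreamCapable('LLMs', 'awsBedrock')) // false — the plain Bedrock LLM node is not in the list
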
@@ -1,6 +1,6 @@
 {
     "name": "flowise-ui",
-    "version": "1.4.0",
+    "version": "1.4.1",
     "license": "SEE LICENSE IN LICENSE.md",
     "homepage": "https://flowiseai.com",
     "author": {

@@ -68,10 +68,14 @@ const AddNodes = ({ nodesData, node }) => {
             else newNodes.push(vsNode)
         }
         delete obj['Vector Stores']
+        if (deprecatingNodes.length) {
             obj['Vector Stores;DEPRECATING'] = deprecatingNodes
             accordianCategories['Vector Stores;DEPRECATING'] = isFilter ? true : false
+        }
+        if (newNodes.length) {
             obj['Vector Stores;NEW'] = newNodes
             accordianCategories['Vector Stores;NEW'] = isFilter ? true : false
+        }
         setNodes(obj)
     }
 

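Note: the sidebar now creates the 'Vector Stores;NEW' and 'Vector Stores;DEPRECATING' sections only when they are non-empty, so empty accordions never render. A data-only sketch of that guard (the real code runs on node objects inside a React component; names here are illustrative):

function splitVectorStores(deprecatingNodes: string[], newNodes: string[]): Record<string, string[]> {
    const obj: Record<string, string[]> = {}
    // Only create a section when it has members
    if (deprecatingNodes.length) obj['Vector Stores;DEPRECATING'] = deprecatingNodes
    if (newNodes.length) obj['Vector Stores;NEW'] = newNodes
    return obj
}

console.log(splitVectorStores([], ['inMemoryVectorStore']))
// { 'Vector Stores;NEW': [ 'inMemoryVectorStore' ] }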