Merge branch 'main' into maintenance/pnpm-vite-jsx

# Conflicts:
#	packages/components/package.json
Henry 2023-11-24 17:33:01 +00:00
commit 98bdda52d7
22 changed files with 81 additions and 62 deletions

View File

@@ -1,6 +1,6 @@
 {
     "name": "flowise",
-    "version": "1.4.2",
+    "version": "1.4.3",
     "private": true,
     "homepage": "https://flowiseai.com",
     "workspaces": [

View File

@@ -12,7 +12,7 @@ class LangfuseApi implements INodeCredential {
         this.name = 'langfuseApi'
         this.version = 1.0
         this.description =
-            'Refer to <a target="_blank" href="https://langfuse.com/docs/get-started/">official guide</a> on how to get API key on Langfuse'
+            'Refer to <a target="_blank" href="https://langfuse.com/docs/flowise">integration guide</a> on how to get API keys on Langfuse'
         this.inputs = [
             {
                 label: 'Secret Key',

View File

@@ -21,7 +21,7 @@ class ConversationalRetrievalAgent_Agents implements INode {
     constructor() {
         this.label = 'Conversational Retrieval Agent'
         this.name = 'conversationalRetrievalAgent'
-        this.version = 1.0
+        this.version = 2.0
        this.type = 'AgentExecutor'
         this.category = 'Agents'
         this.icon = 'agent.svg'
@@ -40,9 +40,9 @@ class ConversationalRetrievalAgent_Agents implements INode {
                 type: 'BaseChatMemory'
             },
             {
-                label: 'OpenAI Chat Model',
+                label: 'OpenAI/Azure Chat Model',
                 name: 'model',
-                type: 'ChatOpenAI'
+                type: 'ChatOpenAI | AzureChatOpenAI'
             },
             {
                 label: 'System Message',

View File

@@ -20,11 +20,11 @@ class OpenAIFunctionAgent_Agents implements INode {
     constructor() {
         this.label = 'OpenAI Function Agent'
         this.name = 'openAIFunctionAgent'
-        this.version = 1.0
+        this.version = 2.0
         this.type = 'AgentExecutor'
         this.category = 'Agents'
         this.icon = 'openai.png'
-        this.description = `An agent that uses OpenAI's Function Calling functionality to pick the tool and args to call`
+        this.description = `An agent that uses Function Calling to pick the tool and args to call`
         this.baseClasses = [this.type, ...getBaseClasses(AgentExecutor)]
         this.inputs = [
             {
@@ -39,11 +39,9 @@ class OpenAIFunctionAgent_Agents implements INode {
                 type: 'BaseChatMemory'
             },
             {
-                label: 'OpenAI Chat Model',
+                label: 'OpenAI/Azure Chat Model',
                 name: 'model',
-                description:
-                    'Only works with gpt-3.5-turbo-0613 and gpt-4-0613. Refer <a target="_blank" href="https://platform.openai.com/docs/guides/gpt/function-calling">docs</a> for more info',
-                type: 'BaseChatModel'
+                type: 'ChatOpenAI | AzureChatOpenAI'
             },
             {
                 label: 'System Message',

View File

@@ -1,7 +1,7 @@
 import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
 import { getBaseClasses, handleEscapeCharacters } from '../../../src/utils'
 import { LLMChain } from 'langchain/chains'
-import { BaseLanguageModel } from 'langchain/base_language'
+import { BaseLanguageModel, BaseLanguageModelCallOptions } from 'langchain/base_language'
 import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
 import { BaseOutputParser } from 'langchain/schema/output_parser'
 import { formatResponse, injectOutputParser } from '../../outputparsers/OutputParserHelpers'
@@ -141,7 +141,7 @@ class LLMChain_Chains implements INode {

 const runPrediction = async (
     inputVariables: string[],
-    chain: LLMChain<string | object>,
+    chain: LLMChain<string | object | BaseLanguageModel<any, BaseLanguageModelCallOptions>>,
     input: string,
     promptValuesRaw: ICommonObject | undefined,
     options: ICommonObject,
@@ -164,7 +164,7 @@
     if (moderations && moderations.length > 0) {
         try {
             // Use the output of the moderation chain as input for the LLM chain
-            input = await checkInputs(moderations, chain.llm, input)
+            input = await checkInputs(moderations, input)
         } catch (e) {
             await new Promise((resolve) => setTimeout(resolve, 500))
             streamResponse(isStreaming, e.message, socketIO, socketIOClientId)

View File

@@ -27,7 +27,7 @@ class AWSChatBedrock_ChatModels implements INode {
     constructor() {
         this.label = 'AWS Bedrock'
         this.name = 'awsChatBedrock'
-        this.version = 2.0
+        this.version = 3.0
         this.type = 'AWSChatBedrock'
         this.icon = 'awsBedrock.png'
         this.category = 'Chat Models'
@@ -97,7 +97,8 @@ class AWSChatBedrock_ChatModels implements INode {
                 options: [
                     { label: 'anthropic.claude-instant-v1', name: 'anthropic.claude-instant-v1' },
                     { label: 'anthropic.claude-v1', name: 'anthropic.claude-v1' },
-                    { label: 'anthropic.claude-v2', name: 'anthropic.claude-v2' }
+                    { label: 'anthropic.claude-v2', name: 'anthropic.claude-v2' },
+                    { label: 'meta.llama2-13b-chat-v1', name: 'meta.llama2-13b-chat-v1' }
                 ],
                 default: 'anthropic.claude-v2'
             },
@@ -128,12 +129,14 @@ class AWSChatBedrock_ChatModels implements INode {
         const iTemperature = nodeData.inputs?.temperature as string
         const iMax_tokens_to_sample = nodeData.inputs?.max_tokens_to_sample as string
         const cache = nodeData.inputs?.cache as BaseCache
+        const streaming = nodeData.inputs?.streaming as boolean

         const obj: BaseBedrockInput & BaseLLMParams = {
             region: iRegion,
             model: iModel,
             maxTokens: parseInt(iMax_tokens_to_sample, 10),
-            temperature: parseFloat(iTemperature)
+            temperature: parseFloat(iTemperature),
+            streaming: streaming ?? true
         }

         /**
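The `streaming ?? true` default is worth a note: nullish coalescing only falls back when the input is `null` or `undefined`, so an explicit opt-out survives. A minimal sketch of the distinction (illustrative values only, not from this commit):

```ts
// `?? true` turns streaming on only when the node input is absent;
// `|| true` would also override an explicit `false`.
const unset: boolean | undefined = undefined
const optOut: boolean | undefined = false

console.log(unset ?? true)  // true  -- no input, streaming defaults on
console.log(optOut ?? true) // false -- the user's explicit opt-out is kept
console.log(optOut || true) // true  -- `||` would have discarded the opt-out
```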

View File

@@ -18,7 +18,7 @@ class AWSBedrockEmbedding_Embeddings implements INode {
     constructor() {
         this.label = 'AWS Bedrock Embeddings'
         this.name = 'AWSBedrockEmbeddings'
-        this.version = 1.0
+        this.version = 2.0
         this.type = 'AWSBedrockEmbeddings'
         this.icon = 'awsBedrock.png'
         this.category = 'Embeddings'
@@ -81,7 +81,9 @@ class AWSBedrockEmbedding_Embeddings implements INode {
                 type: 'options',
                 options: [
                     { label: 'amazon.titan-embed-text-v1', name: 'amazon.titan-embed-text-v1' },
-                    { label: 'amazon.titan-embed-g1-text-02', name: 'amazon.titan-embed-g1-text-02' }
+                    { label: 'amazon.titan-embed-g1-text-02', name: 'amazon.titan-embed-g1-text-02' },
+                    { label: 'cohere.embed-english-v3', name: 'cohere.embed-english-v3' },
+                    { label: 'cohere.embed-multilingual-v3', name: 'cohere.embed-multilingual-v3' }
                 ],
                 default: 'amazon.titan-embed-text-v1'
             }

View File

@@ -27,7 +27,7 @@ class AWSBedrock_LLMs implements INode {
     constructor() {
         this.label = 'AWS Bedrock'
         this.name = 'awsBedrock'
-        this.version = 1.2
+        this.version = 2.0
         this.type = 'AWSBedrock'
         this.icon = 'awsBedrock.png'
         this.category = 'LLMs'
@@ -98,6 +98,7 @@ class AWSBedrock_LLMs implements INode {
                     { label: 'amazon.titan-tg1-large', name: 'amazon.titan-tg1-large' },
                     { label: 'amazon.titan-e1t-medium', name: 'amazon.titan-e1t-medium' },
                     { label: 'cohere.command-text-v14', name: 'cohere.command-text-v14' },
+                    { label: 'cohere.command-light-text-v14', name: 'cohere.command-light-text-v14' },
                     { label: 'ai21.j2-grande-instruct', name: 'ai21.j2-grande-instruct' },
                     { label: 'ai21.j2-jumbo-instruct', name: 'ai21.j2-jumbo-instruct' },
                     { label: 'ai21.j2-mid', name: 'ai21.j2-mid' },

View File

@@ -1,13 +1,12 @@
-import { BaseLanguageModel } from 'langchain/base_language'
 import { Server } from 'socket.io'

 export abstract class Moderation {
-    abstract checkForViolations(llm: BaseLanguageModel, input: string): Promise<string>
+    abstract checkForViolations(input: string): Promise<string>
 }

-export const checkInputs = async (inputModerations: Moderation[], llm: BaseLanguageModel, input: string): Promise<string> => {
+export const checkInputs = async (inputModerations: Moderation[], input: string): Promise<string> => {
     for (const moderation of inputModerations) {
-        input = await moderation.checkForViolations(llm, input)
+        input = await moderation.checkForViolations(input)
     }
     return input
 }
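The shape of the refactor: `checkForViolations` no longer receives an LLM handle, so runners depend only on the input text, and `checkInputs` threads each runner's output into the next. A minimal sketch of the resulting contract, using a hypothetical `DenyWordRunner` that is not part of this commit:

```ts
abstract class Moderation {
    abstract checkForViolations(input: string): Promise<string>
}

// Runs each moderation in order; a runner throws to reject the input,
// or returns (possibly transformed) text for the next runner.
const checkInputs = async (inputModerations: Moderation[], input: string): Promise<string> => {
    for (const moderation of inputModerations) {
        input = await moderation.checkForViolations(input)
    }
    return input
}

// Hypothetical runner, for illustration only.
class DenyWordRunner extends Moderation {
    async checkForViolations(input: string): Promise<string> {
        if (input.includes('forbidden')) throw Error('Input contains a denied word')
        return input
    }
}

checkInputs([new DenyWordRunner()], 'hello').then(console.log) // 'hello'
```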

View File

@@ -1,5 +1,5 @@
-import { INode, INodeData, INodeParams } from '../../../src/Interface'
-import { getBaseClasses } from '../../../src'
+import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
+import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src'
 import { Moderation } from '../Moderation'
 import { OpenAIModerationRunner } from './OpenAIModerationRunner'

@@ -12,6 +12,7 @@ class OpenAIModeration implements INode {
     icon: string
     category: string
     baseClasses: string[]
+    credential: INodeParams
     inputs: INodeParams[]

     constructor() {
@@ -23,6 +24,12 @@ class OpenAIModeration implements INode {
         this.category = 'Moderation'
         this.description = 'Check whether content complies with OpenAI usage policies.'
         this.baseClasses = [this.type, ...getBaseClasses(Moderation)]
+        this.credential = {
+            label: 'Connect Credential',
+            name: 'credential',
+            type: 'credential',
+            credentialNames: ['openAIApi']
+        }
         this.inputs = [
             {
                 label: 'Error Message',
@@ -35,8 +42,11 @@ class OpenAIModeration implements INode {
         ]
     }

-    async init(nodeData: INodeData): Promise<any> {
-        const runner = new OpenAIModerationRunner()
+    async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
+        const credentialData = await getCredentialData(nodeData.credential ?? '', options)
+        const openAIApiKey = getCredentialParam('openAIApiKey', credentialData, nodeData)
+        const runner = new OpenAIModerationRunner(openAIApiKey)
         const moderationErrorMessage = nodeData.inputs?.moderationErrorMessage as string
         if (moderationErrorMessage) runner.setErrorMessage(moderationErrorMessage)
         return runner

View File

@@ -1,18 +1,21 @@
 import { Moderation } from '../Moderation'
-import { BaseLanguageModel } from 'langchain/base_language'
 import { OpenAIModerationChain } from 'langchain/chains'

 export class OpenAIModerationRunner implements Moderation {
+    private openAIApiKey = ''
     private moderationErrorMessage: string = "Text was found that violates OpenAI's content policy."

-    async checkForViolations(llm: BaseLanguageModel, input: string): Promise<string> {
-        const openAIApiKey = (llm as any).openAIApiKey
-        if (!openAIApiKey) {
+    constructor(openAIApiKey: string) {
+        this.openAIApiKey = openAIApiKey
+    }
+
+    async checkForViolations(input: string): Promise<string> {
+        if (!this.openAIApiKey) {
             throw Error('OpenAI API key not found')
         }
         // Create a new instance of the OpenAIModerationChain
         const moderation = new OpenAIModerationChain({
-            openAIApiKey: openAIApiKey,
+            openAIApiKey: this.openAIApiKey,
             throwError: false // If set to true, the call will throw an error when the moderation chain detects violating content. If set to false, violating content will return "Text was found that violates OpenAI's content policy.".
         })
         // Send the user's input to the moderation chain and wait for the result
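A short usage sketch of the new constructor injection; the import path and environment variable here are assumptions for illustration, and in Flowise itself the key comes from the node's credential, as shown in the `OpenAIModeration` hunk above:

```ts
import { OpenAIModerationRunner } from './OpenAIModerationRunner' // assumed relative path

const runner = new OpenAIModerationRunner(process.env.OPENAI_API_KEY ?? '')
runner.setErrorMessage('This input violates our content policy.')

// Resolves with the input when it passes moderation; flagged input is rejected.
runner.checkForViolations('Hello there').then(console.log)
```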

View File

@@ -1,5 +1,4 @@
 import { Moderation } from '../Moderation'
-import { BaseLanguageModel } from 'langchain/base_language'

 export class SimplePromptModerationRunner implements Moderation {
     private readonly denyList: string = ''
@@ -13,7 +12,7 @@ export class SimplePromptModerationRunner implements Moderation {
         this.moderationErrorMessage = moderationErrorMessage
     }

-    async checkForViolations(_: BaseLanguageModel, input: string): Promise<string> {
+    async checkForViolations(input: string): Promise<string> {
         this.denyList.split('\n').forEach((denyListItem) => {
             if (denyListItem && denyListItem !== '' && input.includes(denyListItem)) {
                 throw Error(this.moderationErrorMessage)

View File

@@ -1,6 +1,6 @@
 import { BaseOutputParser } from 'langchain/schema/output_parser'
 import { LLMChain } from 'langchain/chains'
-import { BaseLanguageModel } from 'langchain/base_language'
+import { BaseLanguageModel, BaseLanguageModelCallOptions } from 'langchain/base_language'
 import { ICommonObject } from '../../src'
 import { ChatPromptTemplate, FewShotPromptTemplate, PromptTemplate, SystemMessagePromptTemplate } from 'langchain/prompts'
@@ -15,7 +15,7 @@ export const formatResponse = (response: string | object): string | object => {

 export const injectOutputParser = (
     outputParser: BaseOutputParser<unknown>,
-    chain: LLMChain<string, BaseLanguageModel>,
+    chain: LLMChain<string | object | BaseLanguageModel<any, BaseLanguageModelCallOptions>>,
     promptValues: ICommonObject | undefined = undefined
 ) => {
     if (outputParser && chain.prompt) {

View File

@@ -31,7 +31,8 @@ class InMemoryVectorStore_VectorStores implements INode {
                 label: 'Document',
                 name: 'document',
                 type: 'Document',
-                list: true
+                list: true,
+                optional: true
             },
             {
                 label: 'Embeddings',

View File

@@ -1,6 +1,6 @@
 {
     "name": "flowise-components",
-    "version": "1.4.2",
+    "version": "1.4.3",
     "description": "Flowiseai Components",
     "main": "dist/src/index",
     "types": "dist/src/index.d.ts",
@@ -26,7 +26,8 @@
         "@dqbd/tiktoken": "^1.0.7",
         "@elastic/elasticsearch": "^8.9.0",
         "@getzep/zep-js": "^0.6.3",
-        "@gomomento/sdk": "^1.40.2",
+        "@gomomento/sdk": "^1.51.1",
+        "@gomomento/sdk-core": "^1.51.1",
         "@google-ai/generativelanguage": "^0.2.1",
         "@huggingface/inference": "^2.6.1",
         "@notionhq/client": "^2.2.8",
@@ -55,7 +56,7 @@
         "html-to-text": "^9.0.5",
         "ioredis": "^5.3.2",
         "jsdom": "^22.1.0",
-        "langchain": "^0.0.165",
+        "langchain": "^0.0.196",
         "langfuse-langchain": "^1.0.31",
         "langsmith": "^0.0.32",
         "linkifyjs": "^4.1.1",

View File

@@ -334,7 +334,7 @@
                 "id": "openAIFunctionAgent_0",
                 "label": "OpenAI Function Agent",
                 "name": "openAIFunctionAgent",
-                "version": 1,
+                "version": 2,
                 "type": "AgentExecutor",
                 "baseClasses": ["AgentExecutor", "BaseChain"],
                 "category": "Agents",
@@ -365,11 +365,10 @@
                         "id": "openAIFunctionAgent_0-input-memory-BaseChatMemory"
                     },
                     {
-                        "label": "OpenAI Chat Model",
+                        "label": "OpenAI/Azure Chat Model",
                         "name": "model",
-                        "description": "Only works with gpt-3.5-turbo-0613 and gpt-4-0613. Refer <a target=\"_blank\" href=\"https://platform.openai.com/docs/guides/gpt/function-calling\">docs</a> for more info",
-                        "type": "BaseChatModel",
-                        "id": "openAIFunctionAgent_0-input-model-BaseChatModel"
+                        "type": "ChatOpenAI | AzureChatOpenAI",
+                        "id": "openAIFunctionAgent_0-input-model-ChatOpenAI | AzureChatOpenAI"
                     }
                 ],
                 "inputs": {

View File

@@ -98,7 +98,7 @@
             "data": {
                 "id": "conversationalRetrievalAgent_0",
                 "label": "Conversational Retrieval Agent",
-                "version": 1,
+                "version": 2,
                 "name": "conversationalRetrievalAgent",
                 "type": "AgentExecutor",
                 "baseClasses": ["AgentExecutor", "BaseChain", "Runnable"],
@@ -130,10 +130,10 @@
                         "id": "conversationalRetrievalAgent_0-input-memory-BaseChatMemory"
                     },
                     {
-                        "label": "OpenAI Chat Model",
+                        "label": "OpenAI/Azure Chat Model",
                         "name": "model",
-                        "type": "ChatOpenAI",
-                        "id": "conversationalRetrievalAgent_0-input-model-ChatOpenAI"
+                        "type": "ChatOpenAI | AzureChatOpenAI",
+                        "id": "conversationalRetrievalAgent_0-input-model-ChatOpenAI | AzureChatOpenAI"
                     }
                 ],
                 "inputs": {

View File

@@ -206,7 +206,7 @@
                 "id": "openAIFunctionAgent_0",
                 "label": "OpenAI Function Agent",
                 "name": "openAIFunctionAgent",
-                "version": 1,
+                "version": 2,
                 "type": "AgentExecutor",
                 "baseClasses": ["AgentExecutor", "BaseChain"],
                 "category": "Agents",
@@ -237,11 +237,10 @@
                        "id": "openAIFunctionAgent_0-input-memory-BaseChatMemory"
                     },
                     {
-                        "label": "OpenAI Chat Model",
+                        "label": "OpenAI/Azure Chat Model",
                         "name": "model",
-                        "description": "Only works with gpt-3.5-turbo-0613 and gpt-4-0613. Refer <a target=\"_blank\" href=\"https://platform.openai.com/docs/guides/gpt/function-calling\">docs</a> for more info",
-                        "type": "BaseChatModel",
-                        "id": "openAIFunctionAgent_0-input-model-BaseChatModel"
+                        "type": "ChatOpenAI | AzureChatOpenAI",
+                        "id": "openAIFunctionAgent_0-input-model-ChatOpenAI | AzureChatOpenAI"
                     }
                 ],
                 "inputs": {

View File

@@ -1,6 +1,6 @@
 {
     "name": "flowise",
-    "version": "1.4.2",
+    "version": "1.4.3",
     "description": "Flowiseai Server",
     "main": "dist/index",
     "types": "dist/index.d.ts",

View File

@@ -844,7 +844,7 @@ export const findAvailableConfigs = (reactFlowNodes: IReactFlowNode[], component
  */
 export const isFlowValidForStream = (reactFlowNodes: IReactFlowNode[], endingNodeData: INodeData) => {
     const streamAvailableLLMs = {
-        'Chat Models': ['azureChatOpenAI', 'chatOpenAI', 'chatAnthropic', 'chatOllama'],
+        'Chat Models': ['azureChatOpenAI', 'chatOpenAI', 'chatAnthropic', 'chatOllama', 'awsChatBedrock'],
         LLMs: ['azureOpenAI', 'openAI', 'ollama']
     }
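How the whitelist is consumed is not shown in this hunk; a minimal sketch of the lookup it implies (assumed shape, not the actual function body):

```ts
const streamAvailableLLMs: Record<string, string[]> = {
    'Chat Models': ['azureChatOpenAI', 'chatOpenAI', 'chatAnthropic', 'chatOllama', 'awsChatBedrock'],
    LLMs: ['azureOpenAI', 'openAI', 'ollama']
}

// The ending node can stream only if its category has an entry and its name is listed.
const isStreamCapable = (category: string, name: string): boolean =>
    (streamAvailableLLMs[category] ?? []).includes(name)

console.log(isStreamCapable('Chat Models', 'awsChatBedrock')) // true -- newly whitelisted here
console.log(isStreamCapable('LLMs', 'awsBedrock'))            // false -- the LLM variant is not
```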

View File

@@ -1,6 +1,6 @@
 {
     "name": "flowise-ui",
-    "version": "1.4.0",
+    "version": "1.4.1",
     "license": "SEE LICENSE IN LICENSE.md",
     "homepage": "https://flowiseai.com",
     "author": {

View File

@@ -68,10 +68,14 @@ const AddNodes = ({ nodesData, node }) => {
                 else newNodes.push(vsNode)
             }
             delete obj['Vector Stores']
-            obj['Vector Stores;DEPRECATING'] = deprecatingNodes
-            accordianCategories['Vector Stores;DEPRECATING'] = isFilter ? true : false
-            obj['Vector Stores;NEW'] = newNodes
-            accordianCategories['Vector Stores;NEW'] = isFilter ? true : false
+            if (deprecatingNodes.length) {
+                obj['Vector Stores;DEPRECATING'] = deprecatingNodes
+                accordianCategories['Vector Stores;DEPRECATING'] = isFilter ? true : false
+            }
+            if (newNodes.length) {
+                obj['Vector Stores;NEW'] = newNodes
+                accordianCategories['Vector Stores;NEW'] = isFilter ? true : false
+            }

             setNodes(obj)
         }