diff --git a/packages/components/credentials/GoogleGenerativeAI.credential.ts b/packages/components/credentials/GoogleGenerativeAI.credential.ts
index 9a1f3f285..e5ad45bfa 100644
--- a/packages/components/credentials/GoogleGenerativeAI.credential.ts
+++ b/packages/components/credentials/GoogleGenerativeAI.credential.ts
@@ -11,7 +11,8 @@ class GoogleGenerativeAICredential implements INodeCredential {
         this.label = 'Google Generative AI'
         this.name = 'googleGenerativeAI'
         this.version = 1.0
-        this.description = 'Get your API Key here.'
+        this.description =
+            'You can get your API key from the official page here.'
         this.inputs = [
             {
                 label: 'Google AI API Key',
diff --git a/packages/components/credentials/MistralApi.credential.ts b/packages/components/credentials/MistralApi.credential.ts
new file mode 100644
index 000000000..a254f6659
--- /dev/null
+++ b/packages/components/credentials/MistralApi.credential.ts
@@ -0,0 +1,25 @@
+import { INodeParams, INodeCredential } from '../src/Interface'
+
+class MistralAICredential implements INodeCredential {
+    label: string
+    name: string
+    version: number
+    description: string
+    inputs: INodeParams[]
+
+    constructor() {
+        this.label = 'MistralAI API'
+        this.name = 'mistralAIApi'
+        this.version = 1.0
+        this.description = 'You can get your API key from the official console here.'
+        this.inputs = [
+            {
+                label: 'MistralAI API Key',
+                name: 'mistralAIAPIKey',
+                type: 'password'
+            }
+        ]
+    }
+}
+
+module.exports = { credClass: MistralAICredential }
diff --git a/packages/components/nodes/chatmodels/ChatGoogleGenerativeAI/ChatGoogleGenerativeAI.ts b/packages/components/nodes/chatmodels/ChatGoogleGenerativeAI/ChatGoogleGenerativeAI.ts
index 95ee0575d..7044645f6 100644
--- a/packages/components/nodes/chatmodels/ChatGoogleGenerativeAI/ChatGoogleGenerativeAI.ts
+++ b/packages/components/nodes/chatmodels/ChatGoogleGenerativeAI/ChatGoogleGenerativeAI.ts
@@ -49,8 +49,7 @@ class GoogleGenerativeAI_ChatModels implements INode {
                         name: 'gemini-pro'
                     }
                 ],
-                default: 'gemini-pro',
-                optional: true
+                default: 'gemini-pro'
             },
             {
                 label: 'Temperature',
diff --git a/packages/components/nodes/chatmodels/ChatMistral/ChatMistral.ts b/packages/components/nodes/chatmodels/ChatMistral/ChatMistral.ts
new file mode 100644
index 000000000..d9db85cd5
--- /dev/null
+++ b/packages/components/nodes/chatmodels/ChatMistral/ChatMistral.ts
@@ -0,0 +1,151 @@
+import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
+import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
+import { BaseCache } from 'langchain/schema'
+import { ChatMistralAI, ChatMistralAIInput } from '@langchain/mistralai'
+
+class ChatMistral_ChatModels implements INode {
+    label: string
+    name: string
+    version: number
+    type: string
+    icon: string
+    category: string
+    description: string
+    baseClasses: string[]
+    credential: INodeParams
+    inputs: INodeParams[]
+
+    constructor() {
+        this.label = 'ChatMistralAI'
+        this.name = 'chatMistralAI'
+        this.version = 1.0
+        this.type = 'ChatMistralAI'
+        this.icon = 'mistralai.png'
+        this.category = 'Chat Models'
+        this.description = 'Wrapper around Mistral large language models that use the Chat endpoint'
+        this.baseClasses = [this.type, ...getBaseClasses(ChatMistralAI)]
+        this.credential = {
+            label: 'Connect Credential',
+            name: 'credential',
+            type: 'credential',
+            credentialNames: ['mistralAIApi']
+        }
+        this.inputs = [
+            {
+                label: 'Cache',
+                name: 'cache',
+                type: 'BaseCache',
+                optional: true
+            },
+            {
+                label: 'Model Name',
+                name: 'modelName',
+                type: 'options',
+                options: [
+                    {
+                        label: 'mistral-tiny',
+                        name: 'mistral-tiny'
+                    },
+                    {
+                        label: 'mistral-small',
+                        name: 'mistral-small'
+                    },
+                    {
+                        label: 'mistral-medium',
+                        name: 'mistral-medium'
+                    }
+                ],
+                default: 'mistral-tiny'
+            },
+            {
+                label: 'Temperature',
+                name: 'temperature',
+                type: 'number',
+                description:
+                    'What sampling temperature to use, between 0.0 and 1.0. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.',
+                step: 0.1,
+                default: 0.9,
+                optional: true
+            },
+            {
+                label: 'Max Output Tokens',
+                name: 'maxOutputTokens',
+                type: 'number',
+                description: 'The maximum number of tokens to generate in the completion.',
+                step: 1,
+                optional: true,
+                additionalParams: true
+            },
+            {
+                label: 'Top Probability',
+                name: 'topP',
+                type: 'number',
+                description:
+                    'Nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.',
+                step: 0.1,
+                optional: true,
+                additionalParams: true
+            },
+            {
+                label: 'Random Seed',
+                name: 'randomSeed',
+                type: 'number',
+                description: 'The seed to use for random sampling. If set, different calls will generate deterministic results.',
+                step: 1,
+                optional: true,
+                additionalParams: true
+            },
+            {
+                label: 'Safe Mode',
+                name: 'safeMode',
+                type: 'boolean',
+                description: 'Whether to inject a safety prompt before all conversations.',
+                optional: true,
+                additionalParams: true
+            },
+            {
+                label: 'Override Endpoint',
+                name: 'overrideEndpoint',
+                type: 'string',
+                optional: true,
+                additionalParams: true
+            }
+        ]
+    }
+
+    async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
+        const credentialData = await getCredentialData(nodeData.credential ?? '', options)
+        const apiKey = getCredentialParam('mistralAIAPIKey', credentialData, nodeData)
+
+        const temperature = nodeData.inputs?.temperature as string
+        const modelName = nodeData.inputs?.modelName as string
+        const maxOutputTokens = nodeData.inputs?.maxOutputTokens as string
+        const topP = nodeData.inputs?.topP as string
+        const safeMode = nodeData.inputs?.safeMode as boolean
+        const randomSeed = nodeData.inputs?.randomSeed as string
+        const overrideEndpoint = nodeData.inputs?.overrideEndpoint as string
+        // Waiting on a fix from LangChain to enable streaming
+        const streaming = nodeData.inputs?.streaming as boolean
+
+        const cache = nodeData.inputs?.cache as BaseCache
+
+        const obj: ChatMistralAIInput = {
+            apiKey: apiKey,
+            modelName: modelName
+        }
+
+        if (maxOutputTokens) obj.maxTokens = parseInt(maxOutputTokens, 10)
+        if (topP) obj.topP = parseFloat(topP)
+        if (cache) obj.cache = cache
+        if (temperature) obj.temperature = parseFloat(temperature)
+        if (randomSeed) obj.randomSeed = parseFloat(randomSeed)
+        if (safeMode) obj.safeMode = safeMode
+        if (overrideEndpoint) obj.endpoint = overrideEndpoint
+
+        const model = new ChatMistralAI(obj)
+
+        return model
+    }
+}
+
+module.exports = { nodeClass: ChatMistral_ChatModels }
diff --git a/packages/components/nodes/chatmodels/ChatMistral/mistralai.png b/packages/components/nodes/chatmodels/ChatMistral/mistralai.png
new file mode 100644
index 000000000..1019f495d
Binary files /dev/null and b/packages/components/nodes/chatmodels/ChatMistral/mistralai.png differ
diff --git a/packages/components/nodes/embeddings/MistralEmbedding/MistralEmbedding.ts b/packages/components/nodes/embeddings/MistralEmbedding/MistralEmbedding.ts
new file mode 100644
index 000000000..d0a0198c3
--- /dev/null
+++ b/packages/components/nodes/embeddings/MistralEmbedding/MistralEmbedding.ts
@@ -0,0 +1,95 @@
+import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
+import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
+import { MistralAIEmbeddings, MistralAIEmbeddingsParams } from '@langchain/mistralai'
+
+class MistralEmbedding_Embeddings implements INode {
+    label: string
+    name: string
+    version: number
+    type: string
+    icon: string
+    category: string
+    description: string
+    baseClasses: string[]
+    inputs: INodeParams[]
+    credential: INodeParams
+
+    constructor() {
+        this.label = 'MistralAI Embeddings'
+        this.name = 'mistralAIEmbeddings'
+        this.version = 1.0
+        this.type = 'MistralAIEmbeddings'
+        this.icon = 'mistralai.png'
+        this.category = 'Embeddings'
+        this.description = 'MistralAI API to generate embeddings for a given text'
+        this.baseClasses = [this.type, ...getBaseClasses(MistralAIEmbeddings)]
+        this.credential = {
+            label: 'Connect Credential',
+            name: 'credential',
+            type: 'credential',
+            credentialNames: ['mistralAIApi']
+        }
+        this.inputs = [
+            {
+                label: 'Model Name',
+                name: 'modelName',
+                type: 'options',
+                options: [
+                    {
+                        label: 'mistral-embed',
+                        name: 'mistral-embed'
+                    }
+                ],
+                default: 'mistral-embed'
+            },
+            {
+                label: 'Batch Size',
+                name: 'batchSize',
+                type: 'number',
+                step: 1,
+                default: 512,
+                optional: true,
+                additionalParams: true
+            },
+            {
+                label: 'Strip New Lines',
+                name: 'stripNewLines',
+                type: 'boolean',
+                default: true,
+                optional: true,
+                additionalParams: true
+            },
+            {
+                label: 'Override Endpoint',
+                name: 'overrideEndpoint',
+                type: 'string',
+                optional: true,
+                additionalParams: true
+            }
+        ]
+    }
+
+    async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
+        const modelName = nodeData.inputs?.modelName as string
+        const batchSize = nodeData.inputs?.batchSize as string
+        const stripNewLines = nodeData.inputs?.stripNewLines as boolean
+        const overrideEndpoint = nodeData.inputs?.overrideEndpoint as string
+
+        const credentialData = await getCredentialData(nodeData.credential ?? '', options)
+        const apiKey = getCredentialParam('mistralAIAPIKey', credentialData, nodeData)
+
+        const obj: MistralAIEmbeddingsParams = {
+            apiKey: apiKey,
+            modelName: modelName
+        }
+
+        if (batchSize) obj.batchSize = parseInt(batchSize, 10)
+        if (stripNewLines) obj.stripNewLines = stripNewLines
+        if (overrideEndpoint) obj.endpoint = overrideEndpoint
+
+        const model = new MistralAIEmbeddings(obj)
+        return model
+    }
+}
+
+module.exports = { nodeClass: MistralEmbedding_Embeddings }
diff --git a/packages/components/nodes/embeddings/MistralEmbedding/mistralai.png b/packages/components/nodes/embeddings/MistralEmbedding/mistralai.png
new file mode 100644
index 000000000..1019f495d
Binary files /dev/null and b/packages/components/nodes/embeddings/MistralEmbedding/mistralai.png differ
diff --git a/packages/components/package.json b/packages/components/package.json
index 6bdc908a3..cbc347ff6 100644
--- a/packages/components/package.json
+++ b/packages/components/package.json
@@ -27,6 +27,7 @@
         "@google-ai/generativelanguage": "^0.2.1",
         "@huggingface/inference": "^2.6.1",
         "@langchain/google-genai": "^0.0.3",
+        "@langchain/mistralai": "^0.0.2",
         "@notionhq/client": "^2.2.8",
         "@opensearch-project/opensearch": "^1.2.0",
         "@pinecone-database/pinecone": "^1.1.1",
diff --git a/packages/server/src/utils/index.ts b/packages/server/src/utils/index.ts
index 2bf1c04a4..5f12c66ec 100644
--- a/packages/server/src/utils/index.ts
+++ b/packages/server/src/utils/index.ts
@@ -711,6 +711,7 @@ export const findAvailableConfigs = (reactFlowNodes: IReactFlowNode[], component
 /**
  * Check to see if flow valid for stream
+ * TODO: perform this check at the component level, i.e. set streaming on the component and check it here
  * @param {IReactFlowNode[]} reactFlowNodes
  * @param {INodeData} endingNodeData
  * @returns {boolean}
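
For context, here is a minimal usage sketch of the `@langchain/mistralai` client that both new nodes wrap, independent of Flowise. This is not part of the diff: the `MISTRAL_API_KEY` environment variable is a hypothetical setup, and `invoke`/`embedQuery` come from the LangChain base classes these wrappers extend, not from this PR.

```ts
import { ChatMistralAI, MistralAIEmbeddings } from '@langchain/mistralai'
import { HumanMessage } from 'langchain/schema'

// Hypothetical setup: the API key is read from the environment rather than
// from a Flowise credential record.
const apiKey = process.env.MISTRAL_API_KEY as string

// Same fields the ChatMistral node's init() assembles into ChatMistralAIInput.
const chat = new ChatMistralAI({
    apiKey,
    modelName: 'mistral-tiny',
    temperature: 0.9
})

// Same fields the MistralEmbedding node passes as MistralAIEmbeddingsParams.
const embeddings = new MistralAIEmbeddings({
    apiKey,
    modelName: 'mistral-embed'
})

async function main() {
    const reply = await chat.invoke([new HumanMessage('Say hello in one word.')])
    console.log(reply.content)

    const vector = await embeddings.embedQuery('hello world')
    console.log(vector.length) // dimensionality of the mistral-embed vector
}

main().catch(console.error)
```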