diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 2c91906c9..04cb80b4d 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -141,7 +141,6 @@ Flowise supports different environment variables to configure your instance. You
 | DATABASE_SSL | Database connection over SSL (when DATABASE_TYPE is postgres) | Boolean | false |
 | SECRETKEY_PATH | Location where the encryption key (used to encrypt/decrypt credentials) is saved | String | `your-path/Flowise/packages/server` |
 | FLOWISE_SECRETKEY_OVERWRITE | Encryption key to be used instead of the key stored in SECRETKEY_PATH | String |
-| LOCALAI_CHAT_MODELS | JSON-encoded string representing an array of chat models for LocalAI. Each object in the array should have a 'label' and a 'name' property. | String | '[]' (empty array) |
 
 You can also specify the env variables when using `npx`. For example:
 
diff --git a/packages/components/nodes/chatmodels/ChatLocalAI/ChatLocalAI.ts b/packages/components/nodes/chatmodels/ChatLocalAI/ChatLocalAI.ts
index 258db1f80..c44f03ce1 100644
--- a/packages/components/nodes/chatmodels/ChatLocalAI/ChatLocalAI.ts
+++ b/packages/components/nodes/chatmodels/ChatLocalAI/ChatLocalAI.ts
@@ -33,9 +33,6 @@ class ChatLocalAI_ChatModels implements INode {
             credentialNames: ['LocalAIApi'],
             optional: true
         }
-
-        const modelOptions = JSON.parse(process.env.LOCALAI_CHAT_MODELS || '[]');
-
         this.inputs = [
             {
                 label: 'Cache',
@@ -52,10 +49,8 @@ class ChatLocalAI_ChatModels implements INode {
             {
                 label: 'Model Name',
                 name: 'modelName',
-                type: 'options',
-                options: modelOptions,
-                default: modelOptions.length > 0 ? modelOptions[0].name : '',
-                optional: true
+                type: 'string',
+                placeholder: 'gpt4all-lora-quantized.bin'
             },
             {
                 label: 'Temperature',
@@ -99,22 +94,22 @@ class ChatLocalAI_ChatModels implements INode {
         const topP = nodeData.inputs?.topP as string
         const timeout = nodeData.inputs?.timeout as string
         const basePath = nodeData.inputs?.basePath as string
-
         const credentialData = await getCredentialData(nodeData.credential ?? '', options)
-        const openAIApiKey = getCredentialParam('LocalAIApiKey', credentialData, nodeData)
+        const localAIApiKey = getCredentialParam('LocalAIApiKey', credentialData, nodeData)
 
         const cache = nodeData.inputs?.cache as BaseCache
 
-        const obj: Partial<OpenAIChatInput> & BaseLLMParams & { openAIApiKey?: string } = {
+        const obj: Partial<OpenAIChatInput> & BaseLLMParams & { localAIApiKey?: string } = {
             temperature: parseFloat(temperature),
             modelName,
-            openAIApiKey
+            openAIApiKey: 'sk-'
         }
 
         if (maxTokens) obj.maxTokens = parseInt(maxTokens, 10)
         if (topP) obj.topP = parseFloat(topP)
         if (timeout) obj.timeout = parseInt(timeout, 10)
         if (cache) obj.cache = cache
+        if (localAIApiKey) obj.openAIApiKey = localAIApiKey
 
         const model = new OpenAIChat(obj, { basePath })
 
diff --git a/packages/server/.env.example b/packages/server/.env.example
index 9b7be0ff8..6e746a4df 100644
--- a/packages/server/.env.example
+++ b/packages/server/.env.example
@@ -26,5 +26,3 @@ PORT=3000
 # LANGCHAIN_ENDPOINT=https://api.smith.langchain.com
 # LANGCHAIN_API_KEY=your_api_key
 # LANGCHAIN_PROJECT=your_project
-
-# LOCALAI_CHAT_MODELS='[{"label": "model1", "name": "model1"}, {"label": "model2", "name": "model2"}]'
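
Note: after this revert, the LocalAI model name is typed as free text in the node UI rather than picked from a dropdown driven by the LOCALAI_CHAT_MODELS env variable, and a configured LocalAI API key is forwarded as the client's openAIApiKey. A minimal sketch of the resulting call, assuming a LocalAI server at http://localhost:8080/v1 (the model name and endpoint are illustrative, not defaults from this patch):

```ts
import { OpenAIChat } from 'langchain/llms/openai'

// LocalAI exposes an OpenAI-compatible API, so the node reuses OpenAIChat and only
// redirects basePath to the LocalAI server. 'sk-' is a dummy key that satisfies the
// client and is replaced when a LocalAI credential is configured.
const model = new OpenAIChat(
    {
        temperature: 0.9,
        modelName: 'gpt4all-lora-quantized.bin', // free text, matching the new placeholder
        openAIApiKey: 'sk-'
    },
    { basePath: 'http://localhost:8080/v1' } // assumed LocalAI endpoint
)
```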