Revert model var to string, refactor for case without a key and just override if so
This commit is contained in:
parent d9b75cdf8e
commit 06201e7cf0
@@ -141,7 +141,6 @@ Flowise supports different environment variables to configure your instance. You
 | DATABASE_SSL | Database connection over SSL (when DATABASE_TYPE is postgres) | Boolean | false |
 | SECRETKEY_PATH | Location where the encryption key (used to encrypt/decrypt credentials) is saved | String | `your-path/Flowise/packages/server` |
 | FLOWISE_SECRETKEY_OVERWRITE | Encryption key to be used instead of the key stored in SECRETKEY_PATH | String | |
-| LOCALAI_CHAT_MODELS | JSON-encoded string representing an array of chat models for LocalAI. Each object in the array should have a 'label' and 'name' property. | String | '[]' (Empty Array) |

 You can also specify the env variables when using `npx`. For example:
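For reference, the removed LOCALAI_CHAT_MODELS variable held a JSON-encoded array of objects with 'label' and 'name' properties, which the node parsed at load time (see the next hunk). A minimal TypeScript sketch of that format and the parse step; the ChatModelOption name is invented here for illustration:

    interface ChatModelOption {
        label: string // display name shown in the Model Name dropdown
        name: string  // model identifier passed through to LocalAI
    }

    // Same fallback the removed code used: an unset variable parses to an empty array.
    const modelOptions: ChatModelOption[] = JSON.parse(process.env.LOCALAI_CHAT_MODELS || '[]')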
@@ -33,9 +33,6 @@ class ChatLocalAI_ChatModels implements INode {
             credentialNames: ['LocalAIApi'],
             optional: true
         }
-
-        const modelOptions = JSON.parse(process.env.LOCALAI_CHAT_MODELS || '[]');
-
         this.inputs = [
             {
                 label: 'Cache',
@@ -52,10 +49,8 @@ class ChatLocalAI_ChatModels implements INode {
             {
                 label: 'Model Name',
                 name: 'modelName',
-                type: 'options',
-                options: modelOptions,
-                default: modelOptions.length > 0 ? modelOptions[0].name : '',
-                optional: true
+                type: 'string',
+                placeholder: 'gpt4all-lora-quantized.bin'
             },
             {
                 label: 'Temperature',
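Reassembled from the deleted lines above, the dropdown variant of the Model Name input read as a single unit roughly like this (a reconstruction from the hunk, not code from the repository):

    {
        label: 'Model Name',
        name: 'modelName',
        type: 'options',
        options: modelOptions,                                        // populated from LOCALAI_CHAT_MODELS
        default: modelOptions.length > 0 ? modelOptions[0].name : '', // first configured model, if any
        optional: true
    }

Reverting to type: 'string' drops the env-driven dropdown: the user types the model name directly, with the placeholder suggesting LocalAI's example model.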
@@ -99,22 +94,22 @@ class ChatLocalAI_ChatModels implements INode {
         const topP = nodeData.inputs?.topP as string
         const timeout = nodeData.inputs?.timeout as string
         const basePath = nodeData.inputs?.basePath as string

         const credentialData = await getCredentialData(nodeData.credential ?? '', options)
-        const openAIApiKey = getCredentialParam('LocalAIApiKey', credentialData, nodeData)
+        const localAIApiKey = getCredentialParam('LocalAIApiKey', credentialData, nodeData)

         const cache = nodeData.inputs?.cache as BaseCache

-        const obj: Partial<OpenAIChatInput> & BaseLLMParams & { openAIApiKey?: string } = {
+        const obj: Partial<OpenAIChatInput> & BaseLLMParams & { localAIApiKey?: string } = {
             temperature: parseFloat(temperature),
             modelName,
-            openAIApiKey
+            openAIApiKey: 'sk-'
         }

         if (maxTokens) obj.maxTokens = parseInt(maxTokens, 10)
         if (topP) obj.topP = parseFloat(topP)
         if (timeout) obj.timeout = parseInt(timeout, 10)
         if (cache) obj.cache = cache
+        if (localAIApiKey) obj.openAIApiKey = localAIApiKey

         const model = new OpenAIChat(obj, { basePath })
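This hunk is the "override if so" part of the commit message: LocalAI itself does not require an API key, but the langchain OpenAI wrapper rejects an empty one, so the node passes the dummy 'sk-' and swaps in a real key only when a credential is configured. A standalone sketch of the same pattern; the base path and env lookup here are illustrative assumptions, not from the commit:

    import { OpenAIChat } from 'langchain/llms/openai'

    const localAIApiKey = process.env.LOCALAI_API_KEY // hypothetical lookup for this sketch

    const model = new OpenAIChat(
        {
            modelName: 'gpt4all-lora-quantized.bin',
            temperature: 0.9,
            openAIApiKey: localAIApiKey ?? 'sk-' // dummy key unless one is configured
        },
        { basePath: 'http://localhost:8080/v1' } // point the OpenAI client at a local LocalAI server
    )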
@@ -26,5 +26,3 @@ PORT=3000
 # LANGCHAIN_ENDPOINT=https://api.smith.langchain.com
 # LANGCHAIN_API_KEY=your_api_key
 # LANGCHAIN_PROJECT=your_project
-
-# LOCALAI_CHAT_MODELS='[{"label": "model1", "name": "model1"}, {"label": "model2", "name": "model2"}]'