* Added DeepSeek R1 distill to Groq
* Added max_tokens to Groq.ts and ChatGroq_LlamaIndex.ts; updated the Groq models, removing outdated models and adding new ones such as compound-beta
* Patched OpenAI typo in ChatGroq_LlamaIndex.ts
* Patched Groq LlamaIndex
* Patched pnpm lint error
* Removed redundant image
* Update ChatGroq_LlamaIndex.ts

---------

Co-authored-by: Henry Heng <henryheng@flowiseai.com>
This commit is contained in:
parent d75e847091
commit ac9d732550
models.json

@@ -668,6 +668,22 @@
     {
         "name": "groqChat",
         "models": [
+            {
+                "label": "meta-llama/llama-4-maverick-17b-128e-instruct",
+                "name": "meta-llama/llama-4-maverick-17b-128e-instruct"
+            },
+            {
+                "label": "meta-llama/llama-4-scout-17b-16e-instruct",
+                "name": "meta-llama/llama-4-scout-17b-16e-instruct"
+            },
+            {
+                "label": "compound-beta",
+                "name": "compound-beta"
+            },
+            {
+                "label": "compound-beta-mini",
+                "name": "compound-beta-mini"
+            },
+            {
+                "label": "deepseek-r1-distill-llama-70b",
+                "name": "deepseek-r1-distill-llama-70b"
+            },
@@ -696,29 +712,13 @@
-            {
-                "label": "llama-3.2-90b-text-preview",
-                "name": "llama-3.2-90b-text-preview"
-            },
-            {
-                "label": "llama-3.1-405b-reasoning",
-                "name": "llama-3.1-405b-reasoning"
-            },
-            {
-                "label": "llama-3.1-70b-versatile",
-                "name": "llama-3.1-70b-versatile"
-            },
-            {
-                "label": "llama-3.1-8b-instant",
-                "name": "llama-3.1-8b-instant"
-            },
-            {
-                "label": "llama3-groq-70b-8192-tool-use-preview",
-                "name": "llama3-groq-70b-8192-tool-use-preview"
-            },
-            {
-                "label": "llama3-groq-8b-8192-tool-use-preview",
-                "name": "llama3-groq-8b-8192-tool-use-preview"
-            },
             {
-                "label": "gemma-7b-it",
-                "name": "gemma-7b-it"
+                "label": "gemma-2-9b-it",
+                "name": "gemma-2-9b-it"
             },
             {
                 "label": "llama3-70b-8192",
@@ -729,16 +729,16 @@
                 "name": "llama3-8b-8192"
             },
             {
-                "label": "mixtral-8x7b-32768",
-                "name": "mixtral-8x7b-32768"
+                "label": "mixtral-saba-24b",
+                "name": "mixtral-saba-24b"
             },
             {
-                "label": "meta-llama/llama-4-maverick-17b-128e-instruct",
-                "name": "meta-llama/llama-4-maverick-17b-128e-instruct"
+                "label": "qwen-qwq-32b",
+                "name": "qwen-qwq-32b"
             },
             {
-                "label": "meta-llama/llama-4-scout-17b-16e-instruct",
-                "name": "meta-llama/llama-4-scout-17b-16e-instruct"
+                "label": "allam-2-7b",
+                "name": "allam-2-7b"
             }
         ]
     },
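The entries above are plain Groq model IDs, so the additions can be exercised directly against the Groq API. A minimal sketch using the groq-sdk package, independent of Flowise; the model choice and max_tokens value here are illustrative:

import Groq from 'groq-sdk'

// groq-sdk reads GROQ_API_KEY from the environment by default (assumed set).
const groq = new Groq()

const completion = await groq.chat.completions.create({
    model: 'compound-beta', // one of the entries added to the list above
    messages: [{ role: 'user', content: 'In one sentence, what is Groq?' }],
    max_tokens: 256 // the same cap the new Max Tokens input exposes
})

console.log(completion.choices[0]?.message?.content)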
ChatGroq_LlamaIndex.ts
@@ -48,6 +48,14 @@ class ChatGroq_LlamaIndex_ChatModels implements INode {
                 step: 0.1,
                 default: 0.9,
                 optional: true
             },
+            {
+                label: 'Max Tokens',
+                name: 'maxTokens',
+                type: 'number',
+                step: 1,
+                optional: true,
+                additionalParams: true
+            }
         ]
     }
@@ -62,7 +70,7 @@ class ChatGroq_LlamaIndex_ChatModels implements INode {
     async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
         const temperature = nodeData.inputs?.temperature as string
         const modelName = nodeData.inputs?.modelName as string
-
+        const maxTokens = nodeData.inputs?.maxTokens as string
         const credentialData = await getCredentialData(nodeData.credential ?? '', options)
         const groqApiKey = getCredentialParam('groqApiKey', credentialData, nodeData)
@@ -71,7 +79,7 @@ class ChatGroq_LlamaIndex_ChatModels implements INode {
             model: modelName,
             apiKey: groqApiKey
         }
-
+        if (maxTokens) obj.maxTokens = parseInt(maxTokens, 10)
         const model = new Groq(obj)
         return model
     }
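Between them, the two ChatGroq_LlamaIndex.ts hunks add an optional Max Tokens input and thread it into the constructor options, parsing the UI string only when the field is filled so an empty input falls back to the provider default. A condensed sketch of that pattern with simplified types; ChatGroqConfig and buildGroqConfig are illustrative names, not the node's actual API:

// Simplified stand-ins for the node's option object and raw UI inputs.
interface ChatGroqConfig {
    model: string
    apiKey?: string
    temperature?: number
    maxTokens?: number
}

interface RawInputs {
    modelName: string
    temperature?: string
    maxTokens?: string
}

function buildGroqConfig(inputs: RawInputs, apiKey: string): ChatGroqConfig {
    const obj: ChatGroqConfig = {
        model: inputs.modelName,
        apiKey
    }
    if (inputs.temperature) obj.temperature = parseFloat(inputs.temperature)
    // Same guard as the diff: only set maxTokens when the user supplied a value.
    if (inputs.maxTokens) obj.maxTokens = parseInt(inputs.maxTokens, 10)
    return obj
}

// UI inputs arrive as strings; maxTokens is parsed to a number before use.
console.log(buildGroqConfig({ modelName: 'deepseek-r1-distill-llama-70b', temperature: '0.9', maxTokens: '1024' }, 'gsk_...'))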
Groq.ts
@@ -54,6 +54,14 @@ class Groq_ChatModels implements INode {
                 default: 0.9,
                 optional: true
             },
+            {
+                label: 'Max Tokens',
+                name: 'maxTokens',
+                type: 'number',
+                step: 1,
+                optional: true,
+                additionalParams: true
+            },
             {
                 label: 'Streaming',
                 name: 'streaming',
@@ -73,6 +81,7 @@ class Groq_ChatModels implements INode {
     async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
         const modelName = nodeData.inputs?.modelName as string
+        const maxTokens = nodeData.inputs?.maxTokens as string
         const cache = nodeData.inputs?.cache as BaseCache
         const temperature = nodeData.inputs?.temperature as string
         const streaming = nodeData.inputs?.streaming as boolean
@@ -86,6 +95,7 @@ class Groq_ChatModels implements INode {
             apiKey: groqApiKey,
             streaming: streaming ?? true
         }
+        if (maxTokens) obj.maxTokens = parseInt(maxTokens, 10)
         if (cache) obj.cache = cache

         const model = new ChatGroq(obj)
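The finished node hands this object to ChatGroq from @langchain/groq, which accepts maxTokens directly. A minimal usage sketch of the resulting behavior, assuming GROQ_API_KEY is set in the environment and picking one of the newly added model IDs:

import { ChatGroq } from '@langchain/groq'

// Mirrors the object built in init(); the concrete values here are illustrative.
const model = new ChatGroq({
    model: 'deepseek-r1-distill-llama-70b',
    apiKey: process.env.GROQ_API_KEY,
    temperature: 0.9,
    maxTokens: 1024, // omit to fall back to the provider default, as the guard in init() does
    streaming: true
})

const res = await model.invoke('Summarize what max_tokens controls, in one sentence.')
console.log(res.content)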