feat: add thinking budget parameter support for ChatGoogleVertexAI (#4557)

* chore: update @langchain/google-vertexai add thinkingBudget parameter support

* Update pnpm-lock.yaml

* Update pnpm-lock.yaml

---------

Co-authored-by: Henry <hzj94@hotmail.com>
Co-authored-by: Henry Heng <henryheng@flowiseai.com>
This commit is contained in:
Yuki 2025-07-03 03:53:06 +08:00 committed by GitHub
parent 0627693133
commit 768de6140c
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
3 changed files with 102 additions and 40 deletions

View File

@@ -65,7 +65,7 @@ class GoogleVertexAI_ChatModels implements INode {
constructor() { constructor() {
this.label = 'ChatGoogleVertexAI' this.label = 'ChatGoogleVertexAI'
this.name = 'chatGoogleVertexAI' this.name = 'chatGoogleVertexAI'
this.version = 5.1 this.version = 5.2
this.type = 'ChatGoogleVertexAI' this.type = 'ChatGoogleVertexAI'
this.icon = 'GoogleVertex.svg' this.icon = 'GoogleVertex.svg'
this.category = 'Chat Models' this.category = 'Chat Models'
@@ -151,6 +151,16 @@ class GoogleVertexAI_ChatModels implements INode {
step: 1, step: 1,
optional: true, optional: true,
additionalParams: true additionalParams: true
},
{
label: 'Thinking Budget',
name: 'thinkingBudget',
type: 'number',
description: 'Number of tokens to use for thinking process (0 to disable)',
step: 1,
placeholder: '1024',
optional: true,
additionalParams: true
} }
] ]
} }
@@ -192,6 +202,7 @@ class GoogleVertexAI_ChatModels implements INode {
const cache = nodeData.inputs?.cache as BaseCache const cache = nodeData.inputs?.cache as BaseCache
const topK = nodeData.inputs?.topK as string const topK = nodeData.inputs?.topK as string
const streaming = nodeData.inputs?.streaming as boolean const streaming = nodeData.inputs?.streaming as boolean
const thinkingBudget = nodeData.inputs?.thinkingBudget as string
const allowImageUploads = nodeData.inputs?.allowImageUploads as boolean const allowImageUploads = nodeData.inputs?.allowImageUploads as boolean
@@ -211,6 +222,7 @@ class GoogleVertexAI_ChatModels implements INode {
if (topP) obj.topP = parseFloat(topP) if (topP) obj.topP = parseFloat(topP)
if (cache) obj.cache = cache if (cache) obj.cache = cache
if (topK) obj.topK = parseFloat(topK) if (topK) obj.topK = parseFloat(topK)
if (thinkingBudget) obj.thinkingBudget = parseInt(thinkingBudget, 10)
const model = new ChatVertexAI(nodeData.id, obj) const model = new ChatVertexAI(nodeData.id, obj)
model.setMultiModalOption(multiModalOption) model.setMultiModalOption(multiModalOption)

View File

@@ -45,7 +45,7 @@
"@langchain/core": "0.3.61", "@langchain/core": "0.3.61",
"@langchain/exa": "^0.0.5", "@langchain/exa": "^0.0.5",
"@langchain/google-genai": "0.2.3", "@langchain/google-genai": "0.2.3",
"@langchain/google-vertexai": "^0.2.0", "@langchain/google-vertexai": "^0.2.10",
"@langchain/groq": "0.1.2", "@langchain/groq": "0.1.2",
"@langchain/langgraph": "^0.0.22", "@langchain/langgraph": "^0.0.22",
"@langchain/mistralai": "^0.2.0", "@langchain/mistralai": "^0.2.0",

File diff suppressed because one or more lines are too long