Fix(FlowiseChatGoogleGenerativeAI): Prevent "parts must not be empty" API error in Seq Agents (#4292)

* Fix(FlowiseChatGoogleGenerativeAI): Prevent "parts must not be empty" API error in Seq Agents

* Fix: Update pnpm-lock.yaml to resolve CI issues

* map the 'function' and 'tool' roles to 'function'

* remove comment

---------

Co-authored-by: Henry <hzj94@hotmail.com>
toi500 2025-04-14 18:27:09 +02:00 committed by GitHub
parent d3510d1054
commit d71369c3b7
6 changed files with 84 additions and 69 deletions
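
For orientation, a minimal sketch (not part of this commit; names and texts are invented) of the chat-history shape that makes the Generative AI API reject a request with "parts must not be empty", which Sequential Agents could produce before this fix:

import { Content } from '@google/generative-ai'

// Hypothetical history from a Sequential Agents run: the second entry carries
// an empty parts array, which the API rejects with "parts must not be empty".
const history: Content[] = [
    { role: 'user', parts: [{ text: 'Summarise the last tool result.' }] },
    { role: 'model', parts: [] } // e.g. an agent step that returned no text
]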

View File

@@ -121,7 +121,7 @@ Flowise has 3 different modules in a single mono repository.
Flowise support different environment variables to configure your instance. You can specify the following variables in the `.env` file inside `packages/server` folder. Read [more](https://docs.flowiseai.com/environment-variables)
| Variable | Description | Type | Default |
|------------------------------------|----------------------------------------------------------------------------------|--------------------------------------------------|-------------------------------------|
| ---------------------------------- | -------------------------------------------------------------------------------- | ------------------------------------------------ | ----------------------------------- |
| PORT | The HTTP port Flowise runs on | Number | 3000 |
| CORS_ORIGINS | The allowed origins for all cross-origin HTTP calls | String | |
| IFRAME_ORIGINS | The allowed origins for iframe src embedding | String | |

View File

@@ -83,7 +83,7 @@
"pnpm": ">=9"
},
"resolutions": {
"@google/generative-ai": "^0.22.0",
"@google/generative-ai": "^0.24.0",
"@grpc/grpc-js": "^1.10.10",
"@langchain/core": "0.3.37",
"@qdrant/openapi-typescript-fetch": "1.2.6",

View File

@@ -420,12 +420,16 @@
"name": "chatGoogleGenerativeAI",
"models": [
{
"label": "gemini-2.0-flash-001",
"name": "gemini-2.0-flash-001"
"label": "gemini-2.5-pro-preview-03-25",
"name": "gemini-2.5-pro-preview-03-25"
},
{
"label": "gemini-2.0-flash-lite-001",
"name": "gemini-2.0-flash-lite-001"
"label": "gemini-2.0-flash",
"name": "gemini-2.0-flash"
},
{
"label": "gemini-2.0-flash-lite",
"name": "gemini-2.0-flash-lite"
},
{
"label": "gemini-1.5-flash",
@@ -1336,6 +1340,10 @@
{
"label": "text-embedding-004",
"name": "text-embedding-004"
},
{
"label": "gemini-embedding-exp-03-07",
"name": "gemini-embedding-exp-03-07"
}
]
},

View File

@@ -415,24 +415,18 @@ function getMessageAuthor(message: BaseMessage) {
}
function convertAuthorToRole(author: string) {
switch (author) {
/**
* Note: Gemini currently is not supporting system messages
* we will convert them to human messages and merge with following
* */
switch (author.toLowerCase()) {
case 'ai':
case 'model': // getMessageAuthor returns message.name. code ex.: return message.name ?? type;
case 'assistant':
case 'model':
return 'model'
case 'system':
case 'human':
return 'user'
case 'function':
case 'tool':
return 'function'
case 'system':
case 'human':
default:
// Instead of throwing, we return model (Needed for Multi Agent)
// throw new Error(`Unknown / unsupported author: ${author}`)
return 'model'
return 'user'
}
}
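
Read together, the added lines of this hunk give the updated helper roughly the following shape (a reconstruction for readability; whitespace and comments may differ from the actual file):

function convertAuthorToRole(author: string) {
    switch (author.toLowerCase()) {
        case 'ai':
        case 'assistant':
        case 'model':
            return 'model'
        case 'function':
        case 'tool':
            return 'function'
        case 'system':
        case 'human':
        default:
            // anything unrecognised is treated as user input instead of throwing
            return 'user'
    }
}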
@@ -520,17 +514,29 @@ function convertMessageContentToParts(message: BaseMessage, isMultimodalModel: boolean)
function checkIfEmptyContentAndSameRole(contents: Content[]) {
let prevRole = ''
const removedContents: Content[] = []
const validContents: Content[] = []
for (const content of contents) {
const role = content.role
if (content.parts.length && content.parts[0].text === '' && role === prevRole) {
removedContents.push(content)
// Skip only if completely empty
if (!content.parts || !content.parts.length) {
continue
}
prevRole = role
// Ensure role is always either 'user' or 'model'
content.role = content.role === 'model' ? 'model' : 'user'
// Handle consecutive messages
if (content.role === prevRole && validContents.length > 0) {
// Merge with previous content if same role
validContents[validContents.length - 1].parts.push(...content.parts)
continue
}
return contents.filter((content) => !removedContents.includes(content))
validContents.push(content)
prevRole = content.role
}
return validContents
}
function convertBaseMessagesToContent(messages: BaseMessage[], isMultimodalModel: boolean) {
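
Assembled from the added lines above, the rewritten validator reads roughly as follows (again a reconstruction, not a verbatim copy):

function checkIfEmptyContentAndSameRole(contents: Content[]) {
    let prevRole = ''
    const validContents: Content[] = []

    for (const content of contents) {
        // Skip only if completely empty
        if (!content.parts || !content.parts.length) {
            continue
        }

        // Ensure role is always either 'user' or 'model'
        content.role = content.role === 'model' ? 'model' : 'user'

        // Handle consecutive messages: merge with the previous content if the role repeats
        if (content.role === prevRole && validContents.length > 0) {
            validContents[validContents.length - 1].parts.push(...content.parts)
            continue
        }

        validContents.push(content)
        prevRole = content.role
    }

    return validContents
}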
@@ -568,7 +574,7 @@ function convertBaseMessagesToContent(messages: BaseMessage[], isMultimodalModel: boolean)
}
}
let actualRole = role
if (actualRole === 'function') {
if (actualRole === 'function' || actualRole === 'tool') {
// GenerativeAI API will throw an error if the role is not "user" or "model."
actualRole = 'user'
}
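
And a short, hypothetical usage example of the validator sketched above (input roles and texts are invented; in the real flow, 'function'/'tool' authors are mapped to 'user' by the hunk directly above):

// The empty 'model' entry is dropped, the 'tool' entry is normalised to 'user'
// and merged into the preceding user content, so no empty parts reach the API.
const cleaned = checkIfEmptyContentAndSameRole([
    { role: 'user', parts: [{ text: 'Run the tool.' }] },
    { role: 'model', parts: [] },                          // removed: no parts
    { role: 'tool', parts: [{ text: '{"result": 42}' }] }  // normalised to 'user'
])
// cleaned: [{ role: 'user', parts: [{ text: 'Run the tool.' }, { text: '{"result": 42}' }] }]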

View File

@@ -36,7 +36,7 @@
"@gomomento/sdk-core": "^1.51.1",
"@google-ai/generativelanguage": "^2.5.0",
"@google-cloud/storage": "^7.15.2",
"@google/generative-ai": "^0.15.0",
"@google/generative-ai": "^0.24.0",
"@huggingface/inference": "^2.6.1",
"@langchain/anthropic": "0.3.14",
"@langchain/aws": "0.1.4",
@@ -45,7 +45,7 @@
"@langchain/community": "^0.3.24",
"@langchain/core": "0.3.37",
"@langchain/exa": "^0.0.5",
"@langchain/google-genai": "0.1.9",
"@langchain/google-genai": "0.2.3",
"@langchain/google-vertexai": "^0.2.0",
"@langchain/groq": "0.1.2",
"@langchain/langgraph": "^0.0.22",

File diff suppressed because one or more lines are too long