diff --git a/packages/components/nodes/agents/XMLAgent/XMLAgent.ts b/packages/components/nodes/agents/XMLAgent/XMLAgent.ts
index 491099478..476a40da0 100644
--- a/packages/components/nodes/agents/XMLAgent/XMLAgent.ts
+++ b/packages/components/nodes/agents/XMLAgent/XMLAgent.ts
@@ -1,8 +1,8 @@
 import { flatten } from 'lodash'
 import { ChainValues } from '@langchain/core/utils/types'
 import { AgentStep } from '@langchain/core/agents'
+import { BaseChatModel } from '@langchain/core/language_models/chat_models'
 import { RunnableSequence } from '@langchain/core/runnables'
-import { ChatOpenAI } from '@langchain/openai'
 import { Tool } from '@langchain/core/tools'
 import { ChatPromptTemplate, HumanMessagePromptTemplate, MessagesPlaceholder } from '@langchain/core/prompts'
 import { XMLAgentOutputParser } from 'langchain/agents/xml/output_parser'
@@ -139,7 +139,7 @@ const prepareAgent = async (
     flowObj: { sessionId?: string; chatId?: string; input?: string },
     chatHistory: IMessage[] = []
 ) => {
-    const model = nodeData.inputs?.model as ChatOpenAI
+    const model = nodeData.inputs?.model as BaseChatModel
     const memory = nodeData.inputs?.memory as FlowiseMemory
     const systemMessage = nodeData.inputs?.systemMessage as string
     let tools = nodeData.inputs?.tools
diff --git a/packages/components/nodes/chains/LLMChain/LLMChain.ts b/packages/components/nodes/chains/LLMChain/LLMChain.ts
index dc59760c0..fa0fd61c3 100644
--- a/packages/components/nodes/chains/LLMChain/LLMChain.ts
+++ b/packages/components/nodes/chains/LLMChain/LLMChain.ts
@@ -9,7 +9,6 @@ import { additionalCallbacks, ConsoleCallbackHandler, CustomChainHandler } from
 import { getBaseClasses, handleEscapeCharacters } from '../../../src/utils'
 import { checkInputs, Moderation, streamResponse } from '../../moderation/Moderation'
 import { formatResponse, injectOutputParser } from '../../outputparsers/OutputParserHelpers'
-import { ChatOpenAI } from '../../chatmodels/ChatOpenAI/FlowiseChatOpenAI'
 import { addImagesToMessages, llmSupportsVision } from '../../../src/multiModalUtils'

 class LLMChain_Chains implements INode {
@@ -164,7 +163,6 @@ const runPrediction = async (
     const socketIO = isStreaming ? options.socketIO : undefined
     const socketIOClientId = isStreaming ? options.socketIOClientId : ''
     const moderations = nodeData.inputs?.inputModeration as Moderation[]
-    let model = nodeData.inputs?.model as ChatOpenAI

     if (moderations && moderations.length > 0) {
         try {
@@ -185,8 +183,8 @@ const runPrediction = async (
     const promptValues = handleEscapeCharacters(promptValuesRaw, true)

     if (llmSupportsVision(chain.llm)) {
-        const messageContent = addImagesToMessages(nodeData, options, model.multiModalOption)
         const visionChatModel = chain.llm as IVisionChatModal
+        const messageContent = addImagesToMessages(nodeData, options, visionChatModel.multiModalOption)
         if (messageContent?.length) {
             // Change model to gpt-4-vision && max token to higher when using gpt-4-vision
             visionChatModel.setVisionModel()
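
Both files get the same fix: stop assuming the wired-in chat model is ChatOpenAI. Casting the node input to LangChain's BaseChatModel keeps the agent provider-agnostic, and in LLMChain the vision-only field multiModalOption is now read from chain.llm after it has been narrowed to IVisionChatModal, rather than from the removed ChatOpenAI-typed model variable. A minimal sketch of this cast-and-narrow pattern, using a hypothetical VisionChatModel interface and supportsVision guard standing in for Flowise's IVisionChatModal and llmSupportsVision:

    import { BaseChatModel } from '@langchain/core/language_models/chat_models'

    // Hypothetical stand-in for Flowise's IVisionChatModal: only models that
    // actually support images expose these members.
    interface VisionChatModel extends BaseChatModel {
        multiModalOption?: Record<string, unknown>
        setVisionModel(): void
    }

    // Hypothetical runtime guard standing in for llmSupportsVision(): narrow
    // by checking for the capability rather than for a provider class.
    const supportsVision = (model: BaseChatModel): model is VisionChatModel =>
        typeof (model as VisionChatModel).setVisionModel === 'function'

    const configureModel = (model: BaseChatModel): void => {
        // Any provider's chat model is accepted here; nothing assumes ChatOpenAI.
        if (supportsVision(model)) {
            // Vision-only members are touched only after the narrowing guard.
            model.setVisionModel()
        }
    }

The guard-based narrowing is what makes the reordering in the last hunk safe: addImagesToMessages runs inside the llmSupportsVision branch, so visionChatModel.multiModalOption is only dereferenced on models that actually carry it.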