update model typecast

This commit is contained in:
Henry 2024-03-09 17:25:56 +08:00
parent c35eb0b7e5
commit 66a83f886a
2 changed files with 3 additions and 5 deletions

View File

@@ -1,8 +1,8 @@
 import { flatten } from 'lodash'
 import { ChainValues } from '@langchain/core/utils/types'
 import { AgentStep } from '@langchain/core/agents'
+import { BaseChatModel } from '@langchain/core/language_models/chat_models'
 import { RunnableSequence } from '@langchain/core/runnables'
-import { ChatOpenAI } from '@langchain/openai'
 import { Tool } from '@langchain/core/tools'
 import { ChatPromptTemplate, HumanMessagePromptTemplate, MessagesPlaceholder } from '@langchain/core/prompts'
 import { XMLAgentOutputParser } from 'langchain/agents/xml/output_parser'
@@ -139,7 +139,7 @@ const prepareAgent = async (
     flowObj: { sessionId?: string; chatId?: string; input?: string },
     chatHistory: IMessage[] = []
 ) => {
-    const model = nodeData.inputs?.model as ChatOpenAI
+    const model = nodeData.inputs?.model as BaseChatModel
     const memory = nodeData.inputs?.memory as FlowiseMemory
     const systemMessage = nodeData.inputs?.systemMessage as string
     let tools = nodeData.inputs?.tools

View File

@@ -9,7 +9,6 @@ import { additionalCallbacks, ConsoleCallbackHandler, CustomChainHandler } from
 import { getBaseClasses, handleEscapeCharacters } from '../../../src/utils'
 import { checkInputs, Moderation, streamResponse } from '../../moderation/Moderation'
 import { formatResponse, injectOutputParser } from '../../outputparsers/OutputParserHelpers'
-import { ChatOpenAI } from '../../chatmodels/ChatOpenAI/FlowiseChatOpenAI'
 import { addImagesToMessages, llmSupportsVision } from '../../../src/multiModalUtils'

 class LLMChain_Chains implements INode {
@@ -164,7 +163,6 @@ const runPrediction = async (
     const socketIO = isStreaming ? options.socketIO : undefined
     const socketIOClientId = isStreaming ? options.socketIOClientId : ''
     const moderations = nodeData.inputs?.inputModeration as Moderation[]
-    let model = nodeData.inputs?.model as ChatOpenAI

     if (moderations && moderations.length > 0) {
         try {
@@ -185,8 +183,8 @@ const runPrediction = async (
     const promptValues = handleEscapeCharacters(promptValuesRaw, true)

     if (llmSupportsVision(chain.llm)) {
-        const messageContent = addImagesToMessages(nodeData, options, model.multiModalOption)
         const visionChatModel = chain.llm as IVisionChatModal
+        const messageContent = addImagesToMessages(nodeData, options, visionChatModel.multiModalOption)
         if (messageContent?.length) {
             // Change model to gpt-4-vision && max token to higher when using gpt-4-vision
             visionChatModel.setVisionModel()