Multimodal Fixes...removing all static methods/variables.
parent 9c874bb49a
commit 52ffa1772b
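The diff below removes the static plumbing introduced for multimodal support: `ChatOpenAI` previously kept a class-level `Map` (`chainNodeDataOptions`) keyed by the model node's id, and nodes pushed their `nodeData`/`options` into it via a static `injectChainNodeData`. After this commit, the same `{ nodeData, nodeOptions }` pair travels on each model instance through `lc_kwargs.chainData`, set by the new `inject*` helpers in `multiModalUtils`. A condensed, dependency-free sketch of the before/after pattern — the class names here are stand-ins for illustration, not the real Flowise classes:

```typescript
// Stand-in types; the real code uses Flowise's INodeData and ICommonObject.
interface MultiModalOptions {
    nodeData: object
    nodeOptions: object
}

// Before: a class-level Map shared by every instance. Concurrent flows must
// coordinate through one static registry keyed by the model node's id.
class StaticChatModel {
    private static chainNodeDataOptions: Map<string, MultiModalOptions> = new Map()
    constructor(public id: string) {}
    static inject(id: string, data: MultiModalOptions) {
        StaticChatModel.chainNodeDataOptions.set(id, data)
    }
    generate(): MultiModalOptions | undefined {
        // every instance reads shared static state
        return StaticChatModel.chainNodeDataOptions.get(this.id)
    }
}

// After: the context rides on the instance itself (Flowise stores it in
// lc_kwargs.chainData), so no shared static state remains.
class InstanceChatModel {
    lc_kwargs: { chainData?: MultiModalOptions } = {}
    generate(): MultiModalOptions | undefined {
        return this.lc_kwargs.chainData
    }
}

const model = new InstanceChatModel()
model.lc_kwargs.chainData = { nodeData: {}, nodeOptions: {} }
console.log(model.generate()) // -> { nodeData: {}, nodeOptions: {} }
```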
@@ -9,7 +9,7 @@ import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams }
 import { AgentExecutor } from '../../../src/agents'
 import { ChatConversationalAgent } from 'langchain/agents'
 import { renderTemplate } from '@langchain/core/prompts'
-import { injectChainNodeData } from '../../../src/multiModalUtils'
+import { injectAgentExecutorNodeData } from '../../../src/multiModalUtils'

 const DEFAULT_PREFIX = `Assistant is a large language model trained by OpenAI.

@@ -85,9 +85,9 @@ class ConversationalAgent_Agents implements INode {

     async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> {
         const memory = nodeData.inputs?.memory as FlowiseMemory
-        injectChainNodeData(nodeData, options)

         const executor = await prepareAgent(nodeData, { sessionId: this.sessionId, chatId: options.chatId, input }, options.chatHistory)
+        injectAgentExecutorNodeData(executor, nodeData, options)

         const loggerHandler = new ConsoleCallbackHandler(options.logger)
         const callbacks = await additionalCallbacks(nodeData, options)

@@ -5,7 +5,7 @@ import { Tool } from 'langchain/tools'
 import { BaseLanguageModel } from 'langchain/base_language'
 import { flatten } from 'lodash'
 import { additionalCallbacks } from '../../../src/handler'
-import { injectChainNodeData } from '../../../src/multiModalUtils'
+import { injectLcAgentExecutorNodeData } from '../../../src/multiModalUtils'

 class MRKLAgentChat_Agents implements INode {
     label: string

@@ -48,14 +48,14 @@ class MRKLAgentChat_Agents implements INode {
         tools = flatten(tools)
         const executor = await initializeAgentExecutorWithOptions(tools, model, {
             agentType: 'chat-zero-shot-react-description',
-            verbose: process.env.DEBUG === 'true' ? true : false
+            verbose: process.env.DEBUG === 'true'
         })
         return executor
     }

     async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> {
         const executor = nodeData.instance as AgentExecutor
-        injectChainNodeData(nodeData, options)
+        injectLcAgentExecutorNodeData(executor, nodeData, options)

         const callbacks = await additionalCallbacks(nodeData, options)

@@ -2,15 +2,14 @@ import { FlowiseMemory, ICommonObject, INode, INodeData, INodeParams } from '../
 import { ConversationChain } from 'langchain/chains'
 import { getBaseClasses, handleEscapeCharacters } from '../../../src/utils'
 import { ChatPromptTemplate, HumanMessagePromptTemplate, MessagesPlaceholder, SystemMessagePromptTemplate } from 'langchain/prompts'
+import { BaseChatModel } from 'langchain/chat_models/base'
 import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
 import { RunnableSequence } from 'langchain/schema/runnable'
 import { StringOutputParser } from 'langchain/schema/output_parser'
-import { HumanMessage } from 'langchain/schema'
 import { ConsoleCallbackHandler as LCConsoleCallbackHandler } from '@langchain/core/tracers/console'
 import { checkInputs, Moderation, streamResponse } from '../../moderation/Moderation'
 import { formatResponse } from '../../outputparsers/OutputParserHelpers'
-import { addImagesToMessages } from '../../../src/multiModalUtils'
-import { ChatOpenAI } from '../../chatmodels/ChatOpenAI/FlowiseChatOpenAI'
+import { injectRunnableNodeData } from '../../../src/multiModalUtils'

 let systemMessage = `The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.`
 const inputKey = 'input'

@@ -94,9 +93,10 @@ class ConversationChain_Chains implements INode {
     async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | object> {
         const memory = nodeData.inputs?.memory
         // injectChainNodeData(nodeData, options)

         const chain = prepareChain(nodeData, options, this.sessionId)
+        injectRunnableNodeData(chain, nodeData, options)

         const moderations = nodeData.inputs?.inputModeration as Moderation[]

         if (moderations && moderations.length > 0) {

@@ -146,7 +146,7 @@ class ConversationChain_Chains implements INode {
     }
 }

-const prepareChatPrompt = (nodeData: INodeData, humanImageMessages: HumanMessage[]) => {
+const prepareChatPrompt = (nodeData: INodeData) => {
     const memory = nodeData.inputs?.memory as FlowiseMemory
     const prompt = nodeData.inputs?.systemMessagePrompt as string
     const chatPromptTemplate = nodeData.inputs?.chatPromptTemplate as ChatPromptTemplate

@@ -154,10 +154,12 @@ const prepareChatPrompt = (nodeData: INodeData, humanImageMessages: HumanMessage
     if (chatPromptTemplate && chatPromptTemplate.promptMessages.length) {
         const sysPrompt = chatPromptTemplate.promptMessages[0]
         const humanPrompt = chatPromptTemplate.promptMessages[chatPromptTemplate.promptMessages.length - 1]
-        const messages = [sysPrompt, new MessagesPlaceholder(memory.memoryKey ?? 'chat_history'), humanPrompt]
-        if (humanImageMessages.length) messages.push(...humanImageMessages)
+        const chatPrompt = ChatPromptTemplate.fromMessages([
+            sysPrompt,
+            new MessagesPlaceholder(memory.memoryKey ?? 'chat_history'),
+            humanPrompt
+        ])

-        const chatPrompt = ChatPromptTemplate.fromMessages(messages)
         if ((chatPromptTemplate as any).promptValues) {
             // @ts-ignore
             chatPrompt.promptValues = (chatPromptTemplate as any).promptValues

@@ -166,44 +168,22 @@ const prepareChatPrompt = (nodeData: INodeData, humanImageMessages: HumanMessage
         return chatPrompt
     }

-    const messages = [
+    const chatPrompt = ChatPromptTemplate.fromMessages([
         SystemMessagePromptTemplate.fromTemplate(prompt ? prompt : systemMessage),
         new MessagesPlaceholder(memory.memoryKey ?? 'chat_history'),
         HumanMessagePromptTemplate.fromTemplate(`{${inputKey}}`)
-    ]
-    if (humanImageMessages.length) messages.push(...(humanImageMessages as any[]))
-
-    const chatPrompt = ChatPromptTemplate.fromMessages(messages)
+    ])

     return chatPrompt
 }

 const prepareChain = (nodeData: INodeData, options: ICommonObject, sessionId?: string) => {
     const chatHistory = options.chatHistory
-    let model = nodeData.inputs?.model as ChatOpenAI
+    const model = nodeData.inputs?.model as BaseChatModel
     const memory = nodeData.inputs?.memory as FlowiseMemory
     const memoryKey = memory.memoryKey ?? 'chat_history'

-    const messageContent = addImagesToMessages(nodeData, options, model.multiModalOption)
-    let humanImageMessages: HumanMessage[] = []
-
-    if (messageContent?.length) {
-        // Change model to gpt-4-vision
-        model.modelName = 'gpt-4-vision-preview'
-
-        // Change default max token to higher when using gpt-4-vision
-        model.maxTokens = 1024
-
-        for (const msg of messageContent) {
-            humanImageMessages.push(new HumanMessage({ content: [msg] }))
-        }
-    } else {
-        // revert to previous values if image upload is empty
-        model.modelName = model.configuredModel
-        model.maxTokens = model.configuredMaxToken
-    }
-
-    const chatPrompt = prepareChatPrompt(nodeData, humanImageMessages)
+    const chatPrompt = prepareChatPrompt(nodeData)
     let promptVariables = {}
     const promptValuesRaw = (chatPrompt as any).promptValues
     if (promptValuesRaw) {

@@ -227,7 +207,7 @@ const prepareChain = (nodeData: INodeData, options: ICommonObject, sessionId?: s
             },
             ...promptVariables
         },
-        prepareChatPrompt(nodeData, humanImageMessages),
+        prepareChatPrompt(nodeData),
         model,
         new StringOutputParser()
     ])

@@ -8,7 +8,7 @@ import { formatResponse, injectOutputParser } from '../../outputparsers/OutputPa
 import { BaseLLMOutputParser } from 'langchain/schema/output_parser'
 import { OutputFixingParser } from 'langchain/output_parsers'
 import { checkInputs, Moderation, streamResponse } from '../../moderation/Moderation'
-import { injectChainNodeData } from '../../../src/multiModalUtils'
+import { injectLLMChainNodeData } from '../../../src/multiModalUtils'

 class LLMChain_Chains implements INode {
     label: string

@@ -108,7 +108,7 @@ class LLMChain_Chains implements INode {
             verbose: process.env.DEBUG === 'true'
         })
         const inputVariables = chain.prompt.inputVariables as string[] // ["product"]
-        injectChainNodeData(nodeData, options)
+        injectLLMChainNodeData(nodeData, options)
         promptValues = injectOutputParser(this.outputParser, chain, promptValues)
         const res = await runPrediction(inputVariables, chain, input, promptValues, options, nodeData)
         // eslint-disable-next-line no-console

@@ -138,7 +138,7 @@ class LLMChain_Chains implements INode {
         if (!this.outputParser && outputParser) {
             this.outputParser = outputParser
         }
-        injectChainNodeData(nodeData, options)
+        injectLLMChainNodeData(nodeData, options)
         promptValues = injectOutputParser(this.outputParser, chain, promptValues)
         const res = await runPrediction(inputVariables, chain, input, promptValues, options, nodeData)
         // eslint-disable-next-line no-console

@@ -6,30 +6,15 @@ import { BaseLanguageModelInput } from 'langchain/base_language'
 import { ChatOpenAICallOptions } from '@langchain/openai/dist/chat_models'
 import { BaseMessageChunk, BaseMessageLike, HumanMessage, LLMResult } from 'langchain/schema'
 import { Callbacks } from '@langchain/core/callbacks/manager'
-import { ICommonObject, IMultiModalOption, INodeData } from '../../../src'
-import { addImagesToMessages } from '../../../src/multiModalUtils'
-
-interface MultiModalOptions {
-    chainNodeData: INodeData
-    chainNodeOptions: ICommonObject
-}
+import { IMultiModalOption } from '../../../src'
+import { addImagesToMessages, MultiModalOptions } from '../../../src/multiModalUtils'

 export class ChatOpenAI extends LangchainChatOpenAI {
-    //TODO: Should be class variables and not static
-    // public static nodeData: INodeData
-    // public static nodeOptions: ICommonObject
-    private static chainNodeDataOptions: Map<string, MultiModalOptions> = new Map()
     configuredModel: string
     configuredMaxToken?: number
     multiModalOption?: IMultiModalOption
     id: string

-    public static injectChainNodeData(nodeData: INodeData, options: ICommonObject) {
-        if (nodeData.inputs?.model.id) {
-            ChatOpenAI.chainNodeDataOptions.set(nodeData.inputs?.model.id, { chainNodeData: nodeData, chainNodeOptions: options })
-        }
-    }
-
     constructor(
         id: string,
         fields?: Partial<OpenAIChatInput> & BaseChatModelParams & { openAIApiKey?: string; multiModalOption?: IMultiModalOption },

@@ -48,15 +33,15 @@ export class ChatOpenAI extends LangchainChatOpenAI {
     }

     async generate(messages: BaseMessageLike[][], options?: string[] | ChatOpenAICallOptions, callbacks?: Callbacks): Promise<LLMResult> {
-        if (ChatOpenAI.chainNodeDataOptions.has(this.id)) {
-            await this.injectMultiModalMessages(messages, ChatOpenAI.chainNodeDataOptions.get(this.id) as MultiModalOptions)
+        if (this.lc_kwargs.chainData) {
+            await this.injectMultiModalMessages(messages, this.lc_kwargs.chainData)
         }
         return super.generate(messages, options, callbacks)
     }

     private async injectMultiModalMessages(messages: BaseMessageLike[][], nodeOptions: MultiModalOptions) {
-        const nodeData = nodeOptions.chainNodeData
-        const optionsData = nodeOptions.chainNodeOptions
+        const nodeData = nodeOptions.nodeData
+        const optionsData = nodeOptions.nodeOptions
         const messageContent = addImagesToMessages(nodeData, optionsData, this.multiModalOption)
         if (messageContent?.length) {
             if (messages[0].length > 0 && messages[0][messages[0].length - 1] instanceof HumanMessage) {

@@ -1,20 +1,55 @@
 import { ICommonObject, IFileUpload, IMultiModalOption, INodeData, MessageContentImageUrl } from './Interface'
-import { BaseChatModel } from 'langchain/chat_models/base'
-import { ChatOpenAI as LangchainChatOpenAI } from 'langchain/chat_models/openai'
 import path from 'path'
 import { getStoragePath } from './utils'
 import fs from 'fs'
 import { ChatOpenAI } from '../nodes/chatmodels/ChatOpenAI/FlowiseChatOpenAI'
+import { LLMChain } from 'langchain/chains'
+import { RunnableBinding, RunnableSequence } from 'langchain/schema/runnable'
+import { AgentExecutor as LcAgentExecutor, ChatAgent, RunnableAgent } from 'langchain/agents'
+import { AgentExecutor } from './agents'

-export const injectChainNodeData = (nodeData: INodeData, options: ICommonObject) => {
-    let model = nodeData.inputs?.model as BaseChatModel
-
-    if (model instanceof ChatOpenAI) {
-        // TODO: this should not be static, need to figure out how to pass the nodeData and options to the invoke method
-        ChatOpenAI.injectChainNodeData(nodeData, options)
-    }
-}
+export interface MultiModalOptions {
+    nodeData: INodeData
+    nodeOptions: ICommonObject
+}
+
+export const injectLLMChainNodeData = (nodeData: INodeData, options: ICommonObject) => {
+    let llmChain = nodeData.instance as LLMChain
+    ;(llmChain.llm as ChatOpenAI).lc_kwargs.chainData = { nodeData: nodeData, nodeOptions: options }
+}
+
+export const injectAgentExecutorNodeData = (agentExecutor: AgentExecutor, nodeData: INodeData, options: ICommonObject) => {
+    if (agentExecutor.agent instanceof RunnableAgent && agentExecutor.agent.runnable instanceof RunnableSequence) {
+        let rs = agentExecutor.agent.runnable as RunnableSequence
+        injectRunnableNodeData(rs, nodeData, options)
+    }
+}
+
+export const injectLcAgentExecutorNodeData = (agentExecutor: LcAgentExecutor, nodeData: INodeData, options: ICommonObject) => {
+    if (agentExecutor.agent instanceof ChatAgent) {
+        let llmChain = agentExecutor.agent.llmChain as LLMChain
+        ;(llmChain.llm as ChatOpenAI).lc_kwargs.chainData = { nodeData: nodeData, nodeOptions: options }
+    }
+}
+
+export const injectRunnableNodeData = (runnableSequence: RunnableSequence, nodeData: INodeData, options: ICommonObject) => {
+    runnableSequence.steps.forEach((step) => {
+        if (step instanceof ChatOpenAI) {
+            ;(step as ChatOpenAI).lc_kwargs.chainData = { nodeData: nodeData, nodeOptions: options }
+        }
+
+        if (step instanceof RunnableBinding) {
+            if ((step as RunnableBinding<any, any>).bound instanceof ChatOpenAI) {
+                ;((step as RunnableBinding<any, any>).bound as ChatOpenAI).lc_kwargs.chainData = {
+                    nodeData: nodeData,
+                    nodeOptions: options
+                }
+            }
+        }
+    })
+}

 export const addImagesToMessages = (
     nodeData: INodeData,
     options: ICommonObject,
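Of the new helpers, injectRunnableNodeData does the most work: it walks every step of a RunnableSequence and tags any ChatOpenAI it finds, also unwrapping a RunnableBinding in case the model was bound (for example, with stop sequences). A minimal, dependency-free sketch of that traversal, using stand-in classes rather than LangChain's real runnables:

```typescript
// Stand-in classes; LangChain's real RunnableSequence/RunnableBinding are richer.
class FakeChatModel {
    lc_kwargs: { chainData?: object } = {}
}
class FakeBinding {
    constructor(public bound: unknown) {}
}

function tagSteps(steps: unknown[], chainData: object): void {
    for (const step of steps) {
        if (step instanceof FakeChatModel) {
            step.lc_kwargs.chainData = chainData
        }
        // A bound runnable wraps the model one level deeper, so unwrap it first.
        if (step instanceof FakeBinding && step.bound instanceof FakeChatModel) {
            step.bound.lc_kwargs.chainData = chainData
        }
    }
}

const model = new FakeChatModel()
tagSteps(['prompt', new FakeBinding(model)], { nodeData: {}, nodeOptions: {} })
console.log(model.lc_kwargs.chainData) // -> { nodeData: {}, nodeOptions: {} }
```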