reverting all image upload logic to individual chains/agents
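Image upload handling moves out of the shared inject*NodeData helpers in multiModalUtils and back into each chain/agent node: every node now calls addImagesToMessages itself, switches ChatOpenAI to gpt-4-vision-preview with a higher max token default when image uploads are present, and restores the user-configured model and max tokens when they are not.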
This commit is contained in:
parent 8bad360796
commit b31e8715f4
@@ -11,7 +11,8 @@ import { getBaseClasses } from '../../../src/utils'
 import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
 import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams } from '../../../src/Interface'
 import { AgentExecutor } from '../../../src/agents'
-import { injectAgentExecutorNodeData } from '../../../src/multiModalUtils'
+import { ChatOpenAI } from '../../chatmodels/ChatOpenAI/FlowiseChatOpenAI'
+import { addImagesToMessages } from '../../../src/multiModalUtils'
 
 const DEFAULT_PREFIX = `Assistant is a large language model trained by OpenAI.
 
@@ -82,14 +83,19 @@ class ConversationalAgent_Agents implements INode {
     }
 
     async init(nodeData: INodeData, input: string, options: ICommonObject): Promise<any> {
-        return prepareAgent(nodeData, { sessionId: this.sessionId, chatId: options.chatId, input }, options.chatHistory)
+        return prepareAgent(nodeData, options, { sessionId: this.sessionId, chatId: options.chatId, input }, options.chatHistory)
     }
 
     async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string> {
         const memory = nodeData.inputs?.memory as FlowiseMemory
 
-        const executor = await prepareAgent(nodeData, { sessionId: this.sessionId, chatId: options.chatId, input }, options.chatHistory)
-        injectAgentExecutorNodeData(executor, nodeData, options)
+        const executor = await prepareAgent(
+            nodeData,
+            options,
+            { sessionId: this.sessionId, chatId: options.chatId, input },
+            options.chatHistory
+        )
+        // injectAgentExecutorNodeData(executor, nodeData, options)
 
         const loggerHandler = new ConsoleCallbackHandler(options.logger)
         const callbacks = await additionalCallbacks(nodeData, options)
@@ -123,6 +129,7 @@ class ConversationalAgent_Agents implements INode {
 
 const prepareAgent = async (
     nodeData: INodeData,
+    options: ICommonObject,
     flowObj: { sessionId?: string; chatId?: string; input?: string },
     chatHistory: IMessage[] = []
 ) => {
@@ -149,6 +156,32 @@ const prepareAgent = async (
         outputParser
     })
 
+    if (model instanceof ChatOpenAI) {
+        let humanImageMessages: HumanMessage[] = []
+        const chatModel = model as ChatOpenAI
+        const messageContent = addImagesToMessages(nodeData, options, model.multiModalOption)
+
+        if (messageContent?.length) {
+            // Change model to gpt-4-vision
+            chatModel.modelName = 'gpt-4-vision-preview'
+
+            // Change default max token to higher when using gpt-4-vision
+            chatModel.maxTokens = 1024
+
+            for (const msg of messageContent) {
+                humanImageMessages.push(new HumanMessage({ content: [msg] }))
+            }
+            let messagePlaceholder = prompt.promptMessages.pop()
+            prompt.promptMessages.push(...humanImageMessages)
+            // @ts-ignore
+            prompt.promptMessages.push(messagePlaceholder)
+        } else {
+            // revert to previous values if image upload is empty
+            chatModel.modelName = chatModel.configuredModel
+            chatModel.maxTokens = chatModel.configuredMaxToken
+        }
+    }
+
     const runnableAgent = RunnableSequence.from([
         {
             [inputKey]: (i: { input: string; steps: AgentStep[] }) => i.input,
@@ -169,7 +202,7 @@ const prepareAgent = async (
         sessionId: flowObj?.sessionId,
         chatId: flowObj?.chatId,
         input: flowObj?.input,
-        verbose: process.env.DEBUG === 'true' ? true : false
+        verbose: process.env.DEBUG === 'true'
    })
 
     return executor
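
The block added to prepareAgent above is the shape this commit repeats in every node it touches. A minimal self-contained TypeScript sketch of that shape, using stand-in types instead of the real Flowise/LangChain classes (only the configuredModel/configuredMaxToken fields and the literal values come from the diff):

    // Stand-in for the Flowise ChatOpenAI wrapper: it remembers what the user
    // configured so the vision switch can be undone later.
    interface VisionCapableModel {
        modelName: string
        maxTokens?: number
        configuredModel: string
        configuredMaxToken?: number
    }

    type ImageContent = { type: 'image_url'; image_url: { url: string; detail: string } }

    // If the uploads produced image content parts, switch to the vision model and
    // raise the token cap; otherwise restore the user-configured values.
    function applyVisionSettings(model: VisionCapableModel, messageContent: ImageContent[]): void {
        if (messageContent.length) {
            model.modelName = 'gpt-4-vision-preview'
            model.maxTokens = 1024
        } else {
            model.modelName = model.configuredModel
            model.maxTokens = model.configuredMaxToken
        }
    }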
@@ -7,7 +7,11 @@ import { BaseChatModel } from '@langchain/core/language_models/chat_models'
 import { additionalCallbacks } from '../../../src/handler'
 import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
 import { getBaseClasses } from '../../../src/utils'
-import { injectLcAgentExecutorNodeData } from '../../../src/multiModalUtils'
+import { ChatOpenAI } from "../../chatmodels/ChatOpenAI/FlowiseChatOpenAI";
+import { HumanMessage } from "@langchain/core/messages";
+import { addImagesToMessages } from "../../../src/multiModalUtils";
+import { ChatPromptTemplate, SystemMessagePromptTemplate } from "langchain/prompts";
+// import { injectLcAgentExecutorNodeData } from '../../../src/multiModalUtils'
 
 class MRKLAgentChat_Agents implements INode {
     label: string
@@ -54,19 +58,39 @@ class MRKLAgentChat_Agents implements INode {
         tools = flatten(tools)
 
         const promptWithChat = await pull<PromptTemplate>('hwchase17/react-chat')
+        let chatPromptTemplate = undefined
+        if (model instanceof ChatOpenAI) {
+            const chatModel = model as ChatOpenAI
+            const messageContent = addImagesToMessages(nodeData, options, model.multiModalOption)
+
+            if (messageContent?.length) {
+                // Change model to gpt-4-vision
+                chatModel.modelName = 'gpt-4-vision-preview'
+
+                // Change default max token to higher when using gpt-4-vision
+                chatModel.maxTokens = 1024
+                const oldTemplate = promptWithChat.template as string
+                let chatPromptTemplate = ChatPromptTemplate.fromMessages([SystemMessagePromptTemplate.fromTemplate(oldTemplate)])
+                chatPromptTemplate.promptMessages = [new HumanMessage({ content: messageContent })]
+            } else {
+                // revert to previous values if image upload is empty
+                chatModel.modelName = chatModel.configuredModel
+                chatModel.maxTokens = chatModel.configuredMaxToken
+            }
+        }
+
         const agent = await createReactAgent({
             llm: model,
             tools,
-            prompt: promptWithChat
+            prompt: chatPromptTemplate ?? promptWithChat
         })
 
         const executor = new AgentExecutor({
             agent,
             tools,
-            verbose: process.env.DEBUG === 'true' ? true : false
+            verbose: process.env.DEBUG === 'true'
         })
-        injectLcAgentExecutorNodeData(executor, nodeData, options)
+        // injectLcAgentExecutorNodeData(executor, nodeData, options)
 
         const callbacks = await additionalCallbacks(nodeData, options)
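
For the ReAct agent the image parts cannot be appended to the pulled react-chat PromptTemplate directly, so the hunk above rebuilds it as a chat prompt. A rough sketch of that rewrite, assuming the import paths used in this file; oldTemplate and messageContent stand in for the values computed in the hunk:

    import { ChatPromptTemplate, SystemMessagePromptTemplate } from 'langchain/prompts'
    import { HumanMessage } from '@langchain/core/messages'

    const oldTemplate = '...text of the pulled react-chat template...' // stand-in
    const messageContent = [
        { type: 'image_url', image_url: { url: 'data:image/png;base64,...', detail: 'low' } } // stand-in
    ]

    // Mirrors the hunk: the plain template becomes a system prompt template, and
    // promptMessages is then overwritten with a single HumanMessage carrying the image parts.
    const chatPromptTemplate = ChatPromptTemplate.fromMessages([SystemMessagePromptTemplate.fromTemplate(oldTemplate)])
    chatPromptTemplate.promptMessages = [new HumanMessage({ content: messageContent })]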
@@ -1,15 +1,16 @@
-import { ChatPromptTemplate, HumanMessagePromptTemplate, MessagesPlaceholder, SystemMessagePromptTemplate } from '@langchain/core/prompts'
-import { BaseChatModel } from '@langchain/core/language_models/chat_models'
-import { RunnableSequence } from '@langchain/core/runnables'
-import { StringOutputParser } from '@langchain/core/output_parsers'
-import { ConsoleCallbackHandler as LCConsoleCallbackHandler } from '@langchain/core/tracers/console'
 import { ConversationChain } from 'langchain/chains'
-import { getBaseClasses, handleEscapeCharacters } from '../../../src/utils'
+import { ChatPromptTemplate, HumanMessagePromptTemplate, MessagesPlaceholder, SystemMessagePromptTemplate } from 'langchain/prompts'
 import { FlowiseMemory, ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
 import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
+import { getBaseClasses, handleEscapeCharacters } from '../../../src/utils'
+import { RunnableSequence } from 'langchain/schema/runnable'
+import { StringOutputParser } from 'langchain/schema/output_parser'
+import { HumanMessage } from 'langchain/schema'
+import { ConsoleCallbackHandler as LCConsoleCallbackHandler } from '@langchain/core/tracers/console'
 import { checkInputs, Moderation, streamResponse } from '../../moderation/Moderation'
 import { formatResponse } from '../../outputparsers/OutputParserHelpers'
-import { injectRunnableNodeData } from '../../../src/multiModalUtils'
+import { addImagesToMessages } from '../../../src/multiModalUtils'
+import { ChatOpenAI } from '../../chatmodels/ChatOpenAI/FlowiseChatOpenAI'
 
 let systemMessage = `The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.`
 const inputKey = 'input'
@@ -95,8 +96,6 @@ class ConversationChain_Chains implements INode {
         const memory = nodeData.inputs?.memory
 
         const chain = prepareChain(nodeData, options, this.sessionId)
-        injectRunnableNodeData(chain, nodeData, options)
-
         const moderations = nodeData.inputs?.inputModeration as Moderation[]
 
         if (moderations && moderations.length > 0) {
@@ -146,7 +145,7 @@ class ConversationChain_Chains implements INode {
     }
 }
 
-const prepareChatPrompt = (nodeData: INodeData) => {
+const prepareChatPrompt = (nodeData: INodeData, humanImageMessages: HumanMessage[]) => {
     const memory = nodeData.inputs?.memory as FlowiseMemory
     const prompt = nodeData.inputs?.systemMessagePrompt as string
     const chatPromptTemplate = nodeData.inputs?.chatPromptTemplate as ChatPromptTemplate
@@ -154,12 +153,10 @@ const prepareChatPrompt = (nodeData: INodeData) => {
     if (chatPromptTemplate && chatPromptTemplate.promptMessages.length) {
         const sysPrompt = chatPromptTemplate.promptMessages[0]
         const humanPrompt = chatPromptTemplate.promptMessages[chatPromptTemplate.promptMessages.length - 1]
-        const chatPrompt = ChatPromptTemplate.fromMessages([
-            sysPrompt,
-            new MessagesPlaceholder(memory.memoryKey ?? 'chat_history'),
-            humanPrompt
-        ])
+        const messages = [sysPrompt, new MessagesPlaceholder(memory.memoryKey ?? 'chat_history'), humanPrompt]
+        if (humanImageMessages.length) messages.push(...humanImageMessages)
+
+        const chatPrompt = ChatPromptTemplate.fromMessages(messages)
         if ((chatPromptTemplate as any).promptValues) {
             // @ts-ignore
             chatPrompt.promptValues = (chatPromptTemplate as any).promptValues
@@ -168,22 +165,47 @@ const prepareChatPrompt = (nodeData: INodeData) => {
         return chatPrompt
     }
 
-    const chatPrompt = ChatPromptTemplate.fromMessages([
+    const messages = [
         SystemMessagePromptTemplate.fromTemplate(prompt ? prompt : systemMessage),
         new MessagesPlaceholder(memory.memoryKey ?? 'chat_history'),
         HumanMessagePromptTemplate.fromTemplate(`{${inputKey}}`)
-    ])
+    ]
+    if (humanImageMessages.length) messages.push(...(humanImageMessages as any[]))
+
+    const chatPrompt = ChatPromptTemplate.fromMessages(messages)
+
     return chatPrompt
 }
 
 const prepareChain = (nodeData: INodeData, options: ICommonObject, sessionId?: string) => {
     const chatHistory = options.chatHistory
-    const model = nodeData.inputs?.model as BaseChatModel
+    let model = nodeData.inputs?.model
     const memory = nodeData.inputs?.memory as FlowiseMemory
     const memoryKey = memory.memoryKey ?? 'chat_history'
 
-    const chatPrompt = prepareChatPrompt(nodeData)
+    let humanImageMessages: HumanMessage[] = []
+    if (model instanceof ChatOpenAI) {
+        const chatModel = model as ChatOpenAI
+        const messageContent = addImagesToMessages(nodeData, options, model.multiModalOption)
+
+        if (messageContent?.length) {
+            // Change model to gpt-4-vision
+            chatModel.modelName = 'gpt-4-vision-preview'
+
+            // Change default max token to higher when using gpt-4-vision
+            chatModel.maxTokens = 1024
+
+            for (const msg of messageContent) {
+                humanImageMessages.push(new HumanMessage({ content: [msg] }))
+            }
+        } else {
+            // revert to previous values if image upload is empty
+            chatModel.modelName = chatModel.configuredModel
+            chatModel.maxTokens = chatModel.configuredMaxToken
+        }
+    }
+
+    const chatPrompt = prepareChatPrompt(nodeData, humanImageMessages)
     let promptVariables = {}
    const promptValuesRaw = (chatPrompt as any).promptValues
     if (promptValuesRaw) {
@@ -207,7 +229,7 @@ const prepareChain = (nodeData: INodeData, options: ICommonObject, sessionId?: s
             },
             ...promptVariables
         },
-        prepareChatPrompt(nodeData),
+        prepareChatPrompt(nodeData, humanImageMessages),
         model,
         new StringOutputParser()
     ])
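
prepareChatPrompt now threads humanImageMessages through both of its branches. A minimal sketch of the fallback branch (no user-supplied chatPromptTemplate), with the same imports this file reverts to; the system text and memory key are illustrative:

    import { ChatPromptTemplate, HumanMessagePromptTemplate, MessagesPlaceholder, SystemMessagePromptTemplate } from 'langchain/prompts'
    import { HumanMessage } from 'langchain/schema'

    const humanImageMessages: HumanMessage[] = [] // filled by prepareChain when uploads exist

    // System prompt, then chat history, then the {input} human turn...
    const messages = [
        SystemMessagePromptTemplate.fromTemplate('The following is a friendly conversation between a human and an AI.'),
        new MessagesPlaceholder('chat_history'),
        HumanMessagePromptTemplate.fromTemplate('{input}')
    ]
    // ...and any image HumanMessages appended last, next to the user turn.
    if (humanImageMessages.length) messages.push(...(humanImageMessages as any[]))

    const chatPrompt = ChatPromptTemplate.fromMessages(messages)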
@@ -6,8 +6,11 @@ import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from
 import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
 import { getBaseClasses, handleEscapeCharacters } from '../../../src/utils'
 import { checkInputs, Moderation, streamResponse } from '../../moderation/Moderation'
-import { injectLLMChainNodeData } from '../../../src/multiModalUtils'
 import { formatResponse, injectOutputParser } from '../../outputparsers/OutputParserHelpers'
+import { ChatOpenAI } from '../../chatmodels/ChatOpenAI/FlowiseChatOpenAI'
+import { addImagesToMessages } from '../../../src/multiModalUtils'
+import { ChatPromptTemplate, FewShotPromptTemplate, PromptTemplate, SystemMessagePromptTemplate } from 'langchain/prompts'
+import { HumanMessage } from 'langchain/schema'
 
 class LLMChain_Chains implements INode {
     label: string
@@ -107,7 +110,6 @@ class LLMChain_Chains implements INode {
             verbose: process.env.DEBUG === 'true'
         })
         const inputVariables = chain.prompt.inputVariables as string[] // ["product"]
-        injectLLMChainNodeData(nodeData, options)
         promptValues = injectOutputParser(this.outputParser, chain, promptValues)
         const res = await runPrediction(inputVariables, chain, input, promptValues, options, nodeData)
         // eslint-disable-next-line no-console
@@ -137,7 +139,6 @@ class LLMChain_Chains implements INode {
         if (!this.outputParser && outputParser) {
             this.outputParser = outputParser
         }
-        injectLLMChainNodeData(nodeData, options)
         promptValues = injectOutputParser(this.outputParser, chain, promptValues)
         const res = await runPrediction(inputVariables, chain, input, promptValues, options, nodeData)
         // eslint-disable-next-line no-console
@@ -163,12 +164,7 @@ const runPrediction = async (
     const socketIO = isStreaming ? options.socketIO : undefined
     const socketIOClientId = isStreaming ? options.socketIOClientId : ''
     const moderations = nodeData.inputs?.inputModeration as Moderation[]
-    /**
-     * Apply string transformation to reverse converted special chars:
-     * FROM: { "value": "hello i am benFLOWISE_NEWLINEFLOWISE_NEWLINEFLOWISE_TABhow are you?" }
-     * TO: { "value": "hello i am ben\n\n\thow are you?" }
-     */
-    const promptValues = handleEscapeCharacters(promptValuesRaw, true)
+    let model = nodeData.inputs?.model as ChatOpenAI
 
     if (moderations && moderations.length > 0) {
         try {
@@ -181,6 +177,42 @@ const runPrediction = async (
         }
     }
 
+    /**
+     * Apply string transformation to reverse converted special chars:
+     * FROM: { "value": "hello i am benFLOWISE_NEWLINEFLOWISE_NEWLINEFLOWISE_TABhow are you?" }
+     * TO: { "value": "hello i am ben\n\n\thow are you?" }
+     */
+    const promptValues = handleEscapeCharacters(promptValuesRaw, true)
+    const messageContent = addImagesToMessages(nodeData, options, model.multiModalOption)
+    if (chain.llm instanceof ChatOpenAI) {
+        const chatOpenAI = chain.llm as ChatOpenAI
+        if (messageContent?.length) {
+            // Change model to gpt-4-vision && max token to higher when using gpt-4-vision
+            chatOpenAI.modelName = 'gpt-4-vision-preview'
+            chatOpenAI.maxTokens = 1024
+            // Add image to the message
+            if (chain.prompt instanceof PromptTemplate) {
+                const oldTemplate = chain.prompt.template as string
+                let cp2 = ChatPromptTemplate.fromMessages([SystemMessagePromptTemplate.fromTemplate(oldTemplate)])
+                cp2.promptMessages = [new HumanMessage({ content: messageContent })]
+                chain.prompt = cp2
+            } else if (chain.prompt instanceof ChatPromptTemplate) {
+                chain.prompt.promptMessages.push(new HumanMessage({ content: messageContent }))
+            } else if (chain.prompt instanceof FewShotPromptTemplate) {
+                let currentPrompt = chain.prompt as FewShotPromptTemplate
+                const oldTemplate = currentPrompt.examplePrompt.template as string
+                let cp2 = ChatPromptTemplate.fromMessages([SystemMessagePromptTemplate.fromTemplate(oldTemplate)])
+                cp2.promptMessages = [new HumanMessage({ content: messageContent })]
+                // @ts-ignore
+                currentPrompt.examplePrompt = cp2
+            }
+        } else {
+            // revert to previous values if image upload is empty
+            chatOpenAI.modelName = model.configuredModel
+            chatOpenAI.maxTokens = model.configuredMaxToken
+        }
+    }
+
     if (promptValues && inputVariables.length > 0) {
         let seen: string[] = []
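
runPrediction dispatches on the concrete prompt type because each one carries its template differently. A hypothetical usage of the simplest case, the ChatPromptTemplate branch, where the image parts ride in as one trailing HumanMessage (imports as in the hunk above; the data URL is a placeholder):

    import { ChatPromptTemplate, SystemMessagePromptTemplate } from 'langchain/prompts'
    import { HumanMessage } from 'langchain/schema'

    const prompt = ChatPromptTemplate.fromMessages([SystemMessagePromptTemplate.fromTemplate('Describe the attached image.')])
    // Append the uploaded images as an extra human turn at the end of the prompt
    prompt.promptMessages.push(
        new HumanMessage({
            content: [{ type: 'image_url', image_url: { url: 'data:image/jpeg;base64,...', detail: 'low' } }]
        })
    )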
@@ -7,12 +7,9 @@ import {
     ChatOpenAICallOptions
 } from '@langchain/openai'
 import { BaseChatModelParams } from '@langchain/core/language_models/chat_models'
-import { BaseLanguageModelInput } from '@langchain/core/language_models/base'
-import { BaseMessageChunk, BaseMessageLike, HumanMessage } from '@langchain/core/messages'
-import { LLMResult } from '@langchain/core/outputs'
-import { Callbacks } from '@langchain/core/callbacks/manager'
 import { IMultiModalOption } from '../../../src'
-import { addImagesToMessages, MultiModalOptions } from '../../../src/multiModalUtils'
+import { BaseMessageLike, LLMResult } from 'langchain/schema'
+import { Callbacks } from '@langchain/core/callbacks/manager'
 
 export class ChatOpenAI extends LangchainChatOpenAI {
     configuredModel: string
@@ -35,34 +32,7 @@ export class ChatOpenAI extends LangchainChatOpenAI {
         this.configuredMaxToken = fields?.maxTokens
     }
-
-    async invoke(input: BaseLanguageModelInput, options?: ChatOpenAICallOptions): Promise<BaseMessageChunk> {
-        return super.invoke(input, options)
-    }
-
-    async generate(messages: BaseMessageLike[][], options?: string[] | ChatOpenAICallOptions, callbacks?: Callbacks): Promise<LLMResult> {
-        if (this.lc_kwargs.chainData) {
-            await this.injectMultiModalMessages(messages, this.lc_kwargs.chainData)
-        }
-        return super.generate(messages, options, callbacks)
-    }
-
-    private async injectMultiModalMessages(messages: BaseMessageLike[][], options: MultiModalOptions) {
-        const optionsData = options.nodeOptions
-        const messageContent = addImagesToMessages(optionsData, this.multiModalOption)
-        if (messageContent?.length) {
-            if (messages[0].length > 0 && messages[0][messages[0].length - 1] instanceof HumanMessage) {
-                // Change model to gpt-4-vision
-                this.modelName = 'gpt-4-vision-preview'
-
-                // Change default max token to higher when using gpt-4-vision
-                this.maxTokens = 1024
-
-                messages[0].push(new HumanMessage({ content: messageContent }))
-            }
-        } else {
-            // revert to previous values if image upload is empty
-            this.modelName = this.configuredModel
-            this.maxTokens = this.configuredMaxToken
-        }
-    }
 }
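
With invoke/generate removed, the wrapper's remaining multimodal job is bookkeeping: record what the user configured so nodes can restore it after a temporary vision switch. A self-contained sketch of that constructor pattern (the base class is stubbed here; field names follow the diff):

    class ChatModelStub {
        modelName = 'gpt-3.5-turbo'
        maxTokens?: number
    }

    class FlowiseChatOpenAIStub extends ChatModelStub {
        configuredModel: string
        configuredMaxToken?: number

        constructor(fields?: { modelName?: string; maxTokens?: number }) {
            super()
            if (fields?.modelName) this.modelName = fields.modelName
            if (fields?.maxTokens) this.maxTokens = fields.maxTokens
            // Record the user's choices once, at construction time, so a later
            // gpt-4-vision switch can be reverted to exactly these values.
            this.configuredModel = this.modelName
            this.configuredMaxToken = fields?.maxTokens
        }
    }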
@@ -1,84 +1,38 @@
 import { ICommonObject, IFileUpload, IMultiModalOption, INodeData, MessageContentImageUrl } from './Interface'
+import { ChatOpenAI as LangchainChatOpenAI } from 'langchain/chat_models/openai'
 import path from 'path'
 import { getStoragePath } from './utils'
 import fs from 'fs'
-import { ChatOpenAI } from '../nodes/chatmodels/ChatOpenAI/FlowiseChatOpenAI'
-import { LLMChain } from 'langchain/chains'
-import { RunnableBinding, RunnableSequence } from 'langchain/schema/runnable'
-import { AgentExecutor as LcAgentExecutor, ChatAgent, RunnableAgent } from 'langchain/agents'
-import { AgentExecutor } from './agents'
-
-export interface MultiModalOptions {
-    nodeOptions: ICommonObject
-}
-
-export const injectLLMChainNodeData = (nodeData: INodeData, options: ICommonObject) => {
-    let llmChain = nodeData.instance as LLMChain
-    ;(llmChain.llm as ChatOpenAI).lc_kwargs.chainData = { nodeOptions: getUploadsFromOptions(options) }
-}
-
-export const injectAgentExecutorNodeData = (agentExecutor: AgentExecutor, nodeData: INodeData, options: ICommonObject) => {
-    if (agentExecutor.agent instanceof RunnableAgent && agentExecutor.agent.runnable instanceof RunnableSequence) {
-        let rs = agentExecutor.agent.runnable as RunnableSequence
-        injectRunnableNodeData(rs, nodeData, options)
-    }
-}
-
-export const injectLcAgentExecutorNodeData = (agentExecutor: LcAgentExecutor, nodeData: INodeData, options: ICommonObject) => {
-    if (agentExecutor.agent instanceof ChatAgent) {
-        let llmChain = agentExecutor.agent.llmChain as LLMChain
-        ;(llmChain.llm as ChatOpenAI).lc_kwargs.chainData = { nodeOptions: getUploadsFromOptions(options) }
-    }
-}
-
-export const injectRunnableNodeData = (runnableSequence: RunnableSequence, nodeData: INodeData, options: ICommonObject) => {
-    runnableSequence.steps.forEach((step) => {
-        if (step instanceof ChatOpenAI) {
-            ;(step as ChatOpenAI).lc_kwargs.chainData = { nodeOptions: getUploadsFromOptions(options) }
-        }
-
-        if (step instanceof RunnableBinding) {
-            if ((step as RunnableBinding<any, any>).bound instanceof ChatOpenAI) {
-                ;((step as RunnableBinding<any, any>).bound as ChatOpenAI).lc_kwargs.chainData = {
-                    nodeOptions: getUploadsFromOptions(options)
-                }
-            }
-        }
-    })
-}
-
-const getUploadsFromOptions = (options: ICommonObject): ICommonObject => {
-    if (options?.uploads) {
-        return {
-            uploads: options.uploads,
-            chatflowid: options.chatflowid,
-            chatId: options.chatId
-        }
-    }
-    return {}
-}
-
-export const addImagesToMessages = (options: ICommonObject, multiModalOption?: IMultiModalOption): MessageContentImageUrl[] => {
+export const addImagesToMessages = (
+    nodeData: INodeData,
+    options: ICommonObject,
+    multiModalOption?: IMultiModalOption
+): MessageContentImageUrl[] => {
     const imageContent: MessageContentImageUrl[] = []
+    let model = nodeData.inputs?.model
 
-    // Image Uploaded
-    if (multiModalOption?.image && multiModalOption?.image.allowImageUploads && options?.uploads && options?.uploads.length > 0) {
-        const imageUploads = getImageUploads(options.uploads)
-        for (const upload of imageUploads) {
-            if (upload.type == 'stored-file') {
-                const filePath = path.join(getStoragePath(), options.chatflowid, options.chatId, upload.name)
+    if (model instanceof LangchainChatOpenAI && multiModalOption) {
+        // Image Uploaded
+        if (multiModalOption.image && multiModalOption.image.allowImageUploads && options?.uploads && options?.uploads.length > 0) {
+            const imageUploads = getImageUploads(options.uploads)
+            for (const upload of imageUploads) {
+                let bf = upload.data
+                if (upload.type == 'stored-file') {
+                    const filePath = path.join(getStoragePath(), options.chatflowid, options.chatId, upload.name)
 
-                // as the image is stored in the server, read the file and convert it to base64
-                const contents = fs.readFileSync(filePath)
-                let bf = 'data:' + upload.mime + ';base64,' + contents.toString('base64')
+                    // as the image is stored in the server, read the file and convert it to base64
+                    const contents = fs.readFileSync(filePath)
+                    bf = 'data:' + upload.mime + ';base64,' + contents.toString('base64')
+                }
 
-                imageContent.push({
-                    type: 'image_url',
-                    image_url: {
-                        url: bf,
-                        detail: multiModalOption?.image.imageResolution ?? 'low'
-                    }
-                })
+                imageContent.push({
+                    type: 'image_url',
+                    image_url: {
+                        url: bf,
+                        detail: multiModalOption.image.imageResolution ?? 'low'
+                    }
+                })
             }
         }
     }
 }
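
The one non-obvious step addImagesToMessages keeps is the stored-file branch: uploads saved on the server are re-read from disk and inlined as data URLs before being handed to the model. A minimal sketch of just that conversion, assuming Node's fs/path and the storage layout visible in the diff (storage root / chatflowid / chatId / file name):

    import fs from 'fs'
    import path from 'path'

    // Read a stored upload and inline it as a data URL for an image_url content part.
    function storedFileToDataUrl(storageRoot: string, chatflowid: string, chatId: string, name: string, mime: string): string {
        const filePath = path.join(storageRoot, chatflowid, chatId, name)
        const contents = fs.readFileSync(filePath)
        return 'data:' + mime + ';base64,' + contents.toString('base64')
    }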