Addition of Claude for Image uploads
This commit is contained in:
parent a2caf3e265
commit 63b8c23072
@@ -4,7 +4,12 @@ import { BaseChatModel } from '@langchain/core/language_models/chat_models'
 import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages'
 import { ChainValues } from '@langchain/core/utils/types'
 import { AgentStep } from '@langchain/core/agents'
-import { renderTemplate, MessagesPlaceholder } from '@langchain/core/prompts'
+import {
+    renderTemplate,
+    MessagesPlaceholder,
+    HumanMessagePromptTemplate,
+    PromptTemplate
+} from "@langchain/core/prompts";
 import { RunnableSequence } from '@langchain/core/runnables'
 import { ChatConversationalAgent } from 'langchain/agents'
 import { getBaseClasses } from '../../../src/utils'
@@ -12,7 +17,8 @@ import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from
 import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams } from '../../../src/Interface'
 import { AgentExecutor } from '../../../src/agents'
 import { ChatOpenAI } from '../../chatmodels/ChatOpenAI/FlowiseChatOpenAI'
-import { addImagesToMessages } from '../../../src/multiModalUtils'
+import { addImagesToMessages, llmSupportsVision } from "../../../src/multiModalUtils";
+import { IVisionChatModal } from "../../../src/IVisionChatModal";

 const DEFAULT_PREFIX = `Assistant is a large language model trained by OpenAI.
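Note: llmSupportsVision is imported from src/multiModalUtils, but its body is not part of this diff. Judging from how it guards the cast to IVisionChatModal in every hunk below, it is presumably a simple duck-typing type guard along these lines (a sketch, not the actual implementation):

// Hypothetical sketch of llmSupportsVision; the real helper lives in
// src/multiModalUtils and is not shown in this commit.
import { IVisionChatModal } from './IVisionChatModal'

export const llmSupportsVision = (value: any): value is IVisionChatModal =>
    // Every IVisionChatModal implementation introduced in this commit exposes
    // setVisionModel/revertToOriginalModel, so checking for one of those
    // methods is enough to make the casts below safe.
    !!value && typeof value.setVisionModel === 'function'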
@@ -150,33 +156,39 @@ const prepareAgent = async (
         outputParser
     })

-    if (model instanceof ChatOpenAI) {
-        let humanImageMessages: HumanMessage[] = []
+    if (llmSupportsVision(model)) {
+        const visionChatModel = model as IVisionChatModal
+        // let humanImageMessages: HumanMessage[] = []
         const messageContent = addImagesToMessages(nodeData, options, model.multiModalOption)

         if (messageContent?.length) {
-            // Change model to gpt-4-vision
-            model.modelName = 'gpt-4-vision-preview'
+            visionChatModel.setVisionModel()

-            // Change default max token to higher when using gpt-4-vision
-            model.maxTokens = 1024
-            for (const msg of messageContent) {
-                humanImageMessages.push(new HumanMessage({ content: [msg] }))
-            }
+            // for (const msg of messageContent) {
+            //     humanImageMessages.push(new HumanMessage({ content: [msg] }))
+            // }

             // Pop the `agent_scratchpad` MessagePlaceHolder
             let messagePlaceholder = prompt.promptMessages.pop() as MessagesPlaceholder
+            if (prompt.promptMessages.at(-1) instanceof HumanMessagePromptTemplate) {
+                const lastMessage = prompt.promptMessages.pop() as HumanMessagePromptTemplate
+                const template = (lastMessage.prompt as PromptTemplate).template as string
+                const msg = HumanMessagePromptTemplate.fromTemplate([
+                    ...messageContent,
+                    {
+                        text: template
+                    }
+                ])
+                msg.inputVariables = lastMessage.inputVariables
+                prompt.promptMessages.push(msg)
+            }
             // Add the HumanMessage for images
-            prompt.promptMessages.push(...humanImageMessages)
+            //prompt.promptMessages.push(...humanImageMessages)

             // Add the `agent_scratchpad` MessagePlaceHolder back
             prompt.promptMessages.push(messagePlaceholder)
         } else {
-            // revert to previous values if image upload is empty
-            model.modelName = model.configuredModel
-            model.maxTokens = model.configuredMaxToken
+            visionChatModel.revertToOriginalModel()
         }
     }
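Note: both chat-model wrappers touched by this commit (FlowiseChatOpenAI and the new FlowiseChatAntrhopic below) implement IVisionChatModal. The interface file itself is not in the diff; reconstructed from those two implementations, it is presumably close to:

// Reconstructed from the implementing classes later in this diff; the actual
// definition lives in src/IVisionChatModal.ts, which this commit does not show.
import { IMultiModalOption } from './Interface'

export interface IVisionChatModal {
    id: string
    configuredModel: string
    configuredMaxToken: number
    multiModalOption: IMultiModalOption
    setVisionModel(): void
    revertToOriginalModel(): void
    setMultiModalOption(multiModalOption: IMultiModalOption): void
}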
@@ -1,6 +1,5 @@
 import { flatten } from 'lodash'
 import { AgentExecutor } from 'langchain/agents'
-import { HumanMessage } from '@langchain/core/messages'
 import { ChatPromptTemplate, HumanMessagePromptTemplate } from '@langchain/core/prompts'
 import { Tool } from '@langchain/core/tools'
 import type { PromptTemplate } from '@langchain/core/prompts'
@@ -10,8 +9,8 @@ import { additionalCallbacks } from '../../../src/handler'
 import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams } from '../../../src/Interface'
 import { getBaseClasses } from '../../../src/utils'
 import { createReactAgent } from '../../../src/agents'
-import { ChatOpenAI } from '../../chatmodels/ChatOpenAI/FlowiseChatOpenAI'
-import { addImagesToMessages } from '../../../src/multiModalUtils'
+import { addImagesToMessages, llmSupportsVision } from '../../../src/multiModalUtils'
+import { IVisionChatModal } from '../../../src/IVisionChatModal'

 class MRKLAgentChat_Agents implements INode {
     label: string
@@ -68,23 +67,26 @@ class MRKLAgentChat_Agents implements INode {
         const prompt = await pull<PromptTemplate>('hwchase17/react-chat')
         let chatPromptTemplate = undefined

-        if (model instanceof ChatOpenAI) {
+        if (llmSupportsVision(model)) {
+            const visionChatModel = model as IVisionChatModal
             const messageContent = addImagesToMessages(nodeData, options, model.multiModalOption)

             if (messageContent?.length) {
-                // Change model to gpt-4-vision
-                model.modelName = 'gpt-4-vision-preview'
+                // Change model to vision supported
+                visionChatModel.setVisionModel()

-                // Change default max token to higher when using gpt-4-vision
-                model.maxTokens = 1024

                 const oldTemplate = prompt.template as string
-                chatPromptTemplate = ChatPromptTemplate.fromMessages([HumanMessagePromptTemplate.fromTemplate(oldTemplate)])
-                chatPromptTemplate.promptMessages.push(new HumanMessage({ content: messageContent }))
+                const msg = HumanMessagePromptTemplate.fromTemplate([
+                    ...messageContent,
+                    {
+                        text: oldTemplate
+                    }
+                ])
+                msg.inputVariables = prompt.inputVariables
+                chatPromptTemplate = ChatPromptTemplate.fromMessages([msg])
             } else {
                 // revert to previous values if image upload is empty
-                model.modelName = model.configuredModel
-                model.maxTokens = model.configuredMaxToken
+                visionChatModel.revertToOriginalModel()
             }
         }
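Note: in this hunk, the one above, and the LLMChain hunk below, the commit stops appending images as separate HumanMessages and instead folds the image parts and the original text template into one multimodal HumanMessagePromptTemplate via the array form of fromTemplate. A sketch of the shapes involved; the exact fields of the entries returned by addImagesToMessages are assumed here from LangChain's multimodal content format:

// Assumed shape of one entry produced by addImagesToMessages:
const messageContent = [{ type: 'image_url', image_url: { url: 'data:image/png;base64,...' } }]

// The diff merges those parts with the original prompt text into a single human message:
const msg = HumanMessagePromptTemplate.fromTemplate([
    ...messageContent,
    { text: oldTemplate } // the pre-existing text template becomes the text part
])
// The commit copies inputVariables across explicitly rather than relying on
// variable inference over the array form.
msg.inputVariables = prompt.inputVariables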
@@ -2,15 +2,16 @@ import { ConversationChain } from 'langchain/chains'
 import { ChatPromptTemplate, HumanMessagePromptTemplate, MessagesPlaceholder, SystemMessagePromptTemplate } from '@langchain/core/prompts'
 import { RunnableSequence } from '@langchain/core/runnables'
 import { StringOutputParser } from '@langchain/core/output_parsers'
-import { HumanMessage } from '@langchain/core/messages'
 import { ConsoleCallbackHandler as LCConsoleCallbackHandler } from '@langchain/core/tracers/console'
 import { checkInputs, Moderation, streamResponse } from '../../moderation/Moderation'
 import { formatResponse } from '../../outputparsers/OutputParserHelpers'
-import { addImagesToMessages } from '../../../src/multiModalUtils'
+import { addImagesToMessages, llmSupportsVision } from '../../../src/multiModalUtils'
 import { ChatOpenAI } from '../../chatmodels/ChatOpenAI/FlowiseChatOpenAI'
-import { FlowiseMemory, ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
+import { FlowiseMemory, ICommonObject, INode, INodeData, INodeParams, MessageContentImageUrl } from '../../../src/Interface'
 import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
 import { getBaseClasses, handleEscapeCharacters } from '../../../src/utils'
+import { IVisionChatModal } from '../../../src/IVisionChatModal'
+import { MessageContent } from 'llamaindex'

 let systemMessage = `The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.`
 const inputKey = 'input'
@@ -145,7 +146,7 @@ class ConversationChain_Chains implements INode {
     }
 }

-const prepareChatPrompt = (nodeData: INodeData, humanImageMessages: HumanMessage[]) => {
+const prepareChatPrompt = (nodeData: INodeData, humanImageMessages: MessageContentImageUrl[]) => {
     const memory = nodeData.inputs?.memory as FlowiseMemory
     const prompt = nodeData.inputs?.systemMessagePrompt as string
     const chatPromptTemplate = nodeData.inputs?.chatPromptTemplate as ChatPromptTemplate
@@ -154,7 +155,6 @@ const prepareChatPrompt = (nodeData: INodeData, humanImageMessage
         const sysPrompt = chatPromptTemplate.promptMessages[0]
         const humanPrompt = chatPromptTemplate.promptMessages[chatPromptTemplate.promptMessages.length - 1]
         const messages = [sysPrompt, new MessagesPlaceholder(memory.memoryKey ?? 'chat_history'), humanPrompt]
-        if (humanImageMessages.length) messages.push(...humanImageMessages)

         const chatPrompt = ChatPromptTemplate.fromMessages(messages)
         if ((chatPromptTemplate as any).promptValues) {
@@ -168,9 +168,8 @@ const prepareChatPrompt = (nodeData: INodeData, humanImageMessage
         const messages = [
             SystemMessagePromptTemplate.fromTemplate(prompt ? prompt : systemMessage),
             new MessagesPlaceholder(memory.memoryKey ?? 'chat_history'),
-            HumanMessagePromptTemplate.fromTemplate(`{${inputKey}}`)
+            HumanMessagePromptTemplate.fromTemplate([`{${inputKey}}`, ...humanImageMessages])
         ]
-        if (humanImageMessages.length) messages.push(...(humanImageMessages as any[]))

         const chatPrompt = ChatPromptTemplate.fromMessages(messages)
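Note: the image parts now ride inside the human prompt template instead of being appended as extra messages after it, likely because providers such as Anthropic only accept images as content blocks of a user message. The prompt this now builds (identifiers as in the diff):

// System prompt, history placeholder, then one multimodal human message
// carrying both the {input} text template and the uploaded image parts.
const chatPrompt = ChatPromptTemplate.fromMessages([
    SystemMessagePromptTemplate.fromTemplate(prompt ? prompt : systemMessage),
    new MessagesPlaceholder(memory.memoryKey ?? 'chat_history'),
    HumanMessagePromptTemplate.fromTemplate([`{${inputKey}}`, ...humanImageMessages])
])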
@@ -183,28 +182,19 @@ const prepareChain = (nodeData: INodeData, options: ICommonObject, sessionId?: s
     const memory = nodeData.inputs?.memory as FlowiseMemory
     const memoryKey = memory.memoryKey ?? 'chat_history'

-    let humanImageMessages: HumanMessage[] = []
-    if (model instanceof ChatOpenAI) {
-        const messageContent = addImagesToMessages(nodeData, options, model.multiModalOption)
+    let messageContent: MessageContentImageUrl[] = []
+    if (llmSupportsVision(model)) {
+        messageContent = addImagesToMessages(nodeData, options, model.multiModalOption)
+        const visionChatModel = model as IVisionChatModal
         if (messageContent?.length) {
-            // Change model to gpt-4-vision
-            model.modelName = 'gpt-4-vision-preview'
-
-            // Change default max token to higher when using gpt-4-vision
-            model.maxTokens = 1024
-
-            for (const msg of messageContent) {
-                humanImageMessages.push(new HumanMessage({ content: [msg] }))
-            }
+            visionChatModel.setVisionModel()
         } else {
             // revert to previous values if image upload is empty
-            model.modelName = model.configuredModel
-            model.maxTokens = model.configuredMaxToken
+            visionChatModel.revertToOriginalModel()
         }
     }

-    const chatPrompt = prepareChatPrompt(nodeData, humanImageMessages)
+    const chatPrompt = prepareChatPrompt(nodeData, messageContent)
     let promptVariables = {}
     const promptValuesRaw = (chatPrompt as any).promptValues
     if (promptValuesRaw) {
@@ -228,7 +218,7 @@ const prepareChain = (nodeData: INodeData, options: ICommonObject, sessionId?: s
             },
             ...promptVariables
         },
-        prepareChatPrompt(nodeData, humanImageMessages),
+        prepareChatPrompt(nodeData, messageContent),
         model,
         new StringOutputParser()
     ])
@@ -1,16 +1,22 @@
-import { BaseLanguageModel, BaseLanguageModelCallOptions } from '@langchain/core/language_models/base'
-import { BaseLLMOutputParser, BaseOutputParser } from '@langchain/core/output_parsers'
-import { HumanMessage } from '@langchain/core/messages'
-import { ChatPromptTemplate, FewShotPromptTemplate, PromptTemplate, HumanMessagePromptTemplate } from '@langchain/core/prompts'
-import { OutputFixingParser } from 'langchain/output_parsers'
-import { LLMChain } from 'langchain/chains'
-import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
-import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
-import { getBaseClasses, handleEscapeCharacters } from '../../../src/utils'
-import { checkInputs, Moderation, streamResponse } from '../../moderation/Moderation'
-import { formatResponse, injectOutputParser } from '../../outputparsers/OutputParserHelpers'
-import { ChatOpenAI } from '../../chatmodels/ChatOpenAI/FlowiseChatOpenAI'
-import { addImagesToMessages } from '../../../src/multiModalUtils'
+import { BaseLanguageModel, BaseLanguageModelCallOptions } from "@langchain/core/language_models/base";
+import { BaseLLMOutputParser, BaseOutputParser } from "@langchain/core/output_parsers";
+import { HumanMessage } from "@langchain/core/messages";
+import {
+    ChatPromptTemplate,
+    FewShotPromptTemplate,
+    HumanMessagePromptTemplate,
+    PromptTemplate
+} from "@langchain/core/prompts";
+import { OutputFixingParser } from "langchain/output_parsers";
+import { LLMChain } from "langchain/chains";
+import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from "../../../src/Interface";
+import { additionalCallbacks, ConsoleCallbackHandler, CustomChainHandler } from "../../../src/handler";
+import { getBaseClasses, handleEscapeCharacters } from "../../../src/utils";
+import { checkInputs, Moderation, streamResponse } from "../../moderation/Moderation";
+import { formatResponse, injectOutputParser } from "../../outputparsers/OutputParserHelpers";
+import { ChatOpenAI } from "../../chatmodels/ChatOpenAI/FlowiseChatOpenAI";
+import { addImagesToMessages, llmSupportsVision } from "../../../src/multiModalUtils";
+import { IVisionChatModal } from "../../../src/IVisionChatModal";

 class LLMChain_Chains implements INode {
     label: string
@@ -183,24 +189,39 @@ const runPrediction = async (
      * TO: { "value": "hello i am ben\n\n\thow are you?" }
      */
     const promptValues = handleEscapeCharacters(promptValuesRaw, true)
-    const messageContent = addImagesToMessages(nodeData, options, model.multiModalOption)

-    if (chain.llm instanceof ChatOpenAI) {
-        const chatOpenAI = chain.llm as ChatOpenAI
+    if (llmSupportsVision(chain.llm)) {
+        const messageContent = addImagesToMessages(nodeData, options, model.multiModalOption)
+        const visionChatModel = chain.llm as IVisionChatModal
         if (messageContent?.length) {
             // Change model to gpt-4-vision && max token to higher when using gpt-4-vision
-            chatOpenAI.modelName = 'gpt-4-vision-preview'
-            chatOpenAI.maxTokens = 1024
+            visionChatModel.setVisionModel()
             // Add image to the message
             if (chain.prompt instanceof PromptTemplate) {
                 const existingPromptTemplate = chain.prompt.template as string
-                let newChatPromptTemplate = ChatPromptTemplate.fromMessages([
-                    HumanMessagePromptTemplate.fromTemplate(existingPromptTemplate)
+                const msg = HumanMessagePromptTemplate.fromTemplate([
+                    ...messageContent,
+                    {
+                        text: existingPromptTemplate
+                    }
                 ])
-                newChatPromptTemplate.promptMessages.push(new HumanMessage({ content: messageContent }))
-                chain.prompt = newChatPromptTemplate
+                msg.inputVariables = chain.prompt.inputVariables
+                chain.prompt = ChatPromptTemplate.fromMessages([msg])
             } else if (chain.prompt instanceof ChatPromptTemplate) {
-                chain.prompt.promptMessages.push(new HumanMessage({ content: messageContent }))
+                if (chain.prompt.promptMessages.at(-1) instanceof HumanMessagePromptTemplate) {
+                    const lastMessage = chain.prompt.promptMessages.pop() as HumanMessagePromptTemplate
+                    const template = (lastMessage.prompt as PromptTemplate).template as string
+                    const msg = HumanMessagePromptTemplate.fromTemplate([
+                        ...messageContent,
+                        {
+                            text: template
+                        }
+                    ])
+                    msg.inputVariables = lastMessage.inputVariables
+                    chain.prompt.promptMessages.push(msg)
+                } else {
+                    chain.prompt.promptMessages.push(new HumanMessage({ content: messageContent }))
+                }
             } else if (chain.prompt instanceof FewShotPromptTemplate) {
                 let existingFewShotPromptTemplate = chain.prompt.examplePrompt.template as string
                 let newFewShotPromptTemplate = ChatPromptTemplate.fromMessages([
@@ -212,8 +233,7 @@ const runPrediction = async (
                 }
             } else {
                 // revert to previous values if image upload is empty
-                chatOpenAI.modelName = model.configuredModel
-                chatOpenAI.maxTokens = model.configuredMaxToken
+                visionChatModel.revertToOriginalModel()
             }
         }
@@ -1,8 +1,9 @@
-import { AnthropicInput, ChatAnthropic } from '@langchain/anthropic'
+import { AnthropicInput, ChatAnthropic as LangchainChatAnthropic } from '@langchain/anthropic'
 import { BaseCache } from '@langchain/core/caches'
 import { BaseLLMParams } from '@langchain/core/language_models/llms'
-import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
+import { ICommonObject, IMultiModalOption, INode, INodeData, INodeParams } from '../../../src/Interface'
 import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
+import { ChatAnthropic } from './FlowiseChatAntrhopic'

 class ChatAnthropic_ChatModels implements INode {
     label: string
@@ -19,12 +20,12 @@ class ChatAnthropic_ChatModels implements INode {
     constructor() {
         this.label = 'ChatAnthropic'
         this.name = 'chatAnthropic'
-        this.version = 3.0
+        this.version = 4.0
         this.type = 'ChatAnthropic'
        this.icon = 'Anthropic.svg'
         this.category = 'Chat Models'
         this.description = 'Wrapper around ChatAnthropic large language models that use the Chat endpoint'
-        this.baseClasses = [this.type, ...getBaseClasses(ChatAnthropic)]
+        this.baseClasses = [this.type, ...getBaseClasses(LangchainChatAnthropic)]
         this.credential = {
             label: 'Connect Credential',
             name: 'credential',
@@ -147,6 +148,15 @@ class ChatAnthropic_ChatModels implements INode {
                 step: 0.1,
                 optional: true,
                 additionalParams: true
+            },
+            {
+                label: 'Allow Image Uploads',
+                name: 'allowImageUploads',
+                type: 'boolean',
+                description:
+                    'Automatically uses claude-3-* models when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent',
+                default: false,
+                optional: true
             }
         ]
     }
@@ -163,6 +173,8 @@ class ChatAnthropic_ChatModels implements INode {
         const credentialData = await getCredentialData(nodeData.credential ?? '', options)
         const anthropicApiKey = getCredentialParam('anthropicApiKey', credentialData, nodeData)

+        const allowImageUploads = nodeData.inputs?.allowImageUploads as boolean
+
         const obj: Partial<AnthropicInput> & BaseLLMParams & { anthropicApiKey?: string } = {
             temperature: parseFloat(temperature),
             modelName,
@@ -175,7 +187,14 @@ class ChatAnthropic_ChatModels implements INode {
         if (topK) obj.topK = parseFloat(topK)
         if (cache) obj.cache = cache

-        const model = new ChatAnthropic(obj)
+        const multiModalOption: IMultiModalOption = {
+            image: {
+                allowImageUploads: allowImageUploads ?? false
+            }
+        }
+
+        const model = new ChatAnthropic(nodeData.id, obj)
+        model.setMultiModalOption(multiModalOption)
         return model
     }
 }
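Note: IMultiModalOption is defined in src/Interface and not shown in this diff. From this hunk and the imageResolution field visible in the ChatOpenAI hunk below, its shape is presumably roughly:

// Assumed shape, reconstructed from usage in this commit; the real definition
// lives in src/Interface and may carry more fields.
export interface IMultiModalOption {
    image?: {
        allowImageUploads?: boolean
        imageResolution?: string
    }
}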
@@ -0,0 +1,34 @@
+import { AnthropicInput, ChatAnthropic as LangchainChatAnthropic } from '@langchain/anthropic'
+import { IMultiModalOption } from '../../../src'
+import { IVisionChatModal } from '../../../src/IVisionChatModal'
+import { BaseLLMParams } from '@langchain/core/language_models/llms'
+
+export class ChatAnthropic extends LangchainChatAnthropic implements IVisionChatModal {
+    configuredModel: string
+    configuredMaxToken: number
+    multiModalOption: IMultiModalOption
+    id: string
+
+    constructor(id: string, fields: Partial<AnthropicInput> & BaseLLMParams & { anthropicApiKey?: string }) {
+        super(fields)
+        this.id = id
+        this.configuredModel = fields?.modelName || 'claude-3-opus-20240229'
+        this.configuredMaxToken = fields?.maxTokens ?? 256
+    }
+
+    revertToOriginalModel(): void {
+        super.modelName = this.configuredModel
+        super.maxTokens = this.configuredMaxToken
+    }
+
+    setMultiModalOption(multiModalOption: IMultiModalOption): void {
+        this.multiModalOption = multiModalOption
+    }
+
+    setVisionModel(): void {
+        if (!this.modelName.startsWith('claude-3')) {
+            super.modelName = 'claude-3-opus-20240229'
+            super.maxTokens = 1024
+        }
+    }
+}
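Note: a quick sketch of how the chains and agents above exercise this new wrapper at runtime (model names here are only illustrative):

// Illustrative usage of the swap-and-revert cycle the chains rely on.
const model = new ChatAnthropic('chatAnthropic_0', {
    modelName: 'claude-2.1', // not a claude-3 model
    maxTokens: 512,
    anthropicApiKey: 'sk-ant-...'
})

model.setVisionModel()        // images attached: switches to claude-3-opus-20240229 with maxTokens 1024
model.revertToOriginalModel() // no images this turn: restores claude-2.1 / 512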
@@ -228,7 +228,7 @@ class ChatOpenAI_ChatModels implements INode {

         const obj: Partial<OpenAIChatInput> &
             Partial<AzureOpenAIInput> &
-            BaseChatModelParams & { configuration?: ClientOptions & LegacyOpenAIInput; multiModalOption?: IMultiModalOption } = {
+            BaseChatModelParams & { configuration?: ClientOptions & LegacyOpenAIInput } = {
             temperature: parseFloat(temperature),
             modelName,
             openAIApiKey,
@@ -265,10 +265,9 @@ class ChatOpenAI_ChatModels implements INode {
                 imageResolution
             }
         }
-        obj.multiModalOption = multiModalOption

         const model = new ChatOpenAI(nodeData.id, obj)
+        model.setMultiModalOption(multiModalOption)
         return model
     }
 }
@@ -1,39 +1,40 @@
 import type { ClientOptions } from 'openai'
-import {
-    ChatOpenAI as LangchainChatOpenAI,
-    OpenAIChatInput,
-    LegacyOpenAIInput,
-    AzureOpenAIInput,
-    ChatOpenAICallOptions
-} from '@langchain/openai'
+import { ChatOpenAI as LangchainChatOpenAI, OpenAIChatInput, LegacyOpenAIInput, AzureOpenAIInput } from '@langchain/openai'
 import { BaseChatModelParams } from '@langchain/core/language_models/chat_models'
-import { BaseMessageLike } from '@langchain/core/messages'
-import { Callbacks } from '@langchain/core/callbacks/manager'
-import { LLMResult } from '@langchain/core/outputs'
 import { IMultiModalOption } from '../../../src'
+import { IVisionChatModal } from '../../../src/IVisionChatModal'

-export class ChatOpenAI extends LangchainChatOpenAI {
+export class ChatOpenAI extends LangchainChatOpenAI implements IVisionChatModal {
     configuredModel: string
-    configuredMaxToken?: number
-    multiModalOption?: IMultiModalOption
+    configuredMaxToken: number
+    multiModalOption: IMultiModalOption
     id: string

     constructor(
         id: string,
         fields?: Partial<OpenAIChatInput> &
             Partial<AzureOpenAIInput> &
-            BaseChatModelParams & { configuration?: ClientOptions & LegacyOpenAIInput; multiModalOption?: IMultiModalOption },
+            BaseChatModelParams & { configuration?: ClientOptions & LegacyOpenAIInput },
         /** @deprecated */
         configuration?: ClientOptions & LegacyOpenAIInput
     ) {
         super(fields, configuration)
         this.id = id
-        this.multiModalOption = fields?.multiModalOption
         this.configuredModel = fields?.modelName ?? 'gpt-3.5-turbo'
-        this.configuredMaxToken = fields?.maxTokens
+        this.configuredMaxToken = fields?.maxTokens ?? 256
     }

-    async generate(messages: BaseMessageLike[][], options?: string[] | ChatOpenAICallOptions, callbacks?: Callbacks): Promise<LLMResult> {
-        return super.generate(messages, options, callbacks)
-    }
+    revertToOriginalModel(): void {
+        super.modelName = this.configuredModel
+        super.maxTokens = this.configuredMaxToken
+    }
+
+    setMultiModalOption(multiModalOption: IMultiModalOption): void {
+        this.multiModalOption = multiModalOption
+    }
+
+    setVisionModel(): void {
+        super.modelName = 'gpt-4-vision-preview'
+        super.maxTokens = 1024
+    }
 }
@@ -29,7 +29,7 @@
         "@google-ai/generativelanguage": "^0.2.1",
         "@google/generative-ai": "^0.1.3",
         "@huggingface/inference": "^2.6.1",
-        "@langchain/anthropic": "^0.0.10",
+        "@langchain/anthropic": "^0.1.4",
         "@langchain/cohere": "^0.0.5",
         "@langchain/community": "^0.0.30",
         "@langchain/google-genai": "^0.0.10",
@@ -1534,7 +1534,7 @@ export class App {
         if (!chatflow) return `Chatflow ${chatflowid} not found`

         const uploadAllowedNodes = ['llmChain', 'conversationChain', 'mrklAgentChat', 'conversationalAgent']
-        const uploadProcessingNodes = ['chatOpenAI', 'azureChatOpenAI']
+        const uploadProcessingNodes = ['chatOpenAI', 'chatAnthropic']

         const flowObj = JSON.parse(chatflow.flowData)
         const imgUploadSizeAndTypes: IUploadFileSizeAndTypes[] = []
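Note: the rest of this server method is not shown, but these two lists presumably gate the chat UI's image-upload control: uploadAllowedNodes are the chains/agents that can consume images, uploadProcessingNodes the vision-capable chat models. A hypothetical reconstruction of that check; only the two lists come from the diff, every other name is invented for illustration:

// Hypothetical sketch of the gating logic this endpoint presumably performs.
const nodes = flowObj.nodes ?? []
const hasVisionModel = nodes.some(
    (node: any) => uploadProcessingNodes.includes(node.data?.name) && node.data?.inputs?.allowImageUploads
)
const hasSupportedChain = nodes.some((node: any) => uploadAllowedNodes.includes(node.data?.name))
const isImageUploadAllowed = hasVisionModel && hasSupportedChain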