Add Claude (Anthropic) support for image uploads

vinodkiran 2024-03-07 18:55:24 +05:30
parent a2caf3e265
commit 63b8c23072
10 changed files with 185 additions and 108 deletions

View File

@@ -4,7 +4,12 @@ import { BaseChatModel } from '@langchain/core/language_models/chat_models'
import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages'
import { ChainValues } from '@langchain/core/utils/types'
import { AgentStep } from '@langchain/core/agents'
import { renderTemplate, MessagesPlaceholder } from '@langchain/core/prompts'
import {
renderTemplate,
MessagesPlaceholder,
HumanMessagePromptTemplate,
PromptTemplate
} from "@langchain/core/prompts";
import { RunnableSequence } from '@langchain/core/runnables'
import { ChatConversationalAgent } from 'langchain/agents'
import { getBaseClasses } from '../../../src/utils'
@@ -12,7 +17,8 @@ import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from
import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams } from '../../../src/Interface'
import { AgentExecutor } from '../../../src/agents'
import { ChatOpenAI } from '../../chatmodels/ChatOpenAI/FlowiseChatOpenAI'
import { addImagesToMessages } from '../../../src/multiModalUtils'
import { addImagesToMessages, llmSupportsVision } from '../../../src/multiModalUtils'
import { IVisionChatModal } from '../../../src/IVisionChatModal'
const DEFAULT_PREFIX = `Assistant is a large language model trained by OpenAI.
@@ -150,33 +156,39 @@ const prepareAgent = async (
outputParser
})
if (model instanceof ChatOpenAI) {
let humanImageMessages: HumanMessage[] = []
if (llmSupportsVision(model)) {
const visionChatModel = model as IVisionChatModal
// let humanImageMessages: HumanMessage[] = []
const messageContent = addImagesToMessages(nodeData, options, model.multiModalOption)
if (messageContent?.length) {
// Change model to gpt-4-vision
model.modelName = 'gpt-4-vision-preview'
visionChatModel.setVisionModel()
// Change default max token to higher when using gpt-4-vision
model.maxTokens = 1024
for (const msg of messageContent) {
humanImageMessages.push(new HumanMessage({ content: [msg] }))
}
// for (const msg of messageContent) {
// humanImageMessages.push(new HumanMessage({ content: [msg] }))
// }
// Pop the `agent_scratchpad` MessagesPlaceholder
let messagePlaceholder = prompt.promptMessages.pop() as MessagesPlaceholder
if (prompt.promptMessages.at(-1) instanceof HumanMessagePromptTemplate) {
const lastMessage = prompt.promptMessages.pop() as HumanMessagePromptTemplate
const template = (lastMessage.prompt as PromptTemplate).template as string
const msg = HumanMessagePromptTemplate.fromTemplate([
...messageContent,
{
text: template
}
])
msg.inputVariables = lastMessage.inputVariables
prompt.promptMessages.push(msg)
}
// Add the HumanMessage for images
prompt.promptMessages.push(...humanImageMessages)
//prompt.promptMessages.push(...humanImageMessages)
// Add the `agent_scratchpad` MessagesPlaceholder back
prompt.promptMessages.push(messagePlaceholder)
} else {
// revert to previous values if image upload is empty
model.modelName = model.configuredModel
model.maxTokens = model.configuredMaxToken
visionChatModel.revertToOriginalModel()
}
}
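The block above folds the uploaded images and the original text template into a single human message. A minimal sketch of that rebuild in isolation, assuming addImagesToMessages yields LangChain-style image_url parts (the data URL below is illustrative):

import { HumanMessagePromptTemplate, PromptTemplate } from '@langchain/core/prompts'

// Illustrative image part; addImagesToMessages is assumed to return entries of this shape
const messageContent = [{ type: 'image_url', image_url: { url: 'data:image/png;base64,...' } }]

// The original text-only human template, e.g. the agent prompt's last message
const lastMessage = HumanMessagePromptTemplate.fromTemplate('{input}')
const template = (lastMessage.prompt as PromptTemplate).template as string

// Rebuild with the image parts first and the original text template last
const msg = HumanMessagePromptTemplate.fromTemplate([...messageContent, { text: template }])
msg.inputVariables = lastMessage.inputVariables // keep {input} resolvable after the rebuild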

View File

@@ -1,6 +1,5 @@
import { flatten } from 'lodash'
import { AgentExecutor } from 'langchain/agents'
import { HumanMessage } from '@langchain/core/messages'
import { ChatPromptTemplate, HumanMessagePromptTemplate } from '@langchain/core/prompts'
import { Tool } from '@langchain/core/tools'
import type { PromptTemplate } from '@langchain/core/prompts'
@@ -10,8 +9,8 @@ import { additionalCallbacks } from '../../../src/handler'
import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses } from '../../../src/utils'
import { createReactAgent } from '../../../src/agents'
import { ChatOpenAI } from '../../chatmodels/ChatOpenAI/FlowiseChatOpenAI'
import { addImagesToMessages } from '../../../src/multiModalUtils'
import { addImagesToMessages, llmSupportsVision } from '../../../src/multiModalUtils'
import { IVisionChatModal } from '../../../src/IVisionChatModal'
class MRKLAgentChat_Agents implements INode {
label: string
@@ -68,23 +67,26 @@ class MRKLAgentChat_Agents implements INode {
const prompt = await pull<PromptTemplate>('hwchase17/react-chat')
let chatPromptTemplate = undefined
if (model instanceof ChatOpenAI) {
if (llmSupportsVision(model)) {
const visionChatModel = model as IVisionChatModal
const messageContent = addImagesToMessages(nodeData, options, model.multiModalOption)
if (messageContent?.length) {
// Change model to gpt-4-vision
model.modelName = 'gpt-4-vision-preview'
// Change default max token to higher when using gpt-4-vision
model.maxTokens = 1024
// Change model to vision supported
visionChatModel.setVisionModel()
const oldTemplate = prompt.template as string
chatPromptTemplate = ChatPromptTemplate.fromMessages([HumanMessagePromptTemplate.fromTemplate(oldTemplate)])
chatPromptTemplate.promptMessages.push(new HumanMessage({ content: messageContent }))
const msg = HumanMessagePromptTemplate.fromTemplate([
...messageContent,
{
text: oldTemplate
}
])
msg.inputVariables = prompt.inputVariables
chatPromptTemplate = ChatPromptTemplate.fromMessages([msg])
} else {
// revert to previous values if image upload is empty
model.modelName = model.configuredModel
model.maxTokens = model.configuredMaxToken
visionChatModel.revertToOriginalModel()
}
}
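Both agent nodes now gate this path on llmSupportsVision instead of an instanceof ChatOpenAI check. The helper itself is not shown in this diff; a plausible type-guard sketch, assuming any IVisionChatModal implementation exposes a multiModalOption:

import { IVisionChatModal } from './IVisionChatModal'

// Hypothetical sketch of the guard exported from src/multiModalUtils
export const llmSupportsVision = (value: any): value is IVisionChatModal => !!value?.multiModalOption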

View File

@@ -2,15 +2,16 @@ import { ConversationChain } from 'langchain/chains'
import { ChatPromptTemplate, HumanMessagePromptTemplate, MessagesPlaceholder, SystemMessagePromptTemplate } from '@langchain/core/prompts'
import { RunnableSequence } from '@langchain/core/runnables'
import { StringOutputParser } from '@langchain/core/output_parsers'
import { HumanMessage } from '@langchain/core/messages'
import { ConsoleCallbackHandler as LCConsoleCallbackHandler } from '@langchain/core/tracers/console'
import { checkInputs, Moderation, streamResponse } from '../../moderation/Moderation'
import { formatResponse } from '../../outputparsers/OutputParserHelpers'
import { addImagesToMessages } from '../../../src/multiModalUtils'
import { addImagesToMessages, llmSupportsVision } from '../../../src/multiModalUtils'
import { ChatOpenAI } from '../../chatmodels/ChatOpenAI/FlowiseChatOpenAI'
import { FlowiseMemory, ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { FlowiseMemory, ICommonObject, INode, INodeData, INodeParams, MessageContentImageUrl } from '../../../src/Interface'
import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
import { getBaseClasses, handleEscapeCharacters } from '../../../src/utils'
import { IVisionChatModal } from '../../../src/IVisionChatModal'
let systemMessage = `The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.`
const inputKey = 'input'
@@ -145,7 +146,7 @@ class ConversationChain_Chains implements INode {
}
}
const prepareChatPrompt = (nodeData: INodeData, humanImageMessages: HumanMessage[]) => {
const prepareChatPrompt = (nodeData: INodeData, humanImageMessages: MessageContentImageUrl[]) => {
const memory = nodeData.inputs?.memory as FlowiseMemory
const prompt = nodeData.inputs?.systemMessagePrompt as string
const chatPromptTemplate = nodeData.inputs?.chatPromptTemplate as ChatPromptTemplate
@@ -154,7 +155,6 @@ const prepareChatPrompt = (nodeData: INodeData, humanImageMessages: HumanMessage
const sysPrompt = chatPromptTemplate.promptMessages[0]
const humanPrompt = chatPromptTemplate.promptMessages[chatPromptTemplate.promptMessages.length - 1]
const messages = [sysPrompt, new MessagesPlaceholder(memory.memoryKey ?? 'chat_history'), humanPrompt]
if (humanImageMessages.length) messages.push(...humanImageMessages)
const chatPrompt = ChatPromptTemplate.fromMessages(messages)
if ((chatPromptTemplate as any).promptValues) {
@@ -168,9 +168,8 @@ const prepareChatPrompt = (nodeData: INodeData, humanImageMessage
const messages = [
SystemMessagePromptTemplate.fromTemplate(prompt ? prompt : systemMessage),
new MessagesPlaceholder(memory.memoryKey ?? 'chat_history'),
HumanMessagePromptTemplate.fromTemplate(`{${inputKey}}`)
HumanMessagePromptTemplate.fromTemplate([`{${inputKey}}`, ...humanImageMessages])
]
if (humanImageMessages.length) messages.push(...(humanImageMessages as any[]))
const chatPrompt = ChatPromptTemplate.fromMessages(messages)
@@ -183,28 +182,19 @@ const prepareChain = (nodeData: INodeData, options: ICommonObject, sessionId?: s
const memory = nodeData.inputs?.memory as FlowiseMemory
const memoryKey = memory.memoryKey ?? 'chat_history'
let humanImageMessages: HumanMessage[] = []
if (model instanceof ChatOpenAI) {
const messageContent = addImagesToMessages(nodeData, options, model.multiModalOption)
let messageContent: MessageContentImageUrl[] = []
if (llmSupportsVision(model)) {
messageContent = addImagesToMessages(nodeData, options, model.multiModalOption)
const visionChatModel = model as IVisionChatModal
if (messageContent?.length) {
// Change model to gpt-4-vision
model.modelName = 'gpt-4-vision-preview'
// Change default max token to higher when using gpt-4-vision
model.maxTokens = 1024
for (const msg of messageContent) {
humanImageMessages.push(new HumanMessage({ content: [msg] }))
}
visionChatModel.setVisionModel()
} else {
// revert to previous values if image upload is empty
model.modelName = model.configuredModel
model.maxTokens = model.configuredMaxToken
visionChatModel.revertToOriginalModel()
}
}
const chatPrompt = prepareChatPrompt(nodeData, humanImageMessages)
const chatPrompt = prepareChatPrompt(nodeData, messageContent)
let promptVariables = {}
const promptValuesRaw = (chatPrompt as any).promptValues
if (promptValuesRaw) {
@@ -228,7 +218,7 @@ const prepareChain = (nodeData: INodeData, options: ICommonObject, sessionId?: s
},
...promptVariables
},
prepareChatPrompt(nodeData, humanImageMessages),
prepareChatPrompt(nodeData, messageContent),
model,
new StringOutputParser()
])
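prepareChatPrompt now receives the raw MessageContentImageUrl[] from addImagesToMessages rather than prebuilt HumanMessage wrappers. Assuming the type mirrors LangChain's image_url content part, an entry would look like:

import { MessageContentImageUrl } from '../../../src/Interface'

// Illustrative part; in practice the url is a data URL or hosted image link
const part: MessageContentImageUrl = {
    type: 'image_url',
    image_url: { url: 'data:image/jpeg;base64,...', detail: 'low' }
}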

View File

@@ -1,16 +1,22 @@
import { BaseLanguageModel, BaseLanguageModelCallOptions } from '@langchain/core/language_models/base'
import { BaseLLMOutputParser, BaseOutputParser } from '@langchain/core/output_parsers'
import { HumanMessage } from '@langchain/core/messages'
import { ChatPromptTemplate, FewShotPromptTemplate, PromptTemplate, HumanMessagePromptTemplate } from '@langchain/core/prompts'
import { OutputFixingParser } from 'langchain/output_parsers'
import { LLMChain } from 'langchain/chains'
import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
import { getBaseClasses, handleEscapeCharacters } from '../../../src/utils'
import { checkInputs, Moderation, streamResponse } from '../../moderation/Moderation'
import { formatResponse, injectOutputParser } from '../../outputparsers/OutputParserHelpers'
import { ChatOpenAI } from '../../chatmodels/ChatOpenAI/FlowiseChatOpenAI'
import { addImagesToMessages } from '../../../src/multiModalUtils'
import { BaseLanguageModel, BaseLanguageModelCallOptions } from '@langchain/core/language_models/base'
import { BaseLLMOutputParser, BaseOutputParser } from '@langchain/core/output_parsers'
import { HumanMessage } from '@langchain/core/messages'
import {
ChatPromptTemplate,
FewShotPromptTemplate,
HumanMessagePromptTemplate,
PromptTemplate
} from '@langchain/core/prompts'
import { OutputFixingParser } from 'langchain/output_parsers'
import { LLMChain } from 'langchain/chains'
import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
import { additionalCallbacks, ConsoleCallbackHandler, CustomChainHandler } from '../../../src/handler'
import { getBaseClasses, handleEscapeCharacters } from '../../../src/utils'
import { checkInputs, Moderation, streamResponse } from '../../moderation/Moderation'
import { formatResponse, injectOutputParser } from '../../outputparsers/OutputParserHelpers'
import { ChatOpenAI } from '../../chatmodels/ChatOpenAI/FlowiseChatOpenAI'
import { addImagesToMessages, llmSupportsVision } from '../../../src/multiModalUtils'
import { IVisionChatModal } from '../../../src/IVisionChatModal'
class LLMChain_Chains implements INode {
label: string
@@ -183,24 +189,39 @@ const runPrediction = async (
* TO: { "value": "hello i am ben\n\n\thow are you?" }
*/
const promptValues = handleEscapeCharacters(promptValuesRaw, true)
const messageContent = addImagesToMessages(nodeData, options, model.multiModalOption)
if (chain.llm instanceof ChatOpenAI) {
const chatOpenAI = chain.llm as ChatOpenAI
if (llmSupportsVision(chain.llm)) {
const messageContent = addImagesToMessages(nodeData, options, model.multiModalOption)
const visionChatModel = chain.llm as IVisionChatModal
if (messageContent?.length) {
// Change model to gpt-4-vision && max token to higher when using gpt-4-vision
chatOpenAI.modelName = 'gpt-4-vision-preview'
chatOpenAI.maxTokens = 1024
visionChatModel.setVisionModel()
// Add image to the message
if (chain.prompt instanceof PromptTemplate) {
const existingPromptTemplate = chain.prompt.template as string
let newChatPromptTemplate = ChatPromptTemplate.fromMessages([
HumanMessagePromptTemplate.fromTemplate(existingPromptTemplate)
const msg = HumanMessagePromptTemplate.fromTemplate([
...messageContent,
{
text: existingPromptTemplate
}
])
newChatPromptTemplate.promptMessages.push(new HumanMessage({ content: messageContent }))
chain.prompt = newChatPromptTemplate
msg.inputVariables = chain.prompt.inputVariables
chain.prompt = ChatPromptTemplate.fromMessages([msg])
} else if (chain.prompt instanceof ChatPromptTemplate) {
if (chain.prompt.promptMessages.at(-1) instanceof HumanMessagePromptTemplate) {
const lastMessage = chain.prompt.promptMessages.pop() as HumanMessagePromptTemplate
const template = (lastMessage.prompt as PromptTemplate).template as string
const msg = HumanMessagePromptTemplate.fromTemplate([
...messageContent,
{
text: template
}
])
msg.inputVariables = lastMessage.inputVariables
chain.prompt.promptMessages.push(msg)
} else {
chain.prompt.promptMessages.push(new HumanMessage({ content: messageContent }))
}
} else if (chain.prompt instanceof FewShotPromptTemplate) {
let existingFewShotPromptTemplate = chain.prompt.examplePrompt.template as string
let newFewShotPromptTemplate = ChatPromptTemplate.fromMessages([
@@ -212,8 +233,7 @@ const runPrediction = async (
}
} else {
// revert to previous values if image upload is empty
chatOpenAI.modelName = model.configuredModel
chatOpenAI.maxTokens = model.configuredMaxToken
visionChatModel.revertToOriginalModel()
}
}

View File

@@ -1,8 +1,9 @@
import { AnthropicInput, ChatAnthropic } from '@langchain/anthropic'
import { AnthropicInput, ChatAnthropic as LangchainChatAnthropic } from '@langchain/anthropic'
import { BaseCache } from '@langchain/core/caches'
import { BaseLLMParams } from '@langchain/core/language_models/llms'
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { ICommonObject, IMultiModalOption, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { ChatAnthropic } from './FlowiseChatAnthropic'
class ChatAnthropic_ChatModels implements INode {
label: string
@@ -19,12 +20,12 @@ class ChatAnthropic_ChatModels implements INode {
constructor() {
this.label = 'ChatAnthropic'
this.name = 'chatAnthropic'
this.version = 3.0
this.version = 4.0
this.type = 'ChatAnthropic'
this.icon = 'Anthropic.svg'
this.category = 'Chat Models'
this.description = 'Wrapper around ChatAnthropic large language models that use the Chat endpoint'
this.baseClasses = [this.type, ...getBaseClasses(ChatAnthropic)]
this.baseClasses = [this.type, ...getBaseClasses(LangchainChatAnthropic)]
this.credential = {
label: 'Connect Credential',
name: 'credential',
@@ -147,6 +148,15 @@ class ChatAnthropic_ChatModels implements INode {
step: 0.1,
optional: true,
additionalParams: true
},
{
label: 'Allow Image Uploads',
name: 'allowImageUploads',
type: 'boolean',
description:
'Automatically uses claude-3-* models when an image is uploaded from the chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent',
default: false,
optional: true
}
]
}
@@ -163,6 +173,8 @@
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const anthropicApiKey = getCredentialParam('anthropicApiKey', credentialData, nodeData)
const allowImageUploads = nodeData.inputs?.allowImageUploads as boolean
const obj: Partial<AnthropicInput> & BaseLLMParams & { anthropicApiKey?: string } = {
temperature: parseFloat(temperature),
modelName,
@@ -175,7 +187,14 @@
if (topK) obj.topK = parseFloat(topK)
if (cache) obj.cache = cache
const model = new ChatAnthropic(obj)
const multiModalOption: IMultiModalOption = {
image: {
allowImageUploads: allowImageUploads ?? false
}
}
const model = new ChatAnthropic(nodeData.id, obj)
model.setMultiModalOption(multiModalOption)
return model
}
}

View File

@@ -0,0 +1,34 @@
import { AnthropicInput, ChatAnthropic as LangchainChatAnthropic } from '@langchain/anthropic'
import { IMultiModalOption } from '../../../src'
import { IVisionChatModal } from '../../../src/IVisionChatModal'
import { BaseLLMParams } from '@langchain/core/language_models/llms'
export class ChatAnthropic extends LangchainChatAnthropic implements IVisionChatModal {
configuredModel: string
configuredMaxToken: number
multiModalOption: IMultiModalOption
id: string
constructor(id: string, fields: Partial<AnthropicInput> & BaseLLMParams & { anthropicApiKey?: string }) {
super(fields)
this.id = id
this.configuredModel = fields?.modelName || 'claude-3-opus-20240229'
this.configuredMaxToken = fields?.maxTokens ?? 256
}
revertToOriginalModel(): void {
super.modelName = this.configuredModel
super.maxTokens = this.configuredMaxToken
}
setMultiModalOption(multiModalOption: IMultiModalOption): void {
this.multiModalOption = multiModalOption
}
setVisionModel(): void {
if (!this.modelName.startsWith('claude-3')) {
super.modelName = 'claude-3-opus-20240229'
super.maxTokens = 1024
}
}
}
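The wrapper records the user-configured model and token limit so the vision switch is reversible. An illustrative lifecycle (node id and key are placeholders):

const model = new ChatAnthropic('chatAnthropic_0', { modelName: 'claude-2.1', maxTokens: 256, anthropicApiKey: 'sk-ant-...' })

model.setVisionModel() // claude-2.1 is not a claude-3 model, so this switches to claude-3-opus-20240229 with maxTokens 1024
model.revertToOriginalModel() // restores claude-2.1 / 256 when no images are attached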

View File

@@ -228,7 +228,7 @@
const obj: Partial<OpenAIChatInput> &
Partial<AzureOpenAIInput> &
BaseChatModelParams & { configuration?: ClientOptions & LegacyOpenAIInput; multiModalOption?: IMultiModalOption } = {
BaseChatModelParams & { configuration?: ClientOptions & LegacyOpenAIInput } = {
temperature: parseFloat(temperature),
modelName,
openAIApiKey,
@@ -265,10 +265,9 @@
imageResolution
}
}
obj.multiModalOption = multiModalOption
const model = new ChatOpenAI(nodeData.id, obj)
model.setMultiModalOption(multiModalOption)
return model
}
}
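Both chat-model nodes now hand their upload settings to the model via setMultiModalOption instead of threading them through the constructor fields. From the two construction sites, IMultiModalOption plausibly has this shape (the imageResolution union is an assumption based on OpenAI's image detail levels):

export interface IMultiModalOption {
    image?: {
        allowImageUploads?: boolean
        imageResolution?: 'low' | 'high' | 'auto'
    }
}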

View File

@@ -1,39 +1,40 @@
import type { ClientOptions } from 'openai'
import {
ChatOpenAI as LangchainChatOpenAI,
OpenAIChatInput,
LegacyOpenAIInput,
AzureOpenAIInput,
ChatOpenAICallOptions
} from '@langchain/openai'
import { ChatOpenAI as LangchainChatOpenAI, OpenAIChatInput, LegacyOpenAIInput, AzureOpenAIInput } from '@langchain/openai'
import { BaseChatModelParams } from '@langchain/core/language_models/chat_models'
import { BaseMessageLike } from '@langchain/core/messages'
import { Callbacks } from '@langchain/core/callbacks/manager'
import { LLMResult } from '@langchain/core/outputs'
import { IMultiModalOption } from '../../../src'
import { IVisionChatModal } from '../../../src/IVisionChatModal'
export class ChatOpenAI extends LangchainChatOpenAI {
export class ChatOpenAI extends LangchainChatOpenAI implements IVisionChatModal {
configuredModel: string
configuredMaxToken?: number
multiModalOption?: IMultiModalOption
configuredMaxToken: number
multiModalOption: IMultiModalOption
id: string
constructor(
id: string,
fields?: Partial<OpenAIChatInput> &
Partial<AzureOpenAIInput> &
BaseChatModelParams & { configuration?: ClientOptions & LegacyOpenAIInput; multiModalOption?: IMultiModalOption },
BaseChatModelParams & { configuration?: ClientOptions & LegacyOpenAIInput },
/** @deprecated */
configuration?: ClientOptions & LegacyOpenAIInput
) {
super(fields, configuration)
this.id = id
this.multiModalOption = fields?.multiModalOption
this.configuredModel = fields?.modelName ?? 'gpt-3.5-turbo'
this.configuredMaxToken = fields?.maxTokens
this.configuredMaxToken = fields?.maxTokens ?? 256
}
async generate(messages: BaseMessageLike[][], options?: string[] | ChatOpenAICallOptions, callbacks?: Callbacks): Promise<LLMResult> {
return super.generate(messages, options, callbacks)
revertToOriginalModel(): void {
super.modelName = this.configuredModel
super.maxTokens = this.configuredMaxToken
}
setMultiModalOption(multiModalOption: IMultiModalOption): void {
this.multiModalOption = multiModalOption
}
setVisionModel(): void {
super.modelName = 'gpt-4-vision-preview'
super.maxTokens = 1024
}
}
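The IVisionChatModal contract itself is not part of this diff; inferred from the two implementations above, it would contain roughly:

import { IMultiModalOption } from './Interface'

// Inferred sketch of src/IVisionChatModal.ts; member names come from the classes above
export interface IVisionChatModal {
    id: string
    configuredModel: string
    configuredMaxToken: number
    multiModalOption: IMultiModalOption
    setVisionModel(): void
    revertToOriginalModel(): void
    setMultiModalOption(multiModalOption: IMultiModalOption): void
}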

View File

@@ -29,7 +29,7 @@
"@google-ai/generativelanguage": "^0.2.1",
"@google/generative-ai": "^0.1.3",
"@huggingface/inference": "^2.6.1",
"@langchain/anthropic": "^0.0.10",
"@langchain/anthropic": "^0.1.4",
"@langchain/cohere": "^0.0.5",
"@langchain/community": "^0.0.30",
"@langchain/google-genai": "^0.0.10",

View File

@@ -1534,7 +1534,7 @@ export class App {
if (!chatflow) return `Chatflow ${chatflowid} not found`
const uploadAllowedNodes = ['llmChain', 'conversationChain', 'mrklAgentChat', 'conversationalAgent']
const uploadProcessingNodes = ['chatOpenAI', 'azureChatOpenAI']
const uploadProcessingNodes = ['chatOpenAI', 'chatAnthropic']
const flowObj = JSON.parse(chatflow.flowData)
const imgUploadSizeAndTypes: IUploadFileSizeAndTypes[] = []