touchup fixes

This commit is contained in:
Henry 2024-02-21 18:39:24 +08:00
parent d17280255b
commit a48edcd3a8
4 changed files with 54 additions and 47 deletions

View File

@ -4,7 +4,7 @@ import { BaseChatModel } from '@langchain/core/language_models/chat_models'
import { AIMessage, BaseMessage, HumanMessage } from '@langchain/core/messages'
import { ChainValues } from '@langchain/core/utils/types'
import { AgentStep } from '@langchain/core/agents'
import { renderTemplate } from '@langchain/core/prompts'
import { renderTemplate, MessagesPlaceholder } from '@langchain/core/prompts'
import { RunnableSequence } from '@langchain/core/runnables'
import { ChatConversationalAgent } from 'langchain/agents'
import { getBaseClasses } from '../../../src/utils'
@ -141,11 +141,6 @@ const prepareAgent = async (
const memoryKey = memory.memoryKey ? memory.memoryKey : 'chat_history'
const inputKey = memory.inputKey ? memory.inputKey : 'input'
/** Bind a stop token to the model */
const modelWithStop = model.bind({
stop: ['\nObservation']
})
const outputParser = ChatConversationalAgent.getDefaultOutputParser({
llm: model,
toolNames: tools.map((tool) => tool.name)
@ -158,30 +153,39 @@ const prepareAgent = async (
if (model instanceof ChatOpenAI) {
let humanImageMessages: HumanMessage[] = []
const chatModel = model as ChatOpenAI
const messageContent = addImagesToMessages(nodeData, options, model.multiModalOption)
if (messageContent?.length) {
// Change model to gpt-4-vision
chatModel.modelName = 'gpt-4-vision-preview'
model.modelName = 'gpt-4-vision-preview'
// Change default max token to higher when using gpt-4-vision
chatModel.maxTokens = 1024
model.maxTokens = 1024
for (const msg of messageContent) {
humanImageMessages.push(new HumanMessage({ content: [msg] }))
}
let messagePlaceholder = prompt.promptMessages.pop()
// Pop the `agent_scratchpad` MessagesPlaceholder
let messagePlaceholder = prompt.promptMessages.pop() as MessagesPlaceholder
// Add the HumanMessage for images
prompt.promptMessages.push(...humanImageMessages)
// @ts-ignore
// Add the `agent_scratchpad` MessagesPlaceholder back
prompt.promptMessages.push(messagePlaceholder)
} else {
// revert to previous values if image upload is empty
chatModel.modelName = chatModel.configuredModel
chatModel.maxTokens = chatModel.configuredMaxToken
model.modelName = model.configuredModel
model.maxTokens = model.configuredMaxToken
}
}
/** Bind a stop token to the model */
const modelWithStop = model.bind({
stop: ['\nObservation']
})
const runnableAgent = RunnableSequence.from([
{
[inputKey]: (i: { input: string; steps: AgentStep[] }) => i.input,

View File

@ -11,8 +11,7 @@ import { createReactAgent } from '../../../src/agents'
import { ChatOpenAI } from '../../chatmodels/ChatOpenAI/FlowiseChatOpenAI'
import { HumanMessage } from '@langchain/core/messages'
import { addImagesToMessages } from '../../../src/multiModalUtils'
import { ChatPromptTemplate, SystemMessagePromptTemplate } from 'langchain/prompts'
// import { injectLcAgentExecutorNodeData } from '../../../src/multiModalUtils'
import { ChatPromptTemplate, HumanMessagePromptTemplate } from 'langchain/prompts'
class MRKLAgentChat_Agents implements INode {
label: string
@ -66,32 +65,33 @@ class MRKLAgentChat_Agents implements INode {
let tools = nodeData.inputs?.tools as Tool[]
tools = flatten(tools)
const promptWithChat = await pull<PromptTemplate>('hwchase17/react-chat')
const prompt = await pull<PromptTemplate>('hwchase17/react-chat')
let chatPromptTemplate = undefined
if (model instanceof ChatOpenAI) {
const chatModel = model as ChatOpenAI
const messageContent = addImagesToMessages(nodeData, options, model.multiModalOption)
if (messageContent?.length) {
// Change model to gpt-4-vision
chatModel.modelName = 'gpt-4-vision-preview'
model.modelName = 'gpt-4-vision-preview'
// Change default max token to higher when using gpt-4-vision
chatModel.maxTokens = 1024
const oldTemplate = promptWithChat.template as string
chatPromptTemplate = ChatPromptTemplate.fromMessages([SystemMessagePromptTemplate.fromTemplate(oldTemplate)])
model.maxTokens = 1024
const oldTemplate = prompt.template as string
chatPromptTemplate = ChatPromptTemplate.fromMessages([HumanMessagePromptTemplate.fromTemplate(oldTemplate)])
chatPromptTemplate.promptMessages.push(new HumanMessage({ content: messageContent }))
} else {
// revert to previous values if image upload is empty
chatModel.modelName = chatModel.configuredModel
chatModel.maxTokens = chatModel.configuredMaxToken
model.modelName = model.configuredModel
model.maxTokens = model.configuredMaxToken
}
}
const agent = await createReactAgent({
llm: model,
tools,
prompt: chatPromptTemplate ?? promptWithChat
prompt: chatPromptTemplate ?? prompt
})
const executor = new AgentExecutor({

View File

@ -1,16 +1,16 @@
import { ConversationChain } from 'langchain/chains'
import { getBaseClasses, handleEscapeCharacters } from '../../../src/utils'
import { ChatPromptTemplate, HumanMessagePromptTemplate, MessagesPlaceholder, SystemMessagePromptTemplate } from 'langchain/prompts'
import { FlowiseMemory, ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
import { RunnableSequence } from 'langchain/schema/runnable'
import { StringOutputParser } from 'langchain/schema/output_parser'
import { HumanMessage } from 'langchain/schema'
import { ChatPromptTemplate, HumanMessagePromptTemplate, MessagesPlaceholder, SystemMessagePromptTemplate } from '@langchain/core/prompts'
import { RunnableSequence } from '@langchain/core/runnables'
import { StringOutputParser } from '@langchain/core/output_parsers'
import { HumanMessage } from '@langchain/core/messages'
import { ConsoleCallbackHandler as LCConsoleCallbackHandler } from '@langchain/core/tracers/console'
import { checkInputs, Moderation, streamResponse } from '../../moderation/Moderation'
import { formatResponse } from '../../outputparsers/OutputParserHelpers'
import { addImagesToMessages } from '../../../src/multiModalUtils'
import { ChatOpenAI } from '../../chatmodels/ChatOpenAI/FlowiseChatOpenAI'
import { FlowiseMemory, ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
import { getBaseClasses, handleEscapeCharacters } from '../../../src/utils'
let systemMessage = `The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.`
const inputKey = 'input'
@ -179,29 +179,28 @@ const prepareChatPrompt = (nodeData: INodeData, humanImageMessages: HumanMessage
const prepareChain = (nodeData: INodeData, options: ICommonObject, sessionId?: string) => {
const chatHistory = options.chatHistory
let model = nodeData.inputs?.model
let model = nodeData.inputs?.model as ChatOpenAI
const memory = nodeData.inputs?.memory as FlowiseMemory
const memoryKey = memory.memoryKey ?? 'chat_history'
let humanImageMessages: HumanMessage[] = []
if (model instanceof ChatOpenAI) {
const chatModel = model as ChatOpenAI
const messageContent = addImagesToMessages(nodeData, options, model.multiModalOption)
if (messageContent?.length) {
// Change model to gpt-4-vision
chatModel.modelName = 'gpt-4-vision-preview'
model.modelName = 'gpt-4-vision-preview'
// Change default max token to higher when using gpt-4-vision
chatModel.maxTokens = 1024
model.maxTokens = 1024
for (const msg of messageContent) {
humanImageMessages.push(new HumanMessage({ content: [msg] }))
}
} else {
// revert to previous values if image upload is empty
chatModel.modelName = chatModel.configuredModel
chatModel.maxTokens = chatModel.configuredMaxToken
model.modelName = model.configuredModel
model.maxTokens = model.configuredMaxToken
}
}

View File

@ -1,5 +1,6 @@
import { BaseLanguageModel, BaseLanguageModelCallOptions } from '@langchain/core/language_models/base'
import { BaseLLMOutputParser, BaseOutputParser } from '@langchain/core/output_parsers'
import { ChatPromptTemplate, FewShotPromptTemplate, PromptTemplate, HumanMessagePromptTemplate } from '@langchain/core/prompts'
import { OutputFixingParser } from 'langchain/output_parsers'
import { LLMChain } from 'langchain/chains'
import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
@ -9,7 +10,6 @@ import { checkInputs, Moderation, streamResponse } from '../../moderation/Modera
import { formatResponse, injectOutputParser } from '../../outputparsers/OutputParserHelpers'
import { ChatOpenAI } from '../../chatmodels/ChatOpenAI/FlowiseChatOpenAI'
import { addImagesToMessages } from '../../../src/multiModalUtils'
import { ChatPromptTemplate, FewShotPromptTemplate, PromptTemplate, SystemMessagePromptTemplate } from 'langchain/prompts'
import { HumanMessage } from 'langchain/schema'
class LLMChain_Chains implements INode {
@ -184,6 +184,7 @@ const runPrediction = async (
*/
const promptValues = handleEscapeCharacters(promptValuesRaw, true)
const messageContent = addImagesToMessages(nodeData, options, model.multiModalOption)
if (chain.llm instanceof ChatOpenAI) {
const chatOpenAI = chain.llm as ChatOpenAI
if (messageContent?.length) {
@ -192,19 +193,22 @@ const runPrediction = async (
chatOpenAI.maxTokens = 1024
// Add image to the message
if (chain.prompt instanceof PromptTemplate) {
const oldTemplate = chain.prompt.template as string
let cp2 = ChatPromptTemplate.fromMessages([SystemMessagePromptTemplate.fromTemplate(oldTemplate)])
cp2.promptMessages = [new HumanMessage({ content: messageContent })]
chain.prompt = cp2
const existingPromptTemplate = chain.prompt.template as string
let newChatPromptTemplate = ChatPromptTemplate.fromMessages([
HumanMessagePromptTemplate.fromTemplate(existingPromptTemplate)
])
newChatPromptTemplate.promptMessages.push(new HumanMessage({ content: messageContent }))
chain.prompt = newChatPromptTemplate
} else if (chain.prompt instanceof ChatPromptTemplate) {
chain.prompt.promptMessages.push(new HumanMessage({ content: messageContent }))
} else if (chain.prompt instanceof FewShotPromptTemplate) {
let currentPrompt = chain.prompt as FewShotPromptTemplate
const oldTemplate = currentPrompt.examplePrompt.template as string
let cp2 = ChatPromptTemplate.fromMessages([SystemMessagePromptTemplate.fromTemplate(oldTemplate)])
cp2.promptMessages = [new HumanMessage({ content: messageContent })]
let existingFewShotPromptTemplate = chain.prompt.examplePrompt.template as string
let newFewShotPromptTemplate = ChatPromptTemplate.fromMessages([
HumanMessagePromptTemplate.fromTemplate(existingFewShotPromptTemplate)
])
newFewShotPromptTemplate.promptMessages.push(new HumanMessage({ content: messageContent }))
// @ts-ignore
currentPrompt.examplePrompt = cp2
chain.prompt.examplePrompt = newFewShotPromptTemplate
}
} else {
// revert to previous values if image upload is empty