update message schema

This commit is contained in:
Henry 2023-07-10 16:51:36 +01:00
parent 621c0a37ee
commit cf3bd72a98
6 changed files with 20 additions and 27 deletions

View File

@@ -3,7 +3,7 @@ import { initializeAgentExecutorWithOptions, AgentExecutor, InitializeAgentExecu
import { Tool } from 'langchain/tools'
import { BaseChatMemory, ChatMessageHistory } from 'langchain/memory'
import { getBaseClasses } from '../../../src/utils'
import { AIChatMessage, HumanChatMessage } from 'langchain/schema'
import { AIMessage, HumanMessage } from 'langchain/schema'
import { BaseLanguageModel } from 'langchain/base_language'
import { flatten } from 'lodash'
@@ -99,9 +99,9 @@ class ConversationalAgent_Agents implements INode {
for (const message of histories) {
if (message.type === 'apiMessage') {
chatHistory.push(new AIChatMessage(message.message))
chatHistory.push(new AIMessage(message.message))
} else if (message.type === 'userMessage') {
chatHistory.push(new HumanChatMessage(message.message))
chatHistory.push(new HumanMessage(message.message))
}
}
memory.chatHistory = new ChatMessageHistory(chatHistory)

View File

@@ -4,7 +4,7 @@ import { CustomChainHandler, getBaseClasses } from '../../../src/utils'
import { BaseLanguageModel } from 'langchain/base_language'
import { flatten } from 'lodash'
import { BaseChatMemory, ChatMessageHistory } from 'langchain/memory'
import { AIChatMessage, HumanChatMessage } from 'langchain/schema'
import { AIMessage, HumanMessage } from 'langchain/schema'
class OpenAIFunctionAgent_Agents implements INode {
label: string
@@ -84,9 +84,9 @@ class OpenAIFunctionAgent_Agents implements INode {
for (const message of histories) {
if (message.type === 'apiMessage') {
chatHistory.push(new AIChatMessage(message.message))
chatHistory.push(new AIMessage(message.message))
} else if (message.type === 'userMessage') {
chatHistory.push(new HumanChatMessage(message.message))
chatHistory.push(new HumanMessage(message.message))
}
}
memory.chatHistory = new ChatMessageHistory(chatHistory)

View File

@@ -4,7 +4,7 @@ import { CustomChainHandler, getBaseClasses } from '../../../src/utils'
import { ChatPromptTemplate, HumanMessagePromptTemplate, MessagesPlaceholder, SystemMessagePromptTemplate } from 'langchain/prompts'
import { BufferMemory, ChatMessageHistory } from 'langchain/memory'
import { BaseChatModel } from 'langchain/chat_models/base'
import { AIChatMessage, HumanChatMessage } from 'langchain/schema'
import { AIMessage, HumanMessage } from 'langchain/schema'
const systemMessage = `The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.`
@@ -81,9 +81,9 @@ class ConversationChain_Chains implements INode {
for (const message of histories) {
if (message.type === 'apiMessage') {
chatHistory.push(new AIChatMessage(message.message))
chatHistory.push(new AIMessage(message.message))
} else if (message.type === 'userMessage') {
chatHistory.push(new HumanChatMessage(message.message))
chatHistory.push(new HumanMessage(message.message))
}
}
memory.chatHistory = new ChatMessageHistory(chatHistory)

View File

@@ -6,33 +6,26 @@ import { AIMessage, BaseRetriever, HumanMessage } from 'langchain/schema'
import { BaseChatMemory, BufferMemory, ChatMessageHistory } from 'langchain/memory'
import { PromptTemplate } from 'langchain/prompts'
const default_qa_template = `Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.
const default_qa_template = `Use the following pieces of context to answer the question at the end, in its original language. If you don't know the answer, just say that you don't know in its original language, don't try to make up an answer.
{context}
Question: {question}
Helpful Answer:`
const qa_template = `Use the following pieces of context to answer the question at the end.
const qa_template = `Use the following pieces of context to answer the question at the end, in its original language.
{context}
Question: {question}
Helpful Answer:`
const CUSTOM_QUESTION_GENERATOR_CHAIN_PROMPT = `Given the following conversation and a follow up question, return the conversation history excerpt that includes any relevant context to the question if it exists and rephrase the follow up question to be a standalone question.
const CUSTOM_QUESTION_GENERATOR_CHAIN_PROMPT = `Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question, in its original language. If the conversation contains context relevant to the question, include it in the standalone question.
Chat History:
{chat_history}
Follow Up Input: {question}
Your answer should follow the following format:
\`\`\`
Use the following pieces of context to answer the users question.
If you don't know the answer, just say that you don't know, don't try to make up an answer.
----------------
<Relevant chat history excerpt as context here>
Standalone question: <Rephrased question here>
\`\`\`
Your answer:`
Standalone question:`
class ConversationalRetrievalQAChain_Chains implements INode {
label: string

View File

@@ -1,4 +1,4 @@
import { SystemChatMessage } from 'langchain/schema'
import { SystemMessage } from 'langchain/schema'
import { INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses } from '../../../src/utils'
import { ZepMemory, ZepMemoryInput } from 'langchain/memory/zep'
@@ -123,7 +123,7 @@ class ZepMemory_Memory implements INode {
let summary = autoSummaryTemplate.replace(/{summary}/g, memory.summary.content)
// eslint-disable-next-line no-console
console.log('[ZepMemory] auto summary:', summary)
data[zep.memoryKey].unshift(new SystemChatMessage(summary))
data[zep.memoryKey].unshift(new SystemMessage(summary))
}
}
// for langchain zep memory compatibility, or we will get "Missing value for input variable chat_history"

View File

@@ -687,13 +687,13 @@ export class App {
}
}
/* Don't rebuild the flow (to avoid duplicated upsert, recomputation) when all these conditions met:
/* Reuse the flow without having to rebuild (to avoid duplicated upsert, recomputation) when all these conditions met:
* - Node Data already exists in pool
* - Still in sync (i.e the flow has not been modified since)
* - Existing overrideConfig and new overrideConfig are the same
* - Flow doesn't start with nodes that depend on incomingInput.question
***/
const isRebuildNeeded = () => {
const isFlowReusable = () => {
return (
Object.prototype.hasOwnProperty.call(this.chatflowPool.activeChatflows, chatflowid) &&
this.chatflowPool.activeChatflows[chatflowid].inSync &&
@ -707,7 +707,7 @@ export class App {
}
if (process.env.EXECUTION_MODE === 'child') {
if (isRebuildNeeded()) {
if (isFlowReusable()) {
nodeToExecuteData = this.chatflowPool.activeChatflows[chatflowid].endingNodeData
try {
const result = await this.startChildProcess(chatflow, chatId, incomingInput, nodeToExecuteData)
@@ -731,7 +731,7 @@
const nodes = parsedFlowData.nodes
const edges = parsedFlowData.edges
if (isRebuildNeeded()) {
if (isFlowReusable()) {
nodeToExecuteData = this.chatflowPool.activeChatflows[chatflowid].endingNodeData
isStreamValid = isFlowValidForStream(nodes, nodeToExecuteData)
} else {