Refactoring of the IVisionChatModal interface into the common Interface.ts file and misc changes

vinodkiran 2024-03-08 17:59:54 +05:30
parent 2b0ca60686
commit bce7ff9ada
9 changed files with 26 additions and 32 deletions

View File

@@ -9,10 +9,9 @@ import { RunnableSequence } from '@langchain/core/runnables'
 import { ChatConversationalAgent } from 'langchain/agents'
 import { getBaseClasses } from '../../../src/utils'
 import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
-import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams } from '../../../src/Interface'
+import { IVisionChatModal, FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams } from '../../../src/Interface'
 import { AgentExecutor } from '../../../src/agents'
 import { addImagesToMessages, llmSupportsVision } from '../../../src/multiModalUtils'
-import { IVisionChatModal } from '../../../src/IVisionChatModal'
 const DEFAULT_PREFIX = `Assistant is a large language model trained by OpenAI.
@@ -158,10 +157,6 @@ const prepareAgent = async (
     if (messageContent?.length) {
         visionChatModel.setVisionModel()
-        // for (const msg of messageContent) {
-        //     humanImageMessages.push(new HumanMessage({ content: [msg] }))
-        // }
         // Pop the `agent_scratchpad` MessagePlaceHolder
         let messagePlaceholder = prompt.promptMessages.pop() as MessagesPlaceholder
         if (prompt.promptMessages.at(-1) instanceof HumanMessagePromptTemplate) {
@@ -176,8 +171,6 @@ const prepareAgent = async (
             msg.inputVariables = lastMessage.inputVariables
             prompt.promptMessages.push(msg)
         }
-        // Add the HumanMessage for images
-        //prompt.promptMessages.push(...humanImageMessages)
         // Add the `agent_scratchpad` MessagePlaceHolder back
         prompt.promptMessages.push(messagePlaceholder)

View File

@@ -6,11 +6,10 @@ import type { PromptTemplate } from '@langchain/core/prompts'
 import { BaseChatModel } from '@langchain/core/language_models/chat_models'
 import { pull } from 'langchain/hub'
 import { additionalCallbacks } from '../../../src/handler'
-import { FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams } from '../../../src/Interface'
+import { IVisionChatModal, FlowiseMemory, ICommonObject, IMessage, INode, INodeData, INodeParams } from '../../../src/Interface'
 import { getBaseClasses } from '../../../src/utils'
 import { createReactAgent } from '../../../src/agents'
 import { addImagesToMessages, llmSupportsVision } from '../../../src/multiModalUtils'
-import { IVisionChatModal } from '../../../src/IVisionChatModal'
 class MRKLAgentChat_Agents implements INode {
     label: string

View File

@@ -7,10 +7,17 @@ import { checkInputs, Moderation, streamResponse } from '../../moderation/Modera
 import { formatResponse } from '../../outputparsers/OutputParserHelpers'
 import { addImagesToMessages, llmSupportsVision } from '../../../src/multiModalUtils'
 import { ChatOpenAI } from '../../chatmodels/ChatOpenAI/FlowiseChatOpenAI'
-import { FlowiseMemory, ICommonObject, INode, INodeData, INodeParams, MessageContentImageUrl } from '../../../src/Interface'
+import {
+    IVisionChatModal,
+    FlowiseMemory,
+    ICommonObject,
+    INode,
+    INodeData,
+    INodeParams,
+    MessageContentImageUrl
+} from '../../../src/Interface'
 import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
 import { getBaseClasses, handleEscapeCharacters } from '../../../src/utils'
-import { IVisionChatModal } from '../../../src/IVisionChatModal'
 let systemMessage = `The following is a friendly conversation between a human and an AI. The AI is talkative and provides lots of specific details from its context. If the AI does not know the answer to a question, it truthfully says it does not know.`
 const inputKey = 'input'

View File

@@ -4,14 +4,13 @@ import { HumanMessage } from '@langchain/core/messages'
 import { ChatPromptTemplate, FewShotPromptTemplate, HumanMessagePromptTemplate, PromptTemplate } from '@langchain/core/prompts'
 import { OutputFixingParser } from 'langchain/output_parsers'
 import { LLMChain } from 'langchain/chains'
-import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
+import { IVisionChatModal, ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
 import { additionalCallbacks, ConsoleCallbackHandler, CustomChainHandler } from '../../../src/handler'
 import { getBaseClasses, handleEscapeCharacters } from '../../../src/utils'
 import { checkInputs, Moderation, streamResponse } from '../../moderation/Moderation'
 import { formatResponse, injectOutputParser } from '../../outputparsers/OutputParserHelpers'
 import { ChatOpenAI } from '../../chatmodels/ChatOpenAI/FlowiseChatOpenAI'
 import { addImagesToMessages, llmSupportsVision } from '../../../src/multiModalUtils'
-import { IVisionChatModal } from '../../../src/IVisionChatModal'
 class LLMChain_Chains implements INode {
     label: string

View File

@@ -1,6 +1,5 @@
 import { AnthropicInput, ChatAnthropic as LangchainChatAnthropic } from '@langchain/anthropic'
-import { IMultiModalOption } from '../../../src'
+import { IVisionChatModal, IMultiModalOption } from '../../../src'
-import { IVisionChatModal } from '../../../src/IVisionChatModal'
 import { BaseLLMParams } from '@langchain/core/language_models/llms'
 export class ChatAnthropic extends LangchainChatAnthropic implements IVisionChatModal {

View File

@@ -1,8 +1,7 @@
 import type { ClientOptions } from 'openai'
 import { ChatOpenAI as LangchainChatOpenAI, OpenAIChatInput, LegacyOpenAIInput, AzureOpenAIInput } from '@langchain/openai'
 import { BaseChatModelParams } from '@langchain/core/language_models/chat_models'
-import { IMultiModalOption } from '../../../src'
+import { IMultiModalOption, IVisionChatModal } from '../../../src'
-import { IVisionChatModal } from '../../../src/IVisionChatModal'
 export class ChatOpenAI extends LangchainChatOpenAI implements IVisionChatModal {
     configuredModel: string

View File

@@ -1,12 +0,0 @@
-import { IMultiModalOption } from './Interface'
-export interface IVisionChatModal {
-    id: string
-    configuredModel: string
-    configuredMaxToken: number
-    multiModalOption: IMultiModalOption
-    setVisionModel(): void
-    revertToOriginalModel(): void
-    setMultiModalOption(multiModalOption: IMultiModalOption): void
-}

View File

@@ -270,3 +270,14 @@ export abstract class FlowiseSummaryMemory extends ConversationSummaryMemory imp
     abstract addChatMessages(msgArray: { text: string; type: MessageType }[], overrideSessionId?: string): Promise<void>
     abstract clearChatMessages(overrideSessionId?: string): Promise<void>
 }
+export interface IVisionChatModal {
+    id: string
+    configuredModel: string
+    configuredMaxToken: number
+    multiModalOption: IMultiModalOption
+    setVisionModel(): void
+    revertToOriginalModel(): void
+    setMultiModalOption(multiModalOption: IMultiModalOption): void
+}
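
With this hunk the IVisionChatModal contract lives alongside the other shared types in Interface.ts. For orientation only, here is a minimal sketch (not part of this commit) of how a chat model wrapper might implement it, mirroring the FlowiseChatOpenAI/FlowiseChatAnthropic wrappers touched above. The class name, constructor, import path, and the concrete vision model name are assumptions; only the interface shape and the consolidated Interface.ts import come from this diff.

import { IMultiModalOption, IVisionChatModal } from './Interface'

// Illustrative only: a wrapper that remembers its configured model so it can
// switch to a vision-capable variant and back again.
class ExampleVisionChatModel implements IVisionChatModal {
    id: string
    configuredModel: string
    configuredMaxToken: number
    multiModalOption: IMultiModalOption

    // fields this sketch tracks on top of the interface
    modelName: string
    maxTokens: number

    constructor(id: string, modelName: string, maxTokens: number, multiModalOption: IMultiModalOption) {
        this.id = id
        this.configuredModel = modelName // remember the user-configured model
        this.configuredMaxToken = maxTokens // remember the user-configured token limit
        this.modelName = modelName
        this.maxTokens = maxTokens
        this.multiModalOption = multiModalOption
    }

    setMultiModalOption(multiModalOption: IMultiModalOption): void {
        this.multiModalOption = multiModalOption
    }

    setVisionModel(): void {
        // assumed vision-capable model name; real wrappers pick their own
        this.modelName = 'gpt-4-vision-preview'
        this.maxTokens = 1024
    }

    revertToOriginalModel(): void {
        // restore whatever the node was originally configured with
        this.modelName = this.configuredModel
        this.maxTokens = this.configuredMaxToken
    }
}

A node can then call setVisionModel() when image uploads are present and revertToOriginalModel() otherwise, which is the pattern the agent change earlier in this commit relies on.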

View File

@@ -1,8 +1,7 @@
-import { ICommonObject, IFileUpload, IMultiModalOption, INodeData, MessageContentImageUrl } from './Interface'
+import { IVisionChatModal, ICommonObject, IFileUpload, IMultiModalOption, INodeData, MessageContentImageUrl } from './Interface'
 import path from 'path'
 import { getStoragePath } from './utils'
 import fs from 'fs'
-import { IVisionChatModal } from './IVisionChatModal'
 export const addImagesToMessages = (
     nodeData: INodeData,
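
With the interface exported from Interface.ts, multiModalUtils.ts and its consumers now share a single import source. Below is a hedged sketch of the node-side flow the agent and chain diffs above follow; the parameter lists of llmSupportsVision and addImagesToMessages are assumptions (only their names, import paths, and the setVisionModel()/revertToOriginalModel() methods appear in this commit).

import { IVisionChatModal, ICommonObject, INodeData } from '../../../src/Interface'
import { addImagesToMessages, llmSupportsVision } from '../../../src/multiModalUtils'

// Sketch, not the actual node code: decide whether to switch a model into vision mode.
const applyVisionIfNeeded = (model: unknown, nodeData: INodeData, options: ICommonObject) => {
    // only models whose wrappers implement IVisionChatModal support image input
    if (!llmSupportsVision(model)) return []

    const visionChatModel = model as IVisionChatModal
    // assumed call shape: gather uploaded images as image_url message content
    const messageContent = addImagesToMessages(nodeData, options)
    if (messageContent?.length) {
        // temporarily swap to the vision-capable variant, as in the agent diff above
        visionChatModel.setVisionModel()
    } else {
        // no images this run: keep or restore the originally configured model
        visionChatModel.revertToOriginalModel()
    }
    return messageContent
}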