Chore/Update langchain version, openai, mistral, vertex, anthropic (#2180)

* update langchain, openai, mistral, vertex and anthropic versions; introduce Tool Agent
* upgrade @google/generative-ai to 0.7.0, replicate and faiss-node
* update cohere version
* add ChatCohere to streaming
* update Gemini to support image upload
* update Google GenAI, remove AIPlugin

parent f5be889ea8
commit 95beaba9d9
@@ -63,6 +63,7 @@
        "node": ">=18.15.0 <19.0.0 || ^20"
    },
    "resolutions": {
-        "@qdrant/openapi-typescript-fetch": "1.2.1"
+        "@qdrant/openapi-typescript-fetch": "1.2.1",
+        "@google/generative-ai": "^0.7.0"
    }
}
@@ -308,6 +308,10 @@
                {
                    "label": "gemini-pro",
                    "name": "gemini-pro"
+                },
+                {
+                    "label": "gemini-pro-vision",
+                    "name": "gemini-pro-vision"
                }
            ]
        },
@@ -323,6 +327,18 @@
        {
            "name": "chatGoogleVertexAI",
            "models": [
+                {
+                    "label": "gemini-1.5-pro",
+                    "name": "gemini-1.5-pro"
+                },
+                {
+                    "label": "gemini-1.0-pro",
+                    "name": "gemini-1.0-pro"
+                },
+                {
+                    "label": "gemini-1.0-pro-vision",
+                    "name": "gemini-1.0-pro-vision"
+                },
                {
                    "label": "chat-bison",
                    "name": "chat-bison"
@@ -34,7 +34,7 @@ class MistralAIToolAgent_Agents implements INode {
        this.type = 'AgentExecutor'
        this.category = 'Agents'
        this.icon = 'MistralAI.svg'
-        this.badge = 'NEW'
+        this.badge = 'DEPRECATING'
        this.description = `Agent that uses MistralAI Function Calling to pick the tools and args to call`
        this.baseClasses = [this.type, ...getBaseClasses(AgentExecutor)]
        this.inputs = [
@@ -2,7 +2,7 @@ import { ICommonObject, IDatabaseEntity, INode, INodeData, INodeOptionsValue, IN
import OpenAI from 'openai'
import { DataSource } from 'typeorm'
import { getCredentialData, getCredentialParam, getUserHome } from '../../../src/utils'
-import { MessageContentImageFile, MessageContentText } from 'openai/resources/beta/threads/messages/messages'
+import { ImageFileContentBlock, TextContentBlock } from 'openai/resources/beta/threads/messages/messages'
import * as fsDefault from 'node:fs'
import * as path from 'node:path'
import fetch from 'node-fetch'
@@ -392,7 +392,7 @@ class OpenAIAssistant_Agents implements INode {
                const fileAnnotations = []
                for (let i = 0; i < assistantMessages[0].content.length; i += 1) {
                    if (assistantMessages[0].content[i].type === 'text') {
-                        const content = assistantMessages[0].content[i] as MessageContentText
+                        const content = assistantMessages[0].content[i] as TextContentBlock

                        if (content.text.annotations) {
                            const message_content = content.text
@@ -406,8 +406,7 @@ class OpenAIAssistant_Agents implements INode {
                                let filePath = ''

                                // Gather citations based on annotation attributes
-                                const file_citation = (annotation as OpenAI.Beta.Threads.Messages.MessageContentText.Text.FileCitation)
-                                    .file_citation
+                                const file_citation = (annotation as OpenAI.Beta.Threads.Messages.FileCitationAnnotation).file_citation
                                if (file_citation) {
                                    const cited_file = await openai.files.retrieve(file_citation.file_id)
                                    // eslint-disable-next-line no-useless-escape
@@ -421,7 +420,7 @@ class OpenAIAssistant_Agents implements INode {
                                    })
                                }
                            } else {
-                                const file_path = (annotation as OpenAI.Beta.Threads.Messages.MessageContentText.Text.FilePath).file_path
+                                const file_path = (annotation as OpenAI.Beta.Threads.Messages.FilePathAnnotation).file_path
                                if (file_path) {
                                    const cited_file = await openai.files.retrieve(file_path.file_id)
                                    // eslint-disable-next-line no-useless-escape
@@ -452,7 +451,7 @@ class OpenAIAssistant_Agents implements INode {
                        const lenticularBracketRegex = /【[^】]*】/g
                        returnVal = returnVal.replace(lenticularBracketRegex, '')
                    } else {
-                        const content = assistantMessages[0].content[i] as MessageContentImageFile
+                        const content = assistantMessages[0].content[i] as ImageFileContentBlock
                        const fileId = content.image_file.file_id
                        const fileObj = await openai.files.retrieve(fileId)
                        const dirPath = path.join(getUserHome(), '.flowise', 'openai-assistant')
@@ -533,7 +532,7 @@ const downloadFile = async (fileObj: any, filePath: string, dirPath: string, ope
    }
}

-const formatToOpenAIAssistantTool = (tool: any): OpenAI.Beta.AssistantCreateParams.AssistantToolsFunction => {
+const formatToOpenAIAssistantTool = (tool: any): OpenAI.Beta.FunctionTool => {
    return {
        type: 'function',
        function: {
@@ -36,7 +36,7 @@ class OpenAIToolAgent_Agents implements INode {
        this.icon = 'function.svg'
        this.description = `Agent that uses OpenAI Function Calling to pick the tools and args to call`
        this.baseClasses = [this.type, ...getBaseClasses(AgentExecutor)]
-        this.badge = 'NEW'
+        this.badge = 'DEPRECATING'
        this.inputs = [
            {
                label: 'Tools',
@@ -13,7 +13,7 @@ import { addImagesToMessages, llmSupportsVision } from '../../../src/multiModalU
import { checkInputs, Moderation } from '../../moderation/Moderation'
import { formatResponse } from '../../outputparsers/OutputParserHelpers'

-class MRKLAgentChat_Agents implements INode {
+class ReActAgentChat_Agents implements INode {
    label: string
    name: string
    version: number
@@ -27,7 +27,7 @@ class MRKLAgentChat_Agents implements INode {

    constructor(fields?: { sessionId?: string }) {
        this.label = 'ReAct Agent for Chat Models'
-        this.name = 'mrklAgentChat'
+        this.name = 'reactAgentChat'
        this.version = 4.0
        this.type = 'AgentExecutor'
        this.category = 'Agents'
@@ -157,4 +157,4 @@ class MRKLAgentChat_Agents implements INode {
    }
}

-module.exports = { nodeClass: MRKLAgentChat_Agents }
+module.exports = { nodeClass: ReActAgentChat_Agents }
@@ -11,7 +11,7 @@ import { createReactAgent } from '../../../src/agents'
import { checkInputs, Moderation } from '../../moderation/Moderation'
import { formatResponse } from '../../outputparsers/OutputParserHelpers'

-class MRKLAgentLLM_Agents implements INode {
+class ReActAgentLLM_Agents implements INode {
    label: string
    name: string
    version: number
@@ -24,7 +24,7 @@ class MRKLAgentLLM_Agents implements INode {

    constructor() {
        this.label = 'ReAct Agent for LLMs'
-        this.name = 'mrklAgentLLM'
+        this.name = 'reactAgentLLM'
        this.version = 2.0
        this.type = 'AgentExecutor'
        this.category = 'Agents'
@@ -107,4 +107,4 @@ class MRKLAgentLLM_Agents implements INode {
    }
}

-module.exports = { nodeClass: MRKLAgentLLM_Agents }
+module.exports = { nodeClass: ReActAgentLLM_Agents }
@ -0,0 +1,260 @@
|
|||
import { flatten } from 'lodash'
|
||||
import { BaseMessage } from '@langchain/core/messages'
|
||||
import { ChainValues } from '@langchain/core/utils/types'
|
||||
import { RunnableSequence } from '@langchain/core/runnables'
|
||||
import { BaseChatModel } from '@langchain/core/language_models/chat_models'
|
||||
import { ChatPromptTemplate, MessagesPlaceholder, HumanMessagePromptTemplate, PromptTemplate } from '@langchain/core/prompts'
|
||||
import { formatToOpenAIToolMessages } from 'langchain/agents/format_scratchpad/openai_tools'
|
||||
import { type ToolsAgentStep } from 'langchain/agents/openai/output_parser'
|
||||
import { getBaseClasses } from '../../../src/utils'
|
||||
import { FlowiseMemory, ICommonObject, INode, INodeData, INodeParams, IUsedTool, IVisionChatModal } from '../../../src/Interface'
|
||||
import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
|
||||
import { AgentExecutor, ToolCallingAgentOutputParser } from '../../../src/agents'
|
||||
import { Moderation, checkInputs, streamResponse } from '../../moderation/Moderation'
|
||||
import { formatResponse } from '../../outputparsers/OutputParserHelpers'
|
||||
import { addImagesToMessages, llmSupportsVision } from '../../../src/multiModalUtils'
|
||||
|
||||
class ToolAgent_Agents implements INode {
|
||||
label: string
|
||||
name: string
|
||||
version: number
|
||||
description: string
|
||||
type: string
|
||||
icon: string
|
||||
category: string
|
||||
baseClasses: string[]
|
||||
inputs: INodeParams[]
|
||||
sessionId?: string
|
||||
badge?: string
|
||||
|
||||
constructor(fields?: { sessionId?: string }) {
|
||||
this.label = 'Tool Agent'
|
||||
this.name = 'toolAgent'
|
||||
this.version = 1.0
|
||||
this.type = 'AgentExecutor'
|
||||
this.category = 'Agents'
|
||||
this.icon = 'toolAgent.png'
|
||||
this.description = `Agent that uses Function Calling to pick the tools and args to call`
|
||||
this.baseClasses = [this.type, ...getBaseClasses(AgentExecutor)]
|
||||
this.badge = 'NEW'
|
||||
this.inputs = [
|
||||
{
|
||||
label: 'Tools',
|
||||
name: 'tools',
|
||||
type: 'Tool',
|
||||
list: true
|
||||
},
|
||||
{
|
||||
label: 'Memory',
|
||||
name: 'memory',
|
||||
type: 'BaseChatMemory'
|
||||
},
|
||||
{
|
||||
label: 'Tool Calling Chat Model',
|
||||
name: 'model',
|
||||
type: 'BaseChatModel',
|
||||
description:
|
||||
'Only compatible with models that are capable of function calling. ChatOpenAI, ChatMistral, ChatAnthropic, ChatVertexAI'
|
||||
},
|
||||
{
|
||||
label: 'System Message',
|
||||
name: 'systemMessage',
|
||||
type: 'string',
|
||||
default: `You are a helpful AI assistant.`,
|
||||
rows: 4,
|
||||
optional: true,
|
||||
additionalParams: true
|
||||
},
|
||||
{
|
||||
label: 'Input Moderation',
|
||||
description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
|
||||
name: 'inputModeration',
|
||||
type: 'Moderation',
|
||||
optional: true,
|
||||
list: true
|
||||
},
|
||||
{
|
||||
label: 'Max Iterations',
|
||||
name: 'maxIterations',
|
||||
type: 'number',
|
||||
optional: true,
|
||||
additionalParams: true
|
||||
}
|
||||
]
|
||||
this.sessionId = fields?.sessionId
|
||||
}
|
||||
|
||||
async init(nodeData: INodeData, input: string, options: ICommonObject): Promise<any> {
|
||||
return prepareAgent(nodeData, options, { sessionId: this.sessionId, chatId: options.chatId, input })
|
||||
}
|
||||
|
||||
async run(nodeData: INodeData, input: string, options: ICommonObject): Promise<string | ICommonObject> {
|
||||
const memory = nodeData.inputs?.memory as FlowiseMemory
|
||||
const moderations = nodeData.inputs?.inputModeration as Moderation[]
|
||||
|
||||
const isStreamable = options.socketIO && options.socketIOClientId
|
||||
|
||||
if (moderations && moderations.length > 0) {
|
||||
try {
|
||||
// Use the output of the moderation chain as input for the OpenAI Function Agent
|
||||
input = await checkInputs(moderations, input)
|
||||
} catch (e) {
|
||||
await new Promise((resolve) => setTimeout(resolve, 500))
|
||||
if (isStreamable)
|
||||
streamResponse(options.socketIO && options.socketIOClientId, e.message, options.socketIO, options.socketIOClientId)
|
||||
return formatResponse(e.message)
|
||||
}
|
||||
}
|
||||
|
||||
const executor = prepareAgent(nodeData, options, { sessionId: this.sessionId, chatId: options.chatId, input })
|
||||
|
||||
const loggerHandler = new ConsoleCallbackHandler(options.logger)
|
||||
const callbacks = await additionalCallbacks(nodeData, options)
|
||||
|
||||
let res: ChainValues = {}
|
||||
let sourceDocuments: ICommonObject[] = []
|
||||
let usedTools: IUsedTool[] = []
|
||||
|
||||
if (isStreamable) {
|
||||
const handler = new CustomChainHandler(options.socketIO, options.socketIOClientId)
|
||||
res = await executor.invoke({ input }, { callbacks: [loggerHandler, handler, ...callbacks] })
|
||||
if (res.sourceDocuments) {
|
||||
options.socketIO.to(options.socketIOClientId).emit('sourceDocuments', flatten(res.sourceDocuments))
|
||||
sourceDocuments = res.sourceDocuments
|
||||
}
|
||||
if (res.usedTools) {
|
||||
options.socketIO.to(options.socketIOClientId).emit('usedTools', res.usedTools)
|
||||
usedTools = res.usedTools
|
||||
}
|
||||
} else {
|
||||
res = await executor.invoke({ input }, { callbacks: [loggerHandler, ...callbacks] })
|
||||
if (res.sourceDocuments) {
|
||||
sourceDocuments = res.sourceDocuments
|
||||
}
|
||||
if (res.usedTools) {
|
||||
usedTools = res.usedTools
|
||||
}
|
||||
}
|
||||
|
||||
let output = res?.output as string
|
||||
|
||||
// Claude 3 Opus tends to spit out <thinking>..</thinking> as well, discard that in final output
|
||||
const regexPattern: RegExp = /<thinking>[\s\S]*?<\/thinking>/
|
||||
const matches: RegExpMatchArray | null = output.match(regexPattern)
|
||||
if (matches) {
|
||||
for (const match of matches) {
|
||||
output = output.replace(match, '')
|
||||
}
|
||||
}
|
||||
|
||||
await memory.addChatMessages(
|
||||
[
|
||||
{
|
||||
text: input,
|
||||
type: 'userMessage'
|
||||
},
|
||||
{
|
||||
text: output,
|
||||
type: 'apiMessage'
|
||||
}
|
||||
],
|
||||
this.sessionId
|
||||
)
|
||||
|
||||
let finalRes = output
|
||||
|
||||
if (sourceDocuments.length || usedTools.length) {
|
||||
const finalRes: ICommonObject = { text: output }
|
||||
if (sourceDocuments.length) {
|
||||
finalRes.sourceDocuments = flatten(sourceDocuments)
|
||||
}
|
||||
if (usedTools.length) {
|
||||
finalRes.usedTools = usedTools
|
||||
}
|
||||
return finalRes
|
||||
}
|
||||
|
||||
return finalRes
|
||||
}
|
||||
}
|
||||
|
||||
const prepareAgent = (nodeData: INodeData, options: ICommonObject, flowObj: { sessionId?: string; chatId?: string; input?: string }) => {
|
||||
const model = nodeData.inputs?.model as BaseChatModel
|
||||
const maxIterations = nodeData.inputs?.maxIterations as string
|
||||
const memory = nodeData.inputs?.memory as FlowiseMemory
|
||||
const systemMessage = nodeData.inputs?.systemMessage as string
|
||||
let tools = nodeData.inputs?.tools
|
||||
tools = flatten(tools)
|
||||
const memoryKey = memory.memoryKey ? memory.memoryKey : 'chat_history'
|
||||
const inputKey = memory.inputKey ? memory.inputKey : 'input'
|
||||
|
||||
const prompt = ChatPromptTemplate.fromMessages([
|
||||
['system', systemMessage],
|
||||
new MessagesPlaceholder(memoryKey),
|
||||
['human', `{${inputKey}}`],
|
||||
new MessagesPlaceholder('agent_scratchpad')
|
||||
])
|
||||
|
||||
if (llmSupportsVision(model)) {
|
||||
const visionChatModel = model as IVisionChatModal
|
||||
const messageContent = addImagesToMessages(nodeData, options, model.multiModalOption)
|
||||
|
||||
if (messageContent?.length) {
|
||||
visionChatModel.setVisionModel()
|
||||
|
||||
// Pop the `agent_scratchpad` MessagePlaceHolder
|
||||
let messagePlaceholder = prompt.promptMessages.pop() as MessagesPlaceholder
|
||||
if (prompt.promptMessages.at(-1) instanceof HumanMessagePromptTemplate) {
|
||||
const lastMessage = prompt.promptMessages.pop() as HumanMessagePromptTemplate
|
||||
const template = (lastMessage.prompt as PromptTemplate).template as string
|
||||
const msg = HumanMessagePromptTemplate.fromTemplate([
|
||||
...messageContent,
|
||||
{
|
||||
text: template
|
||||
}
|
||||
])
|
||||
msg.inputVariables = lastMessage.inputVariables
|
||||
prompt.promptMessages.push(msg)
|
||||
}
|
||||
|
||||
// Add the `agent_scratchpad` MessagePlaceHolder back
|
||||
prompt.promptMessages.push(messagePlaceholder)
|
||||
} else {
|
||||
visionChatModel.revertToOriginalModel()
|
||||
}
|
||||
}
|
||||
|
||||
if (model.bindTools === undefined) {
|
||||
throw new Error(`This agent requires that the "bindTools()" method be implemented on the input model.`)
|
||||
}
|
||||
|
||||
const modelWithTools = model.bindTools(tools)
|
||||
|
||||
const runnableAgent = RunnableSequence.from([
|
||||
{
|
||||
[inputKey]: (i: { input: string; steps: ToolsAgentStep[] }) => i.input,
|
||||
agent_scratchpad: (i: { input: string; steps: ToolsAgentStep[] }) => formatToOpenAIToolMessages(i.steps),
|
||||
[memoryKey]: async (_: { input: string; steps: ToolsAgentStep[] }) => {
|
||||
const messages = (await memory.getChatMessages(flowObj?.sessionId, true)) as BaseMessage[]
|
||||
return messages ?? []
|
||||
}
|
||||
},
|
||||
prompt,
|
||||
modelWithTools,
|
||||
new ToolCallingAgentOutputParser()
|
||||
])
|
||||
|
||||
const executor = AgentExecutor.fromAgentAndTools({
|
||||
agent: runnableAgent,
|
||||
tools,
|
||||
sessionId: flowObj?.sessionId,
|
||||
chatId: flowObj?.chatId,
|
||||
input: flowObj?.input,
|
||||
verbose: process.env.DEBUG === 'true' ? true : false,
|
||||
maxIterations: maxIterations ? parseFloat(maxIterations) : undefined
|
||||
})
|
||||
|
||||
return executor
|
||||
}
|
||||
|
||||
module.exports = { nodeClass: ToolAgent_Agents }
|
||||
@@ -57,7 +57,6 @@ class XMLAgent_Agents implements INode {
        this.type = 'XMLAgent'
        this.category = 'Agents'
        this.icon = 'xmlagent.svg'
-        this.badge = 'NEW'
        this.description = `Agent that is designed for LLMs that are good for reasoning/writing XML (e.g: Anthropic Claude)`
        this.baseClasses = [this.type, ...getBaseClasses(AgentExecutor)]
        this.inputs = [
@@ -1,10 +1,10 @@
import { HarmBlockThreshold, HarmCategory } from '@google/generative-ai'
import type { SafetySetting } from '@google/generative-ai'
-import { ChatGoogleGenerativeAI, GoogleGenerativeAIChatInput } from '@langchain/google-genai'
import { BaseCache } from '@langchain/core/caches'
-import { ICommonObject, INode, INodeData, INodeOptionsValue, INodeParams } from '../../../src/Interface'
+import { ICommonObject, IMultiModalOption, INode, INodeData, INodeOptionsValue, INodeParams } from '../../../src/Interface'
import { convertMultiOptionsToStringArray, getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { getModels, MODEL_TYPE } from '../../../src/modelLoader'
+import { ChatGoogleGenerativeAI, GoogleGenerativeAIChatInput } from './FlowiseChatGoogleGenerativeAI'

class GoogleGenerativeAI_ChatModels implements INode {
    label: string
@@ -139,6 +139,15 @@ class GoogleGenerativeAI_ChatModels implements INode {
                ],
                optional: true,
                additionalParams: true
            },
+            {
+                label: 'Allow Image Uploads',
+                name: 'allowImageUploads',
+                type: 'boolean',
+                description:
+                    'Automatically uses vision model when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent',
+                default: false,
+                optional: true
+            }
        ]
    }
@@ -162,20 +171,21 @@
        const harmCategory = nodeData.inputs?.harmCategory as string
        const harmBlockThreshold = nodeData.inputs?.harmBlockThreshold as string
        const cache = nodeData.inputs?.cache as BaseCache
+        const streaming = nodeData.inputs?.streaming as boolean
+
+        const allowImageUploads = nodeData.inputs?.allowImageUploads as boolean

        const obj: Partial<GoogleGenerativeAIChatInput> = {
            apiKey: apiKey,
            modelName: modelName,
-            maxOutputTokens: 2048
+            streaming: streaming ?? true
        }

        if (maxOutputTokens) obj.maxOutputTokens = parseInt(maxOutputTokens, 10)
-
-        const model = new ChatGoogleGenerativeAI(obj)
-        if (topP) model.topP = parseFloat(topP)
-        if (topK) model.topK = parseFloat(topK)
-        if (cache) model.cache = cache
-        if (temperature) model.temperature = parseFloat(temperature)
+        if (topP) obj.topP = parseFloat(topP)
+        if (topK) obj.topK = parseFloat(topK)
+        if (cache) obj.cache = cache
+        if (temperature) obj.temperature = parseFloat(temperature)

        // Safety Settings
        let harmCategories: string[] = convertMultiOptionsToStringArray(harmCategory)
@@ -188,7 +198,16 @@ class GoogleGenerativeAI_ChatModels implements INode {
                threshold: harmBlockThresholds[index] as HarmBlockThreshold
            }
        })
-        if (safetySettings.length > 0) model.safetySettings = safetySettings
+        if (safetySettings.length > 0) obj.safetySettings = safetySettings
+
+        const multiModalOption: IMultiModalOption = {
+            image: {
+                allowImageUploads: allowImageUploads ?? false
+            }
+        }
+
+        const model = new ChatGoogleGenerativeAI(nodeData.id, obj)
+        model.setMultiModalOption(multiModalOption)

        return model
    }
@ -0,0 +1,550 @@
|
|||
import { BaseMessage, AIMessage, AIMessageChunk, isBaseMessage, ChatMessage, MessageContent } from '@langchain/core/messages'
|
||||
import { CallbackManagerForLLMRun } from '@langchain/core/callbacks/manager'
|
||||
import { BaseChatModel, type BaseChatModelParams } from '@langchain/core/language_models/chat_models'
|
||||
import { ChatGeneration, ChatGenerationChunk, ChatResult } from '@langchain/core/outputs'
|
||||
import { ToolCall } from '@langchain/core/messages/tool'
|
||||
import { NewTokenIndices } from '@langchain/core/callbacks/base'
|
||||
import {
|
||||
EnhancedGenerateContentResponse,
|
||||
Content,
|
||||
Part,
|
||||
Tool,
|
||||
GenerativeModel,
|
||||
GoogleGenerativeAI as GenerativeAI
|
||||
} from '@google/generative-ai'
|
||||
import type { SafetySetting } from '@google/generative-ai'
|
||||
import { ICommonObject, IMultiModalOption, IVisionChatModal } from '../../../src'
|
||||
import { StructuredToolInterface } from '@langchain/core/tools'
|
||||
import { isStructuredTool } from '@langchain/core/utils/function_calling'
|
||||
import { zodToJsonSchema } from 'zod-to-json-schema'
|
||||
|
||||
interface TokenUsage {
|
||||
completionTokens?: number
|
||||
promptTokens?: number
|
||||
totalTokens?: number
|
||||
}
|
||||
|
||||
export interface GoogleGenerativeAIChatInput extends BaseChatModelParams {
|
||||
modelName?: string
|
||||
model?: string
|
||||
temperature?: number
|
||||
maxOutputTokens?: number
|
||||
topP?: number
|
||||
topK?: number
|
||||
stopSequences?: string[]
|
||||
safetySettings?: SafetySetting[]
|
||||
apiKey?: string
|
||||
streaming?: boolean
|
||||
}
|
||||
|
||||
class LangchainChatGoogleGenerativeAI extends BaseChatModel implements GoogleGenerativeAIChatInput {
|
||||
modelName = 'gemini-pro'
|
||||
|
||||
temperature?: number
|
||||
|
||||
maxOutputTokens?: number
|
||||
|
||||
topP?: number
|
||||
|
||||
topK?: number
|
||||
|
||||
stopSequences: string[] = []
|
||||
|
||||
safetySettings?: SafetySetting[]
|
||||
|
||||
apiKey?: string
|
||||
|
||||
streaming = false
|
||||
|
||||
private client: GenerativeModel
|
||||
|
||||
get _isMultimodalModel() {
|
||||
return this.modelName.includes('vision') || this.modelName.startsWith('gemini-1.5')
|
||||
}
|
||||
|
||||
constructor(fields?: GoogleGenerativeAIChatInput) {
|
||||
super(fields ?? {})
|
||||
|
||||
this.modelName = fields?.model?.replace(/^models\//, '') ?? fields?.modelName?.replace(/^models\//, '') ?? 'gemini-pro'
|
||||
|
||||
this.maxOutputTokens = fields?.maxOutputTokens ?? this.maxOutputTokens
|
||||
|
||||
if (this.maxOutputTokens && this.maxOutputTokens < 0) {
|
||||
throw new Error('`maxOutputTokens` must be a positive integer')
|
||||
}
|
||||
|
||||
this.temperature = fields?.temperature ?? this.temperature
|
||||
if (this.temperature && (this.temperature < 0 || this.temperature > 1)) {
|
||||
throw new Error('`temperature` must be in the range of [0.0,1.0]')
|
||||
}
|
||||
|
||||
this.topP = fields?.topP ?? this.topP
|
||||
if (this.topP && this.topP < 0) {
|
||||
throw new Error('`topP` must be a positive integer')
|
||||
}
|
||||
|
||||
if (this.topP && this.topP > 1) {
|
||||
throw new Error('`topP` must be below 1.')
|
||||
}
|
||||
|
||||
this.topK = fields?.topK ?? this.topK
|
||||
if (this.topK && this.topK < 0) {
|
||||
throw new Error('`topK` must be a positive integer')
|
||||
}
|
||||
|
||||
this.stopSequences = fields?.stopSequences ?? this.stopSequences
|
||||
|
||||
this.apiKey = fields?.apiKey ?? process.env['GOOGLE_API_KEY']
|
||||
if (!this.apiKey) {
|
||||
throw new Error(
|
||||
'Please set an API key for Google GenerativeAI ' +
|
||||
'in the environment variable GOOGLE_API_KEY ' +
|
||||
'or in the `apiKey` field of the ' +
|
||||
'ChatGoogleGenerativeAI constructor'
|
||||
)
|
||||
}
|
||||
|
||||
this.safetySettings = fields?.safetySettings ?? this.safetySettings
|
||||
if (this.safetySettings && this.safetySettings.length > 0) {
|
||||
const safetySettingsSet = new Set(this.safetySettings.map((s) => s.category))
|
||||
if (safetySettingsSet.size !== this.safetySettings.length) {
|
||||
throw new Error('The categories in `safetySettings` array must be unique')
|
||||
}
|
||||
}
|
||||
|
||||
this.streaming = fields?.streaming ?? this.streaming
|
||||
|
||||
this.getClient()
|
||||
}
|
||||
|
||||
getClient(tools?: Tool[]) {
|
||||
this.client = new GenerativeAI(this.apiKey ?? '').getGenerativeModel({
|
||||
model: this.modelName,
|
||||
tools,
|
||||
safetySettings: this.safetySettings as SafetySetting[],
|
||||
generationConfig: {
|
||||
candidateCount: 1,
|
||||
stopSequences: this.stopSequences,
|
||||
maxOutputTokens: this.maxOutputTokens,
|
||||
temperature: this.temperature,
|
||||
topP: this.topP,
|
||||
topK: this.topK
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
_combineLLMOutput() {
|
||||
return []
|
||||
}
|
||||
|
||||
_llmType() {
|
||||
return 'googlegenerativeai'
|
||||
}
|
||||
|
||||
override bindTools(tools: (StructuredToolInterface | Record<string, unknown>)[], kwargs?: Partial<ICommonObject>) {
|
||||
//@ts-ignore
|
||||
return this.bind({ tools: convertToGeminiTools(tools), ...kwargs })
|
||||
}
|
||||
|
||||
convertFunctionResponse(prompts: Content[]) {
|
||||
for (let i = 0; i < prompts.length; i += 1) {
|
||||
if (prompts[i].role === 'function') {
|
||||
if (prompts[i - 1].role === 'model') {
|
||||
const toolName = prompts[i - 1].parts[0].functionCall?.name ?? ''
|
||||
prompts[i].parts = [
|
||||
{
|
||||
functionResponse: {
|
||||
name: toolName,
|
||||
response: {
|
||||
name: toolName,
|
||||
content: prompts[i].parts[0].text
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async _generateNonStreaming(
|
||||
prompt: Content[],
|
||||
options: this['ParsedCallOptions'],
|
||||
_runManager?: CallbackManagerForLLMRun
|
||||
): Promise<ChatResult> {
|
||||
//@ts-ignore
|
||||
const tools = options.tools ?? []
|
||||
|
||||
this.convertFunctionResponse(prompt)
|
||||
|
||||
if (tools.length > 0) {
|
||||
this.getClient(tools)
|
||||
} else {
|
||||
this.getClient()
|
||||
}
|
||||
const res = await this.caller.callWithOptions({ signal: options?.signal }, async () => {
|
||||
let output
|
||||
try {
|
||||
output = await this.client.generateContent({
|
||||
contents: prompt
|
||||
})
|
||||
} catch (e: any) {
|
||||
if (e.message?.includes('400 Bad Request')) {
|
||||
e.status = 400
|
||||
}
|
||||
throw e
|
||||
}
|
||||
return output
|
||||
})
|
||||
const generationResult = mapGenerateContentResultToChatResult(res.response)
|
||||
await _runManager?.handleLLMNewToken(generationResult.generations?.length ? generationResult.generations[0].text : '')
|
||||
return generationResult
|
||||
}
|
||||
|
||||
async _generate(
|
||||
messages: BaseMessage[],
|
||||
options: this['ParsedCallOptions'],
|
||||
runManager?: CallbackManagerForLLMRun
|
||||
): Promise<ChatResult> {
|
||||
const prompt = convertBaseMessagesToContent(messages, this._isMultimodalModel)
|
||||
|
||||
// Handle streaming
|
||||
if (this.streaming) {
|
||||
const tokenUsage: TokenUsage = {}
|
||||
const stream = this._streamResponseChunks(messages, options, runManager)
|
||||
const finalChunks: Record<number, ChatGenerationChunk> = {}
|
||||
for await (const chunk of stream) {
|
||||
const index = (chunk.generationInfo as NewTokenIndices)?.completion ?? 0
|
||||
if (finalChunks[index] === undefined) {
|
||||
finalChunks[index] = chunk
|
||||
} else {
|
||||
finalChunks[index] = finalChunks[index].concat(chunk)
|
||||
}
|
||||
}
|
||||
const generations = Object.entries(finalChunks)
|
||||
.sort(([aKey], [bKey]) => parseInt(aKey, 10) - parseInt(bKey, 10))
|
||||
.map(([_, value]) => value)
|
||||
|
||||
return { generations, llmOutput: { estimatedTokenUsage: tokenUsage } }
|
||||
}
|
||||
return this._generateNonStreaming(prompt, options, runManager)
|
||||
}
|
||||
|
||||
async *_streamResponseChunks(
|
||||
messages: BaseMessage[],
|
||||
options: this['ParsedCallOptions'],
|
||||
runManager?: CallbackManagerForLLMRun
|
||||
): AsyncGenerator<ChatGenerationChunk> {
|
||||
const prompt = convertBaseMessagesToContent(messages, this._isMultimodalModel)
|
||||
//@ts-ignore
|
||||
if (options.tools !== undefined && options.tools.length > 0) {
|
||||
const result = await this._generateNonStreaming(prompt, options, runManager)
|
||||
const generationMessage = result.generations[0].message as AIMessage
|
||||
if (generationMessage === undefined) {
|
||||
throw new Error('Could not parse Groq output.')
|
||||
}
|
||||
const toolCallChunks = generationMessage.tool_calls?.map((toolCall, i) => ({
|
||||
name: toolCall.name,
|
||||
args: JSON.stringify(toolCall.args),
|
||||
id: toolCall.id,
|
||||
index: i
|
||||
}))
|
||||
yield new ChatGenerationChunk({
|
||||
message: new AIMessageChunk({
|
||||
content: generationMessage.content,
|
||||
additional_kwargs: generationMessage.additional_kwargs,
|
||||
tool_call_chunks: toolCallChunks
|
||||
}),
|
||||
text: generationMessage.tool_calls?.length ? '' : (generationMessage.content as string)
|
||||
})
|
||||
} else {
|
||||
const stream = await this.caller.callWithOptions({ signal: options?.signal }, async () => {
|
||||
this.getClient()
|
||||
const { stream } = await this.client.generateContentStream({
|
||||
contents: prompt
|
||||
})
|
||||
return stream
|
||||
})
|
||||
|
||||
for await (const response of stream) {
|
||||
const chunk = convertResponseContentToChatGenerationChunk(response)
|
||||
if (!chunk) {
|
||||
continue
|
||||
}
|
||||
|
||||
yield chunk
|
||||
await runManager?.handleLLMNewToken(chunk.text ?? '')
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
export class ChatGoogleGenerativeAI extends LangchainChatGoogleGenerativeAI implements IVisionChatModal {
|
||||
configuredModel: string
|
||||
configuredMaxToken?: number
|
||||
multiModalOption: IMultiModalOption
|
||||
id: string
|
||||
|
||||
constructor(id: string, fields?: GoogleGenerativeAIChatInput) {
|
||||
super(fields)
|
||||
this.id = id
|
||||
this.configuredModel = fields?.modelName ?? ''
|
||||
this.configuredMaxToken = fields?.maxOutputTokens
|
||||
}
|
||||
|
||||
revertToOriginalModel(): void {
|
||||
super.modelName = this.configuredModel
|
||||
super.maxOutputTokens = this.configuredMaxToken
|
||||
}
|
||||
|
||||
setMultiModalOption(multiModalOption: IMultiModalOption): void {
|
||||
this.multiModalOption = multiModalOption
|
||||
}
|
||||
|
||||
setVisionModel(): void {
|
||||
if (this.modelName !== 'gemini-pro-vision' && this.modelName !== 'gemini-1.5-pro-latest') {
|
||||
super.modelName = 'gemini-1.5-pro-latest'
|
||||
super.maxOutputTokens = this.configuredMaxToken ? this.configuredMaxToken : 8192
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
function getMessageAuthor(message: BaseMessage) {
|
||||
const type = message._getType()
|
||||
if (ChatMessage.isInstance(message)) {
|
||||
return message.role
|
||||
}
|
||||
return message.name ?? type
|
||||
}
|
||||
|
||||
function convertAuthorToRole(author: string) {
|
||||
switch (author) {
|
||||
/**
|
||||
* Note: Gemini currently is not supporting system messages
|
||||
* we will convert them to human messages and merge with following
|
||||
* */
|
||||
case 'ai':
|
||||
case 'model': // getMessageAuthor returns message.name. code ex.: return message.name ?? type;
|
||||
return 'model'
|
||||
case 'system':
|
||||
case 'human':
|
||||
return 'user'
|
||||
case 'function':
|
||||
case 'tool':
|
||||
return 'function'
|
||||
default:
|
||||
throw new Error(`Unknown / unsupported author: ${author}`)
|
||||
}
|
||||
}
|
||||
|
||||
function convertMessageContentToParts(content: MessageContent, isMultimodalModel: boolean): Part[] {
|
||||
if (typeof content === 'string') {
|
||||
return [{ text: content }]
|
||||
}
|
||||
|
||||
return content.map((c) => {
|
||||
if (c.type === 'text') {
|
||||
return {
|
||||
text: c.text
|
||||
}
|
||||
}
|
||||
|
||||
if (c.type === 'tool_use') {
|
||||
return {
|
||||
functionCall: c.functionCall
|
||||
}
|
||||
}
|
||||
|
||||
/*if (c.type === "tool_use" || c.type === "tool_result") {
|
||||
// TODO: Fix when SDK types are fixed
|
||||
return {
|
||||
...contentPart,
|
||||
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
||||
} as any;
|
||||
}*/
|
||||
|
||||
if (c.type === 'image_url') {
|
||||
if (!isMultimodalModel) {
|
||||
throw new Error(`This model does not support images`)
|
||||
}
|
||||
let source
|
||||
if (typeof c.image_url === 'string') {
|
||||
source = c.image_url
|
||||
} else if (typeof c.image_url === 'object' && 'url' in c.image_url) {
|
||||
source = c.image_url.url
|
||||
} else {
|
||||
throw new Error('Please provide image as base64 encoded data URL')
|
||||
}
|
||||
const [dm, data] = source.split(',')
|
||||
if (!dm.startsWith('data:')) {
|
||||
throw new Error('Please provide image as base64 encoded data URL')
|
||||
}
|
||||
|
||||
const [mimeType, encoding] = dm.replace(/^data:/, '').split(';')
|
||||
if (encoding !== 'base64') {
|
||||
throw new Error('Please provide image as base64 encoded data URL')
|
||||
}
|
||||
|
||||
return {
|
||||
inlineData: {
|
||||
data,
|
||||
mimeType
|
||||
}
|
||||
}
|
||||
}
|
||||
throw new Error(`Unknown content type ${(c as { type: string }).type}`)
|
||||
})
|
||||
}
|
||||
|
||||
function convertBaseMessagesToContent(messages: BaseMessage[], isMultimodalModel: boolean) {
|
||||
return messages.reduce<{
|
||||
content: Content[]
|
||||
mergeWithPreviousContent: boolean
|
||||
}>(
|
||||
(acc, message, index) => {
|
||||
if (!isBaseMessage(message)) {
|
||||
throw new Error('Unsupported message input')
|
||||
}
|
||||
const author = getMessageAuthor(message)
|
||||
if (author === 'system' && index !== 0) {
|
||||
throw new Error('System message should be the first one')
|
||||
}
|
||||
const role = convertAuthorToRole(author)
|
||||
|
||||
const prevContent = acc.content[acc.content.length]
|
||||
if (!acc.mergeWithPreviousContent && prevContent && prevContent.role === role) {
|
||||
throw new Error('Google Generative AI requires alternate messages between authors')
|
||||
}
|
||||
|
||||
const parts = convertMessageContentToParts(message.content, isMultimodalModel)
|
||||
|
||||
if (acc.mergeWithPreviousContent) {
|
||||
const prevContent = acc.content[acc.content.length - 1]
|
||||
if (!prevContent) {
|
||||
throw new Error('There was a problem parsing your system message. Please try a prompt without one.')
|
||||
}
|
||||
prevContent.parts.push(...parts)
|
||||
|
||||
return {
|
||||
mergeWithPreviousContent: false,
|
||||
content: acc.content
|
||||
}
|
||||
}
|
||||
const content: Content = {
|
||||
role,
|
||||
parts
|
||||
}
|
||||
return {
|
||||
mergeWithPreviousContent: author === 'system',
|
||||
content: [...acc.content, content]
|
||||
}
|
||||
},
|
||||
{ content: [], mergeWithPreviousContent: false }
|
||||
).content
|
||||
}
|
||||
|
||||
function mapGenerateContentResultToChatResult(response: EnhancedGenerateContentResponse): ChatResult {
|
||||
// if rejected or error, return empty generations with reason in filters
|
||||
if (!response.candidates || response.candidates.length === 0 || !response.candidates[0]) {
|
||||
return {
|
||||
generations: [],
|
||||
llmOutput: {
|
||||
filters: response?.promptFeedback
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const [candidate] = response.candidates
|
||||
const { content, ...generationInfo } = candidate
|
||||
const text = content.parts.map(({ text }) => text).join('')
|
||||
|
||||
if (content.parts.some((part) => part.functionCall)) {
|
||||
const toolCalls: ToolCall[] = []
|
||||
for (const fcPart of content.parts) {
|
||||
const fc = fcPart.functionCall
|
||||
if (fc) {
|
||||
const { name, args } = fc
|
||||
toolCalls.push({ name, args })
|
||||
}
|
||||
}
|
||||
|
||||
const functionCalls = toolCalls.map((tool) => {
|
||||
return { functionCall: { name: tool.name, args: tool.args }, type: 'tool_use' }
|
||||
})
|
||||
const generation: ChatGeneration = {
|
||||
text,
|
||||
message: new AIMessage({
|
||||
content: functionCalls,
|
||||
name: !content ? undefined : content.role,
|
||||
additional_kwargs: generationInfo,
|
||||
tool_calls: toolCalls
|
||||
}),
|
||||
generationInfo
|
||||
}
|
||||
return {
|
||||
generations: [generation]
|
||||
}
|
||||
} else {
|
||||
const generation: ChatGeneration = {
|
||||
text,
|
||||
message: new AIMessage({
|
||||
content: text,
|
||||
name: !content ? undefined : content.role,
|
||||
additional_kwargs: generationInfo
|
||||
}),
|
||||
generationInfo
|
||||
}
|
||||
|
||||
return {
|
||||
generations: [generation]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
function convertResponseContentToChatGenerationChunk(response: EnhancedGenerateContentResponse): ChatGenerationChunk | null {
|
||||
if (!response.candidates || response.candidates.length === 0) {
|
||||
return null
|
||||
}
|
||||
const [candidate] = response.candidates
|
||||
const { content, ...generationInfo } = candidate
|
||||
const text = content?.parts[0]?.text ?? ''
|
||||
|
||||
return new ChatGenerationChunk({
|
||||
text,
|
||||
message: new AIMessageChunk({
|
||||
content: text,
|
||||
name: !content ? undefined : content.role,
|
||||
// Each chunk can have unique "generationInfo", and merging strategy is unclear,
|
||||
// so leave blank for now.
|
||||
additional_kwargs: {}
|
||||
}),
|
||||
generationInfo
|
||||
})
|
||||
}
|
||||
|
||||
function zodToGeminiParameters(zodObj: any) {
|
||||
// Gemini doesn't accept either the $schema or additionalProperties
|
||||
// attributes, so we need to explicitly remove them.
|
||||
const jsonSchema: any = zodToJsonSchema(zodObj)
|
||||
// eslint-disable-next-line unused-imports/no-unused-vars
|
||||
const { $schema, additionalProperties, ...rest } = jsonSchema
|
||||
return rest
|
||||
}
|
||||
|
||||
function convertToGeminiTools(structuredTools: (StructuredToolInterface | Record<string, unknown>)[]) {
|
||||
return [
|
||||
{
|
||||
functionDeclarations: structuredTools.map((structuredTool) => {
|
||||
if (isStructuredTool(structuredTool)) {
|
||||
const jsonSchema = zodToGeminiParameters(structuredTool.schema)
|
||||
return {
|
||||
name: structuredTool.name,
|
||||
description: structuredTool.description,
|
||||
parameters: jsonSchema
|
||||
}
|
||||
}
|
||||
return structuredTool
|
||||
})
|
||||
}
|
||||
]
|
||||
}
|
||||
|
|
@@ -1,6 +1,5 @@
-import { GoogleAuthOptions } from 'google-auth-library'
import { BaseCache } from '@langchain/core/caches'
-import { ChatGoogleVertexAI, GoogleVertexAIChatInput } from '@langchain/community/chat_models/googlevertexai'
+import { ChatVertexAI, ChatVertexAIInput } from '@langchain/google-vertexai'
import { ICommonObject, INode, INodeData, INodeOptionsValue, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { getModels, MODEL_TYPE } from '../../../src/modelLoader'
@@ -20,12 +19,12 @@ class GoogleVertexAI_ChatModels implements INode {
    constructor() {
        this.label = 'ChatGoogleVertexAI'
        this.name = 'chatGoogleVertexAI'
-        this.version = 3.0
+        this.version = 4.0
        this.type = 'ChatGoogleVertexAI'
        this.icon = 'GoogleVertex.svg'
        this.category = 'Chat Models'
        this.description = 'Wrapper around VertexAI large language models that use the Chat endpoint'
-        this.baseClasses = [this.type, ...getBaseClasses(ChatGoogleVertexAI)]
+        this.baseClasses = [this.type, ...getBaseClasses(ChatVertexAI)]
        this.credential = {
            label: 'Connect Credential',
            name: 'credential',
@@ -72,6 +71,15 @@ class GoogleVertexAI_ChatModels implements INode {
                step: 0.1,
                optional: true,
                additionalParams: true
            },
+            {
+                label: 'Top Next Highest Probability Tokens',
+                name: 'topK',
+                type: 'number',
+                description: `Decode using top-k sampling: consider the set of top_k most probable tokens. Must be positive`,
+                step: 1,
+                optional: true,
+                additionalParams: true
+            }
        ]
    }
@@ -89,7 +97,7 @@ class GoogleVertexAI_ChatModels implements INode {
        const googleApplicationCredential = getCredentialParam('googleApplicationCredential', credentialData, nodeData)
        const projectID = getCredentialParam('projectID', credentialData, nodeData)

-        const authOptions: GoogleAuthOptions = {}
+        const authOptions: ICommonObject = {}
        if (Object.keys(credentialData).length !== 0) {
            if (!googleApplicationCredentialFilePath && !googleApplicationCredential)
                throw new Error('Please specify your Google Application Credential')
@@ -111,8 +119,9 @@ class GoogleVertexAI_ChatModels implements INode {
        const maxOutputTokens = nodeData.inputs?.maxOutputTokens as string
        const topP = nodeData.inputs?.topP as string
        const cache = nodeData.inputs?.cache as BaseCache
+        const topK = nodeData.inputs?.topK as string

-        const obj: GoogleVertexAIChatInput<GoogleAuthOptions> = {
+        const obj: ChatVertexAIInput = {
            temperature: parseFloat(temperature),
            model: modelName
        }
@@ -121,8 +130,9 @@ class GoogleVertexAI_ChatModels implements INode {
        if (maxOutputTokens) obj.maxOutputTokens = parseInt(maxOutputTokens, 10)
        if (topP) obj.topP = parseFloat(topP)
        if (cache) obj.cache = cache
+        if (topK) obj.topK = parseFloat(topK)

-        const model = new ChatGoogleVertexAI(obj)
+        const model = new ChatVertexAI(obj)
        return model
    }
}
@@ -1,37 +1,9 @@
-import { ChatCompletionResponse, ToolCalls as MistralAIToolCalls } from '@mistralai/mistralai'
import { BaseCache } from '@langchain/core/caches'
-import { CallbackManagerForLLMRun } from '@langchain/core/callbacks/manager'
-import { NewTokenIndices } from '@langchain/core/callbacks/base'
-import { ChatGeneration, ChatGenerationChunk, ChatResult } from '@langchain/core/outputs'
-import {
-    MessageType,
-    type BaseMessage,
-    MessageContent,
-    AIMessage,
-    HumanMessage,
-    HumanMessageChunk,
-    AIMessageChunk,
-    ToolMessageChunk,
-    ChatMessageChunk
-} from '@langchain/core/messages'
-import { ChatMistralAI as LangchainChatMistralAI, ChatMistralAIInput } from '@langchain/mistralai'
+import { ChatMistralAI, ChatMistralAIInput } from '@langchain/mistralai'
import { ICommonObject, INode, INodeData, INodeOptionsValue, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { getModels, MODEL_TYPE } from '../../../src/modelLoader'
-
-interface TokenUsage {
-    completionTokens?: number
-    promptTokens?: number
-    totalTokens?: number
-}
-
-type MistralAIInputMessage = {
-    role: string
-    name?: string
-    content: string | string[]
-    tool_calls?: MistralAIToolCalls[] | any[]
-}

class ChatMistral_ChatModels implements INode {
    label: string
    name: string
@ -170,243 +142,4 @@ class ChatMistral_ChatModels implements INode {
|
|||
}
|
||||
}
|
||||
|
||||
class ChatMistralAI extends LangchainChatMistralAI {
|
||||
async _generate(
|
||||
messages: BaseMessage[],
|
||||
options?: this['ParsedCallOptions'],
|
||||
runManager?: CallbackManagerForLLMRun
|
||||
): Promise<ChatResult> {
|
||||
const tokenUsage: TokenUsage = {}
|
||||
const params = this.invocationParams(options)
|
||||
const mistralMessages = this.convertMessagesToMistralMessages(messages)
|
||||
const input = {
|
||||
...params,
|
||||
messages: mistralMessages
|
||||
}
|
||||
|
||||
// Handle streaming
|
||||
if (this.streaming) {
|
||||
const stream = this._streamResponseChunks(messages, options, runManager)
|
||||
const finalChunks: Record<number, ChatGenerationChunk> = {}
|
||||
for await (const chunk of stream) {
|
||||
const index = (chunk.generationInfo as NewTokenIndices)?.completion ?? 0
|
||||
if (finalChunks[index] === undefined) {
|
||||
finalChunks[index] = chunk
|
||||
} else {
|
||||
finalChunks[index] = finalChunks[index].concat(chunk)
|
||||
}
|
||||
}
|
||||
const generations = Object.entries(finalChunks)
|
||||
.sort(([aKey], [bKey]) => parseInt(aKey, 10) - parseInt(bKey, 10))
|
||||
.map(([_, value]) => value)
|
||||
|
||||
return { generations, llmOutput: { estimatedTokenUsage: tokenUsage } }
|
||||
}
|
||||
|
||||
// Not streaming, so we can just call the API once.
|
||||
const response = await this.completionWithRetry(input, false)
|
||||
|
||||
const { completion_tokens: completionTokens, prompt_tokens: promptTokens, total_tokens: totalTokens } = response?.usage ?? {}
|
||||
|
||||
if (completionTokens) {
|
||||
tokenUsage.completionTokens = (tokenUsage.completionTokens ?? 0) + completionTokens
|
||||
}
|
||||
|
||||
if (promptTokens) {
|
||||
tokenUsage.promptTokens = (tokenUsage.promptTokens ?? 0) + promptTokens
|
||||
}
|
||||
|
||||
if (totalTokens) {
|
||||
tokenUsage.totalTokens = (tokenUsage.totalTokens ?? 0) + totalTokens
|
||||
}
|
||||
|
||||
const generations: ChatGeneration[] = []
|
||||
for (const part of response?.choices ?? []) {
|
||||
if ('delta' in part) {
|
||||
throw new Error('Delta not supported in non-streaming mode.')
|
||||
}
|
||||
if (!('message' in part)) {
|
||||
throw new Error('No message found in the choice.')
|
||||
}
|
||||
const text = part.message?.content ?? ''
|
||||
const generation: ChatGeneration = {
|
||||
text,
|
||||
message: this.mistralAIResponseToChatMessage(part)
|
||||
}
|
||||
if (part.finish_reason) {
|
||||
generation.generationInfo = { finish_reason: part.finish_reason }
|
||||
}
|
||||
generations.push(generation)
|
||||
}
|
||||
return {
|
||||
generations,
|
||||
llmOutput: { tokenUsage }
|
||||
}
|
||||
}
|
||||
|
||||
async *_streamResponseChunks(
|
||||
messages: BaseMessage[],
|
||||
options?: this['ParsedCallOptions'],
|
||||
runManager?: CallbackManagerForLLMRun
|
||||
): AsyncGenerator<ChatGenerationChunk> {
|
||||
const mistralMessages = this.convertMessagesToMistralMessages(messages)
|
||||
const params = this.invocationParams(options)
|
||||
const input = {
|
||||
...params,
|
||||
messages: mistralMessages
|
||||
}
|
||||
|
||||
const streamIterable = await this.completionWithRetry(input, true)
|
||||
for await (const data of streamIterable) {
|
||||
const choice = data?.choices[0]
|
||||
if (!choice || !('delta' in choice)) {
|
||||
continue
|
||||
}
|
||||
|
||||
const { delta } = choice
|
||||
if (!delta) {
|
||||
continue
|
||||
}
|
||||
const newTokenIndices = {
|
||||
prompt: 0,
|
||||
completion: choice.index ?? 0
|
||||
}
|
||||
const message = this._convertDeltaToMessageChunk(delta)
|
||||
if (message === null) {
|
||||
// Do not yield a chunk if the message is empty
|
||||
continue
|
||||
}
|
||||
const generationChunk = new ChatGenerationChunk({
|
||||
message,
|
||||
text: delta.content ?? '',
|
||||
generationInfo: newTokenIndices
|
||||
})
|
||||
yield generationChunk
|
||||
|
||||
void runManager?.handleLLMNewToken(generationChunk.text ?? '', newTokenIndices, undefined, undefined, undefined, {
|
||||
chunk: generationChunk
|
||||
})
|
||||
}
|
||||
if (options?.signal?.aborted) {
|
||||
throw new Error('AbortError')
|
||||
}
|
||||
}
|
||||
|
||||
_convertDeltaToMessageChunk(delta: {
|
||||
role?: string | undefined
|
||||
content?: string | undefined
|
||||
tool_calls?: MistralAIToolCalls[] | undefined
|
||||
}) {
|
||||
if (!delta.content && !delta.tool_calls) {
|
||||
return null
|
||||
}
|
||||
// Our merge additional kwargs util function will throw unless there
|
||||
// is an index key in each tool object (as seen in OpenAI's) so we
|
||||
// need to insert it here.
|
||||
const toolCallsWithIndex = delta.tool_calls?.length
|
||||
? delta.tool_calls?.map((toolCall, index) => ({
|
||||
...toolCall,
|
||||
index
|
||||
}))
|
||||
: undefined
|
||||
|
||||
let role = 'assistant'
|
||||
if (delta.role) {
|
||||
role = delta.role
|
||||
} else if (toolCallsWithIndex) {
|
||||
role = 'tool'
|
||||
}
|
||||
const content = delta.content ?? ''
|
||||
let additional_kwargs
|
||||
if (toolCallsWithIndex) {
|
||||
additional_kwargs = {
|
||||
tool_calls: toolCallsWithIndex
|
||||
}
|
||||
} else {
|
||||
additional_kwargs = {}
|
||||
}
|
||||
|
||||
if (role === 'user') {
|
||||
return new HumanMessageChunk({ content })
|
||||
} else if (role === 'assistant') {
|
||||
return new AIMessageChunk({ content, additional_kwargs })
|
||||
} else if (role === 'tool') {
|
||||
return new ToolMessageChunk({
|
||||
content,
|
||||
additional_kwargs,
|
||||
tool_call_id: toolCallsWithIndex?.[0].id ?? ''
|
||||
})
|
||||
} else {
|
||||
return new ChatMessageChunk({ content, role })
|
||||
}
|
||||
}
|
||||
|
||||
convertMessagesToMistralMessages(messages: Array<BaseMessage>): Array<MistralAIInputMessage> {
|
||||
const getRole = (role: MessageType) => {
|
||||
switch (role) {
|
||||
case 'human':
|
||||
return 'user'
|
||||
case 'ai':
|
||||
return 'assistant'
|
||||
case 'tool':
|
||||
return 'tool'
|
||||
case 'function':
|
||||
return 'function'
|
||||
case 'system':
|
||||
return 'system'
|
||||
default:
|
||||
throw new Error(`Unknown message type: ${role}`)
|
||||
}
|
||||
}
|
||||
|
||||
const getContent = (content: MessageContent): string => {
|
||||
if (typeof content === 'string') {
|
||||
return content
|
||||
}
|
||||
throw new Error(`ChatMistralAI does not support non text message content. Received: ${JSON.stringify(content, null, 2)}`)
|
||||
}
|
||||
|
||||
const mistralMessages = []
|
||||
for (const msg of messages) {
|
||||
const msgObj: MistralAIInputMessage = {
|
||||
role: getRole(msg._getType()),
|
||||
content: getContent(msg.content)
|
||||
}
|
||||
if (getRole(msg._getType()) === 'tool') {
|
||||
msgObj.role = 'assistant'
|
||||
msgObj.tool_calls = msg.additional_kwargs?.tool_calls ?? []
|
||||
} else if (getRole(msg._getType()) === 'function') {
|
||||
msgObj.role = 'tool'
|
||||
msgObj.name = msg.name
|
||||
}
|
||||
|
||||
mistralMessages.push(msgObj)
|
||||
}
|
||||
|
||||
return mistralMessages
|
||||
}
|
||||
|
||||
mistralAIResponseToChatMessage(choice: ChatCompletionResponse['choices'][0]): BaseMessage {
|
||||
const { message } = choice
|
||||
// MistralAI SDK does not include tool_calls in the non
|
||||
// streaming return type, so we need to extract it like this
|
||||
// to satisfy typescript.
|
||||
let toolCalls: MistralAIToolCalls[] = []
|
||||
if ('tool_calls' in message) {
|
||||
toolCalls = message.tool_calls as MistralAIToolCalls[]
|
||||
}
|
||||
switch (message.role) {
|
||||
case 'assistant':
|
||||
return new AIMessage({
|
||||
content: message.content ?? '',
|
||||
additional_kwargs: {
|
||||
tool_calls: toolCalls
|
||||
}
|
||||
})
|
||||
default:
|
||||
return new HumanMessage(message.content ?? '')
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = { nodeClass: ChatMistral_ChatModels }
|
||||
|
|
|
|||
|
|
@@ -33,7 +33,9 @@ export class ChatOpenAI extends LangchainChatOpenAI implements IVisionChatModal
    }

    setVisionModel(): void {
-        super.modelName = 'gpt-4-vision-preview'
-        super.maxTokens = this.configuredMaxToken ? this.configuredMaxToken : 1024
+        if (this.modelName !== 'gpt-4-turbo' && !this.modelName.includes('vision')) {
+            super.modelName = 'gpt-4-turbo'
+            super.maxTokens = this.configuredMaxToken ? this.configuredMaxToken : 1024
+        }
    }
}
@@ -1,45 +0,0 @@
-import { AIPluginTool } from '@langchain/community/tools/aiplugin'
-import { INode, INodeData, INodeParams } from '../../../src/Interface'
-import { getBaseClasses } from '../../../src/utils'
-
-class AIPlugin implements INode {
-    label: string
-    name: string
-    version: number
-    description: string
-    type: string
-    icon: string
-    category: string
-    baseClasses: string[]
-    inputs?: INodeParams[]
-    badge: string
-
-    constructor() {
-        this.label = 'AI Plugin'
-        this.name = 'aiPlugin'
-        this.version = 1.0
-        this.type = 'AIPlugin'
-        this.icon = 'aiplugin.svg'
-        this.category = 'Tools'
-        this.description = 'Execute actions using ChatGPT Plugin Url'
-        this.baseClasses = [this.type, ...getBaseClasses(AIPluginTool)]
-        this.badge = 'DEPRECATING'
-        this.inputs = [
-            {
-                label: 'Plugin Url',
-                name: 'pluginUrl',
-                type: 'string',
-                placeholder: 'https://www.klarna.com/.well-known/ai-plugin.json'
-            }
-        ]
-    }
-
-    async init(nodeData: INodeData): Promise<any> {
-        const pluginUrl = nodeData.inputs?.pluginUrl as string
-        const aiplugin = await AIPluginTool.fromPluginUrl(pluginUrl)
-
-        return aiplugin
-    }
-}
-
-module.exports = { nodeClass: AIPlugin }
@@ -1,6 +0,0 @@
-<svg width="32" height="32" viewBox="0 0 32 32" fill="none" xmlns="http://www.w3.org/2000/svg">
-<path d="M8.57535 17.5607C7.7943 16.7796 7.7943 15.5133 8.57535 14.7323L14.2322 9.07542C15.0132 8.29437 16.2796 8.29437 17.0606 9.07542L19.1819 11.1967C20.744 12.7588 20.744 15.2915 19.1819 16.8536L16.3535 19.682C14.7914 21.2441 12.2588 21.2441 10.6967 19.682L8.57535 17.5607Z" stroke="black" stroke-width="2"/>
-<path d="M11.404 7.66112L9.28271 5.53979" stroke="black" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/>
-<path d="M7.16136 11.9038L5.04004 9.78247" stroke="black" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/>
-<path d="M17.7676 18.2678L24.8386 25.3389" stroke="black" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/>
-</svg>
@@ -31,16 +31,18 @@
        "@gomomento/sdk": "^1.51.1",
        "@gomomento/sdk-core": "^1.51.1",
        "@google-ai/generativelanguage": "^0.2.1",
-        "@google/generative-ai": "^0.1.3",
+        "@google/generative-ai": "^0.7.0",
        "@huggingface/inference": "^2.6.1",
-        "@langchain/anthropic": "^0.1.4",
-        "@langchain/cohere": "^0.0.5",
+        "@langchain/anthropic": "^0.1.14",
+        "@langchain/cohere": "^0.0.7",
        "@langchain/community": "^0.0.39",
        "@langchain/core": "^0.1.57",
        "@langchain/google-genai": "^0.0.10",
-        "@langchain/groq": "^0.0.2",
-        "@langchain/mistralai": "^0.0.11",
+        "@langchain/google-vertexai": "^0.0.5",
+        "@langchain/groq": "^0.0.8",
+        "@langchain/mistralai": "^0.0.18",
+        "@langchain/mongodb": "^0.0.1",
-        "@langchain/openai": "^0.0.14",
+        "@langchain/openai": "^0.0.28",
        "@langchain/pinecone": "^0.0.3",
        "@langchain/weaviate": "^0.0.1",
        "@mistralai/mistralai": "0.1.3",
@@ -64,7 +66,7 @@
        "d3-dsv": "2",
        "dotenv": "^16.0.0",
        "express": "^4.17.3",
-        "faiss-node": "^0.2.2",
+        "faiss-node": "^0.5.1",
        "fast-json-patch": "^3.1.1",
        "form-data": "^4.0.0",
        "google-auth-library": "^9.4.0",
@@ -73,7 +75,7 @@
        "ioredis": "^5.3.2",
        "jsdom": "^22.1.0",
        "jsonpointer": "^5.0.1",
-        "langchain": "^0.1.26",
+        "langchain": "^0.1.33",
        "langfuse": "3.3.4",
        "langfuse-langchain": "^3.3.4",
        "langsmith": "0.1.6",
@@ -89,7 +91,7 @@
        "node-html-markdown": "^1.3.0",
        "notion-to-md": "^3.1.1",
        "object-hash": "^3.0.0",
-        "openai": "^4.16.1",
+        "openai": "^4.32.1",
        "pdf-parse": "^1.1.1",
        "pdfjs-dist": "^3.7.107",
        "pg": "^8.11.2",
@ -97,7 +99,7 @@
|
|||
"puppeteer": "^20.7.1",
|
||||
"pyodide": ">=0.21.0-alpha.2",
|
||||
"redis": "^4.6.7",
|
||||
"replicate": "^0.12.3",
|
||||
"replicate": "^0.18.0",
|
||||
"socket.io": "^4.6.1",
|
||||
"srt-parser-2": "^1.2.3",
|
||||
"typeorm": "^0.3.6",
|
||||
|
|
|
|||
|
|
@@ -1,14 +1,16 @@
import { flatten } from 'lodash'
import { ChainValues } from '@langchain/core/utils/types'
import { AgentStep, AgentAction } from '@langchain/core/agents'
import { BaseMessage, FunctionMessage, AIMessage } from '@langchain/core/messages'
import { OutputParserException } from '@langchain/core/output_parsers'
import { BaseMessage, FunctionMessage, AIMessage, isBaseMessage } from '@langchain/core/messages'
import { ToolCall } from '@langchain/core/messages/tool'
import { OutputParserException, BaseOutputParser } from '@langchain/core/output_parsers'
import { BaseLanguageModel } from '@langchain/core/language_models/base'
import { CallbackManager, CallbackManagerForChainRun, Callbacks } from '@langchain/core/callbacks/manager'
import { ToolInputParsingException, Tool, StructuredToolInterface } from '@langchain/core/tools'
import { Runnable, RunnableSequence, RunnablePassthrough } from '@langchain/core/runnables'
import { Serializable } from '@langchain/core/load/serializable'
import { renderTemplate } from '@langchain/core/prompts'
import { ChatGeneration } from '@langchain/core/outputs'
import { BaseChain, SerializedLLMChain } from 'langchain/chains'
import {
    CreateReactAgentParams,

@@ -824,3 +826,75 @@ export class XMLAgentOutputParser extends AgentActionOutputParser {
        throw new Error('getFormatInstructions not implemented inside XMLAgentOutputParser.')
    }
}

abstract class AgentMultiActionOutputParser extends BaseOutputParser<AgentAction[] | AgentFinish> {}

type ToolsAgentAction = AgentAction & {
    toolCallId: string
    messageLog?: BaseMessage[]
}

export type ToolsAgentStep = AgentStep & {
    action: ToolsAgentAction
}

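// Maps the tool calls on an AIMessage to agent actions (one per call);
// returns AgentFinish with the raw message content when the model made no tool calls.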
function parseAIMessageToToolAction(message: AIMessage): ToolsAgentAction[] | AgentFinish {
    const stringifiedMessageContent = typeof message.content === 'string' ? message.content : JSON.stringify(message.content)
    let toolCalls: ToolCall[] = []
    if (message.tool_calls !== undefined && message.tool_calls.length > 0) {
        toolCalls = message.tool_calls
    } else {
        if (message.additional_kwargs.tool_calls === undefined || message.additional_kwargs.tool_calls.length === 0) {
            return {
                returnValues: { output: message.content },
                log: stringifiedMessageContent
            }
        }
        // Best effort parsing
        for (const toolCall of message.additional_kwargs.tool_calls ?? []) {
            const functionName = toolCall.function?.name
            try {
                const args = JSON.parse(toolCall.function.arguments)
                toolCalls.push({ name: functionName, args, id: toolCall.id })
            } catch (e: any) {
                throw new OutputParserException(
                    `Failed to parse tool arguments from chat model response. Text: "${JSON.stringify(toolCalls)}". ${e}`
                )
            }
        }
    }
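    // One ToolsAgentAction per tool call; the full AIMessage is kept on the first action's messageLog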
    return toolCalls.map((toolCall, i) => {
        const messageLog = i === 0 ? [message] : []
        const log = `Invoking "${toolCall.name}" with ${JSON.stringify(toolCall.args ?? {})}\n${stringifiedMessageContent}`
        return {
            tool: toolCall.name as string,
            toolInput: toolCall.args,
            toolCallId: toolCall.id ?? '',
            log,
            messageLog
        }
    })
}

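// Multi-action output parser that extracts tool calls straight from the ChatGeneration's message
// rather than from free-form text, for use with tool-calling agents.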
export class ToolCallingAgentOutputParser extends AgentMultiActionOutputParser {
    lc_namespace = ['langchain', 'agents', 'tool_calling']

    static lc_name() {
        return 'ToolCallingAgentOutputParser'
    }

    async parse(text: string): Promise<AgentAction[] | AgentFinish> {
        throw new Error(`ToolCallingAgentOutputParser can only parse messages.\nPassed input: ${text}`)
    }

    async parseResult(generations: ChatGeneration[]) {
        if ('message' in generations[0] && isBaseMessage(generations[0].message)) {
            return parseAIMessageToToolAction(generations[0].message)
        }
        throw new Error('parseResult on ToolCallingAgentOutputParser only works on ChatGeneration output')
    }

    getFormatInstructions(): string {
        throw new Error('getFormatInstructions not implemented inside ToolCallingAgentOutputParser.')
    }
}
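
A minimal usage sketch (not part of this diff, all names below are illustrative): wiring ToolCallingAgentOutputParser into a tool-calling agent runnable, assuming a chat model that supports .bindTools() and LangChain's formatToOpenAIToolMessages scratchpad helper.

import { AgentExecutor } from 'langchain/agents'
import { formatToOpenAIToolMessages } from 'langchain/agents/format_scratchpad/openai_tools'
import { ChatPromptTemplate, MessagesPlaceholder } from '@langchain/core/prompts'
import { RunnableSequence } from '@langchain/core/runnables'
import { ChatOpenAI } from '@langchain/openai'
import { Calculator } from '@langchain/community/tools/calculator'

// Illustrative model, tools and prompt; any chat model exposing bindTools() should work similarly
const llm = new ChatOpenAI({ modelName: 'gpt-3.5-turbo', temperature: 0 })
const tools = [new Calculator()]
const prompt = ChatPromptTemplate.fromMessages([
    ['system', 'You are a helpful assistant'],
    ['human', '{input}'],
    new MessagesPlaceholder('agent_scratchpad')
])

const runnableAgent = RunnableSequence.from([
    {
        input: (i: { input: string; steps: ToolsAgentStep[] }) => i.input,
        // Replay earlier tool calls and their results so the model can continue the loop
        agent_scratchpad: (i: { input: string; steps: ToolsAgentStep[] }) => formatToOpenAIToolMessages(i.steps)
    },
    prompt,
    llm.bindTools(tools),
    new ToolCallingAgentOutputParser()
])

const executor = new AgentExecutor({ agent: runnableAgent, tools })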
@@ -24,6 +24,7 @@ export const availableDependencies = [
    '@gomomento/sdk',
    '@gomomento/sdk-core',
    '@google-ai/generativelanguage',
    '@google/generative-ai',
    '@huggingface/inference',
    '@notionhq/client',
    '@opensearch-project/opensearch',

@@ -1,500 +0,0 @@
{
    "description": "Use ChatGPT Plugins within LangChain abstractions with GET and POST Tools",
    "categories": "ChatGPT Plugin,HTTP GET/POST,ChatOpenAI,MRKL Agent,Langchain",
    "framework": "Langchain",
    "nodes": [
        {
            "width": 300,
            "height": 278,
            "id": "aiPlugin_0",
            "position": {
                "x": 1086.2925487205378,
                "y": 84.92168014974317
            },
            "type": "customNode",
            "data": {
                "id": "aiPlugin_0",
                "label": "AI Plugin",
                "name": "aiPlugin",
                "version": 1,
                "type": "AIPlugin",
                "baseClasses": ["AIPlugin", "Tool"],
                "category": "Tools",
                "description": "Execute actions using ChatGPT Plugin Url",
                "inputParams": [
                    {
                        "label": "Plugin Url",
                        "name": "pluginUrl",
                        "type": "string",
                        "placeholder": "https://www.klarna.com/.well-known/ai-plugin.json"
                    }
                ],
                "inputAnchors": [],
                "inputs": {
                    "pluginUrl": "https://www.klarna.com/.well-known/ai-plugin.json"
                },
                "outputAnchors": [
                    {
                        "id": "aiPlugin_0-output-aiPlugin-AIPlugin|Tool",
                        "name": "aiPlugin",
                        "label": "AIPlugin",
                        "type": "AIPlugin | Tool"
                    }
                ],
                "selected": false
            },
            "selected": false,
            "positionAbsolute": {
                "x": 1086.2925487205378,
                "y": 84.92168014974317
            },
            "dragging": false
        },
        {
            "width": 300,
            "height": 251,
            "id": "requestsGet_0",
            "position": {
                "x": 761.713884489628,
                "y": 170.84830553778124
            },
            "type": "customNode",
            "data": {
                "id": "requestsGet_0",
                "label": "Requests Get",
                "name": "requestsGet",
                "version": 1,
                "type": "RequestsGet",
                "baseClasses": ["RequestsGet", "Tool", "StructuredTool", "BaseLangChain"],
                "category": "Tools",
                "description": "Execute HTTP GET requests",
                "inputParams": [
                    {
                        "label": "URL",
                        "name": "url",
                        "type": "string",
                        "description": "Agent will make call to this exact URL. If not specified, agent will try to figure out itself from AIPlugin if provided",
                        "additionalParams": true,
                        "optional": true,
                        "id": "requestsGet_0-input-url-string"
                    },
                    {
                        "label": "Description",
                        "name": "description",
                        "type": "string",
                        "rows": 4,
                        "default": "A portal to the internet. Use this when you need to get specific content from a website. \nInput should be a url (i.e. https://www.google.com). The output will be the text response of the GET request.",
                        "description": "Acts like a prompt to tell agent when it should use this tool",
                        "additionalParams": true,
                        "optional": true,
                        "id": "requestsGet_0-input-description-string"
                    },
                    {
                        "label": "Headers",
                        "name": "headers",
                        "type": "json",
                        "additionalParams": true,
                        "optional": true,
                        "id": "requestsGet_0-input-headers-json"
                    }
                ],
                "inputAnchors": [],
                "inputs": {
                    "url": "",
                    "description": "A portal to the internet. Use this when you need to get specific content from a website. \nInput should be a url (i.e. https://www.google.com). The output will be the text response of the GET request.",
                    "headers": ""
                },
                "outputAnchors": [
                    {
                        "id": "requestsGet_0-output-requestsGet-RequestsGet|Tool|StructuredTool|BaseLangChain",
                        "name": "requestsGet",
                        "label": "RequestsGet",
                        "type": "RequestsGet | Tool | StructuredTool | BaseLangChain"
                    }
                ],
                "outputs": {},
                "selected": false
            },
            "selected": false,
            "positionAbsolute": {
                "x": 761.713884489628,
                "y": 170.84830553778124
            },
            "dragging": false
        },
        {
            "width": 300,
            "height": 251,
            "id": "requestsPost_0",
            "position": {
                "x": 436.4125209312256,
                "y": 306.87715502984184
            },
            "type": "customNode",
            "data": {
                "id": "requestsPost_0",
                "label": "Requests Post",
                "name": "requestsPost",
                "version": 1,
                "type": "RequestsPost",
                "baseClasses": ["RequestsPost", "Tool", "StructuredTool", "BaseLangChain"],
                "category": "Tools",
                "description": "Execute HTTP POST requests",
                "inputParams": [
                    {
                        "label": "URL",
                        "name": "url",
                        "type": "string",
                        "description": "Agent will make call to this exact URL. If not specified, agent will try to figure out itself from AIPlugin if provided",
                        "additionalParams": true,
                        "optional": true,
                        "id": "requestsPost_0-input-url-string"
                    },
                    {
                        "label": "Body",
                        "name": "body",
                        "type": "json",
                        "description": "JSON body for the POST request. If not specified, agent will try to figure out itself from AIPlugin if provided",
                        "additionalParams": true,
                        "optional": true,
                        "id": "requestsPost_0-input-body-json"
                    },
                    {
                        "label": "Description",
                        "name": "description",
                        "type": "string",
                        "rows": 4,
                        "default": "Use this when you want to POST to a website.\nInput should be a json string with two keys: \"url\" and \"data\".\nThe value of \"url\" should be a string, and the value of \"data\" should be a dictionary of \nkey-value pairs you want to POST to the url as a JSON body.\nBe careful to always use double quotes for strings in the json string\nThe output will be the text response of the POST request.",
                        "description": "Acts like a prompt to tell agent when it should use this tool",
                        "additionalParams": true,
                        "optional": true,
                        "id": "requestsPost_0-input-description-string"
                    },
                    {
                        "label": "Headers",
                        "name": "headers",
                        "type": "json",
                        "additionalParams": true,
                        "optional": true,
                        "id": "requestsPost_0-input-headers-json"
                    }
                ],
                "inputAnchors": [],
                "inputs": {
                    "url": "",
                    "body": "",
                    "description": "Use this when you want to POST to a website.\nInput should be a json string with two keys: \"url\" and \"data\".\nThe value of \"url\" should be a string, and the value of \"data\" should be a dictionary of \nkey-value pairs you want to POST to the url as a JSON body.\nBe careful to always use double quotes for strings in the json string\nThe output will be the text response of the POST request.",
                    "headers": ""
                },
                "outputAnchors": [
                    {
                        "id": "requestsPost_0-output-requestsPost-RequestsPost|Tool|StructuredTool|BaseLangChain",
                        "name": "requestsPost",
                        "label": "RequestsPost",
                        "type": "RequestsPost | Tool | StructuredTool | BaseLangChain"
                    }
                ],
                "outputs": {},
                "selected": false
            },
            "selected": false,
            "positionAbsolute": {
                "x": 436.4125209312256,
                "y": 306.87715502984184
            },
            "dragging": false
        },
        {
            "width": 300,
            "height": 523,
            "id": "chatOpenAI_0",
            "position": {
                "x": 802.0103755177098,
                "y": 576.0760341170851
            },
            "type": "customNode",
            "data": {
                "id": "chatOpenAI_0",
                "label": "ChatOpenAI",
                "name": "chatOpenAI",
                "version": 6.0,
                "type": "ChatOpenAI",
                "baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
                "category": "Chat Models",
                "description": "Wrapper around OpenAI large language models that use the Chat endpoint",
                "inputParams": [
                    {
                        "label": "Connect Credential",
                        "name": "credential",
                        "type": "credential",
                        "credentialNames": ["openAIApi"],
                        "id": "chatOpenAI_0-input-credential-credential"
                    },
                    {
                        "label": "Model Name",
                        "name": "modelName",
                        "type": "asyncOptions",
                        "loadMethod": "listModels",
                        "default": "gpt-3.5-turbo",
                        "id": "chatOpenAI_0-input-modelName-options"
                    },
                    {
                        "label": "Temperature",
                        "name": "temperature",
                        "type": "number",
                        "default": 0.9,
                        "optional": true,
                        "id": "chatOpenAI_0-input-temperature-number"
                    },
                    {
                        "label": "Max Tokens",
                        "name": "maxTokens",
                        "type": "number",
                        "optional": true,
                        "additionalParams": true,
                        "id": "chatOpenAI_0-input-maxTokens-number"
                    },
                    {
                        "label": "Top Probability",
                        "name": "topP",
                        "type": "number",
                        "optional": true,
                        "additionalParams": true,
                        "id": "chatOpenAI_0-input-topP-number"
                    },
                    {
                        "label": "Frequency Penalty",
                        "name": "frequencyPenalty",
                        "type": "number",
                        "optional": true,
                        "additionalParams": true,
                        "id": "chatOpenAI_0-input-frequencyPenalty-number"
                    },
                    {
                        "label": "Presence Penalty",
                        "name": "presencePenalty",
                        "type": "number",
                        "optional": true,
                        "additionalParams": true,
                        "id": "chatOpenAI_0-input-presencePenalty-number"
                    },
                    {
                        "label": "Timeout",
                        "name": "timeout",
                        "type": "number",
                        "optional": true,
                        "additionalParams": true,
                        "id": "chatOpenAI_0-input-timeout-number"
                    },
                    {
                        "label": "BasePath",
                        "name": "basepath",
                        "type": "string",
                        "optional": true,
                        "additionalParams": true,
                        "id": "chatOpenAI_0-input-basepath-string"
                    },
                    {
                        "label": "BaseOptions",
                        "name": "baseOptions",
                        "type": "json",
                        "optional": true,
                        "additionalParams": true,
                        "id": "chatOpenAI_0-input-baseOptions-json"
                    },
                    {
                        "label": "Allow Image Uploads",
                        "name": "allowImageUploads",
                        "type": "boolean",
                        "description": "Automatically uses gpt-4-vision-preview when image is being uploaded from chat. Only works with LLMChain, Conversation Chain, ReAct Agent, and Conversational Agent",
                        "default": false,
                        "optional": true,
                        "id": "chatOpenAI_0-input-allowImageUploads-boolean"
                    },
                    {
                        "label": "Image Resolution",
                        "description": "This parameter controls the resolution in which the model views the image.",
                        "name": "imageResolution",
                        "type": "options",
                        "options": [
                            {
                                "label": "Low",
                                "name": "low"
                            },
                            {
                                "label": "High",
                                "name": "high"
                            },
                            {
                                "label": "Auto",
                                "name": "auto"
                            }
                        ],
                        "default": "low",
                        "optional": false,
                        "additionalParams": true,
                        "id": "chatOpenAI_0-input-imageResolution-options"
                    }
                ],
                "inputAnchors": [
                    {
                        "label": "Cache",
                        "name": "cache",
                        "type": "BaseCache",
                        "optional": true,
                        "id": "chatOpenAI_0-input-cache-BaseCache"
                    }
                ],
                "inputs": {
                    "modelName": "gpt-3.5-turbo",
                    "temperature": 0.9,
                    "maxTokens": "",
                    "topP": "",
                    "frequencyPenalty": "",
                    "presencePenalty": "",
                    "timeout": "",
                    "basepath": "",
                    "baseOptions": "",
                    "allowImageUploads": true,
                    "imageResolution": "low"
                },
                "outputAnchors": [
                    {
                        "id": "chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel",
                        "name": "chatOpenAI",
                        "label": "ChatOpenAI",
                        "type": "ChatOpenAI | BaseChatModel | BaseLanguageModel"
                    }
                ],
                "outputs": {},
                "selected": false
            },
            "selected": false,
            "positionAbsolute": {
                "x": 802.0103755177098,
                "y": 576.0760341170851
            },
            "dragging": false
        },
        {
            "width": 300,
            "height": 280,
            "id": "mrklAgentChat_0",
            "position": {
                "x": 1425.5853300862047,
                "y": 441.06218012993924
            },
            "type": "customNode",
            "data": {
                "id": "mrklAgentChat_0",
                "label": "MRKL Agent for Chat Models",
                "name": "mrklAgentChat",
                "version": 2,
                "type": "AgentExecutor",
                "baseClasses": ["AgentExecutor", "BaseChain"],
                "category": "Agents",
                "description": "Agent that uses the ReAct Framework to decide what action to take, optimized to be used with Chat Models",
                "inputParams": [],
                "inputAnchors": [
                    {
                        "label": "Allowed Tools",
                        "name": "tools",
                        "type": "Tool",
                        "list": true,
                        "id": "mrklAgentChat_0-input-tools-Tool"
                    },
                    {
                        "label": "Language Model",
                        "name": "model",
                        "type": "BaseLanguageModel",
                        "id": "mrklAgentChat_0-input-model-BaseLanguageModel"
                    },
                    {
                        "label": "Input Moderation",
                        "description": "Detect text that could generate harmful output and prevent it from being sent to the language model",
                        "name": "inputModeration",
                        "type": "Moderation",
                        "optional": true,
                        "list": true,
                        "id": "mrklAgentChat_0-input-inputModeration-Moderation"
                    },
                    {
                        "label": "Max Iterations",
                        "name": "maxIterations",
                        "type": "number",
                        "optional": true,
                        "additionalParams": true,
                        "id": "mrklAgentChat_0-input-maxIterations-number"
                    }
                ],
                "inputs": {
                    "inputModeration": "",
                    "tools": ["{{requestsGet_0.data.instance}}", "{{requestsPost_0.data.instance}}", "{{aiPlugin_0.data.instance}}"],
                    "model": "{{chatOpenAI_0.data.instance}}"
                },
                "outputAnchors": [
                    {
                        "id": "mrklAgentChat_0-output-mrklAgentChat-AgentExecutor|BaseChain",
                        "name": "mrklAgentChat",
                        "label": "AgentExecutor",
                        "type": "AgentExecutor | BaseChain"
                    }
                ],
                "outputs": {},
                "selected": false
            },
            "selected": false,
            "positionAbsolute": {
                "x": 1425.5853300862047,
                "y": 441.06218012993924
            },
            "dragging": false
        }
    ],
    "edges": [
        {
            "source": "aiPlugin_0",
            "sourceHandle": "aiPlugin_0-output-aiPlugin-AIPlugin|Tool",
            "target": "mrklAgentChat_0",
            "targetHandle": "mrklAgentChat_0-input-tools-Tool",
            "type": "buttonedge",
            "id": "aiPlugin_0-aiPlugin_0-output-aiPlugin-AIPlugin|Tool-mrklAgentChat_0-mrklAgentChat_0-input-tools-Tool",
            "data": {
                "label": ""
            }
        },
        {
            "source": "requestsGet_0",
            "sourceHandle": "requestsGet_0-output-requestsGet-RequestsGet|Tool|StructuredTool|BaseLangChain",
            "target": "mrklAgentChat_0",
            "targetHandle": "mrklAgentChat_0-input-tools-Tool",
            "type": "buttonedge",
            "id": "requestsGet_0-requestsGet_0-output-requestsGet-RequestsGet|Tool|StructuredTool|BaseLangChain-mrklAgentChat_0-mrklAgentChat_0-input-tools-Tool",
            "data": {
                "label": ""
            }
        },
        {
            "source": "requestsPost_0",
            "sourceHandle": "requestsPost_0-output-requestsPost-RequestsPost|Tool|StructuredTool|BaseLangChain",
            "target": "mrklAgentChat_0",
            "targetHandle": "mrklAgentChat_0-input-tools-Tool",
            "type": "buttonedge",
            "id": "requestsPost_0-requestsPost_0-output-requestsPost-RequestsPost|Tool|StructuredTool|BaseLangChain-mrklAgentChat_0-mrklAgentChat_0-input-tools-Tool",
            "data": {
                "label": ""
            }
        },
        {
            "source": "chatOpenAI_0",
            "sourceHandle": "chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel",
            "target": "mrklAgentChat_0",
            "targetHandle": "mrklAgentChat_0-input-model-BaseLanguageModel",
            "type": "buttonedge",
            "id": "chatOpenAI_0-chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel-mrklAgentChat_0-mrklAgentChat_0-input-model-BaseLanguageModel",
            "data": {
                "label": ""
            }
        }
    ]
}

@@ -1,6 +1,6 @@
{
    "description": "An agent that uses ReAct logic to decide what action to take",
    "categories": "Calculator Tool,SerpAPI,ChatOpenAI,MRKL Agent,Langchain",
    "categories": "Calculator Tool,SerpAPI,ChatOpenAI,ReAct Agent,Langchain",
    "framework": "Langchain",
    "nodes": [
        {

@@ -43,17 +43,17 @@
            "dragging": false
        },
        {
            "id": "mrklAgentChat_0",
            "id": "reactAgentChat_0",
            "position": {
                "x": 905.8535326018256,
                "y": 388.58312223652564
            },
            "type": "customNode",
            "data": {
                "id": "mrklAgentChat_0",
                "id": "reactAgentChat_0",
                "label": "ReAct Agent for Chat Models",
                "version": 4,
                "name": "mrklAgentChat",
                "name": "reactAgentChat",
                "type": "AgentExecutor",
                "baseClasses": ["AgentExecutor", "BaseChain", "Runnable"],
                "category": "Agents",

@@ -65,19 +65,19 @@
                        "name": "tools",
                        "type": "Tool",
                        "list": true,
                        "id": "mrklAgentChat_0-input-tools-Tool"
                        "id": "reactAgentChat_0-input-tools-Tool"
                    },
                    {
                        "label": "Chat Model",
                        "name": "model",
                        "type": "BaseChatModel",
                        "id": "mrklAgentChat_0-input-model-BaseChatModel"
                        "id": "reactAgentChat_0-input-model-BaseChatModel"
                    },
                    {
                        "label": "Memory",
                        "name": "memory",
                        "type": "BaseChatMemory",
                        "id": "mrklAgentChat_0-input-memory-BaseChatMemory"
                        "id": "reactAgentChat_0-input-memory-BaseChatMemory"
                    },
                    {
                        "label": "Input Moderation",

@@ -86,7 +86,7 @@
                        "type": "Moderation",
                        "optional": true,
                        "list": true,
                        "id": "mrklAgentChat_0-input-inputModeration-Moderation"
                        "id": "reactAgentChat_0-input-inputModeration-Moderation"
                    },
                    {
                        "label": "Max Iterations",

@@ -94,7 +94,7 @@
                        "type": "number",
                        "optional": true,
                        "additionalParams": true,
                        "id": "mrklAgentChat_0-input-maxIterations-number"
                        "id": "reactAgentChat_0-input-maxIterations-number"
                    }
                ],
                "inputs": {

@@ -105,8 +105,8 @@
                },
                "outputAnchors": [
                    {
                        "id": "mrklAgentChat_0-output-mrklAgentChat-AgentExecutor|BaseChain|Runnable",
                        "name": "mrklAgentChat",
                        "id": "reactAgentChat_0-output-reactAgentChat-AgentExecutor|BaseChain|Runnable",
                        "name": "reactAgentChat",
                        "label": "AgentExecutor",
                        "description": "Agent that uses the ReAct logic to decide what action to take, optimized to be used with Chat Models",
                        "type": "AgentExecutor | BaseChain | Runnable"

@@ -447,34 +447,34 @@
        {
            "source": "calculator_1",
            "sourceHandle": "calculator_1-output-calculator-Calculator|Tool|StructuredTool|BaseLangChain",
            "target": "mrklAgentChat_0",
            "targetHandle": "mrklAgentChat_0-input-tools-Tool",
            "target": "reactAgentChat_0",
            "targetHandle": "reactAgentChat_0-input-tools-Tool",
            "type": "buttonedge",
            "id": "calculator_1-calculator_1-output-calculator-Calculator|Tool|StructuredTool|BaseLangChain-mrklAgentChat_0-mrklAgentChat_0-input-tools-Tool"
            "id": "calculator_1-calculator_1-output-calculator-Calculator|Tool|StructuredTool|BaseLangChain-reactAgentChat_0-reactAgentChat_0-input-tools-Tool"
        },
        {
            "source": "RedisBackedChatMemory_0",
            "sourceHandle": "RedisBackedChatMemory_0-output-RedisBackedChatMemory-RedisBackedChatMemory|BaseChatMemory|BaseMemory",
            "target": "mrklAgentChat_0",
            "targetHandle": "mrklAgentChat_0-input-memory-BaseChatMemory",
            "target": "reactAgentChat_0",
            "targetHandle": "reactAgentChat_0-input-memory-BaseChatMemory",
            "type": "buttonedge",
            "id": "RedisBackedChatMemory_0-RedisBackedChatMemory_0-output-RedisBackedChatMemory-RedisBackedChatMemory|BaseChatMemory|BaseMemory-mrklAgentChat_0-mrklAgentChat_0-input-memory-BaseChatMemory"
            "id": "RedisBackedChatMemory_0-RedisBackedChatMemory_0-output-RedisBackedChatMemory-RedisBackedChatMemory|BaseChatMemory|BaseMemory-reactAgentChat_0-reactAgentChat_0-input-memory-BaseChatMemory"
        },
        {
            "source": "chatOpenAI_0",
            "sourceHandle": "chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable",
            "target": "mrklAgentChat_0",
            "targetHandle": "mrklAgentChat_0-input-model-BaseChatModel",
            "target": "reactAgentChat_0",
            "targetHandle": "reactAgentChat_0-input-model-BaseChatModel",
            "type": "buttonedge",
            "id": "chatOpenAI_0-chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable-mrklAgentChat_0-mrklAgentChat_0-input-model-BaseChatModel"
            "id": "chatOpenAI_0-chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|Runnable-reactAgentChat_0-reactAgentChat_0-input-model-BaseChatModel"
        },
        {
            "source": "serper_0",
            "sourceHandle": "serper_0-output-serper-Serper|Tool|StructuredTool|Runnable",
            "target": "mrklAgentChat_0",
            "targetHandle": "mrklAgentChat_0-input-tools-Tool",
            "target": "reactAgentChat_0",
            "targetHandle": "reactAgentChat_0-input-tools-Tool",
            "type": "buttonedge",
            "id": "serper_0-serper_0-output-serper-Serper|Tool|StructuredTool|Runnable-mrklAgentChat_0-mrklAgentChat_0-input-tools-Tool"
            "id": "serper_0-serper_0-output-serper-Serper|Tool|StructuredTool|Runnable-reactAgentChat_0-reactAgentChat_0-input-tools-Tool"
        }
    ]
}

@@ -18,8 +18,8 @@ export const utilGetUploadsConfig = async (chatflowid: string): Promise<any> =>
        throw new InternalFlowiseError(StatusCodes.NOT_FOUND, `Chatflow ${chatflowid} not found`)
    }

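    // Chains/agents that accept image uploads from the chat, and the chat model nodes that can process them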
    const uploadAllowedNodes = ['llmChain', 'conversationChain', 'mrklAgentChat', 'conversationalAgent']
    const uploadProcessingNodes = ['chatOpenAI', 'chatAnthropic', 'awsChatBedrock', 'azureChatOpenAI']
    const uploadAllowedNodes = ['llmChain', 'conversationChain', 'reactAgentChat', 'conversationalAgent', 'toolAgent']
    const uploadProcessingNodes = ['chatOpenAI', 'chatAnthropic', 'awsChatBedrock', 'azureChatOpenAI', 'chatGoogleGenerativeAI']

    const flowObj = JSON.parse(chatflow.flowData)
    const imgUploadSizeAndTypes: IUploadFileSizeAndTypes[] = []

@@ -1022,7 +1022,9 @@ export const isFlowValidForStream = (reactFlowNodes: IReactFlowNode[], endingNod
            'chatOllama',
            'awsChatBedrock',
            'chatMistralAI',
            'groqChat'
            'groqChat',
            'chatCohere',
            'chatGoogleGenerativeAI'
        ],
        LLMs: ['azureOpenAI', 'openAI', 'ollama']
    }

@@ -1050,10 +1052,22 @@
            'csvAgent',
            'airtableAgent',
            'conversationalRetrievalAgent',
            'openAIToolAgent'
            'openAIToolAgent',
            'toolAgent'
        ]
        isValidChainOrAgent = whitelistAgents.includes(endingNodeData.name)

        // Anthropic & Groq Function Calling streaming is still not supported - https://docs.anthropic.com/claude/docs/tool-use
        const model = endingNodeData.inputs?.model
        if (endingNodeData.name.includes('toolAgent')) {
            if (typeof model === 'string' && (model.includes('chatAnthropic') || model.includes('groqChat'))) {
                return false
            } else if (typeof model === 'object' && 'id' in model && model['id'].includes('chatAnthropic')) {
                return false
            }
        }
    } else if (endingNodeData.category === 'Engine') {
        // Engines that are available to stream
        const whitelistEngine = ['contextChatEngine', 'simpleChatEngine', 'queryEngine', 'subQuestionQueryEngine']
        isValidChainOrAgent = whitelistEngine.includes(endingNodeData.name)
    }

632 pnpm-lock.yaml
File diff suppressed because it is too large