Feature/seq agents (#2798)

* update build functions

* sequential agents

* update langchain to 0.2, added sequential agent nodes

* add marketplace templates

* update howto wordings

* Merge branch 'main' into feature/Seq-Agents

# Conflicts:
#	pnpm-lock.yaml

* update deprecated functions and add new sequential nodes

* add marketplace templates

* update marketplace templates, add structured output to llm node

* add multi agents template

* update LLM node with bind models

* update cypress version

* update templates sticky note wordings

* update tool node to include human in loop action

* update structured outputs error from models

* update cohere package to resolve google genai pipeThrough bug

* update mistral package version, added message reconstruction before invoking seq agent

* add HITL to agent

* update state messages restructuring

* update load and split methods for S3 directory
This commit is contained in:
Henry Heng 2024-07-22 17:46:14 +01:00 committed by GitHub
parent 34d0e4302c
commit bca4de0c63
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
152 changed files with 55307 additions and 35236 deletions

View File

@ -32,7 +32,7 @@
"@babel/preset-env": "^7.19.4",
"@babel/preset-typescript": "7.18.6",
"@types/express": "^4.17.13",
"@typescript-eslint/typescript-estree": "^5.39.0",
"@typescript-eslint/typescript-estree": "^7.13.1",
"eslint": "^8.24.0",
"eslint-config-prettier": "^8.3.0",
"eslint-config-react-app": "^7.0.1",
@ -50,7 +50,7 @@
"rimraf": "^3.0.2",
"run-script-os": "^1.1.6",
"turbo": "1.10.16",
"typescript": "^4.8.4"
"typescript": "^5.4.5"
},
"pnpm": {
"onlyBuiltDependencies": [
@ -63,8 +63,8 @@
"pnpm": ">=9"
},
"resolutions": {
"@qdrant/openapi-typescript-fetch": "1.2.1",
"@google/generative-ai": "^0.7.0",
"@qdrant/openapi-typescript-fetch": "1.2.6",
"@google/generative-ai": "^0.15.0",
"openai": "4.51.0"
},
"eslintIgnore": [

View File

@ -1,4 +1,3 @@
import { ChatOpenAI } from '@langchain/openai'
import { APIChain, createOpenAPIChain } from 'langchain/chains'
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses } from '../../../src/utils'
@ -6,6 +5,7 @@ import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from
import { checkInputs, Moderation, streamResponse } from '../../moderation/Moderation'
import { formatResponse } from '../../outputparsers/OutputParserHelpers'
import { getFileFromStorage } from '../../../src'
import { BaseChatModel } from '@langchain/core/language_models/chat_models'
class OpenApiChain_Chains implements INode {
label: string
@ -29,9 +29,9 @@ class OpenApiChain_Chains implements INode {
this.baseClasses = [this.type, ...getBaseClasses(APIChain)]
this.inputs = [
{
label: 'ChatOpenAI Model',
label: 'Chat Model',
name: 'model',
type: 'ChatOpenAI'
type: 'BaseChatModel'
},
{
label: 'YAML Link',
@ -96,7 +96,7 @@ class OpenApiChain_Chains implements INode {
}
const initChain = async (nodeData: INodeData, options: ICommonObject) => {
const model = nodeData.inputs?.model as ChatOpenAI
const model = nodeData.inputs?.model as BaseChatModel
const headers = nodeData.inputs?.headers as string
const yamlLink = nodeData.inputs?.yamlLink as string
const yamlFileBase64 = nodeData.inputs?.yamlFile as string

View File

@ -1,10 +1,9 @@
import { BaseCache } from '@langchain/core/caches'
import { BaseChatModelParams } from '@langchain/core/language_models/chat_models'
import { BaseBedrockInput } from '@langchain/community/dist/utils/bedrock'
import { ICommonObject, IMultiModalOption, INode, INodeData, INodeOptionsValue, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { BedrockChat } from './FlowiseAWSChatBedrock'
import { getModels, getRegions, MODEL_TYPE } from '../../../src/modelLoader'
import { BedrockChatFields } from '@langchain/community/chat_models/bedrock'
/**
* @author Michael Connor <mlconnor@yahoo.com>
@ -116,7 +115,7 @@ class AWSChatBedrock_ChatModels implements INode {
const cache = nodeData.inputs?.cache as BaseCache
const streaming = nodeData.inputs?.streaming as boolean
const obj: BaseBedrockInput & BaseChatModelParams = {
const obj: BedrockChatFields = {
region: iRegion,
model: customModel ? customModel : iModel,
maxTokens: parseInt(iMax_tokens_to_sample, 10),
@ -154,7 +153,7 @@ class AWSChatBedrock_ChatModels implements INode {
}
const amazonBedrock = new BedrockChat(nodeData.id, obj)
if (obj.model.includes('anthropic.claude-3')) amazonBedrock.setMultiModalOption(multiModalOption)
if (obj.model?.includes('anthropic.claude-3')) amazonBedrock.setMultiModalOption(multiModalOption)
return amazonBedrock
}
}

View File

@ -1,6 +1,4 @@
import { BaseChatModelParams } from '@langchain/core/language_models/chat_models'
import { BedrockChat as LCBedrockChat } from '@langchain/community/chat_models/bedrock'
import { BaseBedrockInput } from '@langchain/community/dist/utils/bedrock'
import { BedrockChatFields, BedrockChat as LCBedrockChat } from '@langchain/community/chat_models/bedrock'
import { IVisionChatModal, IMultiModalOption } from '../../../src'
export class BedrockChat extends LCBedrockChat implements IVisionChatModal {
@ -9,7 +7,7 @@ export class BedrockChat extends LCBedrockChat implements IVisionChatModal {
multiModalOption: IMultiModalOption
id: string
constructor(id: string, fields: BaseBedrockInput & BaseChatModelParams) {
constructor(id: string, fields: BedrockChatFields) {
super(fields)
this.id = id
this.configuredModel = fields?.model || ''
@ -17,8 +15,8 @@ export class BedrockChat extends LCBedrockChat implements IVisionChatModal {
}
revertToOriginalModel(): void {
super.model = this.configuredModel
super.maxTokens = this.configuredMaxToken
this.model = this.configuredModel
this.maxTokens = this.configuredMaxToken
}
setMultiModalOption(multiModalOption: IMultiModalOption): void {
@ -27,8 +25,8 @@ export class BedrockChat extends LCBedrockChat implements IVisionChatModal {
setVisionModel(): void {
if (!this.model.startsWith('claude-3')) {
super.model = 'anthropic.claude-3-haiku-20240307-v1:0'
super.maxTokens = this.configuredMaxToken ? this.configuredMaxToken : 1024
this.model = 'anthropic.claude-3-haiku-20240307-v1:0'
this.maxTokens = this.configuredMaxToken ? this.configuredMaxToken : 1024
}
}
}

View File

@ -1,57 +0,0 @@
import { BaseCache } from '@langchain/core/caches'
import { NIBittensorChatModel, BittensorInput } from 'langchain/experimental/chat_models/bittensor'
import { INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses } from '../../../src/utils'
class Bittensor_ChatModels implements INode {
label: string
name: string
version: number
type: string
icon: string
category: string
description: string
baseClasses: string[]
inputs: INodeParams[]
constructor() {
this.label = 'NIBittensorChat'
this.name = 'NIBittensorChatModel'
this.version = 2.0
this.type = 'BittensorChat'
this.icon = 'NIBittensor.svg'
this.category = 'Chat Models'
this.description = 'Wrapper around Bittensor subnet 1 large language models'
this.baseClasses = [this.type, ...getBaseClasses(NIBittensorChatModel)]
this.inputs = [
{
label: 'Cache',
name: 'cache',
type: 'BaseCache',
optional: true
},
{
label: 'System prompt',
name: 'system_prompt',
type: 'string',
additionalParams: true,
optional: true
}
]
}
async init(nodeData: INodeData, _: string): Promise<any> {
const system_prompt = nodeData.inputs?.system_prompt as string
const cache = nodeData.inputs?.cache as BaseCache
const obj: Partial<BittensorInput> = {
systemPrompt: system_prompt
}
if (cache) obj.cache = cache
const model = new NIBittensorChatModel(obj)
return model
}
}
module.exports = { nodeClass: Bittensor_ChatModels }

View File

@ -1 +0,0 @@
<svg width="32" height="32" fill="none" xmlns="http://www.w3.org/2000/svg"><path d="M18.64 25.698V29H8l1.61-6.25a9.81 9.81 0 0 0-.045-4.5C9.027 15.808 8 15.394 8 10.824c.01-2.35.916-4.601 2.517-6.256C12.12 2.913 14.285 1.989 16.54 2c2.254.01 4.412.955 5.999 2.625 1.587 1.67 2.472 3.93 2.462 6.28V12l2 4h-2v4.208a3.821 3.821 0 0 1-1.08 2.373 3.531 3.531 0 0 1-2.306 1.054c-.165.01-.375.004-.606-.012-1.242-.085-2.367.83-2.367 2.075Z" fill="#000" stroke="#000" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/><path d="M21 13h-2l-1-2m3-1-1-2h-4m-3 1 2 4m-1 6 3-3h4" stroke="#fff" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/></svg>

Before

Width:  |  Height:  |  Size: 666 B

View File

@ -16,8 +16,8 @@ export class ChatAnthropic extends LangchainChatAnthropic implements IVisionChat
}
revertToOriginalModel(): void {
super.modelName = this.configuredModel
super.maxTokens = this.configuredMaxToken
this.modelName = this.configuredModel
this.maxTokens = this.configuredMaxToken
}
setMultiModalOption(multiModalOption: IMultiModalOption): void {
@ -26,8 +26,8 @@ export class ChatAnthropic extends LangchainChatAnthropic implements IVisionChat
setVisionModel(): void {
if (!this.modelName.startsWith('claude-3')) {
super.modelName = 'claude-3-haiku-20240307'
super.maxTokens = this.configuredMaxToken ? this.configuredMaxToken : 2048
this.modelName = 'claude-3-haiku-20240307'
this.maxTokens = this.configuredMaxToken ? this.configuredMaxToken : 2048
}
}
}

View File

@ -1,8 +1,8 @@
import { BaseMessage, AIMessage, AIMessageChunk, isBaseMessage, ChatMessage, MessageContent } from '@langchain/core/messages'
import { BaseMessage, AIMessage, AIMessageChunk, isBaseMessage, ChatMessage, MessageContentComplex } from '@langchain/core/messages'
import { CallbackManagerForLLMRun } from '@langchain/core/callbacks/manager'
import { BaseChatModel, type BaseChatModelParams } from '@langchain/core/language_models/chat_models'
import { ChatGeneration, ChatGenerationChunk, ChatResult } from '@langchain/core/outputs'
import { ToolCall } from '@langchain/core/messages/tool'
import { ToolCallChunk } from '@langchain/core/messages/tool'
import { NewTokenIndices } from '@langchain/core/callbacks/base'
import {
EnhancedGenerateContentResponse,
@ -12,11 +12,19 @@ import {
GenerativeModel,
GoogleGenerativeAI as GenerativeAI
} from '@google/generative-ai'
import type { SafetySetting } from '@google/generative-ai'
import type {
FunctionCallPart,
FunctionResponsePart,
SafetySetting,
UsageMetadata,
FunctionDeclarationsTool as GoogleGenerativeAIFunctionDeclarationsTool,
GenerateContentRequest
} from '@google/generative-ai'
import { ICommonObject, IMultiModalOption, IVisionChatModal } from '../../../src'
import { StructuredToolInterface } from '@langchain/core/tools'
import { isStructuredTool } from '@langchain/core/utils/function_calling'
import { zodToJsonSchema } from 'zod-to-json-schema'
import { BaseLanguageModelCallOptions } from '@langchain/core/language_models/base'
interface TokenUsage {
completionTokens?: number
@ -24,7 +32,17 @@ interface TokenUsage {
totalTokens?: number
}
export interface GoogleGenerativeAIChatInput extends BaseChatModelParams {
interface GoogleGenerativeAIChatCallOptions extends BaseLanguageModelCallOptions {
tools?: StructuredToolInterface[] | GoogleGenerativeAIFunctionDeclarationsTool[]
/**
* Whether or not to include usage data, like token counts
* in the streamed response chunks.
* @default true
*/
streamUsage?: boolean
}
export interface GoogleGenerativeAIChatInput extends BaseChatModelParams, Pick<GoogleGenerativeAIChatCallOptions, 'streamUsage'> {
modelName?: string
model?: string
temperature?: number
@ -34,10 +52,15 @@ export interface GoogleGenerativeAIChatInput extends BaseChatModelParams {
stopSequences?: string[]
safetySettings?: SafetySetting[]
apiKey?: string
apiVersion?: string
baseUrl?: string
streaming?: boolean
}
class LangchainChatGoogleGenerativeAI extends BaseChatModel implements GoogleGenerativeAIChatInput {
class LangchainChatGoogleGenerativeAI
extends BaseChatModel<GoogleGenerativeAIChatCallOptions, AIMessageChunk>
implements GoogleGenerativeAIChatInput
{
modelName = 'gemini-pro'
temperature?: number
@ -56,6 +79,8 @@ class LangchainChatGoogleGenerativeAI extends BaseChatModel implements GoogleGen
streaming = false
streamUsage = true
private client: GenerativeModel
get _isMultimodalModel() {
@ -114,6 +139,8 @@ class LangchainChatGoogleGenerativeAI extends BaseChatModel implements GoogleGen
this.streaming = fields?.streaming ?? this.streaming
this.streamUsage = fields?.streamUsage ?? this.streamUsage
this.getClient()
}
@ -146,6 +173,18 @@ class LangchainChatGoogleGenerativeAI extends BaseChatModel implements GoogleGen
return this.bind({ tools: convertToGeminiTools(tools), ...kwargs })
}
invocationParams(options?: this['ParsedCallOptions']): Omit<GenerateContentRequest, 'contents'> {
const tools = options?.tools as GoogleGenerativeAIFunctionDeclarationsTool[] | StructuredToolInterface[] | undefined
if (Array.isArray(tools) && !tools.some((t: any) => !('lc_namespace' in t))) {
return {
tools: convertToGeminiTools(options?.tools as StructuredToolInterface[]) as any
}
}
return {
tools: options?.tools as GoogleGenerativeAIFunctionDeclarationsTool[] | undefined
}
}
convertFunctionResponse(prompts: Content[]) {
for (let i = 0; i < prompts.length; i += 1) {
if (prompts[i].role === 'function') {
@ -178,7 +217,7 @@ class LangchainChatGoogleGenerativeAI extends BaseChatModel implements GoogleGen
this.convertFunctionResponse(prompt)
if (tools.length > 0) {
this.getClient(tools)
this.getClient(tools as Tool[])
} else {
this.getClient()
}
@ -214,6 +253,7 @@ class LangchainChatGoogleGenerativeAI extends BaseChatModel implements GoogleGen
const tokenUsage: TokenUsage = {}
const stream = this._streamResponseChunks(messages, options, runManager)
const finalChunks: Record<number, ChatGenerationChunk> = {}
for await (const chunk of stream) {
const index = (chunk.generationInfo as NewTokenIndices)?.completion ?? 0
if (finalChunks[index] === undefined) {
@ -239,45 +279,62 @@ class LangchainChatGoogleGenerativeAI extends BaseChatModel implements GoogleGen
let prompt = convertBaseMessagesToContent(messages, this._isMultimodalModel)
prompt = checkIfEmptyContentAndSameRole(prompt)
//@ts-ignore
if (options.tools !== undefined && options.tools.length > 0) {
const result = await this._generateNonStreaming(prompt, options, runManager)
const generationMessage = result.generations[0].message as AIMessage
if (generationMessage === undefined) {
throw new Error('Could not parse Groq output.')
}
const toolCallChunks = generationMessage.tool_calls?.map((toolCall, i) => ({
name: toolCall.name,
args: JSON.stringify(toolCall.args),
id: toolCall.id,
index: i
}))
yield new ChatGenerationChunk({
message: new AIMessageChunk({
content: generationMessage.content,
additional_kwargs: generationMessage.additional_kwargs,
tool_call_chunks: toolCallChunks
}),
text: generationMessage.tool_calls?.length ? '' : (generationMessage.content as string)
})
const parameters = this.invocationParams(options)
const request = {
...parameters,
contents: prompt
}
const tools = options.tools ?? []
if (tools.length > 0) {
this.getClient(tools as Tool[])
} else {
const stream = await this.caller.callWithOptions({ signal: options?.signal }, async () => {
this.getClient()
const { stream } = await this.client.generateContentStream({
contents: prompt
})
return stream
})
this.getClient()
}
for await (const response of stream) {
const chunk = convertResponseContentToChatGenerationChunk(response)
if (!chunk) {
continue
const stream = await this.caller.callWithOptions({ signal: options?.signal }, async () => {
const { stream } = await this.client.generateContentStream(request)
return stream
})
let usageMetadata: UsageMetadata | ICommonObject | undefined
let index = 0
for await (const response of stream) {
if ('usageMetadata' in response && this.streamUsage !== false && options.streamUsage !== false) {
const genAIUsageMetadata = response.usageMetadata as {
promptTokenCount: number
candidatesTokenCount: number
totalTokenCount: number
}
if (!usageMetadata) {
usageMetadata = {
input_tokens: genAIUsageMetadata.promptTokenCount,
output_tokens: genAIUsageMetadata.candidatesTokenCount,
total_tokens: genAIUsageMetadata.totalTokenCount
}
} else {
// Under the hood, LangChain combines the prompt tokens. Google returns the updated
// total each time, so we need to find the difference between the tokens.
const outputTokenDiff = genAIUsageMetadata.candidatesTokenCount - (usageMetadata as ICommonObject).output_tokens
usageMetadata = {
input_tokens: 0,
output_tokens: outputTokenDiff,
total_tokens: outputTokenDiff
}
}
yield chunk
await runManager?.handleLLMNewToken(chunk.text ?? '')
}
const chunk = convertResponseContentToChatGenerationChunk(response, {
usageMetadata: usageMetadata as UsageMetadata,
index
})
index += 1
if (!chunk) {
continue
}
yield chunk
await runManager?.handleLLMNewToken(chunk.text ?? '')
}
}
}
@ -296,8 +353,8 @@ export class ChatGoogleGenerativeAI extends LangchainChatGoogleGenerativeAI impl
}
revertToOriginalModel(): void {
super.modelName = this.configuredModel
super.maxOutputTokens = this.configuredMaxToken
this.modelName = this.configuredModel
this.maxOutputTokens = this.configuredMaxToken
}
setMultiModalOption(multiModalOption: IMultiModalOption): void {
@ -306,12 +363,25 @@ export class ChatGoogleGenerativeAI extends LangchainChatGoogleGenerativeAI impl
setVisionModel(): void {
if (this.modelName !== 'gemini-pro-vision' && this.modelName !== 'gemini-1.5-pro-latest') {
super.modelName = 'gemini-1.5-pro-latest'
super.maxOutputTokens = this.configuredMaxToken ? this.configuredMaxToken : 8192
this.modelName = 'gemini-1.5-pro-latest'
this.maxOutputTokens = this.configuredMaxToken ? this.configuredMaxToken : 8192
}
}
}
function messageContentMedia(content: MessageContentComplex): Part {
if ('mimeType' in content && 'data' in content) {
return {
inlineData: {
mimeType: content.mimeType,
data: content.data
}
}
}
throw new Error('Invalid media content')
}
function getMessageAuthor(message: BaseMessage) {
const type = message._getType()
if (ChatMessage.isInstance(message)) {
@ -336,69 +406,88 @@ function convertAuthorToRole(author: string) {
case 'tool':
return 'function'
default:
// Instead of throwing, we return model
// Instead of throwing, we return model (Needed for Multi Agent)
// throw new Error(`Unknown / unsupported author: ${author}`)
return 'model'
}
}
function convertMessageContentToParts(content: MessageContent, isMultimodalModel: boolean): Part[] {
if (typeof content === 'string') {
return [{ text: content }]
function convertMessageContentToParts(message: BaseMessage, isMultimodalModel: boolean): Part[] {
if (typeof message.content === 'string' && message.content !== '') {
return [{ text: message.content }]
}
return content.map((c) => {
if (c.type === 'text') {
return {
text: c.text
}
}
let functionCalls: FunctionCallPart[] = []
let functionResponses: FunctionResponsePart[] = []
let messageParts: Part[] = []
if (c.type === 'tool_use') {
return {
functionCall: c.functionCall
if ('tool_calls' in message && Array.isArray(message.tool_calls) && message.tool_calls.length > 0) {
functionCalls = message.tool_calls.map((tc) => ({
functionCall: {
name: tc.name,
args: tc.args
}
}
/*if (c.type === "tool_use" || c.type === "tool_result") {
// TODO: Fix when SDK types are fixed
return {
...contentPart,
// eslint-disable-next-line @typescript-eslint/no-explicit-any
} as any;
}*/
if (c.type === 'image_url') {
if (!isMultimodalModel) {
throw new Error(`This model does not support images`)
}
let source
if (typeof c.image_url === 'string') {
source = c.image_url
} else if (typeof c.image_url === 'object' && 'url' in c.image_url) {
source = c.image_url.url
} else {
throw new Error('Please provide image as base64 encoded data URL')
}
const [dm, data] = source.split(',')
if (!dm.startsWith('data:')) {
throw new Error('Please provide image as base64 encoded data URL')
}
const [mimeType, encoding] = dm.replace(/^data:/, '').split(';')
if (encoding !== 'base64') {
throw new Error('Please provide image as base64 encoded data URL')
}
return {
inlineData: {
data,
mimeType
}))
} else if (message._getType() === 'tool' && message.name && message.content) {
functionResponses = [
{
functionResponse: {
name: message.name,
response: message.content
}
}
}
throw new Error(`Unknown content type ${(c as { type: string }).type}`)
})
]
} else if (Array.isArray(message.content)) {
messageParts = message.content.map((c) => {
if (c.type === 'text') {
return {
text: c.text
}
}
if (c.type === 'image_url') {
if (!isMultimodalModel) {
throw new Error(`This model does not support images`)
}
let source
if (typeof c.image_url === 'string') {
source = c.image_url
} else if (typeof c.image_url === 'object' && 'url' in c.image_url) {
source = c.image_url.url
} else {
throw new Error('Please provide image as base64 encoded data URL')
}
const [dm, data] = source.split(',')
if (!dm.startsWith('data:')) {
throw new Error('Please provide image as base64 encoded data URL')
}
const [mimeType, encoding] = dm.replace(/^data:/, '').split(';')
if (encoding !== 'base64') {
throw new Error('Please provide image as base64 encoded data URL')
}
return {
inlineData: {
data,
mimeType
}
}
} else if (c.type === 'media') {
return messageContentMedia(c)
} else if (c.type === 'tool_use') {
return {
functionCall: {
name: c.name,
args: c.input
}
}
}
throw new Error(`Unknown content type ${(c as { type: string }).type}`)
})
}
return [...messageParts, ...functionCalls, ...functionResponses]
}
/*
@ -440,7 +529,7 @@ function convertBaseMessagesToContent(messages: BaseMessage[], isMultimodalModel
throw new Error('Google Generative AI requires alternate messages between authors')
}
const parts = convertMessageContentToParts(message.content, isMultimodalModel)
const parts = convertMessageContentToParts(message, isMultimodalModel)
if (acc.mergeWithPreviousContent) {
const prevContent = acc.content[acc.content.length - 1]
@ -454,8 +543,13 @@ function convertBaseMessagesToContent(messages: BaseMessage[], isMultimodalModel
content: acc.content
}
}
let actualRole = role
if (actualRole === 'function') {
// GenerativeAI API will throw an error if the role is not "user" or "model."
actualRole = 'user'
}
const content: Content = {
role,
role: actualRole,
parts
}
return {
@ -467,80 +561,80 @@ function convertBaseMessagesToContent(messages: BaseMessage[], isMultimodalModel
).content
}
function mapGenerateContentResultToChatResult(response: EnhancedGenerateContentResponse): ChatResult {
function mapGenerateContentResultToChatResult(
response: EnhancedGenerateContentResponse,
extra?: {
usageMetadata: UsageMetadata | undefined
}
): ChatResult {
// if rejected or error, return empty generations with reason in filters
if (!response.candidates || response.candidates.length === 0 || !response.candidates[0]) {
return {
generations: [],
llmOutput: {
filters: response?.promptFeedback
filters: response.promptFeedback
}
}
}
const [candidate] = response.candidates
const { content, ...generationInfo } = candidate
const text = content.parts.map(({ text }) => text).join('')
if (content.parts.some((part) => part.functionCall)) {
const toolCalls: ToolCall[] = []
for (const fcPart of content.parts) {
const fc = fcPart.functionCall
if (fc) {
const { name, args } = fc
toolCalls.push({ name, args })
}
}
const functionCalls = toolCalls.map((tool) => {
return { functionCall: { name: tool.name, args: tool.args }, type: 'tool_use' }
})
const generation: ChatGeneration = {
text,
message: new AIMessage({
content: functionCalls,
name: !content ? undefined : content.role,
additional_kwargs: generationInfo,
tool_calls: toolCalls
}),
generationInfo
}
return {
generations: [generation]
}
} else {
const generation: ChatGeneration = {
text,
message: new AIMessage({
content: text,
name: !content ? undefined : content.role,
additional_kwargs: generationInfo
}),
generationInfo
}
return {
generations: [generation]
}
}
}
function convertResponseContentToChatGenerationChunk(response: EnhancedGenerateContentResponse): ChatGenerationChunk | null {
if (!response.candidates || response.candidates.length === 0) {
return null
}
const functionCalls = response.functionCalls()
const [candidate] = response.candidates
const { content, ...generationInfo } = candidate
const text = content?.parts[0]?.text ?? ''
const generation: ChatGeneration = {
text,
message: new AIMessage({
content: text,
tool_calls: functionCalls,
additional_kwargs: {
...generationInfo
},
usage_metadata: extra?.usageMetadata as any
}),
generationInfo
}
return {
generations: [generation]
}
}
function convertResponseContentToChatGenerationChunk(
response: EnhancedGenerateContentResponse,
extra: {
usageMetadata?: UsageMetadata | undefined
index: number
}
): ChatGenerationChunk | null {
if (!response.candidates || response.candidates.length === 0) {
return null
}
const functionCalls = response.functionCalls()
const [candidate] = response.candidates
const { content, ...generationInfo } = candidate
const text = content?.parts[0]?.text ?? ''
const toolCallChunks: ToolCallChunk[] = []
if (functionCalls) {
toolCallChunks.push(
...functionCalls.map((fc) => ({
...fc,
args: JSON.stringify(fc.args),
index: extra.index
}))
)
}
return new ChatGenerationChunk({
text,
message: new AIMessageChunk({
content: text,
name: !content ? undefined : content.role,
tool_call_chunks: toolCallChunks,
// Each chunk can have unique "generationInfo", and merging strategy is unclear,
// so leave blank for now.
additional_kwargs: {}
additional_kwargs: {},
usage_metadata: extra.usageMetadata as any
}),
generationInfo
})

View File

@ -1,8 +1,9 @@
import { ChatOllama, ChatOllamaInput } from '@langchain/community/chat_models/ollama'
import { ChatOllama } from '@langchain/community/chat_models/ollama'
import { BaseCache } from '@langchain/core/caches'
import { BaseLLMParams } from '@langchain/core/language_models/llms'
import { INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses } from '../../../src/utils'
import { OllamaInput } from '@langchain/community/llms/ollama'
import { BaseChatModelParams } from '@langchain/core/language_models/chat_models'
class ChatOllama_ChatModels implements INode {
label: string
@ -208,7 +209,7 @@ class ChatOllama_ChatModels implements INode {
const cache = nodeData.inputs?.cache as BaseCache
const obj: ChatOllamaInput & BaseLLMParams = {
const obj: OllamaInput & BaseChatModelParams = {
baseUrl,
temperature: parseFloat(temperature),
model: modelName

View File

@ -439,6 +439,7 @@ class OllamaFunctions extends BaseChatModel<ChatOllamaFunctionsCallOptions> {
}
}
//@ts-ignore
override bindTools(
tools: StructuredToolInterface[],
kwargs?: Partial<ICommonObject>

View File

@ -24,8 +24,8 @@ export class ChatOpenAI extends LangchainChatOpenAI implements IVisionChatModal
}
revertToOriginalModel(): void {
super.modelName = this.configuredModel
super.maxTokens = this.configuredMaxToken
this.modelName = this.configuredModel
this.maxTokens = this.configuredMaxToken
}
setMultiModalOption(multiModalOption: IMultiModalOption): void {
@ -34,8 +34,8 @@ export class ChatOpenAI extends LangchainChatOpenAI implements IVisionChatModal
setVisionModel(): void {
if (this.modelName !== 'gpt-4-turbo' && !this.modelName.includes('vision')) {
super.modelName = 'gpt-4-turbo'
super.maxTokens = this.configuredMaxToken ? this.configuredMaxToken : 1024
this.modelName = 'gpt-4-turbo'
this.maxTokens = this.configuredMaxToken ? this.configuredMaxToken : 1024
}
}
}

View File

@ -123,7 +123,8 @@ class API_DocumentLoaders implements INode {
let docs: IDocument[] = []
if (textSplitter) {
docs = await loader.loadAndSplit(textSplitter)
docs = await loader.load()
docs = await textSplitter.splitDocuments(docs)
} else {
docs = await loader.load()
}

View File

@ -153,7 +153,8 @@ class Airtable_DocumentLoaders implements INode {
let docs: IDocument[] = []
if (textSplitter) {
docs = await loader.loadAndSplit(textSplitter)
docs = await loader.load()
docs = await textSplitter.splitDocuments(docs)
} else {
docs = await loader.load()
}

View File

@ -2,7 +2,7 @@ import { omit } from 'lodash'
import { INode, INodeData, INodeParams, ICommonObject } from '../../../src/Interface'
import { getCredentialData, getCredentialParam } from '../../../src/utils'
import { TextSplitter } from 'langchain/text_splitter'
import { ApifyDatasetLoader } from 'langchain/document_loaders/web/apify_dataset'
import { ApifyDatasetLoader } from '@langchain/community/document_loaders/web/apify_dataset'
import { Document } from '@langchain/core/documents'
class ApifyWebsiteContentCrawler_DocumentLoaders implements INode {
@ -165,7 +165,8 @@ class ApifyWebsiteContentCrawler_DocumentLoaders implements INode {
let docs = []
if (textSplitter) {
docs = await loader.loadAndSplit(textSplitter)
docs = await loader.load()
docs = await textSplitter.splitDocuments(docs)
} else {
docs = await loader.load()
}

View File

@ -1,6 +1,6 @@
import { TextSplitter } from 'langchain/text_splitter'
import { omit } from 'lodash'
import { CheerioWebBaseLoader, WebBaseLoaderParams } from 'langchain/document_loaders/web/cheerio'
import { CheerioWebBaseLoader, WebBaseLoaderParams } from '@langchain/community/document_loaders/web/cheerio'
import { test } from 'linkifyjs'
import { parse } from 'css-what'
import { webCrawl, xmlScrape } from '../../../src'
@ -138,7 +138,8 @@ class Cheerio_DocumentLoaders implements INode {
}
const loader = new CheerioWebBaseLoader(url, params)
if (textSplitter) {
docs = await loader.loadAndSplit(textSplitter)
docs = await loader.load()
docs = await textSplitter.splitDocuments(docs)
} else {
docs = await loader.load()
}

View File

@ -1,7 +1,7 @@
import { omit } from 'lodash'
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { TextSplitter } from 'langchain/text_splitter'
import { ConfluencePagesLoader, ConfluencePagesLoaderParams } from 'langchain/document_loaders/web/confluence'
import { ConfluencePagesLoader, ConfluencePagesLoaderParams } from '@langchain/community/document_loaders/web/confluence'
import { getCredentialData, getCredentialParam } from '../../../src'
class Confluence_DocumentLoaders implements INode {
@ -119,7 +119,8 @@ class Confluence_DocumentLoaders implements INode {
let docs = []
if (textSplitter) {
docs = await loader.loadAndSplit(textSplitter)
docs = await loader.load()
docs = await textSplitter.splitDocuments(docs)
} else {
docs = await loader.load()
}

View File

@ -1,7 +1,7 @@
import { omit } from 'lodash'
import { ICommonObject, IDocument, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
import { TextSplitter } from 'langchain/text_splitter'
import { CSVLoader } from 'langchain/document_loaders/fs/csv'
import { CSVLoader } from '@langchain/community/document_loaders/fs/csv'
import { getFileFromStorage, handleEscapeCharacters } from '../../../src'
class Csv_DocumentLoaders implements INode {
@ -113,7 +113,8 @@ class Csv_DocumentLoaders implements INode {
const loader = new CSVLoader(blob, columnName.trim().length === 0 ? undefined : columnName.trim())
if (textSplitter) {
docs.push(...(await loader.loadAndSplit(textSplitter)))
docs = await loader.load()
docs = await textSplitter.splitDocuments(docs)
} else {
docs.push(...(await loader.load()))
}
@ -133,7 +134,8 @@ class Csv_DocumentLoaders implements INode {
const loader = new CSVLoader(blob, columnName.trim().length === 0 ? undefined : columnName.trim())
if (textSplitter) {
docs.push(...(await loader.loadAndSplit(textSplitter)))
docs = await loader.load()
docs = await textSplitter.splitDocuments(docs)
} else {
docs.push(...(await loader.load()))
}

View File

@ -1,7 +1,7 @@
import { omit } from 'lodash'
import { ICommonObject, IDocument, INode, INodeData, INodeParams } from '../../../src/Interface'
import { TextSplitter } from 'langchain/text_splitter'
import { DocxLoader } from 'langchain/document_loaders/fs/docx'
import { DocxLoader } from '@langchain/community/document_loaders/fs/docx'
import { getFileFromStorage } from '../../../src'
class Docx_DocumentLoaders implements INode {
@ -88,7 +88,9 @@ class Docx_DocumentLoaders implements INode {
const loader = new DocxLoader(blob)
if (textSplitter) {
docs.push(...(await loader.loadAndSplit(textSplitter)))
let splittedDocs = await loader.load()
splittedDocs = await textSplitter.splitDocuments(splittedDocs)
docs.push(...splittedDocs)
} else {
docs.push(...(await loader.load()))
}
@ -108,7 +110,9 @@ class Docx_DocumentLoaders implements INode {
const loader = new DocxLoader(blob)
if (textSplitter) {
docs.push(...(await loader.loadAndSplit(textSplitter)))
let splittedDocs = await loader.load()
splittedDocs = await textSplitter.splitDocuments(splittedDocs)
docs.push(...splittedDocs)
} else {
docs.push(...(await loader.load()))
}

View File

@ -1,7 +1,7 @@
import { omit } from 'lodash'
import { getCredentialData, getCredentialParam } from '../../../src'
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { FigmaFileLoader, FigmaLoaderParams } from 'langchain/document_loaders/web/figma'
import { ICommonObject, IDocument, INode, INodeData, INodeParams } from '../../../src/Interface'
import { FigmaFileLoader, FigmaLoaderParams } from '@langchain/community/document_loaders/web/figma'
import { TextSplitter } from 'langchain/text_splitter'
class Figma_DocumentLoaders implements INode {
@ -105,7 +105,14 @@ class Figma_DocumentLoaders implements INode {
const loader = new FigmaFileLoader(figmaOptions)
let docs = textSplitter ? await loader.loadAndSplit() : await loader.load()
let docs: IDocument[] = []
if (textSplitter) {
docs = await loader.load()
docs = await textSplitter.splitDocuments(docs)
} else {
docs = await loader.load()
}
if (metadata) {
const parsedMetadata = typeof metadata === 'object' ? metadata : JSON.parse(metadata)

View File

@ -4,9 +4,9 @@ import { TextSplitter } from 'langchain/text_splitter'
import { TextLoader } from 'langchain/document_loaders/fs/text'
import { DirectoryLoader } from 'langchain/document_loaders/fs/directory'
import { JSONLoader } from 'langchain/document_loaders/fs/json'
import { CSVLoader } from 'langchain/document_loaders/fs/csv'
import { PDFLoader } from 'langchain/document_loaders/fs/pdf'
import { DocxLoader } from 'langchain/document_loaders/fs/docx'
import { CSVLoader } from '@langchain/community/document_loaders/fs/csv'
import { PDFLoader } from '@langchain/community/document_loaders/fs/pdf'
import { DocxLoader } from '@langchain/community/document_loaders/fs/docx'
class Folder_DocumentLoaders implements INode {
label: string
@ -153,7 +153,8 @@ class Folder_DocumentLoaders implements INode {
let docs = []
if (textSplitter) {
docs = await loader.loadAndSplit(textSplitter)
docs = await loader.load()
docs = await textSplitter.splitDocuments(docs)
} else {
docs = await loader.load()
}

View File

@ -1,7 +1,7 @@
import { omit } from 'lodash'
import { INode, INodeData, INodeParams } from '../../../src/Interface'
import { IDocument, INode, INodeData, INodeParams } from '../../../src/Interface'
import { TextSplitter } from 'langchain/text_splitter'
import { GitbookLoader } from 'langchain/document_loaders/web/gitbook'
import { GitbookLoader } from '@langchain/community/document_loaders/web/gitbook'
class Gitbook_DocumentLoaders implements INode {
label: string
@ -79,7 +79,14 @@ class Gitbook_DocumentLoaders implements INode {
const loader = shouldLoadAllPaths ? new GitbookLoader(webPath, { shouldLoadAllPaths }) : new GitbookLoader(webPath)
let docs = textSplitter ? await loader.loadAndSplit() : await loader.load()
let docs: IDocument[] = []
if (textSplitter) {
docs = await loader.load()
docs = await textSplitter.splitDocuments(docs)
} else {
docs = await loader.load()
}
if (metadata) {
const parsedMetadata = typeof metadata === 'object' ? metadata : JSON.parse(metadata)

View File

@ -1,7 +1,7 @@
import { omit } from 'lodash'
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { ICommonObject, IDocument, INode, INodeData, INodeParams } from '../../../src/Interface'
import { TextSplitter } from 'langchain/text_splitter'
import { GithubRepoLoader, GithubRepoLoaderParams } from 'langchain/document_loaders/web/github'
import { GithubRepoLoader, GithubRepoLoaderParams } from '@langchain/community/document_loaders/web/github'
import { getCredentialData, getCredentialParam } from '../../../src'
class Github_DocumentLoaders implements INode {
@ -139,7 +139,15 @@ class Github_DocumentLoaders implements INode {
if (ignorePath) githubOptions.ignorePaths = JSON.parse(ignorePath)
const loader = new GithubRepoLoader(repoLink, githubOptions)
let docs = textSplitter ? await loader.loadAndSplit(textSplitter) : await loader.load()
let docs: IDocument[] = []
if (textSplitter) {
docs = await loader.load()
docs = await textSplitter.splitDocuments(docs)
} else {
docs = await loader.load()
}
if (metadata) {
const parsedMetadata = typeof metadata === 'object' ? metadata : JSON.parse(metadata)

View File

@ -104,7 +104,9 @@ class Json_DocumentLoaders implements INode {
const loader = new JSONLoader(blob, pointers.length != 0 ? pointers : undefined)
if (textSplitter) {
docs.push(...(await loader.loadAndSplit(textSplitter)))
let splittedDocs = await loader.load()
splittedDocs = await textSplitter.splitDocuments(splittedDocs)
docs.push(...splittedDocs)
} else {
docs.push(...(await loader.load()))
}
@ -124,7 +126,9 @@ class Json_DocumentLoaders implements INode {
const loader = new JSONLoader(blob, pointers.length != 0 ? pointers : undefined)
if (textSplitter) {
docs.push(...(await loader.loadAndSplit(textSplitter)))
let splittedDocs = await loader.load()
splittedDocs = await textSplitter.splitDocuments(splittedDocs)
docs.push(...splittedDocs)
} else {
docs.push(...(await loader.load()))
}

View File

@ -98,7 +98,9 @@ class Jsonlines_DocumentLoaders implements INode {
const loader = new JSONLinesLoader(blob, pointer)
if (textSplitter) {
docs.push(...(await loader.loadAndSplit(textSplitter)))
let splittedDocs = await loader.load()
splittedDocs = await textSplitter.splitDocuments(splittedDocs)
docs.push(...splittedDocs)
} else {
docs.push(...(await loader.load()))
}
@ -118,7 +120,9 @@ class Jsonlines_DocumentLoaders implements INode {
const loader = new JSONLinesLoader(blob, pointer)
if (textSplitter) {
docs.push(...(await loader.loadAndSplit(textSplitter)))
let splittedDocs = await loader.load()
splittedDocs = await textSplitter.splitDocuments(splittedDocs)
docs.push(...splittedDocs)
} else {
docs.push(...(await loader.load()))
}

View File

@ -1,7 +1,7 @@
import { omit } from 'lodash'
import { ICommonObject, IDocument, INode, INodeData, INodeParams } from '../../../src/Interface'
import { TextSplitter } from 'langchain/text_splitter'
import { NotionAPILoader, NotionAPILoaderOptions } from 'langchain/document_loaders/web/notionapi'
import { NotionAPILoader, NotionAPILoaderOptions } from '@langchain/community/document_loaders/web/notionapi'
import { getCredentialData, getCredentialParam } from '../../../src'
class NotionDB_DocumentLoaders implements INode {
@ -95,7 +95,8 @@ class NotionDB_DocumentLoaders implements INode {
let docs: IDocument[] = []
if (textSplitter) {
docs = await loader.loadAndSplit(textSplitter)
docs = await loader.load()
docs = await textSplitter.splitDocuments(docs)
} else {
docs = await loader.load()
}

View File

@ -1,7 +1,7 @@
import { omit } from 'lodash'
import { IDocument, INode, INodeData, INodeParams } from '../../../src/Interface'
import { TextSplitter } from 'langchain/text_splitter'
import { NotionLoader } from 'langchain/document_loaders/fs/notion'
import { NotionLoader } from '@langchain/community/document_loaders/fs/notion'
class NotionFolder_DocumentLoaders implements INode {
label: string
@ -74,7 +74,8 @@ class NotionFolder_DocumentLoaders implements INode {
let docs: IDocument[] = []
if (textSplitter) {
docs = await loader.loadAndSplit(textSplitter)
docs = await loader.load()
docs = await textSplitter.splitDocuments(docs)
} else {
docs = await loader.load()
}

View File

@ -1,7 +1,7 @@
import { omit } from 'lodash'
import { ICommonObject, IDocument, INode, INodeData, INodeParams } from '../../../src/Interface'
import { TextSplitter } from 'langchain/text_splitter'
import { NotionAPILoader, NotionAPILoaderOptions } from 'langchain/document_loaders/web/notionapi'
import { NotionAPILoader, NotionAPILoaderOptions } from '@langchain/community/document_loaders/web/notionapi'
import { getCredentialData, getCredentialParam } from '../../../src'
class NotionPage_DocumentLoaders implements INode {
@ -92,7 +92,8 @@ class NotionPage_DocumentLoaders implements INode {
let docs: IDocument[] = []
if (textSplitter) {
docs = await loader.loadAndSplit(textSplitter)
docs = await loader.load()
docs = await textSplitter.splitDocuments(docs)
} else {
docs = await loader.load()
}

View File

@ -1,7 +1,7 @@
import { omit } from 'lodash'
import { IDocument, ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { TextSplitter } from 'langchain/text_splitter'
import { PDFLoader } from 'langchain/document_loaders/fs/pdf'
import { PDFLoader } from '@langchain/community/document_loaders/fs/pdf'
import { getFileFromStorage } from '../../../src'
class Pdf_DocumentLoaders implements INode {
@ -172,7 +172,9 @@ class Pdf_DocumentLoaders implements INode {
legacyBuild ? import('pdfjs-dist/legacy/build/pdf.js') : import('pdf-parse/lib/pdf.js/v1.10.100/build/pdf.js')
})
if (textSplitter) {
docs.push(...(await loader.loadAndSplit(textSplitter)))
let splittedDocs = await loader.load()
splittedDocs = await textSplitter.splitDocuments(splittedDocs)
docs.push(...splittedDocs)
} else {
docs.push(...(await loader.load()))
}
@ -183,7 +185,9 @@ class Pdf_DocumentLoaders implements INode {
legacyBuild ? import('pdfjs-dist/legacy/build/pdf.js') : import('pdf-parse/lib/pdf.js/v1.10.100/build/pdf.js')
})
if (textSplitter) {
docs.push(...(await loader.loadAndSplit(textSplitter)))
let splittedDocs = await loader.load()
splittedDocs = await textSplitter.splitDocuments(splittedDocs)
docs.push(...splittedDocs)
} else {
docs.push(...(await loader.load()))
}

View File

@ -1,7 +1,12 @@
import { omit } from 'lodash'
import { ICommonObject, IDocument, INode, INodeData, INodeParams } from '../../../src/Interface'
import { TextSplitter } from 'langchain/text_splitter'
import { Browser, Page, PlaywrightWebBaseLoader, PlaywrightWebBaseLoaderOptions } from 'langchain/document_loaders/web/playwright'
import {
Browser,
Page,
PlaywrightWebBaseLoader,
PlaywrightWebBaseLoaderOptions
} from '@langchain/community/document_loaders/web/playwright'
import { test } from 'linkifyjs'
import { webCrawl, xmlScrape } from '../../../src'
@ -174,7 +179,8 @@ class Playwright_DocumentLoaders implements INode {
}
const loader = new PlaywrightWebBaseLoader(url, config)
if (textSplitter) {
docs = await loader.loadAndSplit(textSplitter)
docs = await loader.load()
docs = await textSplitter.splitDocuments(docs)
} else {
docs = await loader.load()
}

View File

@ -1,7 +1,7 @@
import { omit } from 'lodash'
import { ICommonObject, IDocument, INode, INodeData, INodeParams } from '../../../src/Interface'
import { TextSplitter } from 'langchain/text_splitter'
import { Browser, Page, PuppeteerWebBaseLoader, PuppeteerWebBaseLoaderOptions } from 'langchain/document_loaders/web/puppeteer'
import { Browser, Page, PuppeteerWebBaseLoader, PuppeteerWebBaseLoaderOptions } from '@langchain/community/document_loaders/web/puppeteer'
import { test } from 'linkifyjs'
import { webCrawl, xmlScrape } from '../../../src'
import { PuppeteerLifeCycleEvent } from 'puppeteer'
@ -175,7 +175,8 @@ class Puppeteer_DocumentLoaders implements INode {
}
const loader = new PuppeteerWebBaseLoader(url, config)
if (textSplitter) {
docs = await loader.loadAndSplit(textSplitter)
docs = await loader.load()
docs = await textSplitter.splitDocuments(docs)
} else {
docs = await loader.load()
}

View File

@ -271,7 +271,9 @@ class S3_DocumentLoaders implements INode {
let docs = []
if (textSplitter) {
docs = await loader.loadAndSplit(textSplitter)
let splittedDocs = await loader.load()
splittedDocs = await textSplitter.splitDocuments(splittedDocs)
docs.push(...splittedDocs)
} else {
docs = await loader.load()
}

View File

@ -1,13 +1,13 @@
import { omit } from 'lodash'
import { ICommonObject, INode, INodeData, INodeOptionsValue, INodeParams } from '../../../src/Interface'
import { S3Loader } from 'langchain/document_loaders/web/s3'
import { S3Loader } from '@langchain/community/document_loaders/web/s3'
import {
UnstructuredLoader,
UnstructuredLoaderOptions,
UnstructuredLoaderStrategy,
SkipInferTableTypes,
HiResModelName
} from 'langchain/document_loaders/fs/unstructured'
} from '@langchain/community/document_loaders/fs/unstructured'
import { getCredentialData, getCredentialParam } from '../../../src/utils'
import { S3Client, GetObjectCommand, S3ClientConfig } from '@aws-sdk/client-s3'
import { getRegions, MODEL_TYPE } from '../../../src/modelLoader'

View File

@ -1,7 +1,7 @@
import { omit } from 'lodash'
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { ICommonObject, IDocument, INode, INodeData, INodeParams } from '../../../src/Interface'
import { TextSplitter } from 'langchain/text_splitter'
import { SearchApiLoader } from 'langchain/document_loaders/web/searchapi'
import { SearchApiLoader } from '@langchain/community/document_loaders/web/searchapi'
import { getCredentialData, getCredentialParam } from '../../../src'
// Provides access to multiple search engines using the SearchApi.
@ -106,7 +106,13 @@ class SearchAPI_DocumentLoaders implements INode {
const loader = new SearchApiLoader(loaderConfig)
// Fetch documents, split if a text splitter is provided
let docs = textSplitter ? await loader.loadAndSplit() : await loader.load()
let docs: IDocument[] = []
if (textSplitter) {
docs = await loader.load()
docs = await textSplitter.splitDocuments(docs)
} else {
docs = await loader.load()
}
if (metadata) {
const parsedMetadata = typeof metadata === 'object' ? metadata : JSON.parse(metadata)

View File

@ -1,7 +1,7 @@
import { omit } from 'lodash'
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { ICommonObject, IDocument, INode, INodeData, INodeParams } from '../../../src/Interface'
import { TextSplitter } from 'langchain/text_splitter'
import { SerpAPILoader } from 'langchain/document_loaders/web/serpapi'
import { SerpAPILoader } from '@langchain/community/document_loaders/web/serpapi'
import { getCredentialData, getCredentialParam } from '../../../src'
class SerpAPI_DocumentLoaders implements INode {
@ -80,7 +80,14 @@ class SerpAPI_DocumentLoaders implements INode {
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const serpApiKey = getCredentialParam('serpApiKey', credentialData, nodeData)
const loader = new SerpAPILoader({ q: query, apiKey: serpApiKey })
let docs = textSplitter ? await loader.loadAndSplit() : await loader.load()
let docs: IDocument[] = []
if (textSplitter) {
docs = await loader.load()
docs = await textSplitter.splitDocuments(docs)
} else {
docs = await loader.load()
}
if (metadata) {
const parsedMetadata = typeof metadata === 'object' ? metadata : JSON.parse(metadata)

View File

@ -106,7 +106,9 @@ class Text_DocumentLoaders implements INode {
const loader = new TextLoader(blob)
if (textSplitter) {
docs.push(...(await loader.loadAndSplit(textSplitter)))
let splittedDocs = await loader.load()
splittedDocs = await textSplitter.splitDocuments(splittedDocs)
docs.push(...splittedDocs)
} else {
docs.push(...(await loader.load()))
}
@ -126,7 +128,9 @@ class Text_DocumentLoaders implements INode {
const loader = new TextLoader(blob)
if (textSplitter) {
docs.push(...(await loader.loadAndSplit(textSplitter)))
let splittedDocs = await loader.load()
splittedDocs = await textSplitter.splitDocuments(splittedDocs)
docs.push(...splittedDocs)
} else {
docs.push(...(await loader.load()))
}

View File

@ -3,7 +3,7 @@ import {
SkipInferTableTypes,
UnstructuredLoaderOptions,
UnstructuredLoaderStrategy
} from 'langchain/document_loaders/fs/unstructured'
} from '@langchain/community/document_loaders/fs/unstructured'
import { BaseDocumentLoader } from 'langchain/document_loaders/base'
import { StringWithAutocomplete } from 'langchain/dist/util/types'
import { Document } from '@langchain/core/documents'

View File

@ -6,7 +6,7 @@ import {
SkipInferTableTypes,
HiResModelName,
UnstructuredLoader as LCUnstructuredLoader
} from 'langchain/document_loaders/fs/unstructured'
} from '@langchain/community/document_loaders/fs/unstructured'
import { getCredentialData, getCredentialParam } from '../../../src/utils'
import { getFileFromStorage } from '../../../src'
import { UnstructuredLoader } from './Unstructured'

View File

@ -6,7 +6,7 @@ import {
UnstructuredLoaderStrategy,
SkipInferTableTypes,
HiResModelName
} from 'langchain/document_loaders/fs/unstructured'
} from '@langchain/community/document_loaders/fs/unstructured'
import { getCredentialData, getCredentialParam } from '../../../src/utils'
class UnstructuredFolder_DocumentLoaders implements INode {

View File

@ -1,5 +1,5 @@
import { OllamaEmbeddings } from '@langchain/community/embeddings/ollama'
import { OllamaInput } from 'langchain/llms/ollama'
import { OllamaInput } from '@langchain/community/llms/ollama'
import { INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses } from '../../../src/utils'

View File

@ -55,9 +55,7 @@ class TogetherAIEmbedding_Embeddings implements INode {
const obj: Partial<TogetherAIEmbeddingsParams> = {
modelName: modelName,
apiKey: togetherAIApiKey,
//@ts-ignore
model: modelName,
togetherAIApiKey: togetherAIApiKey
model: modelName
}
const model = new TogetherAIEmbeddings(obj)

View File

@ -1,7 +1,7 @@
import { ICommonObject, INode, INodeData, INodeOptionsValue, INodeParams } from '../../../src/Interface'
import { MODEL_TYPE, getModels } from '../../../src/modelLoader'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { VoyageEmbeddings, VoyageEmbeddingsParams } from 'langchain/embeddings/voyage'
import { VoyageEmbeddings, VoyageEmbeddingsParams } from '@langchain/community/embeddings/voyage'
class VoyageAIEmbedding_Embeddings implements INode {
label: string

View File

@ -1,68 +0,0 @@
import { BaseCache } from '@langchain/core/caches'
import { BaseLLMParams } from '@langchain/core/language_models/llms'
import { NIBittensorLLM, BittensorInput } from 'langchain/experimental/llms/bittensor'
import { INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses } from '../../../src/utils'
/**
 * Flowise node wrapper around NIBittensorLLM (Bittensor subnet 1 LLMs).
 * Exposes an optional cache, a system prompt and a top-responses count
 * as configurable node inputs.
 */
class Bittensor_LLMs implements INode {
    label: string
    name: string
    version: number
    type: string
    icon: string
    category: string
    description: string
    baseClasses: string[]
    inputs: INodeParams[]

    constructor() {
        this.label = 'NIBittensorLLM'
        this.name = 'NIBittensorLLM'
        this.version = 2.0
        this.type = 'Bittensor'
        this.icon = 'NIBittensor.svg'
        this.category = 'LLMs'
        this.description = 'Wrapper around Bittensor subnet 1 large language models'
        this.baseClasses = [this.type, ...getBaseClasses(NIBittensorLLM)]
        this.inputs = [
            {
                label: 'Cache',
                name: 'cache',
                type: 'BaseCache',
                optional: true
            },
            {
                label: 'System prompt',
                name: 'system_prompt',
                type: 'string',
                additionalParams: true,
                optional: true
            },
            {
                label: 'Top Responses',
                name: 'topResponses',
                type: 'number',
                step: 1,
                optional: true,
                additionalParams: true
            }
        ]
    }

    /**
     * Builds a NIBittensorLLM instance from the node's configured inputs.
     * @param nodeData node configuration holding the user-supplied inputs
     * @returns the constructed NIBittensorLLM model
     */
    async init(nodeData: INodeData, _: string): Promise<any> {
        const systemPrompt = nodeData.inputs?.system_prompt as string
        const topResponses = nodeData.inputs?.topResponses as number
        const cache = nodeData.inputs?.cache as BaseCache

        const fields: Partial<BittensorInput> & BaseLLMParams = {
            systemPrompt: systemPrompt,
            topResponses: Number(topResponses)
        }
        // Only attach the cache when one was wired into the node.
        if (cache) fields.cache = cache

        return new NIBittensorLLM(fields)
    }
}

module.exports = { nodeClass: Bittensor_LLMs }

View File

@ -1 +0,0 @@
<svg width="32" height="32" fill="none" xmlns="http://www.w3.org/2000/svg"><path d="M18.64 25.698V29H8l1.61-6.25a9.81 9.81 0 0 0-.045-4.5C9.027 15.808 8 15.394 8 10.824c.01-2.35.916-4.601 2.517-6.256C12.12 2.913 14.285 1.989 16.54 2c2.254.01 4.412.955 5.999 2.625 1.587 1.67 2.472 3.93 2.462 6.28V12l2 4h-2v4.208a3.821 3.821 0 0 1-1.08 2.373 3.531 3.531 0 0 1-2.306 1.054c-.165.01-.375.004-.606-.012-1.242-.085-2.367.83-2.367 2.075Z" fill="#000" stroke="#000" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/><path d="M21 13h-2l-1-2m3-1-1-2h-4m-3 1 2 4m-1 6 3-3h4" stroke="#fff" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"/></svg>

Before

Width:  |  Height:  |  Size: 666 B

View File

@ -1,7 +1,7 @@
import { BaseCache } from '@langchain/core/caches'
import { Cohere, CohereInput } from '@langchain/cohere'
import { ICommonObject, INode, INodeData, INodeOptionsValue, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { Cohere, CohereInput } from './core'
import { getModels, MODEL_TYPE } from '../../../src/modelLoader'
class Cohere_LLMs implements INode {

View File

@ -1,78 +0,0 @@
import { LLM, BaseLLMParams } from '@langchain/core/language_models/llms'
export interface CohereInput extends BaseLLMParams {
    /** Sampling temperature to use */
    temperature?: number
    /**
     * Maximum number of tokens to generate in the completion.
     */
    maxTokens?: number
    /** Model to use */
    model?: string
    /** Cohere API key; required (no environment fallback in this wrapper). */
    apiKey?: string
}

/**
 * Minimal LLM wrapper around the legacy `cohere-ai` SDK's generate endpoint.
 * The SDK is imported lazily so the dependency is only loaded when used.
 */
export class Cohere extends LLM implements CohereInput {
    temperature = 0
    maxTokens = 250
    model: string
    apiKey: string

    constructor(fields?: CohereInput) {
        super(fields ?? {})

        const apiKey = fields?.apiKey ?? undefined

        if (!apiKey) {
            throw new Error('Please set the COHERE_API_KEY environment variable or pass it to the constructor as the apiKey field.')
        }

        this.apiKey = apiKey
        this.maxTokens = fields?.maxTokens ?? this.maxTokens
        this.temperature = fields?.temperature ?? this.temperature
        // NOTE(review): `this.model` has no initializer, so when `fields.model` is
        // absent this reads undefined and leaves the model undefined — confirm the
        // Cohere generate endpoint applies a server-side default in that case.
        this.model = fields?.model ?? this.model
    }

    _llmType() {
        return 'cohere'
    }

    /** @ignore */
    async _call(prompt: string, options: this['ParsedCallOptions']): Promise<string> {
        const { cohere } = await Cohere.imports()

        cohere.init(this.apiKey)

        // Hit the `generate` endpoint on the `large` model
        const generateResponse = await this.caller.callWithOptions({ signal: options.signal }, cohere.generate.bind(cohere), {
            prompt,
            model: this.model,
            max_tokens: this.maxTokens,
            temperature: this.temperature,
            end_sequences: options.stop
        })
        try {
            // presumably the SDK responds with { body: { generations: [{ text }] } } —
            // verify against the pinned cohere-ai version; any other shape throws below.
            return generateResponse.body.generations[0].text
        } catch {
            throw new Error('Could not parse response.')
        }
    }

    /** @ignore */
    static async imports(): Promise<{
        cohere: typeof import('cohere-ai')
    }> {
        // Dynamic import keeps cohere-ai an optional dependency.
        try {
            const { default: cohere } = await import('cohere-ai')
            return { cohere }
        } catch (e) {
            throw new Error('Please install cohere-ai as a dependency with, e.g. `pnpm install cohere-ai`')
        }
    }
}

View File

@ -1,8 +1,8 @@
import { Replicate, ReplicateInput } from '@langchain/community/llms/replicate'
import { BaseCache } from '@langchain/core/caches'
import { BaseLLMParams } from '@langchain/core/language_models/llms'
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { Replicate, ReplicateInput } from './core'
class Replicate_LLMs implements INode {
label: string
@ -97,7 +97,7 @@ class Replicate_LLMs implements INode {
}
async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
const modelName = nodeData.inputs?.model as string
const modelName = nodeData.inputs?.model as `${string}/${string}` | `${string}/${string}:${string}`
const temperature = nodeData.inputs?.temperature as string
const maxTokens = nodeData.inputs?.maxTokens as string
const topP = nodeData.inputs?.topP as string
@ -107,14 +107,10 @@ class Replicate_LLMs implements INode {
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const apiKey = getCredentialParam('replicateApiKey', credentialData, nodeData)
const version = modelName.split(':').pop()
const name = modelName.split(':')[0].split('/').pop()
const org = modelName.split(':')[0].split('/')[0]
const cache = nodeData.inputs?.cache as BaseCache
const obj: ReplicateInput & BaseLLMParams = {
model: `${org}/${name}:${version}`,
model: modelName,
apiKey
}

View File

@ -0,0 +1,145 @@
import { LLM, type BaseLLMParams } from '@langchain/core/language_models/llms'
import { CallbackManagerForLLMRun } from '@langchain/core/callbacks/manager'
import { GenerationChunk } from '@langchain/core/outputs'
import type ReplicateInstance from 'replicate'
export interface ReplicateInput {
    /** Replicate model identifier: `owner/name` or `owner/name:version`. */
    model: `${string}/${string}` | `${string}/${string}:${string}`
    input?: {
        // different models accept different inputs
        [key: string]: string | number | boolean
    }
    /** Replicate API token; required. */
    apiKey?: string
    /** Name of the model input field that carries the prompt; auto-resolved when omitted. */
    promptKey?: string
}

/**
 * LLM wrapper around the Replicate API. The `replicate` package is imported
 * lazily at call time, and the input key that should carry the prompt is
 * resolved once from the model version's OpenAPI schema and cached.
 */
export class Replicate extends LLM implements ReplicateInput {
    lc_serializable = true

    model: ReplicateInput['model']

    input: ReplicateInput['input']

    apiKey: string

    promptKey?: string

    constructor(fields: ReplicateInput & BaseLLMParams) {
        super(fields)

        const apiKey = fields?.apiKey

        if (!apiKey) {
            throw new Error('Please set the REPLICATE_API_TOKEN')
        }

        this.apiKey = apiKey
        this.model = fields.model
        this.input = fields.input ?? {}
        this.promptKey = fields.promptKey
    }

    _llmType() {
        return 'replicate'
    }

    /** @ignore */
    async _call(prompt: string, options: this['ParsedCallOptions']): Promise<string> {
        const replicate = await this._prepareReplicate()
        const input = await this._getReplicateInput(replicate, prompt)

        const output = await this.caller.callWithOptions({ signal: options.signal }, () =>
            replicate.run(this.model, {
                input
            })
        )

        // Normalize the model output to a single string.
        if (typeof output === 'string') {
            return output
        } else if (Array.isArray(output)) {
            return output.join('')
        } else {
            // Note this is a little odd, but the output format is not consistent
            // across models, so it makes some amount of sense.
            return String(output)
        }
    }

    /**
     * Streams generation chunks. Emits one GenerationChunk per 'output' event
     * and a final empty chunk marked finished on the 'done' event.
     */
    async *_streamResponseChunks(
        prompt: string,
        options: this['ParsedCallOptions'],
        runManager?: CallbackManagerForLLMRun
    ): AsyncGenerator<GenerationChunk> {
        const replicate = await this._prepareReplicate()
        const input = await this._getReplicateInput(replicate, prompt)

        const stream = await this.caller.callWithOptions({ signal: options?.signal }, async () =>
            replicate.stream(this.model, {
                input
            })
        )
        for await (const chunk of stream) {
            if (chunk.event === 'output') {
                yield new GenerationChunk({ text: chunk.data, generationInfo: chunk })
                await runManager?.handleLLMNewToken(chunk.data ?? '')
            }
            // stream is done
            if (chunk.event === 'done')
                yield new GenerationChunk({
                    text: '',
                    generationInfo: { finished: true }
                })
        }
    }

    /** @ignore */
    static async imports(): Promise<{
        Replicate: typeof ReplicateInstance
    }> {
        // Dynamic import keeps `replicate` an optional dependency.
        try {
            const { default: Replicate } = await import('replicate')
            return { Replicate }
        } catch (e) {
            throw new Error('Please install replicate as a dependency with, e.g. `yarn add replicate`')
        }
    }

    // Builds an authenticated Replicate client.
    private async _prepareReplicate(): Promise<ReplicateInstance> {
        const imports = await Replicate.imports()

        return new imports.Replicate({
            userAgent: 'flowise',
            auth: this.apiKey
        })
    }

    // Resolves which input field carries the prompt (cached in this.promptKey),
    // then merges it with any user-supplied model inputs.
    private async _getReplicateInput(replicate: ReplicateInstance, prompt: string) {
        if (this.promptKey === undefined) {
            const [modelString, versionString] = this.model.split(':')

            if (versionString) {
                const version = await replicate.models.versions.get(modelString.split('/')[0], modelString.split('/')[1], versionString)
                const openapiSchema = version.openapi_schema
                const inputProperties: { 'x-order': number | undefined }[] = (openapiSchema as any)?.components?.schemas?.Input?.properties
                if (inputProperties === undefined) {
                    this.promptKey = 'prompt'
                } else {
                    // Pick the schema property with the lowest x-order as the prompt key.
                    const sortedInputProperties = Object.entries(inputProperties).sort(([_keyA, valueA], [_keyB, valueB]) => {
                        const orderA = valueA['x-order'] || 0
                        const orderB = valueB['x-order'] || 0
                        return orderA - orderB
                    })
                    this.promptKey = sortedInputProperties[0][0] ?? 'prompt'
                }
            } else {
                // No version pinned, so the schema cannot be fetched; assume 'prompt'.
                this.promptKey = 'prompt'
            }
        }
        return {
            [this.promptKey!]: prompt,
            ...this.input
        }
    }
}

View File

@ -0,0 +1,105 @@
import path from 'path'
import { getBaseClasses, getUserHome } from '../../../src/utils'
import { SaverOptions } from './interface'
import { ICommonObject, IDatabaseEntity, INode, INodeData, INodeParams } from '../../../src/Interface'
import { SqliteSaver } from './sqliteSaver'
import { DataSource } from 'typeorm'
/**
 * Agent Memory node: persists agentflow conversation state through a
 * database-backed checkpoint saver (currently SQLite only).
 */
class AgentMemory_Memory implements INode {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    badge: string
    baseClasses: string[]
    inputs: INodeParams[]

    constructor() {
        this.label = 'Agent Memory'
        this.name = 'agentMemory'
        this.version = 1.0
        this.type = 'AgentMemory'
        this.icon = 'agentmemory.svg'
        this.category = 'Memory'
        this.description = 'Memory for agentflow to remember the state of the conversation'
        this.baseClasses = [this.type, ...getBaseClasses(SqliteSaver)]
        this.inputs = [
            {
                label: 'Database',
                name: 'databaseType',
                type: 'options',
                options: [
                    {
                        label: 'SQLite',
                        name: 'sqlite'
                    }
                ],
                default: 'sqlite'
            },
            {
                label: 'Database File Path',
                name: 'databaseFilePath',
                type: 'string',
                placeholder: 'C:\\Users\\User\\.flowise\\database.sqlite',
                description:
                    'If SQLite is selected, provide the path to the SQLite database file. Leave empty to use default application database',
                additionalParams: true,
                optional: true
            },
            {
                label: 'Additional Connection Configuration',
                name: 'additionalConfig',
                type: 'json',
                additionalParams: true,
                optional: true
            }
        ]
    }

    /**
     * Resolves the configured database options and returns a SqliteSaver,
     * or undefined when the selected database type is unsupported.
     */
    async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
        const rawConfig = nodeData.inputs?.additionalConfig as string
        const dbFilePath = nodeData.inputs?.databaseFilePath as string
        const dbType = nodeData.inputs?.databaseType as string
        const entities = options.databaseEntities as IDatabaseEntity
        const flowId = options.chatflowid as string
        const dataSource = options.appDataSource as DataSource

        // Extra datasource configuration may arrive as an object or a JSON string.
        let extraConfig = {}
        if (rawConfig) {
            try {
                extraConfig = typeof rawConfig === 'object' ? rawConfig : JSON.parse(rawConfig)
            } catch (exception) {
                throw new Error('Invalid JSON in the Additional Configuration: ' + exception)
            }
        }

        const threadId = options.sessionId || options.chatId
        const datasourceOptions: ICommonObject = {
            ...extraConfig,
            type: dbType
        }

        // Only SQLite is supported at the moment.
        if (dbType !== 'sqlite') return undefined

        // Fall back to the application's default database file when no path is given.
        datasourceOptions.database = dbFilePath
            ? path.resolve(dbFilePath)
            : path.join(process.env.DATABASE_PATH ?? path.join(getUserHome(), '.flowise'), 'database.sqlite')

        const saverArgs: SaverOptions = {
            datasourceOptions,
            threadId,
            appDataSource: dataSource,
            databaseEntities: entities,
            chatflowid: flowId
        }
        return new SqliteSaver(saverArgs)
    }
}

module.exports = { nodeClass: AgentMemory_Memory }

View File

@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round" class="icon icon-tabler icons-tabler-outline icon-tabler-database"><path stroke="none" d="M0 0h24v24H0z" fill="none"/><path d="M12 6m-8 0a8 3 0 1 0 16 0a8 3 0 1 0 -16 0" /><path d="M4 6v6a8 3 0 0 0 16 0v-6" /><path d="M4 12v6a8 3 0 0 0 16 0v-6" /></svg>

After

Width:  |  Height:  |  Size: 444 B

View File

@ -0,0 +1,24 @@
import { Checkpoint, CheckpointMetadata } from '@langchain/langgraph'
import { RunnableConfig } from '@langchain/core/runnables'
import { IDatabaseEntity } from '../../../src'
import { DataSource } from 'typeorm'
/** Configuration for database-backed checkpoint savers. */
export type SaverOptions = {
    // TypeORM DataSourceOptions object (left as `any` — shape varies per database type)
    datasourceOptions: any
    // conversation thread used as the default checkpoint key
    threadId: string
    appDataSource: DataSource
    databaseEntities: IDatabaseEntity
    chatflowid: string
}

/** A checkpoint plus the config it was stored under, with optional metadata and parent config. */
export interface CheckpointTuple {
    config: RunnableConfig
    checkpoint: Checkpoint
    metadata?: CheckpointMetadata
    parentConfig?: RunnableConfig
}

/** Serialization contract for persisting checkpoints; parsing is asynchronous. */
export interface SerializerProtocol<D> {
    stringify(obj: D): string
    parse(data: string): Promise<D>
}

View File

@ -0,0 +1,242 @@
import { BaseCheckpointSaver, Checkpoint, CheckpointMetadata } from '@langchain/langgraph'
import { RunnableConfig } from '@langchain/core/runnables'
import { BaseMessage } from '@langchain/core/messages'
import { DataSource, QueryRunner } from 'typeorm'
import { CheckpointTuple, SaverOptions, SerializerProtocol } from './interface'
import { IMessage, MemoryMethods } from '../../../src/Interface'
import { mapChatMessageToBaseMessage } from '../../../src/utils'
/**
 * LangGraph checkpoint saver backed by a SQLite table (via TypeORM), which
 * also implements Flowise's MemoryMethods so the same object can serve chat
 * history. Checkpoints are keyed by (thread_id, checkpoint_id); chat messages
 * are read from the separate Flowise application datasource.
 */
export class SqliteSaver extends BaseCheckpointSaver implements MemoryMethods {
// Set once setup() has created the table; guards against re-initialization.
protected isSetup: boolean
datasource: DataSource
queryRunner: QueryRunner
config: SaverOptions
threadId: string
tableName = 'checkpoints'
constructor(config: SaverOptions, serde?: SerializerProtocol<Checkpoint>) {
super(serde)
this.config = config
const { datasourceOptions, threadId } = config
this.threadId = threadId
// Connection is created lazily; initialize() happens in setup().
this.datasource = new DataSource(datasourceOptions)
}
/**
 * Lazily initializes the datasource and creates the checkpoints table.
 * Called at the top of every public checkpoint operation.
 */
private async setup(): Promise<void> {
if (this.isSetup) {
return
}
try {
const appDataSource = await this.datasource.initialize()
this.queryRunner = appDataSource.createQueryRunner()
await this.queryRunner.manager.query(`
CREATE TABLE IF NOT EXISTS ${this.tableName} (
thread_id TEXT NOT NULL,
checkpoint_id TEXT NOT NULL,
parent_id TEXT,
checkpoint BLOB,
metadata BLOB,
PRIMARY KEY (thread_id, checkpoint_id));`)
} catch (error) {
console.error(`Error creating ${this.tableName} table`, error)
throw new Error(`Error creating ${this.tableName} table`)
}
this.isSetup = true
}
/**
 * Fetches a single checkpoint tuple. With an explicit checkpoint_id the
 * exact row is looked up; otherwise the latest checkpoint for the thread
 * (highest checkpoint_id) is returned. Returns undefined when none exists.
 */
async getTuple(config: RunnableConfig): Promise<CheckpointTuple | undefined> {
await this.setup()
// Fall back to the saver's default thread when config has none.
const thread_id = config.configurable?.thread_id || this.threadId
const checkpoint_id = config.configurable?.checkpoint_id
if (checkpoint_id) {
try {
const keys = [thread_id, checkpoint_id]
const sql = `SELECT checkpoint, parent_id, metadata FROM ${this.tableName} WHERE thread_id = ? AND checkpoint_id = ?`
const rows = await this.queryRunner.manager.query(sql, [...keys])
if (rows && rows.length > 0) {
return {
config,
checkpoint: (await this.serde.parse(rows[0].checkpoint)) as Checkpoint,
metadata: (await this.serde.parse(rows[0].metadata)) as CheckpointMetadata,
// Only expose parentConfig when the row actually recorded a parent.
parentConfig: rows[0].parent_id
? {
configurable: {
thread_id,
checkpoint_id: rows[0].parent_id
}
}
: undefined
}
}
} catch (error) {
console.error(`Error retrieving ${this.tableName}`, error)
throw new Error(`Error retrieving ${this.tableName}`)
}
} else {
const keys = [thread_id]
// Latest checkpoint first; LIMIT 1 picks the most recent one.
const sql = `SELECT thread_id, checkpoint_id, parent_id, checkpoint, metadata FROM ${this.tableName} WHERE thread_id = ? ORDER BY checkpoint_id DESC LIMIT 1`
const rows = await this.queryRunner.manager.query(sql, [...keys])
if (rows && rows.length > 0) {
return {
config: {
configurable: {
thread_id: rows[0].thread_id,
checkpoint_id: rows[0].checkpoint_id
}
},
checkpoint: (await this.serde.parse(rows[0].checkpoint)) as Checkpoint,
metadata: (await this.serde.parse(rows[0].metadata)) as CheckpointMetadata,
parentConfig: rows[0].parent_id
? {
configurable: {
thread_id: rows[0].thread_id,
checkpoint_id: rows[0].parent_id
}
}
: undefined
}
}
}
return undefined
}
/**
 * Streams checkpoint tuples for a thread, newest first, optionally limited
 * and optionally only those older than the `before` checkpoint.
 */
async *list(config: RunnableConfig, limit?: number, before?: RunnableConfig): AsyncGenerator<CheckpointTuple> {
await this.setup()
const thread_id = config.configurable?.thread_id || this.threadId
let sql = `SELECT thread_id, checkpoint_id, parent_id, checkpoint, metadata FROM ${this.tableName} WHERE thread_id = ? ${
before ? 'AND checkpoint_id < ?' : ''
} ORDER BY checkpoint_id DESC`
if (limit) {
sql += ` LIMIT ${limit}`
}
// filter(Boolean) drops the `before` placeholder arg when it is absent,
// keeping the parameter list in sync with the conditional SQL above.
const args = [thread_id, before?.configurable?.checkpoint_id].filter(Boolean)
try {
const rows = await this.queryRunner.manager.query(sql, [...args])
if (rows && rows.length > 0) {
for (const row of rows) {
yield {
config: {
configurable: {
thread_id: row.thread_id,
checkpoint_id: row.checkpoint_id
}
},
checkpoint: (await this.serde.parse(row.checkpoint)) as Checkpoint,
metadata: (await this.serde.parse(row.metadata)) as CheckpointMetadata,
parentConfig: row.parent_id
? {
configurable: {
thread_id: row.thread_id,
checkpoint_id: row.parent_id
}
}
: undefined
}
}
}
} catch (error) {
console.error(`Error listing ${this.tableName}`, error)
throw new Error(`Error listing ${this.tableName}`)
}
}
/**
 * Upserts a checkpoint row. The incoming config's checkpoint_id is stored
 * as the new row's parent_id; returns a config pointing at the new
 * checkpoint. NOTE(review): silently returns {} when the config carries no
 * checkpoint_id — callers presumably treat that as a no-op; confirm.
 */
async put(config: RunnableConfig, checkpoint: Checkpoint, metadata: CheckpointMetadata): Promise<RunnableConfig> {
await this.setup()
if (!config.configurable?.checkpoint_id) return {}
try {
const row = [
config.configurable?.thread_id || this.threadId,
checkpoint.id,
config.configurable?.checkpoint_id,
this.serde.stringify(checkpoint),
this.serde.stringify(metadata)
]
const query = `INSERT OR REPLACE INTO ${this.tableName} (thread_id, checkpoint_id, parent_id, checkpoint, metadata) VALUES (?, ?, ?, ?, ?)`
await this.queryRunner.manager.query(query, row)
} catch (error) {
console.error('Error saving checkpoint', error)
throw new Error('Error saving checkpoint')
}
return {
configurable: {
thread_id: config.configurable?.thread_id || this.threadId,
checkpoint_id: checkpoint.id
}
}
}
/**
 * Deletes every checkpoint belonging to a thread. Errors are logged but
 * deliberately not rethrown (best-effort cleanup).
 */
async delete(threadId: string): Promise<void> {
if (!threadId) {
return
}
await this.setup()
const query = `DELETE FROM "${this.tableName}" WHERE thread_id = ?;`
try {
await this.queryRunner.manager.query(query, [threadId])
} catch (error) {
console.error(`Error deleting thread_id ${threadId}`, error)
}
}
/**
 * Reads chat history for a session from the Flowise application datasource
 * (not the checkpoint table). Optionally prepends extra messages and either
 * maps to LangChain BaseMessages or returns plain IMessage objects.
 */
async getChatMessages(
overrideSessionId = '',
returnBaseMessages = false,
prependMessages?: IMessage[]
): Promise<IMessage[] | BaseMessage[]> {
if (!overrideSessionId) return []
const chatMessage = await this.config.appDataSource.getRepository(this.config.databaseEntities['ChatMessage']).find({
where: {
sessionId: overrideSessionId,
chatflowid: this.config.chatflowid
},
order: {
createdDate: 'ASC'
}
})
if (prependMessages?.length) {
chatMessage.unshift(...prependMessages)
}
if (returnBaseMessages) {
return mapChatMessageToBaseMessage(chatMessage)
}
let returnIMessages: IMessage[] = []
for (const m of chatMessage) {
returnIMessages.push({
message: m.content as string,
type: m.role
})
}
return returnIMessages
}
// Intentionally a no-op: messages are written elsewhere in the app.
async addChatMessages(): Promise<void> {
// Empty as its not being used
}
// Clearing chat history for a session means deleting its checkpoints.
async clearChatMessages(overrideSessionId = ''): Promise<void> {
await this.delete(overrideSessionId)
}
}

View File

@ -127,6 +127,7 @@ class BufferMemoryExtended extends FlowiseMemory implements MemoryMethods {
type: m.role
})
}
return returnIMessages
}

View File

@ -1,3 +1,7 @@
import { ZepMemory, ZepMemoryInput } from '@getzep/zep-cloud/langchain'
import { BaseMessage } from '@langchain/core/messages'
import { InputValues, MemoryVariables, OutputValues } from 'langchain/memory'
import { ICommonObject } from '../../../src'
import { IMessage, INode, INodeData, INodeParams, MemoryMethods, MessageType } from '../../../src/Interface'
import {
convertBaseMessagetoIMessage,
@ -6,11 +10,6 @@ import {
getCredentialParam,
mapChatMessageToBaseMessage
} from '../../../src/utils'
import { ZepMemory, ZepMemoryInput } from '@getzep/zep-cloud/langchain'
import { ICommonObject } from '../../../src'
import { InputValues, MemoryVariables, OutputValues } from 'langchain/memory'
import { BaseMessage } from 'langchain/schema'
class ZepMemoryCloud_Memory implements INode {
label: string

View File

@ -30,6 +30,9 @@ Select strategically to minimize the number of steps taken.`
const routerToolName = 'route'
const defaultSummarization = 'Conversation finished'
const defaultInstruction = 'Conversation finished'
class Supervisor_MultiAgents implements INode {
label: string
name: string
@ -46,7 +49,7 @@ class Supervisor_MultiAgents implements INode {
constructor() {
this.label = 'Supervisor'
this.name = 'supervisor'
this.version = 1.0
this.version = 2.0
this.type = 'Supervisor'
this.icon = 'supervisor.svg'
this.category = 'Multi Agents'
@ -74,6 +77,13 @@ class Supervisor_MultiAgents implements INode {
type: 'BaseChatModel',
description: `Only compatible with models that are capable of function calling: ChatOpenAI, ChatMistral, ChatAnthropic, ChatGoogleGenerativeAI, GroqChat. Best result with GPT-4 model`
},
{
label: 'Agent Memory',
name: 'agentMemory',
type: 'BaseCheckpointSaver',
description: 'Save the state of the agent',
optional: true
},
{
label: 'Recursion Limit',
name: 'recursionLimit',
@ -120,13 +130,15 @@ class Supervisor_MultiAgents implements INode {
let userPrompt = `Given the conversation above, who should act next? Or should we FINISH? Select one of: ${memberOptions.join(
', '
)}`
)}
Remember to give reasonings, instructions and summarization`
const tool = new RouteTool({
schema: z.object({
reasoning: z.string(),
next: z.enum(['FINISH', ...members]),
instructions: z.string().describe('The specific instructions of the sub-task the next role should accomplish.')
instructions: z.string().describe('The specific instructions of the sub-task the next role should accomplish.'),
summarization: z.string().optional().describe('Summarization of the conversation')
})
})
@ -144,6 +156,7 @@ class Supervisor_MultiAgents implements INode {
multiModalMessageContent = messages.multiModalMessageContent
// Force Mistral to use tool
// @ts-ignore
const modelWithTool = llm.bind({
tools: [tool],
tool_choice: 'any',
@ -162,14 +175,16 @@ class Supervisor_MultiAgents implements INode {
next: Object.keys(toolAgentAction.args).length ? toolAgentAction.args.next : 'FINISH',
instructions: Object.keys(toolAgentAction.args).length
? toolAgentAction.args.instructions
: 'Conversation finished',
team_members: members.join(', ')
: defaultInstruction,
team_members: members.join(', '),
summarization: Object.keys(toolAgentAction.args).length ? toolAgentAction.args.summarization : ''
}
} else {
return {
next: 'FINISH',
instructions: 'Conversation finished',
team_members: members.join(', ')
instructions: defaultInstruction,
team_members: members.join(', '),
summarization: defaultSummarization
}
}
})
@ -177,7 +192,7 @@ class Supervisor_MultiAgents implements INode {
// Force Anthropic to use tool : https://docs.anthropic.com/claude/docs/tool-use#forcing-tool-use
userPrompt = `Given the conversation above, who should act next? Or should we FINISH? Select one of: ${memberOptions.join(
', '
)}. Use the ${routerToolName} tool in your response.`
)}. Remember to give reasonings, instructions and summarization. Use the ${routerToolName} tool in your response.`
let prompt = ChatPromptTemplate.fromMessages([
['system', systemPrompt],
@ -206,19 +221,22 @@ class Supervisor_MultiAgents implements INode {
return {
next: toolAgentAction.toolInput.next,
instructions: toolAgentAction.toolInput.instructions,
team_members: members.join(', ')
team_members: members.join(', '),
summarization: toolAgentAction.toolInput.summarization
}
} else if (typeof x === 'object' && 'returnValues' in x) {
return {
next: 'FINISH',
instructions: x.returnValues?.output,
team_members: members.join(', ')
team_members: members.join(', '),
summarization: defaultSummarization
}
} else {
return {
next: 'FINISH',
instructions: 'Conversation finished',
team_members: members.join(', ')
instructions: defaultInstruction,
team_members: members.join(', '),
summarization: defaultSummarization
}
}
})
@ -229,7 +247,7 @@ class Supervisor_MultiAgents implements INode {
['human', userPrompt]
])
const messages = await processImageMessage(1, llm, prompt, nodeData, options)
const messages = await processImageMessage(1, llm as any, prompt, nodeData, options)
prompt = messages.prompt
multiModalMessageContent = messages.multiModalMessageContent
@ -251,19 +269,22 @@ class Supervisor_MultiAgents implements INode {
return {
next: toolAgentAction.toolInput.next,
instructions: toolAgentAction.toolInput.instructions,
team_members: members.join(', ')
team_members: members.join(', '),
summarization: toolAgentAction.toolInput.summarization
}
} else if (typeof x === 'object' && 'returnValues' in x) {
return {
next: 'FINISH',
instructions: x.returnValues?.output,
team_members: members.join(', ')
team_members: members.join(', '),
summarization: defaultSummarization
}
} else {
return {
next: 'FINISH',
instructions: 'Conversation finished',
team_members: members.join(', ')
instructions: defaultInstruction,
team_members: members.join(', '),
summarization: defaultSummarization
}
}
})
@ -298,19 +319,22 @@ class Supervisor_MultiAgents implements INode {
return {
next: toolAgentAction.toolInput.next,
instructions: toolAgentAction.toolInput.instructions,
team_members: members.join(', ')
team_members: members.join(', '),
summarization: toolAgentAction.toolInput.summarization
}
} else if (typeof x === 'object' && 'returnValues' in x) {
return {
next: 'FINISH',
instructions: x.returnValues?.output,
team_members: members.join(', ')
team_members: members.join(', '),
summarization: defaultSummarization
}
} else {
return {
next: 'FINISH',
instructions: 'Conversation finished',
team_members: members.join(', ')
instructions: defaultInstruction,
team_members: members.join(', '),
summarization: defaultSummarization
}
}
})
@ -341,19 +365,22 @@ class Supervisor_MultiAgents implements INode {
return {
next: toolAgentAction.toolInput.next,
instructions: toolAgentAction.toolInput.instructions,
team_members: members.join(', ')
team_members: members.join(', '),
summarization: toolAgentAction.toolInput.summarization
}
} else if (typeof x === 'object' && 'returnValues' in x) {
return {
next: 'FINISH',
instructions: x.returnValues?.output,
team_members: members.join(', ')
team_members: members.join(', '),
summarization: defaultSummarization
}
} else {
return {
next: 'FINISH',
instructions: 'Conversation finished',
team_members: members.join(', ')
instructions: defaultInstruction,
team_members: members.join(', '),
summarization: defaultSummarization
}
}
})
@ -369,6 +396,7 @@ class Supervisor_MultiAgents implements INode {
{
state,
agent: supervisorAgent,
nodeId: nodeData.id,
abortControllerSignal
},
config
@ -383,7 +411,8 @@ class Supervisor_MultiAgents implements INode {
recursionLimit,
llm,
moderations,
multiModalMessageContent
multiModalMessageContent,
checkpointMemory: nodeData.inputs?.agentMemory
}
return returnOutput
@ -391,7 +420,12 @@ class Supervisor_MultiAgents implements INode {
}
async function agentNode(
{ state, agent, abortControllerSignal }: { state: ITeamState; agent: AgentExecutor | Runnable; abortControllerSignal: AbortController },
{
state,
agent,
nodeId,
abortControllerSignal
}: { state: ITeamState; agent: AgentExecutor | Runnable; nodeId: string; abortControllerSignal: AbortController },
config: RunnableConfig
) {
try {
@ -399,6 +433,8 @@ async function agentNode(
throw new Error('Aborted!')
}
const result = await agent.invoke({ ...state, signal: abortControllerSignal.signal }, config)
const additional_kwargs: ICommonObject = { nodeId }
result.additional_kwargs = { ...result.additional_kwargs, ...additional_kwargs }
return result
} catch (error) {
throw new Error('Aborted!')

View File

@ -21,17 +21,19 @@ class Worker_MultiAgents implements INode {
icon: string
category: string
baseClasses: string[]
hideOutput: boolean
inputs?: INodeParams[]
badge?: string
constructor() {
this.label = 'Worker'
this.name = 'worker'
this.version = 1.0
this.version = 2.0
this.type = 'Worker'
this.icon = 'worker.svg'
this.category = 'Multi Agents'
this.baseClasses = [this.type]
this.hideOutput = true
this.inputs = [
{
label: 'Worker Name',
@ -137,6 +139,7 @@ class Worker_MultiAgents implements INode {
state,
agent: agent,
name: workerName,
nodeId: nodeData.id,
abortControllerSignal
},
config
@ -269,8 +272,9 @@ async function agentNode(
state,
agent,
name,
nodeId,
abortControllerSignal
}: { state: ITeamState; agent: AgentExecutor | RunnableSequence; name: string; abortControllerSignal: AbortController },
}: { state: ITeamState; agent: AgentExecutor | RunnableSequence; name: string; nodeId: string; abortControllerSignal: AbortController },
config: RunnableConfig
) {
try {
@ -279,7 +283,7 @@ async function agentNode(
}
const result = await agent.invoke({ ...state, signal: abortControllerSignal.signal }, config)
const additional_kwargs: ICommonObject = {}
const additional_kwargs: ICommonObject = { nodeId }
if (result.usedTools) {
additional_kwargs.usedTools = result.usedTools
}

View File

@ -1,5 +1,5 @@
import { getBaseClasses, INode, INodeData, INodeParams } from '../../../src'
import { BaseOutputParser } from 'langchain/schema/output_parser'
import { BaseOutputParser } from '@langchain/core/output_parsers'
import { StructuredOutputParser as LangchainStructuredOutputParser } from 'langchain/output_parsers'
import { CATEGORY } from '../OutputParserHelpers'
import { z } from 'zod'

View File

@ -0,0 +1,886 @@
import { flatten, uniq } from 'lodash'
import { DataSource } from 'typeorm'
import { RunnableSequence, RunnablePassthrough, RunnableConfig } from '@langchain/core/runnables'
import { ChatPromptTemplate, MessagesPlaceholder, HumanMessagePromptTemplate, BaseMessagePromptTemplateLike } from '@langchain/core/prompts'
import { BaseChatModel } from '@langchain/core/language_models/chat_models'
import { AIMessage, AIMessageChunk, BaseMessage, HumanMessage, ToolMessage } from '@langchain/core/messages'
import { formatToOpenAIToolMessages } from 'langchain/agents/format_scratchpad/openai_tools'
import { type ToolsAgentStep } from 'langchain/agents/openai/output_parser'
import { StringOutputParser } from '@langchain/core/output_parsers'
import {
INode,
INodeData,
INodeParams,
ISeqAgentsState,
ICommonObject,
MessageContentImageUrl,
INodeOutputsValue,
ISeqAgentNode,
IDatabaseEntity,
IUsedTool,
IDocument
} from '../../../src/Interface'
import { ToolCallingAgentOutputParser, AgentExecutor, SOURCE_DOCUMENTS_PREFIX } from '../../../src/agents'
import { getInputVariables, getVars, handleEscapeCharacters, prepareSandboxVars } from '../../../src/utils'
import {
customGet,
getVM,
processImageMessage,
transformObjectPropertyToFunction,
restructureMessages,
MessagesState,
RunnableCallable
} from '../commonUtils'
import { END, StateGraph } from '@langchain/langgraph'
import { StructuredTool } from '@langchain/core/tools'
// Default human-in-the-loop prompt shown before tool execution; {tools} is
// substituted with the JSON of the tools about to run.
const defaultApprovalPrompt = `You are about to execute tool: {tools}. Ask if user want to proceed`
// Placeholder system prompt shown in the Agent node's System Prompt field.
const examplePrompt = 'You are a research assistant who can search for up-to-date info using search engine.'
// Shared description for both Update State tabs (table and code).
const customOutputFuncDesc = `This is only applicable when you have a custom State at the START node. After agent execution, you might want to update the State values`
// Help text for the "Update State (Code)" tab — explains the expected return
// shape and the $flow/$vars variables available inside the user's JS snippet.
const howToUseCode = `
1. Return the key value JSON object. For example: if you have the following State:
\`\`\`json
{
"user": null
}
\`\`\`
You can update the "user" value by returning the following:
\`\`\`js
return {
"user": "john doe"
}
\`\`\`
2. If you want to use the agent's output as the value to update state, it is available as \`$flow.output\` with the following structure:
\`\`\`json
{
"content": "Hello! How can I assist you today?",
"usedTools": [
{
"tool": "tool-name",
"toolInput": "{foo: var}",
"toolOutput": "This is the tool's output"
}
],
"sourceDocuments": [
{
"pageContent": "This is the page content",
"metadata": "{foo: var}",
}
],
}
\`\`\`
For example, if the \`toolOutput\` is the value you want to update the state with, you can return the following:
\`\`\`js
return {
"user": $flow.output.usedTools[0].toolOutput
}
\`\`\`
3. You can also get default flow config, including the current "state":
- \`$flow.sessionId\`
- \`$flow.chatId\`
- \`$flow.chatflowId\`
- \`$flow.input\`
- \`$flow.state\`
4. You can get custom variables: \`$vars.<variable-name>\`
`
// Help text for the "Update State (Table)" tab.
// Fix: the original read "it is available as available as" (doubled phrase);
// the code-tab text above shows the intended single "it is available as".
const howToUse = `
1. Key and value pair to be updated. For example: if you have the following State:
| Key | Operation | Default Value |
|-----------|---------------|-------------------|
| user | Replace | |
You can update the "user" value with the following:
| Key | Value |
|-----------|-----------|
| user | john doe |
2. If you want to use the agent's output as the value to update state, it is available as \`$flow.output\` with the following structure:
\`\`\`json
{
"output": "Hello! How can I assist you today?",
"usedTools": [
{
"tool": "tool-name",
"toolInput": "{foo: var}",
"toolOutput": "This is the tool's output"
}
],
"sourceDocuments": [
{
"pageContent": "This is the page content",
"metadata": "{foo: var}",
}
],
}
\`\`\`
For example, if the \`toolOutput\` is the value you want to update the state with, you can do the following:
| Key | Value |
|-----------|-------------------------------------------|
| user | \`$flow.output.usedTools[0].toolOutput\` |
3. You can get default flow config, including the current "state":
- \`$flow.sessionId\`
- \`$flow.chatId\`
- \`$flow.chatflowId\`
- \`$flow.input\`
- \`$flow.state\`
4. You can get custom variables: \`$vars.<variable-name>\`
`
// Example snippet pre-filled in the code editor for Update State (Code).
const defaultFunc = `const result = $flow.output;
/* Suppose we have a custom State schema like this:
* {
aggregate: {
value: (x, y) => x.concat(y),
default: () => []
}
}
*/
return {
aggregate: [result.content]
};`
// UI key that remembers which Update State tab the user last selected.
const TAB_IDENTIFIER = 'selectedUpdateStateMemoryTab'
/**
 * Sequential Agents "Agent" node: a tool-calling agent that can sit anywhere
 * in a sequential graph. Supports an optional human-in-the-loop interrupt
 * (approval before tools run) and optional custom-State updates.
 */
class Agent_SeqAgents implements INode {
label: string
name: string
version: number
description: string
type: string
icon: string
category: string
baseClasses: string[]
inputs?: INodeParams[]
badge?: string
outputs: INodeOutputsValue[]
// Declares the node's UI metadata and its input parameters.
constructor() {
this.label = 'Agent'
this.name = 'seqAgent'
this.version = 1.0
this.type = 'Agent'
this.icon = 'seqAgent.png'
this.category = 'Sequential Agents'
this.description = 'Agent that can execute tools'
this.baseClasses = [this.type]
this.inputs = [
{
label: 'Agent Name',
name: 'agentName',
type: 'string',
placeholder: 'Agent'
},
{
label: 'System Prompt',
name: 'systemMessagePrompt',
type: 'string',
rows: 4,
optional: true,
default: examplePrompt
},
{
label: 'Human Prompt',
name: 'humanMessagePrompt',
type: 'string',
description: 'This prompt will be added at the end of the messages as human message',
rows: 4,
optional: true,
additionalParams: true
},
{
label: 'Tools',
name: 'tools',
type: 'Tool',
list: true
},
// Predecessor node(s) in the sequential graph.
{
label: 'Start | Agent | LLM | Tool Node',
name: 'sequentialNode',
type: 'Start | Agent | LLMNode | ToolNode',
list: true
},
// Optional per-agent model; falls back to the START node's LLM in init().
{
label: 'Chat Model',
name: 'model',
type: 'BaseChatModel',
optional: true,
description: `Overwrite model to be used for this agent`
},
// Human-in-the-loop toggle.
{
label: 'Require Approval',
name: 'interrupt',
description: 'Require approval before executing tools. Will proceed when tools are not called',
type: 'boolean',
optional: true
},
{
label: 'Format Prompt Values',
name: 'promptValues',
description: 'Assign values to the prompt variables. You can also use $flow.state.<variable-name> to get the state value',
type: 'json',
optional: true,
acceptVariable: true,
list: true
},
{
label: 'Approval Prompt',
name: 'approvalPrompt',
description: 'Prompt for approval. Only applicable if "Require Approval" is enabled',
type: 'string',
default: defaultApprovalPrompt,
rows: 4,
optional: true,
additionalParams: true
},
{
label: 'Approve Button Text',
name: 'approveButtonText',
description: 'Text for approve button. Only applicable if "Require Approval" is enabled',
type: 'string',
default: 'Yes',
optional: true,
additionalParams: true
},
{
label: 'Reject Button Text',
name: 'rejectButtonText',
description: 'Text for reject button. Only applicable if "Require Approval" is enabled',
type: 'string',
default: 'No',
optional: true,
additionalParams: true
},
// Two alternative UIs (table vs. code) for updating custom State.
{
label: 'Update State',
name: 'updateStateMemory',
type: 'tabs',
tabIdentifier: TAB_IDENTIFIER,
additionalParams: true,
default: 'updateStateMemoryUI',
tabs: [
{
label: 'Update State (Table)',
name: 'updateStateMemoryUI',
type: 'datagrid',
hint: {
label: 'How to use',
value: howToUse
},
description: customOutputFuncDesc,
datagrid: [
{
field: 'key',
headerName: 'Key',
type: 'asyncSingleSelect',
loadMethod: 'loadStateKeys',
flex: 0.5,
editable: true
},
{
field: 'value',
headerName: 'Value',
type: 'freeSolo',
valueOptions: [
{
label: 'Agent Output (string)',
value: '$flow.output.content'
},
{
label: `Used Tools (array)`,
value: '$flow.output.usedTools'
},
{
label: `First Tool Output (string)`,
value: '$flow.output.usedTools[0].toolOutput'
},
{
label: 'Source Documents (array)',
value: '$flow.output.sourceDocuments'
},
{
label: `Global variable (string)`,
value: '$vars.<variable-name>'
},
{
label: 'Input Question (string)',
value: '$flow.input'
},
{
label: 'Session Id (string)',
value: '$flow.sessionId'
},
{
label: 'Chat Id (string)',
value: '$flow.chatId'
},
{
label: 'Chatflow Id (string)',
value: '$flow.chatflowId'
}
],
editable: true,
flex: 1
}
],
optional: true,
additionalParams: true
},
{
label: 'Update State (Code)',
name: 'updateStateMemoryCode',
type: 'code',
hint: {
label: 'How to use',
value: howToUseCode
},
description: `${customOutputFuncDesc}. Must return an object representing the state`,
hideCodeExecute: true,
codeExample: defaultFunc,
optional: true,
additionalParams: true
}
]
},
{
label: 'Max Iterations',
name: 'maxIterations',
type: 'number',
optional: true,
additionalParams: true
}
]
}
/**
 * Builds the ISeqAgentNode for this agent: resolves the LLM, validates
 * prompt variables, and wires the worker node plus (when "Require Approval"
 * is on) an interrupting ToolNode and the graph-wiring function for it.
 */
async init(nodeData: INodeData, input: string, options: ICommonObject): Promise<any> {
let tools = nodeData.inputs?.tools
tools = flatten(tools)
let agentSystemPrompt = nodeData.inputs?.systemMessagePrompt as string
let agentHumanPrompt = nodeData.inputs?.humanMessagePrompt as string
const agentLabel = nodeData.inputs?.agentName as string
const sequentialNodes = nodeData.inputs?.sequentialNode as ISeqAgentNode[]
const maxIterations = nodeData.inputs?.maxIterations as string
const model = nodeData.inputs?.model as BaseChatModel
const promptValuesStr = nodeData.inputs?.promptValues
const output = nodeData.outputs?.output as string
const approvalPrompt = nodeData.inputs?.approvalPrompt as string
if (!agentLabel) throw new Error('Agent name is required!')
// Graph node names must be lowercase with no whitespace.
const agentName = agentLabel.toLowerCase().replace(/\s/g, '_').trim()
if (!sequentialNodes || !sequentialNodes.length) throw new Error('Agent must have a predecessor!')
let agentInputVariablesValues: ICommonObject = {}
if (promptValuesStr) {
try {
agentInputVariablesValues = typeof promptValuesStr === 'object' ? promptValuesStr : JSON.parse(promptValuesStr)
} catch (exception) {
throw new Error("Invalid JSON in the Agent's Prompt Input Values: " + exception)
}
}
agentInputVariablesValues = handleEscapeCharacters(agentInputVariablesValues, true)
// Per-agent model overrides the START node's LLM when provided.
const startLLM = sequentialNodes[0].startLLM
const llm = model || startLLM
if (nodeData.inputs) nodeData.inputs.model = llm
// Reuse the predecessor's image content when present; otherwise build it.
const multiModalMessageContent = sequentialNodes[0]?.multiModalMessageContent || (await processImageMessage(llm, nodeData, options))
const abortControllerSignal = options.signal as AbortController
// Every {variable} referenced in the prompts must have a supplied value.
const agentInputVariables = uniq([...getInputVariables(agentSystemPrompt), ...getInputVariables(agentHumanPrompt)])
if (!agentInputVariables.every((element) => Object.keys(agentInputVariablesValues).includes(element))) {
throw new Error('Agent input variables values are not provided!')
}
const interrupt = nodeData.inputs?.interrupt as boolean
const toolName = `tool_${nodeData.id}`
const toolNode = new ToolNode(tools, nodeData, input, options, toolName, [], { sequentialNodeName: toolName })
// Attached dynamically: generates the approval question shown to the user
// before the interrupted tools are executed.
;(toolNode as any).seekPermissionMessage = async (usedTools: IUsedTool[]) => {
const prompt = ChatPromptTemplate.fromMessages([['human', approvalPrompt || defaultApprovalPrompt]])
const chain = prompt.pipe(startLLM)
const response = (await chain.invoke({
input: 'Hello there!',
tools: JSON.stringify(usedTools)
})) as AIMessageChunk
return response.content
}
// The graph node itself: builds the agent fresh per invocation so the
// current State is baked into the prompt-value resolvers.
const workerNode = async (state: ISeqAgentsState, config: RunnableConfig) => {
return await agentNode(
{
state,
llm,
interrupt,
agent: await createAgent(
agentName,
state,
llm,
interrupt,
[...tools],
agentSystemPrompt,
agentHumanPrompt,
multiModalMessageContent,
agentInputVariablesValues,
maxIterations,
{
sessionId: options.sessionId,
chatId: options.chatId,
input
}
),
name: agentName,
abortControllerSignal,
nodeData,
input,
options
},
config
)
}
// When approval is required, the graph builder calls this to add the tool
// node and the conditional edges that route to it (or onward) based on
// whether the agent's last message requested tool calls.
const toolInterrupt = async (
graph: StateGraph<any>,
nextNodeName?: string,
runCondition?: any,
conditionalMapping: ICommonObject = {}
) => {
const routeMessage = async (state: ISeqAgentsState) => {
const messages = state.messages as unknown as BaseMessage[]
const lastMessage = messages[messages.length - 1] as AIMessage
// No tool calls -> skip the tool node entirely.
if (!lastMessage?.tool_calls?.length) {
// if next node is condition node, run the condition
if (runCondition) {
const returnNodeName = await runCondition(state)
return returnNodeName
}
return nextNodeName || END
}
return toolName
}
graph.addNode(toolName, toolNode)
if (nextNodeName) {
// @ts-ignore
graph.addConditionalEdges(agentName, routeMessage, {
[toolName]: toolName,
[END]: END,
[nextNodeName]: nextNodeName,
...conditionalMapping
})
} else {
// @ts-ignore
graph.addConditionalEdges(agentName, routeMessage, { [toolName]: toolName, [END]: END, ...conditionalMapping })
}
// After the tools run, control returns to the agent.
// @ts-ignore
graph.addEdge(toolName, agentName)
return graph
}
const returnOutput: ISeqAgentNode = {
id: nodeData.id,
node: workerNode,
name: agentName,
label: agentLabel,
type: 'agent',
llm,
startLLM,
output,
predecessorAgents: sequentialNodes,
multiModalMessageContent,
moderations: sequentialNodes[0]?.moderations,
// Only exposed when approval is enabled; the graph builder uses these.
agentInterruptToolNode: interrupt ? toolNode : undefined,
agentInterruptToolFunc: interrupt ? toolInterrupt : undefined
}
return returnOutput
}
}
/**
 * Builds the runnable for this agent, one of three shapes:
 *  - tools + no interrupt: a full tool-calling AgentExecutor (scratchpad loop)
 *  - tools + interrupt:    a plain prompt|model chain with tools bound, so the
 *    raw tool_calls surface in the output message for the interrupt router
 *  - no tools:             a simple conversation chain with string output
 * The `state` is captured by the prompt-value resolver functions so values
 * like $flow.state.<key> are evaluated against the current graph State.
 */
async function createAgent(
agentName: string,
state: ISeqAgentsState,
llm: BaseChatModel,
interrupt: boolean,
tools: any[],
systemPrompt: string,
humanPrompt: string,
multiModalMessageContent: MessageContentImageUrl[],
agentInputVariablesValues: ICommonObject,
maxIterations?: string,
flowObj?: { sessionId?: string; chatId?: string; input?: string }
): Promise<any> {
if (tools.length && !interrupt) {
// Tool-calling executor: messages + agent_scratchpad placeholders.
const promptArrays = [
new MessagesPlaceholder('messages'),
new MessagesPlaceholder('agent_scratchpad')
] as BaseMessagePromptTemplateLike[]
if (systemPrompt) promptArrays.unshift(['system', systemPrompt])
if (humanPrompt) promptArrays.push(['human', humanPrompt])
const prompt = ChatPromptTemplate.fromMessages(promptArrays)
if (multiModalMessageContent.length) {
// Insert image content right after the system message.
const msg = HumanMessagePromptTemplate.fromTemplate([...multiModalMessageContent])
prompt.promptMessages.splice(1, 0, msg)
}
if (llm.bindTools === undefined) {
throw new Error(`This agent only compatible with function calling models.`)
}
const modelWithTools = llm.bindTools(tools)
let agent
if (!agentInputVariablesValues || !Object.keys(agentInputVariablesValues).length) {
agent = RunnableSequence.from([
RunnablePassthrough.assign({
//@ts-ignore
agent_scratchpad: (input: { steps: ToolsAgentStep[] }) => formatToOpenAIToolMessages(input.steps)
}),
prompt,
modelWithTools,
new ToolCallingAgentOutputParser()
]).withConfig({
metadata: { sequentialNodeName: agentName }
})
} else {
agent = RunnableSequence.from([
RunnablePassthrough.assign({
//@ts-ignore
agent_scratchpad: (input: { steps: ToolsAgentStep[] }) => formatToOpenAIToolMessages(input.steps)
}),
// Resolves user-supplied prompt values (incl. $flow.state) at runtime.
RunnablePassthrough.assign(transformObjectPropertyToFunction(agentInputVariablesValues, state)),
prompt,
modelWithTools,
new ToolCallingAgentOutputParser()
]).withConfig({
metadata: { sequentialNodeName: agentName }
})
}
const executor = AgentExecutor.fromAgentAndTools({
agent,
tools,
sessionId: flowObj?.sessionId,
chatId: flowObj?.chatId,
input: flowObj?.input,
verbose: process.env.DEBUG === 'true' ? true : false,
maxIterations: maxIterations ? parseFloat(maxIterations) : undefined
})
return executor
} else if (tools.length && interrupt) {
// Interrupt mode: bind tools but do NOT run them here — the ToolNode
// added by toolInterrupt executes them after user approval.
if (llm.bindTools === undefined) {
throw new Error(`Agent Node only compatible with function calling models.`)
}
// @ts-ignore
llm = llm.bindTools(tools)
const promptArrays = [new MessagesPlaceholder('messages')] as BaseMessagePromptTemplateLike[]
if (systemPrompt) promptArrays.unshift(['system', systemPrompt])
if (humanPrompt) promptArrays.push(['human', humanPrompt])
const prompt = ChatPromptTemplate.fromMessages(promptArrays)
if (multiModalMessageContent.length) {
const msg = HumanMessagePromptTemplate.fromTemplate([...multiModalMessageContent])
prompt.promptMessages.splice(1, 0, msg)
}
let agent
if (!agentInputVariablesValues || !Object.keys(agentInputVariablesValues).length) {
agent = RunnableSequence.from([prompt, llm]).withConfig({
metadata: { sequentialNodeName: agentName }
})
} else {
agent = RunnableSequence.from([
RunnablePassthrough.assign(transformObjectPropertyToFunction(agentInputVariablesValues, state)),
prompt,
llm
]).withConfig({
metadata: { sequentialNodeName: agentName }
})
}
return agent
} else {
// No tools: plain conversation chain returning a string.
const promptArrays = [new MessagesPlaceholder('messages')] as BaseMessagePromptTemplateLike[]
if (systemPrompt) promptArrays.unshift(['system', systemPrompt])
if (humanPrompt) promptArrays.push(['human', humanPrompt])
const prompt = ChatPromptTemplate.fromMessages(promptArrays)
if (multiModalMessageContent.length) {
const msg = HumanMessagePromptTemplate.fromTemplate([...multiModalMessageContent])
prompt.promptMessages.splice(1, 0, msg)
}
let conversationChain
if (!agentInputVariablesValues || !Object.keys(agentInputVariablesValues).length) {
conversationChain = RunnableSequence.from([prompt, llm, new StringOutputParser()]).withConfig({
metadata: { sequentialNodeName: agentName }
})
} else {
conversationChain = RunnableSequence.from([
RunnablePassthrough.assign(transformObjectPropertyToFunction(agentInputVariablesValues, state)),
prompt,
llm,
new StringOutputParser()
]).withConfig({
metadata: { sequentialNodeName: agentName }
})
}
return conversationChain
}
}
/**
 * Graph-node entry point for the Sequential Agents "Agent" node.
 *
 * Restructures the accumulated state messages for the given chat model,
 * invokes the agent (an AgentExecutor or a raw RunnableSequence), and
 * normalises the result into the graph-state shape `{ messages: [...] }`,
 * plus any custom state keys when "Update State" is configured on the node.
 *
 * @param state - shared sequential-agents graph state (holds `messages`)
 * @param llm - chat model used to decide how messages must be restructured
 * @param interrupt - whether this node participates in human-in-the-loop pauses
 * @param agent - runnable produced by createAgent for this node
 * @param name - node name attached to the emitted message
 * @param abortControllerSignal - cooperative cancellation for the whole run
 * @param nodeData - Flowise node definition (id, inputs) for this node
 * @param input - original user input, forwarded to state-update sandboxes
 * @param options - Flowise runtime context (datasource, ids, ...)
 * @param config - LangChain runnable config propagated into the invocation
 * @returns partial state update merged into the graph state
 * @throws Error when the run was aborted or the agent invocation fails
 */
async function agentNode(
    {
        state,
        llm,
        interrupt,
        agent,
        name,
        abortControllerSignal,
        nodeData,
        input,
        options
    }: {
        state: ISeqAgentsState
        llm: BaseChatModel
        interrupt: boolean
        agent: AgentExecutor | RunnableSequence
        name: string
        abortControllerSignal: AbortController
        nodeData: INodeData
        input: string
        options: ICommonObject
    },
    config: RunnableConfig
) {
    try {
        // Bail out early if the overall run was cancelled.
        if (abortControllerSignal.signal.aborted) {
            throw new Error('Aborted!')
        }
        // Reshape the accumulated messages into the structure this particular
        // chat model expects before invoking the agent.
        // @ts-ignore
        state.messages = restructureMessages(llm, state)
        let result = await agent.invoke({ ...state, signal: abortControllerSignal.signal }, config)
        if (interrupt) {
            const messages = state.messages as unknown as BaseMessage[]
            const lastMessage = messages[messages.length - 1]
            // If the last message is a tool message and is an interrupted message, format output into standard agent output
            if (lastMessage._getType() === 'tool' && lastMessage.additional_kwargs?.nodeId === nodeData.id) {
                let formattedAgentResult: { output?: string; usedTools?: IUsedTool[]; sourceDocuments?: IDocument[] } = {}
                formattedAgentResult.output = result.content
                if (lastMessage.additional_kwargs?.usedTools) {
                    formattedAgentResult.usedTools = lastMessage.additional_kwargs.usedTools as IUsedTool[]
                }
                if (lastMessage.additional_kwargs?.sourceDocuments) {
                    formattedAgentResult.sourceDocuments = lastMessage.additional_kwargs.sourceDocuments as IDocument[]
                }
                result = formattedAgentResult
            } else {
                // First pass through an interrupt-enabled node: tag the raw model
                // message so the graph can pause here and be resumed later.
                result.name = name
                result.additional_kwargs = { ...result.additional_kwargs, nodeId: nodeData.id, interrupt: true }
                return {
                    messages: [result]
                }
            }
        }
        // Carry tool-usage / retrieval metadata alongside the emitted message.
        const additional_kwargs: ICommonObject = { nodeId: nodeData.id }
        if (result.usedTools) {
            additional_kwargs.usedTools = result.usedTools
        }
        if (result.sourceDocuments) {
            additional_kwargs.sourceDocuments = result.sourceDocuments
        }
        // AgentExecutor results use `output`; normalise to `content`.
        if (result.output) {
            result.content = result.output
            delete result.output
        }
        const outputContent = typeof result === 'string' ? result : result.content || result.output
        if (nodeData.inputs?.updateStateMemoryUI || nodeData.inputs?.updateStateMemoryCode) {
            // Node is configured to write custom keys back into the graph state.
            let formattedOutput = {
                ...result,
                content: outputContent
            }
            const returnedOutput = await getReturnOutput(nodeData, input, options, formattedOutput, state)
            return {
                ...returnedOutput,
                messages: convertCustomMessagesToBaseMessages([outputContent], name, additional_kwargs)
            }
        } else {
            // NOTE(review): the agent's answer is stored as a HumanMessage —
            // presumably so downstream prompts treat it as conversational input;
            // confirm this is intentional rather than an AIMessage.
            return {
                messages: [
                    new HumanMessage({
                        content: outputContent,
                        name,
                        additional_kwargs: Object.keys(additional_kwargs).length ? additional_kwargs : undefined
                    })
                ]
            }
        }
    } catch (error) {
        // NOTE(review): `new Error(error)` stringifies the original error and
        // drops its stack; consider rethrowing or using the `cause` option.
        throw new Error(error)
    }
}
const getReturnOutput = async (nodeData: INodeData, input: string, options: ICommonObject, output: any, state: ISeqAgentsState) => {
const appDataSource = options.appDataSource as DataSource
const databaseEntities = options.databaseEntities as IDatabaseEntity
const tabIdentifier = nodeData.inputs?.[`${TAB_IDENTIFIER}_${nodeData.id}`] as string
const updateStateMemoryUI = nodeData.inputs?.updateStateMemoryUI as string
const updateStateMemoryCode = nodeData.inputs?.updateStateMemoryCode as string
const selectedTab = tabIdentifier ? tabIdentifier.split(`_${nodeData.id}`)[0] : 'updateStateMemoryUI'
const variables = await getVars(appDataSource, databaseEntities, nodeData)
const flow = {
chatflowId: options.chatflowid,
sessionId: options.sessionId,
chatId: options.chatId,
input,
output,
state,
vars: prepareSandboxVars(variables)
}
if (selectedTab === 'updateStateMemoryUI' && updateStateMemoryUI) {
try {
const parsedSchema = typeof updateStateMemoryUI === 'string' ? JSON.parse(updateStateMemoryUI) : updateStateMemoryUI
const obj: ICommonObject = {}
for (const sch of parsedSchema) {
const key = sch.key
if (!key) throw new Error(`Key is required`)
let value = sch.value as string
if (value.startsWith('$flow')) {
value = customGet(flow, sch.value.replace('$flow.', ''))
} else if (value.startsWith('$vars')) {
value = customGet(flow, sch.value.replace('$', ''))
}
obj[key] = value
}
return obj
} catch (e) {
throw new Error(e)
}
} else if (selectedTab === 'updateStateMemoryCode' && updateStateMemoryCode) {
const vm = await getVM(appDataSource, databaseEntities, nodeData, flow)
try {
const response = await vm.run(`module.exports = async function() {${updateStateMemoryCode}}()`, __dirname)
if (typeof response !== 'object') throw new Error('Return output must be an object')
return response
} catch (e) {
throw new Error(e)
}
}
return {}
}
/**
 * Wraps plain string outputs as HumanMessage instances so they can be stored
 * in the graph state, tagging each with the node name and any extra kwargs.
 */
const convertCustomMessagesToBaseMessages = (messages: string[], name: string, additional_kwargs: ICommonObject) => {
    const hasKwargs = Object.keys(additional_kwargs).length > 0
    return messages.map(
        (content) =>
            new HumanMessage({
                content,
                name,
                additional_kwargs: hasKwargs ? additional_kwargs : undefined
            })
    )
}
/**
 * LangGraph-style tool-execution node for Sequential Agents.
 *
 * Takes the last AIMessage from the incoming state, executes every tool call
 * it carries, and returns the resulting ToolMessages (as an array or wrapped
 * in `{ messages }`, mirroring the input shape). Each ToolMessage carries the
 * tool's source documents, call args and usage metadata in additional_kwargs,
 * plus this node's id so downstream conditions can locate its output.
 */
class ToolNode<T extends BaseMessage[] | MessagesState> extends RunnableCallable<T, T> {
    tools: StructuredTool[]
    nodeData: INodeData
    inputQuery: string
    options: ICommonObject

    constructor(
        tools: StructuredTool[],
        nodeData: INodeData,
        inputQuery: string,
        options: ICommonObject,
        name: string = 'tools',
        tags: string[] = [],
        metadata: ICommonObject = {}
    ) {
        super({ name, metadata, tags, func: (input, config) => this.run(input, config) })
        this.tools = tools
        this.nodeData = nodeData
        this.inputQuery = inputQuery
        this.options = options
    }

    /**
     * Executes all tool calls on the last AI message in parallel.
     * @throws Error when the last message is not an AIMessage or a requested tool is unknown.
     */
    private async run(input: BaseMessage[] | MessagesState, config: RunnableConfig): Promise<BaseMessage[] | MessagesState> {
        const message = Array.isArray(input) ? input[input.length - 1] : input.messages[input.messages.length - 1]
        if (message._getType() !== 'ai') {
            throw new Error('ToolNode only accepts AIMessages as input.')
        }
        const outputs = await Promise.all(
            (message as AIMessage).tool_calls?.map(async (call) => {
                const tool = this.tools.find((t) => t.name === call.name)
                if (tool === undefined) {
                    throw new Error(`Tool ${call.name} not found.`)
                }
                let output = await tool.invoke(call.args, config)
                let sourceDocuments: Document[] = []
                // FIX: tools may return non-string payloads (see JSON.stringify below);
                // calling .includes on those threw a TypeError. Only string outputs
                // can embed the source-documents marker.
                if (typeof output === 'string' && output.includes(SOURCE_DOCUMENTS_PREFIX)) {
                    // Split the serialized source documents off the textual answer.
                    const outputArray = output.split(SOURCE_DOCUMENTS_PREFIX)
                    output = outputArray[0]
                    const docs = outputArray[1]
                    try {
                        sourceDocuments = JSON.parse(docs)
                    } catch (e) {
                        console.error('Error parsing source documents from tool')
                    }
                }
                return new ToolMessage({
                    name: tool.name,
                    content: typeof output === 'string' ? output : JSON.stringify(output),
                    tool_call_id: call.id!,
                    additional_kwargs: {
                        sourceDocuments,
                        args: call.args,
                        usedTools: [
                            {
                                tool: tool.name ?? '',
                                toolInput: call.args,
                                toolOutput: output
                            }
                        ]
                    }
                })
            }) ?? []
        )
        // Tag every tool message with this node's id for downstream routing.
        const additional_kwargs: ICommonObject = { nodeId: this.nodeData.id }
        outputs.forEach((result) => (result.additional_kwargs = { ...result.additional_kwargs, ...additional_kwargs }))
        return Array.isArray(input) ? outputs : { messages: outputs }
    }
}
module.exports = { nodeClass: Agent_SeqAgents }

View File

@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round" class="icon icon-tabler icons-tabler-outline icon-tabler-user-circle"><path stroke="none" d="M0 0h24v24H0z" fill="none"/><path d="M12 12m-9 0a9 9 0 1 0 18 0a9 9 0 1 0 -18 0" /><path d="M12 10m-3 0a3 3 0 1 0 6 0a3 3 0 1 0 -6 0" /><path d="M6.168 18.849a4 4 0 0 1 3.832 -2.849h4a4 4 0 0 1 3.834 2.855" /></svg>

After

Width:  |  Height:  |  Size: 499 B

Binary file not shown.

After

Width:  |  Height:  |  Size: 16 KiB

View File

@ -0,0 +1,325 @@
import { DataSource } from 'typeorm'
import { BaseMessage } from '@langchain/core/messages'
import {
ICommonObject,
IDatabaseEntity,
INode,
INodeData,
INodeOutputsValue,
INodeParams,
ISeqAgentNode,
ISeqAgentsState
} from '../../../src/Interface'
import { checkCondition, customGet, getVM } from '../commonUtils'
import { getVars, prepareSandboxVars } from '../../../src/utils'
// Hint text rendered in the UI for the "Condition (Code)" tab; documents the
// sandboxed $flow/$vars variables available to the user-written function.
const howToUseCode = `
1. Must return a string value at the end of function. For example:
\`\`\`js
if ("X" === "X") {
return "Agent"; // connect to next agent node
} else {
return "End"; // connect to end node
}
\`\`\`
2. In most cases, you would probably get the last message to do some comparison. You can get all current messages from the state: \`$flow.state.messages\`:
\`\`\`json
[
{
"content": "Hello! How can I assist you today?",
"name": "",
"additional_kwargs": {},
"response_metadata": {},
"tool_calls": [],
"invalid_tool_calls": [],
"usage_metadata": {}
}
]
\`\`\`
For example, to get the last message content:
\`\`\`js
const messages = $flow.state.messages;
const lastMessage = messages[messages.length - 1];
// Proceed to do something with the last message content
\`\`\`
3. You can get default flow config, including the current "state":
- \`$flow.sessionId\`
- \`$flow.chatId\`
- \`$flow.chatflowId\`
- \`$flow.input\`
- \`$flow.state\`
4. You can get custom variables: \`$vars.<variable-name>\`
`
// Default example shown in the code editor for the condition function.
const defaultFunc = `const state = $flow.state;
const messages = state.messages;
const lastMessage = messages[messages.length - 1];
/* Check if the last message has content */
if (lastMessage.content) {
return "Agent";
}
return "End";`
// Per-node suffix key used to remember which condition tab (table vs code) is selected.
const TAB_IDENTIFIER = 'selectedConditionFunctionTab'

// One row of the "Condition (Table)" datagrid.
interface IConditionGridItem {
    variable: string // value to inspect, e.g. $flow.state.messages[-1].content or $vars.<name>
    operation: string // comparison operator label, e.g. 'Contains', 'Is Empty'
    value: string // right-hand operand for the comparison
    output: string // name of the output anchor to route to when the condition matches
}
/**
 * Sequential Agents "Condition" node.
 *
 * Produces a LangGraph conditional edge: after its predecessor runs, the
 * configured condition (datagrid table or user-written code) is evaluated
 * against the current graph state and returns the name of the output anchor
 * ("Next"/"End" or a custom table output) to follow.
 */
class Condition_SeqAgents implements INode {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    baseClasses: string[]
    credential: INodeParams
    inputs: INodeParams[]
    outputs: INodeOutputsValue[]

    constructor() {
        this.label = 'Condition'
        this.name = 'seqCondition'
        this.version = 1.0
        this.type = 'Condition'
        this.icon = 'condition.svg'
        this.category = 'Sequential Agents'
        this.description = 'Conditional function to determine which route to take next'
        this.baseClasses = [this.type]
        this.inputs = [
            {
                label: 'Condition Name',
                name: 'conditionName',
                type: 'string',
                optional: true,
                placeholder: 'If X, then Y'
            },
            {
                label: 'Start | Agent | LLM | Tool Node',
                name: 'sequentialNode',
                type: 'Start | Agent | LLMNode | ToolNode',
                list: true
            },
            {
                label: 'Condition',
                name: 'condition',
                type: 'conditionFunction', // This is a custom type to show as button on the UI and render anchor points when saved
                tabIdentifier: TAB_IDENTIFIER,
                tabs: [
                    {
                        label: 'Condition (Table)',
                        name: 'conditionUI',
                        type: 'datagrid',
                        description: 'If a condition is met, the node connected to the respective output will be executed',
                        optional: true,
                        datagrid: [
                            {
                                field: 'variable',
                                headerName: 'Variable',
                                type: 'freeSolo',
                                editable: true,
                                loadMethod: ['getPreviousMessages', 'loadStateKeys'],
                                valueOptions: [
                                    {
                                        label: 'Total Messages (number)',
                                        value: '$flow.state.messages.length'
                                    },
                                    {
                                        label: 'First Message Content (string)',
                                        value: '$flow.state.messages[0].content'
                                    },
                                    {
                                        label: 'Last Message Content (string)',
                                        value: '$flow.state.messages[-1].content'
                                    },
                                    {
                                        label: `Global variable (string)`,
                                        value: '$vars.<variable-name>'
                                    }
                                ],
                                flex: 0.5,
                                minWidth: 200
                            },
                            {
                                field: 'operation',
                                headerName: 'Operation',
                                type: 'singleSelect',
                                valueOptions: [
                                    'Contains',
                                    'Not Contains',
                                    'Start With',
                                    'End With',
                                    'Is',
                                    'Is Not',
                                    'Is Empty',
                                    'Is Not Empty',
                                    'Greater Than',
                                    'Less Than',
                                    'Equal To',
                                    'Not Equal To',
                                    'Greater Than or Equal To',
                                    'Less Than or Equal To'
                                ],
                                editable: true,
                                flex: 0.4,
                                minWidth: 150
                            },
                            {
                                field: 'value',
                                headerName: 'Value',
                                flex: 1,
                                editable: true
                            },
                            {
                                field: 'output',
                                headerName: 'Output Name',
                                editable: true,
                                flex: 0.3,
                                minWidth: 150
                            }
                        ]
                    },
                    {
                        label: 'Condition (Code)',
                        name: 'conditionFunction',
                        type: 'code',
                        description: 'Function to evaluate the condition',
                        hint: {
                            label: 'How to use',
                            value: howToUseCode
                        },
                        hideCodeExecute: true,
                        codeExample: defaultFunc,
                        optional: true
                    }
                ]
            }
        ]
        this.outputs = [
            {
                label: 'Next',
                name: 'next',
                baseClasses: ['Agent', 'LLMNode', 'ToolNode'],
                isAnchor: true
            },
            {
                label: 'End',
                name: 'end',
                baseClasses: ['Agent', 'LLMNode', 'ToolNode'],
                isAnchor: true
            }
        ]
    }

    /**
     * Builds the ISeqAgentNode wrapper whose `node` is the conditional-edge
     * function evaluated by the graph at runtime.
     */
    async init(nodeData: INodeData, input: string, options: ICommonObject): Promise<any> {
        const conditionLabel = nodeData.inputs?.conditionName as string
        // FIX: trim BEFORE replacing whitespace. The previous
        // `.replace(/\s/g, '_').trim()` order made trim() a no-op, so padded
        // labels produced leading/trailing underscores in the node name.
        // NOTE(review): conditionName input is optional — a missing label still
        // throws a TypeError here; confirm whether a default should apply.
        const conditionName = conditionLabel.trim().toLowerCase().replace(/\s/g, '_')
        const output = nodeData.outputs?.output as string
        const sequentialNodes = nodeData.inputs?.sequentialNode as ISeqAgentNode[]
        if (!sequentialNodes || !sequentialNodes.length) throw new Error('Condition must have a predecessor!')

        const startLLM = sequentialNodes[0].startLLM
        // The conditional edge defers evaluation until the graph reaches this node.
        const conditionalEdge = async (state: ISeqAgentsState) => await runCondition(nodeData, input, options, state)

        const returnOutput: ISeqAgentNode = {
            id: nodeData.id,
            node: conditionalEdge,
            name: conditionName,
            label: conditionLabel,
            type: 'condition',
            output,
            llm: startLLM,
            startLLM,
            multiModalMessageContent: sequentialNodes[0]?.multiModalMessageContent,
            predecessorAgents: sequentialNodes
        }
        return returnOutput
    }
}
/**
 * Evaluates the node's condition against the current graph state and returns
 * the name of the output anchor to route to.
 *
 * Two configuration modes (chosen by the node's selected tab):
 *  - 'conditionFunction': user code run in a sandboxed VM, must return a string
 *  - 'conditionUI': datagrid rows compared with checkCondition(); returns the
 *    matching row's output, or 'End' when no row matches
 *
 * NOTE(review): if the selected tab has no configuration, the function falls
 * through and resolves to undefined — confirm the graph builder tolerates that.
 */
const runCondition = async (nodeData: INodeData, input: string, options: ICommonObject, state: ISeqAgentsState) => {
    const appDataSource = options.appDataSource as DataSource
    const databaseEntities = options.databaseEntities as IDatabaseEntity
    const conditionUI = nodeData.inputs?.conditionUI as string
    const conditionFunction = nodeData.inputs?.conditionFunction as string
    const tabIdentifier = nodeData.inputs?.[`${TAB_IDENTIFIER}_${nodeData.id}`] as string
    const selectedTab = tabIdentifier ? tabIdentifier.split(`_${nodeData.id}`)[0] : 'conditionUI'
    const variables = await getVars(appDataSource, databaseEntities, nodeData)
    // Sandbox context exposed to user code / placeholders as $flow and $vars.
    const flow = {
        chatflowId: options.chatflowid,
        sessionId: options.sessionId,
        chatId: options.chatId,
        input,
        state,
        vars: prepareSandboxVars(variables)
    }
    if (selectedTab === 'conditionFunction' && conditionFunction) {
        const vm = await getVM(appDataSource, databaseEntities, nodeData, flow)
        try {
            const response = await vm.run(`module.exports = async function() {${conditionFunction}}()`, __dirname)
            if (typeof response !== 'string') throw new Error('Condition function must return a string')
            return response
        } catch (e) {
            throw new Error(e)
        }
    } else if (selectedTab === 'conditionUI' && conditionUI) {
        try {
            const conditionItems: IConditionGridItem[] = typeof conditionUI === 'string' ? JSON.parse(conditionUI) : conditionUI
            for (const item of conditionItems) {
                if (!item.variable) throw new Error('Condition variable is required!')
                if (item.variable.startsWith('$flow')) {
                    // Resolve against the flow context (state, input, ids, ...).
                    const variableValue = customGet(flow, item.variable.replace('$flow.', ''))
                    if (checkCondition(variableValue, item.operation, item.value)) {
                        return item.output
                    }
                } else if (item.variable.startsWith('$vars')) {
                    // Resolve against sandboxed global variables.
                    const variableValue = customGet(flow, item.variable.replace('$', ''))
                    if (checkCondition(variableValue, item.operation, item.value)) {
                        return item.output
                    }
                } else if (item.variable.startsWith('$')) {
                    // '$<nodeId>' — compare against the latest message emitted by that node.
                    const nodeId = item.variable.replace('$', '')
                    const messageOutputs = ((state.messages as unknown as BaseMessage[]) ?? []).filter(
                        (message) => message.additional_kwargs && message.additional_kwargs?.nodeId === nodeId
                    )
                    const messageOutput = messageOutputs[messageOutputs.length - 1]
                    if (messageOutput) {
                        if (checkCondition(messageOutput.content as string, item.operation, item.value)) {
                            return item.output
                        }
                    }
                }
            }
            // No row matched: route to the default 'End' anchor.
            return 'End'
        } catch (exception) {
            throw new Error('Invalid Condition: ' + exception)
        }
    }
}
module.exports = { nodeClass: Condition_SeqAgents }

View File

@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round" class="icon icon-tabler icons-tabler-outline icon-tabler-arrows-split"><path stroke="none" d="M0 0h24v24H0z" fill="none"/><path d="M21 17h-8l-3.5 -5h-6.5" /><path d="M21 7h-8l-3.495 5" /><path d="M18 10l3 -3l-3 -3" /><path d="M18 20l3 -3l-3 -3" /></svg>

After

Width:  |  Height:  |  Size: 444 B

View File

@ -0,0 +1,556 @@
import { uniq } from 'lodash'
import { DataSource } from 'typeorm'
import { z } from 'zod'
import { BaseMessagePromptTemplateLike, ChatPromptTemplate, MessagesPlaceholder } from '@langchain/core/prompts'
import { RunnableSequence, RunnablePassthrough, RunnableConfig } from '@langchain/core/runnables'
import { BaseMessage } from '@langchain/core/messages'
import { BaseChatModel } from '@langchain/core/language_models/chat_models'
import {
ICommonObject,
IDatabaseEntity,
INode,
INodeData,
INodeOutputsValue,
INodeParams,
ISeqAgentNode,
ISeqAgentsState
} from '../../../src/Interface'
import { getInputVariables, getVars, handleEscapeCharacters, prepareSandboxVars } from '../../../src/utils'
import {
ExtractTool,
checkCondition,
convertStructuredSchemaToZod,
customGet,
getVM,
transformObjectPropertyToFunction,
restructureMessages
} from '../commonUtils'
import { ChatGoogleGenerativeAI } from '../../chatmodels/ChatGoogleGenerativeAI/FlowiseChatGoogleGenerativeAI'
// One row of the "Condition (Table)" datagrid.
interface IConditionGridItem {
    variable: string // value to inspect, e.g. $flow.output.content or $vars.<name>
    operation: string // comparison operator label, e.g. 'Contains', 'Is Empty'
    value: string // right-hand operand for the comparison
    output: string // name of the output anchor to route to when the condition matches
}
// Default system-prompt example pre-filled in the node UI.
const examplePrompt = `You are an expert customer support routing system.
Your job is to detect whether a customer support representative is routing a user to the technical support team, or just responding conversationally.`
// Default human-prompt example pre-filled in the node UI.
const exampleHumanPrompt = `The previous conversation is an interaction between a customer support representative and a user.
Extract whether the representative is routing the user to the technical support team, or just responding conversationally.
If representative want to route the user to the technical support team, respond only with the word "TECHNICAL".
Otherwise, respond only with the word "CONVERSATION".
Remember, only respond with one of the above words.`
// Hint text rendered in the UI for the "Condition (Code)" tab; documents the
// sandboxed $flow/$vars variables (including the agent's $flow.output) that
// the user-written function can reference.
const howToUseCode = `
1. Must return a string value at the end of function. For example:
\`\`\`js
if ("X" === "X") {
return "Agent"; // connect to next agent node
} else {
return "End"; // connect to end node
}
\`\`\`
2. In most cases, you would probably get the last message to do some comparison. You can get all current messages from the state: \`$flow.state.messages\`:
\`\`\`json
[
{
"content": "Hello! How can I assist you today?",
"name": "",
"additional_kwargs": {},
"response_metadata": {},
"tool_calls": [],
"invalid_tool_calls": [],
"usage_metadata": {}
}
]
\`\`\`
For example, to get the last message content:
\`\`\`js
const messages = $flow.state.messages;
const lastMessage = messages[messages.length - 1];
// Proceed to do something with the last message content
\`\`\`
3. If you want to use the Condition Agent's output for conditional checks, it is available as \`$flow.output\` with the following structure:
\`\`\`json
{
"content": 'Hello! How can I assist you today?',
"name": "",
"additional_kwargs": {},
"response_metadata": {},
"tool_calls": [],
"invalid_tool_calls": [],
"usage_metadata": {}
}
\`\`\`
For example, we can check if the agent's output contains specific keyword:
\`\`\`js
const result = $flow.output.content;
if (result.includes("some-keyword")) {
return "Agent"; // connect to next agent node
} else {
return "End"; // connect to end node
}
\`\`\`
If Structured Output is enabled, \`$flow.output\` will be in the JSON format as defined in the Structured Output configuration:
\`\`\`json
{
"foo": 'var'
}
\`\`\`
4. You can get default flow config, including the current "state":
- \`$flow.sessionId\`
- \`$flow.chatId\`
- \`$flow.chatflowId\`
- \`$flow.input\`
- \`$flow.state\`
5. You can get custom variables: \`$vars.<variable-name>\`
`
// Default example shown in the code editor for the condition function.
const defaultFunc = `const result = $flow.output.content;
if (result.includes("some-keyword")) {
return "Agent";
}
return "End";
`
// Per-node suffix key used to remember which condition tab (table vs code) is selected.
const TAB_IDENTIFIER = 'selectedConditionFunctionTab'
/**
 * Sequential Agents "Condition Agent" node.
 *
 * Runs an LLM (optionally constrained to a JSON structured-output schema)
 * over the current conversation, then evaluates the configured condition —
 * datagrid table or user-written code — against the agent's output to pick
 * the next route in the graph.
 */
class ConditionAgent_SeqAgents implements INode {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    baseClasses: string[]
    credential: INodeParams
    inputs: INodeParams[]
    outputs: INodeOutputsValue[]

    constructor() {
        this.label = 'Condition Agent'
        this.name = 'seqConditionAgent'
        this.version = 1.0
        this.type = 'ConditionAgent'
        this.icon = 'condition.svg'
        this.category = 'Sequential Agents'
        this.description = 'Uses an agent to determine which route to take next'
        this.baseClasses = [this.type]
        this.inputs = [
            {
                label: 'Name',
                name: 'conditionAgentName',
                type: 'string',
                placeholder: 'Condition Agent'
            },
            {
                label: 'Start | Agent | LLM | Tool Node',
                name: 'sequentialNode',
                type: 'Start | Agent | LLMNode | ToolNode',
                list: true
            },
            {
                label: 'Chat Model',
                name: 'model',
                type: 'BaseChatModel',
                optional: true,
                description: `Overwrite model to be used for this agent`
            },
            {
                label: 'System Prompt',
                name: 'systemMessagePrompt',
                type: 'string',
                rows: 4,
                default: examplePrompt,
                additionalParams: true,
                optional: true
            },
            {
                label: 'Human Prompt',
                name: 'humanMessagePrompt',
                type: 'string',
                description: 'This prompt will be added at the end of the messages as human message',
                rows: 4,
                default: exampleHumanPrompt,
                additionalParams: true,
                optional: true
            },
            {
                label: 'Format Prompt Values',
                name: 'promptValues',
                description: 'Assign values to the prompt variables. You can also use $flow.state.<variable-name> to get the state value',
                type: 'json',
                optional: true,
                acceptVariable: true,
                list: true,
                additionalParams: true
            },
            {
                label: 'JSON Structured Output',
                name: 'conditionAgentStructuredOutput',
                type: 'datagrid',
                description: 'Instruct the LLM to give output in a JSON structured schema',
                datagrid: [
                    { field: 'key', headerName: 'Key', editable: true },
                    {
                        field: 'type',
                        headerName: 'Type',
                        type: 'singleSelect',
                        valueOptions: ['String', 'String Array', 'Number', 'Boolean', 'Enum'],
                        editable: true
                    },
                    { field: 'enumValues', headerName: 'Enum Values', editable: true },
                    { field: 'description', headerName: 'Description', flex: 1, editable: true }
                ],
                optional: true,
                additionalParams: true
            },
            {
                label: 'Condition',
                name: 'condition',
                type: 'conditionFunction', // This is a custom type to show as button on the UI and render anchor points when saved
                tabIdentifier: TAB_IDENTIFIER,
                tabs: [
                    {
                        label: 'Condition (Table)',
                        name: 'conditionUI',
                        type: 'datagrid',
                        description: 'If a condition is met, the node connected to the respective output will be executed',
                        optional: true,
                        datagrid: [
                            {
                                field: 'variable',
                                headerName: 'Variable',
                                type: 'freeSolo',
                                editable: true,
                                loadMethod: ['getPreviousMessages', 'loadStateKeys'],
                                valueOptions: [
                                    {
                                        label: 'Agent Output (string)',
                                        value: '$flow.output.content'
                                    },
                                    {
                                        label: `Agent's JSON Key Output (string)`,
                                        value: '$flow.output.<replace-with-key>'
                                    },
                                    {
                                        label: 'Total Messages (number)',
                                        value: '$flow.state.messages.length'
                                    },
                                    {
                                        label: 'First Message Content (string)',
                                        value: '$flow.state.messages[0].content'
                                    },
                                    {
                                        label: 'Last Message Content (string)',
                                        value: '$flow.state.messages[-1].content'
                                    },
                                    {
                                        label: `Global variable (string)`,
                                        value: '$vars.<variable-name>'
                                    }
                                ],
                                flex: 0.5,
                                minWidth: 200
                            },
                            {
                                field: 'operation',
                                headerName: 'Operation',
                                type: 'singleSelect',
                                valueOptions: [
                                    'Contains',
                                    'Not Contains',
                                    'Start With',
                                    'End With',
                                    'Is',
                                    'Is Not',
                                    'Is Empty',
                                    'Is Not Empty',
                                    'Greater Than',
                                    'Less Than',
                                    'Equal To',
                                    'Not Equal To',
                                    'Greater Than or Equal To',
                                    'Less Than or Equal To'
                                ],
                                editable: true,
                                flex: 0.4,
                                minWidth: 150
                            },
                            {
                                field: 'value',
                                headerName: 'Value',
                                flex: 1,
                                editable: true
                            },
                            {
                                field: 'output',
                                headerName: 'Output Name',
                                editable: true,
                                flex: 0.3,
                                minWidth: 150
                            }
                        ]
                    },
                    {
                        label: 'Condition (Code)',
                        name: 'conditionFunction',
                        type: 'code',
                        description: 'Function to evaluate the condition',
                        hint: {
                            label: 'How to use',
                            value: howToUseCode
                        },
                        hideCodeExecute: true,
                        codeExample: defaultFunc,
                        optional: true
                    }
                ]
            }
        ]
        this.outputs = [
            {
                label: 'Next',
                name: 'next',
                baseClasses: ['Agent', 'LLMNode', 'ToolNode'],
                isAnchor: true
            },
            {
                label: 'End',
                name: 'end',
                baseClasses: ['Agent', 'LLMNode', 'ToolNode'],
                isAnchor: true
            }
        ]
    }

    /**
     * Validates prompt variables, picks the LLM (node override or the flow's
     * start LLM) and builds the ISeqAgentNode whose `node` is the
     * conditional-edge function evaluated at runtime.
     */
    async init(nodeData: INodeData, input: string, options: ICommonObject): Promise<any> {
        const conditionLabel = nodeData.inputs?.conditionAgentName as string
        // FIX: trim BEFORE replacing whitespace. The previous
        // `.replace(/\s/g, '_').trim()` order made trim() a no-op, so padded
        // labels produced leading/trailing underscores in the node name.
        const conditionName = conditionLabel.trim().toLowerCase().replace(/\s/g, '_')
        const output = nodeData.outputs?.output as string
        const sequentialNodes = nodeData.inputs?.sequentialNode as ISeqAgentNode[]
        let agentPrompt = nodeData.inputs?.systemMessagePrompt as string
        let humanPrompt = nodeData.inputs?.humanMessagePrompt as string
        const promptValuesStr = nodeData.inputs?.promptValues
        const conditionAgentStructuredOutput = nodeData.inputs?.conditionAgentStructuredOutput
        const model = nodeData.inputs?.model as BaseChatModel

        if (!sequentialNodes || !sequentialNodes.length) throw new Error('Condition Agent must have a predecessor!')

        const startLLM = sequentialNodes[0].startLLM
        // Node-level model overrides the flow's start LLM when provided.
        const llm = model || startLLM
        if (nodeData.inputs) nodeData.inputs.model = llm

        let conditionAgentInputVariablesValues: ICommonObject = {}
        if (promptValuesStr) {
            try {
                conditionAgentInputVariablesValues = typeof promptValuesStr === 'object' ? promptValuesStr : JSON.parse(promptValuesStr)
            } catch (exception) {
                throw new Error("Invalid JSON in the Condition Agent's Prompt Input Values: " + exception)
            }
        }
        conditionAgentInputVariablesValues = handleEscapeCharacters(conditionAgentInputVariablesValues, true)

        // Every {variable} referenced in either prompt must be supplied.
        const conditionAgentInputVariables = uniq([...getInputVariables(agentPrompt), ...getInputVariables(humanPrompt)])
        if (!conditionAgentInputVariables.every((element) => Object.keys(conditionAgentInputVariablesValues).includes(element))) {
            throw new Error('Condition Agent input variables values are not provided!')
        }

        const abortControllerSignal = options.signal as AbortController
        const conditionalEdge = async (state: ISeqAgentsState, config: RunnableConfig) =>
            await runCondition(
                conditionName,
                nodeData,
                input,
                options,
                state,
                config,
                llm,
                agentPrompt,
                humanPrompt,
                conditionAgentInputVariablesValues,
                conditionAgentStructuredOutput,
                abortControllerSignal
            )

        const returnOutput: ISeqAgentNode = {
            id: nodeData.id,
            node: conditionalEdge,
            name: conditionName,
            label: conditionLabel,
            type: 'condition',
            output,
            llm,
            startLLM,
            multiModalMessageContent: sequentialNodes[0]?.multiModalMessageContent,
            predecessorAgents: sequentialNodes
        }
        return returnOutput
    }
}
/**
 * Runs the condition agent's LLM over the current state, then evaluates the
 * configured condition (datagrid table or user code) against the agent's
 * output to decide the next route.
 *
 * Structured output handling: when a JSON schema is configured, Gemini models
 * are bound to an ExtractTool (function calling) while other models use
 * withStructuredOutput; tool-call args are merged into a plain JSON object
 * that user conditions can read as $flow.output.
 *
 * NOTE(review): if the selected tab has no configuration, the function falls
 * through and resolves to undefined — confirm the graph builder tolerates that.
 */
const runCondition = async (
    conditionName: string,
    nodeData: INodeData,
    input: string,
    options: ICommonObject,
    state: ISeqAgentsState,
    config: RunnableConfig,
    llm: BaseChatModel,
    agentPrompt: string,
    humanPrompt: string,
    conditionAgentInputVariablesValues: ICommonObject,
    conditionAgentStructuredOutput: string,
    abortControllerSignal: AbortController
) => {
    const appDataSource = options.appDataSource as DataSource
    const databaseEntities = options.databaseEntities as IDatabaseEntity
    const tabIdentifier = nodeData.inputs?.[`${TAB_IDENTIFIER}_${nodeData.id}`] as string
    const conditionUI = nodeData.inputs?.conditionUI as string
    const conditionFunction = nodeData.inputs?.conditionFunction as string
    const selectedTab = tabIdentifier ? tabIdentifier.split(`_${nodeData.id}`)[0] : 'conditionUI'

    // Prompt layout: [system?] + accumulated messages + [human?]
    const promptArrays = [new MessagesPlaceholder('messages')] as BaseMessagePromptTemplateLike[]
    if (agentPrompt) promptArrays.unshift(['system', agentPrompt])
    if (humanPrompt) promptArrays.push(['human', humanPrompt])
    const prompt = ChatPromptTemplate.fromMessages(promptArrays)

    let model
    if (conditionAgentStructuredOutput && conditionAgentStructuredOutput !== '[]') {
        try {
            const structuredOutput = z.object(convertStructuredSchemaToZod(conditionAgentStructuredOutput))
            if (llm instanceof ChatGoogleGenerativeAI) {
                // Gemini: emulate structured output via a bound extraction tool.
                const tool = new ExtractTool({
                    schema: structuredOutput
                })
                // @ts-ignore
                const modelWithTool = llm.bind({
                    tools: [tool],
                    signal: abortControllerSignal ? abortControllerSignal.signal : undefined
                })
                model = modelWithTool
            } else {
                // @ts-ignore
                model = llm.withStructuredOutput(structuredOutput)
            }
        } catch (exception) {
            // Invalid schema: fall back to the raw model rather than failing the run.
            console.error('Invalid JSON in Condition Agent Structured Output: ' + exception)
            model = llm
        }
    } else {
        model = llm
    }

    let chain
    if (!conditionAgentInputVariablesValues || !Object.keys(conditionAgentInputVariablesValues).length) {
        chain = RunnableSequence.from([prompt, model]).withConfig({
            metadata: { sequentialNodeName: conditionName }
        })
    } else {
        // Resolve prompt-variable values ($flow/$vars placeholders) before the prompt.
        chain = RunnableSequence.from([
            RunnablePassthrough.assign(transformObjectPropertyToFunction(conditionAgentInputVariablesValues, state)),
            prompt,
            model
        ]).withConfig({
            metadata: { sequentialNodeName: conditionName }
        })
    }

    // Reshape the accumulated messages into the structure the model expects.
    // @ts-ignore
    state.messages = restructureMessages(model, state)
    let result = await chain.invoke({ ...state, signal: abortControllerSignal?.signal }, config)
    result.additional_kwargs = { ...result.additional_kwargs, nodeId: nodeData.id }

    // Gemini tool-call path: flatten tool-call args into a plain JSON object
    // so conditions can read keys directly off $flow.output.
    if (conditionAgentStructuredOutput && conditionAgentStructuredOutput !== '[]' && result.tool_calls && result.tool_calls.length) {
        let jsonResult = {}
        for (const toolCall of result.tool_calls) {
            jsonResult = { ...jsonResult, ...toolCall.args }
        }
        result = { ...jsonResult, additional_kwargs: { nodeId: nodeData.id } }
    }

    const variables = await getVars(appDataSource, databaseEntities, nodeData)
    // Sandbox context exposed to user code / placeholders, including the agent output.
    const flow = {
        chatflowId: options.chatflowid,
        sessionId: options.sessionId,
        chatId: options.chatId,
        input,
        state,
        output: result,
        vars: prepareSandboxVars(variables)
    }

    if (selectedTab === 'conditionFunction' && conditionFunction) {
        const vm = await getVM(appDataSource, databaseEntities, nodeData, flow)
        try {
            const response = await vm.run(`module.exports = async function() {${conditionFunction}}()`, __dirname)
            if (typeof response !== 'string') throw new Error('Condition function must return a string')
            return response
        } catch (e) {
            throw new Error(e)
        }
    } else if (selectedTab === 'conditionUI' && conditionUI) {
        try {
            const conditionItems: IConditionGridItem[] = typeof conditionUI === 'string' ? JSON.parse(conditionUI) : conditionUI
            for (const item of conditionItems) {
                if (!item.variable) throw new Error('Condition variable is required!')
                if (item.variable.startsWith('$flow')) {
                    // Resolve against the flow context (output, state, input, ids, ...).
                    const variableValue = customGet(flow, item.variable.replace('$flow.', ''))
                    if (checkCondition(variableValue, item.operation, item.value)) {
                        return item.output
                    }
                } else if (item.variable.startsWith('$vars')) {
                    // Resolve against sandboxed global variables.
                    const variableValue = customGet(flow, item.variable.replace('$', ''))
                    if (checkCondition(variableValue, item.operation, item.value)) {
                        return item.output
                    }
                } else if (item.variable.startsWith('$')) {
                    // '$<nodeId>' — compare against the latest message emitted by that node.
                    const nodeId = item.variable.replace('$', '')
                    const messageOutputs = ((state.messages as unknown as BaseMessage[]) ?? []).filter(
                        (message) => message.additional_kwargs && message.additional_kwargs?.nodeId === nodeId
                    )
                    const messageOutput = messageOutputs[messageOutputs.length - 1]
                    if (messageOutput) {
                        if (checkCondition(messageOutput.content as string, item.operation, item.value)) {
                            return item.output
                        }
                    }
                }
            }
            // No row matched: route to the default 'End' anchor.
            return 'End'
        } catch (exception) {
            throw new Error('Invalid Condition: ' + exception)
        }
    }
}
module.exports = { nodeClass: ConditionAgent_SeqAgents }

View File

@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round" class="icon icon-tabler icons-tabler-outline icon-tabler-arrows-split"><path stroke="none" d="M0 0h24v24H0z" fill="none"/><path d="M21 17h-8l-3.5 -5h-6.5" /><path d="M21 7h-8l-3.495 5" /><path d="M18 10l3 -3l-3 -3" /><path d="M18 20l3 -3l-3 -3" /></svg>

After

Width:  |  Height:  |  Size: 444 B

View File

@ -0,0 +1,54 @@
import { END } from '@langchain/langgraph'
import { INode, INodeData, INodeParams, ISeqAgentNode } from '../../../src/Interface'
/**
 * Sequential Agents "End" node.
 *
 * Terminal marker of a sequential-agents graph: it exposes no output anchor
 * and resolves to LangGraph's END sentinel so the graph builder can wire the
 * predecessor straight to the end of the conversation.
 */
class End_SeqAgents implements INode {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    baseClasses: string[]
    credential: INodeParams
    inputs: INodeParams[]
    hideOutput: boolean

    constructor() {
        // UI metadata
        this.label = 'End'
        this.name = 'seqEnd'
        this.version = 1.0
        this.type = 'End'
        this.icon = 'end.svg'
        this.category = 'Sequential Agents'
        this.description = 'End conversation'
        this.baseClasses = [this.type]
        // Terminal node: accepts exactly one upstream node and emits nothing.
        this.hideOutput = true
        this.inputs = [
            {
                label: 'Start | Agent | LLM | Tool Node',
                name: 'sequentialNode',
                type: 'Start | Agent | LLMNode | ToolNode'
            }
        ]
    }

    /**
     * Builds the graph descriptor for this node.
     * @param nodeData - resolved node inputs from the canvas
     * @returns an ISeqAgentNode whose node/name/label/output are all the END sentinel
     * @throws when no upstream node is connected
     */
    async init(nodeData: INodeData): Promise<any> {
        const predecessor = nodeData.inputs?.sequentialNode as ISeqAgentNode
        if (!predecessor) throw new Error('End must have a predecessor!')

        const terminalNode: ISeqAgentNode = {
            id: nodeData.id,
            node: END,
            name: END,
            label: END,
            type: 'end',
            output: END,
            predecessorAgents: [predecessor]
        }
        return terminalNode
    }
}

module.exports = { nodeClass: End_SeqAgents }

View File

@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round" class="icon icon-tabler icons-tabler-outline icon-tabler-player-stop"><path stroke="none" d="M0 0h24v24H0z" fill="none"/><path d="M5 5m0 2a2 2 0 0 1 2 -2h10a2 2 0 0 1 2 2v10a2 2 0 0 1 -2 2h-10a2 2 0 0 1 -2 -2z" /></svg>

After

Width:  |  Height:  |  Size: 410 B

View File

@ -0,0 +1,605 @@
import { flatten, uniq } from 'lodash'
import { DataSource } from 'typeorm'
import { z } from 'zod'
import { RunnableSequence, RunnablePassthrough, RunnableConfig } from '@langchain/core/runnables'
import { ChatPromptTemplate, MessagesPlaceholder, HumanMessagePromptTemplate, BaseMessagePromptTemplateLike } from '@langchain/core/prompts'
import { BaseChatModel } from '@langchain/core/language_models/chat_models'
import { AIMessage, AIMessageChunk } from '@langchain/core/messages'
import {
INode,
INodeData,
INodeParams,
ISeqAgentsState,
ICommonObject,
MessageContentImageUrl,
INodeOutputsValue,
ISeqAgentNode,
IDatabaseEntity
} from '../../../src/Interface'
import { AgentExecutor } from '../../../src/agents'
import { getInputVariables, getVars, handleEscapeCharacters, prepareSandboxVars } from '../../../src/utils'
import {
ExtractTool,
convertStructuredSchemaToZod,
customGet,
getVM,
processImageMessage,
transformObjectPropertyToFunction,
restructureMessages
} from '../commonUtils'
import { ChatGoogleGenerativeAI } from '../../chatmodels/ChatGoogleGenerativeAI/FlowiseChatGoogleGenerativeAI'
// Key suffix used to detect which "Update State" tab (Table vs Code) the user selected.
const TAB_IDENTIFIER = 'selectedUpdateStateMemoryTab'

// Shared description for the optional post-execution state-update feature.
const customOutputFuncDesc = `This is only applicable when you have a custom State at the START node. After agent execution, you might want to update the State values`

// Markdown hint shown in the "Update State (Code)" editor.
const howToUseCode = `
1. Return the key value JSON object. For example: if you have the following State:
\`\`\`json
{
"user": null
}
\`\`\`
You can update the "user" value by returning the following:
\`\`\`js
return {
"user": "john doe"
}
\`\`\`
2. If you want to use the LLM Node's output as the value to update state, it is available as \`$flow.output\` with the following structure:
\`\`\`json
{
"content": 'Hello! How can I assist you today?',
"name": "",
"additional_kwargs": {},
"response_metadata": {},
"tool_calls": [],
"invalid_tool_calls": [],
"usage_metadata": {}
}
\`\`\`
For example, if the output \`content\` is the value you want to update the state with, you can return the following:
\`\`\`js
return {
"user": $flow.output.content
}
\`\`\`
3. You can also get default flow config, including the current "state":
- \`$flow.sessionId\`
- \`$flow.chatId\`
- \`$flow.chatflowId\`
- \`$flow.input\`
- \`$flow.state\`
4. You can get custom variables: \`$vars.<variable-name>\`
`

// Markdown hint shown in the "Update State (Table)" datagrid.
const howToUse = `
1. Key and value pair to be updated. For example: if you have the following State:
| Key | Operation | Default Value |
|-----------|---------------|-------------------|
| user | Replace | |
You can update the "user" value with the following:
| Key | Value |
|-----------|-----------|
| user | john doe |
2. If you want to use the agent's output as the value to update state, it is available as available as \`$flow.output\` with the following structure:
\`\`\`json
{
"content": 'Hello! How can I assist you today?',
"name": "",
"additional_kwargs": {},
"response_metadata": {},
"tool_calls": [],
"invalid_tool_calls": [],
"usage_metadata": {}
}
\`\`\`
For example, if the output \`content\` is the value you want to update the state with, you can do the following:
| Key | Value |
|-----------|---------------------------|
| user | \`$flow.output.content\` |
3. You can get default flow config, including the current "state":
- \`$flow.sessionId\`
- \`$flow.chatId\`
- \`$flow.chatflowId\`
- \`$flow.input\`
- \`$flow.state\`
4. You can get custom variables: \`$vars.<variable-name>\`
`

// Default code sample pre-filled in the "Update State (Code)" editor.
const defaultFunc = `const result = $flow.output;
/* Suppose we have a custom State schema like this:
* {
aggregate: {
value: (x, y) => x.concat(y),
default: () => []
}
}
*/
return {
aggregate: [result.content]
};`
/**
 * Sequential Agents "LLM Node".
 *
 * Runs a chat model (the START node's model by default, or an override) on the
 * accumulated graph messages, optionally with a system/human prompt, prompt
 * variables, multi-modal image content, and a JSON structured-output schema.
 * init() returns an ISeqAgentNode whose `node` is the worker function that
 * LangGraph invokes per step.
 */
class LLMNode_SeqAgents implements INode {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    baseClasses: string[]
    inputs?: INodeParams[]
    badge?: string
    // NOTE(review): declared but never assigned in this constructor — confirm
    // `outputs` is populated elsewhere (init reads nodeData.outputs?.output).
    outputs: INodeOutputsValue[]

    constructor() {
        this.label = 'LLM Node'
        this.name = 'seqLLMNode'
        this.version = 1.0
        this.type = 'LLMNode'
        this.icon = 'llmNode.svg'
        this.category = 'Sequential Agents'
        this.description = 'Run Chat Model and return the output'
        this.baseClasses = [this.type]
        this.inputs = [
            {
                label: 'Name',
                name: 'llmNodeName',
                type: 'string',
                placeholder: 'LLM'
            },
            {
                label: 'System Prompt',
                name: 'systemMessagePrompt',
                type: 'string',
                rows: 4,
                optional: true,
                additionalParams: true
            },
            {
                label: 'Human Prompt',
                name: 'humanMessagePrompt',
                type: 'string',
                description: 'This prompt will be added at the end of the messages as human message',
                rows: 4,
                optional: true,
                additionalParams: true
            },
            {
                label: 'Start | Agent | LLM | Tool Node',
                name: 'sequentialNode',
                type: 'Start | Agent | LLMNode | ToolNode',
                list: true
            },
            {
                // Optional per-node model; when absent, init falls back to the
                // START node's model (see startLLM below).
                label: 'Chat Model',
                name: 'model',
                type: 'BaseChatModel',
                optional: true,
                description: `Overwrite model to be used for this node`
            },
            {
                label: 'Format Prompt Values',
                name: 'promptValues',
                description: 'Assign values to the prompt variables. You can also use $flow.state.<variable-name> to get the state value',
                type: 'json',
                optional: true,
                acceptVariable: true,
                list: true,
                additionalParams: true
            },
            {
                label: 'JSON Structured Output',
                name: 'llmStructuredOutput',
                type: 'datagrid',
                description: 'Instruct the LLM to give output in a JSON structured schema',
                datagrid: [
                    { field: 'key', headerName: 'Key', editable: true },
                    {
                        field: 'type',
                        headerName: 'Type',
                        type: 'singleSelect',
                        valueOptions: ['String', 'String Array', 'Number', 'Boolean', 'Enum'],
                        editable: true
                    },
                    { field: 'enumValues', headerName: 'Enum Values', editable: true },
                    { field: 'description', headerName: 'Description', flex: 1, editable: true }
                ],
                optional: true,
                additionalParams: true
            },
            {
                // Two mutually-exclusive editors (Table/Code); the selected tab
                // name is stored under `${TAB_IDENTIFIER}_${nodeId}`.
                label: 'Update State',
                name: 'updateStateMemory',
                type: 'tabs',
                tabIdentifier: TAB_IDENTIFIER,
                default: 'updateStateMemoryUI',
                additionalParams: true,
                tabs: [
                    {
                        label: 'Update State (Table)',
                        name: 'updateStateMemoryUI',
                        type: 'datagrid',
                        hint: {
                            label: 'How to use',
                            value: howToUse
                        },
                        description: customOutputFuncDesc,
                        datagrid: [
                            {
                                field: 'key',
                                headerName: 'Key',
                                type: 'asyncSingleSelect',
                                loadMethod: 'loadStateKeys',
                                flex: 0.5,
                                editable: true
                            },
                            {
                                field: 'value',
                                headerName: 'Value',
                                type: 'freeSolo',
                                valueOptions: [
                                    {
                                        label: 'LLM Node Output (string)',
                                        value: '$flow.output.content'
                                    },
                                    {
                                        label: `LLM JSON Output Key (string)`,
                                        value: '$flow.output.<replace-with-key>'
                                    },
                                    {
                                        label: `Global variable (string)`,
                                        value: '$vars.<variable-name>'
                                    },
                                    {
                                        label: 'Input Question (string)',
                                        value: '$flow.input'
                                    },
                                    {
                                        label: 'Session Id (string)',
                                        value: '$flow.sessionId'
                                    },
                                    {
                                        label: 'Chat Id (string)',
                                        value: '$flow.chatId'
                                    },
                                    {
                                        label: 'Chatflow Id (string)',
                                        value: '$flow.chatflowId'
                                    }
                                ],
                                editable: true,
                                flex: 1
                            }
                        ],
                        optional: true,
                        additionalParams: true
                    },
                    {
                        label: 'Update State (Code)',
                        name: 'updateStateMemoryCode',
                        type: 'code',
                        hint: {
                            label: 'How to use',
                            value: howToUseCode
                        },
                        description: `${customOutputFuncDesc}. Must return an object representing the state`,
                        hideCodeExecute: true,
                        codeExample: defaultFunc,
                        optional: true,
                        additionalParams: true
                    }
                ]
            }
        ]
    }

    /**
     * Validates inputs and builds the worker closure for this graph node.
     * @param nodeData - resolved node inputs/outputs from the canvas
     * @param input - the user's question for this run
     * @param options - flow runtime context (signal, datasource, chat ids, ...)
     * @returns ISeqAgentNode wrapping the per-step worker function
     * @throws when the node name is missing, no predecessor is connected,
     *         prompt values are invalid JSON, or prompt variables lack values
     */
    async init(nodeData: INodeData, input: string, options: ICommonObject): Promise<any> {
        // Tools can be connected through ToolNodes
        let tools = nodeData.inputs?.tools
        tools = flatten(tools)
        let systemPrompt = nodeData.inputs?.systemMessagePrompt as string
        let humanPrompt = nodeData.inputs?.humanMessagePrompt as string
        const llmNodeLabel = nodeData.inputs?.llmNodeName as string
        const sequentialNodes = nodeData.inputs?.sequentialNode as ISeqAgentNode[]
        const model = nodeData.inputs?.model as BaseChatModel
        const promptValuesStr = nodeData.inputs?.promptValues
        const output = nodeData.outputs?.output as string
        const llmStructuredOutput = nodeData.inputs?.llmStructuredOutput
        if (!llmNodeLabel) throw new Error('LLM Node name is required!')
        // Normalize the display label into a graph-safe identifier.
        const llmNodeName = llmNodeLabel.toLowerCase().replace(/\s/g, '_').trim()
        if (!sequentialNodes || !sequentialNodes.length) throw new Error('Agent must have a predecessor!')
        let llmNodeInputVariablesValues: ICommonObject = {}
        if (promptValuesStr) {
            try {
                llmNodeInputVariablesValues = typeof promptValuesStr === 'object' ? promptValuesStr : JSON.parse(promptValuesStr)
            } catch (exception) {
                throw new Error("Invalid JSON in the LLM Node's Prompt Input Values: " + exception)
            }
        }
        llmNodeInputVariablesValues = handleEscapeCharacters(llmNodeInputVariablesValues, true)
        // Fall back to the START node's model when no override is provided,
        // and write the effective model back so downstream consumers see it.
        const startLLM = sequentialNodes[0].startLLM
        const llm = model || startLLM
        if (nodeData.inputs) nodeData.inputs.model = llm
        // Reuse image content already computed upstream, otherwise build it now.
        const multiModalMessageContent = sequentialNodes[0]?.multiModalMessageContent || (await processImageMessage(llm, nodeData, options))
        const abortControllerSignal = options.signal as AbortController
        // Every {variable} referenced in the prompts must have a value supplied.
        const llmNodeInputVariables = uniq([...getInputVariables(systemPrompt), ...getInputVariables(humanPrompt)])
        if (!llmNodeInputVariables.every((element) => Object.keys(llmNodeInputVariablesValues).includes(element))) {
            throw new Error('LLM Node input variables values are not provided!')
        }
        // Worker invoked by LangGraph for each step; the agent chain is rebuilt
        // per call so bindModel overrides from config take effect.
        const workerNode = async (state: ISeqAgentsState, config: RunnableConfig) => {
            const bindModel = config.configurable?.bindModel?.[nodeData.id]
            return await agentNode(
                {
                    state,
                    llm,
                    agent: await createAgent(
                        llmNodeName,
                        state,
                        bindModel || llm,
                        [...tools],
                        systemPrompt,
                        humanPrompt,
                        multiModalMessageContent,
                        llmNodeInputVariablesValues,
                        llmStructuredOutput
                    ),
                    name: llmNodeName,
                    abortControllerSignal,
                    nodeData,
                    input,
                    options
                },
                config
            )
        }
        const returnOutput: ISeqAgentNode = {
            id: nodeData.id,
            node: workerNode,
            name: llmNodeName,
            label: llmNodeLabel,
            type: 'llm',
            llm,
            startLLM,
            output,
            predecessorAgents: sequentialNodes,
            multiModalMessageContent,
            moderations: sequentialNodes[0]?.moderations
        }
        return returnOutput
    }
}
/**
 * Builds the runnable chain for an LLM Node: prompt (system/human/messages,
 * plus optional multi-modal image content) piped into the chat model, with
 * tools and/or a structured-output schema bound onto the model first.
 *
 * @param llmNodeName - normalized node name, attached as run metadata
 * @param state - current graph state (used to resolve $flow.state prompt values)
 * @param llm - chat model to invoke (may be a config-level bindModel override)
 * @param tools - tools to bind; requires a function-calling-capable model
 * @param llmStructuredOutput - datagrid JSON describing the output schema ('[]' = none)
 * @returns the composed runnable chain
 * @throws when tools are supplied but the model does not support bindTools
 */
async function createAgent(
    llmNodeName: string,
    state: ISeqAgentsState,
    llm: BaseChatModel,
    tools: any[],
    systemPrompt: string,
    humanPrompt: string,
    multiModalMessageContent: MessageContentImageUrl[],
    llmNodeInputVariablesValues: ICommonObject,
    llmStructuredOutput: string
): Promise<AgentExecutor | RunnableSequence> {
    if (tools.length) {
        if (llm.bindTools === undefined) {
            throw new Error(`LLM Node only compatible with function calling models.`)
        }
        // @ts-ignore
        llm = llm.bindTools(tools)
    }
    if (llmStructuredOutput && llmStructuredOutput !== '[]') {
        try {
            const structuredOutput = z.object(convertStructuredSchemaToZod(llmStructuredOutput))
            if (llm instanceof ChatGoogleGenerativeAI) {
                // Gemini path: withStructuredOutput is not used here; instead a
                // synthetic "extract" tool carrying the zod schema is bound.
                const tool = new ExtractTool({
                    schema: structuredOutput
                })
                // @ts-ignore
                const modelWithTool = llm.bind({
                    tools: [tool]
                }) as any
                llm = modelWithTool
            } else {
                // @ts-ignore
                llm = llm.withStructuredOutput(structuredOutput)
            }
        } catch (exception) {
            // NOTE(review): schema errors are logged and swallowed so the node
            // still runs without structured output — confirm this is intended.
            console.error(exception)
        }
    }
    const promptArrays = [new MessagesPlaceholder('messages')] as BaseMessagePromptTemplateLike[]
    if (systemPrompt) promptArrays.unshift(['system', systemPrompt])
    if (humanPrompt) promptArrays.push(['human', humanPrompt])
    const prompt = ChatPromptTemplate.fromMessages(promptArrays)
    if (multiModalMessageContent.length) {
        const msg = HumanMessagePromptTemplate.fromTemplate([...multiModalMessageContent])
        // NOTE(review): splices at index 1, i.e. right after the system message;
        // when no system prompt exists this lands after the messages placeholder
        // instead — confirm that ordering is intended.
        prompt.promptMessages.splice(1, 0, msg)
    }
    let chain
    if (!llmNodeInputVariablesValues || !Object.keys(llmNodeInputVariablesValues).length) {
        chain = RunnableSequence.from([prompt, llm]).withConfig({
            metadata: { sequentialNodeName: llmNodeName }
        })
    } else {
        // Resolve $flow.state.<key> references against the live state before
        // the prompt is formatted.
        chain = RunnableSequence.from([
            RunnablePassthrough.assign(transformObjectPropertyToFunction(llmNodeInputVariablesValues, state)),
            prompt,
            llm
        ]).withConfig({
            metadata: { sequentialNodeName: llmNodeName }
        })
    }
    // @ts-ignore
    return chain
}
/**
 * Executes the prepared chain for one graph step and converts the raw model
 * result into the `{ messages, ...stateUpdates }` object LangGraph expects.
 *
 * Behavior:
 *  - aborts early if the run was cancelled;
 *  - restructures the message history for the target model before invoking;
 *  - when a JSON structured-output schema is configured, merges all tool-call
 *    args into one plain JSON object tagged with this node's id;
 *  - when an Update State table/code is configured, merges its computed state
 *    updates with the produced messages.
 *
 * @throws rethrows invocation errors (original Error preserved, stack intact)
 */
async function agentNode(
    {
        state,
        llm,
        agent,
        name,
        abortControllerSignal,
        nodeData,
        input,
        options
    }: {
        state: ISeqAgentsState
        llm: BaseChatModel
        agent: AgentExecutor | RunnableSequence
        name: string
        abortControllerSignal: AbortController
        nodeData: INodeData
        input: string
        options: ICommonObject
    },
    config: RunnableConfig
) {
    try {
        if (abortControllerSignal.signal.aborted) {
            throw new Error('Aborted!')
        }
        // Normalize the accumulated history for the target model before invoking.
        // @ts-ignore
        state.messages = restructureMessages(llm, state)
        let result: AIMessageChunk | ICommonObject = await agent.invoke({ ...state, signal: abortControllerSignal.signal }, config)
        // Structured-output mode: the model answers via tool calls; collapse all
        // tool-call args into a single plain JSON object tagged with the node id.
        const llmStructuredOutput = nodeData.inputs?.llmStructuredOutput
        if (llmStructuredOutput && llmStructuredOutput !== '[]' && result.tool_calls && result.tool_calls.length) {
            let jsonResult = {}
            for (const toolCall of result.tool_calls) {
                jsonResult = { ...jsonResult, ...toolCall.args }
            }
            result = { ...jsonResult, additional_kwargs: { nodeId: nodeData.id } }
        }
        // Build the messages array once (previously duplicated across branches).
        const messages = _toStateMessages(result, name, nodeData)
        if (nodeData.inputs?.updateStateMemoryUI || nodeData.inputs?.updateStateMemoryCode) {
            // Custom state updates are merged alongside the produced messages.
            const returnedOutput = await getReturnOutput(nodeData, input, options, result, state)
            return {
                ...returnedOutput,
                messages
            }
        }
        return {
            messages
        }
    } catch (error) {
        // Preserve the original Error (message + stack) instead of wrapping it,
        // which previously yielded messages like "Error: Error: ...".
        throw error instanceof Error ? error : new Error(String(error))
    }
}

/**
 * Wraps the raw invoke result as the message(s) to append to graph state,
 * tagged with the producing node's id so downstream condition nodes can find it.
 * Structured-output results are serialized into an AIMessage; plain chat
 * results are tagged in place and returned as-is.
 */
function _toStateMessages(result: AIMessageChunk | ICommonObject, name: string, nodeData: INodeData) {
    if (nodeData.inputs?.llmStructuredOutput && nodeData.inputs.llmStructuredOutput !== '[]') {
        return [
            new AIMessage({
                content: typeof result === 'object' ? JSON.stringify(result) : result,
                name,
                additional_kwargs: { nodeId: nodeData.id }
            })
        ]
    }
    result.name = name
    result.additional_kwargs = { ...result.additional_kwargs, nodeId: nodeData.id }
    return [result]
}
/**
 * Computes the custom state updates configured on the node's "Update State"
 * tab (Table or Code) after an LLM Node run.
 *
 * Table mode: each row's value may reference `$flow.*` / `$vars.*`, which are
 * resolved against the flow context; other values are stored verbatim.
 * Code mode: runs the user's JS in the sandboxed VM; must return an object.
 *
 * @param output - raw result of the LLM invocation, exposed as $flow.output
 * @returns an object of state key/value updates, or undefined when no tab
 *          content is configured
 * @throws on malformed table JSON, missing row keys, or a non-object code result
 */
const getReturnOutput = async (nodeData: INodeData, input: string, options: ICommonObject, output: any, state: ISeqAgentsState) => {
    const appDataSource = options.appDataSource as DataSource
    const databaseEntities = options.databaseEntities as IDatabaseEntity
    const tabIdentifier = nodeData.inputs?.[`${TAB_IDENTIFIER}_${nodeData.id}`] as string
    const updateStateMemoryUI = nodeData.inputs?.updateStateMemoryUI as string
    const updateStateMemoryCode = nodeData.inputs?.updateStateMemoryCode as string
    const selectedTab = tabIdentifier ? tabIdentifier.split(`_${nodeData.id}`)[0] : 'updateStateMemoryUI'
    const variables = await getVars(appDataSource, databaseEntities, nodeData)
    const flow = {
        chatflowId: options.chatflowid,
        sessionId: options.sessionId,
        chatId: options.chatId,
        input,
        output,
        state,
        vars: prepareSandboxVars(variables)
    }
    if (selectedTab === 'updateStateMemoryUI' && updateStateMemoryUI) {
        try {
            const parsedSchema = typeof updateStateMemoryUI === 'string' ? JSON.parse(updateStateMemoryUI) : updateStateMemoryUI
            const obj: ICommonObject = {}
            for (const sch of parsedSchema) {
                const key = sch.key
                if (!key) throw new Error(`Key is required`)
                // Guard the type: a row saved without a value previously crashed
                // on value.startsWith(); non-string values are now stored as-is.
                let value = sch.value
                if (typeof value === 'string' && value.startsWith('$flow')) {
                    value = customGet(flow, value.replace('$flow.', ''))
                } else if (typeof value === 'string' && value.startsWith('$vars')) {
                    value = customGet(flow, value.replace('$', ''))
                }
                obj[key] = value
            }
            return obj
        } catch (e) {
            // Preserve real Errors (message + stack) instead of double-wrapping.
            throw e instanceof Error ? e : new Error(String(e))
        }
    } else if (selectedTab === 'updateStateMemoryCode' && updateStateMemoryCode) {
        const vm = await getVM(appDataSource, databaseEntities, nodeData, flow)
        try {
            const response = await vm.run(`module.exports = async function() {${updateStateMemoryCode}}()`, __dirname)
            if (typeof response !== 'object') throw new Error('Return output must be an object')
            return response
        } catch (e) {
            throw e instanceof Error ? e : new Error(String(e))
        }
    }
}
module.exports = { nodeClass: LLMNode_SeqAgents }

View File

@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round" class="icon icon-tabler icons-tabler-outline icon-tabler-language"><path stroke="none" d="M0 0h24v24H0z" fill="none"/><path d="M4 5h7" /><path d="M9 3v2c0 4.418 -2.239 8 -5 8" /><path d="M5 9c0 2.144 2.952 3.908 6.7 4" /><path d="M12 20l4 -9l4 9" /><path d="M19.1 18h-6.2" /></svg>

After

Width:  |  Height:  |  Size: 472 B

View File

@ -0,0 +1,66 @@
import { INode, INodeData, INodeParams, ISeqAgentNode } from '../../../src/Interface'
/**
 * Sequential Agents "Loop" node.
 *
 * Routes the graph back to a previously-defined node: it resolves to the
 * target node's normalized name rather than introducing a node of its own,
 * so the graph builder wires an edge from the predecessor to that target.
 */
class Loop_SeqAgents implements INode {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    baseClasses: string[]
    credential: INodeParams
    inputs: INodeParams[]
    hideOutput: boolean

    constructor() {
        // UI metadata
        this.label = 'Loop'
        this.name = 'seqLoop'
        this.version = 1.0
        this.type = 'Loop'
        this.icon = 'loop.svg'
        this.category = 'Sequential Agents'
        this.description = 'Loop back to the specific sequential node'
        this.baseClasses = [this.type]
        // A loop is a routing marker, not a producer — no output anchor.
        this.hideOutput = true
        this.inputs = [
            {
                label: 'Start | Agent | LLM | Tool Node',
                name: 'sequentialNode',
                type: 'Start | Agent | LLMNode | ToolNode',
                list: true
            },
            {
                label: 'Loop To',
                name: 'loopToName',
                description: 'Name of the agent to loop back to',
                type: 'string',
                placeholder: 'agent1'
            }
        ]
    }

    /**
     * Builds the routing descriptor pointing at the loop target.
     * @throws when no predecessor is connected or the target name is empty
     */
    async init(nodeData: INodeData): Promise<any> {
        const predecessors = nodeData.inputs?.sequentialNode as ISeqAgentNode[]
        const targetLabel = nodeData.inputs?.loopToName as string
        if (!predecessors || !predecessors.length) throw new Error('Loop must have a predecessor!')
        if (!targetLabel) throw new Error('Loop to name is required')

        // Same label normalization agent nodes use when registering themselves.
        const targetName = targetLabel.toLowerCase().replace(/\s/g, '_').trim()

        const loopNode: ISeqAgentNode = {
            id: nodeData.id,
            node: targetName,
            name: targetName,
            label: targetLabel,
            type: 'agent',
            predecessorAgents: predecessors,
            output: targetName
        }
        return loopNode
    }
}

module.exports = { nodeClass: Loop_SeqAgents }

View File

@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round" class="icon icon-tabler icons-tabler-outline icon-tabler-arrow-back-up-double"><path stroke="none" d="M0 0h24v24H0z" fill="none"/><path d="M13 14l-4 -4l4 -4" /><path d="M8 14l-4 -4l4 -4" /><path d="M9 10h7a4 4 0 1 1 0 8h-1" /></svg>

After

Width:  |  Height:  |  Size: 423 B

View File

@ -0,0 +1,81 @@
import { START } from '@langchain/langgraph'
import { BaseChatModel } from '@langchain/core/language_models/chat_models'
import { INode, INodeData, INodeParams, ISeqAgentNode } from '../../../src/Interface'
import { Moderation } from '../../moderation/Moderation'
/**
 * Sequential Agents "Start" node.
 *
 * Entry point of a sequential-agents graph. It carries the default chat model
 * (exposed to downstream nodes as `startLLM`), the optional checkpoint memory,
 * the optional custom State node, and any input-moderation chain.
 */
class Start_SeqAgents implements INode {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    baseClasses: string[]
    credential: INodeParams
    inputs: INodeParams[]

    constructor() {
        // UI metadata
        this.label = 'Start'
        this.name = 'seqStart'
        this.version = 1.0
        this.type = 'Start'
        this.icon = 'start.svg'
        this.category = 'Sequential Agents'
        this.description = 'Starting point of the conversation'
        this.baseClasses = [this.type]
        this.inputs = [
            {
                label: 'Chat Model',
                name: 'model',
                type: 'BaseChatModel',
                description: `Only compatible with models that are capable of function calling: ChatOpenAI, ChatMistral, ChatAnthropic, ChatGoogleGenerativeAI, ChatVertexAI, GroqChat`
            },
            {
                label: 'Agent Memory',
                name: 'agentMemory',
                type: 'BaseCheckpointSaver',
                description: 'Save the state of the agent',
                optional: true
            },
            {
                label: 'State',
                name: 'state',
                type: 'State',
                description:
                    'State is an object that is updated by nodes in the graph, passing from one node to another. By default, state contains "messages" that got updated with each message sent and received.',
                optional: true
            },
            {
                label: 'Input Moderation',
                description: 'Detect text that could generate harmful output and prevent it from being sent to the language model',
                name: 'inputModeration',
                type: 'Moderation',
                optional: true,
                list: true
            }
        ]
    }

    /**
     * Builds the START descriptor. The configured model becomes both `llm` and
     * `startLLM`, so downstream nodes can fall back to it when they have no
     * model override of their own.
     */
    async init(nodeData: INodeData): Promise<any> {
        const model = nodeData.inputs?.model as BaseChatModel
        const moderations = (nodeData.inputs?.inputModeration as Moderation[]) ?? []

        const startNode: ISeqAgentNode = {
            id: nodeData.id,
            node: START,
            name: START,
            label: START,
            type: 'start',
            output: START,
            llm: model,
            startLLM: model,
            moderations,
            checkpointMemory: nodeData.inputs?.agentMemory
        }
        return startNode
    }
}

module.exports = { nodeClass: Start_SeqAgents }

View File

@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round" class="icon icon-tabler icons-tabler-outline icon-tabler-player-play"><path stroke="none" d="M0 0h24v24H0z" fill="none"/><path d="M7 4v16l13 -8z" /></svg>

After

Width:  |  Height:  |  Size: 345 B

View File

@ -0,0 +1,199 @@
import { START } from '@langchain/langgraph'
import { ICommonObject, IDatabaseEntity, INode, INodeData, INodeParams, ISeqAgentNode } from '../../../src/Interface'
import { availableDependencies, defaultAllowBuiltInDep, getVars, prepareSandboxVars } from '../../../src/utils'
import { NodeVM } from 'vm2'
import { DataSource } from 'typeorm'
// Default code sample pre-filled in the "State (Code)" editor: a reducer-style
// state channel definition (value combiner + default factory).
const defaultFunc = `{
aggregate: {
value: (x, y) => x.concat(y), // here we append the new message to the existing messages
default: () => []
}
}`
// Markdown hint shown in the "State (Table)" datagrid.
const howToUse = `
Specify the Key, Operation Type, and Default Value for the state object. The Operation Type can be either "Replace" or "Append".
**Replace**
- Replace the existing value with the new value.
- If the new value is null, the existing value will be retained.
**Append**
- Append the new value to the existing value.
- Default value can be empty or an array. Ex: ["a", "b"]
- Final value is an array.
`
// Key suffix used to detect which State tab (Table vs Code) the user selected.
const TAB_IDENTIFIER = 'selectedStateTab'
/**
 * Sequential Agents "State" node.
 *
 * Defines the custom state schema for the graph. Each key maps to a reducer
 * object `{ value: (existing, incoming) => merged, default: () => initial }`.
 * The schema can be authored either as a table (key / Replace|Append /
 * default value) or as raw JS evaluated in the sandboxed NodeVM.
 */
class State_SeqAgents implements INode {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    baseClasses: string[]
    credential: INodeParams
    inputs: INodeParams[]

    constructor() {
        this.label = 'State'
        this.name = 'seqState'
        this.version = 1.0
        this.type = 'State'
        this.icon = 'state.svg'
        this.category = 'Sequential Agents'
        this.description = 'A centralized state object, updated by nodes in the graph, passing from one node to another'
        this.baseClasses = [this.type]
        this.inputs = [
            {
                label: 'State',
                name: 'stateMemory',
                type: 'tabs',
                tabIdentifier: TAB_IDENTIFIER,
                additionalParams: true,
                default: 'stateMemoryUI',
                tabs: [
                    {
                        label: 'State (Table)',
                        name: 'stateMemoryUI',
                        type: 'datagrid',
                        description:
                            'Structure for state. By default, state contains "messages" that got updated with each message sent and received.',
                        hint: {
                            label: 'How to use',
                            value: howToUse
                        },
                        datagrid: [
                            { field: 'key', headerName: 'Key', editable: true },
                            {
                                field: 'type',
                                headerName: 'Operation',
                                type: 'singleSelect',
                                valueOptions: ['Replace', 'Append'],
                                editable: true
                            },
                            { field: 'defaultValue', headerName: 'Default Value', flex: 1, editable: true }
                        ],
                        optional: true,
                        additionalParams: true
                    },
                    {
                        label: 'State (Code)',
                        name: 'stateMemoryCode',
                        type: 'code',
                        description: `JSON object representing the state`,
                        hideCodeExecute: true,
                        codeExample: defaultFunc,
                        optional: true,
                        additionalParams: true
                    }
                ]
            }
        ]
    }

    /**
     * Builds the state-schema descriptor.
     * - No table/code content: returns an empty schema object.
     * - Table mode: converts each row into a reducer; "Append" concatenates
     *   (default value parsed as JSON, falling back to []), anything else
     *   replaces (keeping the old value when the incoming one is nullish).
     * - Code mode: evaluates the user's JS in a sandboxed NodeVM with $vars
     *   and $flow available; the result must be an object.
     * @throws on malformed table JSON, missing keys, or a non-object code result
     */
    async init(nodeData: INodeData, input: string, options: ICommonObject): Promise<any> {
        const tabIdentifier = nodeData.inputs?.[`${TAB_IDENTIFIER}_${nodeData.id}`] as string
        const stateMemoryUI = nodeData.inputs?.stateMemoryUI as string
        const stateMemoryCode = nodeData.inputs?.stateMemoryCode as string
        const appDataSource = options.appDataSource as DataSource
        const databaseEntities = options.databaseEntities as IDatabaseEntity
        const selectedTab = tabIdentifier ? tabIdentifier.split(`_${nodeData.id}`)[0] : 'stateMemoryUI'
        if (!stateMemoryUI && !stateMemoryCode) {
            // No custom schema configured: contribute an empty state object.
            const returnOutput: ISeqAgentNode = {
                id: nodeData.id,
                node: {},
                name: 'state',
                label: 'state',
                type: 'state',
                output: START
            }
            return returnOutput
        }
        if (selectedTab === 'stateMemoryUI' && stateMemoryUI) {
            try {
                const parsedSchema = typeof stateMemoryUI === 'string' ? JSON.parse(stateMemoryUI) : stateMemoryUI
                const obj: ICommonObject = {}
                for (const sch of parsedSchema) {
                    const key = sch.key
                    if (!key) throw new Error(`Key is required`)
                    const type = sch.type
                    const defaultValue = sch.defaultValue
                    if (type === 'Append') {
                        // Append: concatenate incoming values (wrapping scalars),
                        // default parsed from the row's JSON text or [].
                        obj[key] = {
                            value: (x: any, y: any) => (Array.isArray(y) ? x.concat(y) : x.concat([y])),
                            default: () => (defaultValue ? JSON.parse(defaultValue) : [])
                        }
                    } else {
                        // Replace: keep the previous value when incoming is nullish.
                        obj[key] = {
                            value: (x: any, y: any) => y ?? x,
                            default: () => defaultValue
                        }
                    }
                }
                const returnOutput: ISeqAgentNode = {
                    id: nodeData.id,
                    node: obj,
                    name: 'state',
                    label: 'state',
                    type: 'state',
                    output: START
                }
                return returnOutput
            } catch (e) {
                throw new Error(e)
            }
        } else if (selectedTab === 'stateMemoryCode' && stateMemoryCode) {
            const variables = await getVars(appDataSource, databaseEntities, nodeData)
            const flow = {
                chatflowId: options.chatflowid,
                sessionId: options.sessionId,
                chatId: options.chatId,
                input
            }
            // Sandbox exposes $vars / $flow; allowed deps come from env config.
            let sandbox: any = {}
            sandbox['$vars'] = prepareSandboxVars(variables)
            sandbox['$flow'] = flow
            const builtinDeps = process.env.TOOL_FUNCTION_BUILTIN_DEP
                ? defaultAllowBuiltInDep.concat(process.env.TOOL_FUNCTION_BUILTIN_DEP.split(','))
                : defaultAllowBuiltInDep
            const externalDeps = process.env.TOOL_FUNCTION_EXTERNAL_DEP ? process.env.TOOL_FUNCTION_EXTERNAL_DEP.split(',') : []
            const deps = availableDependencies.concat(externalDeps)
            const nodeVMOptions = {
                console: 'inherit',
                sandbox,
                require: {
                    external: { modules: deps },
                    builtin: builtinDeps
                }
            } as any
            const vm = new NodeVM(nodeVMOptions)
            try {
                const response = await vm.run(`module.exports = async function() {return ${stateMemoryCode}}()`, __dirname)
                if (typeof response !== 'object') throw new Error('State must be an object')
                const returnOutput: ISeqAgentNode = {
                    id: nodeData.id,
                    node: response,
                    name: 'state',
                    label: 'state',
                    type: 'state',
                    output: START
                }
                return returnOutput
            } catch (e) {
                throw new Error(e)
            }
        }
    }
}
module.exports = { nodeClass: State_SeqAgents }

View File

@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round" class="icon icon-tabler icons-tabler-outline icon-tabler-device-sd-card"><path stroke="none" d="M0 0h24v24H0z" fill="none"/><path d="M7 21h10a2 2 0 0 0 2 -2v-14a2 2 0 0 0 -2 -2h-6.172a2 2 0 0 0 -1.414 .586l-3.828 3.828a2 2 0 0 0 -.586 1.414v10.172a2 2 0 0 0 2 2z" /><path d="M13 6v2" /><path d="M16 6v2" /><path d="M10 7v1" /></svg>

After

Width:  |  Height:  |  Size: 523 B

View File

@ -0,0 +1,498 @@
import { flatten } from 'lodash'
import { ICommonObject, IDatabaseEntity, INode, INodeData, INodeParams, ISeqAgentNode, IUsedTool } from '../../../src/Interface'
import { AIMessage, AIMessageChunk, BaseMessage, ToolMessage } from '@langchain/core/messages'
import { StructuredTool } from '@langchain/core/tools'
import { RunnableConfig } from '@langchain/core/runnables'
import { SOURCE_DOCUMENTS_PREFIX } from '../../../src/agents'
import { Document } from '@langchain/core/documents'
import { DataSource } from 'typeorm'
import { MessagesState, RunnableCallable, customGet, getVM } from '../commonUtils'
import { getVars, prepareSandboxVars } from '../../../src/utils'
import { ChatPromptTemplate } from '@langchain/core/prompts'
const defaultApprovalPrompt = `You are about to execute tool: {tools}. Ask if user want to proceed`
const customOutputFuncDesc = `This is only applicable when you have a custom State at the START node. After tool execution, you might want to update the State values`
const howToUseCode = `
1. Return the key value JSON object. For example: if you have the following State:
\`\`\`json
{
"user": null
}
\`\`\`
You can update the "user" value by returning the following:
\`\`\`js
return {
"user": "john doe"
}
\`\`\`
2. If you want to use the tool's output as the value to update state, it is available as \`$flow.output\` with the following structure (array):
\`\`\`json
[
{
"tool": "tool's name",
"toolInput": {},
"toolOutput": "tool's output content",
"sourceDocuments": [
{
"pageContent": "This is the page content",
"metadata": "{foo: var}",
}
],
}
]
\`\`\`
For example:
\`\`\`js
/* Assuming you have the following state:
{
"sources": null
}
*/
return {
"sources": $flow.output[0].sourceDocuments
}
\`\`\`
3. You can also get default flow config, including the current "state":
- \`$flow.sessionId\`
- \`$flow.chatId\`
- \`$flow.chatflowId\`
- \`$flow.input\`
- \`$flow.state\`
4. You can get custom variables: \`$vars.<variable-name>\`
`
const howToUse = `
1. Key and value pair to be updated. For example: if you have the following State:
| Key | Operation | Default Value |
|-----------|---------------|-------------------|
| user | Replace | |
You can update the "user" value with the following:
| Key | Value |
|-----------|-----------|
| user | john doe |
2. If you want to use the agent's output as the value to update state, it is available as available as \`$flow.output\` with the following structure (array):
\`\`\`json
[
{
"content": "Hello! How can I assist you today?",
"sourceDocuments": [
{
"pageContent": "This is the page content",
"metadata": "{foo: var}",
}
],
}
]
\`\`\`
For example:
| Key | Value |
|--------------|-------------------------------------------|
| sources | \`$flow.output[0].sourceDocuments\` |
3. You can get default flow config, including the current "state":
- \`$flow.sessionId\`
- \`$flow.chatId\`
- \`$flow.chatflowId\`
- \`$flow.input\`
- \`$flow.state\`
4. You can get custom variables: \`$vars.<variable-name>\`
`
// Pre-filled example shown in the "Update State (Code)" editor: demonstrates
// reading the tool output from $flow.output and appending to a reducer-style
// state channel.
const defaultFunc = `const result = $flow.output;

/* Suppose we have a custom State schema like this:
* {
    aggregate: {
        value: (x, y) => x.concat(y),
        default: () => []
    }
  }
*/

return {
  aggregate: [result.content]
};`

// Suffix used to namespace the selected tab per node instance
// (stored in inputs as `${TAB_IDENTIFIER}_${nodeId}`).
const TAB_IDENTIFIER = 'selectedUpdateStateMemoryTab'
/**
 * Sequential Agents — Tool Node.
 *
 * Executes the tool calls emitted by the preceding LLM Node and returns each
 * tool's result as messages. Supports an optional human-in-the-loop approval
 * step before tools run, and can write tool results back into the custom
 * graph State through the "Update State" table/code tabs.
 */
class ToolNode_SeqAgents implements INode {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    baseClasses: string[]
    credential: INodeParams
    inputs: INodeParams[]

    constructor() {
        this.label = 'Tool Node'
        this.name = 'seqToolNode'
        this.version = 1.0
        this.type = 'ToolNode'
        this.icon = 'toolNode.svg'
        this.category = 'Sequential Agents'
        this.description = `Execute tool and return tool's output`
        this.baseClasses = [this.type]
        this.inputs = [
            {
                label: 'Tools',
                name: 'tools',
                type: 'Tool',
                list: true,
                optional: true
            },
            // Predecessor LLM node whose tool_calls this node executes.
            {
                label: 'LLM Node',
                name: 'llmNode',
                type: 'LLMNode'
            },
            {
                label: 'Name',
                name: 'toolNodeName',
                type: 'string',
                placeholder: 'Tool'
            },
            // --- Human-in-the-loop (approval) options ---
            {
                label: 'Require Approval',
                name: 'interrupt',
                description: 'Require approval before executing tools',
                type: 'boolean',
                optional: true
            },
            {
                label: 'Approval Prompt',
                name: 'approvalPrompt',
                description: 'Prompt for approval. Only applicable if "Require Approval" is enabled',
                type: 'string',
                default: defaultApprovalPrompt,
                rows: 4,
                optional: true,
                additionalParams: true
            },
            {
                label: 'Approve Button Text',
                name: 'approveButtonText',
                description: 'Text for approve button. Only applicable if "Require Approval" is enabled',
                type: 'string',
                default: 'Yes',
                optional: true,
                additionalParams: true
            },
            {
                label: 'Reject Button Text',
                name: 'rejectButtonText',
                description: 'Text for reject button. Only applicable if "Require Approval" is enabled',
                type: 'string',
                default: 'No',
                optional: true,
                additionalParams: true
            },
            // --- State update: either a datagrid (table) or free-form code ---
            {
                label: 'Update State',
                name: 'updateStateMemory',
                type: 'tabs',
                tabIdentifier: TAB_IDENTIFIER,
                additionalParams: true,
                default: 'updateStateMemoryUI',
                tabs: [
                    {
                        label: 'Update State (Table)',
                        name: 'updateStateMemoryUI',
                        type: 'datagrid',
                        hint: {
                            label: 'How to use',
                            value: howToUse
                        },
                        description: customOutputFuncDesc,
                        datagrid: [
                            {
                                field: 'key',
                                headerName: 'Key',
                                type: 'asyncSingleSelect',
                                loadMethod: 'loadStateKeys',
                                flex: 0.5,
                                editable: true
                            },
                            {
                                field: 'value',
                                headerName: 'Value',
                                type: 'freeSolo',
                                valueOptions: [
                                    {
                                        label: 'All Tools Output (array)',
                                        value: '$flow.output'
                                    },
                                    {
                                        label: 'First Tool Output (string)',
                                        value: '$flow.output[0].toolOutput'
                                    },
                                    {
                                        label: 'First Tool Input Arguments (string | json)',
                                        value: '$flow.output[0].toolInput'
                                    },
                                    {
                                        label: `First Tool Returned Source Documents (array)`,
                                        value: '$flow.output[0].sourceDocuments'
                                    },
                                    {
                                        label: `Global variable (string)`,
                                        value: '$vars.<variable-name>'
                                    },
                                    {
                                        label: 'Input Question (string)',
                                        value: '$flow.input'
                                    },
                                    {
                                        label: 'Session Id (string)',
                                        value: '$flow.sessionId'
                                    },
                                    {
                                        label: 'Chat Id (string)',
                                        value: '$flow.chatId'
                                    },
                                    {
                                        label: 'Chatflow Id (string)',
                                        value: '$flow.chatflowId'
                                    }
                                ],
                                editable: true,
                                flex: 1
                            }
                        ],
                        optional: true,
                        additionalParams: true
                    },
                    {
                        label: 'Update State (Code)',
                        name: 'updateStateMemoryCode',
                        type: 'code',
                        hint: {
                            label: 'How to use',
                            value: howToUseCode
                        },
                        description: `${customOutputFuncDesc}. Must return an object representing the state`,
                        hideCodeExecute: true,
                        codeExample: defaultFunc,
                        optional: true,
                        additionalParams: true
                    }
                ]
            }
        ]
    }

    /**
     * Builds the runnable ToolNode and wraps it in the ISeqAgentNode descriptor
     * consumed by the sequential-agents graph builder. Inherits the LLM,
     * moderations and multi-modal content from the predecessor LLM node.
     *
     * @throws when no predecessor LLM node, no tools, or no node name is given
     */
    async init(nodeData: INodeData, input: string, options: ICommonObject): Promise<any> {
        const toolNodeLabel = nodeData.inputs?.toolNodeName as string
        const llmNode = nodeData.inputs?.llmNode as ISeqAgentNode
        if (!llmNode) throw new Error('Tool node must have a predecessor!')

        const interrupt = nodeData.inputs?.interrupt as boolean
        const approvalPrompt = nodeData.inputs?.approvalPrompt as string
        const approveButtonText = nodeData.inputs?.approveButtonText as string
        const rejectButtonText = nodeData.inputs?.rejectButtonText as string

        let tools = nodeData.inputs?.tools
        tools = flatten(tools)
        if (!tools || !tools.length) throw new Error('Tools must not be empty')

        const output = nodeData.outputs?.output as string

        if (!toolNodeLabel) throw new Error('Tool node name is required!')
        // NOTE(review): .trim() runs after spaces were replaced with underscores,
        // so it strips nothing — leading/trailing spaces in the label end up as
        // underscores in the node name. Trimming first was probably intended.
        const toolNodeLabelName = toolNodeLabel.toLowerCase().replace(/\s/g, '_').trim()
        const toolNode = new ToolNode(tools, nodeData, input, options, toolNodeLabelName, [], { sequentialNodeName: toolNodeLabelName })
        ;(toolNode as any).interrupt = interrupt
        if (interrupt && approvalPrompt && approveButtonText && rejectButtonText) {
            // HITL: asks the predecessor's start LLM to phrase the approval
            // question, given the tools about to be executed.
            ;(toolNode as any).seekPermissionMessage = async (usedTools: IUsedTool[]) => {
                const prompt = ChatPromptTemplate.fromMessages([['human', approvalPrompt || defaultApprovalPrompt]])
                const chain = prompt.pipe(llmNode.startLLM)
                const response = (await chain.invoke({
                    input: 'Hello there!',
                    tools: JSON.stringify(usedTools)
                })) as AIMessageChunk
                return response.content
            }
        }

        const returnOutput: ISeqAgentNode = {
            id: nodeData.id,
            node: toolNode,
            name: toolNodeLabelName,
            label: toolNodeLabel,
            type: 'tool',
            output,
            predecessorAgents: [llmNode],
            llm: llmNode.llm,
            startLLM: llmNode.startLLM,
            moderations: llmNode.moderations,
            multiModalMessageContent: llmNode.multiModalMessageContent
        }

        return returnOutput
    }
}
/**
 * Runnable that executes the tool calls found on the last AI message of the
 * incoming state and converts each result into a ToolMessage.
 *
 * Accepts either a bare message array or a MessagesState object and mirrors
 * that shape in its return value (except when a state update is configured —
 * see note in run()).
 */
class ToolNode<T extends BaseMessage[] | MessagesState> extends RunnableCallable<T, T> {
    tools: StructuredTool[]
    nodeData: INodeData
    inputQuery: string
    options: ICommonObject

    constructor(
        tools: StructuredTool[],
        nodeData: INodeData,
        inputQuery: string,
        options: ICommonObject,
        name: string = 'tools',
        tags: string[] = [],
        metadata: ICommonObject = {}
    ) {
        super({ name, metadata, tags, func: (input, config) => this.run(input, config) })
        this.tools = tools
        this.nodeData = nodeData
        this.inputQuery = inputQuery
        this.options = options
    }

    /**
     * Executes every tool call on the last (AI) message in parallel.
     *
     * @throws when the last message is not an AIMessage or a called tool is unknown
     */
    private async run(input: BaseMessage[] | MessagesState, config: RunnableConfig): Promise<BaseMessage[] | MessagesState> {
        const message = Array.isArray(input) ? input[input.length - 1] : input.messages[input.messages.length - 1]

        if (message._getType() !== 'ai') {
            throw new Error('ToolNode only accepts AIMessages as input.')
        }

        const outputs = await Promise.all(
            (message as AIMessage).tool_calls?.map(async (call) => {
                const tool = this.tools.find((tool) => tool.name === call.name)
                if (tool === undefined) {
                    throw new Error(`Tool ${call.name} not found.`)
                }
                let output = await tool.invoke(call.args, config)
                let sourceDocuments: Document[] = []
                // Tools may append serialized source documents after a known prefix.
                // Guard with typeof: tool.invoke can return non-string values (the
                // ToolMessage construction below already handles that), and calling
                // .includes on a non-string would throw at runtime.
                if (typeof output === 'string' && output.includes(SOURCE_DOCUMENTS_PREFIX)) {
                    const outputArray = output.split(SOURCE_DOCUMENTS_PREFIX)
                    output = outputArray[0]
                    const docs = outputArray[1]
                    try {
                        sourceDocuments = JSON.parse(docs)
                    } catch (e) {
                        // best-effort: keep the tool output even if the docs payload is malformed
                        console.error('Error parsing source documents from tool')
                    }
                }
                return new ToolMessage({
                    name: tool.name,
                    content: typeof output === 'string' ? output : JSON.stringify(output),
                    tool_call_id: call.id!,
                    additional_kwargs: {
                        sourceDocuments,
                        args: call.args,
                        usedTools: [
                            {
                                tool: tool.name ?? '',
                                toolInput: call.args,
                                toolOutput: output
                            }
                        ]
                    }
                })
            }) ?? []
        )

        // Tag every resulting message with this node's id so downstream nodes
        // can find "the output of node X" in the message history.
        const additional_kwargs: ICommonObject = { nodeId: this.nodeData.id }
        outputs.forEach((result) => (result.additional_kwargs = { ...result.additional_kwargs, ...additional_kwargs }))

        if (this.nodeData.inputs?.updateStateMemoryUI || this.nodeData.inputs?.updateStateMemoryCode) {
            // NOTE: when a state update is configured, a state-shaped object is
            // returned even if the input was a bare message array.
            const returnedOutput = await getReturnOutput(this.nodeData, this.inputQuery, this.options, outputs, input)
            return {
                ...returnedOutput,
                messages: outputs
            }
        } else {
            return Array.isArray(input) ? outputs : { messages: outputs }
        }
    }
}
/**
 * Computes the State-update object for the configured "Update State" tab.
 *
 * Builds the $flow context (with the tool outputs reformatted as IUsedTool[])
 * and either resolves the key/value rows of the table tab or runs the user's
 * code tab inside a NodeVM sandbox.
 *
 * @param outputs ToolMessages produced by ToolNode.run
 * @param state   current graph state (messages array or MessagesState)
 * @returns the partial state object to merge, or undefined when the selected
 *          tab has no configuration (caller spreads the result, so undefined
 *          is tolerated)
 */
const getReturnOutput = async (
    nodeData: INodeData,
    input: string,
    options: ICommonObject,
    outputs: ToolMessage[],
    state: BaseMessage[] | MessagesState
) => {
    const appDataSource = options.appDataSource as DataSource
    const databaseEntities = options.databaseEntities as IDatabaseEntity
    const tabIdentifier = nodeData.inputs?.[`${TAB_IDENTIFIER}_${nodeData.id}`] as string
    const updateStateMemoryUI = nodeData.inputs?.updateStateMemoryUI as string
    const updateStateMemoryCode = nodeData.inputs?.updateStateMemoryCode as string
    // Tab ids are stored as `<tabName>_<nodeId>`; default to the table tab.
    const selectedTab = tabIdentifier ? tabIdentifier.split(`_${nodeData.id}`)[0] : 'updateStateMemoryUI'
    const variables = await getVars(appDataSource, databaseEntities, nodeData)

    // Expose tool results in the documented $flow.output shape.
    const reformattedOutput = outputs.map((output) => {
        return {
            tool: output.name,
            toolInput: output.additional_kwargs.args,
            toolOutput: output.content,
            sourceDocuments: output.additional_kwargs.sourceDocuments
        } as IUsedTool
    })

    const flow = {
        chatflowId: options.chatflowid,
        sessionId: options.sessionId,
        chatId: options.chatId,
        input,
        output: reformattedOutput,
        state,
        vars: prepareSandboxVars(variables)
    }

    if (selectedTab === 'updateStateMemoryUI' && updateStateMemoryUI) {
        try {
            const parsedSchema = typeof updateStateMemoryUI === 'string' ? JSON.parse(updateStateMemoryUI) : updateStateMemoryUI
            const obj: ICommonObject = {}
            for (const sch of parsedSchema) {
                const key = sch.key
                if (!key) throw new Error(`Key is required`)
                let value = sch.value as string
                // Resolve $flow.* / $vars.* references against the flow context;
                // any other value is taken literally.
                if (value.startsWith('$flow')) {
                    value = customGet(flow, sch.value.replace('$flow.', ''))
                } else if (value.startsWith('$vars')) {
                    value = customGet(flow, sch.value.replace('$', ''))
                }
                obj[key] = value
            }
            return obj
        } catch (e) {
            throw new Error(e)
        }
    } else if (selectedTab === 'updateStateMemoryCode' && updateStateMemoryCode) {
        // Run the user's code in a sandbox with $flow/$vars injected (see getVM).
        const vm = await getVM(appDataSource, databaseEntities, nodeData, flow)
        try {
            const response = await vm.run(`module.exports = async function() {${updateStateMemoryCode}}()`, __dirname)
            if (typeof response !== 'object') throw new Error('Return output must be an object')
            return response
        } catch (e) {
            throw new Error(e)
        }
    }
}
// Flowise node registration: expose this node under the conventional nodeClass export.
module.exports = { nodeClass: ToolNode_SeqAgents }

View File

@ -0,0 +1 @@
<svg xmlns="http://www.w3.org/2000/svg" width="24" height="24" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round" class="icon icon-tabler icons-tabler-outline icon-tabler-tool"><path stroke="none" d="M0 0h24v24H0z" fill="none"/><path d="M7 10h3v-3l-3.5 -3.5a6 6 0 0 1 8 8l6 6a2 2 0 0 1 -3 3l-6 -6a6 6 0 0 1 -8 -8l3.5 3.5" /></svg>

After

Width:  |  Height:  |  Size: 407 B

View File

@ -0,0 +1,345 @@
import { get } from 'lodash'
import { z } from 'zod'
import { DataSource } from 'typeorm'
import { NodeVM } from 'vm2'
import { StructuredTool } from '@langchain/core/tools'
import { ChatMistralAI } from '@langchain/mistralai'
import { ChatAnthropic } from '@langchain/anthropic'
import { Runnable, RunnableConfig, mergeConfigs } from '@langchain/core/runnables'
import { AIMessage, BaseMessage, HumanMessage, MessageContentImageUrl, ToolMessage } from '@langchain/core/messages'
import { BaseChatModel } from '@langchain/core/language_models/chat_models'
import { addImagesToMessages, llmSupportsVision } from '../../src/multiModalUtils'
import { ICommonObject, IDatabaseEntity, INodeData, ISeqAgentsState, IVisionChatModal } from '../../src/Interface'
import { availableDependencies, defaultAllowBuiltInDep, getVars, prepareSandboxVars } from '../../src/utils'
/**
 * Evaluates a single condition (string or numeric) against an input value.
 *
 * String conditions compare stringified operands; numeric conditions require
 * both operands to parse as numbers, otherwise they evaluate to false.
 * Unknown condition names evaluate to false.
 *
 * @param input     value under test; null/undefined always yields false
 * @param condition condition name as shown in the UI (e.g. 'Contains', 'Is Empty')
 * @param value     comparison operand (unused by 'Is Empty' / 'Is Not Empty')
 */
export const checkCondition = (input: string | number | undefined, condition: string, value: string | number = ''): boolean => {
    // Only bail out on a truly missing input. The previous `!input` guard also
    // rejected '' and 0, which made 'Is Empty' unreachable for empty strings
    // and broke numeric comparisons against zero.
    if (input === undefined || input === null) return false

    // Function to check if a string is a valid number
    const isNumericString = (str: string): boolean => /^-?\d*\.?\d+$/.test(str)

    // Function to convert input to number if possible
    const toNumber = (val: string | number): number => {
        if (typeof val === 'number') return val
        return isNumericString(val) ? parseFloat(val) : NaN
    }

    // Convert input and value to numbers
    const numInput = toNumber(input)
    const numValue = toNumber(value)

    // Helper function for numeric comparisons
    const numericCompare = (comp: (a: number, b: number) => boolean): boolean => {
        if (isNaN(numInput) || isNaN(numValue)) return false
        return comp(numInput, numValue)
    }

    // Helper function for string operations
    const stringCompare = (strInput: string | number, strValue: string | number, op: (a: string, b: string) => boolean): boolean => {
        return op(String(strInput), String(strValue))
    }

    switch (condition) {
        // String conditions
        case 'Contains':
            return stringCompare(input, value, (a, b) => a.includes(b))
        case 'Not Contains':
            return stringCompare(input, value, (a, b) => !a.includes(b))
        case 'Start With':
            return stringCompare(input, value, (a, b) => a.startsWith(b))
        case 'End With':
            return stringCompare(input, value, (a, b) => a.endsWith(b))
        case 'Is':
            return String(input) === String(value)
        case 'Is Not':
            return String(input) !== String(value)
        case 'Is Empty':
            return String(input).trim().length === 0
        case 'Is Not Empty':
            return String(input).trim().length > 0

        // Numeric conditions
        case 'Greater Than':
            return numericCompare((a, b) => a > b)
        case 'Less Than':
            return numericCompare((a, b) => a < b)
        case 'Equal To':
            return numericCompare((a, b) => a === b)
        case 'Not Equal To':
            return numericCompare((a, b) => a !== b)
        case 'Greater Than or Equal To':
            return numericCompare((a, b) => a >= b)
        case 'Less Than or Equal To':
            return numericCompare((a, b) => a <= b)

        default:
            return false
    }
}
/**
 * Wraps every property of `obj` in a zero-arg getter, resolving two kinds of
 * references first:
 *  1. JSON-encoded `{ id }` objects → replaced with the content of the latest
 *     message in `state.messages` whose additional_kwargs.nodeId matches.
 *  2. `$flow.state.<path>` strings → resolved against the current state
 *     (objects are JSON-stringified).
 * Any other value is passed through unchanged.
 */
export const transformObjectPropertyToFunction = (obj: ICommonObject, state: ISeqAgentsState) => {
    const transformedObject: ICommonObject = {}

    for (const key in obj) {
        let value = obj[key]

        // get message from agent
        try {
            const parsedValue = JSON.parse(value)
            if (typeof parsedValue === 'object' && parsedValue.id) {
                const messageOutputs = ((state.messages as unknown as BaseMessage[]) ?? []).filter(
                    (message) => message.additional_kwargs && message.additional_kwargs?.nodeId === parsedValue.id
                )
                const messageOutput = messageOutputs[messageOutputs.length - 1]
                if (messageOutput) value = messageOutput.content
            }
        } catch (e) {
            // value was not JSON — keep it as-is
        }

        // get state value. Guard with typeof: the message content resolved above
        // can be a non-string (e.g. an array of content parts), and calling
        // startsWith on it would throw at runtime.
        if (typeof value === 'string' && value.startsWith('$flow.state')) {
            value = customGet(state, value.replace('$flow.state.', ''))
            if (typeof value === 'object') value = JSON.stringify(value)
        }

        transformedObject[key] = () => value
    }

    return transformedObject
}
/**
 * Resolves image attachments for a vision-capable chat model.
 *
 * When the model supports vision, collects the image contents for the current
 * request and toggles the model between its vision and original variants
 * depending on whether any images were found. Non-vision models get an empty
 * result and are left untouched.
 */
export const processImageMessage = async (llm: BaseChatModel, nodeData: INodeData, options: ICommonObject) => {
    if (!llmSupportsVision(llm)) {
        return [] as MessageContentImageUrl[]
    }

    const visionModel = llm as IVisionChatModal
    const imageContents: MessageContentImageUrl[] = await addImagesToMessages(nodeData, options, llm.multiModalOption)

    if (imageContents?.length) {
        visionModel.setVisionModel()
    } else {
        visionModel.revertToOriginalModel()
    }

    return imageContents
}
/**
 * Builds a NodeVM sandbox for running user-supplied node code.
 *
 * Injects the flow context as `$flow` and the resolved custom variables as
 * `$vars`. Allowed builtin/external modules can be extended via the
 * TOOL_FUNCTION_BUILTIN_DEP / TOOL_FUNCTION_EXTERNAL_DEP env vars
 * (comma-separated).
 */
export const getVM = async (appDataSource: DataSource, databaseEntities: IDatabaseEntity, nodeData: INodeData, flow: ICommonObject) => {
    const variables = await getVars(appDataSource, databaseEntities, nodeData)

    const sandbox: any = {
        $vars: prepareSandboxVars(variables),
        $flow: flow
    }

    const extraBuiltins = process.env.TOOL_FUNCTION_BUILTIN_DEP
    const builtinDeps = extraBuiltins ? defaultAllowBuiltInDep.concat(extraBuiltins.split(',')) : defaultAllowBuiltInDep

    const extraExternals = process.env.TOOL_FUNCTION_EXTERNAL_DEP
    const externalDeps = extraExternals ? extraExternals.split(',') : []

    return new NodeVM({
        console: 'inherit',
        sandbox,
        require: {
            external: { modules: availableDependencies.concat(externalDeps) },
            builtin: builtinDeps
        }
    } as any)
}
/**
 * lodash.get with support for the `[-1]` (last element) index syntax.
 *
 * Paths without `[-1]` are delegated to lodash `get` unchanged. Otherwise the
 * path is walked segment by segment, resolving `name[index]` segments by hand
 * so that negative indices count from the end of the array. Returns undefined
 * as soon as any segment fails to resolve (including indexing a non-array).
 */
export const customGet = (obj: any, path: string) => {
    if (!path.includes('[-1]')) {
        return get(obj, path)
    }

    let current = obj
    for (const segment of path.split('.')) {
        if (segment.includes('[') && segment.includes(']')) {
            const [prop, rawIndex] = segment.split('[')
            const idx = parseInt(rawIndex.replace(']', ''))
            current = current[prop]
            if (!Array.isArray(current)) {
                return undefined
            }
            current = idx < 0 ? current[current.length + idx] : current[idx]
        } else {
            current = get(current, segment)
        }
        if (current === undefined) {
            return undefined
        }
    }
    return current
}
/**
 * Converts the UI's structured-output schema rows into a map of zod types.
 *
 * Each row is `{ key, type, description, enumValues? }` where type is one of
 * 'String', 'String Array', 'Number', 'Boolean', 'Enum' (enumValues is a
 * comma-separated list). Rows with an unrecognized type are skipped.
 * Accepts the schema either as a JSON string or as an already-parsed object.
 */
export const convertStructuredSchemaToZod = (schema: string | object): ICommonObject => {
    try {
        const entries = typeof schema === 'string' ? JSON.parse(schema) : schema
        const zodObj: ICommonObject = {}

        for (const entry of entries) {
            switch (entry.type) {
                case 'String':
                    zodObj[entry.key] = z.string().describe(entry.description)
                    break
                case 'String Array':
                    zodObj[entry.key] = z.array(z.string()).describe(entry.description)
                    break
                case 'Number':
                    zodObj[entry.key] = z.number().describe(entry.description)
                    break
                case 'Boolean':
                    zodObj[entry.key] = z.boolean().describe(entry.description)
                    break
                case 'Enum':
                    zodObj[entry.key] = z.enum(entry.enumValues.split(',').map((item: string) => item.trim())).describe(entry.description)
                    break
            }
        }

        return zodObj
    } catch (e) {
        throw new Error(e)
    }
}
/**
 * Rebuilds the state's message history into a shape the target provider
 * accepts before invoking it.
 *
 * Drops messages with non-string content (a provider like Anthropic can emit
 * content-part arrays), then applies provider-specific fixups for MistralAI
 * and Anthropic message-ordering restrictions.
 */
export const restructureMessages = (llm: BaseChatModel, state: ISeqAgentsState) => {
    const messages: BaseMessage[] = []
    for (const message of state.messages as unknown as BaseMessage[]) {
        // Sometimes Anthropic can return a message with content types of array, ignore that EXCEPT when tool calls are present
        if ((message as any).tool_calls?.length) {
            message.content = JSON.stringify(message.content)
        }

        if (typeof message.content === 'string') {
            messages.push(message)
        }
    }

    // instanceof can miss chunk variants, so also match on constructor name
    const isToolMessage = (message: BaseMessage) => message instanceof ToolMessage || message.constructor.name === 'ToolMessageChunk'
    const isAIMessage = (message: BaseMessage) => message instanceof AIMessage || message.constructor.name === 'AIMessageChunk'
    const isHumanMessage = (message: BaseMessage) => message instanceof HumanMessage || message.constructor.name === 'HumanMessageChunk'

    /*
     * MistralAI does not support:
     * 1.) Last message as AI Message or Tool Message
     * 2.) Tool Message followed by Human Message
     */
    if (llm instanceof ChatMistralAI) {
        if (messages.length > 1) {
            // NOTE: messages is mutated (splice/push) while being indexed; the
            // loop bound is re-evaluated each iteration, which the fixups rely on.
            for (let i = 0; i < messages.length; i++) {
                const message = messages[i]

                // If last message is denied Tool Message, add a new Human Message
                if (isToolMessage(message) && i === messages.length - 1 && message.additional_kwargs?.toolCallsDenied) {
                    messages.push(new AIMessage({ content: `Tool calls got denied. Do you have other questions?` }))
                } else if (i + 1 < messages.length) {
                    const nextMessage = messages[i + 1]
                    const currentMessage = message

                    // If current message is Tool Message and next message is Human Message, add AI Message between Tool and Human Message
                    if (isToolMessage(currentMessage) && isHumanMessage(nextMessage)) {
                        messages.splice(i + 1, 0, new AIMessage({ content: 'Tool calls executed' }))
                    }

                    // If last message is AI Message or Tool Message, add Human Message
                    if (i + 1 === messages.length - 1 && (isAIMessage(nextMessage) || isToolMessage(nextMessage))) {
                        messages.push(new HumanMessage({ content: nextMessage.content || 'Given the user question, answer user query' }))
                    }
                }
            }
        }
    } else if (llm instanceof ChatAnthropic) {
        /*
         * Anthropic does not support first message as AI Message
         */
        if (messages.length) {
            const firstMessage = messages[0]
            if (isAIMessage(firstMessage)) {
                // replace the leading AI message with a Human message carrying the same fields
                messages.shift()
                messages.unshift(new HumanMessage({ ...firstMessage }))
            }
        }
    }

    return messages
}
/**
 * Structured tool used to force a model into emitting structured output: the
 * model "calls" this tool with arguments matching `schema`, and the tool
 * simply echoes those arguments back as a JSON string.
 */
export class ExtractTool extends StructuredTool {
    name = 'extract'

    description = 'Extract structured data from the output'

    // zod schema supplied by the caller (see convertStructuredSchemaToZod)
    schema

    constructor(fields: ICommonObject) {
        super()
        this.schema = fields.schema
    }

    async _call(input: any) {
        return JSON.stringify(input)
    }
}
/**
 * Constructor options for RunnableCallable.
 * NOTE(review): `extends Partial<any>` effectively disables excess-property
 * checking (it is how `metadata` sneaks through) — confirm this is intentional.
 */
export interface RunnableCallableArgs extends Partial<any> {
    name?: string
    // the function wrapped by the runnable
    func: (...args: any[]) => any
    tags?: string[]
    // when true (default), the call is traced via _callWithConfig
    trace?: boolean
    // when true (default), a returned Runnable is invoked again with the same input
    recurse?: boolean
}

/** Object-shaped graph state: a single channel holding the message history. */
export interface MessagesState {
    messages: BaseMessage[]
}
/**
 * Adapter that exposes a plain function as a LangChain Runnable (ported from
 * LangGraph's internal RunnableCallable).
 *
 * - `trace` (default true) routes the call through _callWithConfig so it shows
 *   up in tracing callbacks.
 * - `recurse` (default true) re-invokes a returned Runnable with the original
 *   input, allowing `func` to delegate to another runnable.
 */
export class RunnableCallable<I = unknown, O = unknown> extends Runnable<I, O> {
    lc_namespace: string[] = ['langgraph']

    func: (...args: any[]) => any

    tags?: string[]

    config?: RunnableConfig

    trace: boolean = true

    recurse: boolean = true

    constructor(fields: RunnableCallableArgs) {
        super()
        this.name = fields.name ?? fields.func.name
        this.func = fields.func
        this.config = fields.tags ? { tags: fields.tags } : undefined
        this.trace = fields.trace ?? this.trace
        this.recurse = fields.recurse ?? this.recurse
        if (fields.metadata) {
            // Merge only the existing metadata. The previous code spread the
            // whole config object into `metadata`, leaking `tags` (and any other
            // config keys) into the metadata map.
            this.config = { ...this.config, metadata: { ...this.config?.metadata, ...fields.metadata } }
        }
    }

    async invoke(input: any, options?: Partial<RunnableConfig> | undefined): Promise<any> {
        if (this.func === undefined) {
            // Previously this re-invoked itself with the same arguments,
            // recursing until stack overflow. Fail fast instead.
            throw new Error('RunnableCallable was initialized without a function to call')
        }

        let returnValue: any

        if (this.trace) {
            returnValue = await this._callWithConfig(this.func, input, mergeConfigs(this.config, options))
        } else {
            returnValue = await this.func(input, mergeConfigs(this.config, options))
        }

        // If func handed back another Runnable, run it against the same input.
        if (returnValue instanceof Runnable && this.recurse) {
            return await returnValue.invoke(input, options)
        }

        return returnValue
    }
}

View File

@ -1,6 +1,6 @@
import { INode } from '../../../src/Interface'
import { getBaseClasses } from '../../../src/utils'
import { Calculator } from 'langchain/tools/calculator'
import { Calculator } from '@langchain/community/tools/calculator'
class Calculator_Tools implements INode {
label: string

View File

@ -4,6 +4,7 @@ import { RunnableConfig } from '@langchain/core/runnables'
import { StructuredTool, ToolParams } from '@langchain/core/tools'
import { CallbackManagerForToolRun, Callbacks, CallbackManager, parseCallbackConfigArg } from '@langchain/core/callbacks/manager'
import { availableDependencies, defaultAllowBuiltInDep, prepareSandboxVars } from '../../../src/utils'
import { ICommonObject } from '../../../src/Interface'
class ToolInputParsingException extends Error {
output?: string
@ -59,7 +60,7 @@ export class DynamicStructuredTool<
arg: z.output<T>,
configArg?: RunnableConfig | Callbacks,
tags?: string[],
flowConfig?: { sessionId?: string; chatId?: string; input?: string }
flowConfig?: { sessionId?: string; chatId?: string; input?: string; state?: ICommonObject }
): Promise<string> {
const config = parseCallbackConfigArg(configArg)
if (config.runName === undefined) {
@ -107,7 +108,7 @@ export class DynamicStructuredTool<
protected async _call(
arg: z.output<T>,
_?: CallbackManagerForToolRun,
flowConfig?: { sessionId?: string; chatId?: string; input?: string }
flowConfig?: { sessionId?: string; chatId?: string; input?: string; state?: ICommonObject }
): Promise<string> {
let sandbox: any = {}
if (typeof arg === 'object' && Object.keys(arg).length) {

View File

@ -1,19 +1,19 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<svg xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:cc="http://creativecommons.org/ns#" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:svg="http://www.w3.org/2000/svg" xmlns="http://www.w3.org/2000/svg" id="svg8" version="1.1" viewBox="0 0 92 92" height="92mm" width="92mm">
<defs id="defs2"/>
<metadata id="metadata5">
<rdf:RDF>
<cc:Work rdf:about="">
<dc:format>image/svg+xml</dc:format>
<dc:type rdf:resource="http://purl.org/dc/dcmitype/StillImage"/>
<dc:title/>
</cc:Work>
</rdf:RDF>
</metadata>
<g transform="translate(-40.921303,-17.416526)" id="layer1">
<circle r="0" style="fill:none;stroke:#000000;stroke-width:12;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" cy="92" cx="75" id="path3713"/>
<circle r="30" cy="53.902557" cx="75.921303" id="path834" style="fill:none;fill-opacity:1;stroke:#3050ff;stroke-width:10;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"/>
<path d="m 67.514849,37.91524 a 18,18 0 0 1 21.051475,3.312407 18,18 0 0 1 3.137312,21.078282" id="path852" style="fill:none;fill-opacity:1;stroke:#3050ff;stroke-width:5;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"/>
<rect transform="rotate(-46.234709)" ry="1.8669105e-13" y="122.08995" x="3.7063529" height="39.963303" width="18.846331" id="rect912" style="opacity:1;fill:#3050ff;fill-opacity:1;stroke:none;stroke-width:8;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"/>
</g>
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<svg xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:cc="http://creativecommons.org/ns#" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:svg="http://www.w3.org/2000/svg" xmlns="http://www.w3.org/2000/svg" id="svg8" version="1.1" viewBox="0 0 92 92" height="92mm" width="92mm">
<defs id="defs2"/>
<metadata id="metadata5">
<rdf:RDF>
<cc:Work rdf:about="">
<dc:format>image/svg+xml</dc:format>
<dc:type rdf:resource="http://purl.org/dc/dcmitype/StillImage"/>
<dc:title/>
</cc:Work>
</rdf:RDF>
</metadata>
<g transform="translate(-40.921303,-17.416526)" id="layer1">
<circle r="0" style="fill:none;stroke:#000000;stroke-width:12;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1" cy="92" cx="75" id="path3713"/>
<circle r="30" cy="53.902557" cx="75.921303" id="path834" style="fill:none;fill-opacity:1;stroke:#3050ff;stroke-width:10;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"/>
<path d="m 67.514849,37.91524 a 18,18 0 0 1 21.051475,3.312407 18,18 0 0 1 3.137312,21.078282" id="path852" style="fill:none;fill-opacity:1;stroke:#3050ff;stroke-width:5;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"/>
<rect transform="rotate(-46.234709)" ry="1.8669105e-13" y="122.08995" x="3.7063529" height="39.963303" width="18.846331" id="rect912" style="opacity:1;fill:#3050ff;fill-opacity:1;stroke:none;stroke-width:8;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"/>
</g>
</svg>

Before

Width:  |  Height:  |  Size: 1.5 KiB

After

Width:  |  Height:  |  Size: 1.5 KiB

View File

@ -66,12 +66,14 @@ class IfElseFunction_Utilities implements INode {
{
label: 'True',
name: 'returnTrue',
baseClasses: ['string', 'number', 'boolean', 'json', 'array']
baseClasses: ['string', 'number', 'boolean', 'json', 'array'],
isAnchor: true
},
{
label: 'False',
name: 'returnFalse',
baseClasses: ['string', 'number', 'boolean', 'json', 'array']
baseClasses: ['string', 'number', 'boolean', 'json', 'array'],
isAnchor: true
}
]
}

View File

@ -6,6 +6,7 @@ import { Document } from '@langchain/core/documents'
import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams, IndexingResult } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam, getVersion } from '../../../src/utils'
import { addMMRInputParams, resolveVectorStoreOrRetriever } from '../VectorStoreUtils'
import { VectorStore } from '@langchain/core/vectorstores'
class MongoDBAtlas_VectorStores implements INode {
label: string
@ -178,7 +179,7 @@ class MongoDBAtlas_VectorStores implements INode {
indexName,
textKey,
embeddingKey
})
}) as unknown as VectorStore
return resolveVectorStoreOrRetriever(nodeData, vectorStore)
} catch (e) {

View File

@ -3,6 +3,7 @@ import { Pinecone, PineconeConfiguration } from '@pinecone-database/pinecone'
import { PineconeStoreParams, PineconeStore } from '@langchain/pinecone'
import { Embeddings } from '@langchain/core/embeddings'
import { Document } from '@langchain/core/documents'
import { VectorStore } from '@langchain/core/vectorstores'
import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams, IndexingResult } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { addMMRInputParams, resolveVectorStoreOrRetriever } from '../VectorStoreUtils'
@ -162,7 +163,7 @@ class Pinecone_VectorStores implements INode {
try {
if (recordManager) {
const vectorStore = await PineconeStore.fromExistingIndex(embeddings, obj)
const vectorStore = (await PineconeStore.fromExistingIndex(embeddings, obj)) as unknown as VectorStore
await recordManager.createSchema()
const res = await index({
docsSource: finalDocs,
@ -211,7 +212,7 @@ class Pinecone_VectorStores implements INode {
obj.filter = metadatafilter
}
const vectorStore = await PineconeStore.fromExistingIndex(embeddings, obj)
const vectorStore = (await PineconeStore.fromExistingIndex(embeddings, obj)) as unknown as VectorStore
return resolveVectorStoreOrRetriever(nodeData, vectorStore, obj.filter)
}

View File

@ -3,7 +3,7 @@ import { v4 as uuid } from 'uuid'
import { QdrantClient } from '@qdrant/js-client-rest'
import { VectorStoreRetrieverInput } from '@langchain/core/vectorstores'
import { Document } from '@langchain/core/documents'
import { QdrantVectorStore, QdrantLibArgs } from '@langchain/community/vectorstores/qdrant'
import { QdrantVectorStore, QdrantLibArgs } from '@langchain/qdrant'
import { Embeddings } from '@langchain/core/embeddings'
import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams, IndexingResult } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'

View File

@ -7,6 +7,7 @@ import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams, Indexi
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { addMMRInputParams, resolveVectorStoreOrRetriever } from '../VectorStoreUtils'
import { index } from '../../../src/indexing'
import { VectorStore } from '@langchain/core/vectorstores'
class Weaviate_VectorStores implements INode {
label: string
@ -179,7 +180,7 @@ class Weaviate_VectorStores implements INode {
try {
if (recordManager) {
const vectorStore = await WeaviateStore.fromExistingIndex(embeddings, obj)
const vectorStore = (await WeaviateStore.fromExistingIndex(embeddings, obj)) as unknown as VectorStore
await recordManager.createSchema()
const res = await index({
docsSource: finalDocs,
@ -234,7 +235,7 @@ class Weaviate_VectorStores implements INode {
weaviateFilter = typeof weaviateFilter === 'object' ? weaviateFilter : JSON.parse(weaviateFilter)
}
const vectorStore = await WeaviateStore.fromExistingIndex(embeddings, obj)
const vectorStore = (await WeaviateStore.fromExistingIndex(embeddings, obj)) as unknown as VectorStore
return resolveVectorStoreOrRetriever(nodeData, vectorStore, weaviateFilter)
}

View File

@ -1,12 +1,12 @@
import { flatten } from 'lodash'
import { ZepClient } from '@getzep/zep-cloud'
import { IZepConfig, ZepVectorStore } from '@getzep/zep-cloud/langchain'
import { Embeddings } from 'langchain/embeddings/base'
import { Document } from 'langchain/document'
import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams, IndexingResult } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { addMMRInputParams, resolveVectorStoreOrRetriever } from '../VectorStoreUtils'
import { FakeEmbeddings } from 'langchain/embeddings/fake'
import { Embeddings } from '@langchain/core/embeddings'
class Zep_CloudVectorStores implements INode {
label: string

View File

@ -31,29 +31,30 @@
"@getzep/zep-js": "^0.9.0",
"@gomomento/sdk": "^1.51.1",
"@gomomento/sdk-core": "^1.51.1",
"@google-ai/generativelanguage": "^0.2.1",
"@google/generative-ai": "^0.7.0",
"@google-ai/generativelanguage": "^2.5.0",
"@google/generative-ai": "^0.15.0",
"@huggingface/inference": "^2.6.1",
"@langchain/anthropic": "^0.1.14",
"@langchain/anthropic": "^0.2.1",
"@langchain/cohere": "^0.0.7",
"@langchain/community": "^0.0.43",
"@langchain/core": "^0.1.63",
"@langchain/community": "^0.2.17",
"@langchain/core": "^0.2.14",
"@langchain/exa": "^0.0.5",
"@langchain/google-genai": "^0.0.10",
"@langchain/google-vertexai": "^0.0.5",
"@langchain/google-genai": "^0.0.22",
"@langchain/google-vertexai": "^0.0.19",
"@langchain/groq": "^0.0.8",
"@langchain/langgraph": "^0.0.12",
"@langchain/mistralai": "^0.0.19",
"@langchain/langgraph": "^0.0.22",
"@langchain/mistralai": "^0.0.26",
"@langchain/mongodb": "^0.0.1",
"@langchain/openai": "^0.0.30",
"@langchain/pinecone": "^0.0.3",
"@langchain/qdrant": "^0.0.5",
"@langchain/weaviate": "^0.0.1",
"@mendable/firecrawl-js": "^0.0.28",
"@mistralai/mistralai": "0.1.3",
"@notionhq/client": "^2.2.8",
"@opensearch-project/opensearch": "^1.2.0",
"@pinecone-database/pinecone": "2.2.2",
"@qdrant/js-client-rest": "^1.2.2",
"@qdrant/js-client-rest": "^1.9.0",
"@supabase/supabase-js": "^2.29.0",
"@types/js-yaml": "^4.0.5",
"@types/jsdom": "^21.1.1",
@ -65,7 +66,7 @@
"axios": "1.6.2",
"cheerio": "^1.0.0-rc.12",
"chromadb": "^1.5.11",
"cohere-ai": "^6.2.0",
"cohere-ai": "^7.7.5",
"crypto-js": "^4.1.1",
"css-what": "^6.1.0",
"d3-dsv": "2",
@ -81,7 +82,7 @@
"ioredis": "^5.3.2",
"jsdom": "^22.1.0",
"jsonpointer": "^5.0.1",
"langchain": "^0.1.37",
"langchain": "^0.2.8",
"langfuse": "3.3.4",
"langfuse-langchain": "^3.3.4",
"langsmith": "0.1.6",
@ -106,7 +107,7 @@
"puppeteer": "^20.7.1",
"pyodide": ">=0.21.0-alpha.2",
"redis": "^4.6.7",
"replicate": "^0.18.0",
"replicate": "^0.31.1",
"socket.io": "^4.6.1",
"srt-parser-2": "^1.2.3",
"typeorm": "^0.3.6",
@ -127,13 +128,10 @@
"@types/pg": "^8.10.2",
"@types/ws": "^8.5.3",
"babel-register": "^6.26.0",
"eslint-plugin-markdown": "^3.0.1",
"eslint-plugin-react": "^7.33.2",
"eslint-plugin-react-hooks": "^4.6.0",
"gulp": "^4.0.2",
"rimraf": "^5.0.5",
"tsc-watch": "^6.0.4",
"tslib": "^2.6.2",
"typescript": "^4.8.4"
"typescript": "^5.4.5"
}
}

View File

@ -20,6 +20,7 @@ export type NodeParamsType =
| 'date'
| 'file'
| 'folder'
| 'tabs'
export type CommonType = string | number | boolean | undefined | null
@ -63,6 +64,8 @@ export interface INodeOutputsValue {
name: string
baseClasses: string[]
description?: string
hidden?: boolean
isAnchor?: boolean
}
export interface INodeParams {
@ -85,7 +88,11 @@ export interface INodeParams {
additionalParams?: boolean
loadMethod?: string
hidden?: boolean
variables?: ICommonObject[]
hideCodeExecute?: boolean
codeExample?: string
hint?: Record<string, string>
tabIdentifier?: string
tabs?: Array<INodeParams>
}
export interface INodeExecutionData {
@ -109,6 +116,7 @@ export interface INodeProperties {
filePath?: string
badge?: string
deprecateMessage?: string
hideOutput?: boolean
}
export interface INode extends INodeProperties {
@ -151,6 +159,7 @@ export interface IUsedTool {
tool: string
toolInput: object
toolOutput: string | object
sourceDocuments?: ICommonObject[]
}
export interface IMultiAgentNode {
@ -166,6 +175,27 @@ export interface IMultiAgentNode {
recursionLimit?: number
moderations?: Moderation[]
multiModalMessageContent?: MessageContentImageUrl[]
checkpointMemory?: any
}
type SeqAgentType = 'agent' | 'condition' | 'end' | 'start' | 'tool' | 'state' | 'llm'
export interface ISeqAgentNode {
id: string
node: any
name: string
label: string
type: SeqAgentType
output: string
llm?: any
startLLM?: any
predecessorAgents?: ISeqAgentNode[]
recursionLimit?: number
moderations?: Moderation[]
multiModalMessageContent?: MessageContentImageUrl[]
checkpointMemory?: any
agentInterruptToolNode?: any
agentInterruptToolFunc?: any
}
export interface ITeamState {
@ -176,13 +206,31 @@ export interface ITeamState {
team_members: string[]
next: string
instructions: string
summarization: string
}
export interface ISeqAgentsState {
messages: {
value: (x: BaseMessage[], y: BaseMessage[]) => BaseMessage[]
default: () => BaseMessage[]
}
}
export interface IAgentReasoning {
agentName: string
messages: string[]
next: string
instructions: string
next?: string
instructions?: string
usedTools?: IUsedTool[]
sourceDocuments?: ICommonObject[]
state?: ICommonObject
nodeName?: string
}
export interface IAction {
id?: string
elements?: Array<{ type: string; label: string }>
mapping?: { approve: string; reject: string; toolCalls: any[] }
}
export interface IFileUpload {

View File

@ -11,6 +11,7 @@ import { Runnable, RunnableSequence, RunnablePassthrough } from '@langchain/core
import { Serializable } from '@langchain/core/load/serializable'
import { renderTemplate } from '@langchain/core/prompts'
import { ChatGeneration } from '@langchain/core/outputs'
import { Document } from '@langchain/core/documents'
import { BaseChain, SerializedLLMChain } from 'langchain/chains'
import {
CreateReactAgentParams,
@ -421,7 +422,8 @@ export class AgentExecutor extends BaseChain<ChainValues, AgentExecutorOutput> {
{
sessionId: this.sessionId,
chatId: this.chatId,
input: this.input
input: this.input,
state: inputs
}
)
usedTools.push({
@ -556,7 +558,8 @@ export class AgentExecutor extends BaseChain<ChainValues, AgentExecutorOutput> {
{
sessionId: this.sessionId,
chatId: this.chatId,
input: this.input
input: this.input,
state: inputs
}
)
if (typeof observation === 'string' && observation.includes(SOURCE_DOCUMENTS_PREFIX)) {

View File

@ -208,20 +208,22 @@ export const getNodeModulesPackagePath = (packageName: string): string => {
*/
export const getInputVariables = (paramValue: string): string[] => {
if (typeof paramValue !== 'string') return []
let returnVal = paramValue
const returnVal = paramValue
const variableStack = []
const inputVariables = []
let startIdx = 0
const endIdx = returnVal.length
while (startIdx < endIdx) {
const substr = returnVal.substring(startIdx, startIdx + 1)
// Check for escaped curly brackets
if (substr === '\\' && (returnVal[startIdx + 1] === '{' || returnVal[startIdx + 1] === '}')) {
startIdx += 2 // Skip the escaped bracket
continue
}
// Store the opening double curly bracket
if (substr === '{') {
variableStack.push({ substr, startIdx: startIdx + 1 })
}
// Found the complete variable
if (substr === '}' && variableStack.length > 0 && variableStack[variableStack.length - 1].substr === '{') {
const variableStartIdx = variableStack[variableStack.length - 1].startIdx
@ -729,7 +731,7 @@ export const getVars = async (appDataSource: DataSource, databaseEntities: IData
const variables = ((await appDataSource.getRepository(databaseEntities['Variable']).find()) as IVariable[]) ?? []
// override variables defined in overrideConfig
// nodeData.inputs.variables is an Object, check each property and override the variable
// nodeData.inputs.vars is an Object, check each property and override the variable
if (nodeData?.inputs?.vars) {
for (const propertyName of Object.getOwnPropertyNames(nodeData.inputs.vars)) {
const foundVar = variables.find((v) => v.name === propertyName)

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -1,5 +1,7 @@
{
"description": "Prompt engineering team working together to craft Worker Prompts for your AgentFlow.",
"framework": ["Langchain"],
"usecases": ["Engineering"],
"nodes": [
{
"id": "supervisor_0",

File diff suppressed because it is too large Load Diff

View File

@ -1,6 +1,5 @@
{
"name": "add_contact_hubspot",
"framework": ["Langchain"],
"description": "Add new contact to Hubspot",
"color": "linear-gradient(rgb(85,198,123), rgb(0,230,99))",
"iconSrc": "https://cdn.worldvectorlogo.com/logos/hubspot-1.svg",

View File

@ -1,6 +1,5 @@
{
"name": "add_airtable",
"framework": ["Langchain"],
"description": "Add column1, column2 to Airtable",
"color": "linear-gradient(rgb(125,71,222), rgb(128,102,23))",
"iconSrc": "https://raw.githubusercontent.com/gilbarbara/logos/main/logos/airtable.svg",

Some files were not shown because too many files have changed in this diff Show More