From df50c0499d6572aaa6c4c321260330ba98f5315c Mon Sep 17 00:00:00 2001
From: Henry
Date: Wed, 12 Apr 2023 17:54:20 +0100
Subject: [PATCH] Add BabyAGI node

---
 .../nodes/agents/BabyAGI/BabyAGI.ts           | 376 ++++++++++++++++++
 .../nodes/agents/BabyAGI/babyagi.svg          |   9 +
 .../nodes/chatmodels/ChatOpenAI/ChatOpenAI.ts |  12 +
 packages/components/package.json              |   2 +
 packages/server/marketplaces/BabyAGI.json     | 104 +++++
 packages/server/src/utils/index.ts            |   4 +-
 .../ui/src/views/chatmessage/ChatMessage.css  |  10 +-
 .../ui/src/views/chatmessage/ChatMessage.js   |   6 +-
 8 files changed, 511 insertions(+), 12 deletions(-)
 create mode 100644 packages/components/nodes/agents/BabyAGI/BabyAGI.ts
 create mode 100644 packages/components/nodes/agents/BabyAGI/babyagi.svg
 create mode 100644 packages/server/marketplaces/BabyAGI.json

diff --git a/packages/components/nodes/agents/BabyAGI/BabyAGI.ts b/packages/components/nodes/agents/BabyAGI/BabyAGI.ts
new file mode 100644
index 000000000..8b2972156
--- /dev/null
+++ b/packages/components/nodes/agents/BabyAGI/BabyAGI.ts
@@ -0,0 +1,376 @@
+import { INode, INodeData, INodeParams } from '../../../src/Interface'
+import { Configuration, CreateChatCompletionRequest, CreateCompletionRequest, OpenAIApi } from 'openai'
+import { PineconeClient } from '@pinecone-database/pinecone'
+import { CreateIndexRequest } from '@pinecone-database/pinecone/dist/pinecone-generated-ts-fetch'
+import { VectorOperationsApi } from '@pinecone-database/pinecone/dist/pinecone-generated-ts-fetch'
+import { v4 as uuidv4 } from 'uuid'
+
+interface Task {
+    id: string
+    name: string
+    priority: number // 1 is the highest priority
+}
+
+class BabyAGI_Agents implements INode {
+    label: string
+    name: string
+    description: string
+    type: string
+    icon: string
+    category: string
+    baseClasses: string[]
+    inputs: INodeParams[]
+
+    constructor() {
+        this.label = 'BabyAGI'
+        this.name = 'babyAGI'
+        this.type = 'BabyAGI'
+        this.category = 'Agents'
+        this.icon = 'babyagi.svg'
+        this.description = 'Task-driven autonomous agent that creates new tasks and reprioritizes the task list based on the objective'
+        this.inputs = [
+            {
+                label: 'Task Loop',
+                name: 'taskLoop',
+                type: 'number',
+                default: 3
+            },
+            {
+                label: 'OpenAI Api Key',
+                name: 'openAIApiKey',
+                type: 'password'
+            },
+            {
+                label: 'Pinecone Api Key',
+                name: 'pineconeApiKey',
+                type: 'password'
+            },
+            {
+                label: 'Pinecone Environment',
+                name: 'pineconeEnv',
+                type: 'string'
+            },
+            {
+                label: 'Pinecone Index',
+                name: 'pineconeIndex',
+                type: 'string'
+            },
+            {
+                label: 'Model Name',
+                name: 'modelName',
+                type: 'options',
+                options: [
+                    {
+                        label: 'gpt-4',
+                        name: 'gpt-4'
+                    },
+                    {
+                        label: 'gpt-4-0314',
+                        name: 'gpt-4-0314'
+                    },
+                    {
+                        label: 'gpt-4-32k-0314',
+                        name: 'gpt-4-32k-0314'
+                    },
+                    {
+                        label: 'gpt-3.5-turbo',
+                        name: 'gpt-3.5-turbo'
+                    },
+                    {
+                        label: 'gpt-3.5-turbo-0301',
+                        name: 'gpt-3.5-turbo-0301'
+                    }
+                ],
+                default: 'gpt-3.5-turbo',
+                optional: true
+            }
+        ]
+    }
+
+    async getBaseClasses(): Promise<string[]> {
+        return ['BabyAGI']
+    }
+
+    async init(): Promise<any> {
+        return null
+    }
+
+    async run(nodeData: INodeData, input: string): Promise<string> {
+        const openAIApiKey = nodeData.inputs?.openAIApiKey as string
+        const pineconeApiKey = nodeData.inputs?.pineconeApiKey as string
+        const pineconeEnv = nodeData.inputs?.pineconeEnv as string
+        const index = nodeData.inputs?.pineconeIndex as string
+        const modelName = nodeData.inputs?.modelName as string
+        const taskLoop = nodeData.inputs?.taskLoop as string
+        const objective = input
+
+        const configuration = new Configuration({
+            apiKey: openAIApiKey
+        })
+        const openai = new OpenAIApi(configuration)
+
+        const pinecone = new PineconeClient()
+        await pinecone.init({
+            apiKey: pineconeApiKey,
+            environment: pineconeEnv
+        })
+
+        const dimension = 1536
+        const metric = 'cosine'
+        const podType = 'p1'
+
+        const indexList = await pinecone.listIndexes()
+        if (!indexList.includes(index)) {
+            const createIndexOptions: CreateIndexRequest = {
+                createRequest: {
+                    name: index,
+                    dimension,
+                    metric,
+                    podType
+                }
+            }
+            await pinecone.createIndex(createIndexOptions)
+        }
+
+        const vectorIndex: VectorOperationsApi = pinecone.Index(index)
+
+        let taskList: Task[] = []
+        const embeddingList = new Map<string, number[]>()
+
+        taskList = [
+            {
+                id: uuidv4(),
+                name: 'Develop a task list',
+                priority: 1
+            }
+        ]
+
+        return await mainLoop(openai, pinecone, index, embeddingList, vectorIndex, taskList, objective, modelName, taskLoop)
+    }
+}
+
+export const getADAEmbedding = async (openai: OpenAIApi, text: string, embeddingList: Map<string, number[]>): Promise<number[]> => {
+    //console.log('\nGetting ADA embedding for: ', text)
+
+    if (embeddingList.has(text)) {
+        //console.log('Embedding already exists for: ', text)
+        const numbers = embeddingList.get(text)
+        return numbers ?? []
+    }
+
+    const embedding = (
+        await openai.createEmbedding({
+            input: [text],
+            model: 'text-embedding-ada-002'
+        })
+    ).data?.data[0].embedding
+
+    embeddingList.set(text, embedding)
+
+    return embedding
+}
+
+export const openAICall = async (openai: OpenAIApi, prompt: string, gptVersion: string, temperature = 0.5, max_tokens = 100) => {
+    if (gptVersion.startsWith('gpt-3.5-turbo') || gptVersion.startsWith('gpt-4')) {
+        // Chat completion (also covers dated snapshots such as gpt-4-0314 and gpt-3.5-turbo-0301)
+        const options: CreateChatCompletionRequest = {
+            model: gptVersion,
+            messages: [{ role: 'user', content: prompt }],
+            temperature,
+            max_tokens,
+            n: 1
+        }
+        const data = (await openai.createChatCompletion(options)).data
+
+        return data?.choices[0]?.message?.content.trim() ?? ''
+    } else {
+        // Prompt completion
+        const options: CreateCompletionRequest = {
+            model: gptVersion,
+            prompt,
+            temperature,
+            max_tokens,
+            top_p: 1,
+            frequency_penalty: 0,
+            presence_penalty: 0
+        }
+        const data = (await openai.createCompletion(options)).data
+
+        return data?.choices[0]?.text?.trim() ?? ''
+    }
+}
+
+export const taskCreationAgent = async (
+    openai: OpenAIApi,
+    taskList: Task[],
+    objective: string,
+    result: string,
+    taskDescription: string,
+    gptVersion = 'gpt-3.5-turbo'
+): Promise<Task[]> => {
+    const prompt = `You are a task creation AI that uses the result of an execution agent to create new tasks with the following objective: ${objective}. The last completed task has the result: ${result}. This result was based on this task description: ${taskDescription}. These are incomplete tasks: ${taskList
+        .map((t) => t.name)
+        .join(', ')}. Based on the result, create new tasks to be completed by the AI system that do not overlap with incomplete tasks. Return the tasks as an array.`
+    const response = await openAICall(openai, prompt, gptVersion)
+    const newTaskNames = response.split('\n')
+
+    return newTaskNames.map((name) => ({
+        id: uuidv4(),
+        name,
+        priority: taskList.length + 1
+    }))
+}
+
+export const prioritizationAgent = async (
+    openai: OpenAIApi,
+    taskList: Task[],
+    taskPriority: number,
+    objective: string,
+    gptVersion = 'gpt-3.5-turbo'
+): Promise<Task[]> => {
+    const taskNames = taskList.map((t) => t.name)
+    const startPriority = taskPriority + 1
+
+    const prompt = `You are a task prioritization AI tasked with cleaning the formatting of and reprioritizing the following tasks: ${taskNames}. Consider the ultimate objective of your team: ${objective}. Do not remove any tasks. Return the result as a list, like:
+    #. First task
+    #. Second task
+    Start the task list with number ${startPriority}.`
+    const response = await openAICall(openai, prompt, gptVersion)
+    const newTasks = response.split('\n')
+
+    // Parse and add new tasks
+    return (
+        newTasks
+            .map((taskString) => {
+                const taskParts = taskString.trim().split('.', 2)
+
+                if (taskParts.length === 2) {
+                    const id = uuidv4()
+                    const name = taskParts[1].trim()
+                    const priority = parseInt(taskParts[0], 10)
+                    return {
+                        id,
+                        name,
+                        priority
+                    } as Task
+                }
+            })
+            // Remove lines that don't have a task
+            .filter((t) => t !== undefined)
+            // Sort by priority
+            .sort((a, b) => a!.priority - b!.priority) as Task[]
+    )
+}
+
+export const contextAgent = async (
+    openai: OpenAIApi,
+    pinecone: PineconeClient,
+    indexName: string,
+    embeddingList: Map<string, number[]>,
+    objective: string,
+    topK: number
+) => {
+    const index = pinecone.Index(indexName)
+    const queryEmbedding = await getADAEmbedding(openai, objective, embeddingList)
+
+    const results = await index.query({
+        queryRequest: {
+            vector: queryEmbedding,
+            includeMetadata: true,
+            topK
+        }
+    })
+    const sortedResults = results.matches?.sort((a, b) => (b?.score ?? 0) - (a?.score ?? 0)) ?? []
+
+    return sortedResults.map((item) => (item.metadata as any)?.task ?? '')
+}
+
+export const executionAgent = async (
+    openai: OpenAIApi,
+    pinecone: PineconeClient,
+    indexName: string,
+    embeddingList: Map<string, number[]>,
+    objective: string,
+    task: Task,
+    gptVersion = 'gpt-3.5-turbo'
+) => {
+    const context = await contextAgent(openai, pinecone, indexName, embeddingList, objective, 5)
+    const prompt = `You are an AI who performs one task based on the following objective: ${objective}.\nTake into account these previously completed tasks: ${context}\nYour task: ${task.name}\nResponse:`
+
+    //console.log('\nexecution prompt: ', prompt, '\n')
+
+    return openAICall(openai, prompt, gptVersion, 0.7, 2000)
+}
+
+export const mainLoop = async (
+    openai: OpenAIApi,
+    pinecone: PineconeClient,
+    indexName: string,
+    embeddingList: Map<string, number[]>,
+    index: VectorOperationsApi,
+    taskList: Task[],
+    objective: string,
+    modelName: string,
+    taskLoop: string
+): Promise<string> => {
+    const RUN_LIMIT = parseInt(taskLoop, 10) || 3
+    let finalResult = ''
+
+    for (let run = 0; run < RUN_LIMIT; run++) {
+        let enrichedResult: any
+        let task: Task | undefined
+
+        if (taskList.length > 0) {
+            // Step 1: Pull the task
+            task = taskList.shift()
+
+            if (!task) {
+                //console.log('No tasks left to complete. Exiting.')
+                break
+            }
+
+            console.log(`\x1b[95m\x1b[1m\n*****TASK LIST*****\n\x1b[0m\x1b[0m
+            ${taskList.map((t) => ` ${t?.priority}. ${t?.name}`).join('\n')}
+            \x1b[92m\x1b[1m\n*****NEXT TASK*****\n\x1b[0m\x1b[0m
+            ${task.name}`)
+
+            // Step 2: Execute the task with the selected model
+            const result = await executionAgent(openai, pinecone, indexName, embeddingList, objective, task, modelName)
+            console.log('\x1b[93m\x1b[1m\n*****TASK RESULT*****\n\x1b[0m\x1b[0m')
+            console.log(result)
+            finalResult = result
+
+            // Step 3: Enrich result and store in Pinecone
+            enrichedResult = { data: result }
+            const vector = enrichedResult.data // extract the actual result from the dictionary
+            const embeddingResult = await getADAEmbedding(openai, vector, embeddingList)
+            await index.upsert({
+                upsertRequest: {
+                    vectors: [
+                        {
+                            id: task.id,
+                            values: embeddingResult,
+                            metadata: { task: task.name, result }
+                        }
+                    ]
+                }
+            })
+        }
+
+        // Step 4: Create new tasks and reprioritize task list
+        if (enrichedResult) {
+            const newTasks = await taskCreationAgent(openai, taskList, objective, enrichedResult.data, task!.name, modelName)
+            //console.log('newTasks', newTasks)
+            taskList = [...taskList, ...newTasks]
+
+            taskList = await prioritizationAgent(openai, taskList, task!.priority, objective, modelName)
+            //console.log(`Reprioritized task list: ${taskList.map((t) => `[${t?.priority}] ${t?.id}: ${t?.name}`).join(', ')}`)
+        } else {
+            break
+        }
+    }
+
+    return finalResult
+}
+
+module.exports = { nodeClass: BabyAGI_Agents }
diff --git a/packages/components/nodes/agents/BabyAGI/babyagi.svg b/packages/components/nodes/agents/BabyAGI/babyagi.svg
new file mode 100644
index 000000000..c87861e5c
--- /dev/null
+++ b/packages/components/nodes/agents/BabyAGI/babyagi.svg
@@ -0,0 +1,9 @@
+
+
+
+
+
+
+
+
+
\ No newline at end of file
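Reviewer note: the file above is easier to follow once the loop shape is explicit. The sketch below condenses what BabyAGI.ts implements; `executeTask`, `storeResult`, `createTasks`, and `prioritize` are hypothetical stand-ins for executionAgent, the Pinecone upsert, taskCreationAgent, and prioritizationAgent (they are injected as parameters here only so the sketch compiles on its own).

    // Minimal sketch of one iteration of the BabyAGI loop, assuming simplified helpers.
    interface Task {
        id: string
        name: string
        priority: number
    }

    async function step(
        tasks: Task[],
        objective: string,
        executeTask: (objective: string, task: Task) => Promise<string>,
        storeResult: (task: Task, result: string) => Promise<void>,
        createTasks: (objective: string, result: string, task: Task, tasks: Task[]) => Promise<Task[]>,
        prioritize: (tasks: Task[], objective: string) => Promise<Task[]>
    ): Promise<Task[]> {
        const task = tasks.shift() // Step 1: pull the next task
        if (!task) return tasks
        const result = await executeTask(objective, task) // Step 2: execute it with the LLM
        await storeResult(task, result) // Step 3: embed and store the result
        const newTasks = await createTasks(objective, result, task, tasks) // Step 4: derive new tasks...
        return prioritize([...tasks, ...newTasks], objective) // ...and reprioritize the list
    }

mainLoop repeats this up to `taskLoop` times and returns the last execution result as the chat answer.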
diff --git a/packages/components/nodes/chatmodels/ChatOpenAI/ChatOpenAI.ts b/packages/components/nodes/chatmodels/ChatOpenAI/ChatOpenAI.ts
index 9cb12592f..9701ce8b7 100644
--- a/packages/components/nodes/chatmodels/ChatOpenAI/ChatOpenAI.ts
+++ b/packages/components/nodes/chatmodels/ChatOpenAI/ChatOpenAI.ts
@@ -29,6 +29,18 @@ class ChatOpenAI_ChatModels implements INode {
             name: 'modelName',
             type: 'options',
             options: [
+                {
+                    label: 'gpt-4',
+                    name: 'gpt-4'
+                },
+                {
+                    label: 'gpt-4-0314',
+                    name: 'gpt-4-0314'
+                },
+                {
+                    label: 'gpt-4-32k-0314',
+                    name: 'gpt-4-32k-0314'
+                },
                 {
                     label: 'gpt-3.5-turbo',
                     name: 'gpt-3.5-turbo'
diff --git a/packages/components/package.json b/packages/components/package.json
index 0a211553d..9e8f8e675 100644
--- a/packages/components/package.json
+++ b/packages/components/package.json
@@ -28,10 +28,12 @@
         "moment": "^2.29.3",
         "node-fetch": "2",
         "pdfjs-dist": "^3.5.141",
+        "uuid": "^9.0.0",
         "ws": "^8.9.0"
     },
     "devDependencies": {
         "@types/gulp": "4.0.9",
+        "@types/uuid": "^9.0.1",
         "@types/ws": "^8.5.3",
         "gulp": "^4.0.2",
         "typescript": "^4.8.4"
diff --git a/packages/server/marketplaces/BabyAGI.json b/packages/server/marketplaces/BabyAGI.json
new file mode 100644
index 000000000..8091ef03f
--- /dev/null
+++ b/packages/server/marketplaces/BabyAGI.json
@@ -0,0 +1,104 @@
+{
+    "description": "Given an objective, tasks will be created, stored into Pinecone and reprioritized",
+    "nodes": [
+        {
+            "width": 300,
+            "height": 769,
+            "id": "babyAGI_0",
+            "position": {
+                "x": 542.130412774738,
+                "y": 154.52145148106695
+            },
+            "type": "customNode",
+            "data": {
+                "id": "babyAGI_0",
+                "label": "BabyAGI",
+                "name": "babyAGI",
+                "type": "BabyAGI",
+                "baseClasses": ["AgentExecutor"],
+                "category": "Agents",
+                "description": "Task-driven autonomous agent that creates new tasks and reprioritizes the task list based on the objective",
+                "inputParams": [
+                    {
+                        "label": "Task Loop",
+                        "name": "taskLoop",
+                        "type": "number",
+                        "default": 3
+                    },
+                    {
+                        "label": "OpenAI Api Key",
+                        "name": "openAIApiKey",
+                        "type": "password"
+                    },
+                    {
+                        "label": "Pinecone Api Key",
+                        "name": "pineconeApiKey",
+                        "type": "password"
+                    },
+                    {
+                        "label": "Pinecone Environment",
+                        "name": "pineconeEnv",
+                        "type": "string"
+                    },
+                    {
+                        "label": "Pinecone Index",
+                        "name": "pineconeIndex",
+                        "type": "string"
+                    },
+                    {
+                        "label": "Model Name",
+                        "name": "modelName",
+                        "type": "options",
+                        "options": [
+                            {
+                                "label": "gpt-4",
+                                "name": "gpt-4"
+                            },
+                            {
+                                "label": "gpt-4-0314",
+                                "name": "gpt-4-0314"
+                            },
+                            {
+                                "label": "gpt-4-32k-0314",
+                                "name": "gpt-4-32k-0314"
+                            },
+                            {
+                                "label": "gpt-3.5-turbo",
+                                "name": "gpt-3.5-turbo"
+                            },
+                            {
+                                "label": "gpt-3.5-turbo-0301",
+                                "name": "gpt-3.5-turbo-0301"
+                            }
+                        ],
+                        "default": "gpt-3.5-turbo",
+                        "optional": true
+                    }
+                ],
+                "inputAnchors": [],
+                "inputs": {
+                    "taskLoop": "3",
+                    "pineconeEnv": "us-west4-gcp",
+                    "pineconeIndex": "test",
+                    "modelName": "gpt-3.5-turbo"
+                },
+                "outputAnchors": [
+                    {
+                        "id": "babyAGI_0-output-babyAGI-AgentExecutor",
+                        "name": "babyAGI",
+                        "label": "BabyAGI",
+                        "type": "AgentExecutor"
+                    }
+                ],
+                "selected": false
+            },
+            "selected": false,
+            "dragging": false,
+            "positionAbsolute": {
+                "x": 542.130412774738,
+                "y": 154.52145148106695
+            }
+        }
+    ],
+    "edges": []
+}
diff --git a/packages/server/src/utils/index.ts b/packages/server/src/utils/index.ts
index bc50aff24..e8ac8a3aa 100644
--- a/packages/server/src/utils/index.ts
+++ b/packages/server/src/utils/index.ts
@@ -107,7 +107,9 @@ export const getEndingNode = (nodeDependencies: INodeDependencies, graph: INodeDirectedGraph) => {
     // Find ending node
     let endingNodeId = ''
     Object.keys(graph).forEach((nodeId) => {
-        if (!graph[nodeId].length && nodeDependencies[nodeId] > 0) {
+        if (Object.keys(nodeDependencies).length === 1) {
+            endingNodeId = nodeId
+        } else if (!graph[nodeId].length && nodeDependencies[nodeId] > 0) {
             endingNodeId = nodeId
         }
     })
diff --git a/packages/ui/src/views/chatmessage/ChatMessage.css b/packages/ui/src/views/chatmessage/ChatMessage.css
index 4aa651b1d..a29e49ffd 100644
--- a/packages/ui/src/views/chatmessage/ChatMessage.css
+++ b/packages/ui/src/views/chatmessage/ChatMessage.css
@@ -1,7 +1,3 @@
-.cloudform {
-    position: relative;
-}
-
 .messagelist {
     width: 100%;
     height: 100%;
@@ -113,13 +109,11 @@
     position: relative;
     flex-direction: column;
     padding: 10px;
-    max-width: 500px;
 }
 
 .cloud {
-    width: '100%';
-    max-width: 500px;
-    height: 73vh;
+    width: 400px;
+    height: calc(100vh - 260px);
     border-radius: 0.5rem;
     display: flex;
     justify-content: center;
diff --git a/packages/ui/src/views/chatmessage/ChatMessage.js b/packages/ui/src/views/chatmessage/ChatMessage.js
index 45cd3873e..4fbb54e68 100644
--- a/packages/ui/src/views/chatmessage/ChatMessage.js
+++ b/packages/ui/src/views/chatmessage/ChatMessage.js
@@ -336,13 +336,13 @@ export const ChatMessage = ({ chatflowid }) => {
             <div className={customization.isDarkMode ? 'cloud-dark' : 'cloud'}>
-
-
+
+
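Reviewer note: a possible manual smoke test for the new node. Everything here is illustrative: the compiled output path, the minimal nodeData shape, and the environment variable names are assumptions, not defined by this patch.

    // Hypothetical smoke test (run with node after building packages/components).
    const { nodeClass: BabyAGI } = require('./packages/components/dist/nodes/agents/BabyAGI/BabyAGI')

    const node = new BabyAGI()
    const nodeData = {
        inputs: {
            taskLoop: '2', // keep the loop short for a quick check
            openAIApiKey: process.env.OPENAI_API_KEY,
            pineconeApiKey: process.env.PINECONE_API_KEY,
            pineconeEnv: 'us-west4-gcp', // placeholder Pinecone environment
            pineconeIndex: 'test', // placeholder index; the node creates it if missing
            modelName: 'gpt-3.5-turbo'
        }
    }

    node.run(nodeData, 'Write a weather report for SF today').then((result) => console.log(result))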