add marketplace

Henry 2023-05-27 12:34:35 +01:00
parent 2283093f18
commit 1e2d0bb6fe
1 changed file with 949 additions and 0 deletions

@@ -0,0 +1,949 @@
{
"description": "Given API docs, agent automatically decide which API to call, generating url and body request from conversation",
"nodes": [
{
"width": 300,
"height": 459,
"id": "getApiChain_0",
"position": {
"x": 1222.6923202234623,
"y": 359.97676456347756
},
"type": "customNode",
"data": {
"id": "getApiChain_0",
"label": "GET API Chain",
"name": "getApiChain",
"type": "GETApiChain",
"baseClasses": ["GETApiChain", "BaseChain", "BaseLangChain"],
"category": "Chains",
"description": "Chain to run queries against GET API",
"inputParams": [
{
"label": "API Documentation",
"name": "apiDocs",
"type": "string",
"description": "Description of how API works. Please refer to more <a target=\"_blank\" href=\"https://github.com/hwchase17/langchain/blob/master/langchain/chains/api/open_meteo_docs.py\">examples</a>",
"rows": 4,
"id": "getApiChain_0-input-apiDocs-string"
},
{
"label": "Headers",
"name": "headers",
"type": "json",
"additionalParams": true,
"optional": true,
"id": "getApiChain_0-input-headers-json"
},
{
"label": "URL Prompt",
"name": "urlPrompt",
"type": "string",
"description": "Prompt used to tell LLMs how to construct the URL. Must contains {api_docs} and {question}",
"default": "You are given the below API Documentation:\n{api_docs}\nUsing this documentation, generate the full API url to call for answering the user question.\nYou should build the API url in order to get a response that is as short as possible, while still getting the necessary information to answer the question. Pay attention to deliberately exclude any unnecessary pieces of data in the API call.\n\nQuestion:{question}\nAPI url:",
"rows": 4,
"additionalParams": true,
"id": "getApiChain_0-input-urlPrompt-string"
},
{
"label": "Answer Prompt",
"name": "ansPrompt",
"type": "string",
"description": "Prompt used to tell LLMs how to return the API response. Must contains {api_response}, {api_url}, and {question}",
"default": "Given this {api_response} response for {api_url}. use the given response to answer this {question}",
"rows": 4,
"additionalParams": true,
"id": "getApiChain_0-input-ansPrompt-string"
}
],
"inputAnchors": [
{
"label": "Language Model",
"name": "model",
"type": "BaseLanguageModel",
"id": "getApiChain_0-input-model-BaseLanguageModel"
}
],
"inputs": {
"model": "{{chatOpenAI_1.data.instance}}",
"apiDocs": "BASE URL: https://api.open-meteo.com/\n\nAPI Documentation\nThe API endpoint /v1/forecast accepts a geographical coordinate, a list of weather variables and responds with a JSON hourly weather forecast for 7 days. Time always starts at 0:00 today and contains 168 hours. All URL parameters are listed below:\n\nParameter\tFormat\tRequired\tDefault\tDescription\nlatitude, longitude\tFloating point\tYes\t\tGeographical WGS84 coordinate of the location\nhourly\tString array\tNo\t\tA list of weather variables which should be returned. Values can be comma separated, or multiple &hourly= parameter in the URL can be used.\ndaily\tString array\tNo\t\tA list of daily weather variable aggregations which should be returned. Values can be comma separated, or multiple &daily= parameter in the URL can be used. If daily weather variables are specified, parameter timezone is required.\ncurrent_weather\tBool\tNo\tfalse\tInclude current weather conditions in the JSON output.\ntemperature_unit\tString\tNo\tcelsius\tIf fahrenheit is set, all temperature values are converted to Fahrenheit.\nwindspeed_unit\tString\tNo\tkmh\tOther wind speed speed units: ms, mph and kn\nprecipitation_unit\tString\tNo\tmm\tOther precipitation amount units: inch\ntimeformat\tString\tNo\tiso8601\tIf format unixtime is selected, all time values are returned in UNIX epoch time in seconds. Please note that all timestamp are in GMT+0! For daily values with unix timestamps, please apply utc_offset_seconds again to get the correct date.\ntimezone\tString\tNo\tGMT\tIf timezone is set, all timestamps are returned as local-time and data is returned starting at 00:00 local-time. Any time zone name from the time zone database is supported. If auto is set as a time zone, the coordinates will be automatically resolved to the local time zone.\npast_days\tInteger (0-2)\tNo\t0\tIf past_days is set, yesterday or the day before yesterday data are also returned.\nstart_date\nend_date\tString (yyyy-mm-dd)\tNo\t\tThe time interval to get weather data. A day must be specified as an ISO8601 date (e.g. 2022-06-30).\nmodels\tString array\tNo\tauto\tManually select one or more weather models. Per default, the best suitable weather models will be combined.\n\nHourly Parameter Definition\nThe parameter &hourly= accepts the following values. Most weather variables are given as an instantaneous value for the indicated hour. Some variables like precipitation are calculated from the preceding hour as an average or sum.\n\nVariable\tValid time\tUnit\tDescription\ntemperature_2m\tInstant\t°C (°F)\tAir temperature at 2 meters above ground\nsnowfall\tPreceding hour sum\tcm (inch)\tSnowfall amount of the preceding hour in centimeters. For the water equivalent in millimeter, divide by 7. E.g. 7 cm snow = 10 mm precipitation water equivalent\nrain\tPreceding hour sum\tmm (inch)\tRain from large scale weather systems of the preceding hour in millimeter\nshowers\tPreceding hour sum\tmm (inch)\tShowers from convective precipitation in millimeters from the preceding hour\nweathercode\tInstant\tWMO code\tWeather condition as a numeric code. Follow WMO weather interpretation codes. See table below for details.\nsnow_depth\tInstant\tmeters\tSnow depth on the ground\nfreezinglevel_height\tInstant\tmeters\tAltitude above sea level of the 0°C level\nvisibility\tInstant\tmeters\tViewing distance in meters. Influenced by low clouds, humidity and aerosols. Maximum visibility is approximately 24 km.",
"headers": "",
"urlPrompt": "You are given the below API Documentation:\n{api_docs}\nUsing this documentation, generate the full API url to call for answering the user question.\nYou should build the API url in order to get a response that is as short as possible, while still getting the necessary information to answer the question. Pay attention to deliberately exclude any unnecessary pieces of data in the API call.\n\nQuestion:{question}\nAPI url:",
"ansPrompt": "Given this {api_response} response for {api_url}. use the given response to answer this {question}"
},
"outputAnchors": [
{
"id": "getApiChain_0-output-getApiChain-GETApiChain|BaseChain|BaseLangChain",
"name": "getApiChain",
"label": "GETApiChain",
"type": "GETApiChain | BaseChain | BaseLangChain"
}
],
"outputs": {},
"selected": false
},
"selected": false,
"positionAbsolute": {
"x": 1222.6923202234623,
"y": 359.97676456347756
},
"dragging": false
},
{
"width": 300,
"height": 383,
"id": "conversationalAgent_0",
"position": {
"x": 1993.8540808923876,
"y": 952.6297081192247
},
"type": "customNode",
"data": {
"id": "conversationalAgent_0",
"label": "Conversational Agent",
"name": "conversationalAgent",
"type": "AgentExecutor",
"baseClasses": ["AgentExecutor", "BaseChain", "BaseLangChain"],
"category": "Agents",
"description": "Conversational agent for a chat model. It will utilize chat specific prompts",
"inputParams": [
{
"label": "System Message",
"name": "systemMessage",
"type": "string",
"rows": 4,
"optional": true,
"additionalParams": true,
"id": "conversationalAgent_0-input-systemMessage-string"
},
{
"label": "Human Message",
"name": "humanMessage",
"type": "string",
"rows": 4,
"optional": true,
"additionalParams": true,
"id": "conversationalAgent_0-input-humanMessage-string"
}
],
"inputAnchors": [
{
"label": "Allowed Tools",
"name": "tools",
"type": "Tool",
"list": true,
"id": "conversationalAgent_0-input-tools-Tool"
},
{
"label": "Language Model",
"name": "model",
"type": "BaseLanguageModel",
"id": "conversationalAgent_0-input-model-BaseLanguageModel"
},
{
"label": "Memory",
"name": "memory",
"type": "BaseChatMemory",
"id": "conversationalAgent_0-input-memory-BaseChatMemory"
}
],
"inputs": {
"tools": ["{{chainTool_0.data.instance}}", "{{chainTool_1.data.instance}}"],
"model": "{{chatOpenAI_0.data.instance}}",
"memory": "{{bufferMemory_0.data.instance}}",
"systemMessage": "",
"humanMessage": ""
},
"outputAnchors": [
{
"id": "conversationalAgent_0-output-conversationalAgent-AgentExecutor|BaseChain|BaseLangChain",
"name": "conversationalAgent",
"label": "AgentExecutor",
"type": "AgentExecutor | BaseChain | BaseLangChain"
}
],
"outputs": {},
"selected": false
},
"selected": false,
"positionAbsolute": {
"x": 1993.8540808923876,
"y": 952.6297081192247
},
"dragging": false
},
{
"width": 300,
"height": 602,
"id": "chainTool_0",
"position": {
"x": 1600.1485877701232,
"y": 276.38970893436533
},
"type": "customNode",
"data": {
"id": "chainTool_0",
"label": "Chain Tool",
"name": "chainTool",
"type": "ChainTool",
"baseClasses": ["ChainTool", "DynamicTool", "Tool", "StructuredTool", "BaseLangChain"],
"category": "Tools",
"description": "Use a chain as allowed tool for agent",
"inputParams": [
{
"label": "Chain Name",
"name": "name",
"type": "string",
"placeholder": "state-of-union-qa",
"id": "chainTool_0-input-name-string"
},
{
"label": "Chain Description",
"name": "description",
"type": "string",
"rows": 3,
"placeholder": "State of the Union QA - useful for when you need to ask questions about the most recent state of the union address.",
"id": "chainTool_0-input-description-string"
},
{
"label": "Return Direct",
"name": "returnDirect",
"type": "boolean",
"optional": true,
"id": "chainTool_0-input-returnDirect-boolean"
}
],
"inputAnchors": [
{
"label": "Base Chain",
"name": "baseChain",
"type": "BaseChain",
"id": "chainTool_0-input-baseChain-BaseChain"
}
],
"inputs": {
"name": "weather-qa",
"description": "useful for when you need to ask question about weather",
"returnDirect": "",
"baseChain": "{{getApiChain_0.data.instance}}"
},
"outputAnchors": [
{
"id": "chainTool_0-output-chainTool-ChainTool|DynamicTool|Tool|StructuredTool|BaseLangChain",
"name": "chainTool",
"label": "ChainTool",
"type": "ChainTool | DynamicTool | Tool | StructuredTool | BaseLangChain"
}
],
"outputs": {},
"selected": false
},
"selected": false,
"positionAbsolute": {
"x": 1600.1485877701232,
"y": 276.38970893436533
},
"dragging": false
},
{
"width": 300,
"height": 524,
"id": "chatOpenAI_0",
"position": {
"x": 1270.7548070814019,
"y": 1565.864417576483
},
"type": "customNode",
"data": {
"id": "chatOpenAI_0",
"label": "ChatOpenAI",
"name": "chatOpenAI",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "BaseLangChain"],
"category": "Chat Models",
"description": "Wrapper around OpenAI large language models that use the Chat endpoint",
"inputParams": [
{
"label": "OpenAI Api Key",
"name": "openAIApiKey",
"type": "password",
"id": "chatOpenAI_0-input-openAIApiKey-password"
},
{
"label": "Model Name",
"name": "modelName",
"type": "options",
"options": [
{
"label": "gpt-4",
"name": "gpt-4"
},
{
"label": "gpt-4-0314",
"name": "gpt-4-0314"
},
{
"label": "gpt-4-32k-0314",
"name": "gpt-4-32k-0314"
},
{
"label": "gpt-3.5-turbo",
"name": "gpt-3.5-turbo"
},
{
"label": "gpt-3.5-turbo-0301",
"name": "gpt-3.5-turbo-0301"
}
],
"default": "gpt-3.5-turbo",
"optional": true,
"id": "chatOpenAI_0-input-modelName-options"
},
{
"label": "Temperature",
"name": "temperature",
"type": "number",
"default": 0.9,
"optional": true,
"id": "chatOpenAI_0-input-temperature-number"
},
{
"label": "Max Tokens",
"name": "maxTokens",
"type": "number",
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_0-input-maxTokens-number"
},
{
"label": "Top Probability",
"name": "topP",
"type": "number",
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_0-input-topP-number"
},
{
"label": "Frequency Penalty",
"name": "frequencyPenalty",
"type": "number",
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_0-input-frequencyPenalty-number"
},
{
"label": "Presence Penalty",
"name": "presencePenalty",
"type": "number",
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_0-input-presencePenalty-number"
},
{
"label": "Timeout",
"name": "timeout",
"type": "number",
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_0-input-timeout-number"
}
],
"inputAnchors": [],
"inputs": {
"modelName": "gpt-3.5-turbo",
"temperature": 0.9,
"maxTokens": "",
"topP": "",
"frequencyPenalty": "",
"presencePenalty": "",
"timeout": ""
},
"outputAnchors": [
{
"id": "chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|BaseLangChain",
"name": "chatOpenAI",
"label": "ChatOpenAI",
"type": "ChatOpenAI | BaseChatModel | BaseLanguageModel | BaseLangChain"
}
],
"outputs": {},
"selected": false
},
"selected": false,
"positionAbsolute": {
"x": 1270.7548070814019,
"y": 1565.864417576483
},
"dragging": false
},
{
"width": 300,
"height": 376,
"id": "bufferMemory_0",
"position": {
"x": 1642.0644080121785,
"y": 1715.6131926891728
},
"type": "customNode",
"data": {
"id": "bufferMemory_0",
"label": "Buffer Memory",
"name": "bufferMemory",
"type": "BufferMemory",
"baseClasses": ["BufferMemory", "BaseChatMemory", "BaseMemory"],
"category": "Memory",
"description": "Remembers previous conversational back and forths directly",
"inputParams": [
{
"label": "Memory Key",
"name": "memoryKey",
"type": "string",
"default": "chat_history",
"id": "bufferMemory_0-input-memoryKey-string"
},
{
"label": "Input Key",
"name": "inputKey",
"type": "string",
"default": "input",
"id": "bufferMemory_0-input-inputKey-string"
}
],
"inputAnchors": [],
"inputs": {
"memoryKey": "chat_history",
"inputKey": "input"
},
"outputAnchors": [
{
"id": "bufferMemory_0-output-bufferMemory-BufferMemory|BaseChatMemory|BaseMemory",
"name": "bufferMemory",
"label": "BufferMemory",
"type": "BufferMemory | BaseChatMemory | BaseMemory"
}
],
"outputs": {},
"selected": false
},
"selected": false,
"positionAbsolute": {
"x": 1642.0644080121785,
"y": 1715.6131926891728
},
"dragging": false
},
{
"width": 300,
"height": 524,
"id": "chatOpenAI_1",
"position": {
"x": 865.4424095725009,
"y": 350.7505181391267
},
"type": "customNode",
"data": {
"id": "chatOpenAI_1",
"label": "ChatOpenAI",
"name": "chatOpenAI",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "BaseLangChain"],
"category": "Chat Models",
"description": "Wrapper around OpenAI large language models that use the Chat endpoint",
"inputParams": [
{
"label": "OpenAI Api Key",
"name": "openAIApiKey",
"type": "password",
"id": "chatOpenAI_1-input-openAIApiKey-password"
},
{
"label": "Model Name",
"name": "modelName",
"type": "options",
"options": [
{
"label": "gpt-4",
"name": "gpt-4"
},
{
"label": "gpt-4-0314",
"name": "gpt-4-0314"
},
{
"label": "gpt-4-32k-0314",
"name": "gpt-4-32k-0314"
},
{
"label": "gpt-3.5-turbo",
"name": "gpt-3.5-turbo"
},
{
"label": "gpt-3.5-turbo-0301",
"name": "gpt-3.5-turbo-0301"
}
],
"default": "gpt-3.5-turbo",
"optional": true,
"id": "chatOpenAI_1-input-modelName-options"
},
{
"label": "Temperature",
"name": "temperature",
"type": "number",
"default": 0.9,
"optional": true,
"id": "chatOpenAI_1-input-temperature-number"
},
{
"label": "Max Tokens",
"name": "maxTokens",
"type": "number",
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_1-input-maxTokens-number"
},
{
"label": "Top Probability",
"name": "topP",
"type": "number",
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_1-input-topP-number"
},
{
"label": "Frequency Penalty",
"name": "frequencyPenalty",
"type": "number",
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_1-input-frequencyPenalty-number"
},
{
"label": "Presence Penalty",
"name": "presencePenalty",
"type": "number",
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_1-input-presencePenalty-number"
},
{
"label": "Timeout",
"name": "timeout",
"type": "number",
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_1-input-timeout-number"
}
],
"inputAnchors": [],
"inputs": {
"modelName": "gpt-3.5-turbo",
"temperature": 0.9,
"maxTokens": "",
"topP": "",
"frequencyPenalty": "",
"presencePenalty": "",
"timeout": ""
},
"outputAnchors": [
{
"id": "chatOpenAI_1-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|BaseLangChain",
"name": "chatOpenAI",
"label": "ChatOpenAI",
"type": "ChatOpenAI | BaseChatModel | BaseLanguageModel | BaseLangChain"
}
],
"outputs": {},
"selected": false
},
"selected": false,
"positionAbsolute": {
"x": 865.4424095725009,
"y": 350.7505181391267
},
"dragging": false
},
{
"width": 300,
"height": 524,
"id": "chatOpenAI_2",
"position": {
"x": 587.6425146349426,
"y": 917.1494176892741
},
"type": "customNode",
"data": {
"id": "chatOpenAI_2",
"label": "ChatOpenAI",
"name": "chatOpenAI",
"type": "ChatOpenAI",
"baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel", "BaseLangChain"],
"category": "Chat Models",
"description": "Wrapper around OpenAI large language models that use the Chat endpoint",
"inputParams": [
{
"label": "OpenAI Api Key",
"name": "openAIApiKey",
"type": "password",
"id": "chatOpenAI_2-input-openAIApiKey-password"
},
{
"label": "Model Name",
"name": "modelName",
"type": "options",
"options": [
{
"label": "gpt-4",
"name": "gpt-4"
},
{
"label": "gpt-4-0314",
"name": "gpt-4-0314"
},
{
"label": "gpt-4-32k-0314",
"name": "gpt-4-32k-0314"
},
{
"label": "gpt-3.5-turbo",
"name": "gpt-3.5-turbo"
},
{
"label": "gpt-3.5-turbo-0301",
"name": "gpt-3.5-turbo-0301"
}
],
"default": "gpt-3.5-turbo",
"optional": true,
"id": "chatOpenAI_2-input-modelName-options"
},
{
"label": "Temperature",
"name": "temperature",
"type": "number",
"default": 0.9,
"optional": true,
"id": "chatOpenAI_2-input-temperature-number"
},
{
"label": "Max Tokens",
"name": "maxTokens",
"type": "number",
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_2-input-maxTokens-number"
},
{
"label": "Top Probability",
"name": "topP",
"type": "number",
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_2-input-topP-number"
},
{
"label": "Frequency Penalty",
"name": "frequencyPenalty",
"type": "number",
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_2-input-frequencyPenalty-number"
},
{
"label": "Presence Penalty",
"name": "presencePenalty",
"type": "number",
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_2-input-presencePenalty-number"
},
{
"label": "Timeout",
"name": "timeout",
"type": "number",
"optional": true,
"additionalParams": true,
"id": "chatOpenAI_2-input-timeout-number"
}
],
"inputAnchors": [],
"inputs": {
"modelName": "gpt-3.5-turbo",
"temperature": 0.9,
"maxTokens": "",
"topP": "",
"frequencyPenalty": "",
"presencePenalty": "",
"timeout": ""
},
"outputAnchors": [
{
"id": "chatOpenAI_2-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|BaseLangChain",
"name": "chatOpenAI",
"label": "ChatOpenAI",
"type": "ChatOpenAI | BaseChatModel | BaseLanguageModel | BaseLangChain"
}
],
"outputs": {},
"selected": false
},
"selected": false,
"positionAbsolute": {
"x": 587.6425146349426,
"y": 917.1494176892741
},
"dragging": false
},
{
"width": 300,
"height": 602,
"id": "chainTool_1",
"position": {
"x": 1284.7746596034926,
"y": 895.1444797047182
},
"type": "customNode",
"data": {
"id": "chainTool_1",
"label": "Chain Tool",
"name": "chainTool",
"type": "ChainTool",
"baseClasses": ["ChainTool", "DynamicTool", "Tool", "StructuredTool", "BaseLangChain"],
"category": "Tools",
"description": "Use a chain as allowed tool for agent",
"inputParams": [
{
"label": "Chain Name",
"name": "name",
"type": "string",
"placeholder": "state-of-union-qa",
"id": "chainTool_1-input-name-string"
},
{
"label": "Chain Description",
"name": "description",
"type": "string",
"rows": 3,
"placeholder": "State of the Union QA - useful for when you need to ask questions about the most recent state of the union address.",
"id": "chainTool_1-input-description-string"
},
{
"label": "Return Direct",
"name": "returnDirect",
"type": "boolean",
"optional": true,
"id": "chainTool_1-input-returnDirect-boolean"
}
],
"inputAnchors": [
{
"label": "Base Chain",
"name": "baseChain",
"type": "BaseChain",
"id": "chainTool_1-input-baseChain-BaseChain"
}
],
"inputs": {
"name": "discord-bot",
"description": "useful for when you need to send message to Discord",
"returnDirect": "",
"baseChain": "{{postApiChain_0.data.instance}}"
},
"outputAnchors": [
{
"id": "chainTool_1-output-chainTool-ChainTool|DynamicTool|Tool|StructuredTool|BaseLangChain",
"name": "chainTool",
"label": "ChainTool",
"type": "ChainTool | DynamicTool | Tool | StructuredTool | BaseLangChain"
}
],
"outputs": {},
"selected": false
},
"selected": false,
"positionAbsolute": {
"x": 1284.7746596034926,
"y": 895.1444797047182
},
"dragging": false
},
{
"width": 300,
"height": 459,
"id": "postApiChain_0",
"position": {
"x": 933.3631140153886,
"y": 974.8756002461283
},
"type": "customNode",
"data": {
"id": "postApiChain_0",
"label": "POST API Chain",
"name": "postApiChain",
"type": "POSTApiChain",
"baseClasses": ["POSTApiChain", "BaseChain", "BaseLangChain"],
"category": "Chains",
"description": "Chain to run queries against POST API",
"inputParams": [
{
"label": "API Documentation",
"name": "apiDocs",
"type": "string",
"description": "Description of how API works. Please refer to more <a target=\"_blank\" href=\"https://github.com/hwchase17/langchain/blob/master/langchain/chains/api/open_meteo_docs.py\">examples</a>",
"rows": 4,
"id": "postApiChain_0-input-apiDocs-string"
},
{
"label": "Headers",
"name": "headers",
"type": "json",
"additionalParams": true,
"optional": true,
"id": "postApiChain_0-input-headers-json"
},
{
"label": "URL Prompt",
"name": "urlPrompt",
"type": "string",
"description": "Prompt used to tell LLMs how to construct the URL. Must contains {api_docs} and {question}",
"default": "You are given the below API Documentation:\n{api_docs}\nUsing this documentation, generate a json string with two keys: \"url\" and \"data\".\nThe value of \"url\" should be a string, which is the API url to call for answering the user question.\nThe value of \"data\" should be a dictionary of key-value pairs you want to POST to the url as a JSON body.\nBe careful to always use double quotes for strings in the json string.\nYou should build the json string in order to get a response that is as short as possible, while still getting the necessary information to answer the question. Pay attention to deliberately exclude any unnecessary pieces of data in the API call.\n\nQuestion:{question}\njson string:",
"rows": 4,
"additionalParams": true,
"id": "postApiChain_0-input-urlPrompt-string"
},
{
"label": "Answer Prompt",
"name": "ansPrompt",
"type": "string",
"description": "Prompt used to tell LLMs how to return the API response. Must contains {api_response}, {api_url}, and {question}",
"default": "You are given the below API Documentation:\n{api_docs}\nUsing this documentation, generate a json string with two keys: \"url\" and \"data\".\nThe value of \"url\" should be a string, which is the API url to call for answering the user question.\nThe value of \"data\" should be a dictionary of key-value pairs you want to POST to the url as a JSON body.\nBe careful to always use double quotes for strings in the json string.\nYou should build the json string in order to get a response that is as short as possible, while still getting the necessary information to answer the question. Pay attention to deliberately exclude any unnecessary pieces of data in the API call.\n\nQuestion:{question}\njson string: {api_url_body}\n\nHere is the response from the API:\n\n{api_response}\n\nSummarize this response to answer the original question.\n\nSummary:",
"rows": 4,
"additionalParams": true,
"id": "postApiChain_0-input-ansPrompt-string"
}
],
"inputAnchors": [
{
"label": "Language Model",
"name": "model",
"type": "BaseLanguageModel",
"id": "postApiChain_0-input-model-BaseLanguageModel"
}
],
"inputs": {
"model": "{{chatOpenAI_2.data.instance}}",
"apiDocs": "API documentation:\nEndpoint: https://eog776prcv6dg0j.m.pipedream.net\n\nThis API is for sending Discord message\n\nQuery body table:\nmessage | string | Message to send | required\n\nResponse schema (string):\nresult | string",
"headers": "",
"urlPrompt": "You are given the below API Documentation:\n{api_docs}\nUsing this documentation, generate a json string with two keys: \"url\" and \"data\".\nThe value of \"url\" should be a string, which is the API url to call for answering the user question.\nThe value of \"data\" should be a dictionary of key-value pairs you want to POST to the url as a JSON body.\nBe careful to always use double quotes for strings in the json string.\nYou should build the json string in order to get a response that is as short as possible, while still getting the necessary information to answer the question. Pay attention to deliberately exclude any unnecessary pieces of data in the API call.\n\nQuestion:{question}\njson string:",
"ansPrompt": "You are given the below API Documentation:\n{api_docs}\nUsing this documentation, generate a json string with two keys: \"url\" and \"data\".\nThe value of \"url\" should be a string, which is the API url to call for answering the user question.\nThe value of \"data\" should be a dictionary of key-value pairs you want to POST to the url as a JSON body.\nBe careful to always use double quotes for strings in the json string.\nYou should build the json string in order to get a response that is as short as possible, while still getting the necessary information to answer the question. Pay attention to deliberately exclude any unnecessary pieces of data in the API call.\n\nQuestion:{question}\njson string: {api_url_body}\n\nHere is the response from the API:\n\n{api_response}\n\nSummarize this response to answer the original question.\n\nSummary:"
},
"outputAnchors": [
{
"id": "postApiChain_0-output-postApiChain-POSTApiChain|BaseChain|BaseLangChain",
"name": "postApiChain",
"label": "POSTApiChain",
"type": "POSTApiChain | BaseChain | BaseLangChain"
}
],
"outputs": {},
"selected": false
},
"selected": false,
"positionAbsolute": {
"x": 933.3631140153886,
"y": 974.8756002461283
},
"dragging": false
}
],
"edges": [
{
"source": "getApiChain_0",
"sourceHandle": "getApiChain_0-output-getApiChain-GETApiChain|BaseChain|BaseLangChain",
"target": "chainTool_0",
"targetHandle": "chainTool_0-input-baseChain-BaseChain",
"type": "buttonedge",
"id": "getApiChain_0-getApiChain_0-output-getApiChain-GETApiChain|BaseChain|BaseLangChain-chainTool_0-chainTool_0-input-baseChain-BaseChain",
"data": {
"label": ""
}
},
{
"source": "chatOpenAI_0",
"sourceHandle": "chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|BaseLangChain",
"target": "conversationalAgent_0",
"targetHandle": "conversationalAgent_0-input-model-BaseLanguageModel",
"type": "buttonedge",
"id": "chatOpenAI_0-chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|BaseLangChain-conversationalAgent_0-conversationalAgent_0-input-model-BaseLanguageModel",
"data": {
"label": ""
}
},
{
"source": "bufferMemory_0",
"sourceHandle": "bufferMemory_0-output-bufferMemory-BufferMemory|BaseChatMemory|BaseMemory",
"target": "conversationalAgent_0",
"targetHandle": "conversationalAgent_0-input-memory-BaseChatMemory",
"type": "buttonedge",
"id": "bufferMemory_0-bufferMemory_0-output-bufferMemory-BufferMemory|BaseChatMemory|BaseMemory-conversationalAgent_0-conversationalAgent_0-input-memory-BaseChatMemory",
"data": {
"label": ""
}
},
{
"source": "chatOpenAI_1",
"sourceHandle": "chatOpenAI_1-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|BaseLangChain",
"target": "getApiChain_0",
"targetHandle": "getApiChain_0-input-model-BaseLanguageModel",
"type": "buttonedge",
"id": "chatOpenAI_1-chatOpenAI_1-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|BaseLangChain-getApiChain_0-getApiChain_0-input-model-BaseLanguageModel",
"data": {
"label": ""
}
},
{
"source": "chatOpenAI_2",
"sourceHandle": "chatOpenAI_2-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|BaseLangChain",
"target": "postApiChain_0",
"targetHandle": "postApiChain_0-input-model-BaseLanguageModel",
"type": "buttonedge",
"id": "chatOpenAI_2-chatOpenAI_2-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel|BaseLangChain-postApiChain_0-postApiChain_0-input-model-BaseLanguageModel",
"data": {
"label": ""
}
},
{
"source": "postApiChain_0",
"sourceHandle": "postApiChain_0-output-postApiChain-POSTApiChain|BaseChain|BaseLangChain",
"target": "chainTool_1",
"targetHandle": "chainTool_1-input-baseChain-BaseChain",
"type": "buttonedge",
"id": "postApiChain_0-postApiChain_0-output-postApiChain-POSTApiChain|BaseChain|BaseLangChain-chainTool_1-chainTool_1-input-baseChain-BaseChain",
"data": {
"label": ""
}
},
{
"source": "chainTool_0",
"sourceHandle": "chainTool_0-output-chainTool-ChainTool|DynamicTool|Tool|StructuredTool|BaseLangChain",
"target": "conversationalAgent_0",
"targetHandle": "conversationalAgent_0-input-tools-Tool",
"type": "buttonedge",
"id": "chainTool_0-chainTool_0-output-chainTool-ChainTool|DynamicTool|Tool|StructuredTool|BaseLangChain-conversationalAgent_0-conversationalAgent_0-input-tools-Tool",
"data": {
"label": ""
}
},
{
"source": "chainTool_1",
"sourceHandle": "chainTool_1-output-chainTool-ChainTool|DynamicTool|Tool|StructuredTool|BaseLangChain",
"target": "conversationalAgent_0",
"targetHandle": "conversationalAgent_0-input-tools-Tool",
"type": "buttonedge",
"id": "chainTool_1-chainTool_1-output-chainTool-ChainTool|DynamicTool|Tool|StructuredTool|BaseLangChain-conversationalAgent_0-conversationalAgent_0-input-tools-Tool",
"data": {
"label": ""
}
}
]
}
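
For reference, a minimal sketch (not part of the exported flow) of what the two API chains effectively do once the language model has generated a URL or JSON body from the documentation above: the GET API Chain calls the Open-Meteo forecast endpoint, and the POST API Chain posts a {"message": ...} body to the Pipedream endpoint. The coordinates, query parameters, and message text below are hypothetical example values, not outputs of this flow.

// TypeScript sketch (Node 18+, global fetch). Example values are assumptions.

async function getWeatherForecast(): Promise<unknown> {
  // GET API Chain: the LLM builds a URL against https://api.open-meteo.com/v1/forecast
  // from {api_docs} and {question}; this is one plausible result for a question about
  // the current weather in Berlin (latitude/longitude are illustrative).
  const url =
    "https://api.open-meteo.com/v1/forecast" +
    "?latitude=52.52&longitude=13.41&current_weather=true";
  const res = await fetch(url);
  return res.json(); // the chain then summarizes this JSON via the Answer Prompt
}

async function sendDiscordMessage(message: string): Promise<string> {
  // POST API Chain: the LLM builds {"url": "...", "data": {"message": "..."}} and the
  // chain POSTs the data object to the documented endpoint.
  const res = await fetch("https://eog776prcv6dg0j.m.pipedream.net", {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ message }),
  });
  return res.text();
}

// Example usage
getWeatherForecast().then((forecast) => console.log(forecast));
sendDiscordMessage("It is 21°C and sunny in Berlin right now.").then(console.log);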