Delete extra template
parent 137226e84c
commit b93f0d37f1
@@ -1,342 +0,0 @@
{
    "description": "A simple LLM chain that uses Vectara to enable conversations with existing documents",
    "nodes": [
        {
            "width": 300,
            "height": 357,
            "id": "vectaraExistingIndex_0",
            "position": { "x": 465.7428816308626, "y": 241.80234144183515 },
            "type": "customNode",
            "data": {
                "id": "vectaraExistingIndex_0",
                "label": "Vectara Load Existing Index",
                "version": 1,
                "name": "vectaraExistingIndex",
                "type": "Vectara",
                "baseClasses": ["Vectara", "VectorStoreRetriever", "BaseRetriever"],
                "category": "Vector Stores",
                "description": "Load existing index from Vectara (i.e: Document has been upserted)",
                "inputParams": [
                    {
                        "label": "Connect Credential",
                        "name": "credential",
                        "type": "credential",
                        "credentialNames": ["vectaraApi"],
                        "id": "vectaraExistingIndex_0-input-credential-credential"
                    },
                    {
                        "label": "Vectara Metadata Filter",
                        "name": "filter",
                        "type": "json",
                        "additionalParams": true,
                        "optional": true,
                        "id": "vectaraExistingIndex_0-input-filter-json"
                    },
                    {
                        "label": "Lambda",
                        "name": "lambda",
                        "type": "number",
                        "additionalParams": true,
                        "optional": true,
                        "id": "vectaraExistingIndex_0-input-lambda-number"
                    },
                    {
                        "label": "Top K",
                        "name": "topK",
                        "description": "Number of top results to fetch. Defaults to 4",
                        "placeholder": "4",
                        "type": "number",
                        "additionalParams": true,
                        "optional": true,
                        "id": "vectaraExistingIndex_0-input-topK-number"
                    }
                ],
                "inputAnchors": [],
                "inputs": { "filter": "", "lambda": "", "topK": "" },
                "outputAnchors": [
                    {
                        "name": "output",
                        "label": "Output",
                        "type": "options",
                        "options": [
                            {
                                "id": "vectaraExistingIndex_0-output-retriever-Vectara|VectorStoreRetriever|BaseRetriever",
                                "name": "retriever",
                                "label": "Vectara Retriever",
                                "type": "Vectara | VectorStoreRetriever | BaseRetriever"
                            },
                            {
                                "id": "vectaraExistingIndex_0-output-vectorStore-Vectara|VectorStore",
                                "name": "vectorStore",
                                "label": "Vectara Vector Store",
                                "type": "Vectara | VectorStore"
                            }
                        ],
                        "default": "retriever"
                    }
                ],
                "outputs": { "output": "retriever" },
                "selected": false
            },
            "selected": false,
            "dragging": false,
            "positionAbsolute": { "x": 465.7428816308626, "y": 241.80234144183515 }
        },
        {
            "width": 300,
            "height": 525,
            "id": "chatOpenAI_0",
            "position": { "x": 899.9419397673153, "y": 202.43889985803958 },
            "type": "customNode",
            "data": {
                "id": "chatOpenAI_0",
                "label": "ChatOpenAI",
                "version": 1,
                "name": "chatOpenAI",
                "type": "ChatOpenAI",
                "baseClasses": ["ChatOpenAI", "BaseChatModel", "BaseLanguageModel"],
                "category": "Chat Models",
                "description": "Wrapper around OpenAI large language models that use the Chat endpoint",
                "inputParams": [
                    {
                        "label": "Connect Credential",
                        "name": "credential",
                        "type": "credential",
                        "credentialNames": ["openAIApi"],
                        "id": "chatOpenAI_0-input-credential-credential"
                    },
                    {
                        "label": "Model Name",
                        "name": "modelName",
                        "type": "options",
                        "options": [
                            { "label": "gpt-4", "name": "gpt-4" },
                            { "label": "gpt-4-0613", "name": "gpt-4-0613" },
                            { "label": "gpt-4-32k", "name": "gpt-4-32k" },
                            { "label": "gpt-4-32k-0613", "name": "gpt-4-32k-0613" },
                            { "label": "gpt-3.5-turbo", "name": "gpt-3.5-turbo" },
                            { "label": "gpt-3.5-turbo-0613", "name": "gpt-3.5-turbo-0613" },
                            { "label": "gpt-3.5-turbo-16k", "name": "gpt-3.5-turbo-16k" },
                            {
                                "label": "gpt-3.5-turbo-16k-0613",
                                "name": "gpt-3.5-turbo-16k-0613"
                            }
                        ],
                        "default": "gpt-3.5-turbo",
                        "optional": true,
                        "id": "chatOpenAI_0-input-modelName-options"
                    },
                    {
                        "label": "Temperature",
                        "name": "temperature",
                        "type": "number",
                        "step": 0.1,
                        "default": 0.9,
                        "optional": true,
                        "id": "chatOpenAI_0-input-temperature-number"
                    },
                    {
                        "label": "Max Tokens",
                        "name": "maxTokens",
                        "type": "number",
                        "step": 1,
                        "optional": true,
                        "additionalParams": true,
                        "id": "chatOpenAI_0-input-maxTokens-number"
                    },
                    {
                        "label": "Top Probability",
                        "name": "topP",
                        "type": "number",
                        "step": 0.1,
                        "optional": true,
                        "additionalParams": true,
                        "id": "chatOpenAI_0-input-topP-number"
                    },
                    {
                        "label": "Frequency Penalty",
                        "name": "frequencyPenalty",
                        "type": "number",
                        "step": 0.1,
                        "optional": true,
                        "additionalParams": true,
                        "id": "chatOpenAI_0-input-frequencyPenalty-number"
                    },
                    {
                        "label": "Presence Penalty",
                        "name": "presencePenalty",
                        "type": "number",
                        "step": 0.1,
                        "optional": true,
                        "additionalParams": true,
                        "id": "chatOpenAI_0-input-presencePenalty-number"
                    },
                    {
                        "label": "Timeout",
                        "name": "timeout",
                        "type": "number",
                        "step": 1,
                        "optional": true,
                        "additionalParams": true,
                        "id": "chatOpenAI_0-input-timeout-number"
                    },
                    {
                        "label": "BasePath",
                        "name": "basepath",
                        "type": "string",
                        "optional": true,
                        "additionalParams": true,
                        "id": "chatOpenAI_0-input-basepath-string"
                    }
                ],
                "inputAnchors": [],
                "inputs": {
                    "modelName": "gpt-3.5-turbo",
                    "temperature": 0.9,
                    "maxTokens": "",
                    "topP": "",
                    "frequencyPenalty": "",
                    "presencePenalty": "",
                    "timeout": "",
                    "basepath": ""
                },
                "outputAnchors": [
                    {
                        "id": "chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel",
                        "name": "chatOpenAI",
                        "label": "ChatOpenAI",
                        "type": "ChatOpenAI | BaseChatModel | BaseLanguageModel"
                    }
                ],
                "outputs": {},
                "selected": false
            },
            "selected": false,
            "positionAbsolute": { "x": 899.9419397673153, "y": 202.43889985803958 },
            "dragging": false
        },
        {
            "width": 300,
            "height": 481,
            "id": "conversationalRetrievalQAChain_0",
            "position": { "x": 1290.3534883720931, "y": 376.49186046511625 },
            "type": "customNode",
            "data": {
                "id": "conversationalRetrievalQAChain_0",
                "label": "Conversational Retrieval QA Chain",
                "version": 1,
                "name": "conversationalRetrievalQAChain",
                "type": "ConversationalRetrievalQAChain",
                "baseClasses": ["ConversationalRetrievalQAChain", "BaseChain"],
                "category": "Chains",
                "description": "Document QA - built on RetrievalQAChain to provide a chat history component",
                "inputParams": [
                    {
                        "label": "Return Source Documents",
                        "name": "returnSourceDocuments",
                        "type": "boolean",
                        "optional": true,
                        "id": "conversationalRetrievalQAChain_0-input-returnSourceDocuments-boolean"
                    },
                    {
                        "label": "System Message",
                        "name": "systemMessagePrompt",
                        "type": "string",
                        "rows": 4,
                        "additionalParams": true,
                        "optional": true,
                        "placeholder": "I want you to act as a document that I am having a conversation with. Your name is \"AI Assistant\". You will provide me with answers from the given info. If the answer is not included, say exactly \"Hmm, I am not sure.\" and stop after that. Refuse to answer any question not about the info. Never break character.",
                        "id": "conversationalRetrievalQAChain_0-input-systemMessagePrompt-string"
                    },
                    {
                        "label": "Chain Option",
                        "name": "chainOption",
                        "type": "options",
                        "options": [
                            {
                                "label": "MapReduceDocumentsChain",
                                "name": "map_reduce",
                                "description": "Suitable for QA tasks over larger documents and can run the preprocessing step in parallel, reducing the running time"
                            },
                            {
                                "label": "RefineDocumentsChain",
                                "name": "refine",
                                "description": "Suitable for QA tasks over a large number of documents."
                            },
                            {
                                "label": "StuffDocumentsChain",
                                "name": "stuff",
                                "description": "Suitable for QA tasks over a small number of documents."
                            }
                        ],
                        "additionalParams": true,
                        "optional": true,
                        "id": "conversationalRetrievalQAChain_0-input-chainOption-options"
                    }
                ],
                "inputAnchors": [
                    {
                        "label": "Language Model",
                        "name": "model",
                        "type": "BaseLanguageModel",
                        "id": "conversationalRetrievalQAChain_0-input-model-BaseLanguageModel"
                    },
                    {
                        "label": "Vector Store Retriever",
                        "name": "vectorStoreRetriever",
                        "type": "BaseRetriever",
                        "id": "conversationalRetrievalQAChain_0-input-vectorStoreRetriever-BaseRetriever"
                    },
                    {
                        "label": "Memory",
                        "name": "memory",
                        "type": "BaseMemory",
                        "optional": true,
                        "description": "If left empty, a default BufferMemory will be used",
                        "id": "conversationalRetrievalQAChain_0-input-memory-BaseMemory"
                    }
                ],
                "inputs": {
                    "model": "{{chatOpenAI_0.data.instance}}",
                    "vectorStoreRetriever": "{{vectaraExistingIndex_0.data.instance}}",
                    "memory": "",
                    "returnSourceDocuments": "",
                    "systemMessagePrompt": "",
                    "chainOption": ""
                },
                "outputAnchors": [
                    {
                        "id": "conversationalRetrievalQAChain_0-output-conversationalRetrievalQAChain-ConversationalRetrievalQAChain|BaseChain",
                        "name": "conversationalRetrievalQAChain",
                        "label": "ConversationalRetrievalQAChain",
                        "type": "ConversationalRetrievalQAChain | BaseChain"
                    }
                ],
                "outputs": {},
                "selected": false
            },
            "positionAbsolute": { "x": 1290.3534883720931, "y": 376.49186046511625 },
            "selected": false
        }
    ],
    "edges": [
        {
            "source": "chatOpenAI_0",
            "sourceHandle": "chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel",
            "target": "conversationalRetrievalQAChain_0",
            "targetHandle": "conversationalRetrievalQAChain_0-input-model-BaseLanguageModel",
            "type": "buttonedge",
            "id": "chatOpenAI_0-chatOpenAI_0-output-chatOpenAI-ChatOpenAI|BaseChatModel|BaseLanguageModel-conversationalRetrievalQAChain_0-conversationalRetrievalQAChain_0-input-model-BaseLanguageModel",
            "data": { "label": "" }
        },
        {
            "source": "vectaraExistingIndex_0",
            "sourceHandle": "vectaraExistingIndex_0-output-retriever-Vectara|VectorStoreRetriever|BaseRetriever",
            "target": "conversationalRetrievalQAChain_0",
            "targetHandle": "conversationalRetrievalQAChain_0-input-vectorStoreRetriever-BaseRetriever",
            "type": "buttonedge",
            "id": "vectaraExistingIndex_0-vectaraExistingIndex_0-output-retriever-Vectara|VectorStoreRetriever|BaseRetriever-conversationalRetrievalQAChain_0-conversationalRetrievalQAChain_0-input-vectorStoreRetriever-BaseRetriever",
            "data": { "label": "" }
        }
    ]
}
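
Note: the deleted template wired a Vectara retriever over an already-indexed corpus and a ChatOpenAI model (gpt-3.5-turbo, temperature 0.9) into a Conversational Retrieval QA Chain. The sketch below is a rough Python LangChain equivalent of that wiring, for reference only; it is not code from this repository, and the credential placeholders and 2023-era import paths are assumptions.

# Rough Python LangChain equivalent of the deleted flow; illustrative only.
from langchain.chat_models import ChatOpenAI
from langchain.vectorstores import Vectara
from langchain.chains import ConversationalRetrievalChain

# "Vectara Load Existing Index" node: connect to a corpus whose documents were
# already upserted. Placeholders stand in for the "vectaraApi" credential.
vectara = Vectara(
    vectara_customer_id="<customer-id>",
    vectara_corpus_id="<corpus-id>",
    vectara_api_key="<api-key>",
)

# "Top K" in the template defaults to 4 results per query.
retriever = vectara.as_retriever(search_kwargs={"k": 4})

# ChatOpenAI node: gpt-3.5-turbo at temperature 0.9.
llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0.9)

# Conversational Retrieval QA Chain node. Flowise falls back to a default
# BufferMemory when no memory is connected; here the chat history is passed
# explicitly on each call instead.
chain = ConversationalRetrievalChain.from_llm(llm, retriever)
result = chain({"question": "What does the document say?", "chat_history": []})
print(result["answer"])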