import { omit } from 'lodash'
import { ICommonObject, IDocument, INode, INodeData, INodeParams } from '../../../src/Interface'
import { TextSplitter } from 'langchain/text_splitter'
import { JSONLoader } from 'langchain/document_loaders/fs/json'
import { getFileFromStorage } from '../../../src'

class Json_DocumentLoaders implements INode {
    label: string
    name: string
    version: number
    description: string
    type: string
    icon: string
    category: string
    baseClasses: string[]
    inputs: INodeParams[]

    constructor() {
        this.label = 'Json File'
        this.name = 'jsonFile'
        this.version = 1.0
        this.type = 'Document'
        this.icon = 'json.svg'
        this.category = 'Document Loaders'
        this.description = `Load data from JSON files`
        this.baseClasses = [this.type]
        this.inputs = [
            {
                label: 'Json File',
                name: 'jsonFile',
                type: 'file',
                fileType: '.json'
            },
            {
                label: 'Text Splitter',
                name: 'textSplitter',
                type: 'TextSplitter',
                optional: true
            },
            {
                label: 'Pointers Extraction (separated by commas)',
                name: 'pointersName',
                type: 'string',
                description: 'Extracting multiple pointers',
                placeholder: 'Enter pointers name',
                optional: true
            },
            {
                label: 'Additional Metadata',
                name: 'metadata',
                type: 'json',
                description: 'Additional metadata to be added to the extracted documents',
                optional: true,
                additionalParams: true
            },
            {
                label: 'Omit Metadata Keys',
                name: 'omitMetadataKeys',
                type: 'string',
                rows: 4,
                description:
                    'Each document loader comes with a default set of metadata keys that are extracted from the document. You can use this field to omit some of the default metadata keys. The value should be a list of keys, separated by comma',
                placeholder: 'key1, key2, key3.nestedKey1',
                optional: true,
                additionalParams: true
            }
        ]
    }

    async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
        const textSplitter = nodeData.inputs?.textSplitter as TextSplitter
        const jsonFileBase64 = nodeData.inputs?.jsonFile as string
        const pointersName = nodeData.inputs?.pointersName as string
        const metadata = nodeData.inputs?.metadata
        const _omitMetadataKeys = nodeData.inputs?.omitMetadataKeys as string

        let omitMetadataKeys: string[] = []
        if (_omitMetadataKeys) {
            omitMetadataKeys = _omitMetadataKeys.split(',').map((key) => key.trim())
        }

        // Convert the comma-separated pointer names into JSON pointer paths (e.g. "text" -> "/text")
        let pointers: string[] = []
        if (pointersName) {
            const outputString = pointersName.replace(/[^a-zA-Z0-9,]+/g, ',')
            pointers = outputString.split(',').map((pointer) => '/' + pointer.trim())
        }

        let docs: IDocument[] = []
        let files: string[] = []

        //FILE-STORAGE::["CONTRIBUTING.md","LICENSE.md","README.md"]
        if (jsonFileBase64.startsWith('FILE-STORAGE::')) {
            // Files were uploaded to storage; load each one by name
            const fileName = jsonFileBase64.replace('FILE-STORAGE::', '')
            if (fileName.startsWith('[') && fileName.endsWith(']')) {
                files = JSON.parse(fileName)
            } else {
                files = [fileName]
            }
            const chatflowid = options.chatflowid

            for (const file of files) {
                const fileData = await getFileFromStorage(file, chatflowid)
                const blob = new Blob([fileData])
                const loader = new JSONLoader(blob, pointers.length != 0 ? pointers : undefined)

                if (textSplitter) {
                    docs.push(...(await loader.loadAndSplit(textSplitter)))
                } else {
                    docs.push(...(await loader.load()))
                }
            }
        } else {
            // Files were passed inline as base64 data URIs
            if (jsonFileBase64.startsWith('[') && jsonFileBase64.endsWith(']')) {
                files = JSON.parse(jsonFileBase64)
            } else {
                files = [jsonFileBase64]
            }

            for (const file of files) {
                const splitDataURI = file.split(',')
                splitDataURI.pop()
                const bf = Buffer.from(splitDataURI.pop() || '', 'base64')
                const blob = new Blob([bf])
                const loader = new JSONLoader(blob, pointers.length != 0 ? pointers : undefined)

                if (textSplitter) {
                    docs.push(...(await loader.loadAndSplit(textSplitter)))
                } else {
                    docs.push(...(await loader.load()))
                }
            }
        }

        // Merge any user-supplied metadata, then drop omitted keys
        if (metadata) {
            const parsedMetadata = typeof metadata === 'object' ? metadata : JSON.parse(metadata)
            docs = docs.map((doc) => ({
                ...doc,
                metadata: omit(
                    {
                        ...doc.metadata,
                        ...parsedMetadata
                    },
                    omitMetadataKeys
                )
            }))
        } else {
            docs = docs.map((doc) => ({
                ...doc,
                metadata: omit(
                    {
                        ...doc.metadata
                    },
                    omitMetadataKeys
                )
            }))
        }

        return docs
    }
}

module.exports = { nodeClass: Json_DocumentLoaders }