Merge branch 'FlowiseAI:main' into main

VJSai 2023-07-08 18:49:43 +05:30 committed by GitHub
commit 6166074e07
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
15 changed files with 393 additions and 10 deletions

View File

@@ -8,7 +8,12 @@ FROM node:18-alpine
RUN apk add --update libc6-compat python3 make g++
# needed for pdfjs-dist
RUN apk add --no-cache build-base cairo-dev pango-dev
# Install Chromium
RUN apk add --no-cache chromium
ENV PUPPETEER_SKIP_DOWNLOAD=true
ENV PUPPETEER_EXECUTABLE_PATH=/usr/bin/chromium-browser
WORKDIR /usr/src/packages

View File

@@ -6,7 +6,12 @@ RUN apk add --no-cache git
RUN apk add --no-cache python3 py3-pip make g++
# needed for pdfjs-dist
RUN apk add --no-cache build-base cairo-dev pango-dev
# Install Chromium
RUN apk add --no-cache chromium
ENV PUPPETEER_SKIP_DOWNLOAD=true
ENV PUPPETEER_EXECUTABLE_PATH=/usr/bin/chromium-browser
# You can install a specific version like: flowise@1.0.0
RUN npm install -g flowise
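
Note on the PUPPETEER_* lines in both Dockerfiles above: PUPPETEER_SKIP_DOWNLOAD stops Puppeteer from downloading its bundled Chromium at install time, and PUPPETEER_EXECUTABLE_PATH points it at the apk-installed browser instead. A minimal sketch of a launch call that relies on this setup (a hypothetical standalone script, not part of this commit):

    // check-chromium.ts - hypothetical sanity check for the container setup above
    import puppeteer from 'puppeteer'

    const main = async () => {
        const browser = await puppeteer.launch({
            // resolves to /usr/bin/chromium-browser inside these images
            executablePath: process.env.PUPPETEER_EXECUTABLE_PATH,
            // Chromium cannot use its sandbox when running as root in the container
            args: ['--no-sandbox'],
            headless: 'new'
        })
        console.log(await browser.version())
        await browser.close()
    }

    main()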

View File

@@ -66,7 +66,7 @@ class SqlDatabaseChain_Chains implements INode {
const chain = await getSQLDBChain(databaseType, dbFilePath, model)
if (options.socketIO && options.socketIOClientId) {
-const handler = new CustomChainHandler(options.socketIO, options.socketIOClientId)
+const handler = new CustomChainHandler(options.socketIO, options.socketIOClientId, 2)
const res = await chain.run(input, [handler])
return res
} else {

View File

@@ -1,6 +1,6 @@
import { INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses } from '../../../src/utils'
-import { HFInput, HuggingFaceInference } from 'langchain/llms/hf'
+import { HFInput, HuggingFaceInference } from './core'
class ChatHuggingFace_ChatModels implements INode {
label: string
@@ -71,6 +71,15 @@ class ChatHuggingFace_ChatModels implements INode {
description: 'Frequency Penalty parameter may not apply to certain models. Please check available model parameters',
optional: true,
additionalParams: true
},
{
label: 'Endpoint',
name: 'endpoint',
type: 'string',
placeholder: 'https://xyz.eu-west-1.aws.endpoints.huggingface.cloud/gpt2',
description: 'Use your own inference endpoint',
optional: true,
additionalParams: true
}
]
}
@@ -83,6 +92,7 @@ class ChatHuggingFace_ChatModels implements INode {
const topP = nodeData.inputs?.topP as string
const hfTopK = nodeData.inputs?.hfTopK as string
const frequencyPenalty = nodeData.inputs?.frequencyPenalty as string
const endpoint = nodeData.inputs?.endpoint as string
const obj: Partial<HFInput> = {
model,
@@ -94,6 +104,7 @@ class ChatHuggingFace_ChatModels implements INode {
// topP and frequencyPenalty are fractional, so parse as floats (parseInt('0.9') would yield 0)
if (topP) obj.topP = parseFloat(topP)
if (hfTopK) obj.topK = parseInt(hfTopK, 10)
if (frequencyPenalty) obj.frequencyPenalty = parseFloat(frequencyPenalty)
if (endpoint) obj.endpoint = endpoint
const huggingFace = new HuggingFaceInference(obj)
return huggingFace

View File

@@ -0,0 +1,109 @@
import { getEnvironmentVariable } from '../../../src/utils'
import { LLM, BaseLLMParams } from 'langchain/llms/base'
export interface HFInput {
/** Model to use */
model: string
/** Sampling temperature to use */
temperature?: number
/**
* Maximum number of tokens to generate in the completion.
*/
maxTokens?: number
/** Total probability mass of tokens to consider at each step */
topP?: number
/** Integer to define the top tokens considered within the sample operation to create new text. */
topK?: number
/** Penalizes repeated tokens according to frequency */
frequencyPenalty?: number
/** API key to use. */
apiKey?: string
/** Private endpoint to use. */
endpoint?: string
}
export class HuggingFaceInference extends LLM implements HFInput {
get lc_secrets(): { [key: string]: string } | undefined {
return {
apiKey: 'HUGGINGFACEHUB_API_KEY'
}
}
model = 'gpt2'
temperature: number | undefined = undefined
maxTokens: number | undefined = undefined
topP: number | undefined = undefined
topK: number | undefined = undefined
frequencyPenalty: number | undefined = undefined
apiKey: string | undefined = undefined
endpoint: string | undefined = undefined
constructor(fields?: Partial<HFInput> & BaseLLMParams) {
super(fields ?? {})
this.model = fields?.model ?? this.model
this.temperature = fields?.temperature ?? this.temperature
this.maxTokens = fields?.maxTokens ?? this.maxTokens
this.topP = fields?.topP ?? this.topP
this.topK = fields?.topK ?? this.topK
this.frequencyPenalty = fields?.frequencyPenalty ?? this.frequencyPenalty
this.endpoint = fields?.endpoint ?? ''
this.apiKey = fields?.apiKey ?? getEnvironmentVariable('HUGGINGFACEHUB_API_KEY')
if (!this.apiKey) {
throw new Error(
'Please set an API key for HuggingFace Hub in the environment variable HUGGINGFACEHUB_API_KEY or in the apiKey field of the HuggingFaceInference constructor.'
)
}
}
_llmType() {
return 'hf'
}
/** @ignore */
async _call(prompt: string, options: this['ParsedCallOptions']): Promise<string> {
const { HfInference } = await HuggingFaceInference.imports()
const hf = new HfInference(this.apiKey)
// hf.endpoint() returns a new endpoint-scoped client rather than mutating hf,
// so its return value must be captured and used for the request
const client = this.endpoint ? hf.endpoint(this.endpoint) : hf
const res = await this.caller.callWithOptions({ signal: options.signal }, client.textGeneration.bind(client) as typeof hf.textGeneration, {
model: this.model,
parameters: {
// make it behave similarly to OpenAI, returning only the generated text
return_full_text: false,
temperature: this.temperature,
max_new_tokens: this.maxTokens,
top_p: this.topP,
top_k: this.topK,
repetition_penalty: this.frequencyPenalty
},
inputs: prompt
})
return res.generated_text
}
/** @ignore */
static async imports(): Promise<{
HfInference: typeof import('@huggingface/inference').HfInference
}> {
try {
const { HfInference } = await import('@huggingface/inference')
return { HfInference }
} catch (e) {
throw new Error('Please install @huggingface/inference as a dependency, e.g. `yarn add @huggingface/inference`')
}
}
}
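
A minimal usage sketch for the wrapper above (model, prompt, and endpoint URL are illustrative placeholders; the API key falls back to HUGGINGFACEHUB_API_KEY):

    // hypothetical consumer of the HuggingFaceInference wrapper defined above
    import { HuggingFaceInference } from './core'

    const main = async () => {
        const llm = new HuggingFaceInference({
            model: 'gpt2',
            temperature: 0.7,
            maxTokens: 100,
            // optional: route requests to a private Inference Endpoint instead of the public API
            endpoint: 'https://xyz.eu-west-1.aws.endpoints.huggingface.cloud/gpt2'
        })
        // LLM#call comes from the langchain base class that HuggingFaceInference extends
        const text = await llm.call('Write a haiku about branching.')
        console.log(text)
    }

    main()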

View File

@@ -73,7 +73,12 @@ class Puppeteer_DocumentLoaders implements INode {
const puppeteerLoader = async (url: string): Promise<any> => {
let docs = []
-const loader = new PuppeteerWebBaseLoader(url)
+const loader = new PuppeteerWebBaseLoader(url, {
+    launchOptions: {
+        args: ['--no-sandbox'],
+        headless: 'new'
+    }
+})
if (textSplitter) {
docs = await loader.loadAndSplit(textSplitter)
} else {
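
For context: Chromium refuses to start its sandbox when running as root, which is the default user in the Alpine images above, so --no-sandbox is required for the loader to run inside the container at all, and headless: 'new' opts into Chrome's new headless mode rather than the legacy implementation. Both options pair with the Chromium install added to the Dockerfiles in this commit.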

View File

@@ -1,6 +1,6 @@
import { INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses } from '../../../src/utils'
-import { HuggingFaceInferenceEmbeddings, HuggingFaceInferenceEmbeddingsParams } from 'langchain/embeddings/hf'
+import { HuggingFaceInferenceEmbeddings, HuggingFaceInferenceEmbeddingsParams } from './core'
class HuggingFaceInferenceEmbedding_Embeddings implements INode {
label: string
@@ -31,6 +31,14 @@ class HuggingFaceInferenceEmbedding_Embeddings implements INode {
name: 'modelName',
type: 'string',
optional: true
},
{
label: 'Endpoint',
name: 'endpoint',
type: 'string',
placeholder: 'https://xyz.eu-west-1.aws.endpoints.huggingface.cloud/sentence-transformers/all-MiniLM-L6-v2',
description: 'Use your own inference endpoint',
optional: true
}
]
}
@@ -38,12 +46,14 @@ class HuggingFaceInferenceEmbedding_Embeddings implements INode {
async init(nodeData: INodeData): Promise<any> {
const apiKey = nodeData.inputs?.apiKey as string
const modelName = nodeData.inputs?.modelName as string
const endpoint = nodeData.inputs?.endpoint as string
const obj: Partial<HuggingFaceInferenceEmbeddingsParams> = {
apiKey
}
if (modelName) obj.model = modelName
if (endpoint) obj.endpoint = endpoint
const model = new HuggingFaceInferenceEmbeddings(obj)
return model

View File

@@ -0,0 +1,48 @@
import { HfInference } from '@huggingface/inference'
import { Embeddings, EmbeddingsParams } from 'langchain/embeddings/base'
import { getEnvironmentVariable } from '../../../src/utils'
export interface HuggingFaceInferenceEmbeddingsParams extends EmbeddingsParams {
apiKey?: string
model?: string
endpoint?: string
}
export class HuggingFaceInferenceEmbeddings extends Embeddings implements HuggingFaceInferenceEmbeddingsParams {
apiKey?: string
endpoint?: string
model: string
client: HfInference
constructor(fields?: HuggingFaceInferenceEmbeddingsParams) {
super(fields ?? {})
this.model = fields?.model ?? 'sentence-transformers/distilbert-base-nli-mean-tokens'
this.apiKey = fields?.apiKey ?? getEnvironmentVariable('HUGGINGFACEHUB_API_KEY')
this.endpoint = fields?.endpoint ?? ''
// HfInference#endpoint() returns a new endpoint-scoped client rather than mutating the
// existing one, so build the client conditionally (cast needed because the endpoint-scoped
// client exposes the same task methods under a narrower type)
this.client = this.endpoint ? (new HfInference(this.apiKey).endpoint(this.endpoint) as unknown as HfInference) : new HfInference(this.apiKey)
}
async _embed(texts: string[]): Promise<number[][]> {
// replace newlines, which can negatively affect performance.
const clean = texts.map((text) => text.replace(/\n/g, ' '))
return this.caller.call(() =>
this.client.featureExtraction({
model: this.model,
inputs: clean
})
) as Promise<number[][]>
}
embedQuery(document: string): Promise<number[]> {
return this._embed([document]).then((embeddings) => embeddings[0])
}
embedDocuments(documents: string[]): Promise<number[][]> {
return this._embed(documents)
}
}
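
A short usage sketch for the embeddings wrapper above (the model name is illustrative; the API key is read from HUGGINGFACEHUB_API_KEY when omitted):

    // hypothetical consumer of the HuggingFaceInferenceEmbeddings wrapper defined above
    import { HuggingFaceInferenceEmbeddings } from './core'

    const main = async () => {
        const embeddings = new HuggingFaceInferenceEmbeddings({
            model: 'sentence-transformers/all-MiniLM-L6-v2'
        })
        const vector = await embeddings.embedQuery('What is Flowise?')
        console.log(vector.length) // embedding dimension, e.g. 384 for all-MiniLM-L6-v2
    }

    main()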

View File

@@ -1,6 +1,6 @@
import { INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses } from '../../../src/utils'
-import { HFInput, HuggingFaceInference } from 'langchain/llms/hf'
+import { HFInput, HuggingFaceInference } from './core'
class HuggingFaceInference_LLMs implements INode {
label: string
@@ -71,6 +71,15 @@ class HuggingFaceInference_LLMs implements INode {
description: 'Frequency Penalty parameter may not apply to certain models. Please check available model parameters',
optional: true,
additionalParams: true
},
{
label: 'Endpoint',
name: 'endpoint',
type: 'string',
placeholder: 'https://xyz.eu-west-1.aws.endpoints.huggingface.cloud/gpt2',
description: 'Use your own inference endpoint',
optional: true,
additionalParams: true
}
]
}
@@ -83,6 +92,7 @@ class HuggingFaceInference_LLMs implements INode {
const topP = nodeData.inputs?.topP as string
const hfTopK = nodeData.inputs?.hfTopK as string
const frequencyPenalty = nodeData.inputs?.frequencyPenalty as string
const endpoint = nodeData.inputs?.endpoint as string
const obj: Partial<HFInput> = {
model,
@@ -94,6 +104,7 @@ class HuggingFaceInference_LLMs implements INode {
// topP and frequencyPenalty are fractional, so parse as floats (parseInt('0.9') would yield 0)
if (topP) obj.topP = parseFloat(topP)
if (hfTopK) obj.topK = parseInt(hfTopK, 10)
if (frequencyPenalty) obj.frequencyPenalty = parseFloat(frequencyPenalty)
if (endpoint) obj.endpoint = endpoint
const huggingFace = new HuggingFaceInference(obj)
return huggingFace

View File

@@ -0,0 +1,109 @@
import { getEnvironmentVariable } from '../../../src/utils'
import { LLM, BaseLLMParams } from 'langchain/llms/base'
export interface HFInput {
/** Model to use */
model: string
/** Sampling temperature to use */
temperature?: number
/**
* Maximum number of tokens to generate in the completion.
*/
maxTokens?: number
/** Total probability mass of tokens to consider at each step */
topP?: number
/** Integer to define the top tokens considered within the sample operation to create new text. */
topK?: number
/** Penalizes repeated tokens according to frequency */
frequencyPenalty?: number
/** API key to use. */
apiKey?: string
/** Private endpoint to use. */
endpoint?: string
}
export class HuggingFaceInference extends LLM implements HFInput {
get lc_secrets(): { [key: string]: string } | undefined {
return {
apiKey: 'HUGGINGFACEHUB_API_KEY'
}
}
model = 'gpt2'
temperature: number | undefined = undefined
maxTokens: number | undefined = undefined
topP: number | undefined = undefined
topK: number | undefined = undefined
frequencyPenalty: number | undefined = undefined
apiKey: string | undefined = undefined
endpoint: string | undefined = undefined
constructor(fields?: Partial<HFInput> & BaseLLMParams) {
super(fields ?? {})
this.model = fields?.model ?? this.model
this.temperature = fields?.temperature ?? this.temperature
this.maxTokens = fields?.maxTokens ?? this.maxTokens
this.topP = fields?.topP ?? this.topP
this.topK = fields?.topK ?? this.topK
this.frequencyPenalty = fields?.frequencyPenalty ?? this.frequencyPenalty
this.endpoint = fields?.endpoint ?? ''
this.apiKey = fields?.apiKey ?? getEnvironmentVariable('HUGGINGFACEHUB_API_KEY')
if (!this.apiKey) {
throw new Error(
'Please set an API key for HuggingFace Hub in the environment variable HUGGINGFACEHUB_API_KEY or in the apiKey field of the HuggingFaceInference constructor.'
)
}
}
_llmType() {
return 'hf'
}
/** @ignore */
async _call(prompt: string, options: this['ParsedCallOptions']): Promise<string> {
const { HfInference } = await HuggingFaceInference.imports()
const hf = new HfInference(this.apiKey)
// hf.endpoint() returns a new endpoint-scoped client rather than mutating hf,
// so its return value must be captured and used for the request
const client = this.endpoint ? hf.endpoint(this.endpoint) : hf
const res = await this.caller.callWithOptions({ signal: options.signal }, client.textGeneration.bind(client) as typeof hf.textGeneration, {
model: this.model,
parameters: {
// make it behave similarly to OpenAI, returning only the generated text
return_full_text: false,
temperature: this.temperature,
max_new_tokens: this.maxTokens,
top_p: this.topP,
top_k: this.topK,
repetition_penalty: this.frequencyPenalty
},
inputs: prompt
})
return res.generated_text
}
/** @ignore */
static async imports(): Promise<{
HfInference: typeof import('@huggingface/inference').HfInference
}> {
try {
const { HfInference } = await import('@huggingface/inference')
return { HfInference }
} catch (e) {
throw new Error('Please install @huggingface/inference as a dependency, e.g. `yarn add @huggingface/inference`')
}
}
}

View File

@@ -1,7 +1,7 @@
import { INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses } from '../../../src/utils'
-import { ChainTool } from 'langchain/tools'
import { BaseChain } from 'langchain/chains'
+import { ChainTool } from './core'
class ChainTool_Tools implements INode {
label: string

View File

@@ -0,0 +1,25 @@
import { DynamicTool, DynamicToolInput } from 'langchain/tools'
import { BaseChain } from 'langchain/chains'
export interface ChainToolInput extends Omit<DynamicToolInput, 'func'> {
chain: BaseChain
}
export class ChainTool extends DynamicTool {
chain: BaseChain
constructor({ chain, ...rest }: ChainToolInput) {
super({
...rest,
func: async (input, runManager) => {
// To enable LLM Chain which has promptValues
if ((chain as any).prompt && (chain as any).prompt.promptValues) {
const values = await chain.call((chain as any).prompt.promptValues, runManager?.getChild())
return values?.text
}
return chain.run(input, runManager?.getChild())
}
})
this.chain = chain
}
}
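
A sketch of the intended call pattern for the subclass above, wrapping an existing chain as an agent tool (the tool name and description are illustrative; chain construction is elided):

    // hypothetical consumer of the ChainTool subclass defined above
    import { ChainTool } from './core'
    import { BaseChain } from 'langchain/chains'

    const makeQaTool = (chain: BaseChain) =>
        new ChainTool({
            name: 'docs-qa',
            description: 'Answers questions about the loaded documents',
            chain
        })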

View File

@@ -19,7 +19,7 @@
"@aws-sdk/client-dynamodb": "^3.360.0",
"@dqbd/tiktoken": "^1.0.7",
"@getzep/zep-js": "^0.3.1",
"@huggingface/inference": "1",
"@huggingface/inference": "^2.6.1",
"@pinecone-database/pinecone": "^0.0.12",
"@qdrant/js-client-rest": "^1.2.2",
"@supabase/supabase-js": "^2.21.0",

View File

@@ -201,6 +201,20 @@ export const getAvailableURLs = async (url: string, limit: number) => {
}
}
/**
* Get an environment variable
* @param {string} name
* @returns {string | undefined}
*/
export const getEnvironmentVariable = (name: string): string | undefined => {
try {
return typeof process !== 'undefined' ? process.env?.[name] : undefined
} catch (e) {
return undefined
}
}
/**
* Custom chain handler class
*/
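
A quick sketch of the intended call pattern for getEnvironmentVariable (the variable name mirrors the one used by the HuggingFace wrappers above):

    // hypothetical call site: yields undefined when the variable is unset or when
    // process is unavailable (e.g. in a browser bundle)
    const hfKey = getEnvironmentVariable('HUGGINGFACEHUB_API_KEY')
    if (!hfKey) throw new Error('HUGGINGFACEHUB_API_KEY is not set')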

View File

@@ -34,7 +34,7 @@ import Transitions from 'ui-component/extended/Transitions'
import { StyledFab } from 'ui-component/button/StyledFab'
// icons
-import { IconPlus, IconSearch, IconMinus } from '@tabler/icons'
+import { IconPlus, IconSearch, IconMinus, IconX } from '@tabler/icons'
// const
import { baseURL } from 'store/constant'
@@ -61,11 +61,20 @@ const AddNodes = ({ nodesData, node }) => {
}
}
const getSearchedNodes = (value) => {
const passed = nodesData.filter((nd) => {
const passesQuery = nd.name.toLowerCase().includes(value.toLowerCase())
const passesCategory = nd.category.toLowerCase().includes(value.toLowerCase())
return passesQuery || passesCategory
})
return passed
}
const filterSearch = (value) => {
setSearchValue(value)
setTimeout(() => {
if (value) {
-const returnData = nodesData.filter((nd) => nd.name.toLowerCase().includes(value.toLowerCase()))
+const returnData = getSearchedNodes(value)
groupByCategory(returnData, true)
scrollTop()
} else if (value === '') {
@@ -167,7 +176,7 @@ const AddNodes = ({ nodesData, node }) => {
<Typography variant='h4'>Add Nodes</Typography>
</Stack>
<OutlinedInput
-sx={{ width: '100%', pr: 1, pl: 2, my: 2 }}
+sx={{ width: '100%', pr: 2, pl: 2, my: 2 }}
id='input-search-node'
value={searchValue}
onChange={(e) => filterSearch(e.target.value)}
@@ -177,6 +186,28 @@ const AddNodes = ({ nodesData, node }) => {
<IconSearch stroke={1.5} size='1rem' color={theme.palette.grey[500]} />
</InputAdornment>
}
endAdornment={
<InputAdornment
position='end'
sx={{
cursor: 'pointer',
color: theme.palette.grey[500],
'&:hover': {
color: theme.palette.grey[900]
}
}}
title='Clear Search'
>
<IconX
stroke={1.5}
size='1rem'
onClick={() => filterSearch('')}
style={{
cursor: 'pointer'
}}
/>
</InputAdornment>
}
aria-describedby='search-helper-text'
inputProps={{
'aria-label': 'weight'