Merge pull request #1281 from FlowiseAI/feature/Update-Langchain-Version

Feature/update langchain version
Henry Heng 2023-11-24 16:36:54 +00:00 committed by GitHub
commit fea89bbe42
12 changed files with 49 additions and 30 deletions

View File

@@ -1,7 +1,7 @@
import { ICommonObject, INode, INodeData, INodeOutputsValue, INodeParams } from '../../../src/Interface'
import { getBaseClasses, handleEscapeCharacters } from '../../../src/utils'
import { LLMChain } from 'langchain/chains'
-import { BaseLanguageModel } from 'langchain/base_language'
+import { BaseLanguageModel, BaseLanguageModelCallOptions } from 'langchain/base_language'
import { ConsoleCallbackHandler, CustomChainHandler, additionalCallbacks } from '../../../src/handler'
import { BaseOutputParser } from 'langchain/schema/output_parser'
import { formatResponse, injectOutputParser } from '../../outputparsers/OutputParserHelpers'
@@ -141,7 +141,7 @@ class LLMChain_Chains implements INode {
const runPrediction = async (
inputVariables: string[],
-chain: LLMChain<string | object>,
+chain: LLMChain<string | object | BaseLanguageModel<any, BaseLanguageModelCallOptions>>,
input: string,
promptValuesRaw: ICommonObject | undefined,
options: ICommonObject,
@@ -164,7 +164,7 @@ const runPrediction = async (
if (moderations && moderations.length > 0) {
try {
// Use the output of the moderation chain as input for the LLM chain
-input = await checkInputs(moderations, chain.llm, input)
+input = await checkInputs(moderations, input)
} catch (e) {
await new Promise((resolve) => setTimeout(resolve, 500))
streamResponse(isStreaming, e.message, socketIO, socketIOClientId)
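The control flow worth noting here: moderation now runs on the raw input alone (no LLM handle needed), and a violation is streamed back to the client rather than crashing the prediction. A minimal sketch of that pattern, with local stand-ins for Flowise's checkInputs and streamResponse helpers (names and shapes assumed for illustration):

```typescript
type ModerationFn = (input: string) => Promise<string>

// Stand-in for streamResponse: logs instead of writing to a socket
const streamResponse = (message: string) => console.log('to client:', message)

async function runGuardedPrediction(
    moderations: ModerationFn[],
    input: string,
    predict: (input: string) => Promise<string>
): Promise<string | undefined> {
    try {
        // Each moderation sees the previous one's output, as in checkInputs
        for (const check of moderations) input = await check(input)
    } catch (e) {
        // A violation short-circuits the chain; the message goes to the client
        streamResponse((e as Error).message)
        return undefined
    }
    return predict(input)
}
```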

View File

@@ -27,7 +27,7 @@ class AWSChatBedrock_ChatModels implements INode {
constructor() {
this.label = 'AWS Bedrock'
this.name = 'awsChatBedrock'
-this.version = 2.0
+this.version = 3.0
this.type = 'AWSChatBedrock'
this.icon = 'awsBedrock.png'
this.category = 'Chat Models'
@@ -97,7 +97,8 @@ class AWSChatBedrock_ChatModels implements INode {
options: [
{ label: 'anthropic.claude-instant-v1', name: 'anthropic.claude-instant-v1' },
{ label: 'anthropic.claude-v1', name: 'anthropic.claude-v1' },
-{ label: 'anthropic.claude-v2', name: 'anthropic.claude-v2' }
+{ label: 'anthropic.claude-v2', name: 'anthropic.claude-v2' },
+{ label: 'meta.llama2-13b-chat-v1', name: 'meta.llama2-13b-chat-v1' }
],
default: 'anthropic.claude-v2'
},
@@ -128,12 +129,14 @@ class AWSChatBedrock_ChatModels implements INode {
const iTemperature = nodeData.inputs?.temperature as string
const iMax_tokens_to_sample = nodeData.inputs?.max_tokens_to_sample as string
const cache = nodeData.inputs?.cache as BaseCache
+const streaming = nodeData.inputs?.streaming as boolean
const obj: BaseBedrockInput & BaseLLMParams = {
region: iRegion,
model: iModel,
maxTokens: parseInt(iMax_tokens_to_sample, 10),
-temperature: parseFloat(iTemperature)
+temperature: parseFloat(iTemperature),
+streaming: streaming ?? true
}
/**
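The new streaming flag defaults on: `streaming ?? true` only falls back when the input is null or undefined, so an explicit false still disables streaming. A runnable sketch of that defaulting, using a local stand-in for langchain's BaseBedrockInput & BaseLLMParams shape (the real types come from the langchain package):

```typescript
// Local stand-in for the relevant fields of BaseBedrockInput & BaseLLMParams
interface BedrockParamsSketch {
    region: string
    model: string
    maxTokens: number
    temperature: number
    streaming: boolean
}

// Hypothetical node inputs, mirroring nodeData.inputs in the diff
const inputs: { streaming?: boolean } = {}

const obj: BedrockParamsSketch = {
    region: 'us-east-1',
    model: 'anthropic.claude-v2',
    maxTokens: 200,
    temperature: 0.7,
    streaming: inputs.streaming ?? true // `??` preserves an explicit false
}

console.log(obj.streaming) // true: the input was never set
```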

View File

@@ -18,7 +18,7 @@ class AWSBedrockEmbedding_Embeddings implements INode {
constructor() {
this.label = 'AWS Bedrock Embeddings'
this.name = 'AWSBedrockEmbeddings'
-this.version = 1.0
+this.version = 2.0
this.type = 'AWSBedrockEmbeddings'
this.icon = 'awsBedrock.png'
this.category = 'Embeddings'
@@ -81,7 +81,9 @@ class AWSBedrockEmbedding_Embeddings implements INode {
type: 'options',
options: [
{ label: 'amazon.titan-embed-text-v1', name: 'amazon.titan-embed-text-v1' },
-{ label: 'amazon.titan-embed-g1-text-02', name: 'amazon.titan-embed-g1-text-02' }
+{ label: 'amazon.titan-embed-g1-text-02', name: 'amazon.titan-embed-g1-text-02' },
+{ label: 'cohere.embed-english-v3', name: 'cohere.embed-english-v3' },
+{ label: 'cohere.embed-multilingual-v3', name: 'cohere.embed-multilingual-v3' }
],
default: 'amazon.titan-embed-text-v1'
}

View File

@@ -27,7 +27,7 @@ class AWSBedrock_LLMs implements INode {
constructor() {
this.label = 'AWS Bedrock'
this.name = 'awsBedrock'
-this.version = 1.2
+this.version = 2.0
this.type = 'AWSBedrock'
this.icon = 'awsBedrock.png'
this.category = 'LLMs'
@@ -98,6 +98,7 @@ class AWSBedrock_LLMs implements INode {
{ label: 'amazon.titan-tg1-large', name: 'amazon.titan-tg1-large' },
{ label: 'amazon.titan-e1t-medium', name: 'amazon.titan-e1t-medium' },
{ label: 'cohere.command-text-v14', name: 'cohere.command-text-v14' },
+{ label: 'cohere.command-light-text-v14', name: 'cohere.command-light-text-v14' },
{ label: 'ai21.j2-grande-instruct', name: 'ai21.j2-grande-instruct' },
{ label: 'ai21.j2-jumbo-instruct', name: 'ai21.j2-jumbo-instruct' },
{ label: 'ai21.j2-mid', name: 'ai21.j2-mid' },

View File

@@ -1,13 +1,12 @@
-import { BaseLanguageModel } from 'langchain/base_language'
import { Server } from 'socket.io'
export abstract class Moderation {
-abstract checkForViolations(llm: BaseLanguageModel, input: string): Promise<string>
+abstract checkForViolations(input: string): Promise<string>
}
-export const checkInputs = async (inputModerations: Moderation[], llm: BaseLanguageModel, input: string): Promise<string> => {
+export const checkInputs = async (inputModerations: Moderation[], input: string): Promise<string> => {
for (const moderation of inputModerations) {
-input = await moderation.checkForViolations(llm, input)
+input = await moderation.checkForViolations(input)
}
return input
}
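This simplification removes the unused llm parameter from the whole moderation pipeline: runners now depend only on the input text. A self-contained sketch of the new contract, with a hypothetical runner (BlockWordModeration is invented for illustration):

```typescript
// The simplified contract from this diff: runners no longer receive an LLM
abstract class Moderation {
    abstract checkForViolations(input: string): Promise<string>
}

const checkInputs = async (inputModerations: Moderation[], input: string): Promise<string> => {
    for (const moderation of inputModerations) {
        input = await moderation.checkForViolations(input)
    }
    return input
}

// Hypothetical runner, for illustration only
class BlockWordModeration extends Moderation {
    async checkForViolations(input: string): Promise<string> {
        if (input.includes('forbidden')) throw Error('Input rejected by moderation')
        return input
    }
}

// Each runner sees the output of the previous one
checkInputs([new BlockWordModeration()], 'hello world').then(console.log) // 'hello world'
```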

View File

@@ -1,5 +1,5 @@
-import { INode, INodeData, INodeParams } from '../../../src/Interface'
-import { getBaseClasses } from '../../../src'
+import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
+import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src'
import { Moderation } from '../Moderation'
import { OpenAIModerationRunner } from './OpenAIModerationRunner'
@@ -12,6 +12,7 @@ class OpenAIModeration implements INode {
icon: string
category: string
baseClasses: string[]
+credential: INodeParams
inputs: INodeParams[]
constructor() {
@@ -23,6 +24,12 @@ class OpenAIModeration implements INode {
this.category = 'Moderation'
this.description = 'Check whether content complies with OpenAI usage policies.'
this.baseClasses = [this.type, ...getBaseClasses(Moderation)]
+this.credential = {
+label: 'Connect Credential',
+name: 'credential',
+type: 'credential',
+credentialNames: ['openAIApi']
+}
this.inputs = [
{
label: 'Error Message',
@@ -35,8 +42,11 @@ class OpenAIModeration implements INode {
]
}
-async init(nodeData: INodeData): Promise<any> {
-const runner = new OpenAIModerationRunner()
+async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
+const credentialData = await getCredentialData(nodeData.credential ?? '', options)
+const openAIApiKey = getCredentialParam('openAIApiKey', credentialData, nodeData)
+const runner = new OpenAIModerationRunner(openAIApiKey)
const moderationErrorMessage = nodeData.inputs?.moderationErrorMessage as string
if (moderationErrorMessage) runner.setErrorMessage(moderationErrorMessage)
return runner

View File

@@ -1,18 +1,21 @@
import { Moderation } from '../Moderation'
-import { BaseLanguageModel } from 'langchain/base_language'
import { OpenAIModerationChain } from 'langchain/chains'
export class OpenAIModerationRunner implements Moderation {
+private openAIApiKey = ''
private moderationErrorMessage: string = "Text was found that violates OpenAI's content policy."
-async checkForViolations(llm: BaseLanguageModel, input: string): Promise<string> {
-const openAIApiKey = (llm as any).openAIApiKey
-if (!openAIApiKey) {
+constructor(openAIApiKey: string) {
+this.openAIApiKey = openAIApiKey
+}
+async checkForViolations(input: string): Promise<string> {
+if (!this.openAIApiKey) {
throw Error('OpenAI API key not found')
}
// Create a new instance of the OpenAIModerationChain
const moderation = new OpenAIModerationChain({
-openAIApiKey: openAIApiKey,
+openAIApiKey: this.openAIApiKey,
throwError: false // If set to true, the call will throw an error when the moderation chain detects violating content. If set to false, violating content will return "Text was found that violates OpenAI's content policy.".
})
// Send the user's input to the moderation chain and wait for the result
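Because the key is now injected at construction time, callers no longer need an LLM in scope to run moderation. A hedged usage sketch of the refactored runner (the import path and the env-var key source are assumptions; in Flowise the key is resolved from the node's credential, and setErrorMessage is the setter used by the node's init above):

```typescript
import { OpenAIModerationRunner } from './OpenAIModerationRunner' // assumed path

async function moderate(userInput: string): Promise<string | undefined> {
    // Assumption: key taken from the environment for this sketch
    const runner = new OpenAIModerationRunner(process.env.OPENAI_API_KEY ?? '')
    runner.setErrorMessage('Custom policy-violation message')
    try {
        // Throws when the key is missing or the moderation chain flags the text
        return await runner.checkForViolations(userInput)
    } catch (e) {
        console.error((e as Error).message)
        return undefined
    }
}
```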

View File

@@ -1,5 +1,4 @@
import { Moderation } from '../Moderation'
-import { BaseLanguageModel } from 'langchain/base_language'
export class SimplePromptModerationRunner implements Moderation {
private readonly denyList: string = ''
@@ -13,7 +12,7 @@ export class SimplePromptModerationRunner implements Moderation {
this.moderationErrorMessage = moderationErrorMessage
}
-async checkForViolations(_: BaseLanguageModel, input: string): Promise<string> {
+async checkForViolations(input: string): Promise<string> {
this.denyList.split('\n').forEach((denyListItem) => {
if (denyListItem && denyListItem !== '' && input.includes(denyListItem)) {
throw Error(this.moderationErrorMessage)

View File

@@ -1,6 +1,6 @@
import { BaseOutputParser } from 'langchain/schema/output_parser'
import { LLMChain } from 'langchain/chains'
-import { BaseLanguageModel } from 'langchain/base_language'
+import { BaseLanguageModel, BaseLanguageModelCallOptions } from 'langchain/base_language'
import { ICommonObject } from '../../src'
import { ChatPromptTemplate, FewShotPromptTemplate, PromptTemplate, SystemMessagePromptTemplate } from 'langchain/prompts'
@@ -15,7 +15,7 @@ export const formatResponse = (response: string | object): string | object => {
export const injectOutputParser = (
outputParser: BaseOutputParser<unknown>,
-chain: LLMChain<string, BaseLanguageModel>,
+chain: LLMChain<string | object | BaseLanguageModel<any, BaseLanguageModelCallOptions>>,
promptValues: ICommonObject | undefined = undefined
) => {
if (outputParser && chain.prompt) {

View File

@@ -31,7 +31,8 @@ class InMemoryVectorStore_VectorStores implements INode {
label: 'Document',
name: 'document',
type: 'Document',
-list: true
+list: true,
+optional: true
},
{
label: 'Embeddings',

View File

@@ -22,7 +22,8 @@
"@dqbd/tiktoken": "^1.0.7",
"@elastic/elasticsearch": "^8.9.0",
"@getzep/zep-js": "^0.6.3",
"@gomomento/sdk": "^1.40.2",
"@gomomento/sdk": "^1.51.1",
"@gomomento/sdk-core": "^1.51.1",
"@google-ai/generativelanguage": "^0.2.1",
"@huggingface/inference": "^2.6.1",
"@notionhq/client": "^2.2.8",
@@ -49,7 +50,7 @@
"html-to-text": "^9.0.5",
"husky": "^8.0.3",
"ioredis": "^5.3.2",
"langchain": "^0.0.165",
"langchain": "^0.0.196",
"langfuse-langchain": "^1.0.31",
"langsmith": "^0.0.32",
"linkifyjs": "^4.1.1",

View File

@@ -844,7 +844,7 @@ export const findAvailableConfigs = (reactFlowNodes: IReactFlowNode[], component
*/
export const isFlowValidForStream = (reactFlowNodes: IReactFlowNode[], endingNodeData: INodeData) => {
const streamAvailableLLMs = {
-'Chat Models': ['azureChatOpenAI', 'chatOpenAI', 'chatAnthropic', 'chatOllama'],
+'Chat Models': ['azureChatOpenAI', 'chatOpenAI', 'chatAnthropic', 'chatOllama', 'awsChatBedrock'],
LLMs: ['azureOpenAI', 'openAI', 'ollama']
}