Merge pull request #1451 from FlowiseAI/chore/Upgrade-LC-0.0.213

Chore/update langchain version

commit f1f2f71fe8
@@ -1,7 +1,6 @@
-import { OpenAIBaseInput } from 'langchain/dist/types/openai-types'
 import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
 import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
-import { AzureOpenAIInput, ChatOpenAI } from 'langchain/chat_models/openai'
+import { AzureOpenAIInput, ChatOpenAI, OpenAIChatInput } from 'langchain/chat_models/openai'
 import { BaseCache } from 'langchain/schema'
 import { BaseLLMParams } from 'langchain/llms/base'

@@ -123,7 +122,7 @@ class AzureChatOpenAI_ChatModels implements INode {
         const azureOpenAIApiDeploymentName = getCredentialParam('azureOpenAIApiDeploymentName', credentialData, nodeData)
         const azureOpenAIApiVersion = getCredentialParam('azureOpenAIApiVersion', credentialData, nodeData)

-        const obj: Partial<AzureOpenAIInput> & BaseLLMParams & Partial<OpenAIBaseInput> = {
+        const obj: Partial<AzureOpenAIInput> & BaseLLMParams & Partial<OpenAIChatInput> = {
             temperature: parseFloat(temperature),
             modelName,
             azureOpenAIApiKey,
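Note: the Azure hunks above swap the deep `langchain/dist/types/openai-types` import for the publicly exported OpenAIChatInput type, which is more stable across releases. A minimal TypeScript sketch of the resulting pattern, assuming langchain ^0.0.213's public exports (the concrete values below are illustrative, not Flowise's):

    // Minimal sketch, assuming langchain ^0.0.213 public exports; values are illustrative.
    import { AzureOpenAIInput, ChatOpenAI, OpenAIChatInput } from 'langchain/chat_models/openai'
    import { BaseLLMParams } from 'langchain/llms/base'

    const obj: Partial<AzureOpenAIInput> & BaseLLMParams & Partial<OpenAIChatInput> = {
        temperature: 0.7,
        modelName: 'gpt-3.5-turbo', // hypothetical model name
        azureOpenAIApiKey: process.env.AZURE_OPENAI_API_KEY
    }
    const model = new ChatOpenAI(obj)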
@@ -124,13 +124,14 @@ class ChatMistral_ChatModels implements INode {
         const safeMode = nodeData.inputs?.safeMode as boolean
         const randomSeed = nodeData.inputs?.safeMode as string
         const overrideEndpoint = nodeData.inputs?.overrideEndpoint as string
-        // Waiting fix from langchain + mistral to enable streaming - https://github.com/mistralai/client-js/issues/18
+        const streaming = nodeData.inputs?.streaming as boolean

         const cache = nodeData.inputs?.cache as BaseCache

         const obj: ChatMistralAIInput = {
             apiKey: apiKey,
-            modelName: modelName
+            modelName: modelName,
+            streaming: streaming ?? true
         }

         if (maxOutputTokens) obj.maxTokens = parseInt(maxOutputTokens, 10)
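With the upgraded langchain and @langchain/mistralai packages, the old "waiting for fix" comment becomes a real streaming input that defaults to true via nullish coalescing. A hedged sketch of that default behavior (ChatMistralAI and ChatMistralAIInput are assumed exports of @langchain/mistralai ^0.0.6; model name is illustrative):

    // Hedged sketch: streaming falls back to true only when the input is null/undefined.
    import { ChatMistralAI, ChatMistralAIInput } from '@langchain/mistralai'

    const streaming = undefined as boolean | undefined // e.g. nodeData.inputs?.streaming
    const obj: ChatMistralAIInput = {
        apiKey: process.env.MISTRAL_API_KEY,
        modelName: 'mistral-tiny', // hypothetical model name
        streaming: streaming ?? true // an explicit false stays false
    }
    const model = new ChatMistralAI(obj)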
@@ -1,8 +1,7 @@
 import { INode, INodeData, INodeParams } from '../../../src/Interface'
 import { getBaseClasses } from '../../../src/utils'
-import { ChatOllama } from 'langchain/chat_models/ollama'
+import { ChatOllama, ChatOllamaInput } from 'langchain/chat_models/ollama'
 import { BaseCache } from 'langchain/schema'
-import { OllamaInput } from 'langchain/dist/util/ollama'
 import { BaseLLMParams } from 'langchain/llms/base'

 class ChatOllama_ChatModels implements INode {
@@ -209,7 +208,7 @@ class ChatOllama_ChatModels implements INode {

         const cache = nodeData.inputs?.cache as BaseCache

-        const obj: OllamaInput & BaseLLMParams = {
+        const obj: ChatOllamaInput & BaseLLMParams = {
             baseUrl,
             temperature: parseFloat(temperature),
             model: modelName
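Same cleanup for the Ollama chat model: the input type now comes from the public 'langchain/chat_models/ollama' entry point instead of a dist path. A minimal sketch under the same langchain ^0.0.213 assumption:

    // Minimal sketch, assuming langchain ^0.0.213 exports ChatOllamaInput publicly.
    import { ChatOllama, ChatOllamaInput } from 'langchain/chat_models/ollama'
    import { BaseLLMParams } from 'langchain/llms/base'

    const obj: ChatOllamaInput & BaseLLMParams = {
        baseUrl: 'http://localhost:11434', // Ollama's default local endpoint
        temperature: 0.9,
        model: 'llama2'
    }
    const chat = new ChatOllama(obj)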
@@ -1,7 +1,7 @@
 import { INode, INodeData, INodeParams } from '../../../src/Interface'
 import { getBaseClasses } from '../../../src/utils'
+import { OllamaInput } from 'langchain/llms/ollama'
 import { OllamaEmbeddings } from 'langchain/embeddings/ollama'
-import { OllamaInput } from 'langchain/dist/util/ollama'

 class OllamaEmbedding_Embeddings implements INode {
     label: string
@@ -1,8 +1,7 @@
 import { INode, INodeData, INodeParams } from '../../../src/Interface'
 import { getBaseClasses } from '../../../src/utils'
-import { Ollama } from 'langchain/llms/ollama'
+import { Ollama, OllamaInput } from 'langchain/llms/ollama'
 import { BaseCache } from 'langchain/schema'
-import { OllamaInput } from 'langchain/dist/util/ollama'
 import { BaseLLMParams } from 'langchain/llms/base'

 class Ollama_LLMs implements INode {
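The embeddings and LLM nodes get the same treatment: OllamaInput is now re-exported from 'langchain/llms/ollama', so both can drop the brittle 'langchain/dist/util/ollama' path. A short usage sketch for the embeddings side (constructor options assumed from the OllamaEmbeddings API; values illustrative):

    // Hedged sketch: OllamaEmbeddings takes the same baseUrl/model style options.
    import { OllamaEmbeddings } from 'langchain/embeddings/ollama'

    const embeddings = new OllamaEmbeddings({
        baseUrl: 'http://localhost:11434',
        model: 'llama2'
    })
    embeddings.embedQuery('hello world').then((vec) => console.log(vec.length))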
@@ -26,8 +26,8 @@
         "@gomomento/sdk-core": "^1.51.1",
         "@google-ai/generativelanguage": "^0.2.1",
         "@huggingface/inference": "^2.6.1",
-        "@langchain/google-genai": "^0.0.3",
-        "@langchain/mistralai": "^0.0.3",
+        "@langchain/google-genai": "^0.0.6",
+        "@langchain/mistralai": "^0.0.6",
         "@notionhq/client": "^2.2.8",
         "@opensearch-project/opensearch": "^1.2.0",
         "@pinecone-database/pinecone": "^1.1.1",
@@ -52,7 +52,7 @@
         "html-to-text": "^9.0.5",
         "husky": "^8.0.3",
         "ioredis": "^5.3.2",
-        "langchain": "^0.0.196",
+        "langchain": "^0.0.213",
         "langfuse": "^1.2.0",
         "langfuse-langchain": "^1.0.31",
         "langsmith": "^0.0.49",
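Note on the caret ranges: for 0.0.x packages a caret pins the exact version, so "^0.0.213" resolves only to 0.0.213 and each bump in this hunk is an explicit upgrade rather than a floating range. A quick check with the semver package:

    // Caret semantics: ^0.0.x means >=0.0.x <0.0.(x+1), i.e. a single version.
    import semver from 'semver'

    console.log(semver.satisfies('0.0.213', '^0.0.213')) // true
    console.log(semver.satisfies('0.0.214', '^0.0.213')) // false: 0.0.x carets do not float
    console.log(semver.satisfies('1.2.9', '^1.2.0'))     // true: >=1.0.0 carets float on minor/patch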
@@ -818,7 +818,7 @@ export const findAvailableConfigs = (reactFlowNodes: IReactFlowNode[], component
  */
 export const isFlowValidForStream = (reactFlowNodes: IReactFlowNode[], endingNodeData: INodeData) => {
     const streamAvailableLLMs = {
-        'Chat Models': ['azureChatOpenAI', 'chatOpenAI', 'chatAnthropic', 'chatOllama', 'awsChatBedrock'],
+        'Chat Models': ['azureChatOpenAI', 'chatOpenAI', 'chatAnthropic', 'chatOllama', 'awsChatBedrock', 'chatMistralAI'],
         LLMs: ['azureOpenAI', 'openAI', 'ollama']
     }

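This last hunk registers chatMistralAI as stream-capable. A hedged sketch of the allow-list check (the helper below is hypothetical; the real isFlowValidForStream also inspects the flow graph, which this hunk does not show):

    const streamAvailableLLMs: Record<string, string[]> = {
        'Chat Models': ['azureChatOpenAI', 'chatOpenAI', 'chatAnthropic', 'chatOllama', 'awsChatBedrock', 'chatMistralAI'],
        LLMs: ['azureOpenAI', 'openAI', 'ollama']
    }

    // Hypothetical helper, not the Flowise implementation:
    const isStreamCapable = (category: string, nodeName: string): boolean =>
        streamAvailableLLMs[category]?.includes(nodeName) ?? false

    console.log(isStreamCapable('Chat Models', 'chatMistralAI')) // true after this change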