Merge pull request #1451 from FlowiseAI/chore/Upgrade-LC-0.0.213
Chore/update langchain version
This commit is contained in:
commit f1f2f71fe8
@@ -1,7 +1,6 @@
-import { OpenAIBaseInput } from 'langchain/dist/types/openai-types'
 import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
 import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
-import { AzureOpenAIInput, ChatOpenAI } from 'langchain/chat_models/openai'
+import { AzureOpenAIInput, ChatOpenAI, OpenAIChatInput } from 'langchain/chat_models/openai'
 import { BaseCache } from 'langchain/schema'
 import { BaseLLMParams } from 'langchain/llms/base'

@@ -123,7 +122,7 @@ class AzureChatOpenAI_ChatModels implements INode {
         const azureOpenAIApiDeploymentName = getCredentialParam('azureOpenAIApiDeploymentName', credentialData, nodeData)
         const azureOpenAIApiVersion = getCredentialParam('azureOpenAIApiVersion', credentialData, nodeData)

-        const obj: Partial<AzureOpenAIInput> & BaseLLMParams & Partial<OpenAIBaseInput> = {
+        const obj: Partial<AzureOpenAIInput> & BaseLLMParams & Partial<OpenAIChatInput> = {
             temperature: parseFloat(temperature),
             modelName,
             azureOpenAIApiKey,
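The AzureChatOpenAI_ChatModels change replaces OpenAIBaseInput, which had to be imported from the unstable langchain/dist/types/openai-types build path, with OpenAIChatInput, exported from the public langchain/chat_models/openai entry point. A minimal sketch of the resulting typing; the credential and deployment values below are placeholders, not values from this commit:

    import { AzureOpenAIInput, ChatOpenAI, OpenAIChatInput } from 'langchain/chat_models/openai'
    import { BaseLLMParams } from 'langchain/llms/base'

    // Same intersection type the node now uses: chat-specific fields such as
    // modelName are checked against the public OpenAIChatInput type.
    const obj: Partial<AzureOpenAIInput> & BaseLLMParams & Partial<OpenAIChatInput> = {
        temperature: 0.9,
        modelName: 'gpt-4', // placeholder
        azureOpenAIApiKey: process.env.AZURE_OPENAI_API_KEY, // placeholder credential
        azureOpenAIApiInstanceName: 'my-instance', // placeholder
        azureOpenAIApiDeploymentName: 'my-deployment', // placeholder
        azureOpenAIApiVersion: '2023-07-01-preview' // placeholder
    }
    const model = new ChatOpenAI(obj)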
@@ -124,13 +124,13 @@ class ChatMistral_ChatModels implements INode {
         const safeMode = nodeData.inputs?.safeMode as boolean
         const randomSeed = nodeData.inputs?.safeMode as string
         const overrideEndpoint = nodeData.inputs?.overrideEndpoint as string
-        // Waiting fix from langchain + mistral to enable streaming - https://github.com/mistralai/client-js/issues/18
-
+        const streaming = nodeData.inputs?.streaming as boolean
         const cache = nodeData.inputs?.cache as BaseCache

         const obj: ChatMistralAIInput = {
             apiKey: apiKey,
-            modelName: modelName
+            modelName: modelName,
+            streaming: streaming ?? true
         }

         if (maxOutputTokens) obj.maxTokens = parseInt(maxOutputTokens, 10)
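ChatMistral_ChatModels picks up streaming support, which the removed comment had parked on mistralai/client-js issue 18, and defaults it to true when the node input is unset. One pre-existing oddity the hunk leaves untouched: the randomSeed line reads nodeData.inputs?.safeMode, which looks like a copy-paste typo rather than something this commit introduces. A minimal sketch of the new construction, with placeholder credential and model values:

    import { ChatMistralAI, ChatMistralAIInput } from '@langchain/mistralai'

    const obj: ChatMistralAIInput = {
        apiKey: process.env.MISTRAL_API_KEY, // placeholder credential
        modelName: 'mistral-tiny', // placeholder model
        streaming: true // the node now defaults this to true when the input is unset
    }
    const model = new ChatMistralAI(obj)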
@@ -1,8 +1,7 @@
 import { INode, INodeData, INodeParams } from '../../../src/Interface'
 import { getBaseClasses } from '../../../src/utils'
-import { ChatOllama } from 'langchain/chat_models/ollama'
+import { ChatOllama, ChatOllamaInput } from 'langchain/chat_models/ollama'
 import { BaseCache } from 'langchain/schema'
-import { OllamaInput } from 'langchain/dist/util/ollama'
 import { BaseLLMParams } from 'langchain/llms/base'

 class ChatOllama_ChatModels implements INode {
@@ -209,7 +208,7 @@ class ChatOllama_ChatModels implements INode {

         const cache = nodeData.inputs?.cache as BaseCache

-        const obj: OllamaInput & BaseLLMParams = {
+        const obj: ChatOllamaInput & BaseLLMParams = {
             baseUrl,
             temperature: parseFloat(temperature),
             model: modelName
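ChatOllama_ChatModels follows the same cleanup pattern: the OllamaInput type from langchain/dist/util/ollama gives way to ChatOllamaInput, exported next to the class itself. A minimal sketch, assuming the default local Ollama endpoint and a placeholder model:

    import { ChatOllama, ChatOllamaInput } from 'langchain/chat_models/ollama'
    import { BaseLLMParams } from 'langchain/llms/base'

    const obj: ChatOllamaInput & BaseLLMParams = {
        baseUrl: 'http://localhost:11434', // Ollama's default endpoint
        temperature: 0.4,
        model: 'llama2' // placeholder model
    }
    const chat = new ChatOllama(obj)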
@@ -1,7 +1,7 @@
 import { INode, INodeData, INodeParams } from '../../../src/Interface'
 import { getBaseClasses } from '../../../src/utils'
+import { OllamaInput } from 'langchain/llms/ollama'
 import { OllamaEmbeddings } from 'langchain/embeddings/ollama'
-import { OllamaInput } from 'langchain/dist/util/ollama'

 class OllamaEmbedding_Embeddings implements INode {
     label: string
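OllamaEmbedding_Embeddings keeps using OllamaInput but now imports it from the public langchain/llms/ollama entry point; runtime behavior should be unchanged. A minimal usage sketch under that assumption, with placeholder values:

    import { OllamaEmbeddings } from 'langchain/embeddings/ollama'
    import { OllamaInput } from 'langchain/llms/ollama'

    const obj: Partial<OllamaInput> = {
        baseUrl: 'http://localhost:11434', // Ollama's default endpoint
        model: 'llama2' // placeholder model
    }
    const embeddings = new OllamaEmbeddings(obj)
    // embeddings.embedQuery('hello') resolves to a number[] vector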
@@ -1,8 +1,7 @@
 import { INode, INodeData, INodeParams } from '../../../src/Interface'
 import { getBaseClasses } from '../../../src/utils'
-import { Ollama } from 'langchain/llms/ollama'
+import { Ollama, OllamaInput } from 'langchain/llms/ollama'
 import { BaseCache } from 'langchain/schema'
-import { OllamaInput } from 'langchain/dist/util/ollama'
 import { BaseLLMParams } from 'langchain/llms/base'

 class Ollama_LLMs implements INode {
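Ollama_LLMs gets the identical treatment, with OllamaInput re-exported beside the Ollama class. A short sketch with placeholder values:

    import { Ollama } from 'langchain/llms/ollama'

    const llm = new Ollama({
        baseUrl: 'http://localhost:11434', // Ollama's default endpoint
        model: 'llama2' // placeholder model
    })
    // const text = await llm.call('Why is the sky blue?') // inside an async context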
@@ -26,8 +26,8 @@
         "@gomomento/sdk-core": "^1.51.1",
         "@google-ai/generativelanguage": "^0.2.1",
         "@huggingface/inference": "^2.6.1",
-        "@langchain/google-genai": "^0.0.3",
-        "@langchain/mistralai": "^0.0.3",
+        "@langchain/google-genai": "^0.0.6",
+        "@langchain/mistralai": "^0.0.6",
         "@notionhq/client": "^2.2.8",
         "@opensearch-project/opensearch": "^1.2.0",
         "@pinecone-database/pinecone": "^1.1.1",
@@ -52,7 +52,7 @@
         "html-to-text": "^9.0.5",
         "husky": "^8.0.3",
         "ioredis": "^5.3.2",
-        "langchain": "^0.0.196",
+        "langchain": "^0.0.213",
         "langfuse": "^1.2.0",
         "langfuse-langchain": "^1.0.31",
         "langsmith": "^0.0.49",
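A side note on these version bumps: under npm's semver rules a caret range below 0.1.0, such as ^0.0.213, matches only that exact version, so each of these entries is effectively a pin until the next manual bump. A quick illustration using the semver package (not a dependency this PR touches):

    import semver from 'semver'

    semver.satisfies('0.0.213', '^0.0.213') // true
    semver.satisfies('0.0.214', '^0.0.213') // false: ^0.0.x allows no drift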
@@ -818,7 +818,7 @@ export const findAvailableConfigs = (reactFlowNodes: IReactFlowNode[], component
  */
 export const isFlowValidForStream = (reactFlowNodes: IReactFlowNode[], endingNodeData: INodeData) => {
     const streamAvailableLLMs = {
-        'Chat Models': ['azureChatOpenAI', 'chatOpenAI', 'chatAnthropic', 'chatOllama', 'awsChatBedrock'],
+        'Chat Models': ['azureChatOpenAI', 'chatOpenAI', 'chatAnthropic', 'chatOllama', 'awsChatBedrock', 'chatMistralAI'],
         LLMs: ['azureOpenAI', 'openAI', 'ollama']
     }
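Finally, chatMistralAI joins the streaming allow-list in isFlowValidForStream. The hunk shows only the table, so the lookup below is a hypothetical illustration of how such an allow-list is typically consulted, not the function's actual body:

    const streamAvailableLLMs: Record<string, string[]> = {
        'Chat Models': ['azureChatOpenAI', 'chatOpenAI', 'chatAnthropic', 'chatOllama', 'awsChatBedrock', 'chatMistralAI'],
        LLMs: ['azureOpenAI', 'openAI', 'ollama']
    }

    // Hypothetical check: streaming is allowed only when the ending node's
    // category/name pair appears in the table.
    const canStream = (category: string, name: string): boolean =>
        (streamAvailableLLMs[category] ?? []).includes(name)

    canStream('Chat Models', 'chatMistralAI') // true after this change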