Feature/update openai version, add reasoning effort param, add o3 mini (#3973)

* update openai version, add reasoning effort param (usage sketch below)

* update azure

* add filter for pinecone llamaindex

* update graph cypher qa chain
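
For context, a minimal sketch of what the new parameter enables once @langchain/openai is on 0.4.x. This is hypothetical standalone usage, not code from the diff:

import { ChatOpenAI } from '@langchain/openai'

// reasoningEffort accepts 'low' | 'medium' | 'high' and is only honored by
// reasoning models (the o1/o3 family). The diff below also drops `temperature`
// for o3-mini, which does not accept it.
const model = new ChatOpenAI({
    modelName: 'o3-mini',
    openAIApiKey: process.env.OPENAI_API_KEY,
    reasoningEffort: 'medium'
})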
Henry Heng authored on 2025-02-04 08:43:27 +00:00; committed by GitHub
commit a0b4abdd13 (parent 2a0e712b7d)
GPG Key ID: B5690EEEBB952194 (no known key found for this signature in database)
24 changed files with 498 additions and 289 deletions


@@ -71,9 +71,9 @@
     },
     "resolutions": {
         "@google/generative-ai": "^0.15.0",
-        "@langchain/core": "0.3.29",
+        "@langchain/core": "0.3.37",
         "@qdrant/openapi-typescript-fetch": "1.2.6",
-        "openai": "4.57.3",
+        "openai": "4.82.0",
         "protobufjs": "7.4.0"
     },
     "eslintIgnore": [


@@ -230,6 +230,14 @@
     {
         "name": "azureChatOpenAI",
         "models": [
+            {
+                "label": "o3-mini",
+                "name": "o3-mini"
+            },
+            {
+                "label": "o1",
+                "name": "o1"
+            },
             {
                 "label": "o1-preview",
                 "name": "o1-preview"
@@ -397,6 +405,10 @@
     {
         "name": "chatGoogleGenerativeAI",
         "models": [
+            {
+                "label": "gemini-2.0-flash-exp",
+                "name": "gemini-2.0-flash-exp"
+            },
             {
                 "label": "gemini-1.5-flash-latest",
                 "name": "gemini-1.5-flash-latest"


@@ -23,7 +23,7 @@ class GraphCypherQA_Chain implements INode {
     constructor(fields?: { sessionId?: string }) {
         this.label = 'Graph Cypher QA Chain'
         this.name = 'graphCypherQAChain'
-        this.version = 1.0
+        this.version = 1.1
         this.type = 'GraphCypherQAChain'
         this.icon = 'graphqa.svg'
         this.category = 'Chains'
@@ -47,7 +47,8 @@ class GraphCypherQA_Chain implements INode {
                 name: 'cypherPrompt',
                 optional: true,
                 type: 'BasePromptTemplate',
-                description: 'Prompt template for generating Cypher queries. Must include {schema} and {question} variables'
+                description:
+                    'Prompt template for generating Cypher queries. Must include {schema} and {question} variables. If not provided, default prompt will be used.'
             },
             {
                 label: 'Cypher Generation Model',
@@ -61,7 +62,8 @@ class GraphCypherQA_Chain implements INode {
                 name: 'qaPrompt',
                 optional: true,
                 type: 'BasePromptTemplate',
-                description: 'Prompt template for generating answers. Must include {context} and {question} variables'
+                description:
+                    'Prompt template for generating answers. Must include {context} and {question} variables. If not provided, default prompt will be used.'
             },
             {
                 label: 'QA Model',
@@ -111,6 +113,10 @@ class GraphCypherQA_Chain implements INode {
         const returnDirect = nodeData.inputs?.returnDirect as boolean
         const output = nodeData.outputs?.output as string

+        if (!model) {
+            throw new Error('Language Model is required')
+        }
+
         // Handle prompt values if they exist
         let cypherPromptTemplate: PromptTemplate | FewShotPromptTemplate | undefined
         let qaPromptTemplate: PromptTemplate | undefined
@@ -147,10 +153,6 @@ class GraphCypherQA_Chain implements INode {
             })
         }

-        if ((!cypherModel || !qaModel) && !model) {
-            throw new Error('Language Model is required when Cypher Model or QA Model are not provided')
-        }
-
         // Validate required variables in prompts
         if (
             cypherPromptTemplate &&
@@ -165,13 +167,13 @@ class GraphCypherQA_Chain implements INode {
             returnDirect
         }

-        if (cypherModel && cypherPromptTemplate) {
-            fromLLMInput['cypherLLM'] = cypherModel
+        if (cypherPromptTemplate) {
+            fromLLMInput['cypherLLM'] = cypherModel ?? model
             fromLLMInput['cypherPrompt'] = cypherPromptTemplate
         }

-        if (qaModel && qaPromptTemplate) {
-            fromLLMInput['qaLLM'] = qaModel
+        if (qaPromptTemplate) {
+            fromLLMInput['qaLLM'] = qaModel ?? model
             fromLLMInput['qaPrompt'] = qaPromptTemplate
         }


@@ -1,11 +1,9 @@
-import { AzureOpenAIInput, ChatOpenAI as LangchainChatOpenAI, OpenAIChatInput, ClientOptions, LegacyOpenAIInput } from '@langchain/openai'
+import { AzureOpenAIInput, AzureChatOpenAI as LangchainAzureChatOpenAI, ChatOpenAIFields, OpenAIClient } from '@langchain/openai'
 import { BaseCache } from '@langchain/core/caches'
-import { BaseLLMParams } from '@langchain/core/language_models/llms'
 import { ICommonObject, IMultiModalOption, INode, INodeData, INodeOptionsValue, INodeParams } from '../../../src/Interface'
 import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
-import { ChatOpenAI } from '../ChatOpenAI/FlowiseChatOpenAI'
 import { getModels, MODEL_TYPE } from '../../../src/modelLoader'
-import { BaseChatModelParams } from '@langchain/core/language_models/chat_models'
+import { AzureChatOpenAI } from './FlowiseAzureChatOpenAI'

 const serverCredentialsExists =
     !!process.env.AZURE_OPENAI_API_KEY &&
@@ -33,7 +31,7 @@ class AzureChatOpenAI_ChatModels implements INode {
         this.icon = 'Azure.svg'
         this.category = 'Chat Models'
         this.description = 'Wrapper around Azure OpenAI large language models that use the Chat endpoint'
-        this.baseClasses = [this.type, ...getBaseClasses(LangchainChatOpenAI)]
+        this.baseClasses = [this.type, ...getBaseClasses(LangchainAzureChatOpenAI)]
         this.credential = {
             label: 'Connect Credential',
             name: 'credential',
@@ -155,6 +153,29 @@ class AzureChatOpenAI_ChatModels implements INode {
                 default: 'low',
                 optional: false,
                 additionalParams: true
+            },
+            {
+                label: 'Reasoning Effort',
+                description: 'Constrains effort on reasoning for reasoning models. Only applicable for o1 models',
+                name: 'reasoningEffort',
+                type: 'options',
+                options: [
+                    {
+                        label: 'Low',
+                        name: 'low'
+                    },
+                    {
+                        label: 'Medium',
+                        name: 'medium'
+                    },
+                    {
+                        label: 'High',
+                        name: 'high'
+                    }
+                ],
+                default: 'low',
+                optional: false,
+                additionalParams: true
             }
         ]
     }
@@ -178,6 +199,7 @@ class AzureChatOpenAI_ChatModels implements INode {
         const topP = nodeData.inputs?.topP as string
         const basePath = nodeData.inputs?.basepath as string
         const baseOptions = nodeData.inputs?.baseOptions
+        const reasoningEffort = nodeData.inputs?.reasoningEffort as OpenAIClient.Chat.ChatCompletionReasoningEffort

         const credentialData = await getCredentialData(nodeData.credential ?? '', options)
         const azureOpenAIApiKey = getCredentialParam('azureOpenAIApiKey', credentialData, nodeData)
@@ -188,10 +210,7 @@ class AzureChatOpenAI_ChatModels implements INode {
         const allowImageUploads = nodeData.inputs?.allowImageUploads as boolean
         const imageResolution = nodeData.inputs?.imageResolution as string

-        const obj: Partial<AzureOpenAIInput> &
-            BaseLLMParams &
-            Partial<OpenAIChatInput> &
-            BaseChatModelParams & { configuration?: ClientOptions & LegacyOpenAIInput } = {
+        const obj: ChatOpenAIFields & Partial<AzureOpenAIInput> = {
             temperature: parseFloat(temperature),
             modelName,
             azureOpenAIApiKey,
@@ -218,6 +237,12 @@ class AzureChatOpenAI_ChatModels implements INode {
                 console.error('Error parsing base options', exception)
             }
         }

+        if (modelName === 'o3-mini') {
+            delete obj.temperature
+        }
+        if ((modelName.includes('o1') || modelName.includes('o3')) && reasoningEffort) {
+            obj.reasoningEffort = reasoningEffort
+        }

         const multiModalOption: IMultiModalOption = {
             image: {
@@ -226,7 +251,7 @@ class AzureChatOpenAI_ChatModels implements INode {
             }
         }

-        const model = new ChatOpenAI(nodeData.id, obj)
+        const model = new AzureChatOpenAI(nodeData.id, obj)
         model.setMultiModalOption(multiModalOption)

         return model
     }


@@ -0,0 +1,41 @@
+import { AzureChatOpenAI as LangchainAzureChatOpenAI, OpenAIChatInput, AzureOpenAIInput, ClientOptions } from '@langchain/openai'
+import { IMultiModalOption, IVisionChatModal } from '../../../src'
+import { BaseChatModelParams } from '@langchain/core/language_models/chat_models'
+
+export class AzureChatOpenAI extends LangchainAzureChatOpenAI implements IVisionChatModal {
+    configuredModel: string
+    configuredMaxToken?: number
+    multiModalOption: IMultiModalOption
+    id: string
+
+    constructor(
+        id: string,
+        fields?: Partial<OpenAIChatInput> &
+            Partial<AzureOpenAIInput> & {
+                openAIApiKey?: string
+                openAIApiVersion?: string
+                openAIBasePath?: string
+                deploymentName?: string
+            } & BaseChatModelParams & {
+                configuration?: ClientOptions
+            }
+    ) {
+        super(fields)
+        this.id = id
+        this.configuredModel = fields?.modelName ?? ''
+        this.configuredMaxToken = fields?.maxTokens
+    }
+
+    revertToOriginalModel(): void {
+        this.modelName = this.configuredModel
+        this.maxTokens = this.configuredMaxToken
+    }
+
+    setMultiModalOption(multiModalOption: IMultiModalOption): void {
+        this.multiModalOption = multiModalOption
+    }
+
+    setVisionModel(): void {
+        // pass
+    }
+}


@@ -1,6 +1,5 @@
-import { ChatOpenAI, OpenAIChatInput } from '@langchain/openai'
+import { ChatOpenAI, ChatOpenAIFields } from '@langchain/openai'
 import { BaseCache } from '@langchain/core/caches'
-import { BaseLLMParams } from '@langchain/core/language_models/llms'
 import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
 import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
@@ -135,7 +134,7 @@ class ChatCerebras_ChatModels implements INode {
         const credentialData = await getCredentialData(nodeData.credential ?? '', options)
         const cerebrasAIApiKey = getCredentialParam('cerebrasApiKey', credentialData, nodeData)

-        const obj: Partial<OpenAIChatInput> & BaseLLMParams = {
+        const obj: ChatOpenAIFields = {
             temperature: parseFloat(temperature),
             modelName,
             openAIApiKey: cerebrasAIApiKey,
@@ -158,10 +157,15 @@ class ChatCerebras_ChatModels implements INode {
                 throw new Error("Invalid JSON in the ChatCerebras's BaseOptions: " + exception)
             }
         }

-        const model = new ChatOpenAI(obj, {
-            basePath,
-            baseOptions: parsedBaseOptions
-        })
+        if (basePath || parsedBaseOptions) {
+            obj.configuration = {
+                baseURL: basePath,
+                defaultHeaders: parsedBaseOptions
+            }
+        }
+
+        const model = new ChatOpenAI(obj)
         return model
     }
 }


@@ -1,6 +1,5 @@
-import { OpenAIChatInput, ChatOpenAI } from '@langchain/openai'
+import { ChatOpenAI, ChatOpenAIFields } from '@langchain/openai'
 import { BaseCache } from '@langchain/core/caches'
-import { BaseLLMParams } from '@langchain/core/language_models/llms'
 import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
 import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
@@ -108,7 +107,7 @@ class ChatLocalAI_ChatModels implements INode {
         const cache = nodeData.inputs?.cache as BaseCache

-        const obj: Partial<OpenAIChatInput> & BaseLLMParams & { openAIApiKey?: string } = {
+        const obj: ChatOpenAIFields = {
             temperature: parseFloat(temperature),
             modelName,
             openAIApiKey: 'sk-',
@@ -120,8 +119,9 @@ class ChatLocalAI_ChatModels implements INode {
         if (timeout) obj.timeout = parseInt(timeout, 10)
         if (cache) obj.cache = cache
         if (localAIApiKey) obj.openAIApiKey = localAIApiKey
+        if (basePath) obj.configuration = { baseURL: basePath }

-        const model = new ChatOpenAI(obj, { basePath })
+        const model = new ChatOpenAI(obj)

         return model
     }


@@ -1,6 +1,5 @@
-import { ChatOpenAI, OpenAIChatInput } from '@langchain/openai'
+import { ChatOpenAI, ChatOpenAIFields } from '@langchain/openai'
 import { BaseCache } from '@langchain/core/caches'
-import { BaseLLMParams } from '@langchain/core/language_models/llms'
 import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
 import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
@@ -134,7 +133,7 @@ class ChatNvdiaNIM_ChatModels implements INode {
         const credentialData = await getCredentialData(nodeData.credential ?? '', options)
         const nvdiaNIMApiKey = getCredentialParam('nvdiaNIMApiKey', credentialData, nodeData)

-        const obj: Partial<OpenAIChatInput> & BaseLLMParams & { nvdiaNIMApiKey?: string } = {
+        const obj: ChatOpenAIFields & { nvdiaNIMApiKey?: string } = {
             temperature: parseFloat(temperature),
             modelName,
             openAIApiKey: nvdiaNIMApiKey,
@@ -154,14 +153,18 @@ class ChatNvdiaNIM_ChatModels implements INode {
             try {
                 parsedBaseOptions = typeof baseOptions === 'object' ? baseOptions : JSON.parse(baseOptions)
             } catch (exception) {
-                throw new Error("Invalid JSON in the ChatOpenAI's BaseOptions: " + exception)
+                throw new Error("Invalid JSON in the ChatNvidiaNIM's baseOptions: " + exception)
             }
         }

-        const model = new ChatOpenAI(obj, {
-            basePath,
-            baseOptions: parsedBaseOptions
-        })
+        if (basePath || parsedBaseOptions) {
+            obj.configuration = {
+                baseURL: basePath,
+                defaultHeaders: parsedBaseOptions
+            }
+        }
+
+        const model = new ChatOpenAI(obj)
         return model
     }
 }


@@ -1,7 +1,5 @@
-import type { ClientOptions } from 'openai'
-import { ChatOpenAI as LangchainChatOpenAI, OpenAIChatInput, AzureOpenAIInput, LegacyOpenAIInput } from '@langchain/openai'
+import { ChatOpenAI as LangchainChatOpenAI, ChatOpenAIFields, OpenAIClient } from '@langchain/openai'
 import { BaseCache } from '@langchain/core/caches'
-import { BaseChatModelParams } from '@langchain/core/language_models/chat_models'
 import { ICommonObject, IMultiModalOption, INode, INodeData, INodeOptionsValue, INodeParams } from '../../../src/Interface'
 import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
 import { ChatOpenAI } from './FlowiseChatOpenAI'
@@ -23,7 +21,7 @@ class ChatOpenAI_ChatModels implements INode {
     constructor() {
         this.label = 'ChatOpenAI'
         this.name = 'chatOpenAI'
-        this.version = 8.0
+        this.version = 8.1
         this.type = 'ChatOpenAI'
         this.icon = 'openai.svg'
         this.category = 'Chat Models'
@@ -105,6 +103,24 @@ class ChatOpenAI_ChatModels implements INode {
                 optional: true,
                 additionalParams: true
             },
+            {
+                label: 'Strict Tool Calling',
+                name: 'strictToolCalling',
+                type: 'boolean',
+                description:
+                    'Whether the model supports the `strict` argument when passing in tools. If not specified, the `strict` argument will not be passed to OpenAI.',
+                optional: true,
+                additionalParams: true
+            },
+            {
+                label: 'Stop Sequence',
+                name: 'stopSequence',
+                type: 'string',
+                rows: 4,
+                optional: true,
+                description: 'List of stop words to use when generating. Use comma to separate multiple stop words.',
+                additionalParams: true
+            },
             {
                 label: 'BasePath',
                 name: 'basepath',
@@ -119,15 +135,6 @@ class ChatOpenAI_ChatModels implements INode {
                 optional: true,
                 additionalParams: true
             },
-            {
-                label: 'Stop Sequence',
-                name: 'stopSequence',
-                type: 'string',
-                rows: 4,
-                optional: true,
-                description: 'List of stop words to use when generating. Use comma to separate multiple stop words.',
-                additionalParams: true
-            },
             {
                 label: 'BaseOptions',
                 name: 'baseOptions',
@@ -166,6 +173,29 @@ class ChatOpenAI_ChatModels implements INode {
                 default: 'low',
                 optional: false,
                 additionalParams: true
+            },
+            {
+                label: 'Reasoning Effort',
+                description: 'Constrains effort on reasoning for reasoning models. Only applicable for o1 models',
+                name: 'reasoningEffort',
+                type: 'options',
+                options: [
+                    {
+                        label: 'Low',
+                        name: 'low'
+                    },
+                    {
+                        label: 'Medium',
+                        name: 'medium'
+                    },
+                    {
+                        label: 'High',
+                        name: 'high'
+                    }
+                ],
+                default: 'low',
+                optional: false,
+                additionalParams: true
             }
         ]
     }
@@ -187,9 +217,11 @@ class ChatOpenAI_ChatModels implements INode {
         const timeout = nodeData.inputs?.timeout as string
         const stopSequence = nodeData.inputs?.stopSequence as string
         const streaming = nodeData.inputs?.streaming as boolean
+        const strictToolCalling = nodeData.inputs?.strictToolCalling as boolean
         const basePath = nodeData.inputs?.basepath as string
         const proxyUrl = nodeData.inputs?.proxyUrl as string
         const baseOptions = nodeData.inputs?.baseOptions
+        const reasoningEffort = nodeData.inputs?.reasoningEffort as OpenAIClient.Chat.ChatCompletionReasoningEffort

         const allowImageUploads = nodeData.inputs?.allowImageUploads as boolean
         const imageResolution = nodeData.inputs?.imageResolution as string
@@ -202,9 +234,7 @@ class ChatOpenAI_ChatModels implements INode {
         const cache = nodeData.inputs?.cache as BaseCache

-        const obj: Partial<OpenAIChatInput> &
-            Partial<AzureOpenAIInput> &
-            BaseChatModelParams & { configuration?: ClientOptions & LegacyOpenAIInput } = {
+        const obj: ChatOpenAIFields = {
             temperature: parseFloat(temperature),
             modelName,
             openAIApiKey,
@@ -214,6 +244,9 @@ class ChatOpenAI_ChatModels implements INode {
         if (modelName === 'o3-mini') {
             delete obj.temperature
         }
+        if ((modelName.includes('o1') || modelName.includes('o3')) && reasoningEffort) {
+            obj.reasoningEffort = reasoningEffort
+        }
         if (maxTokens) obj.maxTokens = parseInt(maxTokens, 10)
         if (topP) obj.topP = parseFloat(topP)
         if (frequencyPenalty) obj.frequencyPenalty = parseFloat(frequencyPenalty)
@@ -224,6 +257,7 @@ class ChatOpenAI_ChatModels implements INode {
             const stopSequenceArray = stopSequence.split(',').map((item) => item.trim())
             obj.stop = stopSequenceArray
         }
+        if (strictToolCalling) obj.supportsStrictToolCalling = strictToolCalling

         let parsedBaseOptions: any | undefined = undefined
@@ -238,7 +272,7 @@ class ChatOpenAI_ChatModels implements INode {
         if (basePath || parsedBaseOptions) {
             obj.configuration = {
                 baseURL: basePath,
-                baseOptions: parsedBaseOptions
+                defaultHeaders: parsedBaseOptions
             }
         }


@@ -1,6 +1,4 @@
-import type { ClientOptions } from 'openai'
-import { ChatOpenAI as LangchainChatOpenAI, OpenAIChatInput, LegacyOpenAIInput, AzureOpenAIInput } from '@langchain/openai'
-import { BaseChatModelParams } from '@langchain/core/language_models/chat_models'
+import { ChatOpenAI as LangchainChatOpenAI, ChatOpenAIFields } from '@langchain/openai'
 import { IMultiModalOption, IVisionChatModal } from '../../../src'

 export class ChatOpenAI extends LangchainChatOpenAI implements IVisionChatModal {
@@ -9,15 +7,8 @@ export class ChatOpenAI extends LangchainChatOpenAI implements IVisionChatModal
     multiModalOption: IMultiModalOption
     id: string

-    constructor(
-        id: string,
-        fields?: Partial<OpenAIChatInput> &
-            Partial<AzureOpenAIInput> &
-            BaseChatModelParams & { configuration?: ClientOptions & LegacyOpenAIInput },
-        /** @deprecated */
-        configuration?: ClientOptions & LegacyOpenAIInput
-    ) {
-        super(fields, configuration)
+    constructor(id: string, fields?: ChatOpenAIFields) {
+        super(fields)
         this.id = id
         this.configuredModel = fields?.modelName ?? ''
         this.configuredMaxToken = fields?.maxTokens


@@ -1,6 +1,5 @@
-import { ChatOpenAI, OpenAIChatInput } from '@langchain/openai'
+import { ChatOpenAI, ChatOpenAIFields } from '@langchain/openai'
 import { BaseCache } from '@langchain/core/caches'
-import { BaseLLMParams } from '@langchain/core/language_models/llms'
 import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
 import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
@@ -134,7 +133,7 @@ class ChatOpenAICustom_ChatModels implements INode {
         const credentialData = await getCredentialData(nodeData.credential ?? '', options)
         const openAIApiKey = getCredentialParam('openAIApiKey', credentialData, nodeData)

-        const obj: Partial<OpenAIChatInput> & BaseLLMParams & { openAIApiKey?: string } = {
+        const obj: ChatOpenAIFields = {
             temperature: parseFloat(temperature),
             modelName,
             openAIApiKey,
@@ -157,10 +156,15 @@ class ChatOpenAICustom_ChatModels implements INode {
                 throw new Error("Invalid JSON in the ChatOpenAI's BaseOptions: " + exception)
             }
         }

-        const model = new ChatOpenAI(obj, {
-            basePath,
-            baseOptions: parsedBaseOptions
-        })
+        if (basePath || parsedBaseOptions) {
+            obj.configuration = {
+                baseURL: basePath,
+                defaultHeaders: parsedBaseOptions
+            }
+        }
+
+        const model = new ChatOpenAI(obj)
         return model
     }
 }


@@ -1,6 +1,5 @@
-import { ChatOpenAI, OpenAIChatInput } from '@langchain/openai'
+import { ChatOpenAI, ChatOpenAIFields } from '@langchain/openai'
 import { BaseCache } from '@langchain/core/caches'
-import { BaseLLMParams } from '@langchain/core/language_models/llms'
 import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
 import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
@@ -135,7 +134,7 @@ class ChatOpenRouter_ChatModels implements INode {
         const credentialData = await getCredentialData(nodeData.credential ?? '', options)
         const openRouterApiKey = getCredentialParam('openRouterApiKey', credentialData, nodeData)

-        const obj: Partial<OpenAIChatInput> & BaseLLMParams = {
+        const obj: ChatOpenAIFields = {
             temperature: parseFloat(temperature),
             modelName,
             openAIApiKey: openRouterApiKey,
@@ -158,10 +157,15 @@ class ChatOpenRouter_ChatModels implements INode {
                 throw new Error("Invalid JSON in the ChatCerebras's BaseOptions: " + exception)
             }
         }

-        const model = new ChatOpenAI(obj, {
-            basePath,
-            baseOptions: parsedBaseOptions
-        })
+        if (basePath || parsedBaseOptions) {
+            obj.configuration = {
+                baseURL: basePath,
+                defaultHeaders: parsedBaseOptions
+            }
+        }
+
+        const model = new ChatOpenAI(obj)
         return model
     }
 }


@@ -1,7 +1,5 @@
 import { BaseCache } from '@langchain/core/caches'
-import { BaseChatModelParams } from '@langchain/core/language_models/chat_models'
-import { ChatOpenAI, LegacyOpenAIInput, OpenAIChatInput } from '@langchain/openai'
-import type { ClientOptions } from 'openai'
+import { ChatOpenAI, ChatOpenAIFields } from '@langchain/openai'
 import { ICommonObject, INode, INodeData, INodeOptionsValue, INodeParams } from '../../../src/Interface'
 import { getModels, MODEL_TYPE } from '../../../src/modelLoader'
 import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
@@ -151,7 +149,7 @@ class Deepseek_ChatModels implements INode {
         const cache = nodeData.inputs?.cache as BaseCache

-        const obj: Partial<OpenAIChatInput> & BaseChatModelParams & { configuration?: ClientOptions & LegacyOpenAIInput } = {
+        const obj: ChatOpenAIFields = {
             temperature: parseFloat(temperature),
             modelName,
             openAIApiKey,


@@ -1,4 +1,4 @@
-import { AzureOpenAIInput, ClientOptions, LegacyOpenAIInput, OpenAIEmbeddings, OpenAIEmbeddingsParams } from '@langchain/openai'
+import { AzureOpenAIInput, ClientOptions, AzureOpenAIEmbeddings, OpenAIEmbeddingsParams } from '@langchain/openai'
 import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
 import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
@@ -28,7 +28,7 @@ class AzureOpenAIEmbedding_Embeddings implements INode {
         this.icon = 'Azure.svg'
         this.category = 'Embeddings'
         this.description = 'Azure OpenAI API to generate embeddings for a given text'
-        this.baseClasses = [this.type, ...getBaseClasses(OpenAIEmbeddings)]
+        this.baseClasses = [this.type, ...getBaseClasses(AzureOpenAIEmbeddings)]
         this.credential = {
             label: 'Connect Credential',
             name: 'credential',
@@ -81,7 +81,7 @@ class AzureOpenAIEmbedding_Embeddings implements INode {
         const azureOpenAIApiDeploymentName = getCredentialParam('azureOpenAIApiDeploymentName', credentialData, nodeData)
         const azureOpenAIApiVersion = getCredentialParam('azureOpenAIApiVersion', credentialData, nodeData)

-        const obj: Partial<OpenAIEmbeddingsParams> & Partial<AzureOpenAIInput> & { configuration?: ClientOptions & LegacyOpenAIInput } = {
+        const obj: Partial<OpenAIEmbeddingsParams> & Partial<AzureOpenAIInput> & { configuration?: ClientOptions } = {
             azureOpenAIApiKey,
             azureOpenAIApiInstanceName,
             azureOpenAIApiDeploymentName,
@@ -102,7 +102,7 @@ class AzureOpenAIEmbedding_Embeddings implements INode {
             }
         }

-        const model = new OpenAIEmbeddings(obj)
+        const model = new AzureOpenAIEmbeddings(obj)
         return model
     }
 }


@@ -1,4 +1,4 @@
-import { OpenAIEmbeddings, OpenAIEmbeddingsParams } from '@langchain/openai'
+import { ClientOptions, OpenAIEmbeddings, OpenAIEmbeddingsParams } from '@langchain/openai'
 import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
 import { getCredentialData, getCredentialParam } from '../../../src/utils'
@@ -53,14 +53,16 @@ class LocalAIEmbedding_Embeddings implements INode {
         const credentialData = await getCredentialData(nodeData.credential ?? '', options)
         const localAIApiKey = getCredentialParam('localAIApiKey', credentialData, nodeData)

-        const obj: Partial<OpenAIEmbeddingsParams> & { openAIApiKey?: string } = {
+        const obj: Partial<OpenAIEmbeddingsParams> & { openAIApiKey?: string; configuration?: ClientOptions } = {
             modelName,
             openAIApiKey: 'sk-'
         }

         if (localAIApiKey) obj.openAIApiKey = localAIApiKey
+        if (basePath) obj.configuration = { baseURL: basePath }

-        const model = new OpenAIEmbeddings(obj, { basePath })
+        const model = new OpenAIEmbeddings(obj)

         return model
     }


@@ -1,4 +1,4 @@
-import { OpenAIEmbeddings, OpenAIEmbeddingsParams } from '@langchain/openai'
+import { ClientOptions, OpenAIEmbeddings, OpenAIEmbeddingsParams } from '@langchain/openai'
 import { ICommonObject, INode, INodeData, INodeOptionsValue, INodeParams } from '../../../src/Interface'
 import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
 import { MODEL_TYPE, getModels } from '../../../src/modelLoader'
@@ -97,7 +97,7 @@ class OpenAIEmbedding_Embeddings implements INode {
         const credentialData = await getCredentialData(nodeData.credential ?? '', options)
         const openAIApiKey = getCredentialParam('openAIApiKey', credentialData, nodeData)

-        const obj: Partial<OpenAIEmbeddingsParams> & { openAIApiKey?: string } = {
+        const obj: Partial<OpenAIEmbeddingsParams> & { openAIApiKey?: string; configuration?: ClientOptions } = {
             openAIApiKey,
             modelName
         }
@@ -107,7 +107,13 @@ class OpenAIEmbedding_Embeddings implements INode {
         if (timeout) obj.timeout = parseInt(timeout, 10)
         if (dimensions) obj.dimensions = parseInt(dimensions, 10)

-        const model = new OpenAIEmbeddings(obj, { basePath })
+        if (basePath) {
+            obj.configuration = {
+                baseURL: basePath
+            }
+        }
+
+        const model = new OpenAIEmbeddings(obj)
         return model
     }
 }


@@ -1,4 +1,4 @@
-import { OpenAIEmbeddings, OpenAIEmbeddingsParams } from '@langchain/openai'
+import { ClientOptions, OpenAIEmbeddings, OpenAIEmbeddingsParams } from '@langchain/openai'
 import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
 import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
@@ -93,7 +93,7 @@ class OpenAIEmbeddingCustom_Embeddings implements INode {
         const credentialData = await getCredentialData(nodeData.credential ?? '', options)
         const openAIApiKey = getCredentialParam('openAIApiKey', credentialData, nodeData)

-        const obj: Partial<OpenAIEmbeddingsParams> & { openAIApiKey?: string } = {
+        const obj: Partial<OpenAIEmbeddingsParams> & { openAIApiKey?: string; configuration?: ClientOptions } = {
             openAIApiKey
         }
@@ -112,7 +112,14 @@ class OpenAIEmbeddingCustom_Embeddings implements INode {
             }
         }

-        const model = new OpenAIEmbeddings(obj, { baseURL: basePath, defaultHeaders: parsedBaseOptions })
+        if (basePath || parsedBaseOptions) {
+            obj.configuration = {
+                baseURL: basePath,
+                defaultHeaders: parsedBaseOptions
+            }
+        }
+
+        const model = new OpenAIEmbeddings(obj)
         return model
     }
 }


@@ -1,4 +1,4 @@
-import { AzureOpenAIInput, OpenAI, OpenAIInput } from '@langchain/openai'
+import { AzureOpenAIInput, AzureOpenAI, OpenAIInput } from '@langchain/openai'
 import { BaseCache } from '@langchain/core/caches'
 import { BaseLLMParams } from '@langchain/core/language_models/llms'
 import { ICommonObject, INode, INodeData, INodeOptionsValue, INodeParams } from '../../../src/Interface'
@@ -31,7 +31,7 @@ class AzureOpenAI_LLMs implements INode {
         this.icon = 'Azure.svg'
         this.category = 'LLMs'
         this.description = 'Wrapper around Azure OpenAI large language models'
-        this.baseClasses = [this.type, ...getBaseClasses(OpenAI)]
+        this.baseClasses = [this.type, ...getBaseClasses(AzureOpenAI)]
         this.credential = {
             label: 'Connect Credential',
             name: 'credential',
@@ -165,7 +165,7 @@ class AzureOpenAI_LLMs implements INode {
         if (cache) obj.cache = cache
         if (basePath) obj.azureOpenAIBasePath = basePath

-        const model = new OpenAI(obj)
+        const model = new AzureOpenAI(obj)
         return model
     }
 }


@@ -1,4 +1,4 @@
-import { OpenAI, OpenAIInput } from '@langchain/openai'
+import { ClientOptions, OpenAI, OpenAIInput } from '@langchain/openai'
 import { BaseCache } from '@langchain/core/caches'
 import { BaseLLMParams } from '@langchain/core/language_models/llms'
 import { ICommonObject, INode, INodeData, INodeOptionsValue, INodeParams } from '../../../src/Interface'
@@ -153,7 +153,7 @@ class OpenAI_LLMs implements INode {
         const cache = nodeData.inputs?.cache as BaseCache

-        const obj: Partial<OpenAIInput> & BaseLLMParams & { openAIApiKey?: string } = {
+        const obj: Partial<OpenAIInput> & BaseLLMParams & { configuration?: ClientOptions } = {
             temperature: parseFloat(temperature),
             modelName,
             openAIApiKey,
@@ -179,10 +179,14 @@ class OpenAI_LLMs implements INode {
             }
         }

-        const model = new OpenAI(obj, {
-            basePath,
-            baseOptions: parsedBaseOptions
-        })
+        if (basePath || parsedBaseOptions) {
+            obj.configuration = {
+                baseURL: basePath,
+                defaultHeaders: parsedBaseOptions
+            }
+        }
+
+        const model = new OpenAI(obj)
         return model
     }
 }


@@ -295,8 +295,11 @@ class PineconeVectorStore extends VectorStoreBase implements VectorStoreNoEmbedM
     async query(query: VectorStoreQuery): Promise<VectorStoreQueryResult> {
         const queryOptions: any = {
             vector: query.queryEmbedding,
-            topK: query.similarityTopK,
-            filter: this.queryFilter
+            topK: query.similarityTopK
+        }
+
+        if (this.queryFilter && Object.keys(this.queryFilter).length > 0) {
+            queryOptions.filter = this.queryFilter
         }

         const idx = await this.index()
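
The change above attaches `filter` only when it holds at least one condition, since an empty `{}` filter can cause the Pinecone query API to reject the request. A standalone sketch of the same guard, with hypothetical names not taken from this commit:

function buildQueryOptions(embedding: number[], topK: number, metadataFilter?: Record<string, unknown>) {
    const queryOptions: Record<string, unknown> = { vector: embedding, topK }
    // Attach `filter` only when it actually has conditions.
    if (metadataFilter && Object.keys(metadataFilter).length > 0) {
        queryOptions.filter = metadataFilter
    }
    return queryOptions
}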


@@ -42,7 +42,7 @@
         "@langchain/baidu-qianfan": "^0.1.0",
         "@langchain/cohere": "^0.0.7",
         "@langchain/community": "^0.3.24",
-        "@langchain/core": "0.3.29",
+        "@langchain/core": "0.3.37",
         "@langchain/exa": "^0.0.5",
         "@langchain/google-genai": "0.1.3",
         "@langchain/google-vertexai": "^0.1.2",
@@ -51,7 +51,7 @@
         "@langchain/mistralai": "^0.2.0",
         "@langchain/mongodb": "^0.0.1",
         "@langchain/ollama": "0.1.2",
-        "@langchain/openai": "0.3.13",
+        "@langchain/openai": "0.4.2",
         "@langchain/pinecone": "^0.1.3",
         "@langchain/qdrant": "^0.0.5",
         "@langchain/weaviate": "^0.0.1",
@@ -114,7 +114,7 @@
         "notion-to-md": "^3.1.1",
         "object-hash": "^3.0.0",
         "ollama": "^0.5.11",
-        "openai": "^4.57.3",
+        "openai": "^4.82.0",
         "papaparse": "^5.4.1",
         "pdf-parse": "^1.1.1",
         "pdfjs-dist": "^3.7.107",


@@ -3,7 +3,7 @@ import { getCredentialData } from './utils'
 import { ChatAnthropic } from '@langchain/anthropic'
 import { ChatGoogleGenerativeAI } from '@langchain/google-genai'
 import { ChatMistralAI } from '@langchain/mistralai'
-import { ChatOpenAI } from '@langchain/openai'
+import { ChatOpenAI, AzureChatOpenAI } from '@langchain/openai'
 import { z } from 'zod'
 import { PromptTemplate } from '@langchain/core/prompts'
 import { StructuredOutputParser } from '@langchain/core/output_parsers'
@@ -46,7 +46,7 @@ export const generateFollowUpPrompts = async (
             const azureOpenAIApiDeploymentName = credentialData['azureOpenAIApiDeploymentName']
             const azureOpenAIApiVersion = credentialData['azureOpenAIApiVersion']

-            const llm = new ChatOpenAI({
+            const llm = new AzureChatOpenAI({
                 azureOpenAIApiKey,
                 azureOpenAIApiInstanceName,
                 azureOpenAIApiDeploymentName,


@@ -97,7 +97,7 @@
         "multer": "^1.4.5-lts.1",
         "multer-s3": "^3.0.1",
         "mysql2": "^3.11.3",
-        "openai": "^4.57.3",
+        "openai": "^4.82.0",
         "pg": "^8.11.1",
         "posthog-node": "^3.5.0",
         "prom-client": "^15.1.3",

File diff suppressed because one or more lines are too long