Feature/update openai version, add reasoning effort param, add o3 mini (#3973)

* update openai version, add reasoning effort param

* update azure

* add filter for pinecone llamaindex

* update graph cypher qa chain
Henry Heng 2025-02-04 08:43:27 +00:00, committed by GitHub
parent 2a0e712b7d
commit a0b4abdd13
24 changed files with 498 additions and 289 deletions
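
The core of the change is the SDK bump: openai 4.82.0 and @langchain/openai 0.4.2 expose the reasoning_effort parameter and the o3-mini model used throughout the diff below. A minimal sketch of what the new node options map to at the raw API level (prompt and values are illustrative):

import OpenAI from 'openai'

// Sketch, assuming openai >= 4.82.0: reasoning models (the o1/o3 family) accept
// reasoning_effort but reject temperature, which is why the nodes below delete it.
const client = new OpenAI({ apiKey: process.env.OPENAI_API_KEY })

const completion = await client.chat.completions.create({
    model: 'o3-mini',
    reasoning_effort: 'medium', // 'low' | 'medium' | 'high'
    messages: [{ role: 'user', content: 'Explain Cypher QA chains in one paragraph.' }]
})
console.log(completion.choices[0].message.content)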

View File

@@ -71,9 +71,9 @@
},
"resolutions": {
"@google/generative-ai": "^0.15.0",
"@langchain/core": "0.3.29",
"@langchain/core": "0.3.37",
"@qdrant/openapi-typescript-fetch": "1.2.6",
"openai": "4.57.3",
"openai": "4.82.0",
"protobufjs": "7.4.0"
},
"eslintIgnore": [

View File

@@ -230,6 +230,14 @@
{
"name": "azureChatOpenAI",
"models": [
{
"label": "o3-mini",
"name": "o3-mini"
},
{
"label": "o1",
"name": "o1"
},
{
"label": "o1-preview",
"name": "o1-preview"
@@ -397,6 +405,10 @@
{
"name": "chatGoogleGenerativeAI",
"models": [
{
"label": "gemini-2.0-flash-exp",
"name": "gemini-2.0-flash-exp"
},
{
"label": "gemini-1.5-flash-latest",
"name": "gemini-1.5-flash-latest"

View File

@@ -23,7 +23,7 @@ class GraphCypherQA_Chain implements INode {
constructor(fields?: { sessionId?: string }) {
this.label = 'Graph Cypher QA Chain'
this.name = 'graphCypherQAChain'
this.version = 1.0
this.version = 1.1
this.type = 'GraphCypherQAChain'
this.icon = 'graphqa.svg'
this.category = 'Chains'
@@ -47,7 +47,8 @@ class GraphCypherQA_Chain implements INode {
name: 'cypherPrompt',
optional: true,
type: 'BasePromptTemplate',
description: 'Prompt template for generating Cypher queries. Must include {schema} and {question} variables'
description:
'Prompt template for generating Cypher queries. Must include {schema} and {question} variables. If not provided, default prompt will be used.'
},
{
label: 'Cypher Generation Model',
@@ -61,7 +62,8 @@ class GraphCypherQA_Chain implements INode {
name: 'qaPrompt',
optional: true,
type: 'BasePromptTemplate',
description: 'Prompt template for generating answers. Must include {context} and {question} variables'
description:
'Prompt template for generating answers. Must include {context} and {question} variables. If not provided, default prompt will be used.'
},
{
label: 'QA Model',
@@ -111,6 +113,10 @@ class GraphCypherQA_Chain implements INode {
const returnDirect = nodeData.inputs?.returnDirect as boolean
const output = nodeData.outputs?.output as string
if (!model) {
throw new Error('Language Model is required')
}
// Handle prompt values if they exist
let cypherPromptTemplate: PromptTemplate | FewShotPromptTemplate | undefined
let qaPromptTemplate: PromptTemplate | undefined
@@ -147,10 +153,6 @@
})
}
if ((!cypherModel || !qaModel) && !model) {
throw new Error('Language Model is required when Cypher Model or QA Model are not provided')
}
// Validate required variables in prompts
if (
cypherPromptTemplate &&
@@ -165,13 +167,13 @@
returnDirect
}
if (cypherModel && cypherPromptTemplate) {
fromLLMInput['cypherLLM'] = cypherModel
if (cypherPromptTemplate) {
fromLLMInput['cypherLLM'] = cypherModel ?? model
fromLLMInput['cypherPrompt'] = cypherPromptTemplate
}
if (qaModel && qaPromptTemplate) {
fromLLMInput['qaLLM'] = qaModel
if (qaPromptTemplate) {
fromLLMInput['qaLLM'] = qaModel ?? model
fromLLMInput['qaPrompt'] = qaPromptTemplate
}
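
The net behavioural change in this chain: a custom prompt no longer requires its own dedicated model, because cypherLLM and qaLLM now fall back to the main model via ??. A condensed sketch of the fallback under that assumption (the template text is illustrative):

import { PromptTemplate } from '@langchain/core/prompts'
import type { BaseLanguageModel } from '@langchain/core/language_models/base'

// Illustrative template containing the {schema} and {question} variables the node validates.
const cypherPromptTemplate = PromptTemplate.fromTemplate(
    'Given this graph schema:\n{schema}\n\nWrite a Cypher query that answers: {question}'
)

// Mirrors the change above: the dedicated Cypher model wins, else the main model is reused.
function buildCypherInput(model: BaseLanguageModel, cypherModel?: BaseLanguageModel) {
    const fromLLMInput: Record<string, any> = { llm: model }
    if (cypherPromptTemplate) {
        fromLLMInput.cypherLLM = cypherModel ?? model
        fromLLMInput.cypherPrompt = cypherPromptTemplate
    }
    return fromLLMInput
}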

View File

@@ -1,11 +1,9 @@
import { AzureOpenAIInput, ChatOpenAI as LangchainChatOpenAI, OpenAIChatInput, ClientOptions, LegacyOpenAIInput } from '@langchain/openai'
import { AzureOpenAIInput, AzureChatOpenAI as LangchainAzureChatOpenAI, ChatOpenAIFields, OpenAIClient } from '@langchain/openai'
import { BaseCache } from '@langchain/core/caches'
import { BaseLLMParams } from '@langchain/core/language_models/llms'
import { ICommonObject, IMultiModalOption, INode, INodeData, INodeOptionsValue, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { ChatOpenAI } from '../ChatOpenAI/FlowiseChatOpenAI'
import { getModels, MODEL_TYPE } from '../../../src/modelLoader'
import { BaseChatModelParams } from '@langchain/core/language_models/chat_models'
import { AzureChatOpenAI } from './FlowiseAzureChatOpenAI'
const serverCredentialsExists =
!!process.env.AZURE_OPENAI_API_KEY &&
@@ -33,7 +31,7 @@ class AzureChatOpenAI_ChatModels implements INode {
this.icon = 'Azure.svg'
this.category = 'Chat Models'
this.description = 'Wrapper around Azure OpenAI large language models that use the Chat endpoint'
this.baseClasses = [this.type, ...getBaseClasses(LangchainChatOpenAI)]
this.baseClasses = [this.type, ...getBaseClasses(LangchainAzureChatOpenAI)]
this.credential = {
label: 'Connect Credential',
name: 'credential',
@@ -155,6 +153,29 @@
default: 'low',
optional: false,
additionalParams: true
},
{
label: 'Reasoning Effort',
description: 'Constrains effort on reasoning for reasoning models. Only applicable to o1 and o3 models',
name: 'reasoningEffort',
type: 'options',
options: [
{
label: 'Low',
name: 'low'
},
{
label: 'Medium',
name: 'medium'
},
{
label: 'High',
name: 'high'
}
],
default: 'low',
optional: false,
additionalParams: true
}
]
}
@@ -178,6 +199,7 @@ class AzureChatOpenAI_ChatModels implements INode {
const topP = nodeData.inputs?.topP as string
const basePath = nodeData.inputs?.basepath as string
const baseOptions = nodeData.inputs?.baseOptions
const reasoningEffort = nodeData.inputs?.reasoningEffort as OpenAIClient.Chat.ChatCompletionReasoningEffort
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const azureOpenAIApiKey = getCredentialParam('azureOpenAIApiKey', credentialData, nodeData)
@@ -188,10 +210,7 @@
const allowImageUploads = nodeData.inputs?.allowImageUploads as boolean
const imageResolution = nodeData.inputs?.imageResolution as string
const obj: Partial<AzureOpenAIInput> &
BaseLLMParams &
Partial<OpenAIChatInput> &
BaseChatModelParams & { configuration?: ClientOptions & LegacyOpenAIInput } = {
const obj: ChatOpenAIFields & Partial<AzureOpenAIInput> = {
temperature: parseFloat(temperature),
modelName,
azureOpenAIApiKey,
@@ -218,6 +237,12 @@
console.error('Error parsing base options', exception)
}
}
if (modelName === 'o3-mini') {
delete obj.temperature
}
if ((modelName.includes('o1') || modelName.includes('o3')) && reasoningEffort) {
obj.reasoningEffort = reasoningEffort
}
const multiModalOption: IMultiModalOption = {
image: {
@@ -226,7 +251,7 @@
}
}
const model = new ChatOpenAI(nodeData.id, obj)
const model = new AzureChatOpenAI(nodeData.id, obj)
model.setMultiModalOption(multiModalOption)
return model
}
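
Putting the Azure hunks together: the node now builds a ChatOpenAIFields object, strips temperature for o3-mini, attaches reasoningEffort for o1/o3 models, and instantiates the new Flowise wrapper. A condensed sketch (credential values are placeholders):

import { AzureOpenAIInput, ChatOpenAIFields, OpenAIClient } from '@langchain/openai'
import { AzureChatOpenAI } from './FlowiseAzureChatOpenAI'

const modelName = 'o3-mini'
const reasoningEffort: OpenAIClient.Chat.ChatCompletionReasoningEffort = 'medium'

const obj: ChatOpenAIFields & Partial<AzureOpenAIInput> = {
    modelName,
    temperature: 0.9, // stripped below: o3-mini rejects temperature
    azureOpenAIApiKey: process.env.AZURE_OPENAI_API_KEY,
    azureOpenAIApiInstanceName: 'my-instance', // placeholder
    azureOpenAIApiDeploymentName: 'o3-mini', // placeholder
    azureOpenAIApiVersion: '2024-08-01-preview' // placeholder
}
if (modelName === 'o3-mini') delete obj.temperature
if ((modelName.includes('o1') || modelName.includes('o3')) && reasoningEffort) {
    obj.reasoningEffort = reasoningEffort
}
const model = new AzureChatOpenAI('node-id', obj)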

View File

@@ -0,0 +1,41 @@
import { AzureChatOpenAI as LangchainAzureChatOpenAI, OpenAIChatInput, AzureOpenAIInput, ClientOptions } from '@langchain/openai'
import { IMultiModalOption, IVisionChatModal } from '../../../src'
import { BaseChatModelParams } from '@langchain/core/language_models/chat_models'
export class AzureChatOpenAI extends LangchainAzureChatOpenAI implements IVisionChatModal {
configuredModel: string
configuredMaxToken?: number
multiModalOption: IMultiModalOption
id: string
constructor(
id: string,
fields?: Partial<OpenAIChatInput> &
Partial<AzureOpenAIInput> & {
openAIApiKey?: string
openAIApiVersion?: string
openAIBasePath?: string
deploymentName?: string
} & BaseChatModelParams & {
configuration?: ClientOptions
}
) {
super(fields)
this.id = id
this.configuredModel = fields?.modelName ?? ''
this.configuredMaxToken = fields?.maxTokens
}
revertToOriginalModel(): void {
this.modelName = this.configuredModel
this.maxTokens = this.configuredMaxToken
}
setMultiModalOption(multiModalOption: IMultiModalOption): void {
this.multiModalOption = multiModalOption
}
setVisionModel(): void {
// pass
}
}
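
This wrapper exists so Flowise can treat the Azure model as an IVisionChatModal. A usage sketch of the hooks it adds (config values are placeholders, and the multi-modal option shape follows the node code above):

const model = new AzureChatOpenAI('node-123', {
    modelName: 'gpt-4o',
    maxTokens: 1024,
    azureOpenAIApiKey: process.env.AZURE_OPENAI_API_KEY // remaining Azure fields omitted
})
model.setMultiModalOption({ image: { allowImageUploads: true, imageResolution: 'low' } })
// If a flow swaps the model for a vision-capable one mid-run, the configuration
// captured in the constructor can be restored afterwards:
model.revertToOriginalModel()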

View File

@@ -1,6 +1,5 @@
import { ChatOpenAI, OpenAIChatInput } from '@langchain/openai'
import { ChatOpenAI, ChatOpenAIFields } from '@langchain/openai'
import { BaseCache } from '@langchain/core/caches'
import { BaseLLMParams } from '@langchain/core/language_models/llms'
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
@@ -135,7 +134,7 @@
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const cerebrasAIApiKey = getCredentialParam('cerebrasApiKey', credentialData, nodeData)
const obj: Partial<OpenAIChatInput> & BaseLLMParams = {
const obj: ChatOpenAIFields = {
temperature: parseFloat(temperature),
modelName,
openAIApiKey: cerebrasAIApiKey,
@@ -158,10 +157,15 @@
throw new Error("Invalid JSON in the ChatCerebras's BaseOptions: " + exception)
}
}
const model = new ChatOpenAI(obj, {
basePath,
baseOptions: parsedBaseOptions
})
if (basePath || parsedBaseOptions) {
obj.configuration = {
baseURL: basePath,
defaultHeaders: parsedBaseOptions
}
}
const model = new ChatOpenAI(obj)
return model
}
}
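
This hunk shows the migration pattern repeated in the NVIDIA NIM, OpenRouter, LocalAI and custom-OpenAI nodes below: @langchain/openai 0.4.x removes the deprecated second constructor argument, so basePath and header overrides now travel inside fields.configuration. A sketch (model name, endpoint and headers are assumptions for illustration):

import { ChatOpenAI, ChatOpenAIFields } from '@langchain/openai'

// Before (0.3.x): new ChatOpenAI(obj, { basePath, baseOptions })
// After  (0.4.x): everything lives on the fields object.
const obj: ChatOpenAIFields = {
    modelName: 'llama3.1-8b', // placeholder model name
    openAIApiKey: process.env.CEREBRAS_API_KEY,
    configuration: {
        baseURL: 'https://api.cerebras.ai/v1', // assumed Cerebras base path
        defaultHeaders: { 'x-example-header': 'demo' } // hypothetical extra headers
    }
}
const model = new ChatOpenAI(obj)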

View File

@@ -1,6 +1,5 @@
import { OpenAIChatInput, ChatOpenAI } from '@langchain/openai'
import { ChatOpenAI, ChatOpenAIFields } from '@langchain/openai'
import { BaseCache } from '@langchain/core/caches'
import { BaseLLMParams } from '@langchain/core/language_models/llms'
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
@@ -108,7 +107,7 @@
const cache = nodeData.inputs?.cache as BaseCache
const obj: Partial<OpenAIChatInput> & BaseLLMParams & { openAIApiKey?: string } = {
const obj: ChatOpenAIFields = {
temperature: parseFloat(temperature),
modelName,
openAIApiKey: 'sk-',
@@ -120,8 +119,9 @@
if (timeout) obj.timeout = parseInt(timeout, 10)
if (cache) obj.cache = cache
if (localAIApiKey) obj.openAIApiKey = localAIApiKey
if (basePath) obj.configuration = { baseURL: basePath }
const model = new ChatOpenAI(obj, { basePath })
const model = new ChatOpenAI(obj)
return model
}

View File

@@ -1,6 +1,5 @@
import { ChatOpenAI, OpenAIChatInput } from '@langchain/openai'
import { ChatOpenAI, ChatOpenAIFields } from '@langchain/openai'
import { BaseCache } from '@langchain/core/caches'
import { BaseLLMParams } from '@langchain/core/language_models/llms'
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
@@ -134,7 +133,7 @@
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const nvdiaNIMApiKey = getCredentialParam('nvdiaNIMApiKey', credentialData, nodeData)
const obj: Partial<OpenAIChatInput> & BaseLLMParams & { nvdiaNIMApiKey?: string } = {
const obj: ChatOpenAIFields & { nvdiaNIMApiKey?: string } = {
temperature: parseFloat(temperature),
modelName,
openAIApiKey: nvdiaNIMApiKey,
@@ -154,14 +153,18 @@
try {
parsedBaseOptions = typeof baseOptions === 'object' ? baseOptions : JSON.parse(baseOptions)
} catch (exception) {
throw new Error("Invalid JSON in the ChatOpenAI's BaseOptions: " + exception)
throw new Error("Invalid JSON in the ChatNvidiaNIM's baseOptions: " + exception)
}
}
const model = new ChatOpenAI(obj, {
basePath,
baseOptions: parsedBaseOptions
})
if (basePath || parsedBaseOptions) {
obj.configuration = {
baseURL: basePath,
defaultHeaders: parsedBaseOptions
}
}
const model = new ChatOpenAI(obj)
return model
}
}

View File

@@ -1,7 +1,5 @@
import type { ClientOptions } from 'openai'
import { ChatOpenAI as LangchainChatOpenAI, OpenAIChatInput, AzureOpenAIInput, LegacyOpenAIInput } from '@langchain/openai'
import { ChatOpenAI as LangchainChatOpenAI, ChatOpenAIFields, OpenAIClient } from '@langchain/openai'
import { BaseCache } from '@langchain/core/caches'
import { BaseChatModelParams } from '@langchain/core/language_models/chat_models'
import { ICommonObject, IMultiModalOption, INode, INodeData, INodeOptionsValue, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { ChatOpenAI } from './FlowiseChatOpenAI'
@@ -23,7 +21,7 @@ class ChatOpenAI_ChatModels implements INode {
constructor() {
this.label = 'ChatOpenAI'
this.name = 'chatOpenAI'
this.version = 8.0
this.version = 8.1
this.type = 'ChatOpenAI'
this.icon = 'openai.svg'
this.category = 'Chat Models'
@@ -105,6 +103,24 @@
optional: true,
additionalParams: true
},
{
label: 'Strict Tool Calling',
name: 'strictToolCalling',
type: 'boolean',
description:
'Whether the model supports the `strict` argument when passing in tools. If not specified, the `strict` argument will not be passed to OpenAI.',
optional: true,
additionalParams: true
},
{
label: 'Stop Sequence',
name: 'stopSequence',
type: 'string',
rows: 4,
optional: true,
description: 'List of stop words to use when generating. Use comma to separate multiple stop words.',
additionalParams: true
},
{
label: 'BasePath',
name: 'basepath',
@@ -119,15 +135,6 @@
optional: true,
additionalParams: true
},
{
label: 'Stop Sequence',
name: 'stopSequence',
type: 'string',
rows: 4,
optional: true,
description: 'List of stop words to use when generating. Use comma to separate multiple stop words.',
additionalParams: true
},
{
label: 'BaseOptions',
name: 'baseOptions',
@@ -166,6 +173,29 @@
default: 'low',
optional: false,
additionalParams: true
},
{
label: 'Reasoning Effort',
description: 'Constrains effort on reasoning for reasoning models. Only applicable to o1 and o3 models',
name: 'reasoningEffort',
type: 'options',
options: [
{
label: 'Low',
name: 'low'
},
{
label: 'Medium',
name: 'medium'
},
{
label: 'High',
name: 'high'
}
],
default: 'low',
optional: false,
additionalParams: true
}
]
}
@@ -187,9 +217,11 @@
const timeout = nodeData.inputs?.timeout as string
const stopSequence = nodeData.inputs?.stopSequence as string
const streaming = nodeData.inputs?.streaming as boolean
const strictToolCalling = nodeData.inputs?.strictToolCalling as boolean
const basePath = nodeData.inputs?.basepath as string
const proxyUrl = nodeData.inputs?.proxyUrl as string
const baseOptions = nodeData.inputs?.baseOptions
const reasoningEffort = nodeData.inputs?.reasoningEffort as OpenAIClient.Chat.ChatCompletionReasoningEffort
const allowImageUploads = nodeData.inputs?.allowImageUploads as boolean
const imageResolution = nodeData.inputs?.imageResolution as string
@@ -202,9 +234,7 @@
const cache = nodeData.inputs?.cache as BaseCache
const obj: Partial<OpenAIChatInput> &
Partial<AzureOpenAIInput> &
BaseChatModelParams & { configuration?: ClientOptions & LegacyOpenAIInput } = {
const obj: ChatOpenAIFields = {
temperature: parseFloat(temperature),
modelName,
openAIApiKey,
@@ -214,6 +244,9 @@
if (modelName === 'o3-mini') {
delete obj.temperature
}
if ((modelName.includes('o1') || modelName.includes('o3')) && reasoningEffort) {
obj.reasoningEffort = reasoningEffort
}
if (maxTokens) obj.maxTokens = parseInt(maxTokens, 10)
if (topP) obj.topP = parseFloat(topP)
if (frequencyPenalty) obj.frequencyPenalty = parseFloat(frequencyPenalty)
@@ -224,6 +257,7 @@
const stopSequenceArray = stopSequence.split(',').map((item) => item.trim())
obj.stop = stopSequenceArray
}
if (strictToolCalling) obj.supportsStrictToolCalling = strictToolCalling
let parsedBaseOptions: any | undefined = undefined
@@ -238,7 +272,7 @@
if (basePath || parsedBaseOptions) {
obj.configuration = {
baseURL: basePath,
baseOptions: parsedBaseOptions
defaultHeaders: parsedBaseOptions
}
}

View File

@@ -1,6 +1,4 @@
import type { ClientOptions } from 'openai'
import { ChatOpenAI as LangchainChatOpenAI, OpenAIChatInput, LegacyOpenAIInput, AzureOpenAIInput } from '@langchain/openai'
import { BaseChatModelParams } from '@langchain/core/language_models/chat_models'
import { ChatOpenAI as LangchainChatOpenAI, ChatOpenAIFields } from '@langchain/openai'
import { IMultiModalOption, IVisionChatModal } from '../../../src'
export class ChatOpenAI extends LangchainChatOpenAI implements IVisionChatModal {
@@ -9,15 +7,8 @@ export class ChatOpenAI extends LangchainChatOpenAI implements IVisionChatModal
multiModalOption: IMultiModalOption
id: string
constructor(
id: string,
fields?: Partial<OpenAIChatInput> &
Partial<AzureOpenAIInput> &
BaseChatModelParams & { configuration?: ClientOptions & LegacyOpenAIInput },
/** @deprecated */
configuration?: ClientOptions & LegacyOpenAIInput
) {
super(fields, configuration)
constructor(id: string, fields?: ChatOpenAIFields) {
super(fields)
this.id = id
this.configuredModel = fields?.modelName ?? ''
this.configuredMaxToken = fields?.maxTokens

View File

@@ -1,6 +1,5 @@
import { ChatOpenAI, OpenAIChatInput } from '@langchain/openai'
import { ChatOpenAI, ChatOpenAIFields } from '@langchain/openai'
import { BaseCache } from '@langchain/core/caches'
import { BaseLLMParams } from '@langchain/core/language_models/llms'
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
@@ -134,7 +133,7 @@ class ChatOpenAICustom_ChatModels implements INode {
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const openAIApiKey = getCredentialParam('openAIApiKey', credentialData, nodeData)
const obj: Partial<OpenAIChatInput> & BaseLLMParams & { openAIApiKey?: string } = {
const obj: ChatOpenAIFields = {
temperature: parseFloat(temperature),
modelName,
openAIApiKey,
@@ -157,10 +156,15 @@
throw new Error("Invalid JSON in the ChatOpenAI's BaseOptions: " + exception)
}
}
const model = new ChatOpenAI(obj, {
basePath,
baseOptions: parsedBaseOptions
})
if (basePath || parsedBaseOptions) {
obj.configuration = {
baseURL: basePath,
defaultHeaders: parsedBaseOptions
}
}
const model = new ChatOpenAI(obj)
return model
}
}

View File

@@ -1,6 +1,5 @@
import { ChatOpenAI, OpenAIChatInput } from '@langchain/openai'
import { ChatOpenAI, ChatOpenAIFields } from '@langchain/openai'
import { BaseCache } from '@langchain/core/caches'
import { BaseLLMParams } from '@langchain/core/language_models/llms'
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
@@ -135,7 +134,7 @@ class ChatOpenRouter_ChatModels implements INode {
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const openRouterApiKey = getCredentialParam('openRouterApiKey', credentialData, nodeData)
const obj: Partial<OpenAIChatInput> & BaseLLMParams = {
const obj: ChatOpenAIFields = {
temperature: parseFloat(temperature),
modelName,
openAIApiKey: openRouterApiKey,
@@ -158,10 +157,15 @@
throw new Error("Invalid JSON in the ChatCerebras's BaseOptions: " + exception)
}
}
const model = new ChatOpenAI(obj, {
basePath,
baseOptions: parsedBaseOptions
})
if (basePath || parsedBaseOptions) {
obj.configuration = {
baseURL: basePath,
defaultHeaders: parsedBaseOptions
}
}
const model = new ChatOpenAI(obj)
return model
}
}

View File

@@ -1,7 +1,5 @@
import { BaseCache } from '@langchain/core/caches'
import { BaseChatModelParams } from '@langchain/core/language_models/chat_models'
import { ChatOpenAI, LegacyOpenAIInput, OpenAIChatInput } from '@langchain/openai'
import type { ClientOptions } from 'openai'
import { ChatOpenAI, ChatOpenAIFields } from '@langchain/openai'
import { ICommonObject, INode, INodeData, INodeOptionsValue, INodeParams } from '../../../src/Interface'
import { getModels, MODEL_TYPE } from '../../../src/modelLoader'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
@@ -151,7 +149,7 @@ class Deepseek_ChatModels implements INode {
const cache = nodeData.inputs?.cache as BaseCache
const obj: Partial<OpenAIChatInput> & BaseChatModelParams & { configuration?: ClientOptions & LegacyOpenAIInput } = {
const obj: ChatOpenAIFields = {
temperature: parseFloat(temperature),
modelName,
openAIApiKey,

View File

@@ -1,4 +1,4 @@
import { AzureOpenAIInput, ClientOptions, LegacyOpenAIInput, OpenAIEmbeddings, OpenAIEmbeddingsParams } from '@langchain/openai'
import { AzureOpenAIInput, ClientOptions, AzureOpenAIEmbeddings, OpenAIEmbeddingsParams } from '@langchain/openai'
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
@@ -28,7 +28,7 @@ class AzureOpenAIEmbedding_Embeddings implements INode {
this.icon = 'Azure.svg'
this.category = 'Embeddings'
this.description = 'Azure OpenAI API to generate embeddings for a given text'
this.baseClasses = [this.type, ...getBaseClasses(OpenAIEmbeddings)]
this.baseClasses = [this.type, ...getBaseClasses(AzureOpenAIEmbeddings)]
this.credential = {
label: 'Connect Credential',
name: 'credential',
@@ -81,7 +81,7 @@ class AzureOpenAIEmbedding_Embeddings implements INode {
const azureOpenAIApiDeploymentName = getCredentialParam('azureOpenAIApiDeploymentName', credentialData, nodeData)
const azureOpenAIApiVersion = getCredentialParam('azureOpenAIApiVersion', credentialData, nodeData)
const obj: Partial<OpenAIEmbeddingsParams> & Partial<AzureOpenAIInput> & { configuration?: ClientOptions & LegacyOpenAIInput } = {
const obj: Partial<OpenAIEmbeddingsParams> & Partial<AzureOpenAIInput> & { configuration?: ClientOptions } = {
azureOpenAIApiKey,
azureOpenAIApiInstanceName,
azureOpenAIApiDeploymentName,
@@ -102,7 +102,7 @@ class AzureOpenAIEmbedding_Embeddings implements INode {
}
}
const model = new OpenAIEmbeddings(obj)
const model = new AzureOpenAIEmbeddings(obj)
return model
}
}

View File

@@ -1,4 +1,4 @@
import { OpenAIEmbeddings, OpenAIEmbeddingsParams } from '@langchain/openai'
import { ClientOptions, OpenAIEmbeddings, OpenAIEmbeddingsParams } from '@langchain/openai'
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getCredentialData, getCredentialParam } from '../../../src/utils'
@@ -53,14 +53,16 @@ class LocalAIEmbedding_Embeddings implements INode {
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const localAIApiKey = getCredentialParam('localAIApiKey', credentialData, nodeData)
const obj: Partial<OpenAIEmbeddingsParams> & { openAIApiKey?: string } = {
const obj: Partial<OpenAIEmbeddingsParams> & { openAIApiKey?: string; configuration?: ClientOptions } = {
modelName,
openAIApiKey: 'sk-'
}
if (localAIApiKey) obj.openAIApiKey = localAIApiKey
const model = new OpenAIEmbeddings(obj, { basePath })
if (basePath) obj.configuration = { baseURL: basePath }
const model = new OpenAIEmbeddings(obj)
return model
}

View File

@@ -1,4 +1,4 @@
import { OpenAIEmbeddings, OpenAIEmbeddingsParams } from '@langchain/openai'
import { ClientOptions, OpenAIEmbeddings, OpenAIEmbeddingsParams } from '@langchain/openai'
import { ICommonObject, INode, INodeData, INodeOptionsValue, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { MODEL_TYPE, getModels } from '../../../src/modelLoader'
@@ -97,7 +97,7 @@ class OpenAIEmbedding_Embeddings implements INode {
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const openAIApiKey = getCredentialParam('openAIApiKey', credentialData, nodeData)
const obj: Partial<OpenAIEmbeddingsParams> & { openAIApiKey?: string } = {
const obj: Partial<OpenAIEmbeddingsParams> & { openAIApiKey?: string; configuration?: ClientOptions } = {
openAIApiKey,
modelName
}
@@ -107,7 +107,13 @@
if (timeout) obj.timeout = parseInt(timeout, 10)
if (dimensions) obj.dimensions = parseInt(dimensions, 10)
const model = new OpenAIEmbeddings(obj, { basePath })
if (basePath) {
obj.configuration = {
baseURL: basePath
}
}
const model = new OpenAIEmbeddings(obj)
return model
}
}
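
The embeddings nodes follow the same constructor migration as the chat models: a custom endpoint is now configuration.baseURL instead of the removed { basePath } second argument. A runnable sketch (the endpoint is a placeholder):

import { OpenAIEmbeddings } from '@langchain/openai'

const embeddings = new OpenAIEmbeddings({
    modelName: 'text-embedding-3-small',
    openAIApiKey: process.env.OPENAI_API_KEY,
    dimensions: 256,
    configuration: { baseURL: 'http://localhost:8080/v1' } // placeholder proxy endpoint
})
const [vector] = await embeddings.embedDocuments(['hello world'])
console.log(vector.length) // 256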

View File

@@ -1,4 +1,4 @@
import { OpenAIEmbeddings, OpenAIEmbeddingsParams } from '@langchain/openai'
import { ClientOptions, OpenAIEmbeddings, OpenAIEmbeddingsParams } from '@langchain/openai'
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
@@ -93,7 +93,7 @@ class OpenAIEmbeddingCustom_Embeddings implements INode {
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const openAIApiKey = getCredentialParam('openAIApiKey', credentialData, nodeData)
const obj: Partial<OpenAIEmbeddingsParams> & { openAIApiKey?: string } = {
const obj: Partial<OpenAIEmbeddingsParams> & { openAIApiKey?: string; configuration?: ClientOptions } = {
openAIApiKey
}
@@ -112,7 +112,14 @@
}
}
const model = new OpenAIEmbeddings(obj, { baseURL: basePath, defaultHeaders: parsedBaseOptions })
if (basePath || parsedBaseOptions) {
obj.configuration = {
baseURL: basePath,
defaultHeaders: parsedBaseOptions
}
}
const model = new OpenAIEmbeddings(obj)
return model
}
}

View File

@@ -1,4 +1,4 @@
import { AzureOpenAIInput, OpenAI, OpenAIInput } from '@langchain/openai'
import { AzureOpenAIInput, AzureOpenAI, OpenAIInput } from '@langchain/openai'
import { BaseCache } from '@langchain/core/caches'
import { BaseLLMParams } from '@langchain/core/language_models/llms'
import { ICommonObject, INode, INodeData, INodeOptionsValue, INodeParams } from '../../../src/Interface'
@@ -31,7 +31,7 @@ class AzureOpenAI_LLMs implements INode {
this.icon = 'Azure.svg'
this.category = 'LLMs'
this.description = 'Wrapper around Azure OpenAI large language models'
this.baseClasses = [this.type, ...getBaseClasses(OpenAI)]
this.baseClasses = [this.type, ...getBaseClasses(AzureOpenAI)]
this.credential = {
label: 'Connect Credential',
name: 'credential',
@@ -165,7 +165,7 @@
if (cache) obj.cache = cache
if (basePath) obj.azureOpenAIBasePath = basePath
const model = new OpenAI(obj)
const model = new AzureOpenAI(obj)
return model
}
}

View File

@@ -1,4 +1,4 @@
import { OpenAI, OpenAIInput } from '@langchain/openai'
import { ClientOptions, OpenAI, OpenAIInput } from '@langchain/openai'
import { BaseCache } from '@langchain/core/caches'
import { BaseLLMParams } from '@langchain/core/language_models/llms'
import { ICommonObject, INode, INodeData, INodeOptionsValue, INodeParams } from '../../../src/Interface'
@@ -153,7 +153,7 @@ class OpenAI_LLMs implements INode {
const cache = nodeData.inputs?.cache as BaseCache
const obj: Partial<OpenAIInput> & BaseLLMParams & { openAIApiKey?: string } = {
const obj: Partial<OpenAIInput> & BaseLLMParams & { configuration?: ClientOptions } = {
temperature: parseFloat(temperature),
modelName,
openAIApiKey,
@@ -179,10 +179,14 @@
}
}
const model = new OpenAI(obj, {
basePath,
baseOptions: parsedBaseOptions
})
if (basePath || parsedBaseOptions) {
obj.configuration = {
baseURL: basePath,
defaultHeaders: parsedBaseOptions
}
}
const model = new OpenAI(obj)
return model
}
}

View File

@@ -295,8 +295,11 @@ class PineconeVectorStore extends VectorStoreBase implements VectorStoreNoEmbedM
async query(query: VectorStoreQuery): Promise<VectorStoreQueryResult> {
const queryOptions: any = {
vector: query.queryEmbedding,
topK: query.similarityTopK,
filter: this.queryFilter
topK: query.similarityTopK
}
if (this.queryFilter && Object.keys(this.queryFilter).length > 0) {
queryOptions.filter = this.queryFilter
}
const idx = await this.index()
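
The fix here: the filter key was previously always sent, even as an empty object, which Pinecone can reject; it is now attached only when it actually has conditions. A condensed sketch of the guard (queryFilter and index stand in for the class members above):

async function queryPinecone(index: any, queryEmbedding: number[], similarityTopK: number, queryFilter?: object) {
    const queryOptions: any = { vector: queryEmbedding, topK: similarityTopK }
    if (queryFilter && Object.keys(queryFilter).length > 0) {
        queryOptions.filter = queryFilter // only send a non-empty metadata filter
    }
    return index.query(queryOptions)
}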

View File

@@ -42,7 +42,7 @@
"@langchain/baidu-qianfan": "^0.1.0",
"@langchain/cohere": "^0.0.7",
"@langchain/community": "^0.3.24",
"@langchain/core": "0.3.29",
"@langchain/core": "0.3.37",
"@langchain/exa": "^0.0.5",
"@langchain/google-genai": "0.1.3",
"@langchain/google-vertexai": "^0.1.2",
@@ -51,7 +51,7 @@
"@langchain/mistralai": "^0.2.0",
"@langchain/mongodb": "^0.0.1",
"@langchain/ollama": "0.1.2",
"@langchain/openai": "0.3.13",
"@langchain/openai": "0.4.2",
"@langchain/pinecone": "^0.1.3",
"@langchain/qdrant": "^0.0.5",
"@langchain/weaviate": "^0.0.1",
@@ -114,7 +114,7 @@
"notion-to-md": "^3.1.1",
"object-hash": "^3.0.0",
"ollama": "^0.5.11",
"openai": "^4.57.3",
"openai": "^4.82.0",
"papaparse": "^5.4.1",
"pdf-parse": "^1.1.1",
"pdfjs-dist": "^3.7.107",

View File

@@ -3,7 +3,7 @@ import { getCredentialData } from './utils'
import { ChatAnthropic } from '@langchain/anthropic'
import { ChatGoogleGenerativeAI } from '@langchain/google-genai'
import { ChatMistralAI } from '@langchain/mistralai'
import { ChatOpenAI } from '@langchain/openai'
import { ChatOpenAI, AzureChatOpenAI } from '@langchain/openai'
import { z } from 'zod'
import { PromptTemplate } from '@langchain/core/prompts'
import { StructuredOutputParser } from '@langchain/core/output_parsers'
@@ -46,7 +46,7 @@ export const generateFollowUpPrompts = async (
const azureOpenAIApiDeploymentName = credentialData['azureOpenAIApiDeploymentName']
const azureOpenAIApiVersion = credentialData['azureOpenAIApiVersion']
const llm = new ChatOpenAI({
const llm = new AzureChatOpenAI({
azureOpenAIApiKey,
azureOpenAIApiInstanceName,
azureOpenAIApiDeploymentName,
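
The class swap matters because in @langchain/openai 0.4.x the dedicated AzureChatOpenAI class is the supported way to route azureOpenAI* credentials to the Azure endpoint. A hedged sketch of the resulting model; the structured-output call shown is one option, not necessarily the file's exact parser, and the schema and credential values are illustrative:

import { AzureChatOpenAI } from '@langchain/openai'
import { z } from 'zod'

const llm = new AzureChatOpenAI({
    azureOpenAIApiKey: process.env.AZURE_OPENAI_API_KEY,
    azureOpenAIApiInstanceName: 'my-instance', // placeholder
    azureOpenAIApiDeploymentName: 'gpt-4o-mini', // placeholder
    azureOpenAIApiVersion: '2024-08-01-preview' // placeholder
})
// Illustrative schema for typed follow-up prompts:
const structured = llm.withStructuredOutput(
    z.object({ questions: z.array(z.string()).length(3) })
)
const { questions } = await structured.invoke('Suggest follow-up questions about the last answer.')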

View File

@@ -97,7 +97,7 @@
"multer": "^1.4.5-lts.1",
"multer-s3": "^3.0.1",
"mysql2": "^3.11.3",
"openai": "^4.57.3",
"openai": "^4.82.0",
"pg": "^8.11.1",
"posthog-node": "^3.5.0",
"prom-client": "^15.1.3",

File diff suppressed because one or more lines are too long