Chore/Patch OpenAI Nodes (#4958)

- update @langchain/community and @langchain/openai versions
- fix ChatFireworks
- update reasoning options for OpenAI models
- update OpenAI apiKey param
Henry Heng 2025-07-28 01:17:47 +01:00 committed by GitHub
parent 8846fd14e6
commit aea2b184da
26 changed files with 443 additions and 115 deletions
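
In short: the OpenAI-family chat nodes gain a Reasoning toggle with effort and summary dropdowns, and the flat reasoningEffort field is replaced by an OpenAIClient.Reasoning object. A condensed sketch of the new shape, distilled from the diffs below (the input wiring here is illustrative, not the nodes' exact code):

import { OpenAI as OpenAIClient } from 'openai'

// Illustrative stand-ins; in the nodes these come from nodeData.inputs
const reasoningEffort = 'medium' as OpenAIClient.ReasoningEffort | null
const reasoningSummary = 'auto' as 'auto' | 'concise' | 'detailed' | null

// Built only for reasoning-capable (o1/o3) model names
const reasoning: OpenAIClient.Reasoning = {}
if (reasoningEffort) reasoning.effort = reasoningEffort
if (reasoningSummary) reasoning.summary = reasoningSummary
// ...then attached to the model config as obj.reasoning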

View File

@@ -1,9 +1,10 @@
import { AzureOpenAIInput, AzureChatOpenAI as LangchainAzureChatOpenAI, ChatOpenAIFields, OpenAIClient } from '@langchain/openai'
import { AzureOpenAIInput, AzureChatOpenAI as LangchainAzureChatOpenAI, ChatOpenAIFields } from '@langchain/openai'
import { BaseCache } from '@langchain/core/caches'
import { ICommonObject, IMultiModalOption, INode, INodeData, INodeOptionsValue, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { getModels, MODEL_TYPE } from '../../../src/modelLoader'
import { AzureChatOpenAI } from './FlowiseAzureChatOpenAI'
import { OpenAI as OpenAIClient } from 'openai'
const serverCredentialsExists =
!!process.env.AZURE_OPENAI_API_KEY &&
@@ -26,7 +27,7 @@ class AzureChatOpenAI_ChatModels implements INode {
constructor() {
this.label = 'Azure ChatOpenAI'
this.name = 'azureChatOpenAI'
this.version = 7.0
this.version = 7.1
this.type = 'AzureChatOpenAI'
this.icon = 'Azure.svg'
this.category = 'Chat Models'
@@ -154,6 +155,15 @@ class AzureChatOpenAI_ChatModels implements INode {
optional: false,
additionalParams: true
},
{
label: 'Reasoning',
description: 'Whether the model supports reasoning. Only applicable for reasoning models.',
name: 'reasoning',
type: 'boolean',
default: false,
optional: true,
additionalParams: true
},
{
label: 'Reasoning Effort',
description: 'Constrains effort on reasoning for reasoning models. Only applicable for o1 and o3 models.',
@@ -173,9 +183,34 @@ class AzureChatOpenAI_ChatModels implements INode {
name: 'high'
}
],
default: 'medium',
optional: false,
additionalParams: true
additionalParams: true,
show: {
reasoning: true
}
},
{
label: 'Reasoning Summary',
description: `A summary of the reasoning performed by the model. This can be useful for debugging and understanding the model's reasoning process`,
name: 'reasoningSummary',
type: 'options',
options: [
{
label: 'Auto',
name: 'auto'
},
{
label: 'Concise',
name: 'concise'
},
{
label: 'Detailed',
name: 'detailed'
}
],
additionalParams: true,
show: {
reasoning: true
}
}
]
}
@@ -199,7 +234,8 @@ class AzureChatOpenAI_ChatModels implements INode {
const topP = nodeData.inputs?.topP as string
const basePath = nodeData.inputs?.basepath as string
const baseOptions = nodeData.inputs?.baseOptions
const reasoningEffort = nodeData.inputs?.reasoningEffort as OpenAIClient.Chat.ChatCompletionReasoningEffort
const reasoningEffort = nodeData.inputs?.reasoningEffort as OpenAIClient.Chat.ChatCompletionReasoningEffort | null
const reasoningSummary = nodeData.inputs?.reasoningSummary as 'auto' | 'concise' | 'detailed' | null
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const azureOpenAIApiKey = getCredentialParam('azureOpenAIApiKey', credentialData, nodeData)
@@ -240,8 +276,15 @@ class AzureChatOpenAI_ChatModels implements INode {
if (modelName === 'o3-mini' || modelName.includes('o1')) {
delete obj.temperature
}
if ((modelName.includes('o1') || modelName.includes('o3')) && reasoningEffort) {
obj.reasoningEffort = reasoningEffort
if (modelName.includes('o1') || modelName.includes('o3')) {
const reasoning: OpenAIClient.Reasoning = {}
if (reasoningEffort) {
reasoning.effort = reasoningEffort
}
if (reasoningSummary) {
reasoning.summary = reasoningSummary
}
obj.reasoning = reasoning
}
const multiModalOption: IMultiModalOption = {

View File

@@ -6,6 +6,7 @@ export class AzureChatOpenAI extends LangchainAzureChatOpenAI implements IVision
configuredModel: string
configuredMaxToken?: number
multiModalOption: IMultiModalOption
builtInTools: Record<string, any>[] = []
id: string
constructor(
@@ -27,7 +28,7 @@ export class AzureChatOpenAI extends LangchainAzureChatOpenAI implements IVision
}
revertToOriginalModel(): void {
this.modelName = this.configuredModel
this.model = this.configuredModel
this.maxTokens = this.configuredMaxToken
}
@@ -38,4 +39,8 @@ export class AzureChatOpenAI extends LangchainAzureChatOpenAI implements IVision
setVisionModel(): void {
// pass
}
addBuiltInTools(builtInTool: Record<string, any>): void {
this.builtInTools.push(builtInTool)
}
}
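
Besides switching revertToOriginalModel from the modelName alias to the preferred model field, the wrapper gains a builtInTools accumulator. A minimal sketch of the new hook; the tool record's shape is an assumption, since addBuiltInTools just stores whatever it is given:

// Given a constructed Flowise wrapper instance `model`:
model.addBuiltInTools({ type: 'web_search_preview' }) // payload shape assumed
// Records accumulate on model.builtInTools for downstream consumers to read.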

View File

@@ -136,7 +136,8 @@ class ChatCerebras_ChatModels implements INode {
const obj: ChatOpenAIFields = {
temperature: parseFloat(temperature),
modelName,
model: modelName,
apiKey: cerebrasAIApiKey,
openAIApiKey: cerebrasAIApiKey,
streaming: streaming ?? true
}

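The same dual assignment recurs in the nodes below (LiteLLM, LocalAI, Nvidia NIM, ChatOpenAI, custom OpenAI, OpenRouter, Deepseek): both the legacy openAIApiKey field and the newer apiKey field are populated, presumably so the config satisfies both parameter names across @langchain/openai versions. Schematically:

// Schematic: set the legacy and current key fields together.
if (apiKey) {
    obj.openAIApiKey = apiKey // legacy name
    obj.apiKey = apiKey       // current name
}
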
View File

@@ -1,7 +1,7 @@
import { BaseCache } from '@langchain/core/caches'
import { ChatFireworks } from '@langchain/community/chat_models/fireworks'
import { ICommonObject, INode, INodeData, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { ChatFireworks, ChatFireworksParams } from './core'
class ChatFireworks_ChatModels implements INode {
label: string
@@ -41,8 +41,8 @@
label: 'Model',
name: 'modelName',
type: 'string',
default: 'accounts/fireworks/models/llama-v2-13b-chat',
placeholder: 'accounts/fireworks/models/llama-v2-13b-chat'
default: 'accounts/fireworks/models/llama-v3p1-8b-instruct',
placeholder: 'accounts/fireworks/models/llama-v3p1-8b-instruct'
},
{
label: 'Temperature',
@@ -71,9 +71,8 @@
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
const fireworksApiKey = getCredentialParam('fireworksApiKey', credentialData, nodeData)
const obj: Partial<ChatFireworks> = {
const obj: ChatFireworksParams = {
fireworksApiKey,
model: modelName,
modelName,
temperature: temperature ? parseFloat(temperature) : undefined,
streaming: streaming ?? true

View File

@@ -0,0 +1,126 @@
import type { BaseChatModelParams, LangSmithParams } from '@langchain/core/language_models/chat_models'
import {
type OpenAIClient,
type ChatOpenAICallOptions,
type OpenAIChatInput,
type OpenAICoreRequestOptions,
ChatOpenAICompletions
} from '@langchain/openai'
import { getEnvironmentVariable } from '@langchain/core/utils/env'
type FireworksUnsupportedArgs = 'frequencyPenalty' | 'presencePenalty' | 'logitBias' | 'functions'
type FireworksUnsupportedCallOptions = 'functions' | 'function_call'
export type ChatFireworksCallOptions = Partial<Omit<ChatOpenAICallOptions, FireworksUnsupportedCallOptions>>
export type ChatFireworksParams = Partial<Omit<OpenAIChatInput, 'openAIApiKey' | FireworksUnsupportedArgs>> &
BaseChatModelParams & {
/**
* Prefer `apiKey`
*/
fireworksApiKey?: string
/**
* The Fireworks API key to use.
*/
apiKey?: string
}
export class ChatFireworks extends ChatOpenAICompletions<ChatFireworksCallOptions> {
static lc_name() {
return 'ChatFireworks'
}
_llmType() {
return 'fireworks'
}
get lc_secrets(): { [key: string]: string } | undefined {
return {
fireworksApiKey: 'FIREWORKS_API_KEY',
apiKey: 'FIREWORKS_API_KEY'
}
}
lc_serializable = true
fireworksApiKey?: string
apiKey?: string
constructor(fields?: ChatFireworksParams) {
const fireworksApiKey = fields?.apiKey || fields?.fireworksApiKey || getEnvironmentVariable('FIREWORKS_API_KEY')
if (!fireworksApiKey) {
throw new Error(
`Fireworks API key not found. Please set the FIREWORKS_API_KEY environment variable or provide the key into "fireworksApiKey"`
)
}
super({
...fields,
model: fields?.model || fields?.modelName || 'accounts/fireworks/models/llama-v3p1-8b-instruct',
apiKey: fireworksApiKey,
configuration: {
baseURL: 'https://api.fireworks.ai/inference/v1'
},
streamUsage: false
})
this.fireworksApiKey = fireworksApiKey
this.apiKey = fireworksApiKey
}
getLsParams(options: any): LangSmithParams {
const params = super.getLsParams(options)
params.ls_provider = 'fireworks'
return params
}
toJSON() {
const result = super.toJSON()
if ('kwargs' in result && typeof result.kwargs === 'object' && result.kwargs != null) {
delete result.kwargs.openai_api_key
delete result.kwargs.configuration
}
return result
}
// eslint-disable-next-line
async completionWithRetry(
request: OpenAIClient.Chat.ChatCompletionCreateParamsStreaming,
options?: OpenAICoreRequestOptions
): Promise<AsyncIterable<OpenAIClient.Chat.Completions.ChatCompletionChunk>>
// eslint-disable-next-line
async completionWithRetry(
request: OpenAIClient.Chat.ChatCompletionCreateParamsNonStreaming,
options?: OpenAICoreRequestOptions
): Promise<OpenAIClient.Chat.Completions.ChatCompletion>
/**
* Calls the Fireworks API with retry logic in case of failures.
* @param request The request to send to the Fireworks API.
* @param options Optional configuration for the API call.
* @returns The response from the Fireworks API.
*/
// eslint-disable-next-line
async completionWithRetry(
request: OpenAIClient.Chat.ChatCompletionCreateParamsStreaming | OpenAIClient.Chat.ChatCompletionCreateParamsNonStreaming,
options?: OpenAICoreRequestOptions
): Promise<AsyncIterable<OpenAIClient.Chat.Completions.ChatCompletionChunk> | OpenAIClient.Chat.Completions.ChatCompletion> {
delete request.frequency_penalty
delete request.presence_penalty
delete request.logit_bias
delete request.functions
if (request.stream === true) {
return super.completionWithRetry(request, options)
}
return super.completionWithRetry(request, options)
}
}
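
A usage sketch of the vendored class (model id and key source are illustrative; the constructor also falls back to the FIREWORKS_API_KEY environment variable):

import { ChatFireworks } from './core'

const chat = new ChatFireworks({
    apiKey: process.env.FIREWORKS_API_KEY, // or fireworksApiKey
    model: 'accounts/fireworks/models/llama-v3p1-8b-instruct',
    temperature: 0.7
})

// completionWithRetry strips params Fireworks rejects (frequency_penalty,
// presence_penalty, logit_bias, functions) before delegating to the parent class.
const res = await chat.invoke('Say hello')
console.log(res.content)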

View File

@@ -124,7 +124,10 @@ class ChatLitellm_ChatModels implements INode {
if (topP) obj.topP = parseFloat(topP)
if (timeout) obj.timeout = parseInt(timeout, 10)
if (cache) obj.cache = cache
if (apiKey) obj.openAIApiKey = apiKey
if (apiKey) {
obj.openAIApiKey = apiKey
obj.apiKey = apiKey
}
const model = new ChatOpenAI(obj)

View File

@@ -111,6 +111,7 @@ class ChatLocalAI_ChatModels implements INode {
temperature: parseFloat(temperature),
modelName,
openAIApiKey: 'sk-',
apiKey: 'sk-',
streaming: streaming ?? true
}
@@ -118,7 +119,10 @@ class ChatLocalAI_ChatModels implements INode {
if (topP) obj.topP = parseFloat(topP)
if (timeout) obj.timeout = parseInt(timeout, 10)
if (cache) obj.cache = cache
if (localAIApiKey) obj.openAIApiKey = localAIApiKey
if (localAIApiKey) {
obj.openAIApiKey = localAIApiKey
obj.apiKey = localAIApiKey
}
if (basePath) obj.configuration = { baseURL: basePath }
const model = new ChatOpenAI(obj)

View File

@@ -137,6 +137,7 @@ class ChatNvdiaNIM_ChatModels implements INode {
temperature: parseFloat(temperature),
modelName,
openAIApiKey: nvidiaNIMApiKey ?? 'sk-',
apiKey: nvidiaNIMApiKey ?? 'sk-',
streaming: streaming ?? true
}

View File

@@ -1,10 +1,11 @@
import { ChatOpenAI as LangchainChatOpenAI, ChatOpenAIFields, OpenAIClient } from '@langchain/openai'
import { ChatOpenAI as LangchainChatOpenAI, ChatOpenAIFields } from '@langchain/openai'
import { BaseCache } from '@langchain/core/caches'
import { ICommonObject, IMultiModalOption, INode, INodeData, INodeOptionsValue, INodeParams } from '../../../src/Interface'
import { getBaseClasses, getCredentialData, getCredentialParam } from '../../../src/utils'
import { ChatOpenAI } from './FlowiseChatOpenAI'
import { getModels, MODEL_TYPE } from '../../../src/modelLoader'
import { HttpsProxyAgent } from 'https-proxy-agent'
import { OpenAI as OpenAIClient } from 'openai'
class ChatOpenAI_ChatModels implements INode {
label: string
@@ -21,7 +22,7 @@ class ChatOpenAI_ChatModels implements INode {
constructor() {
this.label = 'ChatOpenAI'
this.name = 'chatOpenAI'
this.version = 8.2
this.version = 8.3
this.type = 'ChatOpenAI'
this.icon = 'openai.svg'
this.category = 'Chat Models'
@@ -176,9 +177,18 @@ class ChatOpenAI_ChatModels implements INode {
allowImageUploads: true
}
},
{
label: 'Reasoning',
description: 'Whether the model supports reasoning. Only applicable for reasoning models.',
name: 'reasoning',
type: 'boolean',
default: false,
optional: true,
additionalParams: true
},
{
label: 'Reasoning Effort',
description: 'Constrains effort on reasoning for reasoning models. Only applicable for o1 and o3 models.',
description: 'Constrains effort on reasoning for reasoning models',
name: 'reasoningEffort',
type: 'options',
options: [
@@ -195,9 +205,34 @@ class ChatOpenAI_ChatModels implements INode {
name: 'high'
}
],
default: 'medium',
optional: false,
additionalParams: true
additionalParams: true,
show: {
reasoning: true
}
},
{
label: 'Reasoning Summary',
description: `A summary of the reasoning performed by the model. This can be useful for debugging and understanding the model's reasoning process`,
name: 'reasoningSummary',
type: 'options',
options: [
{
label: 'Auto',
name: 'auto'
},
{
label: 'Concise',
name: 'concise'
},
{
label: 'Detailed',
name: 'detailed'
}
],
additionalParams: true,
show: {
reasoning: true
}
}
]
}
@@ -223,7 +258,8 @@ class ChatOpenAI_ChatModels implements INode {
const basePath = nodeData.inputs?.basepath as string
const proxyUrl = nodeData.inputs?.proxyUrl as string
const baseOptions = nodeData.inputs?.baseOptions
const reasoningEffort = nodeData.inputs?.reasoningEffort as OpenAIClient.Chat.ChatCompletionReasoningEffort
const reasoningEffort = nodeData.inputs?.reasoningEffort as OpenAIClient.ReasoningEffort | null
const reasoningSummary = nodeData.inputs?.reasoningSummary as 'auto' | 'concise' | 'detailed' | null
const allowImageUploads = nodeData.inputs?.allowImageUploads as boolean
const imageResolution = nodeData.inputs?.imageResolution as string
@@ -240,14 +276,22 @@ class ChatOpenAI_ChatModels implements INode {
temperature: parseFloat(temperature),
modelName,
openAIApiKey,
apiKey: openAIApiKey,
streaming: streaming ?? true
}
if (modelName.includes('o3') || modelName.includes('o1')) {
delete obj.temperature
}
if ((modelName.includes('o1') || modelName.includes('o3')) && reasoningEffort) {
obj.reasoningEffort = reasoningEffort
if (modelName.includes('o1') || modelName.includes('o3')) {
const reasoning: OpenAIClient.Reasoning = {}
if (reasoningEffort) {
reasoning.effort = reasoningEffort
}
if (reasoningSummary) {
reasoning.summary = reasoningSummary
}
obj.reasoning = reasoning
}
if (maxTokens) obj.maxTokens = parseInt(maxTokens, 10)
if (topP) obj.topP = parseFloat(topP)

View File

@@ -5,6 +5,7 @@ export class ChatOpenAI extends LangchainChatOpenAI implements IVisionChatModal
configuredModel: string
configuredMaxToken?: number
multiModalOption: IMultiModalOption
builtInTools: Record<string, any>[] = []
id: string
constructor(id: string, fields?: ChatOpenAIFields) {
@@ -15,7 +16,7 @@ export class ChatOpenAI extends LangchainChatOpenAI implements IVisionChatModal
}
revertToOriginalModel(): void {
this.modelName = this.configuredModel
this.model = this.configuredModel
this.maxTokens = this.configuredMaxToken
}
@@ -26,4 +27,8 @@ export class ChatOpenAI extends LangchainChatOpenAI implements IVisionChatModal
setVisionModel(): void {
// pass
}
addBuiltInTools(builtInTool: Record<string, any>): void {
this.builtInTools.push(builtInTool)
}
}

View File

@@ -137,6 +137,7 @@ class ChatOpenAICustom_ChatModels implements INode {
temperature: parseFloat(temperature),
modelName,
openAIApiKey,
apiKey: openAIApiKey,
streaming: streaming ?? true
}

View File

@@ -138,6 +138,7 @@ class ChatOpenRouter_ChatModels implements INode {
temperature: parseFloat(temperature),
modelName,
openAIApiKey: openRouterApiKey,
apiKey: openRouterApiKey,
streaming: streaming ?? true
}

View File

@@ -153,6 +153,7 @@ class Deepseek_ChatModels implements INode {
temperature: parseFloat(temperature),
modelName,
openAIApiKey,
apiKey: openAIApiKey,
streaming: streaming ?? true
}

View File

@@ -41,7 +41,7 @@
"@langchain/aws": "^0.1.11",
"@langchain/baidu-qianfan": "^0.1.0",
"@langchain/cohere": "^0.0.7",
"@langchain/community": "^0.3.29",
"@langchain/community": "^0.3.47",
"@langchain/core": "0.3.61",
"@langchain/exa": "^0.0.5",
"@langchain/google-genai": "0.2.3",
@@ -51,7 +51,7 @@
"@langchain/mistralai": "^0.2.0",
"@langchain/mongodb": "^0.0.1",
"@langchain/ollama": "0.2.0",
"@langchain/openai": "0.5.6",
"@langchain/openai": "0.6.3",
"@langchain/pinecone": "^0.1.3",
"@langchain/qdrant": "^0.0.5",
"@langchain/weaviate": "^0.0.1",

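These bumps line up with the import changes above: the node files now pull OpenAI as OpenAIClient straight from the openai SDK instead of the @langchain/openai re-export (the vendored Fireworks core.ts still uses the type-only re-export). Schematically:

// Before: namespace re-exported by @langchain/openai
// import { OpenAIClient } from '@langchain/openai'
// After: take the SDK namespace directly
import { OpenAI as OpenAIClient } from 'openai'
type Effort = OpenAIClient.ReasoningEffort // e.g. 'low' | 'medium' | 'high' | null
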
View File

@@ -303,7 +303,7 @@
"baseOptions": "",
"allowImageUploads": "",
"imageResolution": "low",
"reasoningEffort": "medium",
"reasoningEffort": "",
"conditionAgentModel": "chatOpenAI"
}
},
@@ -642,7 +642,7 @@
"baseOptions": "",
"allowImageUploads": "",
"imageResolution": "low",
"reasoningEffort": "medium",
"reasoningEffort": "",
"llmModel": "chatOpenAI"
}
},
@@ -964,7 +964,7 @@
"baseOptions": "",
"allowImageUploads": "",
"imageResolution": "low",
"reasoningEffort": "medium",
"reasoningEffort": "",
"llmModel": "chatOpenAI"
}
},
@@ -1210,7 +1210,7 @@
"baseOptions": "",
"allowImageUploads": "",
"imageResolution": "low",
"reasoningEffort": "medium",
"reasoningEffort": "",
"conditionAgentModel": "chatOpenAI"
}
},
@@ -1539,7 +1539,7 @@
"baseOptions": "",
"allowImageUploads": "",
"imageResolution": "low",
"reasoningEffort": "medium",
"reasoningEffort": "",
"llmModel": "chatOpenAI"
}
},
@@ -1871,7 +1871,7 @@
"baseOptions": "",
"allowImageUploads": "",
"imageResolution": "low",
"reasoningEffort": "medium",
"reasoningEffort": "",
"llmModel": "chatOpenAI"
}
},

View File

@@ -300,7 +300,7 @@
"proxyUrl": "",
"baseOptions": "",
"allowImageUploads": true,
"reasoningEffort": "medium",
"reasoningEffort": "",
"conditionAgentModel": "chatOpenAI"
}
},

View File

@@ -497,7 +497,7 @@
"baseOptions": "",
"allowImageUploads": "",
"imageResolution": "low",
"reasoningEffort": "medium",
"reasoningEffort": "",
"llmModel": "chatOpenAI"
}
},

View File

@@ -554,7 +554,7 @@
"baseOptions": "",
"allowImageUploads": "",
"imageResolution": "low",
"reasoningEffort": "medium",
"reasoningEffort": "",
"agentModel": "chatOpenAI"
}
},
@@ -683,7 +683,7 @@
"baseOptions": "",
"allowImageUploads": "",
"imageResolution": "low",
"reasoningEffort": "medium",
"reasoningEffort": "",
"humanInputModel": "chatOpenAI"
},
"humanInputDescription": "<p>Are you sure you want to proceed?</p>"
@@ -1208,7 +1208,7 @@
"baseOptions": "",
"allowImageUploads": "",
"imageResolution": "low",
"reasoningEffort": "medium",
"reasoningEffort": "",
"llmModel": "chatOpenAI"
}
},

View File

@@ -582,7 +582,7 @@
"baseOptions": "",
"allowImageUploads": "",
"imageResolution": "low",
"reasoningEffort": "medium",
"reasoningEffort": "",
"agentModel": "chatOpenAI"
}
},

View File

@@ -1217,7 +1217,7 @@
"baseOptions": "",
"allowImageUploads": "",
"imageResolution": "low",
"reasoningEffort": "medium",
"reasoningEffort": "",
"agentModel": "chatOpenAI"
}
},

View File

@@ -743,7 +743,7 @@
"baseOptions": "",
"allowImageUploads": "",
"imageResolution": "low",
"reasoningEffort": "medium",
"reasoningEffort": "",
"conditionAgentModel": "chatOpenAI"
}
},

View File

@@ -533,7 +533,7 @@
"baseOptions": "",
"allowImageUploads": "",
"imageResolution": "low",
"reasoningEffort": "medium",
"reasoningEffort": "",
"agentModel": "chatOpenAI"
}
},

View File

@@ -818,7 +818,7 @@
"baseOptions": "",
"allowImageUploads": "",
"imageResolution": "low",
"reasoningEffort": "medium",
"reasoningEffort": "",
"agentModel": "chatOpenAI"
}
},
@@ -1168,7 +1168,7 @@
"baseOptions": "",
"allowImageUploads": "",
"imageResolution": "low",
"reasoningEffort": "medium",
"reasoningEffort": "",
"agentModel": "chatOpenAI"
}
},
@@ -1975,7 +1975,7 @@
"proxyUrl": "",
"baseOptions": "",
"allowImageUploads": "",
"reasoningEffort": "medium",
"reasoningEffort": "",
"llmModel": "chatOpenAI"
}
},

View File

@@ -493,7 +493,7 @@
"baseOptions": "",
"allowImageUploads": "",
"imageResolution": "low",
"reasoningEffort": "medium",
"reasoningEffort": "",
"llmModel": "chatOpenAI"
}
},

View File

@@ -394,7 +394,7 @@
"baseOptions": "",
"allowImageUploads": true,
"imageResolution": "low",
"reasoningEffort": "medium"
"reasoningEffort": ""
},
"outputAnchors": [
{

File diff suppressed because one or more lines are too long