Skip to content

Commit 1b48d56

Browse files
Add missing inputs (#3726)
* Add missing inputs
* Change version
1 parent bb15e5c commit 1b48d56

File tree

1 file changed

+64
-1
lines changed

1 file changed

+64
-1
lines changed

packages/components/nodes/chatmodels/ChatIBMWatsonx/ChatIBMWatsonx.ts

+64-1
Original file line numberDiff line numberDiff line change
@@ -27,7 +27,7 @@ class ChatIBMWatsonx_ChatModels implements INode {
2727
constructor() {
2828
this.label = 'ChatIBMWatsonx'
2929
this.name = 'chatIBMWatsonx'
30-
this.version = 1.0
30+
this.version = 2.0
3131
this.type = 'ChatIBMWatsonx'
3232
this.icon = 'ibm.png'
3333
this.category = 'Chat Models'
@@ -75,6 +75,59 @@ class ChatIBMWatsonx_ChatModels implements INode {
7575
step: 1,
7676
optional: true,
7777
additionalParams: true
78+
},
79+
{
80+
label: 'Frequency Penalty',
81+
name: 'frequencyPenalty',
82+
type: 'number',
83+
step: 1,
84+
optional: true,
85+
additionalParams: true,
86+
description:
87+
"Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim."
88+
},
89+
{
90+
label: 'Log Probs',
91+
name: 'logprobs',
92+
type: 'boolean',
93+
default: false,
94+
optional: true,
95+
additionalParams: true,
96+
description:
97+
'Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the content of message.'
98+
},
99+
{
100+
label: 'N',
101+
name: 'n',
102+
type: 'number',
103+
step: 1,
104+
default: 1,
105+
optional: true,
106+
additionalParams: true,
107+
description:
108+
'How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep n as 1 to minimize costs.'
109+
},
110+
{
111+
label: 'Presence Penalty',
112+
name: 'presencePenalty',
113+
type: 'number',
114+
step: 1,
115+
default: 1,
116+
optional: true,
117+
additionalParams: true,
118+
description:
119+
"Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics."
120+
},
121+
{
122+
label: 'Top P',
123+
name: 'topP',
124+
type: 'number',
125+
step: 0.1,
126+
default: 0.1,
127+
optional: true,
128+
additionalParams: true,
129+
description:
130+
'An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.'
78131
}
79132
]
80133
}
@@ -84,6 +137,11 @@ class ChatIBMWatsonx_ChatModels implements INode {
84137
const temperature = nodeData.inputs?.temperature as string
85138
const modelName = nodeData.inputs?.modelName as string
86139
const maxTokens = nodeData.inputs?.maxTokens as string
140+
const frequencyPenalty = nodeData.inputs?.frequencyPenalty as string
141+
const logprobs = nodeData.inputs?.logprobs as boolean
142+
const n = nodeData.inputs?.n as string
143+
const presencePenalty = nodeData.inputs?.presencePenalty as string
144+
const topP = nodeData.inputs?.topP as string
87145
const streaming = nodeData.inputs?.streaming as boolean
88146

89147
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
@@ -111,6 +169,11 @@ class ChatIBMWatsonx_ChatModels implements INode {
111169
}
112170
if (cache) obj.cache = cache
113171
if (maxTokens) obj.maxTokens = parseInt(maxTokens, 10)
172+
if (frequencyPenalty) obj.frequencyPenalty = parseFloat(frequencyPenalty)
173+
if (logprobs) obj.logprobs = logprobs
174+
if (n) obj.n = parseInt(n, 10)
175+
if (presencePenalty) obj.presencePenalty = parseFloat(presencePenalty)
176+
if (topP) obj.topP = parseFloat(topP)
114177

115178
const model = new ChatWatsonx(obj)
116179
return model

0 commit comments

Comments
 (0)