Skip to content

Commit 19824bf

Browse files
author
Martin Andrews
committed
Moved API key helper text, and fixed linter complaints
1 parent e5167f3 commit 19824bf

File tree

2 files changed

+32
-29
lines changed

2 files changed

+32
-29
lines changed

packages/components/credentials/GoogleMakerSuite.credential.ts

+3
Original file line numberDiff line numberDiff line change
@@ -4,12 +4,15 @@ class GoogleMakerSuite implements INodeCredential {
44
label: string
55
name: string
66
version: number
7+
description: string
78
inputs: INodeParams[]
89

910
constructor() {
1011
this.label = 'Google MakerSuite'
1112
this.name = 'googleMakerSuite'
1213
this.version = 1.0
14+
this.description =
15+
'Use the <a target="_blank" href="https://makersuite.google.com/app/apikey">Google MakerSuite API credential site</a> to get this key.'
1316
this.inputs = [
1417
{
1518
label: 'MakerSuite API Key',

packages/components/nodes/llms/GooglePaLM/GooglePaLM.ts

+29-29
Original file line numberDiff line numberDiff line change
@@ -27,12 +27,9 @@ class GooglePaLM_LLMs implements INode {
2727
label: 'Connect Credential',
2828
name: 'credential',
2929
type: 'credential',
30-
credentialNames: ['googleMakerSuite'],
31-
description:
32-
'Google MakerSuite API credential. Get this from https://makersuite.google.com/app/apikey'
33-
30+
credentialNames: ['googleMakerSuite']
3431
}
35-
this.inputs = [
32+
this.inputs = [
3633
{
3734
label: 'Model Name',
3835
name: 'modelName',
@@ -52,12 +49,13 @@ class GooglePaLM_LLMs implements INode {
5249
type: 'number',
5350
step: 0.1,
5451
default: 0.7,
55-
optional: true,
56-
description: "Controls the randomness of the output.\n"+
57-
"Values can range from [0.0,1.0], inclusive. A value closer to 1.0 "+
58-
"will produce responses that are more varied and creative, while"+
59-
"a value closer to 0.0 will typically result in more straightforward"+
60-
"responses from the model."
52+
optional: true,
53+
description:
54+
'Controls the randomness of the output.\n' +
55+
'Values can range from [0.0,1.0], inclusive. A value closer to 1.0 ' +
56+
'will produce responses that are more varied and creative, while ' +
57+
'a value closer to 0.0 will typically result in more straightforward ' +
58+
'responses from the model.'
6159
},
6260
{
6361
label: 'Max Output Tokens',
@@ -66,21 +64,22 @@ class GooglePaLM_LLMs implements INode {
6664
step: 1,
6765
optional: true,
6866
additionalParams: true,
69-
description: "Maximum number of tokens to generate in the completion."
67+
description: 'Maximum number of tokens to generate in the completion.'
7068
},
7169
{
7270
label: 'Top Probability',
7371
name: 'topP',
7472
type: 'number',
7573
step: 0.1,
7674
optional: true,
77-
additionalParams: true,
78-
description: "Top-p changes how the model selects tokens for output.\n"+
79-
"Tokens are selected from most probable to least until "+
80-
"the sum of their probabilities equals the top-p value.\n"+
81-
"For example, if tokens A, B, and C have a probability of .3, .2, and .1 "+
82-
"and the top-p value is .5, then the model will select either A or B "+
83-
"as the next token (using temperature)."
75+
additionalParams: true,
76+
description:
77+
'Top-p changes how the model selects tokens for output.\n' +
78+
'Tokens are selected from most probable to least until ' +
79+
'the sum of their probabilities equals the top-p value.\n' +
80+
'For example, if tokens A, B, and C have a probability of .3, .2, and .1 ' +
81+
'and the top-p value is .5, then the model will select either A or B ' +
82+
'as the next token (using temperature).'
8483
},
8584
{
8685
label: 'Top-k',
@@ -89,11 +88,12 @@ class GooglePaLM_LLMs implements INode {
8988
step: 1,
9089
optional: true,
9190
additionalParams: true,
92-
description: "Top-k changes how the model selects tokens for output.\n"+
93-
"A top-k of 1 means the selected token is the most probable among "+
94-
"all tokens in the model’s vocabulary (also called greedy decoding), "+
95-
"while a top-k of 3 means that the next token is selected from "+
96-
"among the 3 most probable tokens (using temperature)."
91+
description:
92+
'Top-k changes how the model selects tokens for output.\n' +
93+
'A top-k of 1 means the selected token is the most probable among ' +
94+
'all tokens in the model vocabulary (also called greedy decoding), ' +
95+
'while a top-k of 3 means that the next token is selected from ' +
96+
'among the 3 most probable tokens (using temperature).'
9797
},
9898
{
9999
label: 'Stop Sequences',
@@ -102,11 +102,11 @@ class GooglePaLM_LLMs implements INode {
102102
optional: true,
103103
additionalParams: true
104104
//default: { list:[] },
105-
//description:
106-
// "The 'list' field should contain a list of character strings (up to 5) that will stop output generation.\n"+
107-
// " * If specified, the API will stop at the first appearance of a stop sequence.\n"+
108-
// "Note: The stop sequence will not be included as part of the response."
109-
}
105+
//description:
106+
// 'The "list" field should contain a list of character strings (up to 5) that will stop output generation.\n' +
107+
// ' * If specified, the API will stop at the first appearance of a stop sequence.\n' +
108+
// 'Note: The stop sequence will not be included as part of the response.'
109+
}
110110
/*
111111
{
112112
label: 'Safety Settings',

0 commit comments

Comments (0)