Skip to content

Commit e987da8

Browse files
committed
Updated NuGet packages and added gpt-4-turbo-preview to the model options
1 parent 9e7fd4c commit e987da8

File tree

6 files changed

+9
-7
lines changed

6 files changed

+9
-7
lines changed

.gitignore

+3-1
Original file line number | Diff line number | Diff line change
@@ -287,7 +287,9 @@ node_modules/
287287
# Visual Studio LightSwitch build output
288288
**/*.HTMLClient/GeneratedArtifacts
289289
**/*.DesktopClient/GeneratedArtifacts
290-
**/*.DesktopClient/ModelManifest.xml
290+
**/*.DesktopClient/ModelManifest.xml
291+
292+
291293
**/*.Server/GeneratedArtifacts
292294
**/*.Server/ModelManifest.xml
293295
_Pvt_Extensions

Chat/Discord/InstructionGPT.cs

+1-1
Original file line number | Diff line number | Diff line change
@@ -162,7 +162,7 @@ private async Task<RestGlobalCommand> CreateGlobalCommand()
162162
new SlashCommandOptionBuilder().WithName("max-tokens").WithDescription("Set the maximum tokens")
163163
.WithType(ApplicationCommandOptionType.Integer).WithMinValue(50),
164164
new SlashCommandOptionBuilder().WithName("model").WithDescription("Set the model")
165-
.WithType(ApplicationCommandOptionType.String).AddChoice("gpt-4", "gpt-4").AddChoice("gpt-3.5-turbo-16k", "gpt-3.5-turbo-16k")
165+
.WithType(ApplicationCommandOptionType.String).AddChoice("gpt-4-turbo-preview", "gpt-4-turbo-preview").AddChoice("gpt-4", "gpt-4").AddChoice("gpt-3.5-turbo-16k", "gpt-3.5-turbo-16k")
166166
}
167167
});
168168

Dockerfile.discord

+1-1
Original file line number | Diff line number | Diff line change
@@ -9,4 +9,4 @@ RUN yum install -y libicu
99
COPY --from=build /app/publish .
1010
COPY --from=build /src/appSettings.json .
1111
ENV MODEL=""
12-
ENTRYPOINT ./gpt discord --chunk-size=1536 --max-tokens=4096 --model=gpt-4
12+
ENTRYPOINT ./gpt discord --chunk-size=1536 --max-tokens=4096 --model=gpt-4-turbo-preview

OpenAILogic.cs

+2-2
Original file line number | Diff line number | Diff line change
@@ -53,9 +53,9 @@ public async Task<List<double>> GetEmbeddingForPrompt(string prompt)
5353
.Data.First().Embedding;
5454
}
5555

56-
public static int CountTokens(string prompt, string modelName)
56+
public static async Task<int> CountTokensAsync(string prompt, string modelName)
5757
{
58-
var tokenizer = TokenizerBuilder.CreateByModelName(modelName);
58+
var tokenizer = await TokenizerBuilder.CreateByModelNameAsync(modelName);
5959
var encoded = tokenizer.Encode(prompt, new List<string>());
6060
return encoded.Count;
6161
}

Program.cs

+1-1
Original file line number | Diff line number | Diff line change
@@ -30,7 +30,7 @@ static async Task Main(string[] args)
3030
var configOption = new Option<string>("--config", () => "appSettings.json", "The path to the appSettings.json config file");
3131

3232
// Add the rest of the available fields as command line parameters
33-
var modelOption = new Option<string>("--model", () => "gpt-3.5-turbo-16k", "The model ID to use.");
33+
var modelOption = new Option<string>("--model", () => "gpt-4-turbo-preview", "The model ID to use.");
3434
var maxTokensOption = new Option<int>("--max-tokens", () => 3584, "The maximum number of tokens to generate in the completion.");
3535
var temperatureOption = new Option<double>("--temperature", "The sampling temperature to use, between 0 and 2");
3636
var topPOption = new Option<double>("--top-p", "The value for nucleus sampling");

Properties/launchSettings.json

+1-1
Original file line number | Diff line number | Diff line change
@@ -2,7 +2,7 @@
22
"profiles": {
33
"GPT-CLI": {
44
"commandName": "Project",
5-
"commandLineArgs": "discord --chunk-size=1536 --max-tokens=8000 --max-chat-history-length=3072 --model=gpt-4",
5+
"commandLineArgs": "discord --chunk-size=1536 --max-tokens=8000 --max-chat-history-length=3072 --model=gpt-4-turbo-preview",
66
"workingDirectory": "C:\\projects\\GPT-CLI\\"
77
}
88
}

0 commit comments

Comments (0)