Skip to content
This repository was archived by the owner on Nov 13, 2025. It is now read-only.

Commit 2496a98

Browse files
author
Tomasz Juszczak
committed
Added defaults and fixed message formatting for some elements
1 parent 6b5cd46 commit 2496a98

File tree

13 files changed

+287
-154
lines changed

13 files changed

+287
-154
lines changed

Readme.md

Lines changed: 6 additions & 59 deletions
Original file line numberDiff line numberDiff line change
@@ -90,6 +90,12 @@ You can start your message with the name of the model without parameters eg:
9090
9191
Will use gpt-3.5-turbo instead of the default gpt-4 model. See GptClient for more aliases.
9292

93+
### Built-in Parameters
94+
There are some built in parameters. Use `/gpt help` to see them. You can modify the parameters in `appsettings.json` file,
95+
or per request.
96+
97+
See GptDefaults.cs for more information about the defaults or `appsettings.Example.json`.
98+
9399
### Custom parameters
94100
You can add your own custom parameters to the bot to minimize the typing for each repeated request. To do so, add its definition
95101
to the `GptCommands` section in `appsettings.json`. For example:
@@ -115,65 +121,6 @@ usage:
115121

116122
> @GPT-4 -refactor `public class Foo { public void Bar() { Console.WriteLine("Hello World"); } }`
117123

118-
### Parameters
119-
**FAQ: Fine-tuning requests using parameters**
120-
121-
**Q1: How do I use the parameters?**
122-
123-
A1: You can use these parameters at the beginning of your request to fine-tune the model's output. To utilize them, pass
124-
the desired parameter name followed by its value. For example: `-maxTokens 50`. The request should be followed by your
125-
output. eg:
126-
127-
> @GPT-4 -maxTokens 50 How are you today?
128-
129-
Please note that the parameters should be separated by a space. Should be at the beginning of the request. Right after
130-
the bot's name. And should be followed by a space. For parameters that have spaces in them `"`.
131-
132-
**Q2: What does the `-maxTokens` parameter do?**
133-
134-
A2: The `-maxTokens` parameter limits the number of tokens (words or word segments) in the generated output. You can set
135-
this value by passing an integer to the parameter. Example usage: `-maxTokens 100`. Default is 2048 tokens. GPT-3.5 has a
136-
limit of 4000 and GPT-4 has a limit of 8000 tokens.
137-
138-
**Q3: How does the `-temperature` parameter work?**
139-
140-
A3: The `-temperature` parameter controls the randomness of the model's output. Higher values (e.g., 1.0) make the output
141-
more random, while lower values (e.g., 0.1) make it more deterministic. You can set this value by passing a float to the
142-
parameter. Example usage: `-temperature 0.7`. Default is 0.7
143-
144-
**Q4: What is the `-topP` parameter?**
145-
146-
A4: The `-topP` parameter (also known as "nucleus sampling") filters the model's token choices based on cumulative
147-
probability. You can set this value by passing a float between 0 and 1 to the parameter. Lower values make the output
148-
more focused, while higher values allow for more diversity. Example usage: `-topP 0.9`. Default is 1.
149-
150-
**Q5: How do I use the `-presencePenalty` parameter?**
151-
152-
A5: The `-presencePenalty` parameter penalizes tokens that are already present in the generated text. A higher value
153-
discourages repetition, while a lower value allows for more repetition. You can set this value by passing a float to the
154-
parameter. Example usage: `-presencePenalty 0.5`. Default is 0.
155-
156-
**Q6: What does the `-frequencyPenalty` parameter do?**
157-
158-
A6: The `-frequencyPenalty` parameter discourages the use of tokens that appear frequently in the training data. A higher
159-
value will make the output more creative, while a lower value will make it more common. You can set this value by
160-
passing a float to the parameter. Example usage: `-frequencyPenalty 0.3`. Default is 0.
161-
162-
**Q7: What is the -model parameter?**
163-
164-
A7: The -model parameter allows you to specify the name of the model you want to use for generating the output. You can
165-
set this value by passing a string to the parameter. Example usage: `-model "gpt-3.5-turbo"`. Default is gpt-4.
166-
Available models:
167-
- gpt-4
168-
- gpt-3.5-turbo
169-
170-
**Q8: How do I use the -system parameter?**
171-
172-
A8: The -system parameter lets you specify the system message that the model should use. The default message is "You are
173-
a helpful assistant. Today is {Current Date}", but you can use anything you want. Eg
174-
> @GPT-4 -system "You are a Math tutor, your task is to help but not to provide answers so that the student can think
175-
for themselves." I don't know how much is 37 * 12, please give me an answer.
176-
177124
## Docker
178125

179126
You can start the docker container with the following command:

Slack-GPT-Socket/ApiSettings.cs

Lines changed: 0 additions & 58 deletions
This file was deleted.

Slack-GPT-Socket/GptApi/GptClient.cs

Lines changed: 51 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,7 @@ public class GptClient
1515
private readonly OpenAIClient _api;
1616
private readonly GptCustomCommands _customCommands;
1717
private readonly ILogger _log;
18+
private readonly GptDefaults _gptDefaults;
1819

1920
/// <summary>
2021
/// Initializes static members of the <see cref="GptClient" /> class.
@@ -34,7 +35,7 @@ static GptClient()
3435
/// <param name="customCommands">Custom commands handler</param>
3536
/// <param name="log">The logger instance.</param>
3637
/// <param name="settings">The API settings.</param>
37-
public GptClient(GptCustomCommands customCommands, ILogger<GptClient> log, IOptions<ApiSettings> settings)
38+
public GptClient(GptCustomCommands customCommands, ILogger<GptClient> log, IOptions<GptDefaults> gptDefaults, IOptions<ApiSettings> settings)
3839
{
3940
var httpClient = new HttpClient()
4041
{
@@ -43,6 +44,7 @@ public GptClient(GptCustomCommands customCommands, ILogger<GptClient> log, IOpti
4344
_api = new OpenAIClient(settings.Value.OpenAIKey, OpenAIClientSettings.Default, httpClient);
4445
_customCommands = customCommands;
4546
_log = log;
47+
_gptDefaults = gptDefaults.Value;
4648
}
4749

4850
/// <summary>
@@ -55,18 +57,16 @@ public async Task<GptResponse> GeneratePrompt(List<WritableChatPrompt> chatPromp
5557
{
5658
// get the last prompt
5759
var userPrompt = chatPrompts.Last(chatPrompt => chatPrompt.Role == "user");
58-
var prompt = new GptRequest
59-
{
60-
UserId = userId,
61-
Prompt = userPrompt.Content
62-
};
60+
var prompt = GptRequest.Default(_gptDefaults);
61+
prompt.UserId = userId;
62+
prompt.Prompt = userPrompt.Content;
6363

6464
var chatRequest = ParseRequest(chatPrompts, prompt);
6565

6666
try
6767
{
6868
var result = await _api.ChatEndpoint.GetCompletionAsync(chatRequest);
69-
_log.LogInformation("GPT-3 response: {Response}", result.FirstChoice);
69+
_log.LogInformation("GPT response: {Response}", result.FirstChoice);
7070

7171
return new GptResponse
7272
{
@@ -96,7 +96,8 @@ private ChatRequest ParseRequest(List<WritableChatPrompt> chatPrompts, GptReques
9696
{
9797
foreach (var chatPrompt in chatPrompts)
9898
{
99-
var content = new GptRequest { Prompt = chatPrompt.Content };
99+
var content = GptRequest.Default(_gptDefaults);
100+
content.Prompt = chatPrompt.Content;
100101
ResolveModel(ref content);
101102
ResolveParameters(ref content);
102103
chatPrompt.Content = content.Prompt;
@@ -106,12 +107,12 @@ private ChatRequest ParseRequest(List<WritableChatPrompt> chatPrompts, GptReques
106107
ResolveParameters(ref request);
107108

108109
WritableChatPrompt system;
109-
if (request.System != null)
110-
system = new WritableChatPrompt("system", request.System);
110+
if (request.System.ShoudReplace)
111+
system = new WritableChatPrompt("system", request.System.Build());
111112
else
112113
{
113114
system = new WritableChatPrompt("system",
114-
$"You are a helpful assistant. Today is {DateTime.Now:yyyy-MM-ddTHH:mm:ssZ}");
115+
$"You are a helpful assistant. Today is {DateTime.Now:yyyy-MM-ddTHH:mm:ssZ} " + request.System.Build());
115116
}
116117

117118
var requestPrompts = new List<WritableChatPrompt>();
@@ -162,9 +163,13 @@ public static void ResolveModel(ref GptRequest input)
162163
}
163164
}
164165

165-
if (!modelFound)
166+
if (modelFound) return;
167+
168+
var inputModel = input.Model;
169+
// check if current model is valid
170+
if (Models.All(modelInfo => modelInfo.Model != inputModel))
166171
{
167-
// if no match is found, set model property of input to the model of the first item in the models array
172+
// if not, set model property of input to the model of the first item in the models array
168173
input.Model = Models[0].Model;
169174
}
170175
}
@@ -187,14 +192,7 @@ public void ResolveParameters(ref GptRequest input)
187192
var paramValueTrim = match.Groups[2]?.Value.Trim() ?? string.Empty;
188193
var paramValue = paramValueTrim.Trim('"');
189194

190-
var paramNameIndex = inputPrompt.IndexOf(paramName, StringComparison.InvariantCultureIgnoreCase);
191-
var paramEndIndex = paramNameIndex + paramName.Length + paramValueTrim.Length + 2;
192-
193-
if (lastIndex + 5 < paramNameIndex) break;
194-
195-
lastIndex = paramEndIndex;
196-
input.Prompt = input.Prompt.Replace(paramName + " " + paramValueTrim, "").Trim();
197-
195+
bool hasValue = true;
198196
try
199197
{
200198
switch (paramName)
@@ -218,19 +216,49 @@ public void ResolveParameters(ref GptRequest input)
218216
input.Model = paramValue.ToLowerInvariant();
219217
break;
220218
case "-system":
221-
input.System = paramValue;
219+
input.System.Replace(paramValue);
222220
break;
223221
default:
224222
if (_customCommands.TryResolveCommand(paramName, out var prompt))
225223
{
226-
input.Prompt = prompt + "\n" + input.Prompt;
224+
if (prompt.AsSystem)
225+
{
226+
input.System.Append(prompt!.Prompt);
227+
}
228+
else
229+
{
230+
input.Prompt = prompt!.Prompt + "\n" + input.Prompt;
231+
}
232+
hasValue = false;
227233
}
228234
else
229235
{
230236
Console.WriteLine($"Unrecognized parameter: {paramName}");
231237
}
232238
break;
233239
}
240+
241+
// Trim the input Prompt to remove the parameter,
242+
// update last index to check if we've reached the end of the parameters
243+
int paramNameIndex = inputPrompt.IndexOf(paramName, StringComparison.InvariantCultureIgnoreCase);
244+
245+
int paramEndIndex;
246+
string searchString;
247+
if (hasValue)
248+
{
249+
paramEndIndex = paramNameIndex + paramName.Length + paramValueTrim.Length + 2;
250+
searchString = paramName + " " + paramValueTrim;
251+
}
252+
else
253+
{
254+
paramEndIndex = paramNameIndex + paramName.Length + 2;
255+
searchString = paramName + " ";
256+
}
257+
258+
if (lastIndex + 5 < paramNameIndex) break;
259+
260+
lastIndex = paramEndIndex;
261+
input.Prompt = input.Prompt.Replace(searchString, "").Trim();
234262
}
235263
catch (Exception e)
236264
{

Slack-GPT-Socket/GptApi/GptCustomCommands.cs

Lines changed: 7 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -21,18 +21,17 @@ public GptCustomCommands(IOptionsMonitor<GptCommands> gptCommands)
2121
/// Checks if the command is a custom command and returns the prompt if it is.
2222
/// </summary>
2323
/// <param name="command">Command to resolve</param>
24-
/// <param name="prompt">Prompt to add to the request</param>
24+
/// <param name="result">GptCommand object</param>
2525
/// <returns>True if found</returns>
26-
public bool TryResolveCommand(string command, out string prompt)
26+
public bool TryResolveCommand(string command, out GptCommand? result)
2727
{
28-
prompt = string.Empty;
28+
result = null;
2929
foreach (var gptCommand in _gptCommands.CurrentValue.Commands)
3030
{
31-
if (gptCommand.Command == command)
32-
{
33-
prompt = gptCommand.Prompt;
34-
return true;
35-
}
31+
if (gptCommand.Command != command) continue;
32+
33+
result = gptCommand;
34+
return true;
3635
}
3736

3837
return false;

Slack-GPT-Socket/GptApi/GptRequest.cs

Lines changed: 31 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,36 @@
55
/// </summary>
66
public class GptRequest
77
{
8+
/// <summary>
9+
/// Creates a new GPT request with default values.
10+
/// </summary>
11+
/// <param name="defaults"></param>
12+
/// <returns></returns>
13+
public static GptRequest Default(GptDefaults defaults)
14+
{
15+
var request = new GptRequest();
16+
if (defaults.MaxTokens.HasValue)
17+
request.MaxTokens = defaults.MaxTokens.Value;
18+
if (defaults.Temperature.HasValue)
19+
request.Temperature = defaults.Temperature.Value;
20+
if (defaults.TopP.HasValue)
21+
request.TopP = defaults.TopP.Value;
22+
if (defaults.PresencePenalty.HasValue)
23+
request.PresencePenalty = defaults.PresencePenalty.Value;
24+
if (defaults.FrequencyPenalty.HasValue)
25+
request.FrequencyPenalty = defaults.FrequencyPenalty.Value;
26+
if (defaults.Model != null)
27+
request.Model = defaults.Model;
28+
if (defaults.System != null)
29+
request.System.Replace(defaults.System);
30+
return request;
31+
}
32+
33+
/// <summary>
34+
/// Hide the default constructor.
35+
/// </summary>
36+
private GptRequest(){}
37+
838
/// <summary>
939
/// Gets or sets the user identifier.
1040
/// </summary>
@@ -48,5 +78,5 @@ public class GptRequest
4878
/// <summary>
4979
/// Gets or sets the system identifier (optional).
5080
/// </summary>
51-
public string? System { get; set; }
81+
public GptSystemMessageBuilder System { get; set; } = new();
5282
}

0 commit comments

Comments
 (0)