Skip to content

Commit 61d631c

Browse files
committed
Require messages param in ChatEndpoint
1 parent d026255 commit 61d631c

File tree

1 file changed

+2
-2
lines changed

1 file changed

+2
-2
lines changed

OpenAI_API/Chat/ChatEndpoint.cs

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -66,7 +66,7 @@ public Task<ChatResult> CreateChatAsync(ChatRequest request, int numOutputs = 5)
6666
/// <param name="logitBias">Maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.</param>
6767
/// <param name="stopSequences">One or more sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.</param>
6868
/// <returns>Asynchronously returns the completion result. Look in its <see cref="ChatResult.Choices"/> property for the results.</returns>
69-
public Task<ChatResult> CreateChatAsync(IEnumerable<ChatMessage> messages = null,
69+
public Task<ChatResult> CreateChatAsync(IEnumerable<ChatMessage> messages,
7070
Model model = null,
7171
double? temperature = null,
7272
double? top_p = null,
@@ -168,7 +168,7 @@ public IAsyncEnumerable<ChatResult> StreamChatEnumerableAsync(ChatRequest reques
168168
/// <param name="logitBias">Maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.</param>
169169
/// <param name="stopSequences">One or more sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.</param>
170170
/// <returns>An async enumerable with each of the results as they come in. See <see href="https://docs.microsoft.com/en-us/dotnet/csharp/whats-new/csharp-8#asynchronous-streams">the C# docs</see> for more details on how to consume an async enumerable.</returns>
171-
public IAsyncEnumerable<ChatResult> StreamChatEnumerableAsync(IEnumerable<ChatMessage> messages = null,
171+
public IAsyncEnumerable<ChatResult> StreamChatEnumerableAsync(IEnumerable<ChatMessage> messages,
172172
Model model = null,
173173
double? temperature = null,
174174
double? top_p = null,

0 commit comments

Comments (0)