Commit d026255

Make Messages IEnumerable instead of List
1 parent 477105a

2 files changed: 4 additions, 4 deletions

OpenAI_API/Chat/ChatEndpoint.cs

Lines changed: 3 additions & 3 deletions
@@ -66,7 +66,7 @@ public Task<ChatResult> CreateChatAsync(ChatRequest request, int numOutputs = 5)
         /// <param name="logitBias">Maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.</param>
         /// <param name="stopSequences">One or more sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.</param>
         /// <returns>Asynchronously returns the completion result. Look in its <see cref="ChatResult.Choices"/> property for the results.</returns>
-        public Task<ChatResult> CreateChatAsync(List<ChatMessage> messages = null,
+        public Task<ChatResult> CreateChatAsync(IEnumerable<ChatMessage> messages = null,
             Model model = null,
             double? temperature = null,
             double? top_p = null,
@@ -98,7 +98,7 @@ public Task<ChatResult> CreateChatAsync(List<ChatMessage> messages = null,
         /// </summary>
         /// <param name="messages">The messages to use in the generation.</param>
         /// <returns></returns>
-        public Task<ChatResult> CreateChatAsync(List<ChatMessage> messages)
+        public Task<ChatResult> CreateChatAsync(IEnumerable<ChatMessage> messages)
         {
             ChatRequest request = new ChatRequest(DefaultChatRequestArgs)
             {
@@ -168,7 +168,7 @@ public IAsyncEnumerable<ChatResult> StreamChatEnumerableAsync(ChatRequest reques
         /// <param name="logitBias">Maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.</param>
         /// <param name="stopSequences">One or more sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.</param>
         /// <returns>An async enumerable with each of the results as they come in. See <see href="https://docs.microsoft.com/en-us/dotnet/csharp/whats-new/csharp-8#asynchronous-streams">the C# docs</see> for more details on how to consume an async enumerable.</returns>
-        public IAsyncEnumerable<ChatResult> StreamChatEnumerableAsync(List<ChatMessage> messages = null,
+        public IAsyncEnumerable<ChatResult> StreamChatEnumerableAsync(IEnumerable<ChatMessage> messages = null,
             Model model = null,
             double? temperature = null,
             double? top_p = null,
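
The practical effect of the looser parameter type: callers can pass any sequence of messages without materializing a List first. A minimal caller-side sketch, assuming this repo's OpenAIAPI, ChatMessage, and ChatMessageRole types; the surrounding program, prompts, and API key are hypothetical:

    using System;
    using System.Collections.Generic;
    using System.Linq;
    using System.Threading.Tasks;
    using OpenAI_API;
    using OpenAI_API.Chat;

    public class Demo
    {
        public static async Task Main()
        {
            var api = new OpenAIAPI("sk-...");  // hypothetical API key

            // Any IEnumerable<ChatMessage> is now accepted -- for example a
            // lazy LINQ projection, with no intermediate .ToList() call.
            string[] prompts = { "Hello", "What is 2 + 2?" };
            IEnumerable<ChatMessage> messages =
                prompts.Select(p => new ChatMessage(ChatMessageRole.User, p));

            ChatResult result = await api.Chat.CreateChatAsync(messages);
            Console.WriteLine(result.Choices[0].Message.Content);
        }
    }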

OpenAI_API/Chat/ChatRequest.cs

Lines changed: 1 addition & 1 deletion
@@ -22,7 +22,7 @@ public class ChatRequest
         /// The messages to send with this Chat Request
         /// </summary>
         [JsonProperty("messages")]
-        public List<ChatMessage> Messages { get; set; }
+        public IEnumerable<ChatMessage> Messages { get; set; }

         /// <summary>
         /// What sampling temperature to use. Higher values means the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer. It is generally recommend to use this or <see cref="TopP"/> but not both.
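
On the wire nothing changes: Newtonsoft.Json serializes any IEnumerable<T> as a JSON array, so the "messages" payload is the same whether the property holds a List, an array, or a deferred query. A quick sketch of that assumption, with hypothetical message content:

    using System.Collections.Generic;
    using Newtonsoft.Json;
    using OpenAI_API.Chat;

    // Json.NET writes any IEnumerable<T> out as a JSON array, so widening
    // the property type does not change the serialized request body.
    var request = new ChatRequest
    {
        Messages = new[] { new ChatMessage(ChatMessageRole.User, "hi") }  // an array, not a List
    };
    string json = JsonConvert.SerializeObject(request);
    // json now carries "messages" as a JSON array, exactly as before.

One trade-off worth noting: a deferred query assigned to Messages is re-enumerated each time the request is serialized, whereas a List is evaluated once by the caller.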

0 commit comments
