Skip to content

Commit c81ac58

Browse files
committed
Chat endpoint improvements, tests, readme updates, and an alternate Conversation class
1 parent 057918f commit c81ac58

File tree

7 files changed

+607
-113
lines changed

7 files changed

+607
-113
lines changed

OpenAI_API/Chat/ChatEndpoint.cs

Lines changed: 31 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
using OpenAI_API.Models;
22
using System;
33
using System.Collections.Generic;
4+
using System.Linq;
45
using System.Net.Http;
56
using System.Text;
67
using System.Threading.Tasks;
@@ -10,8 +11,8 @@ namespace OpenAI_API.Chat
1011
/// <summary>
1112
/// ChatGPT API endpoint. Use this endpoint to send multiple messages and carry on a conversation.
1213
/// </summary>
13-
public class ChatEndpoint : EndpointBase
14-
{
14+
public class ChatEndpoint : EndpointBase
15+
{
1516
/// <summary>
1617
/// This allows you to set default parameters for every request, for example to set a default temperature or max tokens. For every request, if you do not have a parameter set on the request but do have it set here as a default, the request will automatically pick up the default value.
1718
/// </summary>
@@ -28,14 +29,23 @@ public class ChatEndpoint : EndpointBase
2829
/// <param name="api"></param>
2930
internal ChatEndpoint(OpenAIAPI api) : base(api) { }
3031

32+
/// <summary>
33+
/// Creates an ongoing chat which can easily encapsulate the conversation. This is the simplest way to use the Chat endpoint.
34+
/// </summary>
35+
/// <returns></returns>
36+
public Conversation CreateConversation()
37+
{
38+
return new Conversation(this, defaultChatRequestArgs: DefaultChatRequestArgs);
39+
}
40+
3141
#region Non-streaming
3242

3343
/// <summary>
3444
/// Ask the API to complete the request using the specified parameters. This is non-streaming, so it will wait until the API returns the full result. Any non-specified parameters will fall back to default values specified in <see cref="DefaultChatRequestArgs"/> if present.
3545
/// </summary>
3646
/// <param name="request">The request to send to the API.</param>
3747
/// <returns>Asynchronously returns the completion result. Look in its <see cref="ChatResult.Choices"/> property for the results.</returns>
38-
public async Task<ChatResult> CreateChatAsync(ChatRequest request)
48+
public async Task<ChatResult> CreateChatCompletionAsync(ChatRequest request)
3949
{
4050
return await HttpPost<ChatResult>(postData: request);
4151
}
@@ -46,10 +56,10 @@ public async Task<ChatResult> CreateChatAsync(ChatRequest request)
4656
/// <param name="request">The request to send to the API.</param>
4757
/// <param name="numOutputs">Overrides <see cref="ChatRequest.NumChoicesPerMessage"/> as a convenience.</param>
4858
/// <returns>Asynchronously returns the completion result. Look in its <see cref="ChatResult.Choices"/> property for the results.</returns>
49-
public Task<ChatResult> CreateChatAsync(ChatRequest request, int numOutputs = 5)
59+
public Task<ChatResult> CreateChatCompletionAsync(ChatRequest request, int numOutputs = 5)
5060
{
5161
request.NumChoicesPerMessage = numOutputs;
52-
return CreateChatAsync(request);
62+
return CreateChatCompletionAsync(request);
5363
}
5464

5565
/// <summary>
@@ -66,15 +76,15 @@ public Task<ChatResult> CreateChatAsync(ChatRequest request, int numOutputs = 5)
6676
/// <param name="logitBias">Maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.</param>
6777
/// <param name="stopSequences">One or more sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.</param>
6878
/// <returns>Asynchronously returns the completion result. Look in its <see cref="ChatResult.Choices"/> property for the results.</returns>
69-
public Task<ChatResult> CreateChatAsync(IEnumerable<ChatMessage> messages,
79+
public Task<ChatResult> CreateChatCompletionAsync(IList<ChatMessage> messages,
7080
Model model = null,
7181
double? temperature = null,
7282
double? top_p = null,
7383
int? numOutputs = null,
7484
int? max_tokens = null,
7585
double? frequencyPenalty = null,
7686
double? presencePenalty = null,
77-
IReadOnlyDictionary<string, float> logitBias = null,
87+
IReadOnlyDictionary<string, float> logitBias = null,
7888
params string[] stopSequences)
7989
{
8090
ChatRequest request = new ChatRequest(DefaultChatRequestArgs)
@@ -90,23 +100,30 @@ public Task<ChatResult> CreateChatAsync(IEnumerable<ChatMessage> messages,
90100
PresencePenalty = presencePenalty ?? DefaultChatRequestArgs.PresencePenalty,
91101
LogitBias = logitBias ?? DefaultChatRequestArgs.LogitBias
92102
};
93-
return CreateChatAsync(request);
103+
return CreateChatCompletionAsync(request);
94104
}
95105

96106
/// <summary>
97-
/// Ask the API to complete the request using the specified parameters. This is non-streaming, so it will wait until the API returns the full result. Any non-specified parameters will fall back to default values specified in <see cref="DefaultChatRequestArgs"/> if present.
107+
/// Ask the API to complete the request using the specified message(s). Any parameters will fall back to default values specified in <see cref="DefaultChatRequestArgs"/> if present.
98108
/// </summary>
99109
/// <param name="messages">The messages to use in the generation.</param>
100-
/// <returns></returns>
101-
public Task<ChatResult> CreateChatAsync(IEnumerable<ChatMessage> messages)
110+
/// <returns>The <see cref="ChatResult"/> with the API response.</returns>
111+
public Task<ChatResult> CreateChatCompletionAsync(params ChatMessage[] messages)
102112
{
103113
ChatRequest request = new ChatRequest(DefaultChatRequestArgs)
104114
{
105115
Messages = messages
106116
};
107-
return CreateChatAsync(request);
117+
return CreateChatCompletionAsync(request);
108118
}
109119

120+
/// <summary>
121+
/// Ask the API to complete the request using the specified message(s). Any parameters will fall back to default values specified in <see cref="DefaultChatRequestArgs"/> if present.
122+
/// </summary>
123+
/// <param name="userMessages">The user message or messages to use in the generation. All strings are assumed to be of Role <see cref="ChatMessageRole.User"/></param>
124+
/// <returns>The <see cref="ChatResult"/> with the API response.</returns>
125+
public Task<ChatResult> CreateChatCompletionAsync(params string[] userMessages) => CreateChatCompletionAsync(userMessages.Select(m => new ChatMessage(ChatMessageRole.User, m)).ToArray());
126+
110127
#endregion
111128

112129
#region Streaming
@@ -168,15 +185,15 @@ public IAsyncEnumerable<ChatResult> StreamChatEnumerableAsync(ChatRequest reques
168185
/// <param name="logitBias">Maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.</param>
169186
/// <param name="stopSequences">One or more sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.</param>
170187
/// <returns>An async enumerable with each of the results as they come in. See <see href="https://docs.microsoft.com/en-us/dotnet/csharp/whats-new/csharp-8#asynchronous-streams">the C# docs</see> for more details on how to consume an async enumerable.</returns>
171-
public IAsyncEnumerable<ChatResult> StreamChatEnumerableAsync(IEnumerable<ChatMessage> messages,
188+
public IAsyncEnumerable<ChatResult> StreamChatEnumerableAsync(IList<ChatMessage> messages,
172189
Model model = null,
173190
double? temperature = null,
174191
double? top_p = null,
175192
int? numOutputs = null,
176193
int? max_tokens = null,
177194
double? frequencyPenalty = null,
178195
double? presencePenalty = null,
179-
IReadOnlyDictionary<string, float> logitBias = null,
196+
IReadOnlyDictionary<string, float> logitBias = null,
180197
params string[] stopSequences)
181198
{
182199
ChatRequest request = new ChatRequest(DefaultChatRequestArgs)

OpenAI_API/Chat/ChatMessageRole.cs

Lines changed: 119 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,119 @@
1+
using Newtonsoft.Json;
2+
using System;
3+
using System.Collections.Generic;
4+
using System.ComponentModel.Design;
5+
using System.Text;
6+
7+
namespace OpenAI_API.Chat
8+
{
9+
/// <summary>
10+
/// Represents the Role of a <see cref="ChatMessage"/>. Typically, a conversation is formatted with a system message first, followed by alternating user and assistant messages. See <see href="https://platform.openai.com/docs/guides/chat/introduction">the OpenAI docs</see> for more details about usage.
11+
/// </summary>
12+
public class ChatMessageRole : IEquatable<ChatMessageRole>
13+
{
14+
/// <summary>
15+
/// Constructor is private to force usage of strongly typed values
16+
/// </summary>
17+
/// <param name="value"></param>
18+
private ChatMessageRole(string value) { Value = value; }
19+
20+
/// <summary>
21+
/// Gets a <see cref="ChatMessageRole"/> instance based on the string value.
22+
/// </summary>
23+
/// <param name="roleName">Must be one of "system", "user", or "assistant"</param>
24+
/// <returns></returns>
25+
public static ChatMessageRole FromString(string roleName)
26+
{
27+
switch (roleName)
28+
{
29+
case "system":
30+
return ChatMessageRole.System;
31+
case "user":
32+
return ChatMessageRole.User;
33+
case "assistant":
34+
return ChatMessageRole.Assistant;
35+
default:
36+
return null;
37+
}
38+
}
39+
40+
private string Value { get; set; }
41+
42+
/// <summary>
43+
/// The system message helps set the behavior of the assistant.
44+
/// </summary>
45+
public static ChatMessageRole System { get { return new ChatMessageRole("system"); } }
46+
/// <summary>
47+
/// The user messages help instruct the assistant. They can be generated by the end users of an application, or set by a developer as an instruction.
48+
/// </summary>
49+
public static ChatMessageRole User { get { return new ChatMessageRole("user"); } }
50+
/// <summary>
51+
/// The assistant messages help store prior responses. They can also be written by a developer to help give examples of desired behavior.
52+
/// </summary>
53+
public static ChatMessageRole Assistant { get { return new ChatMessageRole("assistant"); } }
54+
55+
/// <summary>
56+
/// Gets the string value for this role to pass to the API
57+
/// </summary>
58+
/// <returns>The role as a string</returns>
59+
public override string ToString()
60+
{
61+
return Value;
62+
}
63+
64+
/// <summary>
65+
/// Determines whether this instance and a specified object have the same value.
66+
/// </summary>
67+
/// <param name="obj">The ChatMessageRole to compare to this instance</param>
68+
/// <returns>true if obj is a ChatMessageRole and its value is the same as this instance; otherwise, false. If obj is null, the method returns false</returns>
69+
public override bool Equals(object obj)
70+
{
71+
return Value.Equals((obj as ChatMessageRole).Value);
72+
}
73+
74+
/// <summary>
75+
/// Returns the hash code for this object
76+
/// </summary>
77+
/// <returns>A 32-bit signed integer hash code</returns>
78+
public override int GetHashCode()
79+
{
80+
return Value.GetHashCode();
81+
}
82+
83+
/// <summary>
84+
/// Determines whether this instance and a specified object have the same value.
85+
/// </summary>
86+
/// <param name="other">The ChatMessageRole to compare to this instance</param>
87+
/// <returns>true if other's value is the same as this instance; otherwise, false. If other is null, the method returns false</returns>
88+
public bool Equals(ChatMessageRole other)
89+
{
90+
return Value.Equals(other.Value);
91+
}
92+
93+
/// <summary>
94+
/// Gets the string value for this role to pass to the API
95+
/// </summary>
96+
/// <param name="value">The ChatMessageRole to convert</param>
97+
public static implicit operator String(ChatMessageRole value) { return value; }
98+
99+
///// <summary>
100+
///// Used during the Json serialization process
101+
///// </summary>
102+
//internal class ChatMessageRoleJsonConverter : JsonConverter<ChatMessageRole>
103+
//{
104+
// public override void WriteJson(JsonWriter writer, ChatMessageRole value, JsonSerializer serializer)
105+
// {
106+
// writer.WriteValue(value.ToString());
107+
// }
108+
109+
// public override ChatMessageRole ReadJson(JsonReader reader, Type objectType, ChatMessageRole existingValue, bool hasExistingValue, JsonSerializer serializer)
110+
// {
111+
// if (reader.TokenType != JsonToken.String)
112+
// {
113+
// throw new JsonSerializationException();
114+
// }
115+
// return new ChatMessageRole(reader.ReadAsString());
116+
// }
117+
//}
118+
}
119+
}

OpenAI_API/Chat/ChatRequest.cs

Lines changed: 9 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -22,7 +22,7 @@ public class ChatRequest
2222
/// The messages to send with this Chat Request
2323
/// </summary>
2424
[JsonProperty("messages")]
25-
public IEnumerable<ChatMessage> Messages { get; set; }
25+
public IList<ChatMessage> Messages { get; set; }
2626

2727
/// <summary>
2828
/// What sampling temperature to use. Higher values means the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer. It is generally recommend to use this or <see cref="TopP"/> but not both.
@@ -52,7 +52,7 @@ public class ChatRequest
5252
/// This is only used for serializing the request into JSON, do not use it directly.
5353
/// </summary>
5454
[JsonProperty("stop")]
55-
public object CompiledStop
55+
internal object CompiledStop
5656
{
5757
get
5858
{
@@ -109,9 +109,9 @@ public string StopSequence
109109
/// Accepts a json object that maps tokens(specified by their token ID in the tokenizer) to an associated bias value from -100 to 100.
110110
/// Mathematically, the bias is added to the logits generated by the model prior to sampling.
111111
/// The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.
112-
/// </summary>
112+
/// </summary>
113113
[JsonProperty("logit_bias")]
114-
public IReadOnlyDictionary<string, float> LogitBias { get; set; }
114+
public IReadOnlyDictionary<string, float> LogitBias { get; set; }
115115

116116
/// <summary>
117117
/// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
@@ -123,16 +123,17 @@ public string StopSequence
123123
/// Creates a new, empty <see cref="ChatRequest"/>
124124
/// </summary>
125125
public ChatRequest()
126-
{
127-
this.Model = OpenAI_API.Models.Model.ChatGPTTurbo;
128-
}
126+
{ }
129127

130128
/// <summary>
131129
/// Create a new chat request using the data from the input chat request.
132130
/// </summary>
133131
/// <param name="basedOn"></param>
134132
public ChatRequest(ChatRequest basedOn)
135-
{
133+
{
134+
if (basedOn == null)
135+
return;
136+
136137
this.Model = basedOn.Model;
137138
this.Messages = basedOn.Messages;
138139
this.Temperature = basedOn.Temperature;

0 commit comments

Comments
 (0)