
Commit f709a5a

#49 IMPLEMENT ICompletionEndpoint
1 parent e949bd4 commit f709a5a

2 files changed: 136 additions & 1 deletion

OpenAI_API/Completions/CompletionEndpoint.cs

Lines changed: 1 addition & 1 deletion
@@ -9,7 +9,7 @@ namespace OpenAI_API.Completions
     /// <summary>
     /// Text generation is the core function of the API. You give the API a prompt, and it generates a completion. The way you “program” the API to do a task is by simply describing the task in plain English or providing a few written examples. This simple approach works for a wide range of use cases, including summarization, translation, grammar correction, question answering, chatbots, composing emails, and much more (see the prompt library for inspiration).
     /// </summary>
-    public class CompletionEndpoint : EndpointBase
+    public class CompletionEndpoint : EndpointBase, ICompletionEndpoint
     {
         /// <summary>
         /// This allows you to set default parameters for every request, for example to set a default temperature or max tokens. For every request, if you do not have a parameter set on the request but do have it set here as a default, the request will automatically pick up the default value.
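
The only change to the existing class is the declaration that it now implements the new interface; behavior is untouched. What this buys consumers is the ability to depend on the abstraction rather than the concrete endpoint, for example to substitute a fake in unit tests. A minimal sketch of such a consumer (the Summarizer class below is hypothetical, not part of this commit):

using System.Threading.Tasks;
using OpenAI_API.Completions;

// Hypothetical consumer: depends on ICompletionEndpoint rather than the
// concrete CompletionEndpoint, so a fake or mock can be injected in tests.
public class Summarizer
{
    private readonly ICompletionEndpoint _completions;

    public Summarizer(ICompletionEndpoint completions)
    {
        _completions = completions;
    }

    public Task<string> SummarizeAsync(string text)
    {
        // GetCompletion(string) is declared on the interface added below.
        return _completions.GetCompletion("Summarize the following text:\n" + text);
    }
}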
OpenAI_API/Completions/ICompletionEndpoint.cs

Lines changed: 135 additions & 0 deletions
@@ -0,0 +1,135 @@
using System;
using System.Collections.Generic;
using System.Threading.Tasks;
using OpenAI_API.Models;

namespace OpenAI_API.Completions
{
    /// <summary>
    /// Text generation is the core function of the API. You give the API a prompt, and it generates a completion. The way you “program” the API to do a task is by simply describing the task in plain English or providing a few written examples. This simple approach works for a wide range of use cases, including summarization, translation, grammar correction, question answering, chatbots, composing emails, and much more (see the prompt library for inspiration).
    /// </summary>
    public interface ICompletionEndpoint
    {
        /// <summary>
        /// This allows you to set default parameters for every request, for example to set a default temperature or max tokens. For every request, if you do not have a parameter set on the request but do have it set here as a default, the request will automatically pick up the default value.
        /// </summary>
        CompletionRequest DefaultCompletionRequestArgs { get; set; }

        /// <summary>
        /// Ask the API to complete the prompt(s) using the specified request. This is non-streaming, so it will wait until the API returns the full result.
        /// </summary>
        /// <param name="request">The request to send to the API. This does not fall back to default values specified in <see cref="DefaultCompletionRequestArgs"/>.</param>
        /// <returns>Asynchronously returns the completion result. Look in its <see cref="CompletionResult.Completions"/> property for the completions.</returns>
        Task<CompletionResult> CreateCompletionAsync(CompletionRequest request);

        /// <summary>
        /// Ask the API to complete the prompt(s) using the specified parameters. This is non-streaming, so it will wait until the API returns the full result. Any non-specified parameters will fall back to default values specified in <see cref="DefaultCompletionRequestArgs"/> if present.
        /// </summary>
        /// <param name="prompt">The prompt to generate from</param>
        /// <param name="model">The model to use. You can use <see cref="ModelsEndpoint.GetModelsAsync()"/> to see all of your available models, or use a standard model like <see cref="Model.DavinciText"/>.</param>
        /// <param name="max_tokens">How many tokens to complete to. Can return fewer if a stop sequence is hit.</param>
        /// <param name="temperature">What sampling temperature to use. Higher values mean the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer. It is generally recommended to use this or <paramref name="top_p"/> but not both.</param>
        /// <param name="top_p">An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. It is generally recommended to use this or <paramref name="temperature"/> but not both.</param>
        /// <param name="numOutputs">How many different choices to request for each prompt.</param>
        /// <param name="presencePenalty">The scale of the penalty applied if a token is already present at all. Should generally be between 0 and 1, although negative numbers are allowed to encourage token reuse.</param>
        /// <param name="frequencyPenalty">The scale of the penalty for how often a token is used. Should generally be between 0 and 1, although negative numbers are allowed to encourage token reuse.</param>
        /// <param name="logProbs">Include the log probabilities on the logprobs most likely tokens, which can be found in <see cref="CompletionResult.Completions"/> -> <see cref="Choice.Logprobs"/>. So for example, if logprobs is 10, the API will return a list of the 10 most likely tokens. If logprobs is supplied, the API will always return the logprob of the sampled token, so there may be up to logprobs+1 elements in the response.</param>
        /// <param name="echo">Echo back the prompt in addition to the completion.</param>
        /// <param name="stopSequences">One or more sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.</param>
        /// <returns>Asynchronously returns the completion result. Look in its <see cref="CompletionResult.Completions"/> property for the completions.</returns>
        Task<CompletionResult> CreateCompletionAsync(string prompt,
            Model model = null,
            int? max_tokens = null,
            double? temperature = null,
            double? top_p = null,
            int? numOutputs = null,
            double? presencePenalty = null,
            double? frequencyPenalty = null,
            int? logProbs = null,
            bool? echo = null,
            params string[] stopSequences
            );

        /// <summary>
        /// Ask the API to complete the prompt(s) using the specified prompts, with other parameters being drawn from default values specified in <see cref="DefaultCompletionRequestArgs"/> if present. This is non-streaming, so it will wait until the API returns the full result.
        /// </summary>
        /// <param name="prompts">One or more prompts to generate from</param>
        /// <returns>Asynchronously returns the completion result. Look in its <see cref="CompletionResult.Completions"/> property for the completions.</returns>
        Task<CompletionResult> CreateCompletionAsync(params string[] prompts);

        /// <summary>
        /// Ask the API to complete the prompt(s) using the specified request and a requested number of outputs. This is non-streaming, so it will wait until the API returns the full result.
        /// </summary>
        /// <param name="request">The request to send to the API. This does not fall back to default values specified in <see cref="DefaultCompletionRequestArgs"/>.</param>
        /// <param name="numOutputs">Overrides <see cref="CompletionRequest.NumChoicesPerPrompt"/> as a convenience.</param>
        /// <returns>Asynchronously returns the completion result. Look in its <see cref="CompletionResult.Completions"/> property for the completions, which should have a length equal to <paramref name="numOutputs"/>.</returns>
        Task<CompletionResult> CreateCompletionsAsync(CompletionRequest request, int numOutputs = 5);

        /// <summary>
        /// Ask the API to complete the prompt(s) using the specified request, and stream the results to the <paramref name="resultHandler"/> as they come in.
        /// If you are on the latest C# supporting async enumerables, you may prefer the cleaner syntax of <see cref="StreamCompletionEnumerableAsync(CompletionRequest)"/> instead.
        /// </summary>
        /// <param name="request">The request to send to the API. This does not fall back to default values specified in <see cref="DefaultCompletionRequestArgs"/>.</param>
        /// <param name="resultHandler">An action to be called as each new result arrives, which includes the index of the result in the overall result set.</param>
        Task StreamCompletionAsync(CompletionRequest request, Action<int, CompletionResult> resultHandler);

        /// <summary>
        /// Ask the API to complete the prompt(s) using the specified request, and stream the results to the <paramref name="resultHandler"/> as they come in.
        /// If you are on the latest C# supporting async enumerables, you may prefer the cleaner syntax of <see cref="StreamCompletionEnumerableAsync(CompletionRequest)"/> instead.
        /// </summary>
        /// <param name="request">The request to send to the API. This does not fall back to default values specified in <see cref="DefaultCompletionRequestArgs"/>.</param>
        /// <param name="resultHandler">An action to be called as each new result arrives.</param>
        Task StreamCompletionAsync(CompletionRequest request, Action<CompletionResult> resultHandler);

        /// <summary>
        /// Ask the API to complete the prompt(s) using the specified request, and stream the results as they come in.
        /// If you are not using C# 8 supporting async enumerables or if you are using the .NET Framework, you may need to use <see cref="StreamCompletionAsync(CompletionRequest, Action{CompletionResult})"/> instead.
        /// </summary>
        /// <param name="request">The request to send to the API. This does not fall back to default values specified in <see cref="DefaultCompletionRequestArgs"/>.</param>
        /// <returns>An async enumerable with each of the results as they come in. See <see href="https://docs.microsoft.com/en-us/dotnet/csharp/whats-new/csharp-8#asynchronous-streams"/> for more details on how to consume an async enumerable.</returns>
        IAsyncEnumerable<CompletionResult> StreamCompletionEnumerableAsync(CompletionRequest request);

        /// <summary>
        /// Ask the API to complete the prompt(s) using the specified parameters.
        /// Any non-specified parameters will fall back to default values specified in <see cref="DefaultCompletionRequestArgs"/> if present.
        /// If you are not using C# 8 supporting async enumerables or if you are using the .NET Framework, you may need to use <see cref="StreamCompletionAsync(CompletionRequest, Action{CompletionResult})"/> instead.
        /// </summary>
        /// <param name="prompt">The prompt to generate from</param>
        /// <param name="model">The model to use. You can use <see cref="ModelsEndpoint.GetModelsAsync()"/> to see all of your available models, or use a standard model like <see cref="Model.DavinciText"/>.</param>
        /// <param name="max_tokens">How many tokens to complete to. Can return fewer if a stop sequence is hit.</param>
        /// <param name="temperature">What sampling temperature to use. Higher values mean the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer. It is generally recommended to use this or <paramref name="top_p"/> but not both.</param>
        /// <param name="top_p">An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. It is generally recommended to use this or <paramref name="temperature"/> but not both.</param>
        /// <param name="numOutputs">How many different choices to request for each prompt.</param>
        /// <param name="presencePenalty">The scale of the penalty applied if a token is already present at all. Should generally be between 0 and 1, although negative numbers are allowed to encourage token reuse.</param>
        /// <param name="frequencyPenalty">The scale of the penalty for how often a token is used. Should generally be between 0 and 1, although negative numbers are allowed to encourage token reuse.</param>
        /// <param name="logProbs">Include the log probabilities on the logprobs most likely tokens, which can be found in <see cref="CompletionResult.Completions"/> -> <see cref="Choice.Logprobs"/>. So for example, if logprobs is 10, the API will return a list of the 10 most likely tokens. If logprobs is supplied, the API will always return the logprob of the sampled token, so there may be up to logprobs+1 elements in the response.</param>
        /// <param name="echo">Echo back the prompt in addition to the completion.</param>
        /// <param name="stopSequences">One or more sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.</param>
        /// <returns>An async enumerable with each of the results as they come in. See <see href="https://docs.microsoft.com/en-us/dotnet/csharp/whats-new/csharp-8#asynchronous-streams">the C# docs</see> for more details on how to consume an async enumerable.</returns>
        IAsyncEnumerable<CompletionResult> StreamCompletionEnumerableAsync(string prompt,
            Model model = null,
            int? max_tokens = null,
            double? temperature = null,
            double? top_p = null,
            int? numOutputs = null,
            double? presencePenalty = null,
            double? frequencyPenalty = null,
            int? logProbs = null,
            bool? echo = null,
            params string[] stopSequences);

        /// <summary>
        /// Simply returns a string of the prompt followed by the best completion
        /// </summary>
        /// <param name="request">The request to send to the API. This does not fall back to default values specified in <see cref="DefaultCompletionRequestArgs"/>.</param>
        /// <returns>A string of the prompt followed by the best completion</returns>
        Task<string> CreateAndFormatCompletion(CompletionRequest request);

        /// <summary>
        /// Simply returns the best completion
        /// </summary>
        /// <param name="prompt">The prompt to complete</param>
        /// <returns>The best completion</returns>
        Task<string> GetCompletion(string prompt);
    }
}
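
For orientation, here is one way the interface above might be consumed. This is a sketch under stated assumptions: how the endpoint instance is obtained, the CompletionRequest property names (Prompt, MaxTokens), and Choice.Text are inferred from the library's usual shape rather than shown in this commit; only the interface members above are given.

using System;
using System.Threading.Tasks;
using OpenAI_API.Completions;
using OpenAI_API.Models;

public static class CompletionDemo
{
    // In the real library the argument would typically be a CompletionEndpoint
    // obtained from the API client; only the interface itself is defined above.
    public static async Task RunAsync(ICompletionEndpoint endpoint)
    {
        // Non-streaming: waits for the full result.
        CompletionResult result = await endpoint.CreateCompletionAsync(
            "One, two, three,",
            model: Model.DavinciText, // referenced in the XML docs above
            max_tokens: 5,
            temperature: 0);
        // Choice.Text is an assumption; the docs above only name Choice.Logprobs.
        Console.WriteLine(result.Completions[0].Text);

        // Streaming with C# 8 async enumerables.
        var request = new CompletionRequest { Prompt = "Once upon a time", MaxTokens = 20 };
        await foreach (CompletionResult partial in endpoint.StreamCompletionEnumerableAsync(request))
        {
            Console.Write(partial.Completions[0].Text);
        }
        Console.WriteLine();
    }
}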
