Commit 1adcae8 (2 parents: 30aca80 + b70077e)

Merge pull request #57 from pandapknaepel/feature-interfaces

Implement Interfaces for endpoint and api classes. Closes #49.

10 files changed: +304 −5 lines
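The point of the change is to let callers depend on abstractions such as ICompletionEndpoint, IEmbeddingEndpoint, and IFilesEndpoint instead of the concrete endpoint classes, presumably (per #49) for mockability in unit tests and dependency injection. A hypothetical consumer, as a sketch (the Summarizer class below is illustrative, not part of this commit):

using System.Threading.Tasks;
using OpenAI_API.Completions;

// Hypothetical consumer: depending on the new ICompletionEndpoint interface
// instead of the concrete CompletionEndpoint lets unit tests pass in a fake
// or mock implementation.
public class Summarizer
{
    private readonly ICompletionEndpoint _completions;

    public Summarizer(ICompletionEndpoint completions) => _completions = completions;

    public Task<string> SummarizeAsync(string text) =>
        // GetCompletion is one of the members declared on ICompletionEndpoint below.
        _completions.GetCompletion("Summarize the following text:\n" + text);
}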

OpenAI_API/Completions/CompletionEndpoint.cs

Lines changed: 1 addition & 1 deletion

@@ -9,7 +9,7 @@ namespace OpenAI_API.Completions
     /// <summary>
     /// Text generation is the core function of the API. You give the API a prompt, and it generates a completion. The way you “program” the API to do a task is by simply describing the task in plain English or providing a few written examples. This simple approach works for a wide range of use cases, including summarization, translation, grammar correction, question answering, chatbots, composing emails, and much more (see the prompt library for inspiration).
     /// </summary>
-    public class CompletionEndpoint : EndpointBase
+    public class CompletionEndpoint : EndpointBase, ICompletionEndpoint
     {
         /// <summary>
         /// This allows you to set default parameters for every request, for example to set a default temperature or max tokens. For every request, if you do not have a parameter set on the request but do have it set here as a default, the request will automatically pick up the default value.
OpenAI_API/Completions/ICompletionEndpoint.cs (new file)

Lines changed: 135 additions & 0 deletions

using System;
using System.Collections.Generic;
using System.Threading.Tasks;
using OpenAI_API.Models;

namespace OpenAI_API.Completions
{
    /// <summary>
    /// Text generation is the core function of the API. You give the API a prompt, and it generates a completion. The way you “program” the API to do a task is by simply describing the task in plain English or providing a few written examples. This simple approach works for a wide range of use cases, including summarization, translation, grammar correction, question answering, chatbots, composing emails, and much more (see the prompt library for inspiration).
    /// </summary>
    public interface ICompletionEndpoint
    {
        /// <summary>
        /// This allows you to set default parameters for every request, for example to set a default temperature or max tokens. For every request, if you do not have a parameter set on the request but do have it set here as a default, the request will automatically pick up the default value.
        /// </summary>
        CompletionRequest DefaultCompletionRequestArgs { get; set; }

        /// <summary>
        /// Ask the API to complete the prompt(s) using the specified request. This is non-streaming, so it will wait until the API returns the full result.
        /// </summary>
        /// <param name="request">The request to send to the API. This does not fall back to default values specified in <see cref="DefaultCompletionRequestArgs"/>.</param>
        /// <returns>Asynchronously returns the completion result. Look in its <see cref="CompletionResult.Completions"/> property for the completions.</returns>
        Task<CompletionResult> CreateCompletionAsync(CompletionRequest request);

        /// <summary>
        /// Ask the API to complete the prompt(s) using the specified parameters. This is non-streaming, so it will wait until the API returns the full result. Any non-specified parameters will fall back to default values specified in <see cref="DefaultCompletionRequestArgs"/> if present.
        /// </summary>
        /// <param name="prompt">The prompt to generate from</param>
        /// <param name="model">The model to use. You can use <see cref="ModelsEndpoint.GetModelsAsync()"/> to see all of your available models, or use a standard model like <see cref="Model.DavinciText"/>.</param>
        /// <param name="max_tokens">How many tokens to complete to. Can return fewer if a stop sequence is hit.</param>
        /// <param name="temperature">What sampling temperature to use. Higher values mean the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer. It is generally recommended to use this or <paramref name="top_p"/> but not both.</param>
        /// <param name="top_p">An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. It is generally recommended to use this or <paramref name="temperature"/> but not both.</param>
        /// <param name="numOutputs">How many different choices to request for each prompt.</param>
        /// <param name="presencePenalty">The scale of the penalty applied if a token is already present at all. Should generally be between 0 and 1, although negative numbers are allowed to encourage token reuse.</param>
        /// <param name="frequencyPenalty">The scale of the penalty for how often a token is used. Should generally be between 0 and 1, although negative numbers are allowed to encourage token reuse.</param>
        /// <param name="logProbs">Include the log probabilities on the logprobs most likely tokens, which can be found in <see cref="CompletionResult.Completions"/> -> <see cref="Choice.Logprobs"/>. So for example, if logprobs is 10, the API will return a list of the 10 most likely tokens. If logprobs is supplied, the API will always return the logprob of the sampled token, so there may be up to logprobs+1 elements in the response.</param>
        /// <param name="echo">Echo back the prompt in addition to the completion.</param>
        /// <param name="stopSequences">One or more sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.</param>
        /// <returns>Asynchronously returns the completion result. Look in its <see cref="CompletionResult.Completions"/> property for the completions.</returns>
        Task<CompletionResult> CreateCompletionAsync(string prompt,
            Model model = null,
            int? max_tokens = null,
            double? temperature = null,
            double? top_p = null,
            int? numOutputs = null,
            double? presencePenalty = null,
            double? frequencyPenalty = null,
            int? logProbs = null,
            bool? echo = null,
            params string[] stopSequences);

        /// <summary>
        /// Ask the API to complete the prompt(s) using the specified prompts, with other parameters being drawn from default values specified in <see cref="DefaultCompletionRequestArgs"/> if present. This is non-streaming, so it will wait until the API returns the full result.
        /// </summary>
        /// <param name="prompts">One or more prompts to generate from</param>
        /// <returns>Asynchronously returns the completion result. Look in its <see cref="CompletionResult.Completions"/> property for the completions.</returns>
        Task<CompletionResult> CreateCompletionAsync(params string[] prompts);

        /// <summary>
        /// Ask the API to complete the prompt(s) using the specified request and a requested number of outputs. This is non-streaming, so it will wait until the API returns the full result.
        /// </summary>
        /// <param name="request">The request to send to the API. This does not fall back to default values specified in <see cref="DefaultCompletionRequestArgs"/>.</param>
        /// <param name="numOutputs">Overrides <see cref="CompletionRequest.NumChoicesPerPrompt"/> as a convenience.</param>
        /// <returns>Asynchronously returns the completion result. Look in its <see cref="CompletionResult.Completions"/> property for the completions, which should have a length equal to <paramref name="numOutputs"/>.</returns>
        Task<CompletionResult> CreateCompletionsAsync(CompletionRequest request, int numOutputs = 5);

        /// <summary>
        /// Ask the API to complete the prompt(s) using the specified request, and stream the results to the <paramref name="resultHandler"/> as they come in.
        /// If you are on the latest C# supporting async enumerables, you may prefer the cleaner syntax of <see cref="StreamCompletionEnumerableAsync(CompletionRequest)"/> instead.
        /// </summary>
        /// <param name="request">The request to send to the API. This does not fall back to default values specified in <see cref="DefaultCompletionRequestArgs"/>.</param>
        /// <param name="resultHandler">An action to be called as each new result arrives, which includes the index of the result in the overall result set.</param>
        Task StreamCompletionAsync(CompletionRequest request, Action<int, CompletionResult> resultHandler);

        /// <summary>
        /// Ask the API to complete the prompt(s) using the specified request, and stream the results to the <paramref name="resultHandler"/> as they come in.
        /// If you are on the latest C# supporting async enumerables, you may prefer the cleaner syntax of <see cref="StreamCompletionEnumerableAsync(CompletionRequest)"/> instead.
        /// </summary>
        /// <param name="request">The request to send to the API. This does not fall back to default values specified in <see cref="DefaultCompletionRequestArgs"/>.</param>
        /// <param name="resultHandler">An action to be called as each new result arrives.</param>
        Task StreamCompletionAsync(CompletionRequest request, Action<CompletionResult> resultHandler);

        /// <summary>
        /// Ask the API to complete the prompt(s) using the specified request, and stream the results as they come in.
        /// If you are not using C# 8 supporting async enumerables or if you are using the .NET Framework, you may need to use <see cref="StreamCompletionAsync(CompletionRequest, Action{CompletionResult})"/> instead.
        /// </summary>
        /// <param name="request">The request to send to the API. This does not fall back to default values specified in <see cref="DefaultCompletionRequestArgs"/>.</param>
        /// <returns>An async enumerable with each of the results as they come in. See <see href="https://docs.microsoft.com/en-us/dotnet/csharp/whats-new/csharp-8#asynchronous-streams"/> for more details on how to consume an async enumerable.</returns>
        IAsyncEnumerable<CompletionResult> StreamCompletionEnumerableAsync(CompletionRequest request);

        /// <summary>
        /// Ask the API to complete the prompt(s) using the specified parameters.
        /// Any non-specified parameters will fall back to default values specified in <see cref="DefaultCompletionRequestArgs"/> if present.
        /// If you are not using C# 8 supporting async enumerables or if you are using the .NET Framework, you may need to use <see cref="StreamCompletionAsync(CompletionRequest, Action{CompletionResult})"/> instead.
        /// </summary>
        /// <param name="prompt">The prompt to generate from</param>
        /// <param name="model">The model to use. You can use <see cref="ModelsEndpoint.GetModelsAsync()"/> to see all of your available models, or use a standard model like <see cref="Model.DavinciText"/>.</param>
        /// <param name="max_tokens">How many tokens to complete to. Can return fewer if a stop sequence is hit.</param>
        /// <param name="temperature">What sampling temperature to use. Higher values mean the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer. It is generally recommended to use this or <paramref name="top_p"/> but not both.</param>
        /// <param name="top_p">An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. It is generally recommended to use this or <paramref name="temperature"/> but not both.</param>
        /// <param name="numOutputs">How many different choices to request for each prompt.</param>
        /// <param name="presencePenalty">The scale of the penalty applied if a token is already present at all. Should generally be between 0 and 1, although negative numbers are allowed to encourage token reuse.</param>
        /// <param name="frequencyPenalty">The scale of the penalty for how often a token is used. Should generally be between 0 and 1, although negative numbers are allowed to encourage token reuse.</param>
        /// <param name="logProbs">Include the log probabilities on the logprobs most likely tokens, which can be found in <see cref="CompletionResult.Completions"/> -> <see cref="Choice.Logprobs"/>. So for example, if logprobs is 10, the API will return a list of the 10 most likely tokens. If logprobs is supplied, the API will always return the logprob of the sampled token, so there may be up to logprobs+1 elements in the response.</param>
        /// <param name="echo">Echo back the prompt in addition to the completion.</param>
        /// <param name="stopSequences">One or more sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.</param>
        /// <returns>An async enumerable with each of the results as they come in. See <see href="https://docs.microsoft.com/en-us/dotnet/csharp/whats-new/csharp-8#asynchronous-streams">the C# docs</see> for more details on how to consume an async enumerable.</returns>
        IAsyncEnumerable<CompletionResult> StreamCompletionEnumerableAsync(string prompt,
            Model model = null,
            int? max_tokens = null,
            double? temperature = null,
            double? top_p = null,
            int? numOutputs = null,
            double? presencePenalty = null,
            double? frequencyPenalty = null,
            int? logProbs = null,
            bool? echo = null,
            params string[] stopSequences);

        /// <summary>
        /// Simply returns a string of the prompt followed by the best completion
        /// </summary>
        /// <param name="request">The request to send to the API. This does not fall back to default values specified in <see cref="DefaultCompletionRequestArgs"/>.</param>
        /// <returns>A string of the prompt followed by the best completion</returns>
        Task<string> CreateAndFormatCompletion(CompletionRequest request);

        /// <summary>
        /// Simply returns the best completion
        /// </summary>
        /// <param name="prompt">The prompt to complete</param>
        /// <returns>The best completion</returns>
        Task<string> GetCompletion(string prompt);
    }
}
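The interface mirrors every public member of CompletionEndpoint, so existing call sites keep working while new code can program against the abstraction. A usage sketch (not part of this diff; it assumes the library's Choice type exposes the completion text via a Text property):

using System;
using System.Threading.Tasks;
using OpenAI_API.Completions;

public static class CompletionDemo
{
    // Accepts any ICompletionEndpoint: the real CompletionEndpoint or a test double.
    public static async Task RunAsync(ICompletionEndpoint completions)
    {
        // Non-streaming overload declared above; unspecified parameters fall
        // back to DefaultCompletionRequestArgs.
        CompletionResult result = await completions.CreateCompletionAsync(
            "One, two, three,", max_tokens: 10, temperature: 0);
        Console.WriteLine(result.Completions[0].Text);

        // Streaming overload consumed as a C# 8 async enumerable.
        await foreach (CompletionResult partial in
            completions.StreamCompletionEnumerableAsync("Once upon a time", max_tokens: 50))
        {
            Console.Write(partial.Completions[0].Text);
        }
    }
}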

OpenAI_API/Embedding/EmbeddingEndpoint.cs

Lines changed: 1 addition & 1 deletion

@@ -6,7 +6,7 @@ namespace OpenAI_API.Embedding
     /// <summary>
    /// OpenAI’s text embeddings measure the relatedness of text strings by generating an embedding, which is a vector (list) of floating point numbers. The distance between two vectors measures their relatedness. Small distances suggest high relatedness and large distances suggest low relatedness.
     /// </summary>
-    public class EmbeddingEndpoint : EndpointBase
+    public class EmbeddingEndpoint : EndpointBase, IEmbeddingEndpoint
     {
         /// <summary>
         /// This allows you to send requests to the recommended model without needing to specify one. Every request uses the <see cref="Model.AdaTextEmbedding"/> model
OpenAI_API/Embedding/IEmbeddingEndpoint.cs (new file)

Lines changed: 36 additions & 0 deletions

using System.Threading.Tasks;

namespace OpenAI_API.Embedding
{
    /// <summary>
    /// OpenAI’s text embeddings measure the relatedness of text strings by generating an embedding, which is a vector (list) of floating point numbers. The distance between two vectors measures their relatedness. Small distances suggest high relatedness and large distances suggest low relatedness.
    /// </summary>
    public interface IEmbeddingEndpoint
    {
        /// <summary>
        /// This allows you to send requests to the recommended model without needing to specify one. Every request uses the <see cref="Model.AdaTextEmbedding"/> model
        /// </summary>
        EmbeddingRequest DefaultEmbeddingRequestArgs { get; set; }

        /// <summary>
        /// Ask the API to embed text using the default embedding model <see cref="Model.AdaTextEmbedding"/>
        /// </summary>
        /// <param name="input">Text to be embedded</param>
        /// <returns>Asynchronously returns the embedding result. Look in the <see cref="Data.Embedding"/> property of <see cref="EmbeddingResult.Data"/> to find the vector of floating point numbers</returns>
        Task<EmbeddingResult> CreateEmbeddingAsync(string input);

        /// <summary>
        /// Ask the API to embed text using a custom request
        /// </summary>
        /// <param name="request">Request to be sent</param>
        /// <returns>Asynchronously returns the embedding result. Look in the <see cref="Data.Embedding"/> property of <see cref="EmbeddingResult.Data"/> to find the vector of floating point numbers</returns>
        Task<EmbeddingResult> CreateEmbeddingAsync(EmbeddingRequest request);

        /// <summary>
        /// Ask the API to embed text using the default embedding model <see cref="Model.AdaTextEmbedding"/>
        /// </summary>
        /// <param name="input">Text to be embedded</param>
        /// <returns>Asynchronously returns the first embedding result as an array of floats.</returns>
        Task<float[]> GetEmbeddingsAsync(string input);
    }
}
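Since the doc comments describe vector distance as the relatedness measure, here is a sketch of how GetEmbeddingsAsync might be used to compare two strings through the new interface. The CosineSimilarity helper is hypothetical, not part of the library:

using System;
using System.Threading.Tasks;
using OpenAI_API.Embedding;

public static class RelatednessDemo
{
    // Hypothetical helper: cosine similarity of two equal-length vectors.
    // Values near 1 indicate high relatedness; values near 0 indicate low relatedness.
    private static double CosineSimilarity(float[] a, float[] b)
    {
        double dot = 0, normA = 0, normB = 0;
        for (int i = 0; i < a.Length; i++)
        {
            dot += a[i] * b[i];
            normA += a[i] * a[i];
            normB += b[i] * b[i];
        }
        return dot / (Math.Sqrt(normA) * Math.Sqrt(normB));
    }

    public static async Task<double> CompareAsync(
        IEmbeddingEndpoint embeddings, string first, string second)
    {
        // GetEmbeddingsAsync is declared on the interface above.
        float[] a = await embeddings.GetEmbeddingsAsync(first);
        float[] b = await embeddings.GetEmbeddingsAsync(second);
        return CosineSimilarity(a, b);
    }
}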

OpenAI_API/Files/FilesEndpoint.cs

Lines changed: 1 addition & 1 deletion

@@ -9,7 +9,7 @@ namespace OpenAI_API.Files
     /// <summary>
     /// The API endpoint for file operations: List, Upload, Delete, and Retrieve
     /// </summary>
-    public class FilesEndpoint : EndpointBase
+    public class FilesEndpoint : EndpointBase, IFilesEndpoint
     {
         /// <summary>
         /// Constructor of the API endpoint. Rather than instantiating this yourself, access it through an instance of <see cref="OpenAIAPI"/> as <see cref="OpenAIAPI.Files"/>.

OpenAI_API/Files/IFilesEndpoint.cs (new file)

Lines changed: 46 additions & 0 deletions

using System.Collections.Generic;
using System.Threading.Tasks;

namespace OpenAI_API.Files
{
    /// <summary>
    /// The API endpoint for file operations: List, Upload, Delete, and Retrieve
    /// </summary>
    public interface IFilesEndpoint
    {
        /// <summary>
        /// Get the list of all files
        /// </summary>
        /// <returns>Asynchronously returns the list of all files</returns>
        /// <exception cref="HttpRequestException"></exception>
        Task<List<File>> GetFilesAsync();

        /// <summary>
        /// Returns information about a specific file
        /// </summary>
        /// <param name="fileId">The ID of the file to use for this request</param>
        /// <returns>Asynchronously returns information about the file</returns>
        Task<File> GetFileAsync(string fileId);

        /// <summary>
        /// Returns the contents of the specified file as a string
        /// </summary>
        /// <param name="fileId">The ID of the file to use for this request</param>
        /// <returns>Asynchronously returns the file contents as a string</returns>
        Task<string> GetFileContentAsStringAsync(string fileId);

        /// <summary>
        /// Delete a file
        /// </summary>
        /// <param name="fileId">The ID of the file to use for this request</param>
        /// <returns>Asynchronously returns the deleted file</returns>
        Task<File> DeleteFileAsync(string fileId);

        /// <summary>
        /// Upload a file that contains document(s) to be used across various endpoints/features. Currently, the size of all the files uploaded by one organization can be up to 1 GB. Please contact OpenAI if you need to increase the storage limit
        /// </summary>
        /// <param name="filePath">The name of the file to use for this request</param>
        /// <param name="purpose">The intended purpose of the uploaded documents. Use "fine-tune" for fine-tuning. This allows us to validate the format of the uploaded file.</param>
        Task<File> UploadFileAsync(string filePath, string purpose = "fine-tune");
    }
}
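A sketch of the testability this interface buys: an in-memory fake that unit tests can pass wherever an IFilesEndpoint is expected. This is hypothetical, not part of the commit, and it assumes OpenAI_API.Files.File has a parameterless constructor and settable Id and Name properties, so adjust to the type's real shape:

using System.Collections.Generic;
using System.Linq;
using System.Threading.Tasks;
using OpenAI_API.Files;

// Hypothetical in-memory fake for unit tests; no network calls.
public class InMemoryFilesEndpoint : IFilesEndpoint
{
    private readonly Dictionary<string, File> _files = new Dictionary<string, File>();

    public Task<List<File>> GetFilesAsync() =>
        Task.FromResult(_files.Values.ToList());

    public Task<File> GetFileAsync(string fileId) =>
        Task.FromResult(_files[fileId]);

    public Task<string> GetFileContentAsStringAsync(string fileId) =>
        Task.FromResult("fake file content"); // canned content for tests

    public Task<File> DeleteFileAsync(string fileId)
    {
        var file = _files[fileId];
        _files.Remove(fileId);
        return Task.FromResult(file);
    }

    public Task<File> UploadFileAsync(string filePath, string purpose = "fine-tune")
    {
        // Assumes File exposes settable Id/Name properties; a real fake would
        // mirror the library's actual metadata shape.
        var file = new File { Id = "file-" + _files.Count, Name = filePath };
        _files[file.Id] = file;
        return Task.FromResult(file);
    }
}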
