|
| 1 | +namespace Cnblogs.DashScope.Sdk.BaiChuan; |
| 2 | + |
/// <summary>
/// BaiChuan LLM generation APIs, doc: https://help.aliyun.com/zh/dashscope/developer-reference/api-details-2
/// </summary>
public static class BaiChuanTextGenerationApi
{
    /// <summary>
    /// Get text completion from a BaiChuan model.
    /// </summary>
    /// <param name="client">The <see cref="IDashScopeClient"/>.</param>
    /// <param name="llm">The llm to use.</param>
    /// <param name="prompt">The prompt to generate completion from.</param>
    /// <returns>The model response containing the generated completion and token usage.</returns>
    public static Task<ModelResponse<TextGenerationOutput, TextGenerationTokenUsage>> GetBaiChuanTextCompletionAsync(
        this IDashScopeClient client,
        BaiChuanLlm llm,
        string prompt)
    {
        // Delegate to the string-based overload using the enum's wire-format model name.
        return client.GetBaiChuanTextCompletionAsync(llm.GetModelName(), prompt);
    }

    /// <summary>
    /// Get text completion from a BaiChuan model.
    /// </summary>
    /// <param name="client">The <see cref="IDashScopeClient"/>.</param>
    /// <param name="llm">The llm to use.</param>
    /// <param name="prompt">The prompt to generate completion from.</param>
    /// <returns>The model response containing the generated completion and token usage.</returns>
    public static Task<ModelResponse<TextGenerationOutput, TextGenerationTokenUsage>> GetBaiChuanTextCompletionAsync(
        this IDashScopeClient client,
        string llm,
        string prompt)
    {
        return client.GetTextCompletionAsync(
            new ModelRequest<TextGenerationInput, TextGenerationParameters>
            {
                Model = llm,
                Input = new TextGenerationInput { Prompt = prompt },
                // Prompt-based completion takes no extra parameters.
                Parameters = null
            });
    }

    /// <summary>
    /// Get text completion from a BaiChuan model.
    /// </summary>
    /// <param name="client">The <see cref="IDashScopeClient"/>.</param>
    /// <param name="llm">The llm to use.</param>
    /// <param name="messages">The context messages.</param>
    /// <param name="resultFormat">Can be 'text' or 'message', defaults to 'text'. Call <see cref="ResultFormats"/> to get available options.</param>
    /// <returns>The model response containing the generated completion and token usage.</returns>
    public static Task<ModelResponse<TextGenerationOutput, TextGenerationTokenUsage>> GetBaiChuanTextCompletionAsync(
        this IDashScopeClient client,
        BaiChuan2Llm llm,
        IEnumerable<ChatMessage> messages,
        string? resultFormat = null)
    {
        // Delegate to the string-based overload using the enum's wire-format model name.
        return client.GetBaiChuanTextCompletionAsync(llm.GetModelName(), messages, resultFormat);
    }

    /// <summary>
    /// Get text completion from a BaiChuan model.
    /// </summary>
    /// <param name="client">The <see cref="IDashScopeClient"/>.</param>
    /// <param name="llm">The llm to use.</param>
    /// <param name="messages">The context messages.</param>
    /// <param name="resultFormat">Can be 'text' or 'message', defaults to 'text'. Call <see cref="ResultFormats"/> to get available options.</param>
    /// <returns>The model response containing the generated completion and token usage.</returns>
    public static Task<ModelResponse<TextGenerationOutput, TextGenerationTokenUsage>> GetBaiChuanTextCompletionAsync(
        this IDashScopeClient client,
        string llm,
        IEnumerable<ChatMessage> messages,
        string? resultFormat = null)
    {
        return client.GetTextCompletionAsync(
            new ModelRequest<TextGenerationInput, TextGenerationParameters>
            {
                Model = llm,
                Input = new TextGenerationInput { Messages = messages },
                // Only send a parameters object when the caller overrides the result format;
                // otherwise let the service apply its default ('text').
                Parameters = !string.IsNullOrEmpty(resultFormat)
                    ? new TextGenerationParameters { ResultFormat = resultFormat }
                    : null
            });
    }
}
0 commit comments