Skip to content

Commit 0d22674

Browse files
committed
feat: support llama2 text generation
1 parent 8dc63f2 commit 0d22674

File tree

6 files changed

+133
-2
lines changed

6 files changed

+133
-2
lines changed
Lines changed: 17 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,17 @@
1+
namespace Cnblogs.DashScope.Sdk.Llama2;
2+
3+
/// <summary>
/// Known model identifiers in the LLaMa2 family supported by this SDK.
/// </summary>
public enum Llama2Model
{
    /// <summary>
    /// The llama2-7b-chat-v2 model.
    /// </summary>
    Chat7Bv2 = 1,

    /// <summary>
    /// The llama2-13b-chat-v2 model.
    /// </summary>
    Chat13Bv2 = 2
}
Lines changed: 17 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,17 @@
1+
namespace Cnblogs.DashScope.Sdk.Llama2;
2+
3+
/// <summary>
/// Maps <see cref="Llama2Model"/> values to the model name strings expected by the DashScope API.
/// </summary>
internal static class Llama2ModelNames
{
    /// <summary>
    /// Gets the DashScope model name string for the given <see cref="Llama2Model"/> value.
    /// </summary>
    /// <param name="model">The model enum value to translate.</param>
    /// <returns>The API model name, e.g. <c>llama2-7b-chat-v2</c>.</returns>
    /// <exception cref="ArgumentOutOfRangeException">
    /// <paramref name="model"/> is not a known <see cref="Llama2Model"/> value.
    /// </exception>
    public static string GetModelName(this Llama2Model model)
    {
        return model switch
        {
            Llama2Model.Chat7Bv2 => "llama2-7b-chat-v2",
            Llama2Model.Chat13Bv2 => "llama2-13b-chat-v2",
            // Fix: use plain ASCII quotes in the message; the original contained
            // typographic quotes (‘model’), inconsistent with other SDK strings.
            _ => throw new ArgumentOutOfRangeException(
                nameof(model),
                model,
                "Unknown model type, please use the overload that accepts a string 'model' parameter.")
        };
    }
}
Lines changed: 51 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,51 @@
1+
namespace Cnblogs.DashScope.Sdk.Llama2;
2+
3+
/// <summary>
/// Extensions for llama2 text generation, docs: https://help.aliyun.com/zh/dashscope/developer-reference/api-details-11
/// </summary>
public static class Llama2TextGenerationApi
{
    /// <summary>
    /// Get text completion from llama2 model.
    /// </summary>
    /// <param name="client">The <see cref="IDashScopeClient"/>.</param>
    /// <param name="model">The model name.</param>
    /// <param name="messages">The context messages.</param>
    /// <param name="resultFormat">Can be 'text' or 'message'. Call <see cref="ResultFormats"/> to get available options.</param>
    /// <returns>The text generation response from the model.</returns>
    public static async Task<ModelResponse<TextGenerationOutput, TextGenerationTokenUsage>>
        GetLlama2TextCompletionAsync(
            this IDashScopeClient client,
            Llama2Model model,
            IEnumerable<ChatMessage> messages,
            string? resultFormat = null)
    {
        // Resolve the enum to its API model name and delegate to the string overload.
        var modelName = model.GetModelName();
        return await client.GetLlama2TextCompletionAsync(modelName, messages, resultFormat);
    }

    /// <summary>
    /// Get text completion from llama2 model.
    /// </summary>
    /// <param name="client">The <see cref="IDashScopeClient"/>.</param>
    /// <param name="model">The model name.</param>
    /// <param name="messages">The context messages.</param>
    /// <param name="resultFormat">Can be 'text' or 'message'. Call <see cref="ResultFormats"/> to get available options.</param>
    /// <returns>The text generation response from the model.</returns>
    public static async Task<ModelResponse<TextGenerationOutput, TextGenerationTokenUsage>>
        GetLlama2TextCompletionAsync(
            this IDashScopeClient client,
            string model,
            IEnumerable<ChatMessage> messages,
            string? resultFormat = null)
    {
        // Only send a parameters object when the caller asked for a specific
        // result format; otherwise omit it entirely (same as the API default).
        TextGenerationParameters? parameters = null;
        if (resultFormat != null)
        {
            parameters = new TextGenerationParameters { ResultFormat = resultFormat };
        }

        var request = new ModelRequest<TextGenerationInput, TextGenerationParameters>
        {
            Model = model,
            Input = new TextGenerationInput { Messages = messages },
            Parameters = parameters
        };
        return await client.GetTextCompletionAsync(request);
    }
}
Lines changed: 46 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,46 @@
1+
using Cnblogs.DashScope.Sdk.Llama2;
2+
using NSubstitute;
3+
4+
namespace Cnblogs.DashScope.Sdk.UnitTests;
5+
6+
/// <summary>
/// Unit tests for the llama2 text generation extension methods.
/// </summary>
public class Llama2TextGenerationTests
{
    // Shared conversation fixture reused by every test in this class.
    private static readonly List<ChatMessage> ContextMessages =
        [new("system", "you are a helpful assistant"), new("user", "hello")];

    [Fact]
    public async Task Llama2_UseEnum_SuccessAsync()
    {
        // Arrange
        var dashScopeClient = Substitute.For<IDashScopeClient>();

        // Act
        _ = await dashScopeClient.GetLlama2TextCompletionAsync(
            Llama2Model.Chat13Bv2,
            ContextMessages,
            ResultFormats.Message);

        // Assert - the enum must be resolved to its API model name.
        _ = await dashScopeClient.Received().GetTextCompletionAsync(
            Arg.Is<ModelRequest<TextGenerationInput, TextGenerationParameters>>(
                request => request.Input.Messages == ContextMessages
                           && request.Model == "llama2-13b-chat-v2"
                           && request.Parameters != null
                           && request.Parameters.ResultFormat == ResultFormats.Message));
    }

    [Fact]
    public async Task Llama2_CustomModel_SuccessAsync()
    {
        // Arrange
        var dashScopeClient = Substitute.For<IDashScopeClient>();

        // Act
        _ = await dashScopeClient.GetLlama2TextCompletionAsync(
            "custom-model",
            ContextMessages,
            ResultFormats.Message);

        // Assert - a raw model name string must be passed through unchanged.
        _ = await dashScopeClient.Received().GetTextCompletionAsync(
            Arg.Is<ModelRequest<TextGenerationInput, TextGenerationParameters>>(
                request => request.Input.Messages == ContextMessages
                           && request.Model == "custom-model"
                           && request.Parameters != null
                           && request.Parameters.ResultFormat == ResultFormats.Message));
    }
}

test/Cnblogs.DashScope.Sdk.UnitTests/QWenMultimodalCompletionTests.cs renamed to test/Cnblogs.DashScope.Sdk.UnitTests/QWenMultimodalGenerationTests.cs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,7 @@
33

44
namespace Cnblogs.DashScope.Sdk.UnitTests;
55

6-
public class QWenMultimodalCompletionTests
6+
public class QWenMultimodalGenerationTests
77
{
88
private const string CustomModel = "custom-model";
99

test/Cnblogs.DashScope.Sdk.UnitTests/QWenTextCompletionTests.cs renamed to test/Cnblogs.DashScope.Sdk.UnitTests/QWenTextGenerationTests.cs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,7 @@
33

44
namespace Cnblogs.DashScope.Sdk.UnitTests;
55

6-
public class QWenTextCompletionTests
6+
public class QWenTextGenerationTests
77
{
88
private const string CustomModel = "custom-model";
99

0 commit comments

Comments
 (0)