
Commit 172c7f1

mxsl-gr authored and markpollack committed
Add more models and tests for MiniMax AI Model
1 parent d5b8123 commit 172c7f1

File tree

5 files changed: +84 -17 lines changed


models/spring-ai-minimax/src/main/java/org/springframework/ai/minimax/api/MiniMaxApi.java

Lines changed: 10 additions & 5 deletions

@@ -49,7 +49,7 @@
  */
 public class MiniMaxApi {

-	public static final String DEFAULT_CHAT_MODEL = ChatModel.ABAB_5_5_Chat.getValue();
+	public static final String DEFAULT_CHAT_MODEL = ChatModel.ABAB_6_5_G_Chat.getValue();
 	public static final String DEFAULT_EMBEDDING_MODEL = EmbeddingModel.Embo_01.getValue();
 	private static final Predicate<String> SSE_DONE_PREDICATE = "[DONE]"::equals;

@@ -114,9 +114,15 @@ public MiniMaxApi(String baseUrl, String miniMaxToken, RestClient.Builder restCl
 	 * <a href="https://www.minimaxi.com/document/algorithm-concept">MiniMax Model</a>.
 	 */
 	public enum ChatModel implements ModelDescription {
-		ABAB_6_Chat("abab6-chat"),
+		ABAB_6_5_Chat("abab6.5-chat"),
+		ABAB_6_5_S_Chat("abab6.5s-chat"),
+		ABAB_6_5_T_Chat("abab6.5t-chat"),
+		ABAB_6_5_G_Chat("abab6.5g-chat"),
 		ABAB_5_5_Chat("abab5.5-chat"),
-		ABAB_5_5_S_Chat("abab5.5s-chat");
+		ABAB_5_5_S_Chat("abab5.5s-chat"),
+
+		@Deprecated(since = "1.0.0-M2", forRemoval = true) // Replaced by ABAB_6_5_S_Chat
+		ABAB_6_Chat("abab6-chat");

 		public final String value;

@@ -408,8 +414,7 @@ public enum Role {
 	 * @param type Content type, each can be of type text or image_url.
 	 * @param text The text content of the message.
 	 * @param imageUrl The image content of the message. You can pass multiple
-	 * images by adding multiple image_url content parts. Image input is only
-	 * supported when using the glm-4v model.
+	 * images by adding multiple image_url content parts.
 	 */
 	@JsonInclude(Include.NON_NULL)
 	public record MediaContent(
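For orientation, a minimal sketch (not part of this commit) of selecting one of the newly added abab6.5 chat models through the options builder. It mirrors the MiniMaxChatModel / MiniMaxChatOptions usage shown in the documentation diff further below; the import paths, the demo class itself, and the response accessor chain are assumptions.

import org.springframework.ai.chat.model.ChatResponse;
import org.springframework.ai.chat.prompt.Prompt;
import org.springframework.ai.minimax.MiniMaxChatModel;
import org.springframework.ai.minimax.MiniMaxChatOptions;
import org.springframework.ai.minimax.api.MiniMaxApi;

// Hypothetical demo class, for illustration only.
class NewChatModelSketch {

	public static void main(String[] args) {
		// Low-level API client, as constructed in the docs diff below.
		var miniMaxApi = new MiniMaxApi(System.getenv("MINIMAX_API_KEY"));

		// Opt into one of the constants added by this commit (abab6.5s-chat here);
		// ABAB_6_5_G_Chat is now the default, so this override is only needed for a non-default model.
		var chatModel = new MiniMaxChatModel(miniMaxApi, MiniMaxChatOptions.builder()
				.withModel(MiniMaxApi.ChatModel.ABAB_6_5_S_Chat.getValue())
				.withTemperature(0.4f)
				.build());

		ChatResponse response = chatModel.call(new Prompt("Generate the names of 5 famous pirates."));
		System.out.println(response.getResult().getOutput().getContent()); // accessor chain assumed
	}

}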
models/spring-ai-minimax/src/test/java/org/springframework/ai/minimax/EmbeddingIT.java

Lines changed: 63 additions & 0 deletions (new file)

@@ -0,0 +1,63 @@
+/*
+ * Copyright 2023 - 2024 the original author or authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.springframework.ai.minimax;
+
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.condition.EnabledIfEnvironmentVariable;
+import org.springframework.ai.embedding.EmbeddingResponse;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.boot.test.context.SpringBootTest;
+
+import java.util.List;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+/**
+ * @author Geng Rong
+ */
+@SpringBootTest
+@EnabledIfEnvironmentVariable(named = "MINIMAX_API_KEY", matches = ".+")
+class EmbeddingIT {
+
+	@Autowired
+	private MiniMaxEmbeddingModel embeddingModel;
+
+	@Test
+	void defaultEmbedding() {
+		assertThat(embeddingModel).isNotNull();
+
+		EmbeddingResponse embeddingResponse = embeddingModel.embedForResponse(List.of("Hello World"));
+		assertThat(embeddingResponse.getResults()).hasSize(1);
+		assertThat(embeddingResponse.getResults().get(0)).isNotNull();
+		assertThat(embeddingResponse.getResults().get(0).getOutput()).hasSize(1536);
+
+		assertThat(embeddingModel.dimensions()).isEqualTo(1536);
+	}
+
+	@Test
+	void batchEmbedding() {
+		assertThat(embeddingModel).isNotNull();
+		EmbeddingResponse embeddingResponse = embeddingModel.embedForResponse(List.of("Hello World", "HI"));
+		assertThat(embeddingResponse.getResults()).hasSize(2);
+		assertThat(embeddingResponse.getResults().get(0)).isNotNull();
+		assertThat(embeddingResponse.getResults().get(0).getOutput()).hasSize(1536);
+		assertThat(embeddingResponse.getResults().get(1)).isNotNull();
+		assertThat(embeddingResponse.getResults().get(1).getOutput()).hasSize(1536);
+
+		assertThat(embeddingModel.dimensions()).isEqualTo(1536);
+	}
+
+}
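For context, a minimal sketch (assumed, not part of this commit) of driving the same embedding calls outside the Spring Boot test harness. The single-argument MiniMaxEmbeddingModel constructor and the demo class are assumptions; embedForResponse and dimensions are the methods exercised by the test above.

import java.util.List;

import org.springframework.ai.embedding.EmbeddingResponse;
import org.springframework.ai.minimax.MiniMaxEmbeddingModel;
import org.springframework.ai.minimax.api.MiniMaxApi;

// Hypothetical demo class, for illustration only.
class EmbeddingSketch {

	public static void main(String[] args) {
		// Assumes a constructor that wraps the low-level API client directly.
		var embeddingModel = new MiniMaxEmbeddingModel(new MiniMaxApi(System.getenv("MINIMAX_API_KEY")));

		// Single-text request: one result, which the test above expects to be a 1536-dimension vector.
		EmbeddingResponse single = embeddingModel.embedForResponse(List.of("Hello World"));
		System.out.println(single.getResults().size());

		// Batch request: one result per input string.
		EmbeddingResponse batch = embeddingModel.embedForResponse(List.of("Hello World", "HI"));
		System.out.println(batch.getResults().size());

		// Reported dimensionality of the default Embo_01 embedding model (1536 per the test above).
		System.out.println(embeddingModel.dimensions());
	}

}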

models/spring-ai-minimax/src/test/java/org/springframework/ai/minimax/api/MiniMaxApiIT.java

Lines changed: 3 additions & 4 deletions

@@ -17,7 +17,6 @@

 import org.junit.jupiter.api.Test;
 import org.junit.jupiter.api.condition.EnabledIfEnvironmentVariable;
-
 import org.springframework.ai.minimax.api.MiniMaxApi.ChatCompletion;
 import org.springframework.ai.minimax.api.MiniMaxApi.ChatCompletionChunk;
 import org.springframework.ai.minimax.api.MiniMaxApi.ChatCompletionMessage;

@@ -43,8 +42,8 @@ public class MiniMaxApiIT {
 	@Test
 	void chatCompletionEntity() {
 		ChatCompletionMessage chatCompletionMessage = new ChatCompletionMessage("Hello world", Role.USER);
-		ResponseEntity<ChatCompletion> response = miniMaxApi.chatCompletionEntity(
-				new ChatCompletionRequest(List.of(chatCompletionMessage), "glm-3-turbo", 0.7f, false));
+		ResponseEntity<ChatCompletion> response = miniMaxApi
+			.chatCompletionEntity(new ChatCompletionRequest(List.of(chatCompletionMessage), "abab6.5g", 0.7f, false));

 		assertThat(response).isNotNull();
 		assertThat(response.getBody()).isNotNull();

@@ -54,7 +53,7 @@ void chatCompletionEntity() {
 	void chatCompletionStream() {
 		ChatCompletionMessage chatCompletionMessage = new ChatCompletionMessage("Hello world", Role.USER);
 		Flux<ChatCompletionChunk> response = miniMaxApi
-			.chatCompletionStream(new ChatCompletionRequest(List.of(chatCompletionMessage), "glm-3-turbo", 0.7f, true));
+			.chatCompletionStream(new ChatCompletionRequest(List.of(chatCompletionMessage), "abab6.5g", 0.7f, true));

 		assertThat(response).isNotNull();
 		assertThat(response.collectList().block()).isNotNull();

models/spring-ai-minimax/src/test/java/org/springframework/ai/minimax/api/MiniMaxApiToolFunctionCallIT.java

Lines changed: 2 additions & 2 deletions

@@ -85,7 +85,7 @@ public void toolFunctionCall() {
 		List<ChatCompletionMessage> messages = new ArrayList<>(List.of(message));

 		ChatCompletionRequest chatCompletionRequest = new ChatCompletionRequest(messages,
-				org.springframework.ai.minimax.api.MiniMaxApi.ChatModel.ABAB_6_Chat.getValue(), List.of(functionTool),
+				org.springframework.ai.minimax.api.MiniMaxApi.ChatModel.ABAB_6_5_Chat.getValue(), List.of(functionTool),
 				ToolChoiceBuilder.AUTO);

 		ResponseEntity<ChatCompletion> chatCompletion = miniMaxApi.chatCompletionEntity(chatCompletionRequest);

@@ -116,7 +116,7 @@ public void toolFunctionCall() {
 		}

 		var functionResponseRequest = new ChatCompletionRequest(messages,
-				org.springframework.ai.minimax.api.MiniMaxApi.ChatModel.ABAB_6_Chat.getValue(), 0.5F);
+				org.springframework.ai.minimax.api.MiniMaxApi.ChatModel.ABAB_6_5_Chat.getValue(), 0.5F);

 		ResponseEntity<ChatCompletion> chatCompletion2 = miniMaxApi.chatCompletionEntity(functionResponseRequest);

spring-ai-docs/src/main/antora/modules/ROOT/pages/api/chat/minimax-chat.adoc

Lines changed: 6 additions & 6 deletions

@@ -90,7 +90,7 @@ The prefix `spring.ai.minimax.chat` is the property prefix that lets you configu
 | spring.ai.minimax.chat.enabled | Enable MiniMax chat model. | true
 | spring.ai.minimax.chat.base-url | Optional overrides the spring.ai.minimax.base-url to provide chat specific url | https://api.minimax.chat
 | spring.ai.minimax.chat.api-key | Optional overrides the spring.ai.minimax.api-key to provide chat specific api-key | -
-| spring.ai.minimax.chat.options.model | This is the MiniMax Chat model to use | `abab5.5-chat` (the `abab5.5s-chat`, `abab5.5-chat`, and `abab6-chat` point to the latest model versions)
+| spring.ai.minimax.chat.options.model | This is the MiniMax Chat model to use | `abab6.5g-chat` (the `abab5.5-chat`, `abab5.5s-chat`, `abab6.5-chat`, `abab6.5g-chat`, `abab6.5t-chat` and `abab6.5s-chat` point to the latest model versions)
 | spring.ai.minimax.chat.options.maxTokens | The maximum number of tokens to generate in the chat completion. The total length of input tokens and generated tokens is limited by the model's context length. | -
 | spring.ai.minimax.chat.options.temperature | The sampling temperature to use that controls the apparent creativity of generated completions. Higher values will make output more random while lower values will make results more focused and deterministic. It is not recommended to modify temperature and top_p for the same completions request as the interaction of these two settings is difficult to predict. | 0.7
 | spring.ai.minimax.chat.options.topP | An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or temperature but not both. | 1.0

@@ -121,7 +121,7 @@ ChatResponse response = chatModel.call(
     new Prompt(
         "Generate the names of 5 famous pirates.",
         MiniMaxChatOptions.builder()
-            .withModel(MiniMaxApi.ChatModel.GLM_3_Turbo.getValue())
+            .withModel(MiniMaxApi.ChatModel.ABAB_5_5_Chat.getValue())
             .withTemperature(0.5f)
             .build()
     ));

@@ -138,7 +138,7 @@ Add a `application.properties` file, under the `src/main/resources` directory, t
 [source,application.properties]
 ----
 spring.ai.minimax.api-key=YOUR_API_KEY
-spring.ai.minimax.chat.options.model=glm-3-turbo
+spring.ai.minimax.chat.options.model=abab6.5g-chat
 spring.ai.minimax.chat.options.temperature=0.7
 ----

@@ -204,7 +204,7 @@ Next, create a `MiniMaxChatModel` and use it for text generations:
 var miniMaxApi = new MiniMaxApi(System.getenv("MINIMAX_API_KEY"));

 var chatModel = new MiniMaxChatModel(miniMaxApi, MiniMaxChatOptions.builder()
-        .withModel(MiniMaxApi.ChatModel.GLM_3_Turbo.getValue())
+        .withModel(MiniMaxApi.ChatModel.ABAB_5_5_Chat.getValue())
         .withTemperature(0.4f)
         .withMaxTokens(200)
         .build());

@@ -236,11 +236,11 @@ ChatCompletionMessage chatCompletionMessage =

 // Sync request
 ResponseEntity<ChatCompletion> response = miniMaxApi.chatCompletionEntity(
-    new ChatCompletionRequest(List.of(chatCompletionMessage), MiniMaxApi.ChatModel.GLM_3_Turbo.getValue(), 0.7f, false));
+    new ChatCompletionRequest(List.of(chatCompletionMessage), MiniMaxApi.ChatModel.ABAB_5_5_Chat.getValue(), 0.7f, false));

 // Streaming request
 Flux<ChatCompletionChunk> streamResponse = miniMaxApi.chatCompletionStream(
-    new ChatCompletionRequest(List.of(chatCompletionMessage), MiniMaxApi.ChatModel.GLM_3_Turbo.getValue(), 0.7f, true));
+    new ChatCompletionRequest(List.of(chatCompletionMessage), MiniMaxApi.ChatModel.ABAB_5_5_Chat.getValue(), 0.7f, true));
 ----

 Follow the https://github.com/spring-projects/spring-ai/blob/main/models/spring-ai-minimax/src/main/java/org/springframework/ai/minimax/api/MiniMaxApi.java[MiniMaxApi.java]'s JavaDoc for further information.
