
Commit 611cc70

update api to 2024.7

1 parent 8f7d1ea

File tree

14 files changed: +220 additions, -132 deletions

src/main/java/com/plexpt/chatgpt/entity/chat/ChatChoice.java

Lines changed: 24 additions & 4 deletions
@@ -2,7 +2,6 @@
 
 import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
 import com.fasterxml.jackson.annotation.JsonProperty;
-
 import lombok.Data;
 
 /**
@@ -30,17 +29,38 @@ public class ChatChoice {
     // "logprobs": null,
     // "finish_reason": "tool_calls"
     // }
-    private long index;
+    private Integer index;
     /**
      * Returned as delta when the request parameter stream is true
      */
-    @JsonProperty("delta")
     private Message delta;
     /**
      * Returned as message when the request parameter stream is false
      */
-    @JsonProperty("message")
     private Message message;
+    /**
+     * The reason the model stopped generating tokens. This will be stop if the model hit a natural stop point or a provided stop sequence, length if the maximum number of tokens specified in the request was reached, content_filter if content was omitted due to a flag from our content filters, tool_calls if the model called a tool, or function_call (deprecated) if the model called a function.
+     * Indicates why the model stopped generating tokens; returned in the response body to explain why generation ended.
+     * <p>
+     * Possible values:
+     * stop:
+     * <p>
+     * The model reached a natural stop point or hit a provided stop sequence.
+     * For example, the generated text contains a defined stop sequence (such as "END"), or the model considers the sentence or paragraph complete.
+     * length:
+     * <p>
+     * The maximum number of tokens specified in the request was reached.
+     * For example, with max_tokens set to 50, generation stops once 50 tokens have been produced.
+     * content_filter:
+     * <p>
+     * Content was omitted because it was flagged by the content filter.
+     * For example, the generated text may contain inappropriate content that triggers the filter and ends generation early.
+     * tool_calls:
+     * <p>
+     * The model called a tool.
+     * Typically used to extend the model's capabilities, such as calling an external API or performing a specific task.
+     */
     @JsonProperty("finish_reason")
     private String finishReason;
+
 }
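
Since ChatChoice now documents the finish_reason values (stop, length, content_filter, tool_calls), callers can branch on them when reading choices. A minimal sketch of doing so, assuming the Lombok-generated getters on ChatCompletionResponse, ChatChoice and Message; the handle method and its printouts are illustrative only:

import com.plexpt.chatgpt.entity.chat.ChatChoice;
import com.plexpt.chatgpt.entity.chat.ChatCompletionResponse;

public class FinishReasonSketch {
    // Branch on the documented finish_reason values when reading a non-streaming response.
    static void handle(ChatCompletionResponse response) {
        for (ChatChoice choice : response.getChoices()) {
            String reason = choice.getFinishReason();
            if ("length".equals(reason)) {
                System.out.println("Truncated at max_tokens: " + choice.getMessage().getContent());
            } else if ("tool_calls".equals(reason)) {
                System.out.println("Model asked to call a tool");
            } else {
                // "stop" and "content_filter" end up here; content may be partial for the latter
                System.out.println(choice.getMessage().getContent());
            }
        }
    }
}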

src/main/java/com/plexpt/chatgpt/entity/chat/ChatCompletion.java

Lines changed: 86 additions & 59 deletions
@@ -14,6 +14,7 @@
  * chat
  *
  * @author plexpt
+ * @link https://platform.openai.com/docs/overview
  */
 @Data
 @Builder
@@ -35,126 +36,152 @@ public class ChatCompletion {
      * <p>
      * Do not change both this and topP
      */
-    @Builder.Default
-    private double temperature = 0.9;
+    private Double temperature;
 
     /**
      * 0-1
      * 0.9 recommended
      * Do not change both this and temperature
      */
     @JsonProperty("top_p")
-    @Builder.Default
-    private double topP = 0.9;
-
+    private Double topP;
 
-    /**
-     * auto
-     */
-    String function_call;
 
     @JsonProperty("tool_choice")
     String toolChoice;
 
     List<ChatTool> tools;
 
-    List<ChatFunction> functions;
-
     /**
      * Number of results.
      */
     @Builder.Default
-    private Integer n = 1;
+    Integer n = 1;
 
 
     /**
      * Whether to stream the output.
      * default:false
      */
     @Builder.Default
-    private boolean stream = false;
+    Boolean stream = false;
     /**
      * Stop sequences
+     * <p>
+     * The stop parameter specifies sequences at which the API stops generating further tokens. It may be a string, an array of strings, or null, and can hold at most 4 sequences. Optional; defaults to null.
+     * <p>
+     * Parameter details
+     * Type: string, array, or null
+     * Optional: yes
+     * Default: null
+     * Purpose: stop generating further tokens once any of these sequences appears in the output
+     * Usage
+     * Single stop sequence:
+     * <p>
+     * With a single string, the API stops as soon as that string appears. For example, with stop set to "END", generation stops once the output contains "END".
+     * Multiple stop sequences:
+     * <p>
+     * With an array of strings, the API stops as soon as any of them appears. For example, with stop set to ["END", "STOP"], generation stops once the output contains "END" or "STOP".
+     * No stop sequence:
+     * <p>
+     * With stop left null or unset, the API follows its default behavior and generates until the token limit or an end marker is reached.
      */
-    private List<String> stop;
+    List<String> stop;
     /**
      * 3.5 supports at most 4096 tokens
      * 4.0 up to 32k
      */
     @JsonProperty("max_tokens")
-    private Integer maxTokens;
 
+    Integer maxTokens;
 
+
+    /**
+     * Optional
+     * Defaults to 0
+     * Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far,
+     * increasing the model's likelihood to talk about new topics.
+     */
     @JsonProperty("presence_penalty")
-    private double presencePenalty;
+    Double presencePenalty;
 
     /**
-     * -2.0 ~~ 2.0
+     * -2.0 ~~ 2.0 Defaults to 0
      */
     @JsonProperty("frequency_penalty")
-    private double frequencyPenalty;
+    Double frequencyPenalty;
 
+    /**
+     * Optional
+     * Defaults to null
+     */
     @JsonProperty("logit_bias")
-    private Map logitBias;
+    Map logitBias;
     /**
      * Unique user identifier, used to keep the endpoint from being called with duplicate requests
     */
-    private String user;
+    String user;
 
     /**
      * Response format; currently only gpt-3.5-turbo-1106 and gpt-4-1106-preview support json_object output
     */
     @JsonProperty("response_format")
-    private ResponseFormat responseFormat;
+    ResponseFormat responseFormat;
 
+    /**
+     * boolean or null
+     * <p>
+     * Optional
+     * Defaults to false
+     */
+    Boolean logprobs;
+
+    /**
+     * integer or null
+     * <p>
+     * Optional
+     */
+    @JsonProperty("top_logprobs")
+    Integer topLogprobs;
 
-    @Getter
-    @AllArgsConstructor
-    public enum Model {
+    Integer seed;
+
+    /**
+     * Specifies the latency tier to use for processing the request. This parameter is relevant for customers subscribed to the scale tier service:
+     * <p>
+     * If set to 'auto', the system will utilize scale tier credits until they are exhausted.
+     * If set to 'default', the request will be processed using the default service tier with a lower uptime SLA and no latency guarantee.
+     * When this parameter is set, the response body will include the service_tier utilized.
+     */
+    @JsonProperty("service_tier")
+    String serviceTier;
+
+    @JsonProperty("stream_options")
+    StreamOption streamOptions;
+
+    @JsonProperty("parallel_tool_calls")
+    Boolean parallelToolCalls;
+
+    /**
+     * model
+     */
+    public interface Model {
         /**
          * gpt-3.5-turbo
         */
-        GPT_3_5_TURBO("gpt-3.5-turbo"),
-        GPT_3_5_TURBO_0613("gpt-3.5-turbo-0613"),
-        GPT_3_5_TURBO_16K("gpt-3.5-turbo-16k"),
-        /**
-         * Temporary model, not recommended
-         */
-        GPT_3_5_TURBO_0301("gpt-3.5-turbo-0301"),
-        GPT_3_5_TURBO_1106("gpt-3.5-turbo-1106"),
-        GPT_3_5_TURBO_0125("gpt-3.5-turbo-0125"),
-        GPT_3_5_TURBO_INSTRUCT("gpt-3.5-turbo-instruct"),
+        String GPT_3_5_TURBO = "gpt-3.5-turbo";
+        String GPT_3_5_TURBO_16K = "gpt-3.5-turbo-16k";
+        String GPT_3_5_TURBO_INSTRUCT = "gpt-3.5-turbo-instruct";
         /**
          * GPT4.0
         */
-        GPT_4("gpt-4"),
-        GPT4Turbo("gpt-4-1106-preview"),
-        GPT4Turbo0125("gpt-4-0125-preview"),
-        GPT_4VP("gpt-4-vision-preview"),
-        GPT_4V("gpt-4-vision-preview"),
-        GPT_4o("gpt-4o"),
-        /**
-         * Temporary model, not recommended
-         */
-        GPT_4_0314("gpt-4-0314"),
-        /**
-         * Supports functions
-         */
-        GPT_4_0613("gpt-4-0613"),
-        /**
-         * GPT4.0 extended context
-         */
-        GPT_4_32K("gpt-4-32k"),
+        String GPT4 = "gpt-4";
+        String GPT4V = "gpt-4-vision-preview";
+        String GPT4o = "gpt-4o";
         /**
          * GPT4.0 extended context
         */
-        GPT_4_32K_0613("gpt-4-32k-0613"),
-        /**
-         * Temporary model, not recommended
-         */
-        GPT_4_32K_0314("gpt-4-32k-0314"),
-        ;
-        private String name;
+        String GPT_4_32K = "gpt-4-32k";
+
     }
 
     public int countTokens() {
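
With temperature, topP and the penalties now boxed nullable types and Model reduced to plain String constants, a request can leave tuning fields unset and rely on the server-side defaults instead of the old hard-coded 0.9. A hedged sketch of building a request with the Lombok builder; it assumes the model and messages fields, which sit outside this hunk, still exist on the class and are exposed by the builder:

import java.util.Arrays;
import java.util.List;

import com.plexpt.chatgpt.entity.chat.ChatCompletion;
import com.plexpt.chatgpt.entity.chat.Message;

public class RequestSketch {
    // Unset boxed fields stay null instead of defaulting to 0.9 / false as before.
    static ChatCompletion buildRequest(List<Message> messages) {
        return ChatCompletion.builder()
                .model(ChatCompletion.Model.GPT4o)   // String constant from the new Model interface
                .messages(messages)                  // assumed unchanged by this commit
                .temperature(0.7)                    // Double now; leave null to use the API default
                .maxTokens(512)
                .stop(Arrays.asList("END", "STOP"))  // at most 4 stop sequences
                .build();
    }
}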

src/main/java/com/plexpt/chatgpt/entity/chat/ChatCompletionResponse.java

Lines changed: 2 additions & 1 deletion
@@ -17,11 +17,12 @@
 public class ChatCompletionResponse {
     private String id;
     private String object;
-    private long created;
+    private Long created;
     private String model;
     @JsonProperty("system_fingerprint")
     private String systemFingerprint;
     private List<ChatChoice> choices;
     private Usage usage;
+    Object logprobs;
 
 }

src/main/java/com/plexpt/chatgpt/entity/chat/ChatToolFunction.java

Lines changed: 6 additions & 4 deletions
@@ -3,10 +3,7 @@
 
 import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
 import com.fasterxml.jackson.annotation.JsonInclude;
-import lombok.AllArgsConstructor;
-import lombok.Builder;
-import lombok.Data;
-import lombok.NoArgsConstructor;
+import lombok.*;
 
 import java.util.List;
 
@@ -18,6 +15,11 @@
 @JsonIgnoreProperties(ignoreUnknown = true)
 public class ChatToolFunction {
 
+    /**
+     * Required
+     * The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.
+     */
+    @NonNull
     String name;
 
     String description;
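
Marking name as @NonNull makes Lombok insert a null check into the generated constructor, so a tool function defined without a name now fails fast instead of producing an invalid request. A sketch, assuming the class keeps the Lombok @Builder its lombok.* import implies; the function name and description are made-up examples:

import com.plexpt.chatgpt.entity.chat.ChatToolFunction;

public class ToolFunctionSketch {
    static ChatToolFunction weatherTool() {
        return ChatToolFunction.builder()
                .name("get_weather")   // required: a-z, A-Z, 0-9, underscores and dashes, max length 64
                .description("Look up the current weather for a city")
                .build();              // throws NullPointerException if name was never set
    }
}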

src/main/java/com/plexpt/chatgpt/entity/chat/CompletionUseage.java

Lines changed: 40 additions & 0 deletions
@@ -0,0 +1,40 @@
+package com.plexpt.chatgpt.entity.chat;
+
+
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonInclude;
+import lombok.AllArgsConstructor;
+import lombok.Data;
+import lombok.NoArgsConstructor;
+
+/**
+ *
+ * completion_tokens
+ * integer
+ *
+ * Number of tokens in the generated completion.
+ *
+ * prompt_tokens
+ * integer
+ *
+ * Number of tokens in the prompt.
+ *
+ * total_tokens
+ * integer
+ *
+ * Total number of tokens used in the request (prompt + completion).
+ *
+ * @author pt
+ */
+@Data
+@AllArgsConstructor
+@NoArgsConstructor(force = true)
+@JsonInclude(JsonInclude.Include.NON_NULL)
+@JsonIgnoreProperties(ignoreUnknown = true)
+public class CompletionUseage {
+
+    Integer completion_tokens;
+    Integer prompt_tokens;
+    Integer total_tokens;
+
+}
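
The new CompletionUseage class keeps the wire names (completion_tokens, prompt_tokens, total_tokens) as its field names, so Jackson can bind the usage object without extra annotations. A small sketch with a hand-written JSON string, assuming the Lombok-generated accessors:

import com.fasterxml.jackson.databind.ObjectMapper;

import com.plexpt.chatgpt.entity.chat.CompletionUseage;

public class UsageSketch {
    public static void main(String[] args) throws Exception {
        String json = "{\"prompt_tokens\":13,\"completion_tokens\":42,\"total_tokens\":55}";
        CompletionUseage usage = new ObjectMapper().readValue(json, CompletionUseage.class);
        // prompt + completion should add up to the total reported by the API
        System.out.println("billed tokens: " + usage.getTotal_tokens());
    }
}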

src/main/java/com/plexpt/chatgpt/entity/chat/Message.java

Lines changed: 0 additions & 2 deletions
@@ -24,8 +24,6 @@ public class Message {
     private String content;
     private String name;
 
-    @JsonProperty("function_call")
-    private FunctionCallResult functionCall;
 
     @JsonProperty("tool_calls")
     private List<ToolCallResult> toolCalls;
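
With function_call removed from Message, tool invocations are surfaced only through tool_calls. A small null-safe accessor sketch, assuming the Lombok getter on the remaining field:

import java.util.Collections;
import java.util.List;

import com.plexpt.chatgpt.entity.chat.Message;
import com.plexpt.chatgpt.entity.chat.ToolCallResult;

public class ToolCallAccess {
    // function_call is gone; read tool requests from tool_calls only.
    static List<ToolCallResult> toolCallsOf(Message assistantMessage) {
        List<ToolCallResult> calls = assistantMessage.getToolCalls();
        return calls == null ? Collections.emptyList() : calls;
    }
}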

src/main/java/com/plexpt/chatgpt/entity/chat/ResponseFormat.java

Lines changed: 0 additions & 5 deletions
@@ -5,11 +5,6 @@
 import lombok.*;
 import lombok.extern.slf4j.Slf4j;
 
-/**
- * @author hq
- * @version 1.0
- * @date 2023/12/11
- */
 
 @Data
 @Builder
src/main/java/com/plexpt/chatgpt/entity/chat/StreamOption.java

Lines changed: 20 additions & 0 deletions
@@ -0,0 +1,20 @@
+package com.plexpt.chatgpt.entity.chat;
+
+import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
+import com.fasterxml.jackson.annotation.JsonInclude;
+import lombok.AllArgsConstructor;
+import lombok.Builder;
+import lombok.Data;
+import lombok.NoArgsConstructor;
+
+
+@Data
+@AllArgsConstructor
+@NoArgsConstructor(force = true)
+@JsonInclude(JsonInclude.Include.NON_NULL)
+@JsonIgnoreProperties(ignoreUnknown = true)
+public class StreamOption {
+
+    public Boolean include_usage;
+
+}
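
StreamOption maps to the request's stream_options object; setting include_usage asks the API to append one final streamed chunk that carries token usage for the whole run. A sketch of wiring it into a streaming request, assuming the Lombok-generated setters on ChatCompletion:

import com.plexpt.chatgpt.entity.chat.ChatCompletion;
import com.plexpt.chatgpt.entity.chat.StreamOption;

public class StreamUsageSketch {
    static ChatCompletion withStreamedUsage(ChatCompletion request) {
        StreamOption option = new StreamOption();
        option.include_usage = true;       // public field, serialized as "include_usage"
        request.setStream(true);           // stream_options is only meaningful when streaming
        request.setStreamOptions(option);
        return request;
    }
}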

src/main/java/com/plexpt/chatgpt/entity/chat/ToolCallResult.java

Lines changed: 1 addition & 0 deletions
@@ -8,6 +8,7 @@
 @JsonIgnoreProperties(ignoreUnknown = true)
 public class ToolCallResult {
 
+    Integer index;
     String id;
 
     String type;
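
The new index field identifies which tool call a streamed fragment belongs to, since one response may contain several tool calls whose pieces arrive split across many delta chunks. A sketch of grouping fragments by index so each call can be reassembled afterwards; it assumes a Lombok-generated getIndex() and leaves the actual argument concatenation to the caller:

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;

import com.plexpt.chatgpt.entity.chat.ToolCallResult;

public class ToolCallAccumulator {
    // index -> fragments received so far, kept in arrival order per tool call
    private final Map<Integer, List<ToolCallResult>> byIndex = new TreeMap<>();

    void accept(ToolCallResult fragment) {
        byIndex.computeIfAbsent(fragment.getIndex(), i -> new ArrayList<>()).add(fragment);
    }

    Map<Integer, List<ToolCallResult>> fragments() {
        return byIndex;
    }
}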
