Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,7 @@
* which it will respond with an AI generated answer.
*/
public final class ChatGptCommand extends SlashCommandAdapter {
private static final ChatGptModel CHAT_GPT_MODEL = ChatGptModel.HIGH_QUALITY;
public static final String COMMAND_NAME = "chatgpt";
private static final String QUESTION_INPUT = "question";
private static final int MAX_MESSAGE_INPUT_LENGTH = 200;
Expand Down Expand Up @@ -82,8 +83,8 @@ public void onModalSubmitted(ModalInteractionEvent event, List<String> args) {

String question = event.getValue(QUESTION_INPUT).getAsString();

Optional<String> chatgptResponse =
chatGptService.ask(question, "You may use markdown syntax for the response");
Optional<String> chatgptResponse = chatGptService.ask(question,
"You may use markdown syntax for the response", CHAT_GPT_MODEL);
if (chatgptResponse.isPresent()) {
userIdToAskedAtCache.put(event.getMember().getId(), Instant.now());
}
Expand All @@ -96,7 +97,8 @@ public void onModalSubmitted(ModalInteractionEvent event, List<String> args) {
String response = chatgptResponse.orElse(errorResponse);
SelfUser selfUser = event.getJDA().getSelfUser();

MessageEmbed responseEmbed = helper.generateGptResponseEmbed(response, selfUser, question);
MessageEmbed responseEmbed =
helper.generateGptResponseEmbed(response, selfUser, question, CHAT_GPT_MODEL);

event.getHook().sendMessageEmbeds(responseEmbed).queue();
}
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,45 @@
package org.togetherjava.tjbot.features.chatgpt;

import com.openai.models.ChatModel;

/**
 * Logical abstraction over OpenAI chat models.
 * <p>
 * Callers choose a model by performance/quality intent rather than hard-coding specific OpenAI
 * model versions throughout the codebase.
 */
public enum ChatGptModel {
    /** Fastest response time with the lowest computational cost. */
    FASTEST(ChatModel.GPT_3_5_TURBO),

    /** Balanced option between speed and quality. */
    FAST(ChatModel.GPT_4_1_MINI),

    /** Highest quality responses with increased reasoning capability. */
    HIGH_QUALITY(ChatModel.GPT_5_MINI);

    // Concrete OpenAI model backing this logical choice.
    private final ChatModel model;

    ChatGptModel(ChatModel model) {
        this.model = model;
    }

    /**
     * @return the underlying OpenAI model used by this enum.
     */
    public ChatModel toChatModel() {
        return model;
    }

    @Override
    public String toString() {
        return model.toString();
    }
}
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,6 @@

import com.openai.client.OpenAIClient;
import com.openai.client.okhttp.OpenAIOkHttpClient;
import com.openai.models.ChatModel;
import com.openai.models.responses.Response;
import com.openai.models.responses.ResponseCreateParams;
import com.openai.models.responses.ResponseOutputText;
Expand Down Expand Up @@ -51,11 +50,12 @@ public ChatGptService(Config config) {
* @param question The question being asked of ChatGPT. Max is {@value MAX_TOKENS} tokens.
* @param context The category of asked question, to set the context(eg. Java, Database, Other
* etc).
* @param chatModel The AI model to use for this request.
* @return response from ChatGPT as a String.
* @see <a href="https://platform.openai.com/docs/guides/chat/managing-tokens">ChatGPT
* Tokens</a>.
*/
public Optional<String> ask(String question, @Nullable String context) {
public Optional<String> ask(String question, @Nullable String context, ChatGptModel chatModel) {
if (isDisabled) {
return Optional.empty();
}
Expand All @@ -76,7 +76,7 @@ public Optional<String> ask(String question, @Nullable String context) {
String response = null;
try {
ResponseCreateParams params = ResponseCreateParams.builder()
.model(ChatModel.GPT_5_NANO)
.model(chatModel.toChatModel())
.input(inputPrompt)
.maxOutputTokens(MAX_TOKENS)
.build();
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,7 @@
import org.togetherjava.tjbot.db.generated.tables.HelpThreads;
import org.togetherjava.tjbot.db.generated.tables.records.HelpThreadsRecord;
import org.togetherjava.tjbot.features.chatgpt.ChatGptCommand;
import org.togetherjava.tjbot.features.chatgpt.ChatGptModel;
import org.togetherjava.tjbot.features.chatgpt.ChatGptService;
import org.togetherjava.tjbot.features.componentids.ComponentIdInteractor;
import org.togetherjava.tjbot.features.utils.Guilds;
Expand Down Expand Up @@ -55,6 +56,7 @@
*/
public final class HelpSystemHelper {
private static final Logger logger = LoggerFactory.getLogger(HelpSystemHelper.class);
private static final ChatGptModel CHAT_GPT_MODEL = ChatGptModel.FAST;

static final Color AMBIENT_COLOR = new Color(255, 255, 165);

Expand Down Expand Up @@ -143,7 +145,7 @@ RestAction<Message> constructChatGptAttempt(ThreadChannel threadChannel,
String context =
"Category %s on a Java Q&A discord server. You may use markdown syntax for the response"
.formatted(matchingTag.getName());
chatGptAnswer = chatGptService.ask(question, context);
chatGptAnswer = chatGptService.ask(question, context, CHAT_GPT_MODEL);

if (chatGptAnswer.isEmpty()) {
return useChatGptFallbackMessage(threadChannel);
Expand All @@ -168,7 +170,8 @@ RestAction<Message> constructChatGptAttempt(ThreadChannel threadChannel,
answer = answer.substring(0, responseCharLimit);
}

MessageEmbed responseEmbed = generateGptResponseEmbed(answer, selfUser, originalQuestion);
MessageEmbed responseEmbed =
generateGptResponseEmbed(answer, selfUser, originalQuestion, CHAT_GPT_MODEL);
return post.flatMap(_ -> threadChannel.sendMessageEmbeds(responseEmbed)
.addActionRow(generateDismissButton(componentIdInteractor, messageId.get())));
}
Expand All @@ -178,11 +181,13 @@ RestAction<Message> constructChatGptAttempt(ThreadChannel threadChannel,
*
* @param answer The response text generated by AI.
* @param selfUser The SelfUser representing the bot.
* @param title The title for the MessageEmbed.
* @param title The title for the MessageEmbed
* @param model The AI model that was used for the foot notes
* @return A MessageEmbed that contains response generated by AI.
*/
public MessageEmbed generateGptResponseEmbed(String answer, SelfUser selfUser, String title) {
String responseByGptFooter = "- AI generated response";
public MessageEmbed generateGptResponseEmbed(String answer, SelfUser selfUser, String title,
ChatGptModel model) {
String responseByGptFooter = "- AI generated response using %s model".formatted(model);

int embedTitleLimit = MessageEmbed.TITLE_MAX_LENGTH;
String capitalizedTitle = Character.toUpperCase(title.charAt(0)) + title.substring(1);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,7 @@
import org.togetherjava.tjbot.features.BotCommandAdapter;
import org.togetherjava.tjbot.features.CommandVisibility;
import org.togetherjava.tjbot.features.MessageContextCommand;
import org.togetherjava.tjbot.features.chatgpt.ChatGptModel;
import org.togetherjava.tjbot.features.chatgpt.ChatGptService;
import org.togetherjava.tjbot.features.utils.StringDistances;

Expand Down Expand Up @@ -98,7 +99,8 @@ public void onMessageContext(MessageContextInteractionEvent event) {
String chatGptTitleRequest =
"Summarize the following question into a concise title or heading not more than 5 words, remove quotations if any: %s"
.formatted(originalMessage);
Optional<String> chatGptTitle = chatGptService.ask(chatGptTitleRequest, null);
Optional<String> chatGptTitle =
chatGptService.ask(chatGptTitleRequest, null, ChatGptModel.FASTEST);
String title = chatGptTitle.orElse(createTitle(originalMessage));
if (title.startsWith("\"") && title.endsWith("\"")) {
title = title.substring(1, title.length() - 1);
Expand Down
Loading