Skip to content

Commit ccbc879

Browse files
Display model name used (#705)
* Update Vertex AI prompt formatting and structure for improved RAG
* fix(chatbot): update model reference from PALM2 to Gemini 2.0 Flash for improved accuracy
* feat(prompt-client): add model name retrieval methods and update chatbot to display model name
* Remove unnecessary blank line in vertex_prompt_client.py
  Signed-off-by: Spyros <northdpole@users.noreply.github.com>
* Fix indentation in OpenAI prompt client
  Signed-off-by: Spyros <northdpole@users.noreply.github.com>
* Fix formatting and whitespace in vertex_prompt_client.py
  Signed-off-by: Spyros <northdpole@users.noreply.github.com>
* Format return statement for better readability
  Signed-off-by: Spyros <northdpole@users.noreply.github.com>

---------

Signed-off-by: Spyros <northdpole@users.noreply.github.com>
Co-authored-by: Spyros <northdpole@users.noreply.github.com>
1 parent 1898aae commit ccbc879

File tree

4 files changed

+36
-2
lines changed

4 files changed

+36
-2
lines changed

application/frontend/src/pages/chatbot/chatbot.tsx

Lines changed: 19 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -34,6 +34,21 @@ export const Chatbot = () => {
3434
const [error, setError] = useState<string>('');
3535
const [chat, setChat] = useState<ChatState>(DEFAULT_CHAT_STATE);
3636
const [user, setUser] = useState('');
37+
const [modelName, setModelName] = useState<string>('');
38+
39+
function getModelDisplayName(modelName: string): string {
40+
if (!modelName) {
41+
return 'a Large Language Model';
42+
}
43+
// Format model names for display
44+
if (modelName.startsWith('gemini')) {
45+
return `Google ${modelName.replace('gemini-', 'Gemini ').replace(/-/g, ' ')}`;
46+
} else if (modelName.startsWith('gpt')) {
47+
return `OpenAI ${modelName.toUpperCase()}`;
48+
}
49+
return modelName;
50+
}
51+
3752
const hasMessages = chatMessages.length > 0;
3853
function login() {
3954
fetch(`${apiUrl}/user`, { method: 'GET' })
@@ -104,6 +119,9 @@ export const Chatbot = () => {
104119
.then((data) => {
105120
setLoading(false);
106121
setError('');
122+
if (data.model_name) {
123+
setModelName(data.model_name);
124+
}
107125
setChatMessages((prev) => [
108126
...prev,
109127
{
@@ -211,7 +229,7 @@ export const Chatbot = () => {
211229

212230
<div className="chatbot-disclaimer">
213231
<i>
214-
Answers are generated by a Google PALM2 Large Language Model, which uses the internet as
232+
Answers are generated by {getModelDisplayName(modelName)} Large Language Model, which uses the internet as
215233
training data, plus collected key cybersecurity standards from{' '}
216234
<a href="https://opencre.org">OpenCRE</a> as the preferred source. This leads to more reliable
217235
answers and adds references, but note: it is still generative AI which is never guaranteed

application/prompt_client/openai_prompt_client.py

Lines changed: 5 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -10,6 +10,11 @@ class OpenAIPromptClient:
1010
def __init__(self, openai_key) -> None:
    """Store the API key, configure the openai module, and pin the chat model."""
    self.api_key = openai_key
    # NOTE(review): this sets module-level state shared by every client
    # instance in the process — confirm only one key is ever needed.
    openai.api_key = self.api_key
    # Chat model identifier; exposed to callers via get_model_name().
    self.model_name = "gpt-3.5-turbo"
14+
15+
def get_model_name(self) -> str:
    """Return the OpenAI chat model identifier this client is configured with
    (e.g. "gpt-3.5-turbo", as set in __init__)."""
    return self.model_name
1318

1419
def get_text_embeddings(self, text: str, model: str = "text-embedding-ada-002"):
1520
if len(text) > 8000:

application/prompt_client/prompt_client.py

Lines changed: 7 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -498,4 +498,10 @@ def generate_text(self, prompt: str) -> Dict[str, str]:
498498
logger.debug(f"retrieved completion for {prompt}")
499499
table = [closest_object]
500500
result = f"Answer: {answer}"
501-
return {"response": result, "table": table, "accurate": accurate}
501+
model_name = self.ai_client.get_model_name() if self.ai_client else "unknown"
502+
return {
503+
"response": result,
504+
"table": table,
505+
"accurate": accurate,
506+
"model_name": model_name,
507+
}

application/prompt_client/vertex_prompt_client.py

Lines changed: 5 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -54,6 +54,11 @@ class VertexPromptClient:
5454

5555
def __init__(self) -> None:
    """Create the Gemini API client and pin the model name."""
    # NOTE(review): if GEMINI_API_KEY is unset this passes api_key=None —
    # presumably the failure surfaces on the first request; confirm.
    self.client = genai.Client(api_key=os.environ.get("GEMINI_API_KEY"))
    # Chat model identifier; exposed to callers via get_model_name().
    self.model_name = "gemini-2.0-flash"
58+
59+
def get_model_name(self) -> str:
    """Return the Gemini model identifier this client is configured with
    (e.g. "gemini-2.0-flash", as set in __init__)."""
    return self.model_name
5762

5863
def get_text_embeddings(self, text: str) -> List[float]:
5964
"""Text embedding with a Large Language Model."""

0 commit comments

Comments (0)