Commit ca0ae46

add tests; ruff, black
1 parent 1f8e166 commit ca0ae46

7 files changed: +73 -54 lines

app/backend/approaches/approach.py

Lines changed: 7 additions & 29 deletions
@@ -1,4 +1,3 @@
-import copy
 import os
 from abc import ABC
 from dataclasses import dataclass
@@ -29,7 +28,6 @@
 from openai.types.chat import (
     ChatCompletion,
     ChatCompletionChunk,
-    ChatCompletionDeveloperMessageParam,
     ChatCompletionMessageParam,
     ChatCompletionReasoningEffort,
     ChatCompletionToolParam,
@@ -141,23 +139,14 @@ def from_completion_usage(cls, usage: CompletionUsage) -> "TokenUsageProps":
 # https://learn.microsoft.com/azure/ai-services/openai/how-to/reasoning
 @dataclass
 class GPTReasoningModelSupport:
-    reasoning_effort: bool
-    tools: bool
-    system_messages: bool
     streaming: bool


 class Approach(ABC):
     # List of GPT reasoning models support
     GPT_REASONING_MODELS = {
-        "o1": GPTReasoningModelSupport(reasoning_effort=True, tools=True, system_messages=True, streaming=False),
-        "o1-preview": GPTReasoningModelSupport(
-            reasoning_effort=False, tools=False, system_messages=False, streaming=False
-        ),
-        "o1-mini": GPTReasoningModelSupport(
-            reasoning_effort=False, tools=False, system_messages=False, streaming=False
-        ),
-        "o3-mini": GPTReasoningModelSupport(reasoning_effort=True, tools=True, system_messages=True, streaming=True),
+        "o1": GPTReasoningModelSupport(streaming=False),
+        "o3-mini": GPTReasoningModelSupport(streaming=True),
     }
     # Set a higher token limit for GPT reasoning models
     RESPONSE_DEFAULT_TOKEN_LIMIT = 1024
@@ -379,39 +368,28 @@ def create_chat_completion(
         if chatgpt_model in self.GPT_REASONING_MODELS:
             params: Dict[str, Any] = {
                 # max_tokens is not supported
-                "max_completion_tokens": response_token_limit,
+                "max_completion_tokens": response_token_limit
             }

             # Adjust parameters for reasoning models
             supported_features = self.GPT_REASONING_MODELS[chatgpt_model]
             if supported_features.streaming and should_stream:
                 params["stream"] = True
                 params["stream_options"] = {"include_usage": True}
-            if supported_features.tools:
-                params["tools"] = tools
-            if supported_features.reasoning_effort:
-                params["reasoning_effort"] = (
-                    reasoning_effort or overrides.get("reasoning_effort") or self.reasoning_effort
-                )
-
-            # For reasoning models that don't support system messages - migrate to developer messages
-            # https://learn.microsoft.com/azure/ai-services/openai/how-to/reasoning?tabs=python-secure#developer-messages
-            if not supported_features.system_messages:
-                messages = copy.deepcopy(messages)
-                developer_message = cast(ChatCompletionDeveloperMessageParam, messages[0])
-                developer_message["role"] = "developer"
+            params["reasoning_effort"] = reasoning_effort or overrides.get("reasoning_effort") or self.reasoning_effort

         else:
             # Include parameters that may not be supported for reasoning models
             params = {
                 "max_tokens": response_token_limit,
                 "temperature": temperature or overrides.get("temperature", 0.3),
-                "tools": tools,
             }
             if should_stream:
                 params["stream"] = True
                 params["stream_options"] = {"include_usage": True}

+        params["tools"] = tools
+
         # Azure OpenAI takes the deployment name as the model name
         return self.openai_client.chat.completions.create(
             model=chatgpt_deployment if chatgpt_deployment else chatgpt_model,
@@ -435,7 +413,7 @@ def create_generate_thought_step(
         if deployment:
             properties["deployment"] = deployment
         # Only add reasoning_effort setting if the model supports it
-        if (supported_features := self.GPT_REASONING_MODELS.get(model)) and supported_features.reasoning_effort:
+        if model in self.GPT_REASONING_MODELS:
             properties["reasoning_effort"] = reasoning_effort or overrides.get(
                 "reasoning_effort", self.reasoning_effort
             )
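
Condensed from the hunk above, a minimal sketch of how parameter selection reads after this commit. The standalone function build_chat_params is hypothetical; in the repo this logic lives inside Approach.create_chat_completion, and the reasoning_effort/temperature override chain is simplified to plain arguments here.

    from dataclasses import dataclass
    from typing import Any, Dict, List


    @dataclass
    class GPTReasoningModelSupport:
        streaming: bool


    # The commit trims the support table down to the one flag the app still branches on.
    GPT_REASONING_MODELS = {
        "o1": GPTReasoningModelSupport(streaming=False),
        "o3-mini": GPTReasoningModelSupport(streaming=True),
    }


    def build_chat_params(
        model: str,
        token_limit: int,
        tools: List[Dict[str, Any]],
        should_stream: bool,
        reasoning_effort: str = "medium",
        temperature: float = 0.3,
    ) -> Dict[str, Any]:
        if model in GPT_REASONING_MODELS:
            params: Dict[str, Any] = {"max_completion_tokens": token_limit}
            if GPT_REASONING_MODELS[model].streaming and should_stream:
                params["stream"] = True
                params["stream_options"] = {"include_usage": True}
            # reasoning_effort is now sent for every registered reasoning model.
            params["reasoning_effort"] = reasoning_effort
        else:
            params = {"max_tokens": token_limit, "temperature": temperature}
            if should_stream:
                params["stream"] = True
                params["stream_options"] = {"include_usage": True}
        # tools are attached unconditionally after this commit.
        params["tools"] = tools
        return params

With model="o1" and should_stream=True, the stream keys are omitted because "o1" is registered with streaming=False, while "o3-mini" streams and still reports usage via stream_options.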

app/frontend/src/components/AnalysisPanel/AnalysisPanel.module.css

Lines changed: 21 additions & 4 deletions
@@ -104,16 +104,33 @@
     min-width: max-content;
 }

+/* Adjust tokenLabel to allow bar-specific text color overrides */
 .tokenLabel {
-    color: #333232;
     padding-right: 4px;
-    white-space: nowrap;
-    overflow: visible;
-    text-overflow: unset;
 }

 .primaryBarContainer {
     width: fit-content;
     display: flex;
     gap: 4px;
 }
+
+.promptBar {
+    background-color: #A82424;
+    color: #FFFFFF; /* White text for contrast */
+}
+
+.reasoningBar {
+    background-color: #265E29;
+    color: #FFFFFF;
+}
+
+.outputBar {
+    background-color: #12579B;
+    color: #FFFFFF;
+}
+
+.totalBar {
+    background-color: #424242;
+    color: #FFFFFF;
+}

app/frontend/src/components/AnalysisPanel/TokenUsageGraph.tsx

Lines changed: 4 additions & 4 deletions
@@ -23,20 +23,20 @@ export const TokenUsageGraph: React.FC<TokenUsageGraphProps> = ({ tokenUsage, re
         <div className={styles.tokenUsageGraph}>
             <div className={styles.header}>Token Usage</div>
             <div className={styles.primaryBarContainer} style={{ width: "100%" }}>
-                <div className={styles.tokenBar} style={{ width: calcPercent(prompt_tokens) }}>
+                <div className={`${styles.tokenBar} ${styles.promptBar}`} style={{ width: calcPercent(prompt_tokens) }}>
                     <span className={styles.tokenLabel}>Prompt: {prompt_tokens}</span>
                 </div>
                 {reasoningEffort != null && reasoningEffort !== "" && (
-                    <div className={styles.tokenBar} style={{ width: calcPercent(reasoning_tokens) }}>
+                    <div className={`${styles.tokenBar} ${styles.reasoningBar}`} style={{ width: calcPercent(reasoning_tokens) }}>
                         <span className={styles.tokenLabel}>Reasoning: {reasoning_tokens}</span>
                     </div>
                 )}
-                <div className={styles.tokenBar} style={{ width: calcPercent(completion_tokens - reasoning_tokens) }}>
+                <div className={`${styles.tokenBar} ${styles.outputBar}`} style={{ width: calcPercent(completion_tokens - reasoning_tokens) }}>
                     <span className={styles.tokenLabel}>Output: {completion_tokens - reasoning_tokens}</span>
                 </div>
             </div>

-            <div className={styles.tokenBar} style={{ width: calcPercent(total_tokens) }}>
+            <div className={`${styles.tokenBar} ${styles.totalBar}`} style={{ width: calcPercent(total_tokens) }}>
                 <span className={styles.tokenLabel}>Total: {total_tokens}</span>
             </div>
         </div>
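
For reference, with the usage values in the test snapshot added below (prompt 23, completion 896, reasoning 384, total 919), the Output bar plots completion minus reasoning tokens: 896 - 384 = 512, while the Prompt (23) and Total (919) bars plot their values directly.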

tests/conftest.py

Lines changed: 17 additions & 17 deletions
@@ -1,6 +1,6 @@
 import json
 import os
-from typing import IO, Dict
+from typing import IO, Any, Dict
 from unittest import mock

 import aiohttp
@@ -107,21 +107,8 @@ def patch(openai_client):

 @pytest.fixture
 def mock_openai_chatcompletion(monkeypatch):
-    reasoning = os.getenv("TEST_ENABLE_REASONING") is not None
-    completion_usage: Dict[str, any] = {
-        "completion_tokens": 896,
-        "prompt_tokens": 23,
-        "total_tokens": 919,
-        "completion_tokens_details": {
-            "accepted_prediction_tokens": 0,
-            "audio_tokens": 0,
-            "reasoning_tokens": 384 if reasoning else 0,
-            "rejected_prediction_tokens": 0,
-        },
-    }
-
     class AsyncChatCompletionIterator:
-        def __init__(self, answer: str):
+        def __init__(self, answer: str, reasoning: bool, usage: Dict[str, Any]):
             chunk_id = "test-id"
             model = "gpt-4o-mini" if not reasoning else "o3-mini"
             self.responses = [
@@ -190,7 +177,7 @@ def __init__(self, answer: str):
                     "id": chunk_id,
                     "model": model,
                     "created": 1,
-                    "usage": completion_usage,
+                    "usage": usage,
                 }
             )

@@ -208,6 +195,19 @@ async def mock_acreate(*args, **kwargs):
         assert kwargs.get("seed") is None or kwargs.get("seed") == 42

         messages = kwargs["messages"]
+        model = kwargs["model"]
+        reasoning = model == "o3-mini"
+        completion_usage: Dict[str, any] = {
+            "completion_tokens": 896,
+            "prompt_tokens": 23,
+            "total_tokens": 919,
+            "completion_tokens_details": {
+                "accepted_prediction_tokens": 0,
+                "audio_tokens": 0,
+                "reasoning_tokens": 384 if reasoning else 0,
+                "rejected_prediction_tokens": 0,
+            },
+        }
         last_question = messages[-1]["content"]
         if last_question == "Generate search query for: What is the capital of France?":
             answer = "capital of France"
@@ -220,7 +220,7 @@ async def mock_acreate(*args, **kwargs):
         if messages[0]["content"].find("Generate 3 very brief follow-up questions") > -1:
             answer = "The capital of France is Paris. [Benefit_Options-2.pdf]. <<What is the capital of Spain?>>"
         if "stream" in kwargs and kwargs["stream"] is True:
-            return AsyncChatCompletionIterator(answer)
+            return AsyncChatCompletionIterator(answer, reasoning, completion_usage)
         else:
             return ChatCompletion(
                 object="chat.completion",
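
A minimal sketch of what the refactored fixture now does on each call: the usage dict is built per request from the requested model rather than from the TEST_ENABLE_REASONING environment variable. The helper name build_mock_usage is hypothetical; in conftest.py this dict is built inline inside mock_acreate.

    from typing import Any, Dict


    def build_mock_usage(model: str) -> Dict[str, Any]:
        # Hypothetical helper mirroring the inline logic added to mock_acreate:
        # reasoning tokens are reported only when the request targets "o3-mini".
        reasoning = model == "o3-mini"
        return {
            "completion_tokens": 896,
            "prompt_tokens": 23,
            "total_tokens": 919,
            "completion_tokens_details": {
                "accepted_prediction_tokens": 0,
                "audio_tokens": 0,
                "reasoning_tokens": 384 if reasoning else 0,
                "rejected_prediction_tokens": 0,
            },
        }


    assert build_mock_usage("gpt-4o-mini")["completion_tokens_details"]["reasoning_tokens"] == 0
    assert build_mock_usage("o3-mini")["completion_tokens_details"]["reasoning_tokens"] == 384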
Lines changed: 4 additions & 0 deletions
@@ -0,0 +1,4 @@
{"delta": {"role": "assistant"}, "context": {"data_points": {"text": ["Benefit_Options-2.pdf: There is a whistleblower policy."], "images": null}, "thoughts": [{"title": "Prompt to generate search query", "description": [{"role": "system", "content": "Below is a history of the conversation so far, and a new question asked by the user that needs to be answered by searching in a knowledge base.\nYou have access to Azure AI Search index with 100's of documents.\nGenerate a search query based on the conversation and the new question.\nDo not include cited source filenames and document names e.g. info.txt or doc.pdf in the search query terms.\nDo not include any text inside [] or <<>> in the search query terms.\nDo not include any special characters like '+'.\nIf the question is not in English, translate the question to English before generating the search query.\nIf you cannot generate a search query, return just the number 0."}, {"role": "user", "content": "How did crypto do last year?"}, {"role": "assistant", "content": "Summarize Cryptocurrency Market Dynamics from last year"}, {"role": "user", "content": "What are my health plans?"}, {"role": "assistant", "content": "Show available health plans"}, {"role": "user", "content": "Generate search query for: What is the capital of France?"}], "props": {"model": "o3-mini", "deployment": "o3-mini", "reasoning_effort": "low", "token_usage": {"prompt_tokens": 23, "completion_tokens": 896, "reasoning_tokens": 384, "total_tokens": 919}}}, {"title": "Search using generated search query", "description": "capital of France", "props": {"use_semantic_captions": false, "use_semantic_ranker": false, "use_query_rewriting": false, "top": 3, "filter": null, "use_vector_search": false, "use_text_search": true}}, {"title": "Search results", "description": [{"id": "file-Benefit_Options_pdf-42656E656669745F4F7074696F6E732E706466-page-2", "content": "There is a whistleblower policy.", "embedding": null, "imageEmbedding": null, "category": null, "sourcepage": "Benefit_Options-2.pdf", "sourcefile": "Benefit_Options.pdf", "oids": null, "groups": null, "captions": [{"additional_properties": {}, "text": "Caption: A whistleblower policy.", "highlights": []}], "score": 0.03279569745063782, "reranker_score": 3.4577205181121826}], "props": null}, {"title": "Prompt to generate answer", "description": [{"role": "system", "content": "Assistant helps the company employees with their healthcare plan questions, and questions about the employee handbook. Be brief in your answers.\nAnswer ONLY with the facts listed in the list of sources below. If there isn't enough information below, say you don't know. Do not generate answers that don't use the sources below. If asking a clarifying question to the user would help, ask the question.\nIf the question is not in English, answer in the language used in the question.\nEach source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. Use square brackets to reference the source, for example [info1.txt]. Don't combine sources, list each source separately, for example [info1.txt][info2.pdf]."}, {"role": "user", "content": "What is the capital of France?\n\nSources:\n\nBenefit_Options-2.pdf: There is a whistleblower policy."}], "props": {"model": "o3-mini", "deployment": "o3-mini", "reasoning_effort": null}}], "followup_questions": null}, "session_state": null}
{"delta": {"content": null, "role": "assistant"}}
{"delta": {"content": "The capital of France is Paris. [Benefit_Options-2.pdf].", "role": null}}
{"delta": {"role": "assistant"}, "context": {"data_points": {"text": ["Benefit_Options-2.pdf: There is a whistleblower policy."], "images": null}, "thoughts": [{"title": "Prompt to generate search query", "description": [{"role": "system", "content": "Below is a history of the conversation so far, and a new question asked by the user that needs to be answered by searching in a knowledge base.\nYou have access to Azure AI Search index with 100's of documents.\nGenerate a search query based on the conversation and the new question.\nDo not include cited source filenames and document names e.g. info.txt or doc.pdf in the search query terms.\nDo not include any text inside [] or <<>> in the search query terms.\nDo not include any special characters like '+'.\nIf the question is not in English, translate the question to English before generating the search query.\nIf you cannot generate a search query, return just the number 0."}, {"role": "user", "content": "How did crypto do last year?"}, {"role": "assistant", "content": "Summarize Cryptocurrency Market Dynamics from last year"}, {"role": "user", "content": "What are my health plans?"}, {"role": "assistant", "content": "Show available health plans"}, {"role": "user", "content": "Generate search query for: What is the capital of France?"}], "props": {"model": "o3-mini", "deployment": "o3-mini", "reasoning_effort": "low", "token_usage": {"prompt_tokens": 23, "completion_tokens": 896, "reasoning_tokens": 384, "total_tokens": 919}}}, {"title": "Search using generated search query", "description": "capital of France", "props": {"use_semantic_captions": false, "use_semantic_ranker": false, "use_query_rewriting": false, "top": 3, "filter": null, "use_vector_search": false, "use_text_search": true}}, {"title": "Search results", "description": [{"id": "file-Benefit_Options_pdf-42656E656669745F4F7074696F6E732E706466-page-2", "content": "There is a whistleblower policy.", "embedding": null, "imageEmbedding": null, "category": null, "sourcepage": "Benefit_Options-2.pdf", "sourcefile": "Benefit_Options.pdf", "oids": null, "groups": null, "captions": [{"additional_properties": {}, "text": "Caption: A whistleblower policy.", "highlights": []}], "score": 0.03279569745063782, "reranker_score": 3.4577205181121826}], "props": null}, {"title": "Prompt to generate answer", "description": [{"role": "system", "content": "Assistant helps the company employees with their healthcare plan questions, and questions about the employee handbook. Be brief in your answers.\nAnswer ONLY with the facts listed in the list of sources below. If there isn't enough information below, say you don't know. Do not generate answers that don't use the sources below. If asking a clarifying question to the user would help, ask the question.\nIf the question is not in English, answer in the language used in the question.\nEach source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. Use square brackets to reference the source, for example [info1.txt]. Don't combine sources, list each source separately, for example [info1.txt][info2.pdf]."}, {"role": "user", "content": "What is the capital of France?\n\nSources:\n\nBenefit_Options-2.pdf: There is a whistleblower policy."}], "props": {"model": "o3-mini", "deployment": "o3-mini", "reasoning_effort": null, "token_usage": {"prompt_tokens": 23, "completion_tokens": 896, "reasoning_tokens": 384, "total_tokens": 919}}}], "followup_questions": null}, "session_state": null}

0 commit comments
