Commit 3cda717

Clean up comments

Author: sd109
1 parent 14e2887

File tree: 1 file changed (+9, -7 lines)

chart/web-app/app.py

Lines changed: 9 additions & 7 deletions
@@ -26,7 +26,7 @@
 # Some models disallow the 'system' role in their conversation history by raising errors in their chat prompt template, e.g. see
 # https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2/blob/cf47bb3e18fe41a5351bc36eef76e9c900847c89/tokenizer_config.json#L42
 # Detecting this ahead of time is difficult so for now we use a global variable which stores whether the API has
-# responded with a HTTP 400 error and retry request without system role replaced by
+# responded with an HTTP 400 error and formats all subsequent requests to avoid using a system role.
 INCLUDE_SYSTEM_PROMPT = True
 class PossibleSystemPromptException(Exception):
     pass
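As a reading aid, here is a minimal sketch (not code from this repo; names are illustrative) of the fallback behaviour the reworded comment describes: when the flag is off, the system instruction is folded into the user message instead of being sent under the 'system' role.

INCLUDE_SYSTEM_PROMPT = True  # flipped to False once the backend returns HTTP 400

def build_context(instruction, latest_message):
    # Send a real 'system' message when the backend allows it...
    if INCLUDE_SYSTEM_PROMPT:
        return [
            {"role": "system", "content": instruction},
            {"role": "user", "content": latest_message},
        ]
    # ...otherwise prepend the instruction to the user's message.
    return [{"role": "user", "content": f"{instruction}\n\n{latest_message}"}]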
@@ -48,12 +48,10 @@ class PossibleSystemPromptException(Exception):


 def inference(latest_message, history):

-    # Allow mutating global variables
-    global BACKEND_INITIALISED, INCLUDE_SYSTEM_PROMPT
+    # Allow mutating global variable
+    global BACKEND_INITIALISED

     try:
-        # Attempt to handle models which disallow system prompts
-        # Construct conversation history for model prompt
         if INCLUDE_SYSTEM_PROMPT:
             context = [SystemMessage(content=settings.hf_model_instruction)]
         else:
@@ -84,6 +82,9 @@ def inference(latest_message, history):
             response += chunk.content
             yield response

+    # Handle any API errors here. See OpenAI Python client for possible error responses
+    # https://github.com/openai/openai-python/tree/e8e5a0dc7ccf2db19d7f81991ee0987f9c3ae375?tab=readme-ov-file#handling-errors
+
     except openai.BadRequestError as err:
         logger.error("Received BadRequestError from backend API: %s", err)
         message = err.response.json()['message']
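The new comment points at the OpenAI Python client's error hierarchy. A hedged sketch of what the detection step could look like (the substring check is an assumption; this hunk does not show the repo's actual condition):

import openai

def handle_bad_request(err: openai.BadRequestError):
    # err.response is an httpx.Response, so .json() returns the parsed error body
    message = err.response.json().get("message", "")
    if "system" in message.lower():
        # Assumed heuristic: the chat template rejected the 'system' role
        raise PossibleSystemPromptException()
    raise err  # some other bad request; surface it unchanged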
@@ -131,14 +132,15 @@ def inference(latest_message, history):
 def inference_wrapper(*args):
     """
     Simple wrapper round the `inference` function which catches certain predictable errors
-    such as invalid prompty formats and attempts to mitigate them automatically.
+    such as invalid prompt formats and attempts to mitigate them automatically.
     """
+    # Allow mutating global variable
+    global INCLUDE_SYSTEM_PROMPT
     try:
         for chunk in inference(*args):
             yield chunk
     except PossibleSystemPromptException:
         logger.warning("Disabling system prompt and retrying previous request")
-        global INCLUDE_SYSTEM_PROMPT
         INCLUDE_SYSTEM_PROMPT = False
         for chunk in inference(*args):
             yield chunk
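Two notes on this hunk. First, moving the global declaration to the top of the function is safe: in Python a global statement applies to the whole function scope, so behaviour is unchanged and the intent is now visible up front. Second, the retry pattern can be isolated into a self-contained toy (all names below are illustrative, not from the repo); note that if the error fired after some chunks had already been yielded, the retry would restart the stream from the beginning.

FLAG = True

def flaky_stream():
    # Stand-in for `inference`: fails until the flag is disabled
    if FLAG:
        raise ValueError("pretend the backend rejected the prompt")
    yield "chunk-1"
    yield "chunk-2"

def wrapper():
    global FLAG
    try:
        yield from flaky_stream()
    except ValueError:
        FLAG = False               # disable the problematic behaviour...
        yield from flaky_stream()  # ...and replay the request once

print(list(wrapper()))  # ['chunk-1', 'chunk-2']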
