
Commit 9aca5a0

Run code formatter

sd109 committed
1 parent c5d19a5 commit 9aca5a0


chart/web-app/app.py

Lines changed: 12 additions & 6 deletions
@@ -28,9 +28,12 @@
 # Detecting this ahead of time is difficult so for now we use a global variable which stores whether the API has
 # responded with a HTTP 400 error and formats all subsequent request to avoid using a system role.
 INCLUDE_SYSTEM_PROMPT = True
+
+
 class PossibleSystemPromptException(Exception):
     pass
 
+
 llm = ChatOpenAI(
     base_url=urljoin(backend_url, "v1"),
     model=settings.hf_model_name,
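
For readers skimming the diff: llm here is a LangChain ChatOpenAI client pointed at the chart's OpenAI-compatible backend, and the INCLUDE_SYSTEM_PROMPT flag plus PossibleSystemPromptException exist because (per the comment above) some backends reject the "system" role with HTTP 400. A minimal, self-contained sketch of that client setup, with placeholder values standing in for the chart's real backend_url and settings object:

# Sketch only: backend_url, the model name and api_key are placeholders,
# not the values app.py actually reads from its settings object.
from urllib.parse import urljoin

from langchain_openai import ChatOpenAI

backend_url = "http://llm-backend.default.svc:8000/"  # assumed in-cluster service URL
INCLUDE_SYSTEM_PROMPT = True


class PossibleSystemPromptException(Exception):
    """Signals that an HTTP 400 may have been caused by sending a system role."""


llm = ChatOpenAI(
    base_url=urljoin(backend_url, "v1"),  # OpenAI-compatible /v1 endpoint
    model="example/model-name",           # placeholder for settings.hf_model_name
    api_key="not-required",               # self-hosted backends typically ignore this
    streaming=True,
)
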
@@ -47,7 +50,6 @@ class PossibleSystemPromptException(Exception):
 
 
 def inference(latest_message, history):
-
     # Allow mutating global variable
     global BACKEND_INITIALISED
 
@@ -67,10 +69,9 @@ def inference(latest_message, history):
 
         response = ""
         for chunk in llm.stream(context):
-
             # If this is our first successful response from the backend
             # then update the status variable to allow future error messages
-            # to be more informative
+            # to be more informative
             if not BACKEND_INITIALISED and len(response) > 0:
                 BACKEND_INITIALISED = True
 
@@ -87,7 +88,7 @@ def inference(latest_message, history):
 
     except openai.BadRequestError as err:
         logger.error("Received BadRequestError from backend API: %s", err)
-        message = err.response.json()['message']
+        message = err.response.json()["message"]
         if INCLUDE_SYSTEM_PROMPT:
             raise PossibleSystemPromptException()
         else:
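
The hunk above is half of a fallback mechanism: on a 400 response the handler pulls the backend's explanation out of err.response.json()["message"] and, if a system prompt was sent, raises PossibleSystemPromptException so the caller can retry without one. The retry itself is not shown in this diff, so the sketch below is an assumption about its shape; build_context() and the wrapper logic are illustrative names, only the exception class, the global flag and the JSON lookup come from app.py. It reuses the llm client from the sketch above.

# Assumed shape of the "drop the system prompt and retry" flow.
import logging

import openai
from langchain_core.messages import HumanMessage, SystemMessage

logger = logging.getLogger(__name__)


def build_context(latest_message, include_system_prompt):
    # Hypothetical helper: assemble the message list passed to llm.stream().
    context = []
    if include_system_prompt:
        context.append(SystemMessage(content="You are a helpful assistant."))
    context.append(HumanMessage(content=latest_message))
    return context


def inference(latest_message, history):
    try:
        context = build_context(latest_message, INCLUDE_SYSTEM_PROMPT)
        for chunk in llm.stream(context):
            yield chunk.content
    except openai.BadRequestError as err:
        message = err.response.json()["message"]
        logger.error("Received BadRequestError from backend API: %s", message)
        if INCLUDE_SYSTEM_PROMPT:
            raise PossibleSystemPromptException()
        raise


def inference_wrapper(*args):
    # If the first attempt looks like a system-role rejection, flip the
    # global flag and try once more without the system prompt.
    global INCLUDE_SYSTEM_PROMPT
    try:
        for chunk in inference(*args):
            yield chunk
    except PossibleSystemPromptException:
        INCLUDE_SYSTEM_PROMPT = False
        for chunk in inference(*args):
            yield chunk
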
@@ -98,13 +99,17 @@ def inference(latest_message, history):
     except openai.APIConnectionError as err:
         if not BACKEND_INITIALISED:
             logger.info("Backend API not yet ready")
-            gr.Info("Backend not ready - model may still be initialising - please try again later")
+            gr.Info(
+                "Backend not ready - model may still be initialising - please try again later"
+            )
         else:
             logger.error("Failed to connect to backend API: %s", err)
             gr.Warning("Failed to connect to backend API")
 
     except openai.InternalServerError as err:
-        gr.Warning("Internal server error encountered in backend API - see API logs for details.")
+        gr.Warning(
+            "Internal server error encountered in backend API - see API logs for details."
+        )
 
     # Catch-all for unexpected exceptions
     except err:
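
These handlers turn backend failures into Gradio notifications rather than tracebacks, using BACKEND_INITIALISED to distinguish "model still loading" from "backend fell over". A simplified sketch of that pattern around the streaming loop; stream_reply is a hypothetical name, the llm client is the one assumed earlier, and the real handler also covers BadRequestError as shown in the previous hunk:

# Simplified sketch of the user-facing error handling around llm.stream().
import logging

import gradio as gr
import openai

logger = logging.getLogger(__name__)
BACKEND_INITIALISED = False


def stream_reply(context):
    global BACKEND_INITIALISED
    response = ""
    try:
        for chunk in llm.stream(context):
            # First streamed content proves the backend is up, so later
            # connection errors can be reported as real failures.
            if not BACKEND_INITIALISED and len(response) > 0:
                BACKEND_INITIALISED = True
            response += chunk.content
            yield response
    except openai.APIConnectionError as err:
        if not BACKEND_INITIALISED:
            logger.info("Backend API not yet ready")
            gr.Info("Backend not ready - model may still be initialising - please try again later")
        else:
            logger.error("Failed to connect to backend API: %s", err)
            gr.Warning("Failed to connect to backend API")
    except openai.InternalServerError:
        gr.Warning("Internal server error encountered in backend API - see API logs for details.")
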
@@ -145,6 +150,7 @@ def inference_wrapper(*args):
     for chunk in inference(*args):
         yield chunk
 
+
 # Build main chat interface
 with gr.ChatInterface(
     inference_wrapper,
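
For completeness, gr.ChatInterface only needs a callable that takes (message, history) and yields partial replies; the blank line added above simply brings the spacing in line with Black's two-blank-line rule after a top-level function. A minimal, standalone example of the same wiring, with a dummy generator standing in for the app's real inference_wrapper (app.py itself opens the interface in a with block and passes extra arguments):

# Standalone illustration of the gr.ChatInterface pattern used above.
import gradio as gr


def inference_wrapper(message, history):
    # Yield progressively longer strings so the UI renders a streaming reply.
    reply = ""
    for word in ("streamed", "reply", "to:", message):
        reply = f"{reply} {word}".strip()
        yield reply


app = gr.ChatInterface(inference_wrapper)

if __name__ == "__main__":
    app.launch()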
