Skip to content

Commit 1beee32

Browse files
authored
Merge pull request #59 from stackhpc/feat/structlog
Switch to structlog for web app logging
2 parents 7cb2d79 + 11e7bfd commit 1beee32

File tree

8 files changed

+53
-38
lines changed

8 files changed

+53
-38
lines changed

.gitignore

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,7 @@ __pycache__/
55
**/*.secret
66
.DS_Store
77
.tox/
8+
**/.ruff_cache
89

910
# Ignore local dev helpers
1011
test-values.y[a]ml

charts/azimuth-llm/values.schema.json

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -118,9 +118,7 @@
118118
}
119119
},
120120
"required": [
121-
"model_name",
122-
"model_instruction"
123-
]
121+
"model_name" ]
124122
}
125123
}
126124
}

charts/azimuth-llm/values.yaml

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -92,8 +92,6 @@ ui:
9292
# available configuration options.
9393
appSettings:
9494
model_name: *model-name
95-
model_instruction: |
96-
You are a helpful AI assistant. Please respond appropriately.
9795
# Use local system fonts by default to avoid GDPR issues
9896
# with Gradio's defaults fonts which require fetching from
9997
# the Google fonts API. To restore default Gradio theme

web-apps/chat/app.py

Lines changed: 13 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -1,18 +1,14 @@
1-
import logging
21
import openai
3-
2+
import utils
43
import gradio as gr
54

65
from urllib.parse import urljoin
76
from langchain.schema import HumanMessage, AIMessage, SystemMessage
87
from langchain_openai import ChatOpenAI
9-
from typing import Dict, List
8+
from typing import Dict
109
from pydantic import BaseModel, ConfigDict
11-
from utils import LLMParams, load_settings
1210

13-
logging.basicConfig()
14-
logger = logging.getLogger(__name__)
15-
logger.setLevel(logging.INFO)
11+
log = utils.get_logger()
1612

1713

1814
class AppSettings(BaseModel):
@@ -22,7 +18,7 @@ class AppSettings(BaseModel):
2218
model_name: str
2319
model_instruction: str
2420
page_title: str
25-
llm_params: LLMParams
21+
llm_params: utils.LLMParams
2622
# Theme customisation
2723
theme_params: Dict[str, str | list]
2824
theme_params_extended: Dict[str, str]
@@ -32,8 +28,8 @@ class AppSettings(BaseModel):
3228
model_config = ConfigDict(protected_namespaces=(), extra="forbid")
3329

3430

35-
settings = AppSettings(**load_settings())
36-
logger.info(settings)
31+
settings = AppSettings(**utils.load_settings())
32+
log.info(settings)
3733

3834
backend_url = str(settings.backend_url)
3935
backend_health_endpoint = urljoin(backend_url, "/health")
@@ -82,7 +78,7 @@ def inference(latest_message, history):
8278
context.append(HumanMessage(content=human))
8379
context.append(AIMessage(content=(ai or "")))
8480
context.append(HumanMessage(content=latest_message))
85-
logger.debug("Chat context: %s", context)
81+
log.debug("Chat context: %s", context)
8682

8783
response = ""
8884
for chunk in llm.stream(context):
@@ -104,7 +100,7 @@ def inference(latest_message, history):
104100
# https://github.com/openai/openai-python/tree/e8e5a0dc7ccf2db19d7f81991ee0987f9c3ae375?tab=readme-ov-file#handling-errors
105101

106102
except openai.BadRequestError as err:
107-
logger.error("Received BadRequestError from backend API: %s", err)
103+
log.error("Received BadRequestError from backend API: %s", err)
108104
message = err.response.json()["message"]
109105
if INCLUDE_SYSTEM_PROMPT:
110106
raise PossibleSystemPromptException()
@@ -115,12 +111,12 @@ def inference(latest_message, history):
115111

116112
except openai.APIConnectionError as err:
117113
if not BACKEND_INITIALISED:
118-
logger.info("Backend API not yet ready")
114+
log.info("Backend API not yet ready")
119115
gr.Info(
120116
"Backend not ready - model may still be initialising - please try again later."
121117
)
122118
else:
123-
logger.error("Failed to connect to backend API: %s", err)
119+
log.error("Failed to connect to backend API: %s", err)
124120
gr.Warning("Failed to connect to backend API.")
125121

126122
except openai.InternalServerError as err:
@@ -130,7 +126,7 @@ def inference(latest_message, history):
130126

131127
# Catch-all for unexpected exceptions
132128
except Exception as err:
133-
logger.error("Unexpected error during inference: %s", err)
129+
log.error("Unexpected error during inference: %s", err)
134130
raise gr.Error("Unexpected error encountered - see logs for details.")
135131

136132

@@ -150,7 +146,7 @@ def inference_wrapper(*args):
150146
for chunk in inference(*args):
151147
yield chunk
152148
except PossibleSystemPromptException:
153-
logger.warning("Disabling system prompt and retrying previous request")
149+
log.warning("Disabling system prompt and retrying previous request")
154150
INCLUDE_SYSTEM_PROMPT = False
155151
for chunk in inference(*args):
156152
yield chunk
@@ -179,7 +175,7 @@ def inference_wrapper(*args):
179175
css=settings.css_overrides,
180176
js=settings.custom_javascript,
181177
)
182-
logger.debug("Gradio chat interface config: %s", app.config)
178+
log.debug("Gradio chat interface config: %s", app.config)
183179
app.queue(
184180
default_concurrency_limit=10,
185181
).launch(server_name=settings.host_address)

web-apps/chat/requirements.txt

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -4,4 +4,5 @@ openai
44
langchain
55
langchain_openai
66
pydantic
7+
structlog
78
../utils

web-apps/image-analysis/app.py

Lines changed: 17 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -1,20 +1,17 @@
11
import base64
2+
import gradio as gr
23
import logging
34
import requests
4-
5-
import gradio as gr
5+
import utils
66

77
from typing import List, Dict
88
from io import BytesIO
99
from PIL import Image
1010
from pydantic import BaseModel, ConfigDict
1111
from urllib.parse import urljoin
1212

13-
from utils import load_settings, LLMParams
1413

15-
logging.basicConfig()
16-
logger = logging.getLogger(__name__)
17-
logger.setLevel(logging.INFO)
14+
log = utils.get_logger()
1815

1916

2017
class PromptExample(BaseModel):
@@ -30,7 +27,7 @@ class AppSettings(BaseModel):
3027
page_title: str
3128
page_description: str
3229
examples: List[PromptExample]
33-
llm_params: LLMParams | None
30+
llm_params: utils.LLMParams | None
3431
# Theme customisation
3532
theme_params: Dict[str, str | list]
3633
theme_params_extended: Dict[str, str]
@@ -40,8 +37,8 @@ class AppSettings(BaseModel):
4037
model_config = ConfigDict(protected_namespaces=(), extra="forbid")
4138

4239

43-
settings = AppSettings(**load_settings())
44-
logger.info(settings)
40+
settings = AppSettings(**utils.load_settings())
41+
log.info(settings)
4542

4643

4744
# TODO: Rewrite this to stream output?
@@ -78,6 +75,7 @@ def analyze_image(image_url, prompt):
7875
payload["extra_body"] = {
7976
"top_k": settings.llm_params.top_k,
8077
}
78+
log.debug("Request payload: %s", payload)
8179

8280
# Make the API call to the vision model
8381
headers = {"Content-Type": "application/json"}
@@ -86,7 +84,16 @@ def analyze_image(image_url, prompt):
8684
json=payload,
8785
headers=headers,
8886
)
89-
response.raise_for_status()
87+
log.debug("Request payload: %s", payload)
88+
try:
89+
response.raise_for_status()
90+
except Exception as e:
91+
log.debug(
92+
"Received HTTP %s response with content: %s",
93+
response.status_code,
94+
response.json(),
95+
)
96+
raise e
9097

9198
# Extract and return the model's response
9299
result = response.json()

web-apps/image-analysis/requirements.txt

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3,4 +3,5 @@ requests
33
gradio<5
44
gradio_client
55
pydantic
6+
structlog
67
../utils

web-apps/utils/utils.py

Lines changed: 19 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -3,16 +3,29 @@
33
#####
44

55
import logging
6+
import os
67
import pathlib
8+
import structlog
79
import yaml
810
from typing import Annotated
911
from pydantic import BaseModel, ConfigDict, PositiveInt, Field
1012

11-
logging.basicConfig()
12-
logger = logging.getLogger(__name__)
13-
logger.setLevel(logging.INFO)
13+
LOG_LEVELS = {
14+
"debug": logging.DEBUG,
15+
"info": logging.INFO,
16+
"warn": logging.WARN,
17+
"error": logging.ERROR,
18+
}
1419

1520

21+
def get_logger():
22+
# Allow overwriting log level via env var
23+
log_level = LOG_LEVELS[os.environ.get("PYTHON_GRADIO_LOG_LEVEL", "info").lower()]
24+
structlog.configure(wrapper_class=structlog.make_filtering_bound_logger(log_level))
25+
return structlog.get_logger()
26+
27+
log = get_logger()
28+
1629
class LLMParams(BaseModel):
1730
"""
1831
Parameters for vLLM API requests. For details see
@@ -37,7 +50,7 @@ def get_k8s_namespace():
3750
try:
3851
current_k8s_namespace = open(NAMESPACE_FILE_PATH).read()
3952
return current_k8s_namespace
40-
except FileNotFoundError as err:
53+
except FileNotFoundError:
4154
return None
4255

4356

@@ -46,7 +59,7 @@ def api_address_in_cluster():
4659
if k8s_ns:
4760
return f"http://llm-backend.{k8s_ns}.svc"
4861
else:
49-
logger.warning(
62+
log.warning(
5063
"Failed to determine k8s namespace from %s - assuming non-kubernetes environment.",
5164
NAMESPACE_FILE_PATH,
5265
)
@@ -77,7 +90,7 @@ def load_settings() -> dict:
7790
# Sanity checks on settings
7891
unused_overrides = [k for k in overrides.keys() if k not in defaults.keys()]
7992
if unused_overrides:
80-
logger.warning(
93+
log.warning(
8194
f"Overrides {unused_overrides} not part of default settings so may be ignored."
8295
"Please check for typos"
8396
)

0 commit comments

Comments (0)