Skip to content

Commit b04678f

Browse files
committed
ruff
1 parent 1d131be commit b04678f

File tree

1 file changed

+16
-20
lines changed

1 file changed

+16
-20
lines changed

packages/sample-app/sample_app/guardrail_medical_chat_example.py

Lines changed: 16 additions & 20 deletions
Original file line number | Diff line number | Diff line change
@@ -6,13 +6,13 @@
66
from traceloop.sdk.evaluator import EvaluatorMadeByTraceloop
77

88

9-
Traceloop.init(
10-
app_name="medical-chat-example"
11-
)
9+
Traceloop.init(app_name="medical-chat-example")
1210

1311
api_key = os.getenv("OPENAI_API_KEY")
1412
if not api_key:
15-
raise ValueError("OPENAI_API_KEY environment variable is required. Please set it before running this example.")
13+
raise ValueError(
14+
"OPENAI_API_KEY environment variable is required. Please set it before running this example."
15+
)
1616

1717
client = AsyncOpenAI(api_key=api_key)
1818

@@ -31,16 +31,16 @@ def handle_medical_evaluation(evaluator_result, original_result):
3131
"""
3232
if not evaluator_result.success:
3333
# Return a modified dict with error message
34-
print(f"handle_medical_evaluation was activated - evaluator_result: {evaluator_result}")
35-
return {
36-
"text": "There is an issue with the request. Please try again."
37-
}
34+
print(
35+
f"handle_medical_evaluation was activated - evaluator_result: {evaluator_result}"
36+
)
37+
return {"text": "There is an issue with the request. Please try again."}
3838
return original_result
3939

4040

4141
@guardrail(
4242
evaluator=EvaluatorMadeByTraceloop.pii_detector(probability_threshold=0.8),
43-
on_evaluation_complete=handle_medical_evaluation
43+
on_evaluation_complete=handle_medical_evaluation,
4444
)
4545
async def get_doctor_response_with_pii_check(patient_message: str) -> dict:
4646
"""Get a doctor's response with PII detection guardrail and custom callback."""
@@ -73,15 +73,13 @@ async def get_doctor_response_with_pii_check(patient_message: str) -> dict:
7373
model="gpt-4o",
7474
messages=[
7575
{"role": "system", "content": system_prompt},
76-
{"role": "user", "content": patient_message}
76+
{"role": "user", "content": patient_message},
7777
],
7878
max_tokens=500,
79-
temperature=0
79+
temperature=0,
8080
)
8181

82-
return {
83-
"text": response.choices[0].message.content
84-
}
82+
return {"text": response.choices[0].message.content}
8583

8684

8785
# Main function using the simple example
@@ -98,16 +96,14 @@ async def get_doctor_response(patient_message: str) -> dict:
9896
model="gpt-4o",
9997
messages=[
10098
{"role": "system", "content": system_prompt},
101-
{"role": "user", "content": patient_message}
99+
{"role": "user", "content": patient_message},
102100
],
103101
max_tokens=500,
104-
temperature=0
102+
temperature=0,
105103
)
106104

107105
# Return dict with 'text' field
108-
return {
109-
"text": response.choices[0].message.content
110-
}
106+
return {"text": response.choices[0].message.content}
111107

112108

113109
async def medical_chat_session():
@@ -122,7 +118,7 @@ async def medical_chat_session():
122118
try:
123119
patient_input = input("Patient: ").strip()
124120

125-
if patient_input.lower() in ['quit', 'exit', 'q']:
121+
if patient_input.lower() in ["quit", "exit", "q"]:
126122
print("\n👋 Thank you for using the medical chat. Take care!")
127123
break
128124

0 commit comments

Comments (0)