|
| 1 | +import asyncio |
| 2 | +import os |
| 3 | +from openai import AsyncOpenAI |
| 4 | +from traceloop.sdk import Traceloop |
| 5 | +from traceloop.sdk.guardrails.guardrails import guardrail |
| 6 | +from traceloop.sdk.evaluator import EvaluatorMadeByTraceloop |
| 7 | + |
| 8 | + |
| 9 | +Traceloop.init( |
| 10 | + app_name="medical-chat-example" |
| 11 | +) |
| 12 | + |
| 13 | +api_key = os.getenv("OPENAI_API_KEY") |
| 14 | +if not api_key: |
| 15 | + raise ValueError("OPENAI_API_KEY environment variable is required. Please set it before running this example.") |
| 16 | + |
| 17 | +client = AsyncOpenAI(api_key=api_key) |
| 18 | + |
| 19 | + |
| 20 | +# Custom callback function to handle evaluation results |
| 21 | +def handle_medical_evaluation(evaluator_result, original_result): |
| 22 | + """ |
| 23 | + Custom handler for medical advice evaluation. |
| 24 | +
|
| 25 | + Args: |
| 26 | + evaluator_result: The evaluation result with 'success' and 'reason' fields |
| 27 | + original_result: The original AI response dict (e.g., {"text": "..."}) |
| 28 | +
|
| 29 | + Returns: |
| 30 | + Either the original result dict or a modified version |
| 31 | + """ |
| 32 | + if not evaluator_result.success: |
| 33 | + # Return a modified dict with error message |
| 34 | + print(f"handle_medical_evaluation was activated - evaluator_result: {evaluator_result}") |
| 35 | + return { |
| 36 | + "text": "There is an issue with the request. Please try again." |
| 37 | + } |
| 38 | + return original_result |
| 39 | + |
| 40 | + |
| 41 | +@guardrail( |
| 42 | + evaluator=EvaluatorMadeByTraceloop.pii_detector(probability_threshold=0.8), |
| 43 | + on_evaluation_complete=handle_medical_evaluation |
| 44 | +) |
| 45 | +async def get_doctor_response_with_pii_check(patient_message: str) -> dict: |
| 46 | + """Get a doctor's response with PII detection guardrail and custom callback.""" |
| 47 | + |
| 48 | + system_prompt = """You are a medical AI assistant. Provide helpful, |
| 49 | + general medical information and advice while being clear about your limitations. |
| 50 | + Always recommend consulting with qualified healthcare providers for proper diagnosis and treatment. |
| 51 | + Be empathetic and professional in your responses.""" |
| 52 | + # This is the system prompt for the personal information case |
| 53 | + personal_info_system_prompt = """You are a medical AI assistant that provides helpful, general medical information # noqa: F841 |
| 54 | + tailored to the individual user. |
| 55 | +
|
| 56 | + When personal information is available (such as age, sex, symptoms, medical history, |
| 57 | + lifestyle, medications, or concerns), actively incorporate it into your responses |
| 58 | + to make guidance more relevant and personalized. |
| 59 | +
|
| 60 | + Adapt explanations, examples, and recommendations to the user’s context whenever possible. |
| 61 | + If key personal details are missing, ask concise and relevant follow-up questions |
| 62 | + before giving advice. |
| 63 | +
|
| 64 | + Be clear about your limitations as an AI and do not provide diagnoses or definitive |
| 65 | + treatment plans. Always encourage consultation with qualified healthcare professionals |
| 66 | + for diagnosis, treatment, or urgent concerns. |
| 67 | +
|
| 68 | + Maintain a professional, empathetic, and supportive tone. |
| 69 | + Avoid assumptions, respect privacy, and clearly distinguish general information |
| 70 | + from personalized considerations.""" |
| 71 | + |
| 72 | + response = await client.chat.completions.create( |
| 73 | + model="gpt-4o", |
| 74 | + messages=[ |
| 75 | + {"role": "system", "content": system_prompt}, |
| 76 | + {"role": "user", "content": patient_message} |
| 77 | + ], |
| 78 | + max_tokens=500, |
| 79 | + temperature=0 |
| 80 | + ) |
| 81 | + |
| 82 | + return { |
| 83 | + "text": response.choices[0].message.content |
| 84 | + } |
| 85 | + |
| 86 | + |
| 87 | +# Main function using the simple example |
| 88 | +@guardrail(evaluator="medicaladvicegiven") |
| 89 | +async def get_doctor_response(patient_message: str) -> dict: |
| 90 | + """Get a doctor's response to patient input using GPT-4o.""" |
| 91 | + |
| 92 | + system_prompt = """You are a medical AI assistant. Provide helpful, |
| 93 | + general medical information and advice while being clear about your limitations. |
| 94 | + Always recommend consulting with qualified healthcare providers for proper diagnosis and treatment. |
| 95 | + Be empathetic and professional in your responses.""" |
| 96 | + |
| 97 | + response = await client.chat.completions.create( |
| 98 | + model="gpt-4o", |
| 99 | + messages=[ |
| 100 | + {"role": "system", "content": system_prompt}, |
| 101 | + {"role": "user", "content": patient_message} |
| 102 | + ], |
| 103 | + max_tokens=500, |
| 104 | + temperature=0 |
| 105 | + ) |
| 106 | + |
| 107 | + # Return dict with 'text' field |
| 108 | + return { |
| 109 | + "text": response.choices[0].message.content |
| 110 | + } |
| 111 | + |
| 112 | + |
| 113 | +async def medical_chat_session(): |
| 114 | + """Run an interactive medical chat session.""" |
| 115 | + print("🏥 Welcome to the Medical Chat") |
| 116 | + print("=" * 50) |
| 117 | + print("This example simulates a conversation between a patient and a doctor.") |
| 118 | + print("The doctor's responses are processed through guardrails to ensure safety.") |
| 119 | + print("Type 'quit' to exit the chat.\n") |
| 120 | + |
| 121 | + while True: |
| 122 | + try: |
| 123 | + patient_input = input("Patient: ").strip() |
| 124 | + |
| 125 | + if patient_input.lower() in ['quit', 'exit', 'q']: |
| 126 | + print("\n👋 Thank you for using the medical chat. Take care!") |
| 127 | + break |
| 128 | + |
| 129 | + if not patient_input: |
| 130 | + print("Please enter your symptoms or medical concern.") |
| 131 | + continue |
| 132 | + |
| 133 | + print("\n🤖 Processing your request through the medical AI system...\n") |
| 134 | + |
| 135 | + # Get the doctor's response with guardrails applied |
| 136 | + doctor_response = await get_doctor_response_with_pii_check(patient_input) |
| 137 | + |
| 138 | + # Extract text from the response dict |
| 139 | + response_text = doctor_response.get("text", str(doctor_response)) |
| 140 | + print(f"👨⚕️ Doctor response: {response_text}") |
| 141 | + |
| 142 | + print("-" * 50) |
| 143 | + |
| 144 | + except KeyboardInterrupt: |
| 145 | + print("\n\n👋 Chat session interrupted. Goodbye!") |
| 146 | + break |
| 147 | + except Exception as e: |
| 148 | + print(f"\n❌ An error occurred: {e}") |
| 149 | + print("Please try again or type 'quit' to exit.") |
| 150 | + |
| 151 | + |
| 152 | +async def main(): |
| 153 | + """Main function to run the medical chat example.""" |
| 154 | + await medical_chat_session() |
| 155 | + |
| 156 | + |
| 157 | +if __name__ == "__main__": |
| 158 | + asyncio.run(main()) |