import json
import pathlib

import pyaudio
import speech_recognition as sr
import typer
from elevenlabs import ElevenLabs
from langchain_aws import ChatBedrock
from langchain_core.tools import tool
from langgraph.prebuilt import create_react_agent

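# Shared clients and audio plumbing. The PyAudio output stream must match the
# "pcm_22050" format requested from ElevenLabs below: 16-bit, mono, 22,050 Hz.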
ELEVENLABS_KEY = "REPLACEME"  # set your ElevenLabs API key here

el_client = ElevenLabs(api_key=ELEVENLABS_KEY)
out_stream = pyaudio.PyAudio().open(
    format=pyaudio.paInt16,
    channels=1,
    rate=22050,  # was 22500, which would play the pcm_22050 audio slightly fast
    output=True,
)
recognizer = sr.Recognizer()
app = typer.Typer()

# beep.wav is written raw to the PCM stream, so it should be 16-bit mono
# 22.05 kHz; the short WAV header may come out as a brief click.
BEEP_SOUND = pathlib.Path("beep.wav").read_bytes()
ALL_PATHWAYS = pathlib.Path("pathways(out).txt").read_text()

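# System prompt for the triage agent. "{ALL_PATHWAYS}" is a literal placeholder
# (this is not an f-string); it is spliced in via str.replace right after the
# definition, since the JSON examples' braces would trip up str.format.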
MAIN_PROMPT = """
You are an advanced medical assistant AI. Your primary function is to conduct an initial triage based on user-reported symptoms. Your responses must adhere strictly to the specified JSON format.

**Core Instructions:**

1. **Symptom Elicitation and Clarification:**
    * If the user's initial description is vague, ambiguous, lacks crucial details (e.g., onset, duration, precise location, character of pain, intensity, radiation, aggravating/relieving factors, associated symptoms not yet mentioned, relevant medical history if offered), or if you need more context to accurately assess the situation and potential severity, you MUST ask specific, targeted clarifying questions before proceeding to symptom analysis and pathway recommendation.
    * Your goal in asking questions is to gather sufficient information to differentiate between varying levels of urgency and to identify potential relationships between symptoms. Do not make assumptions if critical information is missing.
    * **Only ask the user for a single clarification at a time, limited to one simple sentence. After receiving their response, you may ask additional clarifying questions if necessary.**

2. **Symptom Analysis and Prioritization:**
    * **Clinical Terminology:** Accurately convert all user-reported symptoms into their most precise clinical terminology.
    * **Symptom Linking:** Actively look for and consider potential relationships or clusters among the reported symptoms. For example, fever, cough, and shortness of breath are often linked. Understanding these links is crucial for accurate prioritization.
    * **Severity Assessment (1-10 scale):** For each clinically termed symptom, assign a severity score from 1 (mild) to 10 (very severe). This assessment must be based on:
        * The inherent nature of the symptom (e.g., 'crushing chest pain' is inherently more severe than a 'mild headache').
        * User-provided details on intensity, impact on daily life, and duration.
        * The presence of 'red flag' characteristics (e.g., sudden onset of severe symptoms, neurological deficits, uncontrolled bleeding, severe difficulty breathing, signs of shock).
        * The context of co-occurring symptoms. A symptom might be assigned a higher severity if it's part of a concerning cluster.
    * **Body Part:** Identify the primary body part affected by each symptom. Use `null` if it's systemic, not applicable, or if the information is insufficient even after attempting clarification.

3. **Pathway Recommendation:**
    * Based on your comprehensive analysis of all clarified and processed symptoms (their clinical terms, severities, body parts, and identified interrelationships), you will recommend **ONLY ONE** pathway.
    * This pathway must be selected exclusively from the provided list below. Never deviate from this list or invent new pathways. The choice should reflect the overall urgency and nature of the symptom complex.
    * The list of all available pathways is:

    ==== START OF PATHWAYS ====
    {ALL_PATHWAYS}
    ==== END OF PATHWAYS ====

4. **Output Format:**
    * Your final response MUST be a single JSON object. Do not include any introductory text, explanations, apologies, or conversational filler outside of this JSON object.
    * The JSON object must have two top-level keys:
        * `'symptoms'`: A list of JSON objects, where each object represents a distinct symptom and contains the keys:
            * `'symptom'` (string: the clinical term for the symptom).
            * `'severity'` (integer: 1-10).
            * `'body_part'` (string: the affected body part, or `null`).
        * `'pathway'` (string: the selected pathway ID from the `ALL_PATHWAYS` list).

**Example Interaction Flow (Conceptual):**

*User Input:* 'My head hurts really bad and I feel sick.'

*AI Clarification (if needed):* 'To help understand your severe headache better, when did it start?'

*(User responds, e.g., 'It started this morning about an hour ago.')*

*AI Clarification (if still needed):* 'Is this headache different from any you have experienced before?'

*(User responds, e.g., 'Yes, this is much worse and came on suddenly.')*

*(Assuming sufficient information is now gathered to proceed without further questions...)*

*AI Output (JSON only):*
```json
{
  "symptoms": [
    {"symptom": "severe headache, sudden onset", "severity": 9, "body_part": "head"},
    {"symptom": "nausea", "severity": 6, "body_part": null}
  ],
  "pathway": "PWXXXXX"
}
```
*(`PWXXXXX` stands in for a pathway reflecting potentially higher urgency.)*

**Constraint Example:**

*Input:* 'I have a mild cough.'
*Output (JSON only):*
```json
{
  "symptoms": [
    {"symptom": "mild cough", "severity": 3, "body_part": "chest"}
  ],
  "pathway": "PWYYYYY"
}
```
*(`PWYYYYY` is again a stand-in pathway ID.)*
"""
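
# Splice the real pathway list into the prompt (see the note above MAIN_PROMPT).
MAIN_PROMPT = MAIN_PROMPT.replace("{ALL_PATHWAYS}", ALL_PATHWAYS)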


@tool
def ask_user(prompt: str) -> str:
    """
    Ask the user a clarifying question in the console and return their reply.
    """
    # The docstring doubles as the tool description shown to the model.
    print(f"Agent: {prompt}")
    return input("User: ")


@tool
def ask_user_voice(prompt: str) -> str:
    """
    Speak a clarifying question aloud, then record and transcribe the reply.
    """
    print("Asking user for input...")
    # Stream the question through ElevenLabs TTS straight to the output device.
    for chunk in el_client.text_to_speech.convert_as_stream(
        voice_id="khYwAWwYSjlxlcrwGQ16",
        text=prompt,
        output_format="pcm_22050",
    ):
        out_stream.write(chunk)

    with sr.Microphone() as microphone:
        recognizer.adjust_for_ambient_noise(microphone)
        out_stream.write(BEEP_SOUND)  # signal that recording has started
        input_audio = recognizer.listen(microphone, phrase_time_limit=None)
        # Local Whisper transcription (needs the optional openai-whisper package).
        text = recognizer.recognize_whisper(input_audio, language="en")

    print("You said:", text)
    return text


def get_agent(voice: bool = False):
    """Build the triage ReAct agent, wired to the voice or console tool."""
    llm = ChatBedrock(model="anthropic.claude-3-haiku-20240307-v1:0")
    agent = create_react_agent(
        model=llm,
        prompt=MAIN_PROMPT,
        tools=[ask_user_voice if voice else ask_user],
    )
    return agent

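# Illustrative direct usage (mirrors the `text` command below):
#   agent = get_agent()
#   out = agent.invoke({"messages": [{"role": "user", "content": "I have a mild cough"}]})
#   print(out["messages"][-1].content)  # expected: the JSON triage object
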
@app.command()
def text():
    """
    Run the bot in text mode.
    """
    agent = get_agent()

    while (user_in := input("User: ")) != "exit":
        ai_msg = agent.invoke(
            {
                "messages": [
                    {
                        "role": "user",
                        "content": user_in,
                    }
                ]
            }
        )
        print(ai_msg["messages"][-1].content)

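
# Voice mode: the same agent, but the greeting, transcription, and final
# handoff all happen over audio.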
@app.command()
def voice():
    """
    Run the bot in voice mode.
    """
    agent = get_agent(voice=True)

    for chunk in el_client.text_to_speech.convert_as_stream(
        voice_id="khYwAWwYSjlxlcrwGQ16",
        text="Hello. Welcome to NHS one one one. How can I help?",
        output_format="pcm_22050",
    ):
        out_stream.write(chunk)

    with sr.Microphone() as microphone:
        # Use the shared, ambient-noise-calibrated recognizer rather than
        # shadowing it with a fresh local instance.
        recognizer.adjust_for_ambient_noise(microphone)
        out_stream.write(BEEP_SOUND)
        input_sound = recognizer.listen(microphone, phrase_time_limit=None)
        text = recognizer.recognize_whisper(input_sound, language="en")

    print("You said:", text)
    response = agent.invoke(
        {
            "messages": [
                {
                    "role": "user",
                    "content": text,
                }
            ]
        }
    )

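    # The prompt demands bare JSON, but models occasionally wrap it in
    # markdown fences; if that happens, json.loads fails and we apologise below.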
    try:
        response = json.loads(response["messages"][-1].content)

    except json.JSONDecodeError:
        for chunk in el_client.text_to_speech.convert_as_stream(
            voice_id="khYwAWwYSjlxlcrwGQ16",
            text="I'm sorry, but something went wrong.",
            output_format="pcm_22050",
        ):
            out_stream.write(chunk)

        return

    for chunk in el_client.text_to_speech.convert_as_stream(
        voice_id="khYwAWwYSjlxlcrwGQ16",
        text="Thank you for your patience. I will now pass you to a human agent.",
        output_format="pcm_22050",
    ):
        out_stream.write(chunk)

    print(response)


if __name__ == "__main__":
    app()
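
# Usage (assuming AWS credentials and the ElevenLabs key are configured;
# replace <this_script> with the actual filename):
#   python <this_script>.py text    # console triage chat
#   python <this_script>.py voice   # spoken, NHS 111-style triage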