Skip to content

Commit ecc2988

Browse files
committed
Final push
1 parent 2c0b2ab commit ecc2988

30 files changed

+14128
-1
lines changed

.gitignore

Lines changed: 137 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -179,4 +179,140 @@ cython_debug/
179179
# exclude from AI features like autocomplete and code analysis. Recommended for sensitive data
180180
# refer to https://docs.cursor.com/context/ignore-files
181181
.cursorignore
182-
.cursorindexingignore
182+
.cursorindexingignore
183+
# Logs
184+
logs
185+
*.log
186+
npm-debug.log*
187+
yarn-debug.log*
188+
yarn-error.log*
189+
lerna-debug.log*
190+
.pnpm-debug.log*
191+
192+
# Diagnostic reports (https://nodejs.org/api/report.html)
193+
report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json
194+
195+
# Runtime data
196+
pids
197+
*.pid
198+
*.seed
199+
*.pid.lock
200+
201+
# Directory for instrumented libs generated by jscoverage/JSCover
202+
lib-cov
203+
204+
# Coverage directory used by tools like istanbul
205+
coverage
206+
*.lcov
207+
208+
# nyc test coverage
209+
.nyc_output
210+
211+
# Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files)
212+
.grunt
213+
214+
# Bower dependency directory (https://bower.io/)
215+
bower_components
216+
217+
# node-waf configuration
218+
.lock-wscript
219+
220+
# Compiled binary addons (https://nodejs.org/api/addons.html)
221+
build/Release
222+
223+
# Dependency directories
224+
node_modules/
225+
jspm_packages/
226+
227+
# Snowpack dependency directory (https://snowpack.dev/)
228+
web_modules/
229+
230+
# TypeScript cache
231+
*.tsbuildinfo
232+
233+
# Optional npm cache directory
234+
.npm
235+
236+
# Optional eslint cache
237+
.eslintcache
238+
239+
# Optional stylelint cache
240+
.stylelintcache
241+
242+
# Microbundle cache
243+
.rpt2_cache/
244+
.rts2_cache_cjs/
245+
.rts2_cache_es/
246+
.rts2_cache_umd/
247+
248+
# Optional REPL history
249+
.node_repl_history
250+
251+
# Output of 'npm pack'
252+
*.tgz
253+
254+
# Yarn Integrity file
255+
.yarn-integrity
256+
257+
# dotenv environment variable files
258+
.env
259+
.env.development.local
260+
.env.test.local
261+
.env.production.local
262+
.env.local
263+
264+
# parcel-bundler cache (https://parceljs.org/)
265+
.cache
266+
.parcel-cache
267+
268+
# Next.js build output
269+
.next
270+
out
271+
272+
# Nuxt.js build / generate output
273+
.nuxt
274+
dist
275+
276+
# Gatsby files
277+
.cache/
278+
# Comment in the public line in if your project uses Gatsby and not Next.js
279+
# https://nextjs.org/blog/next-9-1#public-directory-support
280+
# public
281+
282+
# vuepress build output
283+
.vuepress/dist
284+
285+
# vuepress v2.x temp and cache directory
286+
.temp
287+
.cache
288+
289+
# vitepress build output
290+
**/.vitepress/dist
291+
292+
# vitepress cache directory
293+
**/.vitepress/cache
294+
295+
# Docusaurus cache and generated files
296+
.docusaurus
297+
298+
# Serverless directories
299+
.serverless/
300+
301+
# FuseBox cache
302+
.fusebox/
303+
304+
# DynamoDB Local files
305+
.dynamodb/
306+
307+
# TernJS port file
308+
.tern-port
309+
310+
# Stores VSCode versions used for testing VSCode extensions
311+
.vscode-test
312+
313+
# yarn v2
314+
.yarn/cache
315+
.yarn/unplugged
316+
.yarn/build-state.yml
317+
.yarn/install-state.gz
318+
.pnp.*

README.md

Whitespace-only changes.

api/__init__.py

Whitespace-only changes.

api/app.py

Lines changed: 59 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,59 @@
1+
from fastapi import FastAPI, Body
from langchain_core.messages import HumanMessage, SystemMessage, AIMessage

from fastapi.middleware.cors import CORSMiddleware
from api.llm import get_agent, parse_response

app = FastAPI()

# NOTE(review): wildcard origins combined with allow_credentials=True is
# rejected by browsers for credentialed requests per the CORS spec — confirm
# whether credentials are actually needed, or pin explicit origins.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Conversation history shared by every endpoint. The original `global
# USER_MESSAGES` statement was removed: `global` is a no-op at module scope.
# NOTE(review): one process-wide history means concurrent clients share (and
# clobber) the same conversation — consider per-session storage.
USER_MESSAGES = []
18+
19+
20+
@app.post("/whats-wrong")
async def whats_wrong(user_input: str = Body()) -> dict:
    """
    Append the user's message to the shared history, run the agent over the
    whole conversation, record the agent's reply, and return it parsed as JSON.
    """
    agent = get_agent()

    USER_MESSAGES.append(HumanMessage(content=user_input))

    result = agent.invoke({"messages": USER_MESSAGES})
    reply = result["messages"][-1]
    USER_MESSAGES.append(reply)

    return parse_response(reply)
34+
35+
36+
@app.get("/reset")
async def reset() -> None:
    """
    Clear the shared conversation history, starting a fresh session.

    (Original docstring said "Root endpoint.", which was wrong.)
    """
    USER_MESSAGES.clear()
42+
43+
44+
@app.get("/outcomes")
async def outcomes() -> list[dict]:
    """
    Return the shared conversation history as {"role", "content"} dicts
    suitable for a chat-style UI.

    Human messages map to "user", AI messages to "assistant", and system
    messages to "system"; any other message type is skipped.
    (Original docstring said "Root endpoint.", which was wrong.)
    """
    messages = []

    for message in USER_MESSAGES:
        if isinstance(message, HumanMessage):
            messages.append({"role": "user", "content": message.content})
        elif isinstance(message, AIMessage):
            messages.append({"role": "assistant", "content": message.content})
        elif isinstance(message, SystemMessage):
            messages.append({"role": "system", "content": message.content})

    return messages

api/cli.py

Lines changed: 114 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,114 @@
1+
import typer
from api.llm import get_agent
from langchain_core.messages import HumanMessage
from api.llm import parse_response
import rich
from rich.prompt import Prompt
from api.speech import recognizer

import speech_recognition as sr
from api.tts import speak
from api.console import console

# Typer application exposing the `text` and `voice` chat commands below.
cli = typer.Typer()
14+
15+
16+
@cli.command()
def text():
    """
    Interactive text chat loop.

    Repeatedly prompts the user, feeds the running conversation to the agent,
    and prints the parsed reply.  Exits when the agent reaches an "outcome" or
    "emergency", or when the user types "exit".
    """
    agent = get_agent()
    user_input = Prompt.ask("[bold blue]What are your symptoms?[/bold blue]")

    messages = []

    while user_input != "exit":
        messages.append(HumanMessage(content=user_input))

        result = agent.invoke({"messages": messages})
        messages.append(result["messages"][-1])

        response = parse_response(result["messages"][-1])

        if response["type"] == "question":
            rich.print("[green bold]Assistant[/green bold]:", response["content"])

        if response["type"] == "outcome":
            print("----- PATHWAY REACHED -----")
            # BUG FIX: rich.print_json's positional argument must be a JSON
            # *string*; a dict must be passed via the `data=` keyword (as the
            # `voice` command already does).
            rich.print_json(data=response, indent=2)
            print("----- END OF PATHWAY -----")
            break

        if response["type"] == "emergency":
            print("----- EMERGENCY -----")
            rich.print_json(data=response, indent=2)
            print("----- END OF EMERGENCY -----")
            break

        user_input = Prompt.ask("[bold cyan]User[/bold cyan]")
47+
48+
49+
@cli.command()
def voice(intro: bool = True, calibrate: bool = True):
    """
    Voice chat loop.

    Listens on the default microphone, transcribes with Whisper, runs the
    agent over the conversation, and speaks each reply aloud.  Keeps looping
    while the agent asks questions; stops on an "outcome" or "emergency".

    Args:
        intro: Speak the welcome greeting before listening.
        calibrate: Sample ambient noise to calibrate the microphone first.
    """
    agent = get_agent(enable_voice_tool=False)
    messages = []

    def listen_and_transcribe() -> str:
        # Capture one utterance and return its Whisper transcription.
        # (Extracted: this block was duplicated verbatim in the original.)
        with console.status("[bold cyan]Listening for input[/bold cyan]") as status:
            with sr.Microphone() as source:
                audio = recognizer.listen(source, phrase_time_limit=None)
                status.update("[bold green]Recognizing...[/bold green]")
                return recognizer.recognize_whisper(audio, language="en")

    def run_agent() -> dict:
        # Invoke the agent on the running conversation, record its reply,
        # and return the parsed JSON payload.
        with console.status("[bold cyan]Running Agent...[/bold cyan]"):
            result = agent.invoke({"messages": messages})
        messages.append(result["messages"][-1])
        return parse_response(result["messages"][-1])

    if calibrate:
        # `as status` binding was unused here in the original — dropped.
        with console.status("[bold]Calibrating microphone...[/bold]"):
            with sr.Microphone() as source:
                recognizer.adjust_for_ambient_noise(source)
        console.print("[bold green]Microphone calibrated![/bold green]")

    if intro:
        speak(
            "Good afternoon. Welcome to NHS one one one. Can you tell me why you are contacting us today?"
        )

    # Renamed local from `text` to `transcript`: it shadowed the sibling
    # `text` CLI command.
    transcript = listen_and_transcribe()
    rich.print("[bold]You said:[/bold]", transcript)
    messages.append(HumanMessage(content=transcript))

    parsed_response = run_agent()

    while parsed_response.get("type") == "question":
        speak(parsed_response["content"])

        transcript = listen_and_transcribe()
        rich.print("[bold]You said:[/bold]", transcript)
        messages.append(HumanMessage(content=transcript))

        parsed_response = run_agent()

    rich.print_json(data=parsed_response, indent=2)
    if parsed_response["type"] == "outcome":
        rich.print("[bold blue]EXIT - PATHWAY REACHED[/bold blue]")
        speak(
            "Thank you. Your pathway has been selected and you will shortly be transferred to an agent."
        )

    if parsed_response["type"] == "emergency":
        rich.print("[bold red]EXIT - EMERGENCY[/bold red]")
        speak(
            "Your symptoms indicate a medical emergency. Please call nine nine nine or go to your nearest A&E."
        )
111+
112+
113+
# Allow running the CLI directly, e.g. `python api/cli.py`.
if __name__ == "__main__":
    cli()

api/console.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,3 @@
1+
from rich.console import Console

# Single shared Rich console so all modules emit styled output consistently.
console = Console()

api/llm.py

Lines changed: 41 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,41 @@
1+
from langgraph.prebuilt import create_react_agent
2+
from langchain_aws import ChatBedrock
3+
import json
4+
from api.prompt import MAIN_PROMPT
5+
from langchain_core.messages import AIMessage
6+
from langchain_core.tools import tool
7+
from api.tts import speak
8+
import speech_recognition as sr
9+
from api.speech import ask_user
10+
11+
12+
def get_agent(enable_voice_tool: bool = False):
    """
    Build the ReAct agent backed by Claude 3 Haiku on Bedrock.

    Args:
        enable_voice_tool: When True, register the `ask_user` voice tool.

    Returns:
        The compiled LangGraph ReAct agent.
    """
    llm = ChatBedrock(
        model="anthropic.claude-3-haiku-20240307-v1:0",
        temperature=0.5,
        max_tokens=48000,
    )

    # Debug aid: report how large the system prompt is in tokens.
    print("Prompt Tokens", llm.get_num_tokens(MAIN_PROMPT))

    tools = [ask_user] if enable_voice_tool else []

    return create_react_agent(
        model=llm,
        prompt=MAIN_PROMPT,
        tools=tools,
    )
32+
33+
34+
def parse_response(response: "AIMessage") -> dict:
    """
    Extract and parse the JSON object embedded in an agent message.

    The model may wrap its JSON payload in surrounding prose, so everything
    before the first '{' and after the last '}' is stripped first.

    Args:
        response: The agent's message; only its ``content`` string is used.

    Returns:
        The parsed JSON payload as a dict (or whatever the JSON encodes).

    Raises:
        ValueError: If no JSON object is present, or if the extracted span is
            not valid JSON (``json.JSONDecodeError`` is a ``ValueError``).
    """
    content = response.content

    start = content.find("{")
    end = content.rfind("}")
    # Guard the original -1 edge case: without braces, slicing with find()'s
    # -1 results produced garbage and a confusing JSONDecodeError.
    if start == -1 or end < start:
        raise ValueError(f"No JSON object found in agent response: {content!r}")

    return json.loads(content[start : end + 1])

0 commit comments

Comments
 (0)