1+ import asyncio
12import logging
2- import random
33from typing import Annotated
44
55import aiohttp
1111 WorkerOptions ,
1212 cli ,
1313 llm ,
14+ metrics ,
1415)
15- from livekit .agents .pipeline import AgentCallContext , VoicePipelineAgent
16+ from livekit .agents .pipeline import VoicePipelineAgent
1617from livekit .plugins import deepgram , openai , silero
1718
1819load_dotenv ()
@@ -50,25 +51,21 @@ async def get_weather(
5051 # that it might take awhile:
5152 # Option 1: you can use .say filler message immediately after the call is triggered
5253 # Option 2: you can prompt the agent to return a text response when it's making a function call
53- agent = AgentCallContext .get_current ().agent
54-
55- if (
56- not agent .chat_ctx .messages
57- or agent .chat_ctx .messages [- 1 ].role != "assistant"
58- ):
59- # skip if assistant already said something
60- filler_messages = [
61- "Let me check the weather in {location} for you." ,
62- "Let me see what the weather is like in {location} right now." ,
63- # LLM will complete this sentence if it is added to the end of the chat context
64- "The current weather in {location} is " ,
65- ]
66- message = random .choice (filler_messages ).format (location = location )
67- logger .info (f"saying filler message: { message } " )
68-
69- # NOTE: set add_to_chat_ctx=True will add the message to the end
70- # of the chat context of the function call for answer synthesis
71- speech_handle = await agent .say (message , add_to_chat_ctx = True ) # noqa: F841
54+
55+ # uncomment for option 1
56+ # agent = AgentCallContext.get_current().agent
57+ # filler_messages = [
58+ # "Let me check the weather in {location} for you.",
59+ # "Let me see what the weather is like in {location} right now.",
60+ # # LLM will complete this sentence if it is added to the end of the chat context
61+ # "The current weather in {location} is ",
62+ # ]
63+ # message = random.choice(filler_messages).format(location=location)
64+ # logger.info(f"saying filler message: {message}")
65+
66+ # NOTE: set add_to_chat_ctx=True will add the message to the end
67+ # of the chat context of the function call for answer synthesis
68+ # speech_handle = await agent.say(message, add_to_chat_ctx=True) # noqa: F841
7269
7370 logger .info (f"getting weather for { latitude } , { longitude } " )
7471 url = f"https://api.open-meteo.com/v1/forecast?latitude={ latitude } &longitude={ longitude } &current=temperature_2m"
@@ -82,13 +79,17 @@ async def get_weather(
8279 "temperature" : data ["current" ]["temperature_2m" ],
8380 "temperature_unit" : "Celsius" ,
8481 }
85- logger .info (f"weather data: { weather_data } " )
8682 else :
8783 raise Exception (
8884 f"Failed to get weather data, status code: { response .status } "
8985 )
9086
87+ # artificially delay the function call for testing
88+ await asyncio .sleep (2 )
89+ logger .info (f"weather data: { weather_data } " )
90+
9191 # (optional) To wait for the speech to finish before giving results of the function call
92+ # without waiting, the new speech result will be queued and played after current speech is finished
9293 # await speech_handle.join()
9394 return weather_data
9495
@@ -106,26 +107,37 @@ async def entrypoint(ctx: JobContext):
106107 "You are a weather assistant created by LiveKit. Your interface with users will be voice. "
107108 "You will provide weather information for a given location. "
108109 # when using option 1, you can suppress from the agent with prompt
109- "do not return any text while calling the function."
110- # uncomment this to use option 2
111- # "when performing function calls, let user know that you are checking the weather."
110+ # "do not return any text while calling the function."
111+ # option 2 - using LLM to generate text for the function call
112+ "when performing function calls, let user know that you are checking the weather."
112113 ),
113114 role = "system" ,
114115 )
115116 participant = await ctx .wait_for_participant ()
116117 agent = VoicePipelineAgent (
117118 vad = ctx .proc .userdata ["vad" ],
118119 stt = deepgram .STT (),
119- llm = openai .LLM (model = "gpt-4o-mini " ),
120+ llm = openai .LLM (model = "gpt-4o" ),
120121 tts = openai .TTS (),
121122 fnc_ctx = fnc_ctx ,
122123 chat_ctx = initial_chat_ctx ,
123124 )
124125
126+ usage_collector = metrics .UsageCollector ()
127+
128+ @agent .on ("metrics_collected" )
129+ def _on_metrics_collected (mtrcs : metrics .AgentMetrics ):
130+ metrics .log_metrics (mtrcs )
131+ usage_collector .collect (mtrcs )
132+
133+ async def log_usage ():
134+ summary = usage_collector .get_summary ()
135+ logger .info (f"Usage: ${ summary } " )
136+
125137 # Start the assistant. This will automatically publish a microphone track and listen to the participant.
126138 agent .start (ctx .room , participant )
127139 await agent .say (
128- "Hello from the weather station. Would you like to know the weather? If so, tell me your location ."
140+ "Hello from the weather station. Tell me your location to check the weather."
129141 )
130142
131143
0 commit comments