1919from pipecat .pipeline .task import PipelineParams , PipelineTask
2020from pipecat .processors .aggregators .llm_context import LLMContext
2121from pipecat .processors .aggregators .llm_response_universal import LLMContextAggregatorPair
22+ from pipecat .processors .frameworks .rtvi import RTVIConfig , RTVIObserver , RTVIProcessor
2223from pipecat .runner .types import RunnerArguments
2324from pipecat .runner .utils import create_transport
2425from pipecat .services .deepgram .stt import DeepgramSTTService
@@ -78,9 +79,12 @@ async def run_bot(transport: BaseTransport, runner_args: RunnerArguments):
7879 context = LLMContext (messages )
7980 context_aggregator = LLMContextAggregatorPair (context )
8081
82+ rtvi = RTVIProcessor (config = RTVIConfig (config = []))
83+
8184 pipeline = Pipeline (
8285 [
8386 transport .input (), # Transport user input
87+ rtvi ,
8488 stt ,
8589 context_aggregator .user (), # User responses
8690 llm , # LLM
@@ -98,8 +102,13 @@ async def run_bot(transport: BaseTransport, runner_args: RunnerArguments):
98102 audio_out_sample_rate = HUME_SAMPLE_RATE ,
99103 ),
100104 idle_timeout_secs = runner_args .pipeline_idle_timeout_secs ,
105+ observers = [RTVIObserver (rtvi )],
101106 )
102107
@rtvi.event_handler("on_client_ready")
async def on_client_ready(processor):
    """Mark the bot as ready once the RTVI client signals readiness."""
    # The RTVI handshake requires an explicit bot-ready ack before the
    # client starts streaming; send it as soon as the client reports ready.
    await processor.set_bot_ready()
103112 @transport .event_handler ("on_client_connected" )
104113 async def on_client_connected (transport , client ):
105114 logger .info (f"Client connected" )
0 commit comments