forked from livekit/agents
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathtext_only.py
More file actions
51 lines (40 loc) · 1.6 KB
/
text_only.py
File metadata and controls
51 lines (40 loc) · 1.6 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
import logging
from dotenv import load_dotenv
from livekit.agents import (
    Agent,
    AgentSession,
    JobContext,
    RoomInputOptions,
    RoomOutputOptions,
    WorkerOptions,
    cli,
)
from livekit.plugins import openai

# Module-level logger for this example; INFO so session lifecycle messages show.
logger = logging.getLogger("text-only")
logger.setLevel(logging.INFO)

# Load environment variables (e.g. LIVEKIT_/OPENAI_ credentials) from a .env file.
load_dotenv()
## This example demonstrates a text-only agent.
## When using with LiveKit SDKs, this agent is automatically wired up to text input and output:
## - Send text input using TextStream to topic `lk.chat` (https://docs.livekit.io/home/client/data/text-streams)
## - The agent output is sent through TextStream to the `lk.transcription` topic
## You can also transport text by other means and send it to the agent directly
## - Send text input via: `generate_reply(user_input="user's input text")`
## - Receive agent's response via `session.on("conversation_item_added", ev)`. docs: https://docs.livekit.io/agents/build/events/#conversation_item_added
class MyAgent(Agent):
    """Minimal text-only agent configured with a single system instruction."""

    def __init__(self) -> None:
        # Keep the prompt in a local so it reads as the agent's one piece of config.
        instructions = "You are a helpful assistant."
        super().__init__(instructions=instructions)
async def entrypoint(ctx: JobContext):
    """Start a text-only agent session in the job's room.

    Text input/output is wired through LiveKit text streams; audio is disabled
    on both the input and output side.
    """
    # Only an LLM is configured — no STT or TTS is needed for a text-only agent.
    agent_session = AgentSession(
        llm=openai.LLM(model="gpt-4o-mini"),
    )

    # Accept text, reject audio on input; emit transcription text, no audio out.
    input_options = RoomInputOptions(text_enabled=True, audio_enabled=False)
    output_options = RoomOutputOptions(transcription_enabled=True, audio_enabled=False)

    await agent_session.start(
        agent=MyAgent(),
        room=ctx.room,
        room_input_options=input_options,
        room_output_options=output_options,
    )
if __name__ == "__main__":
cli.run_app(WorkerOptions(entrypoint_fnc=entrypoint))