Skip to content
This repository was archived by the owner on Aug 5, 2025. It is now read-only.

Commit ff7f64a

Browse files
feat: added workflow test
1 parent 1cdace9 commit ff7f64a

File tree

1 file changed

+48
-0
lines changed

1 file changed

+48
-0
lines changed

examples/llamaindex_workflow.py

Lines changed: 48 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,48 @@
1+
import asyncio

from llama_index.core.workflow import (
    Event,
    StartEvent,
    StopEvent,
    Workflow,
    step,
)
from llama_index.llms.openai import OpenAI

from literalai.client import LiteralClient

# Create the Literal AI client and initialize its instrumentation so the
# workflow runs below are captured as traced threads.
lai_client = LiteralClient()
lai_client.initialize()
14+
15+
16+
class JokeEvent(Event):
    """Intermediate workflow event carrying the generated joke text."""

    # Populated by JokeFlow.generate_joke and consumed by critique_joke.
    joke: str
18+
19+
20+
class JokeFlow(Workflow):
    """Two-step workflow: write a joke about a topic, then critique it."""

    # Shared LLM used by both steps; default OpenAI model/settings.
    llm = OpenAI()

    @step()
    async def generate_joke(self, ev: StartEvent) -> JokeEvent:
        """Ask the LLM for a joke about ``ev.topic`` and emit a JokeEvent."""
        completion = await self.llm.acomplete(
            f"Write your best joke about {ev.topic}."
        )
        return JokeEvent(joke=str(completion))

    @step()
    async def critique_joke(self, ev: JokeEvent) -> StopEvent:
        """Have the LLM critique the joke and finish the workflow."""
        completion = await self.llm.acomplete(
            f"Give a thorough analysis and critique of the following joke: {ev.joke}"
        )
        return StopEvent(result=str(completion))
38+
39+
40+
@lai_client.thread(name="JokeFlow")
async def main():
    """Run JokeFlow once (topic: pirates) inside a traced Literal AI thread
    and print the final critique."""
    workflow = JokeFlow(timeout=60, verbose=False)
    outcome = await workflow.run(topic="pirates")
    print(str(outcome))
45+
46+
47+
if __name__ == "__main__":
    # Script entry point: drive the async main() on a fresh event loop.
    asyncio.run(main())

0 commit comments

Comments (0)