-
-
Notifications
You must be signed in to change notification settings - Fork 1.1k
Expand file tree
/
Copy pathall_providers.py
More file actions
117 lines (96 loc) Β· 3.54 KB
/
all_providers.py
File metadata and controls
117 lines (96 loc) Β· 3.54 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
"""All compute providers β comprehensive test across Local, Docker, E2B, and Modal.
This example demonstrates local agent loops with various compute backends for tool sandboxing.
Uses the new LocalAgent class to clearly indicate local execution with optional cloud compute.
Requires:
- Docker running locally
- E2B_API_KEY set
- modal CLI configured (modal token set)
"""
import asyncio
from praisonai import Agent
from praisonai.integrations import LocalAgent, LocalAgentConfig
async def test_provider(name, compute, extra_provision_kwargs=None):
    """Run one compute provider through the full lifecycle.

    Provisions an instance, runs two sandbox commands, exercises the agent
    LLM over two turns, reports token usage, and shuts the instance down.

    Args:
        name: Human-readable provider name, used only in log output.
        compute: Backend identifier passed to ``LocalAgent``
            (e.g. ``"docker"``, ``"e2b"``, ``"modal"``).
        extra_provision_kwargs: Optional dict of provider-specific
            provisioning options (image name, idle timeout, ...).

    Returns:
        dict with keys ``name``, ``input_tokens``, ``output_tokens``.

    Raises:
        AssertionError: if the sandbox arithmetic check does not return 546.
    """
    print(f"\n{'='*60}")
    print(f" PROVIDER: {name}")
    print(f"{'='*60}")
    managed = LocalAgent(
        compute=compute,
        config=LocalAgentConfig(
            model="gpt-4o-mini",
            system="You are a helpful assistant. Be concise.",
            name=f"{name}Agent",
        ),
    )
    agent = Agent(name=f"{name}-test", backend=managed)
    # 1. Provision
    print(f"\n [1] Provisioning {name}...")
    provision_kwargs = extra_provision_kwargs or {}
    info = await managed.provision_compute(**provision_kwargs)
    print(f" Instance: {info.instance_id}")
    print(f" Status: {info.status}")
    # Everything after provisioning runs under try/finally so a failed
    # assertion or LLM call cannot leak a running remote sandbox.
    try:
        # 2. Execute command
        print(f"\n [2] Execute in {name}...")
        result = await managed.execute_in_compute("python3 -c 'print(42 * 13)'")
        stdout = result["stdout"].strip()
        assert "546" in stdout, f"Expected 546, got: {stdout}"
        # NOTE: the source had mojibake ("β") here; restored "✓".
        print(f" 42 * 13 = {stdout} ✓")
        # 3. Execute echo
        result = await managed.execute_in_compute(f"echo 'Hello from {name}'")
        print(f" Echo: {result['stdout'].strip()}")
        # 4. Agent LLM
        print(f"\n [3] Agent LLM via {name}...")
        llm_result = agent.start("What is 9 * 8? Just the number.", stream=True)
        print(f" LLM result: {llm_result}")
        # 5. Multi-turn — "that" relies on conversation state from the
        # previous turn, so the second call must reuse the same agent.
        print("\n [4] Multi-turn...")
        llm_result = agent.start("Add 10 to that. Just the number.", stream=True)
        print(f" Follow-up: {llm_result}")
        # 6. Usage
        session = managed.retrieve_session()
        usage = session["usage"]
        print(f"\n [5] Usage: in={usage['input_tokens']}, out={usage['output_tokens']}")
    finally:
        # 7. Shutdown — always runs, even on failure above.
        print(f"\n [6] Shutdown {name}...")
        await managed.shutdown_compute()
        print(" Done ✓")
    return {
        "name": name,
        "input_tokens": usage["input_tokens"],
        "output_tokens": usage["output_tokens"],
    }
async def main():
    """Run the end-to-end test against every provider and print a summary.

    One provider failing is logged and skipped so the others still run.
    """
    # (display name, compute backend id, provision kwargs) — add new
    # providers here; the pass count below adjusts automatically.
    providers = [
        ("Docker", "docker", {"image": "python:3.12-slim"}),
        ("E2B", "e2b", {"idle_timeout_s": 120}),
        ("Modal", "modal", {"idle_timeout_s": 120}),
    ]
    results = []
    for name, compute, kwargs in providers:
        try:
            results.append(await test_provider(name, compute, kwargs))
        except Exception as e:
            # Best-effort sweep: report the failure and keep going.
            print(f"\n {name} FAILED: {e}")
    # Summary
    print(f"\n\n{'='*60}")
    print(" SUMMARY")
    print(f"{'='*60}")
    total_in = sum(r["input_tokens"] for r in results)
    total_out = sum(r["output_tokens"] for r in results)
    for r in results:
        print(f" {r['name']:15s} | in: {r['input_tokens']:6d} | out: {r['output_tokens']:6d}")
    print(f" {'TOTAL':15s} | in: {total_in:6d} | out: {total_out:6d}")
    print(f"{'='*60}")
    # len(providers) replaces the hard-coded 3 of the original, which would
    # silently go stale when a provider is added or removed.
    print(f" {len(results)}/{len(providers)} providers passed")
if __name__ == "__main__":
asyncio.run(main())