
Commit c49ad94
Release v2.5.0
Parent: 998c89b

33 files changed: 2593 additions, 61 deletions

.gitignore

Lines changed: 3 additions & 1 deletion

@@ -80,4 +80,6 @@ CopilotKit*
 issue_details.json
 
 # MCP Registry tokens
-.mcpregistry_*
+.mcpregistry_*
+
+*.log

README.md

Lines changed: 4 additions & 0 deletions

@@ -261,6 +261,10 @@ npm install praisonai
 | ↳ 100+ LLM Support | [Example](examples/python/providers/openai/openai_gpt4_example.py) | [📖](https://docs.praison.ai/models) |
 | ↳ Callback Agents | [Example](examples/python/general/advanced-callback-systems.py) | [📖](https://docs.praison.ai/features/callbacks) |
 | ↳ Hooks | [Example](#9-hooks) | [📖](https://docs.praison.ai/features/hooks) |
+| ↳ Middleware System | [Example](examples/middleware/basic_middleware.py) | [📖](https://docs.praison.ai/features/middleware) |
+| ↳ Configurable Model | [Example](examples/middleware/configurable_model.py) | [📖](https://docs.praison.ai/features/configurable-model) |
+| ↳ Rate Limiter | [Example](examples/middleware/rate_limiter.py) | [📖](https://docs.praison.ai/features/rate-limiter) |
+| ↳ Injected Tool State | [Example](examples/middleware/injected_state.py) | [📖](https://docs.praison.ai/features/injected-state) |
 | ↳ Shadow Git Checkpoints | [Example](#10-shadow-git-checkpoints) | [📖](https://docs.praison.ai/features/checkpoints) |
 | ↳ Background Tasks | [Example](examples/background/basic_background.py) | [📖](https://docs.praison.ai/features/background-tasks) |
 | ↳ Policy Engine | [Example](examples/policy/basic_policy.py) | [📖](https://docs.praison.ai/features/policy-engine) |

docker/Dockerfile.chat

Lines changed: 1 addition & 1 deletion

@@ -16,7 +16,7 @@ RUN mkdir -p /root/.praison
 # Install Python packages (using latest versions)
 RUN pip install --no-cache-dir \
     praisonai_tools \
-    "praisonai>=2.4.4" \
+    "praisonai>=2.5.0" \
     "praisonai[chat]" \
     "embedchain[github,youtube]"
 

docker/Dockerfile.dev

Lines changed: 1 addition & 1 deletion

@@ -20,7 +20,7 @@ RUN mkdir -p /root/.praison
 # Install Python packages (using latest versions)
 RUN pip install --no-cache-dir \
     praisonai_tools \
-    "praisonai>=2.4.4" \
+    "praisonai>=2.5.0" \
     "praisonai[ui]" \
     "praisonai[chat]" \
     "praisonai[realtime]" \

docker/Dockerfile.ui

Lines changed: 1 addition & 1 deletion

@@ -16,7 +16,7 @@ RUN mkdir -p /root/.praison
 # Install Python packages (using latest versions)
 RUN pip install --no-cache-dir \
     praisonai_tools \
-    "praisonai>=2.4.4" \
+    "praisonai>=2.5.0" \
     "praisonai[ui]" \
     "praisonai[crewai]"
 

examples/middleware/basic_middleware.py

Lines changed: 55 additions & 0 deletions

@@ -0,0 +1,55 @@
+"""
+Basic Middleware Example - PraisonAI Agents
+
+Demonstrates before/after hooks and wrap decorators for model and tool calls.
+"""
+
+from praisonaiagents import Agent, tool
+from praisonaiagents.hooks import (
+    before_model, after_model, wrap_model_call,
+    before_tool, after_tool, wrap_tool_call,
+    InvocationContext, ModelRequest, ModelResponse
+)
+
+# Simple tool for demonstration
+@tool
+def get_weather(city: str) -> str:
+    """Get weather for a city."""
+    return f"Sunny, 22°C in {city}"
+
+# Before model hook - adds context
+@before_model
+def add_context(request: ModelRequest) -> ModelRequest:
+    print(f"[before_model] Adding context to request")
+    return request
+
+# After model hook - logs response
+@after_model
+def log_response(response: ModelResponse) -> ModelResponse:
+    print(f"[after_model] Response received")
+    return response
+
+# Wrap tool call - retry on error
+@wrap_tool_call
+def retry_on_error(tool_call, call_next):
+    print(f"[wrap_tool_call] Executing tool")
+    try:
+        return call_next(tool_call)
+    except Exception as e:
+        print(f"[wrap_tool_call] Retrying after error: {e}")
+        return call_next(tool_call)
+
+# Create agent with hooks
+agent = Agent(
+    name="WeatherBot",
+    instructions="You help with weather queries.",
+    tools=[get_weather],
+    hooks=[add_context, log_response, retry_on_error]
+)
+
+if __name__ == "__main__":
+    # Test the tool directly
+    result = get_weather("London")
+    print(f"Weather: {result}")
+
+    print("\n✓ Middleware example complete")
examples/middleware/configurable_model.py

Lines changed: 31 additions & 0 deletions

@@ -0,0 +1,31 @@
+"""
+Configurable Model Example - PraisonAI Agents
+
+Demonstrates runtime model switching without recreating the agent.
+"""
+
+from praisonaiagents import Agent
+
+# Create agent with configurable model enabled
+agent = Agent(
+    name="FlexBot",
+    instructions="You are a helpful assistant.",
+    llm="gpt-4o-mini",
+    llm_config={"configurable": True}
+)
+
+if __name__ == "__main__":
+    # Default model call
+    print("Using default model (gpt-4o-mini)...")
+    # response = agent.chat("Say hello in 5 words")
+
+    # Override model per-call
+    print("\nOverriding to different model...")
+    # response = agent.chat("Say hello in 5 words", config={"model": "gpt-4o"})
+
+    # Override temperature
+    print("\nOverriding temperature...")
+    # response = agent.chat("Say hello creatively", config={"temperature": 0.9})
+
+    print("\n✓ Configurable model example complete")
+    print("Note: Uncomment agent.chat() calls to run with API key")

examples/middleware/injected_state.py

Lines changed: 50 additions & 0 deletions

@@ -0,0 +1,50 @@
+"""
+Injected State Example - PraisonAI Agents
+
+Demonstrates injecting agent state into tools without exposing it in the schema.
+"""
+
+from praisonaiagents import Agent, tool
+from praisonaiagents.tools import Injected
+from praisonaiagents.tools.injected import AgentState, with_injection_context
+
+# Tool with injected state - state param is NOT in the public schema
+@tool
+def show_context(query: str, state: Injected[dict]) -> str:
+    """Show the current agent context."""
+    session_id = state.get('session_id', 'unknown')
+    agent_id = state.get('agent_id', 'unknown')
+    return f"Query: {query}, Session: {session_id}, Agent: {agent_id}"
+
+# Create agent with the tool
+agent = Agent(
+    name="ContextBot",
+    instructions="You help show context information.",
+    tools=[show_context],
+    session_id="my-session-123"
+)
+
+if __name__ == "__main__":
+    # Verify injected param is not in schema
+    schema = show_context.get_schema()
+    params = schema['function']['parameters']['properties']
+    print(f"Schema params: {list(params.keys())}")
+    assert 'state' not in params, "state should NOT be in schema"
+    print("✓ 'state' correctly excluded from schema")
+
+    # Test with manual injection context
+    mock_state = AgentState(
+        agent_id="test-agent",
+        run_id="run-1",
+        session_id="session-abc"
+    )
+
+    with with_injection_context(mock_state):
+        result = show_context(query="hello")
+        print(f"Result: {result}")
+
+    # Test via agent.execute_tool
+    result = agent.execute_tool("show_context", {"query": "test"})
+    print(f"Agent result: {result}")
+
+    print("\n✓ Injected state example complete")

examples/middleware/rate_limiter.py

Lines changed: 29 additions & 0 deletions

@@ -0,0 +1,29 @@
+"""
+Rate Limiter Example - PraisonAI Agents
+
+Demonstrates token bucket rate limiting for LLM API calls.
+"""
+
+from praisonaiagents import Agent
+from praisonaiagents.llm import RateLimiter
+
+# Create rate limiter: 60 requests per minute
+limiter = RateLimiter(requests_per_minute=60, burst=5)
+
+# Create agent with rate limiter
+agent = Agent(
+    name="RateLimitedBot",
+    instructions="You are a helpful assistant.",
+    rate_limiter=limiter
+)
+
+if __name__ == "__main__":
+    print(f"Rate limiter: {limiter}")
+    print(f"Available tokens: {limiter.available_tokens}")
+
+    # Demonstrate rate limiting
+    for i in range(3):
+        limiter.acquire()
+        print(f"Request {i+1} acquired, tokens left: {limiter.available_tokens:.1f}")
+
+    print("\n✓ Rate limiter example complete")

examples/persistence/simple_db_agent.py

Lines changed: 7 additions & 12 deletions

@@ -26,23 +26,18 @@
 Messages persisted: 2
 """
 
-from praisonaiagents import Agent
-from praisonai.db import PostgresDB
-
-# Create database adapter
-db = PostgresDB(
-    host="localhost",
-    port=5432,
-    database="praisonai",
-    user="postgres",
-    password="praison123"
+from praisonaiagents import Agent, db
+
+# Create database adapter (simplified import)
+db_instance = db(
+    database_url="postgresql://postgres:praison123@localhost:5432/praisonai"
 )
 
 # Create agent with persistence enabled
 agent = Agent(
     name="Assistant",
     instructions="You are a helpful assistant. Keep responses brief.",
-    db=db,
+    db=db_instance,
     session_id="demo-session-001",  # Same session_id = resume conversation
     verbose=False
 )
@@ -57,7 +52,7 @@
 print(f"Messages in history: {len(agent.chat_history)}")
 
 # Close DB connection
-db.close()
+db_instance.close()
 
 print("\n=== Session persisted! ===")
 print("Run again with same session_id to resume.")
