Agent OS provides one-line integrations with popular AI agent frameworks.
Don't rewrite your code. Just wrap it.
Every integration follows the same pattern:
- Create a kernel with your policy
- Wrap your existing agent
- Continue using your agent as normal
All operations now go through Agent OS governance.
| Framework | Adapter | Status |
|---|---|---|
| LangChain | LangChainKernel | ✅ Stable |
| LlamaIndex | LlamaIndexKernel | ✅ Stable |
| CrewAI | CrewAIKernel | ✅ Stable |
| AutoGen | AutoGenKernel | ✅ Stable |
| OpenAI Assistants | OpenAIKernel | ✅ Stable |
| OpenAI Agents SDK | OpenAIAgentsKernel | ✅ Stable |
| Semantic Kernel | SemanticKernelWrapper | ✅ Stable |
from langchain.chat_models import ChatOpenAI
from langchain.agents import create_openai_functions_agent
from agent_os.integrations import LangChainKernel, GovernancePolicy
# Create your LangChain agent
llm = ChatOpenAI(model="gpt-4")
agent = create_openai_functions_agent(llm, tools, prompt)
# Wrap with Agent OS
kernel = LangChainKernel(policy=GovernancePolicy(
max_tool_calls=10,
blocked_patterns=["password", "secret"]
))
governed_agent = kernel.wrap(agent)
# Use as normal - now governed!
result = governed_agent.invoke({"input": "Analyze this data"})

Supported methods:
- invoke() / ainvoke() - Single execution
- run() / arun() - Agent execution
- batch() / abatch() - Batch execution
- stream() / astream() - Streaming
from llama_index.core import VectorStoreIndex
from agent_os.integrations import LlamaIndexKernel, GovernancePolicy
# Create your LlamaIndex query engine
index = VectorStoreIndex.from_documents(documents)
query_engine = index.as_query_engine()
# Wrap with Agent OS
kernel = LlamaIndexKernel(policy=GovernancePolicy(
max_tool_calls=20,
blocked_patterns=["password", "secret"]
))
governed_engine = kernel.wrap(query_engine)
# Use as normal - now governed!
result = governed_engine.query("What are the key findings?")

Supported methods:
- query() / aquery() - Query execution
- chat() / achat() - Chat engine
- stream_chat() - Streaming chat
- retrieve() - Retriever
Signal handling:
kernel.signal("llamaindex-engine-id", "SIGSTOP") # Pause
kernel.signal("llamaindex-engine-id", "SIGCONT") # Resume
kernel.signal("llamaindex-engine-id", "SIGKILL") # Terminate

from openai import OpenAI
from agent_os.integrations import OpenAIKernel, GovernancePolicy
client = OpenAI()
# Create your assistant
assistant = client.beta.assistants.create(
name="Trading Bot",
instructions="You analyze market data",
model="gpt-4-turbo",
tools=[{"type": "code_interpreter"}]
)
# Wrap with Agent OS
kernel = OpenAIKernel(policy=GovernancePolicy(
max_tokens=10000,
allowed_tools=["code_interpreter"], # Only allow code interpreter
max_tool_calls=5
))
governed = kernel.wrap_assistant(assistant, client)
# Create thread and run - now governed!
thread = governed.create_thread()
governed.add_message(thread.id, "Analyze AAPL stock")
run = governed.run(thread.id)

Features:
- Token limit enforcement
- Tool call validation
- Real-time run monitoring
- SIGKILL support (cancel run)
- Full audit trail
Signal handling:
# Cancel a run (SIGKILL)
governed.sigkill(thread.id, run.id)

from semantic_kernel import Kernel
from agent_os.integrations import SemanticKernelWrapper, GovernancePolicy
# Create your Semantic Kernel
sk = Kernel()
sk.add_plugin(MyPlugin(), "my_plugin")
# Wrap with Agent OS
wrapper = SemanticKernelWrapper(policy=GovernancePolicy(
allowed_tools=["my_plugin.*"], # Allow all functions in plugin
blocked_patterns=["password"]
))
governed = wrapper.wrap(sk)
# Use as normal - now governed!
result = await governed.invoke("my_plugin", "analyze", input="data")

Supported operations:
- invoke() - Function invocation
- add_plugin() - Plugin management
- memory_save() / memory_search() - Memory operations
- invoke_prompt() - Direct chat completion
- create_plan() - Planner with step validation
Signal handling:
# Pause execution
governed.sigstop()
# Resume execution
governed.sigcont()
# Terminate
governed.sigkill()

from crewai import Crew, Agent, Task
from agent_os.integrations import CrewAIKernel, GovernancePolicy
# Create your crew
agent = Agent(role="Analyst", goal="Analyze data")
task = Task(description="Analyze market trends", agent=agent)
crew = Crew(agents=[agent], tasks=[task])
# Wrap with Agent OS
kernel = CrewAIKernel(policy=GovernancePolicy(
timeout_seconds=600,
max_tool_calls=50
))
governed_crew = kernel.wrap(crew)
# Kickoff - now governed!
result = governed_crew.kickoff()

from autogen import AssistantAgent, UserProxyAgent
from agent_os.integrations import AutoGenKernel, GovernancePolicy
# Create your agents
assistant = AssistantAgent("assistant", llm_config={"model": "gpt-4"})
user_proxy = UserProxyAgent("user_proxy", human_input_mode="NEVER")
# Wrap with Agent OS
kernel = AutoGenKernel(policy=GovernancePolicy(
max_tokens=50000,
confidence_threshold=0.9
))
kernel.govern(assistant, user_proxy)
# Chat - now governed!
user_proxy.initiate_chat(assistant, message="Solve this problem")

Signal handling:
kernel.signal("assistant", "SIGSTOP") # Pause agent
kernel.signal("assistant", "SIGCONT") # Resume agent
kernel.signal("assistant", "SIGKILL") # Unwrap agent
# Restore original ungoverned agent
kernel.unwrap(assistant)

from agents import Agent, Runner
from agent_os.integrations.openai_agents_sdk import OpenAIAgentsKernel
# Create your OpenAI Agent
agent = Agent(name="analyst", instructions="You analyze data")
# Wrap with Agent OS governance
kernel = OpenAIAgentsKernel(policy={
"blocked_patterns": ["password", "secret"],
"allowed_tools": ["file_search", "code_interpreter"],
"max_tool_calls": 10,
})
# Add tool guards
@kernel.tool_guard
async def safe_query(sql: str):
return db.execute(sql)
# Use GovernedRunner for automatic governance
governed = kernel.wrap_runner(Runner)
result = await governed.run(agent, "Analyze Q4 revenue")
# Access audit log
for entry in kernel.get_audit_log():
print(f"{entry['event']}: {entry['timestamp']}")

All integrations use the same GovernancePolicy class:
from agent_os.integrations import GovernancePolicy
policy = GovernancePolicy(
# Limits
max_tokens=10000, # Token limit
max_tool_calls=20, # Tool call limit
timeout_seconds=300, # Timeout
# Permissions
allowed_tools=["safe_tool"], # Whitelist tools
blocked_patterns=["secret"], # Block content
require_human_approval=False, # Human-in-loop
# Thresholds
confidence_threshold=0.8, # Min confidence
drift_threshold=0.15, # CMVK drift
# Audit
log_all_calls=True, # Full logging
checkpoint_frequency=5 # Checkpoint every N calls
)

# Strict policy (production)
strict = GovernancePolicy(
max_tokens=5000,
max_tool_calls=5,
blocked_patterns=["password", "secret", "api_key", "token"],
confidence_threshold=0.95
)
# Permissive policy (development)
permissive = GovernancePolicy(
max_tokens=100000,
max_tool_calls=100,
confidence_threshold=0.5
)

from agent_os.integrations.langchain_adapter import PolicyViolationError
try:
result = governed_agent.invoke(input_data)
except PolicyViolationError as e:
print(f"Policy blocked: {e}")
# Handle violation (log, alert, fallback)

# Get execution context
ctx = governed.get_context()
print(f"Tool calls: {ctx.call_count}")
print(f"Checkpoints: {ctx.checkpoints}")
# For OpenAI Assistants
usage = governed.get_token_usage()
print(f"Tokens used: {usage['total_tokens']}")
# For Semantic Kernel
audit = governed.get_audit_log()
print(f"Functions invoked: {audit['functions_invoked']}")

| Operation | Pre-Check | Post-Check | Signals |
|---|---|---|---|
| LLM call | ✅ | ✅ | ✅ |
| Tool use | ✅ | ✅ | ✅ |
| Memory access | ✅ | - | ✅ |
| File operations | ✅ | - | ✅ |
| Network calls | ✅ | ✅ | ✅ |
Pre-Check:
- Blocked patterns in input
- Tool allowlist
- Call count limit
- Timeout check
Post-Check:
- Output validation
- Checkpoint creation
- Audit logging
Signals:
- SIGSTOP - Pause execution
- SIGCONT - Resume execution
- SIGKILL - Terminate immediately