diff --git a/python_a2a/__init__.py b/python_a2a/__init__.py
index 4924650..3e41a74 100644
--- a/python_a2a/__init__.py
+++ b/python_a2a/__init__.py
@@ -21,7 +21,7 @@
A2AValidationError,
A2AAuthenticationError,
A2AConfigurationError,
- A2AStreamingError
+ A2AStreamingError,
)
# All core models - these should be available with basic install
@@ -35,7 +35,7 @@
FunctionCallContent,
FunctionResponseContent,
ErrorContent,
- Metadata
+ Metadata,
)
from .models.agent import AgentCard, AgentSkill
from .models.task import Task, TaskStatus, TaskState
@@ -58,7 +58,7 @@
run_registry,
DiscoveryClient,
enable_discovery,
- RegistryAgent
+ RegistryAgent,
)
# Utility functions
@@ -66,13 +66,13 @@
format_message_as_text,
format_conversation_as_text,
pretty_print_message,
- pretty_print_conversation
+ pretty_print_conversation,
)
from .utils.validation import (
validate_message,
validate_conversation,
is_valid_message,
- is_valid_conversation
+ is_valid_conversation,
)
from .utils.conversion import (
create_text_message,
@@ -80,7 +80,7 @@
create_function_response,
create_error_message,
format_function_params,
- conversation_to_messages
+ conversation_to_messages,
)
from .utils.decorators import skill, agent
@@ -96,7 +96,7 @@
ConditionStep,
ParallelStep,
ParallelBuilder,
- StepType
+ StepType,
)
# MCP integration
@@ -106,7 +106,7 @@
MCPConnectionError,
MCPTimeoutError,
MCPToolError,
- MCPTools
+ MCPTools,
)
from .mcp.agent import MCPEnabledAgent
from .mcp.fastmcp import (
@@ -116,12 +116,9 @@
error_response,
image_response,
multi_content_response,
- ContentType as MCPContentType
-)
-from .mcp.integration import (
- FastMCPAgent,
- A2AMCPAgent
+ ContentType as MCPContentType,
)
+from .mcp.integration import FastMCPAgent, A2AMCPAgent
from .mcp.proxy import create_proxy_server
from .mcp.transport import create_fastapi_app
@@ -130,7 +127,7 @@
to_a2a_server,
to_langchain_agent,
to_mcp_server,
- to_langchain_tool
+ to_langchain_tool,
)
from .langchain.exceptions import (
LangChainIntegrationError,
@@ -138,15 +135,25 @@
LangChainToolConversionError,
MCPToolConversionError,
LangChainAgentConversionError,
- A2AAgentConversionError
+ A2AAgentConversionError,
+)
+
+HAS_LANGCHAIN = (
+ importlib.util.find_spec("langchain") is not None
+ or importlib.util.find_spec("langchain_core") is not None
)
-HAS_LANGCHAIN = importlib.util.find_spec("langchain") is not None or importlib.util.find_spec("langchain_core") is not None
# Optional integration with LLM providers
# These might not be available if the specific provider packages are not installed
try:
- from .client.llm import OpenAIA2AClient, AnthropicA2AClient
- from .server.llm import OpenAIA2AServer, AnthropicA2AServer, BedrockA2AServer
+ from .client.llm import OpenAIA2AClient, OllamaA2AClient, AnthropicA2AClient
+ from .server.llm import (
+ OpenAIA2AServer,
+ OllamaA2AServer,
+ AnthropicA2AServer,
+ BedrockA2AServer,
+ )
+
HAS_LLM_CLIENTS = True
HAS_LLM_SERVERS = True
except ImportError:
@@ -156,6 +163,7 @@
# Optional doc generation
try:
from .docs import generate_a2a_docs, generate_html_docs
+
HAS_DOCS = True
except ImportError:
HAS_DOCS = False
@@ -163,6 +171,7 @@
# Optional CLI
try:
from .cli import main as cli_main
+
HAS_CLI = True
except ImportError:
HAS_CLI = False
@@ -170,6 +179,7 @@
# Agent Flow import - optional but integrated by default
try:
from .agent_flow import models, engine, server, storage
+
HAS_AGENT_FLOW_IMPORT = True
except ImportError:
HAS_AGENT_FLOW_IMPORT = False
@@ -193,130 +203,123 @@
# Define __all__ for explicit exports
__all__ = [
# Version
- '__version__',
-
+ "__version__",
# Exceptions
- 'A2AError',
- 'A2AImportError',
- 'A2AConnectionError',
- 'A2AResponseError',
- 'A2ARequestError',
- 'A2AValidationError',
- 'A2AAuthenticationError',
- 'A2AConfigurationError',
- 'A2AStreamingError',
-
+ "A2AError",
+ "A2AImportError",
+ "A2AConnectionError",
+ "A2AResponseError",
+ "A2ARequestError",
+ "A2AValidationError",
+ "A2AAuthenticationError",
+ "A2AConfigurationError",
+ "A2AStreamingError",
# Models
- 'BaseModel',
- 'Message',
- 'MessageRole',
- 'Conversation',
- 'ContentType',
- 'TextContent',
- 'FunctionParameter',
- 'FunctionCallContent',
- 'FunctionResponseContent',
- 'ErrorContent',
- 'Metadata',
- 'AgentCard',
- 'AgentSkill',
- 'Task',
- 'TaskStatus',
- 'TaskState',
-
+ "BaseModel",
+ "Message",
+ "MessageRole",
+ "Conversation",
+ "ContentType",
+ "TextContent",
+ "FunctionParameter",
+ "FunctionCallContent",
+ "FunctionResponseContent",
+ "ErrorContent",
+ "Metadata",
+ "AgentCard",
+ "AgentSkill",
+ "Task",
+ "TaskStatus",
+ "TaskState",
# Client
- 'BaseA2AClient',
- 'A2AClient',
- 'AgentNetwork',
- 'AIAgentRouter',
- 'StreamingClient',
-
+ "BaseA2AClient",
+ "A2AClient",
+ "AgentNetwork",
+ "AIAgentRouter",
+ "StreamingClient",
# Server
- 'BaseA2AServer',
- 'A2AServer',
- 'run_server',
-
+ "BaseA2AServer",
+ "A2AServer",
+ "run_server",
# Discovery
- 'AgentRegistry',
- 'run_registry',
- 'DiscoveryClient',
- 'enable_discovery',
- 'RegistryAgent',
-
+ "AgentRegistry",
+ "run_registry",
+ "DiscoveryClient",
+ "enable_discovery",
+ "RegistryAgent",
# Utilities
- 'format_message_as_text',
- 'format_conversation_as_text',
- 'pretty_print_message',
- 'pretty_print_conversation',
- 'validate_message',
- 'validate_conversation',
- 'is_valid_message',
- 'is_valid_conversation',
- 'create_text_message',
- 'create_function_call',
- 'create_function_response',
- 'create_error_message',
- 'format_function_params',
- 'conversation_to_messages',
- 'skill',
- 'agent',
-
+ "format_message_as_text",
+ "format_conversation_as_text",
+ "pretty_print_message",
+ "pretty_print_conversation",
+ "validate_message",
+ "validate_conversation",
+ "is_valid_message",
+ "is_valid_conversation",
+ "create_text_message",
+ "create_function_call",
+ "create_function_response",
+ "create_error_message",
+ "format_function_params",
+ "conversation_to_messages",
+ "skill",
+ "agent",
# Workflow
- 'Flow',
- 'WorkflowContext',
- 'WorkflowStep',
- 'QueryStep',
- 'AutoRouteStep',
- 'FunctionStep',
- 'ConditionalBranch',
- 'ConditionStep',
- 'ParallelStep',
- 'ParallelBuilder',
- 'StepType',
-
+ "Flow",
+ "WorkflowContext",
+ "WorkflowStep",
+ "QueryStep",
+ "AutoRouteStep",
+ "FunctionStep",
+ "ConditionalBranch",
+ "ConditionStep",
+ "ParallelStep",
+ "ParallelBuilder",
+ "StepType",
# MCP
- 'MCPClient',
- 'MCPError',
- 'MCPConnectionError',
- 'MCPTimeoutError',
- 'MCPToolError',
- 'MCPTools',
- 'MCPEnabledAgent',
- 'FastMCP',
- 'MCPResponse',
- 'text_response',
- 'error_response',
- 'image_response',
- 'multi_content_response',
- 'MCPContentType',
- 'FastMCPAgent',
- 'A2AMCPAgent',
- 'create_proxy_server',
- 'create_fastapi_app',
-
+ "MCPClient",
+ "MCPError",
+ "MCPConnectionError",
+ "MCPTimeoutError",
+ "MCPToolError",
+ "MCPTools",
+ "MCPEnabledAgent",
+ "FastMCP",
+ "MCPResponse",
+ "text_response",
+ "error_response",
+ "image_response",
+ "multi_content_response",
+ "MCPContentType",
+ "FastMCPAgent",
+ "A2AMCPAgent",
+ "create_proxy_server",
+ "create_fastapi_app",
# LangChain Integration (always included)
- 'to_a2a_server',
- 'to_langchain_agent',
- 'to_mcp_server',
- 'to_langchain_tool',
- 'LangChainIntegrationError',
- 'LangChainNotInstalledError',
- 'LangChainToolConversionError',
- 'MCPToolConversionError',
- 'LangChainAgentConversionError',
- 'A2AAgentConversionError',
+ "to_a2a_server",
+ "to_langchain_agent",
+ "to_mcp_server",
+ "to_langchain_tool",
+ "LangChainIntegrationError",
+ "LangChainNotInstalledError",
+ "LangChainToolConversionError",
+ "MCPToolConversionError",
+ "LangChainAgentConversionError",
+ "A2AAgentConversionError",
]
# Conditionally add LLM clients/servers
if HAS_LLM_CLIENTS:
- __all__.extend(['OpenAIA2AClient', 'AnthropicA2AClient'])
+ __all__.extend(["OpenAIA2AClient", "OllamaA2AClient", "AnthropicA2AClient"])
if HAS_LLM_SERVERS:
- __all__.extend(['OpenAIA2AServer', 'AnthropicA2AServer', 'BedrockA2AServer'])
+ __all__.extend(
+ ["OpenAIA2AServer", "OllamaA2AServer", "AnthropicA2AServer", "BedrockA2AServer"]
+ )
# Conditionally add docs
if HAS_DOCS:
- __all__.extend(['generate_a2a_docs', 'generate_html_docs'])
+ __all__.extend(["generate_a2a_docs", "generate_html_docs"])
# Conditionally add CLI
if HAS_CLI:
- __all__.append('cli_main')
\ No newline at end of file
+ __all__.append("cli_main")
diff --git a/python_a2a/agent_flow/server/static/js/flow-builder.js b/python_a2a/agent_flow/server/static/js/flow-builder.js
index 0e8d152..9605036 100644
--- a/python_a2a/agent_flow/server/static/js/flow-builder.js
+++ b/python_a2a/agent_flow/server/static/js/flow-builder.js
@@ -7,12 +7,12 @@ document.addEventListener('DOMContentLoaded', function() {
const connectionTooltip = document.getElementById('connection-tooltip');
const emptyCanvasHelp = document.getElementById('empty-canvas-help');
- // Mark non-OpenAI agents and all tools as "coming soon"
+    // Mark agents other than OpenAI/Ollama, and all tools, as "coming soon"
const markComingSoonFeatures = () => {
- // Apply to agent types (except OpenAI)
+ // Apply to agent types (except OpenAI & Ollama)
const agentCards = document.querySelectorAll('.agent-card');
agentCards.forEach(card => {
- if (card.getAttribute('data-type') !== 'openai') {
+ if (!['openai', 'ollama'].includes(card.getAttribute('data-type'))) {
card.classList.add('feature-showcase');
// Add the badge with icon
@@ -91,7 +91,7 @@ document.addEventListener('DOMContentLoaded', function() {
const newAgentTypeSelect = document.getElementById('new-agent-type');
if (newAgentTypeSelect) {
Array.from(newAgentTypeSelect.options).forEach(option => {
- if (option.value !== 'openai') {
+ if (!['openai', 'ollama'].includes(option.value)) {
option.disabled = true;
option.style.color = 'rgba(160, 160, 160, 0.6)';
option.style.fontStyle = 'italic';
@@ -479,6 +479,9 @@ document.addEventListener('DOMContentLoaded', function() {
case 'openai':
typeName = 'OpenAI';
break;
+ case 'ollama':
+ typeName = 'Ollama';
+ break;
case 'anthropic':
typeName = 'Claude';
break;
@@ -1413,6 +1416,11 @@ document.addEventListener('DOMContentLoaded', function() {
document.getElementById('openai-api-key').value = nodeData.config.apiKey || '';
document.getElementById('openai-model').value = nodeData.config.model || 'gpt-4o';
document.getElementById('openai-system-message').value = nodeData.config.systemMessage || 'You are a helpful AI assistant.';
+ } else if (nodeData.subType === 'ollama') {
+ document.getElementById('ollama-api-url').value = nodeData.config.apiUrl || '';
+ document.getElementById('ollama-api-key').value = nodeData.config.apiKey || '';
+ document.getElementById('ollama-model').value = nodeData.config.model || 'deepseek-r1:latest';
+ document.getElementById('ollama-system-message').value = nodeData.config.systemMessage || 'You are a helpful AI assistant.';
} else if (nodeData.subType === 'anthropic') {
document.getElementById('anthropic-api-key').value = nodeData.config.apiKey || '';
document.getElementById('anthropic-model').value = nodeData.config.model || 'claude-3-opus';
@@ -2599,6 +2607,31 @@ document.addEventListener('DOMContentLoaded', function() {
selectedNode.config.model = model;
selectedNode.config.systemMessage = systemMessage;
}
+ } else if (selectedNode.subType === 'ollama') {
+ const apiUrl = document.getElementById('ollama-api-url').value;
+ const apiKey = document.getElementById('ollama-api-key').value;
+ const model = document.getElementById('ollama-model').value;
+ const systemMessage = document.getElementById('ollama-system-message').value;
+
+ if (!apiUrl || apiUrl.trim() === '') {
+                validationErrors.push('Ollama API URL is required');
+ }
+
+ if (!apiKey || apiKey.trim() === '') {
+ validationErrors.push('Ollama API Key is required');
+ }
+
+ if (!model) {
+ validationErrors.push('Please define a model');
+ }
+
+ // Store values if validation passes
+ if (validationErrors.length === 0) {
+ selectedNode.config.apiUrl = apiUrl;
+ selectedNode.config.apiKey = apiKey;
+ selectedNode.config.model = model;
+ selectedNode.config.systemMessage = systemMessage;
+ }
} else if (selectedNode.subType === 'anthropic') {
const apiKey = document.getElementById('anthropic-api-key').value;
const model = document.getElementById('anthropic-model').value;
@@ -2824,6 +2857,8 @@ document.addEventListener('DOMContentLoaded', function() {
if (selectedNode.subType === 'openai' && selectedNode.config.model) {
modelText = selectedNode.config.model;
+ } else if (selectedNode.subType === 'ollama' && selectedNode.config.model) {
+ modelText = selectedNode.config.model;
} else if (selectedNode.subType === 'anthropic' && selectedNode.config.model) {
modelText = selectedNode.config.model;
} else if (selectedNode.subType === 'bedrock' && selectedNode.config.model) {
@@ -2891,6 +2926,21 @@ document.addEventListener('DOMContentLoaded', function() {
`;
+ } else if (agentType === 'ollama') {
+ configHtml = `
+
+
+
+
+
+
+
+
+
+
+
+
+ `;
} else if (agentType === 'anthropic') {
configHtml = `
+
+
+
+
Ollama Agent
+
Ollama models
+
+
@@ -203,6 +210,26 @@
Configure Agent
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
@@ -362,6 +389,7 @@
Create New Agent