Commit f9a95a3

Merge pull request #4 from signnow/examples-from-main

add examples

2 parents: 17fc467 + 1877262

File tree

6 files changed: +176 lines, -20 lines

- .env.example
- README.md
- examples/langchain/langchain_example.py
- examples/llamaindex/llamaindex_example.py
- examples/smolagents/stdio_demo.py
- pyproject.toml

.env.example

Lines changed: 5 additions & 0 deletions

```diff
@@ -21,3 +21,8 @@ ALLOWED_REDIRECTS=http://localhost, http://127.0.0.1, http://127.0.0.1:8000
 # OAuth RSA key configuration
 OAUTH_RSA_PRIVATE_PEM=your_rsa_private_key_here
 OAUTH_JWK_KID=mcp-dev-key
+
+# For examples
+LLM_API_HOST=https://api.openai.com/v1
+LLM_MODEL=gpt-4o-mini
+LLM_KEY=XXXXXXXXXXXXX
```
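These three `LLM_*` variables point the example scripts at an OpenAI-compatible endpoint. A minimal sketch of how they end up consumed, mirroring the dotenv loading pattern used by the examples later in this commit (the `print` lines here are illustrative only, not part of the examples):

```python
import os

from dotenv import dotenv_values, find_dotenv

# Load .env from the working directory or a parent, as the example
# scripts do, and merge the values into the process environment.
env_path = find_dotenv(usecwd=True)
if env_path:
    os.environ.update(dotenv_values(env_path))

print(os.getenv("LLM_API_HOST"))  # base URL of an OpenAI-compatible API
print(os.getenv("LLM_MODEL"))     # model name, e.g. gpt-4o-mini
print("key set:", os.getenv("LLM_KEY") is not None)
```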

README.md

Lines changed: 57 additions & 8 deletions

````diff
@@ -9,19 +9,39 @@ A Model Context Protocol (MCP) server that provides SignNow API integration capa
 - Python 3.11+ installed on your system
 - Environment variables configured
 
-### 1. Clone and Setup
+### 1. Setup Environment Variables
 
 ```bash
-git clone <your-repo>
-cd sn-mcp-server
-cp env.example .env
-# Edit .env file with your actual values
+# Create .env file with your SignNow credentials
+# You can copy from env.example if you have the source code
+# Or create .env file manually with required variables (see Environment Variables section below)
 ```
 
 ### 2. Install and Run
 
+#### Option A: Install from PyPI (Recommended)
+
+```bash
+# Install the package from PyPI
+pip install signnow-mcp-server
+
+# Run MCP server in standalone mode
+sn-mcp serve
+
+# Run HTTP server with MCP endpoints
+sn-mcp http
+
+# Run HTTP server on custom host/port
+sn-mcp http --host 127.0.0.1 --port 8080
+
+# Run HTTP server with auto-reload (for development)
+sn-mcp http --reload
+```
+
+#### Option B: Install from Source (Development)
+
 ```bash
-# Install the package
+# Install the package in development mode
 pip install -e .
 
 # Run MCP server in standalone mode
@@ -135,6 +155,8 @@ For production environments:
 **Security Note**: Never commit RSA private keys to version control. Always use environment variables or secure secret management systems.
 
 ## MCP Tools
+<details>
+<summary>Tools list</summary>
 
 The server exposes the following tools (brief purpose-oriented descriptions):
 
@@ -183,6 +205,33 @@ Returns a complete, normalized structure of a document or group, including field
 ### update_document_fields
 Prefills text fields in one or more individual documents (not groups). Use it to populate values before sending invites.
 
-## License
+</details>
+
+## Examples
+
+The `examples/` directory contains working examples of how to integrate the SignNow MCP Server with popular AI agent frameworks:
+
+- **[LangChain](examples/langchain/langchain_example.py)** - Integration with LangChain agents using `langchain-mcp-adapters`
+- **[LlamaIndex](examples/llamaindex/llamaindex_example.py)** - Integration with LlamaIndex agents using `llama-index-tools-mcp`
+- **[SmolAgents](examples/smolagents/stdio_demo.py)** - Integration with SmolAgents framework using native MCP support
+
+Each example demonstrates how to:
+- Start the MCP server as a subprocess
+- Convert MCP tools to framework-specific tool formats
+- Create agents that can use SignNow functionality
+- Handle environment variable configuration
+
+To run an example:
+```bash
+# Make sure you have the required dependencies installed
+pip install langchain-openai langchain-mcp-adapters  # for LangChain example
+pip install llama-index-tools-mcp                    # for LlamaIndex example
+pip install smolagents                               # for SmolAgents example
+
+# Set up your .env file with SignNow credentials and LLM configuration
+# Then run the example
+python examples/langchain/langchain_example.py
+python examples/llamaindex/llamaindex_example.py
+python examples/smolagents/stdio_demo.py
+```
 
-MIT License - see LICENSE file for details.
````
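The collapsed tools list added above can also be inspected without any agent framework. Below is a minimal sketch using the official `mcp` Python SDK (an assumed extra dependency, not part of this commit) to launch the server over stdio and print its tool names:

```python
import asyncio

from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client


async def main() -> None:
    # Spawn `sn-mcp serve` as a stdio subprocess and enumerate its tools.
    params = StdioServerParameters(command="sn-mcp", args=["serve"])
    async with stdio_client(params) as (read, write):
        async with ClientSession(read, write) as session:
            await session.initialize()
            result = await session.list_tools()
            for tool in result.tools:
                print(tool.name, "-", tool.description)


asyncio.run(main())
```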
examples/langchain/langchain_example.py

Lines changed: 53 additions & 0 deletions

```python
import asyncio
import os

from dotenv import dotenv_values, find_dotenv
from langchain.agents import AgentExecutor, create_openai_tools_agent
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_mcp_adapters.client import MultiServerMCPClient
from langchain_openai import ChatOpenAI


async def main() -> None:
    # Load .env (searching upward from the current directory) into os.environ.
    env_path = find_dotenv(usecwd=True)
    if env_path:
        os.environ.update(dotenv_values(env_path))

    # MCP server as a subprocess (here: sn-mcp serve over stdio)
    client = MultiServerMCPClient(
        {
            "sn": {
                "transport": "stdio",
                "command": "sn-mcp",
                "args": ["serve"],
                # "cwd": "/path/to/dir",
                # "env": {"VAR": "value"},
                # "allowed_tools": ["list_templates", "get_template"],
            }
        }
    )

    tools = await client.get_tools()  # MCP → LangChain tools

    llm = ChatOpenAI(
        model=os.getenv("LLM_MODEL"),
        api_key=os.getenv("LLM_KEY"),
        base_url=os.getenv("LLM_API_HOST"),
        temperature=0,
    )

    prompt = ChatPromptTemplate.from_messages(
        [
            ("system", "Be helpful."),
            ("human", "{input}"),
            MessagesPlaceholder("agent_scratchpad"),
        ]
    )
    agent = create_openai_tools_agent(llm, tools, prompt)
    executor = AgentExecutor(agent=agent, tools=tools, verbose=True)

    out = await executor.ainvoke({"input": "Show me the list of templates and their names"})
    print(out.get("output", out))


asyncio.run(main())
```
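A note on the client setup: `MultiServerMCPClient` takes a mapping of named server configs, and `get_tools()` aggregates tools across all of them, so additional MCP servers could presumably be registered next to `"sn"` without touching the agent code.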
examples/llamaindex/llamaindex_example.py

Lines changed: 40 additions & 0 deletions

```python
import asyncio
import os

from dotenv import dotenv_values, find_dotenv
from llama_index.core.agent.workflow import FunctionAgent
from llama_index.llms.openai import OpenAI
from llama_index.tools.mcp import BasicMCPClient, McpToolSpec


async def main() -> None:
    # Load .env (searching upward from the current directory) into os.environ.
    env_path = find_dotenv(usecwd=True)
    if env_path:
        os.environ.update(dotenv_values(env_path))

    # 1) Start the MCP server as a separate process via stdio
    mcp_client = BasicMCPClient("sn-mcp", args=["serve"])  # inherits os.environ

    # Alternatives (if a remote server is needed):
    # mcp_client = BasicMCPClient("http://host:port/sse")  # SSE
    # mcp_client = BasicMCPClient("https://host/mcp")      # Streamable HTTP

    # 2) Auto-convert MCP tools into FunctionTools (descriptions/JSON schemas
    #    are pulled from the server)
    spec = McpToolSpec(client=mcp_client, include_resources=False)  # allowed_tools=[...] if needed
    tools = await spec.to_tool_list_async()

    # 3) LlamaIndex agent
    agent = FunctionAgent(
        name="MCP Agent",
        description="Agent with tools from sn-mcp",
        llm=OpenAI(model=os.environ["LLM_MODEL"], api_base=os.environ["LLM_API_HOST"], api_key=os.environ["LLM_KEY"]),
        tools=tools,
        system_prompt="Be helpful.",
    )

    resp = await agent.run("Show me the list of templates and their names")
    print(resp)


asyncio.run(main())
```
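One detail worth flagging: `include_resources=False` limits the spec to the server's MCP tools; if I read the adapter correctly, setting it to `True` would additionally expose the server's MCP resources through the same tool list.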

examples/smolagents/stdio_demo.py

Lines changed: 8 additions & 10 deletions

```diff
@@ -9,21 +9,19 @@
 )
 
 
-def main():
-    # Model: HF Inference providers; just set HF_TOKEN for private/gated models
-    model = OpenAIServerModel(
-        model_id="gpt-4o-mini",
-        api_base="https://api.openai.com/v1",
-        api_key=os.environ["OPENAI_API_KEY"],
-        # organization="org_...",  # optional
-        # project="proj_...",  # optional
-    )
-
+def main() -> None:
     env_path = find_dotenv(usecwd=True)  # searches upward from the current cwd
     env = dict(os.environ)
     if env_path:
         env.update(dotenv_values(env_path))
 
+    model = OpenAIServerModel(
+        model_id=os.environ["LLM_MODEL"],  # e.g. "gpt-4o-mini"
+        api_base=os.environ["LLM_API_HOST"],
+        api_key=os.environ["LLM_KEY"],
+        # organization="org_...",  # optional
+        # project="proj_...",  # optional
+    )
     # Start the local MCP server via the CLI (stdio mode)
     params = StdioServerParameters(command="sn-mcp", args=["serve"], env=env)  # pass env through to the subprocess
```
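The hunk above shows only the changed lines of `stdio_demo.py`; the rest of the file is not part of this diff. For orientation, here is a self-contained sketch of what the full flow presumably looks like, using smolagents' `ToolCollection.from_mcp` (an assumption suggested by the `mcpadapt` dependency in pyproject.toml; the actual file may wire things differently):

```python
import os

from dotenv import dotenv_values, find_dotenv
from mcp import StdioServerParameters
from smolagents import CodeAgent, OpenAIServerModel, ToolCollection

# Build the environment and model exactly as the hunk above does.
env = dict(os.environ)
env_path = find_dotenv(usecwd=True)
if env_path:
    env.update(dotenv_values(env_path))

model = OpenAIServerModel(
    model_id=env["LLM_MODEL"],
    api_base=env["LLM_API_HOST"],
    api_key=env["LLM_KEY"],
)
params = StdioServerParameters(command="sn-mcp", args=["serve"], env=env)

# ToolCollection.from_mcp spawns the stdio server and adapts its tools
# for smolagents; the context manager shuts the subprocess down on exit.
with ToolCollection.from_mcp(params, trust_remote_code=True) as tool_collection:
    agent = CodeAgent(tools=[*tool_collection.tools], model=model)
    agent.run("Show me the list of templates and their names")
```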

pyproject.toml

Lines changed: 13 additions & 2 deletions

```diff
@@ -29,6 +29,18 @@ smolagents = [
     "smolagents>=1.20",
     "mcpadapt>=0.1.11",
 ]
+llamaindex = [
+    "llama-index-tools-mcp>=0.1.0",
+    "llama-index-core>=0.10.0",
+    "llama-index-llms-openai>=0.1.0",
+    "python-dotenv>=1.0.0",
+]
+langchain = [
+    "langchain>=0.1.0",
+    "langchain-openai>=0.1.0",
+    "langchain-mcp-adapters>=0.1.0",
+    "langchain-core>=0.1.0",
+]
 test = [
     "pytest>=7.0",
     "pytest-asyncio>=0.21",
@@ -54,11 +66,10 @@ warn_unused_ignores = true
 warn_redundant_casts = true
 warn_return_any = true
 disallow_untyped_defs = true
-plugins = ["pydantic.mypy"]
 
 [tool.hatch.version]
 source = "vcs"
 tag-pattern = "^v?(?P<version>.*)$"
 
 [tool.hatch.build.hooks.vcs]
-version-file = "src/sn_mcp_server/_version.py"
+version-file = "src/sn_mcp_server/_version.py"
```

The final removed/re-added pair is textually identical; it likely only adds a trailing newline at the end of the file.
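With these extras defined, each example's dependencies can presumably be installed directly from the package, e.g. `pip install "signnow-mcp-server[langchain]"`, `pip install "signnow-mcp-server[llamaindex]"`, or `pip install "signnow-mcp-server[smolagents]"` (assuming the published distribution includes the extras), rather than installing the libraries one by one as the README shows.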
