Skip to content

Commit d270280

Browse files
cauchyturingclaude authored and committed
feat: Docker mcp-core deployment + Smithery one-click install (Path B)
Make pipeline torch-optional so mcp-core Docker (1.52GB, no torch) works: - 6 files: wrap `import torch` in try/except, guard cuda checks - Lazy OllamaClient import (requests not in core deps) - Remove dead distutils/sympy imports (Python 3.12 compat) - Fix Dockerfiles: healthcheck start-period, remove LaTeX from mcp-full - Add .dockerignore (5.98GB → 1.52GB image) - Fix smithery.yaml: PYTHONPATH, llmModel config - Clean requirements_mcp_core.txt - Add GitHub Actions CI: tests + Docker build + smoke test All 4 tools verified in container: discover, inspect_graph, diagnose_data, run_algorithm. Host tests: 48 passed. Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
1 parent fb4640b commit d270280

File tree

15 files changed

+222
-47
lines changed

15 files changed

+222
-47
lines changed

.dockerignore

Lines changed: 44 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,44 @@
1+
# Version control
2+
.git
3+
.gitmodules
4+
5+
# Virtual environments
6+
.venv
7+
venv
8+
env
9+
10+
# IDE / editor
11+
.vscode
12+
.idea
13+
*.swp
14+
*.swo
15+
16+
# Data files (users provide via CSV)
17+
data/
18+
output/
19+
20+
# Build artifacts
21+
__pycache__
22+
*.pyc
23+
*.pyo
24+
*.egg-info
25+
dist/
26+
build/
27+
*.whl
28+
29+
# Docs and plans (not needed at runtime)
30+
docs/
31+
*.md
32+
!README.md
33+
34+
# CI/CD configs
35+
.github/
36+
.claude/
37+
38+
# Misc
39+
.env
40+
.env.*
41+
*.log
42+
.pytest_cache
43+
.coverage
44+
htmlcov/

.github/workflows/mcp-ci.yml

Lines changed: 90 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,90 @@
1+
name: MCP CI
2+
3+
on:
4+
push:
5+
branches: [main, feat/mcp-*]
6+
paths:
7+
- 'causal_copilot/mcp/**'
8+
- 'causal_discovery/**'
9+
- 'tests/test_mcp*.py'
10+
- 'Dockerfile.mcp-*'
11+
- 'requirements_mcp_core.txt'
12+
- '.github/workflows/mcp-ci.yml'
13+
pull_request:
14+
branches: [main]
15+
paths:
16+
- 'causal_copilot/mcp/**'
17+
- 'causal_discovery/**'
18+
- 'tests/test_mcp*.py'
19+
- 'Dockerfile.mcp-*'
20+
- 'requirements_mcp_core.txt'
21+
22+
jobs:
23+
test:
24+
runs-on: ubuntu-latest
25+
steps:
26+
- uses: actions/checkout@v4
27+
with:
28+
submodules: recursive
29+
30+
- uses: actions/setup-python@v5
31+
with:
32+
python-version: '3.12'
33+
cache: pip
34+
35+
- name: Install dependencies
36+
run: |
37+
pip install -r requirements_mcp_core.txt
38+
pip install pytest
39+
40+
- name: Run MCP unit tests
41+
env:
42+
PYTHONPATH: ${{ github.workspace }}:${{ github.workspace }}/externals:${{ github.workspace }}/externals/causal-learn
43+
run: |
44+
pytest tests/test_mcp.py tests/test_mcp_golden.py tests/test_mcp_integration.py tests/test_mcp_resources.py -x -q
45+
46+
docker:
47+
runs-on: ubuntu-latest
48+
needs: test
49+
steps:
50+
- uses: actions/checkout@v4
51+
with:
52+
submodules: recursive
53+
54+
- name: Build mcp-core image
55+
run: docker build -f Dockerfile.mcp-core -t causal-copilot-mcp:core .
56+
57+
- name: Verify image starts
58+
run: |
59+
docker run -d --name mcp-test -p 8000:8000 causal-copilot-mcp:core
60+
# Poll until MCP server responds (up to 60s)
61+
for i in $(seq 1 12); do
62+
SESSION_ID=$(curl -sf -D /dev/stderr \
63+
-X POST http://localhost:8000/mcp \
64+
-H 'Content-Type: application/json' \
65+
-H 'Accept: application/json, text/event-stream' \
66+
-d '{"jsonrpc":"2.0","id":0,"method":"initialize","params":{"protocolVersion":"2025-03-26","capabilities":{},"clientInfo":{"name":"ci","version":"0.1"}}}' \
67+
2>&1 | grep -oP 'mcp-session-id: \K\S+') && break || sleep 5
68+
done
69+
echo "Session: $SESSION_ID"
70+
test -n "$SESSION_ID" || (docker logs mcp-test && exit 1)
71+
72+
- name: Smoke test discover tool
73+
run: |
74+
SESSION_ID=$(curl -sf -D /dev/stderr \
75+
-X POST http://localhost:8000/mcp \
76+
-H 'Content-Type: application/json' \
77+
-H 'Accept: application/json, text/event-stream' \
78+
-d '{"jsonrpc":"2.0","id":0,"method":"initialize","params":{"protocolVersion":"2025-03-26","capabilities":{},"clientInfo":{"name":"ci","version":"0.1"}}}' \
79+
2>&1 | grep -oP 'mcp-session-id: \K\S+')
80+
RESP=$(curl -sf -X POST http://localhost:8000/mcp \
81+
-H 'Content-Type: application/json' \
82+
-H 'Accept: application/json, text/event-stream' \
83+
-H "Mcp-Session-Id: $SESSION_ID" \
84+
-d '{"jsonrpc":"2.0","id":1,"method":"tools/call","params":{"name":"discover","arguments":{"csv_data":"A,B,C\n1,2,3\n2,4,5\n3,6,8\n4,8,10\n5,10,13\n6,12,15\n7,14,18\n8,16,20\n9,18,23\n10,20,25\n11,22,27\n12,24,30","algorithm":"PC","seed":42}}}')
85+
echo "$RESP"
86+
echo "$RESP" | grep -q '"ok"' || (echo "FAIL: discover did not return ok" && exit 1)
87+
88+
- name: Cleanup
89+
if: always()
90+
run: docker rm -f mcp-test 2>/dev/null || true

Dockerfile.mcp-core

Lines changed: 6 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,10 +1,11 @@
11
# Dockerfile.mcp-core — Lightweight MCP server (~800MB)
2-
# No torch, no LaTeX. Algorithms that don't need torch.
2+
# No torch, no LaTeX. PC, GES, FCI, DirectLiNGAM, etc. work.
3+
# LLM pipeline falls back to rule-based offline mode.
34
FROM python:3.12-slim AS base
45

56
WORKDIR /app
67

7-
# System deps
8+
# System deps (graphviz needed by causal-learn graph rendering)
89
RUN apt-get update && apt-get install -y --no-install-recommends \
910
build-essential graphviz git && \
1011
rm -rf /var/lib/apt/lists/*
@@ -21,8 +22,9 @@ ENV PYTHONPATH=/app:/app/externals:/app/externals/causal-learn
2122
RUN useradd -m mcp && chown -R mcp:mcp /app
2223
USER mcp
2324

24-
HEALTHCHECK --interval=30s --timeout=10s --retries=3 \
25-
CMD python -c "import urllib.request; urllib.request.urlopen('http://localhost:8000/health')"
25+
# Healthcheck with startup grace period
26+
HEALTHCHECK --interval=30s --timeout=10s --start-period=15s --retries=3 \
27+
CMD python -c "import urllib.request; urllib.request.urlopen('http://localhost:8000/health')" || exit 1
2628

2729
EXPOSE 8000
2830
ENTRYPOINT ["python", "-m", "causal_copilot.mcp", "--http"]

Dockerfile.mcp-full

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -1,19 +1,18 @@
1-
# Dockerfile.mcp-full — Full MCP server (~4GB)
2-
# All algorithms including torch-based + LaTeX reports.
1+
# Dockerfile.mcp-full — Full MCP server with all algorithms (~3GB)
2+
# Includes torch-based algorithms (NOTEARS, GOLEM, etc.).
3+
# No LaTeX — MCP server doesn't generate PDF reports.
34
FROM pytorch/pytorch:2.2.2-cuda11.8-cudnn8-runtime AS base
45

56
WORKDIR /app
67

78
# System deps
89
RUN apt-get update && apt-get install -y --no-install-recommends \
9-
build-essential graphviz git \
10-
texlive-latex-base texlive-latex-extra texlive-fonts-recommended && \
10+
build-essential graphviz git && \
1111
rm -rf /var/lib/apt/lists/*
1212

13-
# Python deps
13+
# Python deps (full algorithm suite)
1414
COPY requirements_cpu.txt .
1515
RUN pip install --no-cache-dir -r requirements_cpu.txt
16-
RUN pip install --no-cache-dir "fastmcp>=3.0"
1716

1817
# Application
1918
COPY . .
@@ -23,8 +22,9 @@ ENV PYTHONPATH=/app:/app/externals:/app/externals/causal-learn
2322
RUN useradd -m mcp && chown -R mcp:mcp /app
2423
USER mcp
2524

26-
HEALTHCHECK --interval=30s --timeout=10s --retries=3 \
27-
CMD python -c "import urllib.request; urllib.request.urlopen('http://localhost:8000/health')"
25+
# Healthcheck with startup grace period
26+
HEALTHCHECK --interval=30s --timeout=10s --start-period=15s --retries=3 \
27+
CMD python -c "import urllib.request; urllib.request.urlopen('http://localhost:8000/health')" || exit 1
2828

2929
EXPOSE 8000
3030
ENTRYPOINT ["python", "-m", "causal_copilot.mcp", "--http"]

causal_discovery/filter.py

Lines changed: 5 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,9 @@
11
import json
22
import os
3-
import torch
3+
try:
4+
import torch
5+
except ImportError:
6+
torch = None
47
from utils.logger import logger
58
from llm import LLMClient
69

@@ -70,7 +73,7 @@ def create_prompt(self, global_state):
7073
"[COLUMNS]": ', '.join(global_state.user_data.processed_data.columns),
7174
"[STATISTICS_DESC]": global_state.statistics.description,
7275
"[ALGO_CONTEXT]": algo_context,
73-
"[CUDA_WARNING]": "Current machine supports CUDA, some algorithms can be accelerated by GPU if needed." if torch.cuda.is_available() else "\nCurrent machine doesn't support CUDA, do not choose any GPU-powered algorithms.",
76+
"[CUDA_WARNING]": "Current machine supports CUDA, some algorithms can be accelerated by GPU if needed." if (torch is not None and torch.cuda.is_available()) else "\nCurrent machine doesn't support CUDA, do not choose any GPU-powered algorithms.",
7477
"[TOP_K]": str(TOP_K),
7578
"[ACCEPT_CPDAG]": "The user accepts the output graph including undirected edges/undeterministic directions (CPDAG/PAG)" if global_state.user_data.accept_CPDAG else "The user does not accept the output graph including undirected edges/undeterministic directions (CPDAG/PAG), so the output graph should be a DAG."
7679
}

causal_discovery/hyperparameter_selector.py

Lines changed: 5 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,8 @@
11
import json
2-
import torch
2+
try:
3+
import torch
4+
except ImportError:
5+
torch = None
36
import causal_discovery.wrappers as wrappers
47
from llm import LLMClient
58
from utils.logger import logger
@@ -97,7 +100,7 @@ def create_prompt(self, global_state, selected_algo, hp_context, algorithm_optim
97100
hp_prompt = hp_prompt.replace("[COLUMNS]", table_columns)
98101
hp_prompt = hp_prompt.replace("[KNOWLEDGE_INFO]", knowledge_info)
99102
hp_prompt = hp_prompt.replace("[STATISTICS INFO]", global_state.statistics.description)
100-
hp_prompt = hp_prompt.replace("[CUDA_WARNING]", "Current machine supports CUDA, some algorithms can be accelerated by GPU if needed." if torch.cuda.is_available() else "\nCurrent machine doesn't support CUDA, do not choose any GPU-powered algorithms.")
103+
hp_prompt = hp_prompt.replace("[CUDA_WARNING]", "Current machine supports CUDA, some algorithms can be accelerated by GPU if needed." if (torch is not None and torch.cuda.is_available()) else "\nCurrent machine doesn't support CUDA, do not choose any GPU-powered algorithms.")
101104
# hp_prompt = hp_prompt.replace("[ALGORITHM_DESCRIPTION]", algorithm_optimum_reason)
102105
hp_prompt = hp_prompt.replace("[PRIMARY_HYPERPARAMETERS]", ', '.join(primary_params))
103106
hp_prompt = hp_prompt.replace("[HYPERPARAMETER_INFO]", hp_info_str)

causal_discovery/rerank.py

Lines changed: 5 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,8 @@
11
import json
2-
import torch
2+
try:
3+
import torch
4+
except ImportError:
5+
torch = None
36
from .wrappers import __all__ as all_algos
47
from .hyperparameter_selector import HyperparameterSelector
58
from .runtime_estimators.runtime_estimator import RuntimeEstimator
@@ -61,7 +64,7 @@ def create_prompt(self, global_state, algo_info, time_info):
6164
"[DIMENSIONS]": str(global_state.user_data.processed_data.shape[1]),
6265
"[KNOWLEDGE_INFO]": str(global_state.user_data.knowledge_docs),
6366
"[STATISTICS_INFO]": global_state.statistics.description,
64-
"[CUDA_WARNING]": "Current machine supports CUDA, some algorithms can be accelerated by GPU if needed." if torch.cuda.is_available() else "\nCurrent machine doesn't support CUDA, do not choose any GPU-powered algorithms.",
67+
"[CUDA_WARNING]": "Current machine supports CUDA, some algorithms can be accelerated by GPU if needed." if (torch is not None and torch.cuda.is_available()) else "\nCurrent machine doesn't support CUDA, do not choose any GPU-powered algorithms.",
6568
"[ALGORITHM_CANDIDATES]": str(list(global_state.algorithm.algorithm_candidates.keys())),
6669
"[WAIT_TIME]": str(global_state.algorithm.waiting_minutes),
6770
"[TIME_INFO]": time_info,

causal_discovery/wrappers/cdnod.py

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -24,8 +24,12 @@
2424
from causal_discovery.wrappers.pc import PC
2525
from causal_discovery.evaluation.evaluator import GraphEvaluator
2626

27-
import torch
28-
cuda_available = torch.cuda.is_available()
27+
try:
28+
import torch
29+
cuda_available = torch.cuda.is_available()
30+
except ImportError:
31+
torch = None
32+
cuda_available = False
2933
try:
3034
from externals.acceleration.cdnod.cdnod import accelerated_cdnod
3135
except ImportError:

causal_discovery/wrappers/var_lingam.py

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -20,8 +20,12 @@
2020
from causal_discovery.evaluation.evaluator import GraphEvaluator
2121
from causal_discovery.wrappers.utils.ts_utils import generate_stationary_linear
2222

23-
import torch
24-
cuda_available = torch.cuda.is_available()
23+
try:
24+
import torch
25+
cuda_available = torch.cuda.is_available()
26+
except ImportError:
27+
torch = None
28+
cuda_available = False
2529
try:
2630
from culingam.varlingam import VARLiNGAM as AcVarLiNGAM
2731
from culingam.utils import check_array

global_setting/Initialize_state.py

Lines changed: 5 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,10 @@
66
import numpy as np
77
import os
88
import json
9-
import torch
9+
try:
10+
import torch
11+
except ImportError:
12+
torch = None
1013
from datetime import datetime
1114
from global_setting.state import GlobalState
1215
from data.simulator.simulation import SimulationManager
@@ -184,7 +187,7 @@ def global_state_initialization(args: argparse.Namespace = None) -> GlobalState:
184187
global_state.algorithm.selected_algorithm = info_extracted["selected_algorithm"]
185188
global_state.statistics.time_series = info_extracted["time_series"]
186189
# GPU availability
187-
global_state.statistics.gpu_available = torch.cuda.is_available()
190+
global_state.statistics.gpu_available = torch is not None and torch.cuda.is_available()
188191

189192
if info_extracted["waiting_minutes"] is not None:
190193
global_state.algorithm.waiting_minutes = info_extracted["waiting_minutes"]

0 commit comments

Comments (0)