-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathmcp.compose.yml
More file actions
59 lines (58 loc) · 1.5 KB
/
mcp.compose.yml
File metadata and controls
59 lines (58 loc) · 1.5 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
---
# Compose stack: the GitHub MCP server plus the agent that consumes it.
services:
  github-mcp:
    image: ghcr.io/github/github-mcp-server:latest
    container_name: github-mcp
    hostname: github-mcp
    logging:
      driver: json-file
      options:
        max-size: "10m"
        max-file: "3"
    # Keep stdin/tty open so the MCP server can also be driven interactively.
    stdin_open: true
    tty: true
    ports:
      - "8787:8787"
    environment:
      # Quoted so an empty or boolean-looking expansion stays a string.
      GITHUB_PERSONAL_ACCESS_TOKEN: "${GITHUB_ACCESS_TOKEN}"
    # NOTE(review): no restart policy here, unlike github-agent — confirm
    # whether `restart: always` is intended for the server too.

  github-agent:
    # image: docker.io/knucklessg1/github:latest
    build:
      context: .  # Debug
      dockerfile: debug.Dockerfile
    container_name: github-agent
    hostname: github-agent
    command: ["github-agent"]
    extra_hosts:
      # Lets the container reach services on the Docker host (e.g. a local LLM).
      - "host.docker.internal:host-gateway"
    depends_on:
      - github-mcp
    logging:
      driver: json-file
      options:
        max-size: "10m"
        max-file: "3"
    restart: always
    env_file:
      - .env
    # Map form to match github-mcp's environment block; every value is quoted
    # so boolean-looking strings (True/False) reach the app verbatim instead
    # of being retyped by a YAML parser.
    environment:
      HOST: "0.0.0.0"
      PORT: "9016"
      # MCP endpoint resolves via the Compose network alias of github-mcp.
      MCP_URL: "http://github-mcp:8787/mcp"
      PROVIDER: "openai"
      LLM_BASE_URL: "${LLM_BASE_URL:-http://host.docker.internal:1234/v1}"
      LLM_API_KEY: "${LLM_API_KEY:-llama}"
      MODEL_ID: "${MODEL_ID:-nvidia/nemotron-3-super}"
      DEBUG: "False"
      ENABLE_WEB_UI: "True"
      ENABLE_OTEL: "True"
      SSL_VERIFY: "True"
      MAX_TOKENS: "16384"
    ports:
      - "9016:9016"
    healthcheck:
      # Uses the image's Python rather than curl/wget, which may be absent.
      test: ["CMD", "python3", "-c", "import urllib.request; urllib.request.urlopen('http://localhost:9016/health')"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 10s