diff --git a/.cursor/rules/mcp-auth.mdc b/.cursor/rules/mcp-auth.mdc
new file mode 100644
index 0000000..62860f5
--- /dev/null
+++ b/.cursor/rules/mcp-auth.mdc
@@ -0,0 +1,21 @@
+---
+description: MCP OAuth auth verification and CIMD conventions
+globs: subgraphs/users/src/index.ts, deploy/apollo-mcp-server/mcp.yaml, tests/mcp-auth-verification.md
+alwaysApply: false
+---
+
+# MCP Auth Configuration
+
+## After making changes to OAuth or MCP auth
+
+When modifying `subgraphs/users/src/index.ts` (the authorization server) or `deploy/apollo-mcp-server/mcp.yaml`, verify changes are working by following `tests/mcp-auth-verification.md`.
+
+To run verification: port-forwards must be active on `localhost:5001` (MCP) and `localhost:4001` (auth). Use the curl commands from the test doc to validate each section. All checks should pass before considering the change complete.
+
+## Key architecture facts
+
+- The Apollo MCP Server binary handles Protected Resource Metadata (RFC 9728) automatically via the `resource` config field in `mcp.yaml`. Do not add `/.well-known/oauth-protected-resource` to the users subgraph.
+- The authorization server supports both Client ID Metadata Documents (CIMD) and Dynamic Client Registration (RFC 7591). CIMD is the preferred approach per the MCP spec.
+- URL-formatted `client_id` values (HTTPS with a path) trigger the CIMD flow. Non-URL values fall back to the `registeredClients` map.
+- `allow_anonymous_mcp_discovery: true` lets clients call `initialize` and `tools/list` without auth. All tool invocations still require a valid OAuth token.
+- After rebuilding the users subgraph image, tag it to match the deployment tag in `.image-tag` and restart the deployment.
diff --git a/README.md b/README.md
index 303f8b0..4ab84bc 100644
--- a/README.md
+++ b/README.md
@@ -140,6 +140,14 @@ Learn about the authorization implementation, including:
- Resource-level authorization patterns
- Testing authorization scenarios
+### [MCP Production Guide](/docs/mcp-production.md)
+
+Guidance for deploying the Apollo MCP Server in production with a real OAuth 2.1 identity provider:
+- Configuring Auth0, Okta, Keycloak, or other IdPs
+- Scope strategy and per-operation access control
+- Security considerations (HTTPS, token passthrough, audience validation)
+- Networking and DNS (no `/etc/hosts` workarounds)
+
### [Response Caching Guide](/docs/response-caching-guide.md)
Learn about response caching in this architecture, including:
diff --git a/deploy/apollo-mcp-server/Chart.yaml b/deploy/apollo-mcp-server/Chart.yaml
new file mode 100644
index 0000000..a58a85a
--- /dev/null
+++ b/deploy/apollo-mcp-server/Chart.yaml
@@ -0,0 +1,6 @@
+apiVersion: v2
+name: apollo-mcp-server
+description: A Helm chart for the Apollo MCP Server in the reference architecture
+type: application
+version: 0.1.0
+appVersion: "latest"
diff --git a/deploy/apollo-mcp-server/mcp.yaml b/deploy/apollo-mcp-server/mcp.yaml
new file mode 100644
index 0000000..ca4c969
--- /dev/null
+++ b/deploy/apollo-mcp-server/mcp.yaml
@@ -0,0 +1,32 @@
+endpoint: ${env.ROUTER_ENDPOINT:-http://reference-architecture-dev.apollo.svc.cluster.local:80}
+
+transport:
+ type: streamable_http
+ port: 8000
+ stateful_mode: false # Required for mcp-remote compatibility; can be enabled in production
+ host_validation:
+ enabled: false # Local dev only — enable with allowed_hosts in production
+ auth:
+ servers:
+ - http://graphql.users.svc.cluster.local:4001
+ audiences:
+ - apollo-mcp
+ allow_any_audience: false
+ resource: ${env.MCP_RESOURCE_URL:-http://localhost:5001/mcp}
+ scopes:
+ - user:read:email
+ scope_mode: require_any
+ allow_anonymous_mcp_discovery: true
+
+logging:
+ level: debug
+
+introspection:
+ introspect:
+ enabled: true
+
+operations:
+ source: local
+ paths:
+ - /data/operations/myCart.graphql
+ - /data/operations/myProfileDetails.graphql
diff --git a/deploy/apollo-mcp-server/operations/myCart.graphql b/deploy/apollo-mcp-server/operations/myCart.graphql
new file mode 100644
index 0000000..4557552
--- /dev/null
+++ b/deploy/apollo-mcp-server/operations/myCart.graphql
@@ -0,0 +1,31 @@
+# Fetches the authenticated user's shopping cart with full product details.
+# Requires an Authorization header with a valid Bearer token.
+query MyCart {
+ me {
+ id
+ cart {
+ items {
+ product {
+ id
+ upc
+ title
+ description
+ mediaUrl
+ releaseDate
+ variants {
+ id
+ price
+ colorway
+ size
+ dimensions
+ weight
+ }
+ reviews {
+ id
+ body
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/deploy/apollo-mcp-server/operations/myProfileDetails.graphql b/deploy/apollo-mcp-server/operations/myProfileDetails.graphql
new file mode 100644
index 0000000..30e830b
--- /dev/null
+++ b/deploy/apollo-mcp-server/operations/myProfileDetails.graphql
@@ -0,0 +1,12 @@
+# Fetches the authenticated user's profile information.
+# Requires an Authorization header with a valid Bearer token.
+query MyProfileDetails {
+ me {
+ id
+ shippingAddress
+ username
+ email
+ previousSessions
+ loyaltyPoints
+ }
+}
diff --git a/deploy/apollo-mcp-server/templates/configmap-mcp-config.yaml b/deploy/apollo-mcp-server/templates/configmap-mcp-config.yaml
new file mode 100644
index 0000000..23b7bb7
--- /dev/null
+++ b/deploy/apollo-mcp-server/templates/configmap-mcp-config.yaml
@@ -0,0 +1,7 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: apollo-mcp-config
+data:
+ mcp.yaml: |-
+{{ .Files.Get "mcp.yaml" | indent 4 }}
diff --git a/deploy/apollo-mcp-server/templates/configmap-operations.yaml b/deploy/apollo-mcp-server/templates/configmap-operations.yaml
new file mode 100644
index 0000000..c85531e
--- /dev/null
+++ b/deploy/apollo-mcp-server/templates/configmap-operations.yaml
@@ -0,0 +1,9 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: apollo-mcp-operations
+data:
+{{- range $path, $_ := .Files.Glob "operations/**.graphql" }}
+ {{ base $path }}: |-
+{{ $.Files.Get $path | indent 4 }}
+{{- end }}
diff --git a/deploy/apollo-mcp-server/templates/deployment.yaml b/deploy/apollo-mcp-server/templates/deployment.yaml
new file mode 100644
index 0000000..ea43046
--- /dev/null
+++ b/deploy/apollo-mcp-server/templates/deployment.yaml
@@ -0,0 +1,45 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: apollo-mcp-server
+ labels:
+ app: apollo-mcp-server
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: apollo-mcp-server
+ template:
+ metadata:
+ labels:
+ app: apollo-mcp-server
+ annotations:
+ checksum/config: {{ .Files.Get "mcp.yaml" | sha256sum }}
+ spec:
+ enableServiceLinks: false
+ containers:
+ - name: apollo-mcp-server
+ image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ command: ["apollo-mcp-server", "/data/mcp.yaml"]
+ ports:
+ - containerPort: {{ .Values.service.port }}
+ protocol: TCP
+ envFrom:
+ - secretRef:
+ name: apollo-mcp-credentials
+ volumeMounts:
+ - name: config-volume
+ mountPath: /data/mcp.yaml
+ subPath: mcp.yaml
+ - name: operations-volume
+ mountPath: /data/operations
+ resources:
+ {{- toYaml .Values.resources | nindent 12 }}
+ volumes:
+ - name: config-volume
+ configMap:
+ name: apollo-mcp-config
+ - name: operations-volume
+ configMap:
+ name: apollo-mcp-operations
diff --git a/deploy/apollo-mcp-server/templates/service.yaml b/deploy/apollo-mcp-server/templates/service.yaml
new file mode 100644
index 0000000..ad93a87
--- /dev/null
+++ b/deploy/apollo-mcp-server/templates/service.yaml
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: apollo-mcp-server
+ labels:
+ app: apollo-mcp-server
+spec:
+ type: {{ .Values.service.type }}
+ ports:
+ - port: {{ .Values.service.port }}
+ targetPort: {{ .Values.service.port }}
+ protocol: TCP
+ selector:
+ app: apollo-mcp-server
diff --git a/deploy/apollo-mcp-server/values.yaml b/deploy/apollo-mcp-server/values.yaml
new file mode 100644
index 0000000..71cef50
--- /dev/null
+++ b/deploy/apollo-mcp-server/values.yaml
@@ -0,0 +1,16 @@
+image:
+ repository: ghcr.io/apollographql/apollo-mcp-server
+ tag: latest
+ pullPolicy: Always
+
+service:
+ type: ClusterIP
+ port: 8000
+
+resources:
+ limits:
+ cpu: 500m
+ memory: 512Mi
+ requests:
+ cpu: 100m
+ memory: 128Mi
diff --git a/docs/TODO-split-auth-from-users.md b/docs/TODO-split-auth-from-users.md
new file mode 100644
index 0000000..2d39de8
--- /dev/null
+++ b/docs/TODO-split-auth-from-users.md
@@ -0,0 +1,76 @@
+# TODO: Split Auth Service from Users Subgraph
+
+## Problem
+
+The users subgraph (`subgraphs/users/`) currently serves three distinct roles: user data, identity/login, and OAuth 2.0 authorization server. The `index.ts` is 287 lines, ~60% auth infrastructure. In-memory OAuth state forces `replicaCount: 1`.
+
+## Architecture After Split
+
+```mermaid
+graph TB
+ subgraph authService["auth subgraph (new)"]
+ direction TB
+ oauthServer["OAuth 2.0 Server /register, /authorize, /token"]
+ jwksEndpoint["JWKS + OAuth metadata"]
+ loginMutation["login mutation"]
+ end
+
+ subgraph usersSubgraph["users subgraph (cleaned)"]
+ direction TB
+ userType["User type, me/user queries __resolveReference"]
+ end
+
+ Router -->|"JWKS"| jwksEndpoint
+ MCPServer -->|"OAuth flow"| oauthServer
+ OrdersSubgraph -->|"JWKS"| jwksEndpoint
+ CheckoutSubgraph -->|"JWKS"| jwksEndpoint
+ loginMutation -.->|"federation entity ref"| userType
+```
+
+The auth subgraph participates in the supergraph (contributes `login` mutation, `LoginResponse` types) so the client app needs zero changes. It references `User` via `@key(fields: "id", resolvable: false)` -- the users subgraph resolves the full entity.
+
+## Tasks
+
+### 1. Create the auth subgraph (`subgraphs/auth/`)
+
+- [ ] Create `subgraphs/auth/` with `schema.graphql` (login mutation, LoginResponse types, User entity stub), `src/index.ts` (Express with all OAuth routes moved from users, login mutation resolver, JWKS endpoint, renderLoginPage), `keys/` (copy from users), `package.json`, `tsconfig.json`, `Dockerfile`, `deploy/` Helm chart (port 4011, replicaCount 1)
+- [ ] Create `subgraphs/auth/src/credentials.ts` with minimal user credential data (id, username, scopes only) for login validation -- keeps domain boundary clean vs importing full user profile data
+- [ ] Carry over Client ID Metadata Document (CIMD) support: the `isUrlClientId`, `fetchClientMetadata`, `cimdCache`, SSRF guards, `CimdDisplayInfo`, and the CIMD-aware logic in `/authorize` and `/token` handlers. Ensure `client_id_metadata_document_supported: true` is included in the AS metadata endpoint
+
+### 2. Clean up the users subgraph
+
+- [ ] Clean `subgraphs/users/src/index.ts`: remove all OAuth routes, renderLoginPage, getIssuer, OAuthParams, in-memory OAuth stores, crypto/readFile/createPrivateKey imports, users data import. Revert from Express to `startStandaloneServer`. Keep JWT verification in context middleware (same keys work)
+- [ ] Clean `subgraphs/users/src/resolvers/index.ts`: remove login mutation resolver, LoginResponse type resolver, jose/readFile/createPrivateKey imports. Keep `Query.user`, `Query.me`, `User.__resolveReference`
+- [ ] Clean `subgraphs/users/schema.graphql`: remove Mutation type (login), LoginResponse union, LoginSuccessful, LoginFailed types
+- [ ] Set users subgraph back to `replicaCount: 3` in `values.yaml` since it no longer holds in-memory OAuth state
+
+### 3. Update JWKS and auth references
+
+- [ ] Update JWKS URL from `graphql.users.svc.cluster.local:4001` to `graphql.auth.svc.cluster.local:4011` in: `deploy/operator-resources/supergraph-dev.yaml`, `deploy/operator-resources/supergraph-prod.yaml`, `subgraphs/orders/src/index.ts`, `subgraphs/checkout/src/index.ts`
+- [ ] Update `deploy/apollo-mcp-server/mcp.yaml` `auth.servers` and `scripts/minikube/12-deploy-mcp-server.sh` to reference auth service instead of users
+
+### 4. Update deployment scripts
+
+- [ ] Add `auth` to SUBGRAPHS array in `scripts/minikube/05-deploy-subgraphs.sh` and image build list in `scripts/minikube/04-build-images.sh`
+
+### 5. Update documentation
+
+- [ ] Update `docs/setup.md` (port-forward auth:4011 instead of users:4001, `/etc/hosts` entry), `docs/mcp-production.md`, `README.md` references
+
+## Key Files Reference
+
+| File | Change |
+|------|--------|
+| `subgraphs/users/src/index.ts` | Remove OAuth routes, revert to `startStandaloneServer` |
+| `subgraphs/users/src/resolvers/index.ts` | Remove `login` mutation, `LoginResponse` |
+| `subgraphs/users/schema.graphql` | Remove `login`, `LoginResponse`, `LoginSuccessful`, `LoginFailed` |
+| `deploy/operator-resources/supergraph-dev.yaml` | Change JWKS URL to auth service |
+| `deploy/operator-resources/supergraph-prod.yaml` | Change JWKS URL to auth service |
+| `deploy/apollo-mcp-server/mcp.yaml` | Change `auth.servers` to auth service |
+| `subgraphs/orders/src/index.ts` | Update `JWKS_URL` |
+| `subgraphs/checkout/src/index.ts` | Update `JWKS_URL` |
+| `scripts/minikube/04-build-images.sh` | Add `auth` to build list |
+| `scripts/minikube/05-deploy-subgraphs.sh` | Add `auth` to SUBGRAPHS array |
+| `scripts/minikube/12-deploy-mcp-server.sh` | Update port-forward instructions |
+| `docs/setup.md` | Update `/etc/hosts` and port-forward instructions |
+| `docs/mcp-production.md` | Update references |
diff --git a/docs/debugging.md b/docs/debugging.md
index cb7e012..7ff2f74 100644
--- a/docs/debugging.md
+++ b/docs/debugging.md
@@ -10,6 +10,7 @@ This guide covers common issues and debugging steps for the reference architectu
- [Schema Not Pushed to Registry](#schema-not-pushed-to-registry)
- [Image Tag Issues](#image-tag-issues)
- [Network and DNS Issues](#network-and-dns-issues)
+- [Composition Failures](#composition-failures)
- [Quick Debug Scripts](#quick-debug-scripts)
## Registry Setup Issues
@@ -455,6 +456,85 @@ Response caching requires the router to have a configured TTL **and** for subgra
- [Response Caching Quickstart](https://www.apollographql.com/docs/graphos/routing/performance/caching/response-caching/quickstart)
- [Response Cache Customization](https://www.apollographql.com/docs/graphos/routing/performance/caching/response-caching/customization)
+## Composition Failures
+
+### `MISSING_TRANSITIVE_AUTH_REQUIREMENTS`
+
+**Symptoms:**
+- `SupergraphSchema` condition shows `MalformedSchema` / `CompositionPending: False`
+- `kubectl describe supergraphschema reference-architecture-dev -n apollo` shows an error like:
+ ```
+ composition failures: [CompositionError { message: "[shipping-shipping] Field \"Order.shippingCost\"
+ does not specify necessary @authenticated, @requiresScopes and/or @policy auth requirements to
+ access the transitive field \"Order.buyer\" data from @requires selection set.",
+ code: Some("MISSING_TRANSITIVE_AUTH_REQUIREMENTS") }]
+ ```
+
+**Cause:**
+
+Federation composition enforces a transitive authorization rule: if a field uses `@requires` to read data that is transitively protected by `@authenticated`, `@requiresScopes`, or `@policy` in another subgraph, the field itself must declare the matching auth directive.
+
+In this case:
+- `users` subgraph declares `type User @authenticated` — the entire `User` type is auth-gated.
+- `shipping` subgraph's `Order.shippingCost` uses `@requires(fields: "... buyer { shippingAddress }")`, which reads `User.shippingAddress` from the auth-protected `User` entity.
+- Because `shippingCost` reads through an `@authenticated` boundary, it must also declare `@authenticated`.
+
+**Debug Steps:**
+
+1. **Identify the failing subgraph and field from the error message** — the format is `[subgraph-name] Field "Type.field" ...`.
+
+2. **Inspect the `@requires` selection set on the failing field:**
+ ```bash
+ cat subgraphs/shipping/schema.graphql
+ ```
+ Look for the field's `@requires(fields: "...")` — note which external types/fields it reads.
+
+3. **Find where those referenced fields are defined and check their auth directives:**
+ ```bash
+ # Example: check if User type has @authenticated in users subgraph
+ grep -n "@authenticated\|@requiresScopes\|@policy" subgraphs/users/schema.graphql
+ ```
+
+4. **Check the `SupergraphSchema` status for the full composition error:**
+ ```bash
+ kubectl describe supergraphschema reference-architecture-dev -n apollo
+ ```
+
+**Solution:**
+
+Add the matching auth directive to the field that has `@requires`, and import it in the subgraph's `@link` declaration.
+
+Example fix for `Order.shippingCost` in `subgraphs/shipping/schema.graphql`:
+
+```graphql
+# Before
+extend schema
+ @link(
+ url: "https://specs.apollo.dev/federation/v2.5"
+ import: ["@key", "@external", "@requires"]
+ )
+
+type Order @key(fields: "id") {
+ shippingCost: Float
+ @requires(fields: "items { weight } buyer { shippingAddress }")
+}
+
+# After
+extend schema
+ @link(
+ url: "https://specs.apollo.dev/federation/v2.5"
+ import: ["@key", "@external", "@requires", "@authenticated"]
+ )
+
+type Order @key(fields: "id") {
+ shippingCost: Float
+ @authenticated
+ @requires(fields: "items { weight } buyer { shippingAddress }")
+}
+```
+
+After updating the schema, rebuild and redeploy the affected subgraph. The operator will detect the new SDL hash on the `Subgraph` CRD, re-run composition, and the `SupergraphSchema` condition should transition from `MalformedSchema` to `Available`.
+
## Quick Debug Scripts
### Complete Registry Debug
@@ -568,6 +648,10 @@ cat .image-tag 2>/dev/null || echo ".image-tag file not found"
**Cause:** Tag in `.image-tag` is empty or too short
**Solution:** Re-run `04-build-images.sh` to regenerate tag
+### `MISSING_TRANSITIVE_AUTH_REQUIREMENTS` (composition error)
+**Cause:** A field's `@requires` selection set transitively reads data protected by `@authenticated`, `@requiresScopes`, or `@policy` in another subgraph, but the field itself does not declare the same auth directive.
+**Solution:** Add the matching auth directive to the field and import it in the subgraph's `@link`. See [Composition Failures](#composition-failures).
+
## Getting Help
If you're still stuck after trying these debugging steps:
diff --git a/docs/mcp-production.md b/docs/mcp-production.md
new file mode 100644
index 0000000..357b95d
--- /dev/null
+++ b/docs/mcp-production.md
@@ -0,0 +1,560 @@
+# MCP Server: Production Guide
+
+This guide covers deploying the Apollo MCP Server in production with a real OAuth 2.1 identity provider, replacing the demo auto-approval flow used in local development.
+
+- [MCP Server: Production Guide](#mcp-server-production-guide)
+ - [Architecture Overview](#architecture-overview)
+ - [What Changes from Local Dev](#what-changes-from-local-dev)
+ - [Step 1: Choose an Identity Provider](#step-1-choose-an-identity-provider)
+ - [Step 2: Configure the Identity Provider](#step-2-configure-the-identity-provider)
+ - [Step 3: Configure the Apollo MCP Server](#step-3-configure-the-apollo-mcp-server)
+ - [Step 4: Configure the Apollo Router](#step-4-configure-the-apollo-router)
+ - [Step 5: Deploy](#step-5-deploy)
+ - [Security Considerations](#security-considerations)
+ - [Scope Strategy](#scope-strategy)
+ - [Per-Operation Scope Requirements](#per-operation-scope-requirements)
+ - [Networking and DNS](#networking-and-dns)
+ - [Troubleshooting](#troubleshooting)
+
+## Architecture Overview
+
+In production, a dedicated identity provider (IdP) handles authentication and token issuance. The MCP server validates tokens from the IdP and forwards them to the Router. The Router independently validates the same tokens and enforces authorization directives.
+
+```
+MCP Client MCP Server Identity Provider Router
+ | | | |
+ |-- discover auth server --->| | |
+ |<-- IdP URL ----------------| | |
+ | | | |
+ |-- OAuth 2.1 flow (PKCE) ----------------------->| |
+ |<-- access token (JWT) --------------------------| |
+ | | | |
+ |-- POST /mcp + Bearer ----->| | |
+ | |-- validate JWT (JWKS) -->| |
+ | |-- GraphQL + Bearer ------|----------------> |
+ | | | validate JWT |
+ | | | enforce @auth |
+ |<-- tool results -----------| | |
+```
+
+## What Changes from Local Dev
+
+| Aspect | Local Dev | Production |
+|--------|-----------|------------|
+| Identity Provider | Users subgraph (built-in OAuth endpoints) | External IdP (Auth0, Okta, Keycloak, etc.) |
+| Authorization | Login form on users subgraph (any non-empty password) | Real user login via IdP consent screen |
+| Token Issuance | In-memory auth codes, local key signing | IdP-managed token lifecycle |
+| Client Registration | Client ID Metadata Documents + Dynamic (RFC 7591) fallback | Pre-registered in IdP dashboard or Client ID Metadata Documents |
+| HTTPS | Not required (localhost) | Required for all endpoints |
+| DNS | `/etc/hosts` workaround for port-forward | Real DNS records |
+| Host Validation | Disabled | Enabled with explicit allowed hosts |
+
+## Step 1: Choose an Identity Provider
+
+The Apollo MCP Server requires an OAuth 2.1-compliant IdP that supports:
+
+- **Authorization Code flow with PKCE** (required by the MCP specification)
+- **JWT access tokens** with configurable claims (`aud`, `scope`, `sub`)
+- **JWKS endpoint** for token signature verification
+- **OAuth 2.0 Authorization Server Metadata** (RFC 8414) or **OpenID Connect Discovery**
+
+Tested providers:
+
+| Provider | Discovery | Notes |
+|----------|-----------|-------|
+| [Auth0](https://auth0.com) | OIDC | See [Apollo's Auth0 guide](https://www.apollographql.com/docs/apollo-mcp-server/guides/auth-auth0) |
+| [Okta](https://www.okta.com) | OIDC | Supports custom authorization servers |
+| [Keycloak](https://www.keycloak.org) | OIDC | Self-hosted, good for air-gapped environments |
+| [Microsoft Entra ID](https://www.microsoft.com/security/business/identity-access/microsoft-entra-id) | OIDC | Azure-native |
+| [Google Identity](https://developers.google.com/identity) | OIDC | Limited scope customization |
+
+## Step 2: Configure the Identity Provider
+
+### Create an Application/Client
+
+In your IdP, create a new application with these settings:
+
+- **Application type:** Single Page Application or Native (public client)
+- **Grant type:** Authorization Code with PKCE
+- **Redirect URIs:** Add the callback URLs for your MCP clients (see provider-specific examples below)
+- **Allowed Logout URIs:** Add any post-logout redirect URLs your clients need
+- **Scopes:** Define custom scopes that match your GraphQL authorization requirements
+
+#### Auth0
+
+1. Go to **Applications > Create Application**, select **Single Page Application**, and click **Create**.
+
+2. In the **Settings** tab, configure these fields:
+
+ | Field | Value | Notes |
+ |-------|-------|-------|
+ | **Allowed Callback URLs** | `https://mcp-client.yourdomain.com/callback` | The URL your MCP client redirects to after authorization. For `mcp-remote` during local testing, add `http://localhost:<port>/callback` and `http://127.0.0.1:<port>/callback` (using the port `mcp-remote` listens on) as additional entries. |
+ | **Allowed Logout URLs** | `https://mcp-client.yourdomain.com` | Optional — needed only if your client supports logout flows. |
+ | **Allowed Web Origins** | `https://mcp-client.yourdomain.com` | Required for silent token refresh via CORS. |
+
+ > **Local testing:** You can add `http://localhost` entries alongside production URLs. Auth0 accepts comma-separated lists. Remove localhost entries before going live.
+
+3. Under **Advanced Settings > Grant Types**, ensure **Authorization Code** is enabled (PKCE is automatic for SPAs in Auth0).
+
+4. Note the **Domain** (e.g., `your-tenant.auth0.com`) and **Client ID** from the top of the Settings page.
+
+5. To define custom scopes, go to **Applications > APIs**, select your API (or create one), and add scopes under the **Permissions** tab. Enter each scope as a **Permission** value with a description:
+
+ | Permission (Scope) | Description |
+ |--------------------|-------------|
+ | `user:read:email` | Read user email addresses |
+ | `inventory:read` | Read inventory levels |
+ | `order:read` | Read order data |
+ | `cart:write` | Modify shopping cart |
+
+ The **Identifier** you set for the API becomes the `audience` value used in token requests and `mcp.yaml`.
+
+For a complete walkthrough, see [Apollo's Auth0 guide](https://www.apollographql.com/docs/apollo-mcp-server/guides/auth-auth0).
+
+#### Okta
+
+1. Go to **Applications > Create App Integration**, select **OIDC** and **Single-Page Application**.
+
+2. Configure:
+ - **Sign-in redirect URIs:** `https://mcp-client.yourdomain.com/callback`
+ - **Sign-out redirect URIs:** `https://mcp-client.yourdomain.com` (optional)
+ - **Controlled access:** Assign to the relevant groups or allow everyone
+
+ > **Local testing:** Add `http://localhost:<port>/callback` (using the port your client listens on) as an additional redirect URI during development.
+
+3. To define scopes, go to **Security > API > Authorization Servers**, select your server (or use `default`), and add scopes under the **Scopes** tab. The authorization server's **Issuer URI** is your IdP URL.
+
+#### Keycloak
+
+1. Go to your realm, then **Clients > Create client**. Set the client type to **OpenID Connect** and enable **Standard flow**.
+
+2. Configure:
+ - **Valid redirect URIs:** `https://mcp-client.yourdomain.com/*`
+ - **Valid post logout redirect URIs:** `https://mcp-client.yourdomain.com` (optional)
+ - **Web origins:** `https://mcp-client.yourdomain.com`
+
+ > **Local testing:** Add `http://localhost:*` as an additional valid redirect URI during development. Remove it before going live.
+
+3. To define scopes, go to **Client scopes**, create each scope, then assign them to your client under the **Client scopes** tab.
+
+### Define Scopes
+
+Map your GraphQL authorization scopes to IdP scopes. This reference architecture uses:
+
+| Scope | Purpose | Used By |
+|-------|---------|---------|
+| `user:read:email` | Read user email addresses | `@requiresScopes` on `User.email` |
+| `inventory:read` | Read inventory levels | `@requiresScopes` on inventory fields |
+| `order:read` | Read order data | Resolver-level checks |
+| `cart:write` | Modify cart contents | Resolver-level checks |
+
+These scopes must be defined in your IdP (see the provider-specific instructions above) and included in the token's `scope` claim. The Router's `@requiresScopes` directives and the MCP server's `scopes` configuration both reference these values.
+
+### Configure the Audience
+
+Set the audience (`aud` claim) to a value that identifies your MCP server. For example:
+
+- `https://mcp.yourdomain.com`
+- `apollo-mcp` (used in this reference architecture)
+
+The same audience must be configured on both the MCP server and the Router.
+
+| Provider | Where to Set Audience |
+|----------|----------------------|
+| Auth0 | **Applications > APIs** — the API **Identifier** becomes the `aud` claim |
+| Okta | **Security > API > Authorization Servers** — add an **Audience** restriction |
+| Keycloak | **Client Scopes** — configure an audience mapper on the client or use a hardcoded audience protocol mapper |
+
+### Note the IdP URL
+
+Record the base URL of your IdP. This is the issuer URL that appears in the OAuth/OIDC metadata document:
+
+| Provider | IdP URL Format | Where to Find |
+|----------|---------------|---------------|
+| Auth0 | `https://your-tenant.auth0.com` | **Settings > General > Domain** |
+| Okta | `https://your-org.okta.com/oauth2/default` | **Security > API > Authorization Servers > Issuer URI** |
+| Keycloak | `https://keycloak.yourdomain.com/realms/your-realm` | Realm settings; the OIDC discovery endpoint is at `{issuer}/.well-known/openid-configuration` |
+
+### Client Registration Approach
+
+The [MCP authorization specification](https://modelcontextprotocol.io/specification/draft/basic/authorization#client-registration-approaches) defines three client registration mechanisms. Choose based on your scenario:
+
+| Approach | When to Use | Spec Priority |
+|----------|-------------|---------------|
+| **Client ID Metadata Documents** | Client and server have no prior relationship (most common for MCP) | 1st (recommended) |
+| **Pre-registration** | Client is known to the IdP ahead of time | 2nd |
+| **Dynamic Client Registration (RFC 7591)** | Backwards compatibility or specific requirements | 3rd (fallback) |
+
+**Client ID Metadata Documents** (CIMD) allow MCP clients to use an HTTPS URL as their `client_id`. The URL points to a JSON document describing the client (name, redirect URIs, grant types). The authorization server fetches and validates this document during the OAuth flow, eliminating the need for pre-registration or dynamic registration.
+
+This reference architecture's built-in authorization server supports CIMD out of the box. It advertises `client_id_metadata_document_supported: true` in its [Authorization Server Metadata](https://datatracker.ietf.org/doc/html/rfc8414). When a URL-formatted `client_id` is presented during authorization, the server fetches the metadata document, validates the redirect URI against the document's `redirect_uris`, and displays the `client_name` on the consent screen.
+
+Example metadata document hosted by an MCP client:
+
+```json
+{
+ "client_id": "https://app.example.com/oauth/client-metadata.json",
+ "client_name": "Example MCP Client",
+ "client_uri": "https://app.example.com",
+ "redirect_uris": [
+ "http://127.0.0.1:3000/callback",
+ "http://localhost:3000/callback"
+ ],
+ "grant_types": ["authorization_code"],
+ "response_types": ["code"],
+ "token_endpoint_auth_method": "none"
+}
+```
+
+For production IdPs (Auth0, Okta, etc.), check whether your IdP supports CIMD natively. If not, pre-register your MCP clients in the IdP dashboard.
+
+### Protected Resource Metadata (RFC 9728)
+
+The Apollo MCP Server binary automatically serves [Protected Resource Metadata](https://datatracker.ietf.org/doc/html/rfc9728) using the `resource` field in `mcp.yaml`. No additional configuration is needed. When a client sends an unauthenticated request, the MCP server returns a `401` with a `WWW-Authenticate` header containing the `resource_metadata` URL, which clients use to discover the authorization server:
+
+```http
+HTTP/1.1 401 Unauthorized
+WWW-Authenticate: Bearer resource_metadata="https://mcp.yourdomain.com/.well-known/oauth-protected-resource",
+ scope="user:read:email"
+```
+
+## Step 3: Configure the Apollo MCP Server
+
+Replace the local dev auth configuration in `mcp.yaml`:
+
+```yaml
+endpoint: http://your-router-service:80
+
+transport:
+ type: streamable_http
+ port: 8000
+ host_validation:
+ allowed_hosts:
+ - "mcp.yourdomain.com"
+ auth:
+ servers:
+ - https://your-idp.example.com
+ audiences:
+ - https://mcp.yourdomain.com
+ allow_any_audience: false
+ resource: https://mcp.yourdomain.com/mcp
+ scopes:
+ - user:read:email
+ scope_mode: require_any
+
+logging:
+ level: info
+
+introspection:
+ introspect:
+ enabled: true
+
+operations:
+ source: local
+ paths:
+ - /data/operations/myCart.graphql
+ - /data/operations/myProfileDetails.graphql
+```
+
+Key differences from the local dev config:
+
+- **`host_validation`**: Enabled with explicit allowed hosts instead of disabled
+- **`auth.servers`**: Points to your external IdP instead of the users subgraph
+- **`auth.audiences`**: Uses your production audience value
+- **`auth.resource`**: Uses your production MCP URL
+- **`logging.level`**: Set to `info` instead of `debug`
+
+### Anonymous MCP Discovery
+
+The local dev config enables `allow_anonymous_mcp_discovery`, which lets MCP clients call `initialize`, `tools/list`, and `resources/list` without a Bearer token. This lets users browse available tools before authenticating. All other MCP methods still require a valid OAuth token.
+
+In production, consider whether exposing your tool catalog to unauthenticated callers is acceptable. If your tool names and descriptions are not sensitive, enabling this improves client compatibility (some agent frameworks need to discover tools before initiating OAuth). If tool metadata is confidential, leave it disabled (the default):
+
+```yaml
+transport:
+ auth:
+ allow_anonymous_mcp_discovery: false # default; require auth for all methods
+```
+
+### Disabling Token Passthrough
+
+By default, the MCP server forwards the client's OAuth token to the Router. If your Router uses a different authentication mechanism (e.g., API keys, a service-to-service token), you can disable passthrough:
+
+```yaml
+transport:
+ auth:
+ disable_auth_token_passthrough: true
+```
+
+### Per-Operation Scopes
+
+For finer-grained access control, require specific scopes for specific operations:
+
+```yaml
+overrides:
+ required_scopes:
+ MyProfileDetails:
+ - user:read:email
+ MyCart:
+ - cart:read
+```
+
+When a client calls a tool without the required scopes, the MCP server returns HTTP 403 with a `WWW-Authenticate` header indicating which scopes are needed. Clients can re-authorize with elevated scopes and retry.
+
+## Step 4: Configure the Apollo Router
+
+The Router must validate tokens from the same IdP. Update the `routerConfig` in your Supergraph CRD:
+
+```yaml
+routerConfig:
+ authentication:
+ router:
+ jwt:
+ jwks:
+ - url: https://your-idp.example.com/.well-known/jwks.json
+ authorization:
+ directives:
+ enabled: true
+```
+
+The Router:
+1. Extracts the JWT from the `Authorization: Bearer <token>` header
+2. Validates the signature using the IdP's JWKS endpoint
+3. Extracts claims (`sub`, `scope`, `aud`) into the request context
+4. Enforces `@authenticated` and `@requiresScopes` directives
+5. Forwards the token and request to subgraphs
+
+### Matching Audiences
+
+If your IdP issues tokens with a specific audience, ensure the Router's JWT configuration accepts that audience. The Router validates the `aud` claim by default.
+
+## Step 5: Deploy
+
+### Update the Kubernetes Secret
+
+Replace the credentials secret with production values:
+
+```bash
+kubectl create secret generic apollo-mcp-credentials \
+ --namespace apollo \
+ --from-literal=APOLLO_GRAPH_REF="your-graph-id@production" \
+ --from-literal=APOLLO_KEY="your-apollo-key" \
+ --from-literal=ROUTER_ENDPOINT="http://your-router-service:80" \
+ --from-literal=MCP_RESOURCE_URL="https://mcp.yourdomain.com/mcp" \
+ --dry-run=client -o yaml | kubectl apply -f -
+```
+
+Note that `AUTH_SERVER_URL` is no longer needed since the IdP URL is configured directly in `mcp.yaml`.
+
+### Deploy via Helm
+
+```bash
+helm upgrade --install apollo-mcp-server \
+ deploy/apollo-mcp-server \
+ --namespace apollo \
+ --wait
+```
+
+### Expose the MCP Server
+
+In production, expose the MCP server via an Ingress or LoadBalancer with TLS termination:
+
+```yaml
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+ name: apollo-mcp-server
+ namespace: apollo
+ annotations:
+ cert-manager.io/cluster-issuer: letsencrypt
+spec:
+ tls:
+ - hosts:
+ - mcp.yourdomain.com
+ secretName: mcp-tls
+ rules:
+ - host: mcp.yourdomain.com
+ http:
+ paths:
+ - path: /
+ pathType: Prefix
+ backend:
+ service:
+ name: apollo-mcp-server
+ port:
+ number: 8000
+```
+
+## Security Considerations
+
+### HTTPS Requirement
+
+All production endpoints must use HTTPS:
+- MCP server endpoint
+- OAuth authorization server
+- Token endpoint
+- JWKS endpoint
+
+### Token Passthrough Risks
+
+The MCP server forwards client OAuth tokens to the Router by default. Be aware of:
+
+- **Confused deputy attacks**: If the token's audience is too broad, it could be used to access services beyond the Router
+- **Audience confusion**: Ensure the `aud` claim is specific to your API
+
+Mitigations:
+- Set `allow_any_audience: false` (default) and configure specific `audiences`
+- Use a dedicated audience for your MCP server / GraphQL API
+- Enable `disable_auth_token_passthrough` if the Router uses separate auth
+
+### Token Lifetime
+
+Configure short-lived access tokens (15–60 minutes) with refresh token rotation in your IdP. The demo uses 2-hour tokens for convenience, which is too long for production.
+
+### Scope Least Privilege
+
+Only request the scopes needed for the MCP tools being called. Use `scope_mode: require_any` with per-operation scopes for granular control rather than requiring all scopes on every request.
+
+## Scope Strategy
+
+### Mapping GraphQL Directives to OAuth Scopes
+
+The scopes enforced by the MCP server should align with the authorization directives in your supergraph schema:
+
+```graphql
+# Schema directive → MCP scope
+type User @authenticated {
+ email: String @requiresScopes(scopes: [["user:read:email"]])
+}
+
+# MCP config
+transport:
+ auth:
+ scopes:
+ - user:read:email
+ scope_mode: require_any
+```
+
+### Scope Hierarchies
+
+For larger APIs, consider hierarchical scopes:
+
+```yaml
+overrides:
+ required_scopes:
+ MyProfileDetails:
+ - user:read
+ UpdateProfile:
+ - user:write
+ DeleteAccount:
+ - user:write
+ - admin
+```
+
+## Per-Operation Scope Requirements
+
+The global `scopes` and `scope_mode` apply to every request. For step-up authorization, use `overrides.required_scopes`:
+
+```yaml
+transport:
+ auth:
+ scopes:
+ - user:read:email
+ scope_mode: require_any
+
+overrides:
+ required_scopes:
+ SensitiveOperation:
+ - admin
+ - user:write
+```
+
+When a tool call is missing required scopes, the server returns:
+
+```http
+HTTP/1.1 403 Forbidden
+WWW-Authenticate: Bearer error="insufficient_scope", scope="admin user:write"
+```
+
+Clients use this response to re-authorize with the needed scopes.
+
+## Networking and DNS
+
+### No `/etc/hosts` Workaround
+
+In production, the MCP server, Router, and IdP all have real DNS names:
+
+| Component | URL |
+|-----------|-----|
+| MCP Server (external) | `https://mcp.yourdomain.com/mcp` |
+| Router (internal) | `http://router-service.apollo.svc.cluster.local:80` |
+| Identity Provider | `https://your-idp.example.com` |
+
+The MCP server runs inside the cluster and reaches the Router via in-cluster DNS. External clients reach the MCP server via its public URL. The IdP is accessible from both locations since it's an external service.
+
+### Host Validation
+
+Enable host validation in production to prevent DNS rebinding attacks:
+
+```yaml
+transport:
+ host_validation:
+ allowed_hosts:
+ - "mcp.yourdomain.com"
+```
+
+## Troubleshooting
+
+### Token Validation Failures
+
+**Symptom:** `401 Unauthorized` on every MCP request after successful OAuth flow.
+
+**Checks:**
+1. Verify the MCP server can reach the IdP's JWKS endpoint from inside the cluster:
+ ```bash
+ kubectl exec -n apollo deployment/apollo-mcp-server -- \
+ wget -qO- https://your-idp.example.com/.well-known/jwks.json
+ ```
+2. Verify the `aud` claim in the token matches `auth.audiences` in `mcp.yaml`
+3. Check that the token hasn't expired
+
+### Scope Mismatches
+
+**Symptom:** `403 Forbidden` when calling MCP tools.
+
+**Checks:**
+1. Decode the JWT and inspect the `scope` claim (use [jwt.io](https://jwt.io))
+2. Compare with the scopes configured in `mcp.yaml`
+3. Check `scope_mode` — `require_all` (default) is stricter than `require_any`
+
+### Discovery Failures
+
+**Symptom:** MCP client can't find the authorization server.
+
+**Checks:**
+1. Verify the IdP serves metadata at one of:
+ - `/.well-known/oauth-authorization-server`
+ - `/.well-known/openid-configuration`
+2. If the IdP is slow, increase `discovery_timeout`:
+ ```yaml
+ transport:
+ auth:
+ discovery_timeout: 10s
+ ```
+
+### Router Rejects Forwarded Tokens
+
+**Symptom:** MCP tools return GraphQL errors with `UNAUTHENTICATED` or fields return `null`.
+
+**Checks:**
+1. Ensure the Router's JWKS URL points to the same IdP as the MCP server
+2. Verify the Router accepts the token's audience
+3. Check that header propagation is configured:
+ ```yaml
+ routerConfig:
+ headers:
+ all:
+ request:
+ - propagate:
+ matching: ".*"
+ ```
diff --git a/docs/setup.md b/docs/setup.md
index d21bf7c..11be766 100644
--- a/docs/setup.md
+++ b/docs/setup.md
@@ -10,6 +10,8 @@ This guide will walk you through setting up the Apollo Federation Supergraph ref
- [Step 2: Configure Environment Variables](#step-2-configure-environment-variables)
- [Step 3: Run Setup Scripts](#step-3-run-setup-scripts)
- [Step 4: Access Your Supergraph](#step-4-access-your-supergraph)
+ - [Step 5: Logging Into the Client Application](#step-5-logging-into-the-client-application)
+ - [Step 6: Connect AI Agents via MCP](#step-6-connect-ai-agents-via-mcp)
- [Creating Additional Environments](#creating-additional-environments)
## Prerequisites
@@ -238,6 +240,39 @@ kubectl port-forward -n monitoring svc/zipkin 9411:9411
Then open http://localhost:9411 in your browser to view traces.
+### Script 12: Deploy Apollo MCP Server (Optional)
+
+Deploy the [Apollo MCP Server](https://www.apollographql.com/docs/apollo-mcp-server) to expose your supergraph to AI agents and LLM tools via the [Model Context Protocol](https://modelcontextprotocol.io/):
+
+```bash
+./scripts/minikube/12-deploy-mcp-server.sh
+```
+
+This script:
+- Creates a Kubernetes secret with Apollo GraphOS credentials and endpoint configuration
+- Deploys the Apollo MCP Server via Helm into the `apollo` namespace
+- Configures the MCP server to connect to the local Router instance
+- Enables OAuth 2.1 authentication using the users subgraph as the authorization server
+- Waits for the MCP server pod to be ready
+
+**Prerequisites:** The router (script 08) and subgraphs (script 05) must be deployed first. The MCP server connects to the router and uses the users subgraph for authentication.
+
+### Script 12a: Start MCP Port Forwards (Optional)
+
+After deploying the MCP server, start the required port-forwards for local access:
+
+```bash
+./scripts/minikube/12a-mcp-port-forwards.sh
+```
+
+This script:
+- Starts port-forwards for the MCP server (localhost:5001) and the OAuth auth server (localhost:4001)
+- Adds the required `/etc/hosts` entry for the OAuth flow (prompts for sudo)
+- Verifies connectivity to both services
+- Keeps running until you press Ctrl+C
+
+After running this script, see [Step 6: Connect AI Agents via MCP](#step-6-connect-ai-agents-via-mcp) for MCP client configuration.
+
## Step 4: Access Your Supergraph
After running all scripts, you can access your supergraph in several ways:
@@ -345,6 +380,188 @@ Available test users and their scopes:
- `user1`, `user2`, `user3` - Have `user:read:email` scope
- `inventoryManager` - Has `user:read:email` and `inventory:read` scopes
+## Step 6: Connect AI Agents via MCP
+
+If you deployed the Apollo MCP Server (script 12), you can connect AI agents and LLM tools to your supergraph. This requires a few networking steps because both the MCP server and its OAuth authorization server run inside the Minikube cluster.
+
+> For production deployment guidance — including provider-specific IdP configuration (redirect URLs, logout URLs, scopes, audience) for Auth0, Okta, and Keycloak — see the [MCP Production Guide](./mcp-production.md#step-2-configure-the-identity-provider).
+
+### Prerequisites
+
+- Apollo MCP Server deployed (script 12)
+- `npx` available (comes with Node.js)
+- Two available terminal windows for port-forwarding
+
+### Step 6a: Start Port Forwards
+
+The MCP server and the OAuth authorization server (users subgraph) both need to be reachable from your local machine.
+
+**Option A: Use the helper script (recommended)**
+
+```bash
+./scripts/minikube/12a-mcp-port-forwards.sh
+```
+
+This starts both port-forwards, adds the `/etc/hosts` entry (Step 6b), and verifies connectivity. Keep the script running — press Ctrl+C to stop.
+
+**Option B: Manual port-forwards**
+
+Open two terminal windows:
+
+**Terminal 1 — MCP Server:**
+
+```bash
+kubectl port-forward -n apollo svc/apollo-mcp-server 5001:8000
+```
+
+**Terminal 2 — OAuth Authorization Server:**
+
+```bash
+kubectl port-forward -n users svc/graphql 4001:4001
+```
+
+Keep both terminals running. If either port-forward drops (e.g., after a pod restart), restart it.
+
+### Step 6b: Add DNS Entry for the Authorization Server
+
+> If you used the helper script (12a) in Step 6a, this was already handled for you. Skip to Step 6c.
+
+The MCP server's OAuth configuration references the users subgraph by its in-cluster DNS name (`graphql.users.svc.cluster.local`). For the OAuth flow to work from your local machine, this hostname must resolve to `localhost` where the port-forward is listening.
+
+Add this entry to your `/etc/hosts` file:
+
+```bash
+echo '127.0.0.1 graphql.users.svc.cluster.local' | sudo tee -a /etc/hosts
+```
+
+Verify it works:
+
+```bash
+curl -s http://graphql.users.svc.cluster.local:4001/.well-known/oauth-authorization-server | python3 -m json.tool
+```
+
+You should see OAuth metadata including `authorization_endpoint`, `token_endpoint`, and `registration_endpoint`.
+
+**Why is this needed?** The MCP server advertises its authorization server URL using the in-cluster DNS name. MCP clients (like `mcp-remote`) follow this URL to start the OAuth flow. Without the hosts entry, your local machine can't resolve the cluster-internal hostname. Inside the cluster, the same hostname resolves normally via Kubernetes DNS, so the MCP server can validate tokens by fetching JWKS from the same URL.
+
+### Step 6c: Configure Your MCP Client
+
+#### Claude Desktop
+
+Edit `~/Library/Application Support/Claude/claude_desktop_config.json`:
+
+```json
+{
+ "mcpServers": {
+ "apollo-reference-arch": {
+ "command": "npx",
+ "args": [
+ "mcp-remote",
+ "http://localhost:5001/mcp",
+ "--transport",
+ "http-only"
+ ]
+ }
+ }
+}
+```
+
+Restart Claude Desktop. Your browser will open a login page where you enter your username and password (same test users as the client app — see [Step 5](#step-5-logging-into-the-client-application)). After signing in, the MCP tools should appear in the tool list.
+
+**Troubleshooting Claude Desktop:**
+
+- If you see `EADDRINUSE` errors, kill stale `mcp-remote` processes:
+
+```bash
+pkill -f "mcp-remote.*localhost:5001"
+```
+
+- If authorization fails, clear the `mcp-remote` auth cache and restart:
+
+```bash
+rm -rf ~/.mcp-auth/mcp-remote-*/
+```
+
+#### Claude Code (CLI)
+
+```bash
+claude mcp add --transport http apollo-mcp -- npx mcp-remote http://localhost:5001/mcp --transport http-only
+```
+
+#### Cursor
+
+Add to your MCP settings (`.cursor/mcp.json`):
+
+```json
+{
+ "mcpServers": {
+ "apollo-reference-arch": {
+ "command": "npx",
+ "args": [
+ "mcp-remote",
+ "http://localhost:5001/mcp",
+ "--transport",
+ "http-only"
+ ]
+ }
+ }
+}
+```
+
+### Step 6d: Verify the Connection
+
+Use MCP Inspector to verify tools are available:
+
+```bash
+npx @modelcontextprotocol/inspector http://localhost:5001/mcp --transport http
+```
+
+This opens a browser at `http://127.0.0.1:6274` where you can click **Connect** and then **List Tools** to verify the available operations.
+
+### How Authentication Works
+
+The MCP server uses [OAuth 2.1](https://www.apollographql.com/docs/apollo-mcp-server/auth) with the users subgraph acting as the authorization server. The full flow:
+
+```
+MCP Client (mcp-remote) MCP Server Users Subgraph
+ | | |
+ |-- GET /.well-known/oauth-auth... ---->| |
+ |<-- auth server URL -------------------| (graphql.users.svc...:4001) |
+ | | |
+ |-- POST /register ---------------------------------------->|
+ |<-- client_id, client_secret ----------------------------- |
+ | | |
+ |-- GET /authorize (browser) -------------------------------->|
+ |<-- redirect with auth code -------------------------------- |
+ | | |
+ |-- POST /token (exchange code) ------------------------------>|
+ |<-- JWT access token ---------------------------------------- |
+ | | |
+ |-- POST /mcp + Authorization: Bearer -->| |
+ | |-- validate JWT (JWKS) ------>|
+ | |-- GraphQL query + token ---> Router
+ |<-- tool results ----------------------| |
+```
+
+1. **Tool Discovery** — The MCP server allows unauthenticated `initialize` and `tools/list` calls (`allow_anonymous_mcp_discovery: true`), so MCP clients can display available tools before the user signs in
+2. **Auth Server Discovery** — When a tool is invoked, `mcp-remote` gets a `401` with a `WWW-Authenticate` header pointing to the Protected Resource Metadata, which in turn references the users subgraph as the authorization server
+3. **Client Registration** — `mcp-remote` registers itself either via [Client ID Metadata Documents](https://modelcontextprotocol.io/specification/draft/basic/authorization#client-id-metadata-documents) (if supported) or dynamically via RFC 7591. The authorization server advertises `client_id_metadata_document_supported: true` and supports both approaches
+4. **Authorization** — The user's browser opens a login page at the `/authorize` endpoint, where they sign in with their username and password. For CIMD clients, the login page shows the client's name and redirect hostname
+5. **Token Exchange** — `mcp-remote` exchanges the authorization code for a JWT access token
+6. **Authenticated Requests** — The MCP server validates the JWT, then forwards it to the Router as a Bearer token. The Router enforces `@authenticated` and `@requiresScopes` directives as usual
+
+### Available MCP Tools
+
+The MCP server exposes pre-defined GraphQL operations as tools:
+
+| Tool | Description | Operation |
+|------|-------------|-----------|
+| `MyProfileDetails` | Fetches the authenticated user's profile (username, email, address, loyalty points) | `query { me { id username email shippingAddress ... } }` |
+| `MyCart` | Fetches the authenticated user's shopping cart with full product details | `query { me { cart { items { product { ... } } } } }` |
+| `introspect` | Explores the GraphQL schema by type name | Built-in schema introspection |
+
+Operations are defined in `deploy/apollo-mcp-server/operations/` and can be customized by adding or modifying `.graphql` files.
+
## Creating Additional Environments
To create a new environment (e.g., "prod"):
diff --git a/scripts/minikube/02-setup-apollo-graph.sh b/scripts/minikube/02-setup-apollo-graph.sh
index 4e58e70..88efc26 100755
--- a/scripts/minikube/02-setup-apollo-graph.sh
+++ b/scripts/minikube/02-setup-apollo-graph.sh
@@ -166,6 +166,7 @@ fi
echo "" >> "$ENV_FILE"
echo "# Apollo GraphOS Configuration (generated by 02-setup-apollo-graph.sh)" >> "$ENV_FILE"
echo "export APOLLO_GRAPH_ID=\"$GRAPH_ID\"" >> "$ENV_FILE"
+echo "export APOLLO_GRAPH_REF=\"$GRAPH_ID@$ENVIRONMENT\"" >> "$ENV_FILE"
echo "export APOLLO_KEY=\"$GRAPH_KEY\"" >> "$ENV_FILE"
echo "export OPERATOR_KEY=\"$OPERATOR_KEY\"" >> "$ENV_FILE"
echo "" >> "$ENV_FILE"
diff --git a/scripts/minikube/12-deploy-mcp-server.sh b/scripts/minikube/12-deploy-mcp-server.sh
new file mode 100755
index 0000000..eae12b1
--- /dev/null
+++ b/scripts/minikube/12-deploy-mcp-server.sh
@@ -0,0 +1,126 @@
#!/bin/bash
# Script 12: Deploy the Apollo MCP Server into the 'apollo' namespace via Helm.
# Reads APOLLO_GRAPH_REF / APOLLO_KEY from the repo-root .env (generated by
# script 02), creates the credentials secret, installs the chart, and waits
# for the pod to become ready. Prerequisites: minikube running, router and
# subgraphs already deployed (scripts 05 and 08).
set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
REPO_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color

echo -e "${GREEN}=== Apollo MCP Server Deployment ===${NC}"
echo ""

# Check prerequisites
for cmd in minikube kubectl helm; do
  if ! command -v $cmd &> /dev/null; then
    echo -e "${RED}Error: $cmd is not installed.${NC}"
    exit 1
  fi
done

# Verify minikube is running
if ! minikube status --format='{{.Host}}' 2>/dev/null | grep -q "Running"; then
  echo -e "${RED}Error: Minikube is not running. Start it with 'minikube start'.${NC}"
  exit 1
fi

echo -e "${GREEN}Prerequisites check passed.${NC}"
echo ""

# Load environment variables
if [ -f "$REPO_ROOT/.env" ]; then
  source "$REPO_ROOT/.env"
else
  echo -e "${RED}Error: .env file not found at $REPO_ROOT/.env${NC}"
  echo "Run script 02 first to generate the .env file."
  exit 1
fi

# Validate required environment variables
if [ -z "${APOLLO_GRAPH_REF:-}" ]; then
  echo -e "${RED}Error: APOLLO_GRAPH_REF is not set in .env${NC}"
  echo "Run script 02 first to set up the Apollo graph."
  exit 1
fi

if [ -z "${APOLLO_KEY:-}" ]; then
  echo -e "${RED}Error: APOLLO_KEY is not set in .env${NC}"
  exit 1
fi

NAMESPACE="apollo"
ENVIRONMENT="${ENVIRONMENT:-dev}"

# Ensure namespace exists
if ! kubectl get namespace "$NAMESPACE" &>/dev/null; then
  echo "Creating namespace '$NAMESPACE'..."
  kubectl create namespace "$NAMESPACE"
fi

# Create or update the MCP credentials secret.
# ROUTER_ENDPOINT uses the in-cluster DNS name of the router service;
# MCP_RESOURCE_URL / AUTH_SERVER_URL are the localhost port-forward addresses
# used for local development (see scripts/minikube/12a-mcp-port-forwards.sh).
# The dry-run | apply pattern makes this idempotent across re-runs.
echo "Creating MCP credentials secret..."
kubectl create secret generic apollo-mcp-credentials \
  --namespace "$NAMESPACE" \
  --from-literal=APOLLO_GRAPH_REF="$APOLLO_GRAPH_REF" \
  --from-literal=APOLLO_KEY="$APOLLO_KEY" \
  --from-literal=ROUTER_ENDPOINT="http://reference-architecture-${ENVIRONMENT}.${NAMESPACE}.svc.cluster.local:80" \
  --from-literal=MCP_RESOURCE_URL="http://localhost:5001/mcp" \
  --from-literal=AUTH_SERVER_URL="http://localhost:4001" \
  --dry-run=client -o yaml | kubectl apply -f -

echo -e "${GREEN}MCP credentials secret created/updated.${NC}"
echo ""

# Install/upgrade the Helm chart
echo "Deploying Apollo MCP Server via Helm..."
helm upgrade --install apollo-mcp-server \
  "$REPO_ROOT/deploy/apollo-mcp-server" \
  --namespace "$NAMESPACE" \
  --wait \
  --timeout 120s

echo -e "${GREEN}Helm release installed/upgraded.${NC}"
echo ""

# Wait for the pod to be ready (belt-and-braces on top of helm --wait)
echo "Waiting for MCP server pod to be ready..."
kubectl wait --for=condition=ready pod \
  -l app=apollo-mcp-server \
  -n "$NAMESPACE" \
  --timeout=120s

echo ""
echo -e "${GREEN}=== Apollo MCP Server Deployed Successfully ===${NC}"
echo ""
echo -e "To access the MCP server locally, start both port-forwards:"
echo ""
echo -e "  ${YELLOW}# Terminal 1: MCP server${NC}"
echo -e "  ${YELLOW}kubectl port-forward -n $NAMESPACE svc/apollo-mcp-server 5001:8000${NC}"
echo ""
echo -e "  ${YELLOW}# Terminal 2: Auth server (users subgraph for OAuth)${NC}"
echo -e "  ${YELLOW}kubectl port-forward -n users svc/graphql 4001:4001${NC}"
echo ""
echo -e "Then configure your MCP client to connect to:"
echo -e "  ${YELLOW}http://localhost:5001/mcp${NC}"
echo ""
echo -e "Add a DNS entry so your machine can reach the OAuth server by its in-cluster name:"
echo -e "  ${YELLOW}echo '127.0.0.1 graphql.users.svc.cluster.local' | sudo tee -a /etc/hosts${NC}"
echo ""
echo -e "Example: Add to Claude Desktop config (~/Library/Application Support/Claude/claude_desktop_config.json):"
echo -e '  {
    "mcpServers": {
      "apollo-reference-arch": {
        "command": "npx",
        "args": ["mcp-remote", "http://localhost:5001/mcp", "--transport", "http-only"]
      }
    }
  }'
echo ""
echo -e "To test with MCP Inspector:"
echo -e "  ${YELLOW}npx @modelcontextprotocol/inspector http://localhost:5001/mcp --transport http${NC}"
echo ""
echo -e "For full instructions, see docs/setup.md (Step 6) and docs/mcp-production.md."
echo ""
diff --git a/scripts/minikube/12a-mcp-port-forwards.sh b/scripts/minikube/12a-mcp-port-forwards.sh
new file mode 100755
index 0000000..3b978c3
--- /dev/null
+++ b/scripts/minikube/12a-mcp-port-forwards.sh
@@ -0,0 +1,150 @@
#!/bin/bash
set -euo pipefail

# Script 12a: Start port-forwards for the Apollo MCP Server
# Starts both the MCP server and OAuth auth server port-forwards in the background,
# adds the /etc/hosts entry if missing, and verifies connectivity.

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
REPO_ROOT="$(cd "$SCRIPT_DIR/../.." && pwd)"

RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m'

NAMESPACE="apollo"
AUTH_NAMESPACE="users"
MCP_LOCAL_PORT=5001
MCP_REMOTE_PORT=8000
AUTH_LOCAL_PORT=4001
AUTH_REMOTE_PORT=4001
AUTH_HOSTNAME="graphql.users.svc.cluster.local"

echo -e "${GREEN}=== MCP Server Port Forwards ===${NC}"
echo ""

# Check prerequisites
if ! command -v kubectl &> /dev/null; then
  echo -e "${RED}Error: kubectl is not installed.${NC}"
  exit 1
fi

# Fix: '&> /dev/null 2>&1' was redundant — '&>' already redirects both streams.
if ! kubectl cluster-info &> /dev/null; then
  echo -e "${RED}Error: Cannot connect to Kubernetes cluster.${NC}"
  exit 1
fi

# Verify the MCP server pod is running
MCP_READY=$(kubectl get pods -n "$NAMESPACE" -l app=apollo-mcp-server --no-headers 2>/dev/null | grep -c "Running" || true)
if [ "$MCP_READY" -eq 0 ]; then
  echo -e "${RED}Error: Apollo MCP Server is not running in the '$NAMESPACE' namespace.${NC}"
  echo "Deploy it first with: ./scripts/minikube/12-deploy-mcp-server.sh"
  exit 1
fi

# Verify the auth server pod is running
AUTH_READY=$(kubectl get pods -n "$AUTH_NAMESPACE" --no-headers 2>/dev/null | grep -c "Running" || true)
if [ "$AUTH_READY" -eq 0 ]; then
  echo -e "${RED}Error: Users subgraph (auth server) is not running in the '$AUTH_NAMESPACE' namespace.${NC}"
  echo "Deploy subgraphs first with: ./scripts/minikube/05-deploy-subgraphs.sh"
  exit 1
fi

# Kill any existing port-forwards on these ports.
# $pid may hold several PIDs (one per line); word-splitting is intentional.
for port in $MCP_LOCAL_PORT $AUTH_LOCAL_PORT; do
  pid=$(lsof -ti:"$port" 2>/dev/null || true)
  if [ -n "$pid" ]; then
    echo "Killing existing process on port $port (PID $pid)..."
    kill -9 $pid 2>/dev/null || true
    sleep 1
  fi
done

# Start MCP server port-forward
echo "Starting MCP server port-forward (localhost:$MCP_LOCAL_PORT -> svc/apollo-mcp-server:$MCP_REMOTE_PORT)..."
kubectl port-forward -n "$NAMESPACE" svc/apollo-mcp-server "$MCP_LOCAL_PORT:$MCP_REMOTE_PORT" &
MCP_PF_PID=$!

# Start auth server port-forward
echo "Starting auth server port-forward (localhost:$AUTH_LOCAL_PORT -> svc/graphql:$AUTH_REMOTE_PORT)..."
kubectl port-forward -n "$AUTH_NAMESPACE" svc/graphql "$AUTH_LOCAL_PORT:$AUTH_REMOTE_PORT" &
AUTH_PF_PID=$!

# Fix: make sure the background port-forwards never outlive this script
# (e.g. if it is killed rather than interrupted with Ctrl+C).
trap 'kill $MCP_PF_PID $AUTH_PF_PID 2>/dev/null || true' EXIT

# Wait for port-forwards to establish
echo ""
echo "Waiting for port-forwards to establish..."
sleep 3

# Verify port-forwards are alive (kill -0 = existence check, sends no signal)
FAILED=false

if ! kill -0 $MCP_PF_PID 2>/dev/null; then
  echo -e "${RED}MCP server port-forward failed to start.${NC}"
  FAILED=true
fi

if ! kill -0 $AUTH_PF_PID 2>/dev/null; then
  echo -e "${RED}Auth server port-forward failed to start.${NC}"
  FAILED=true
fi

if [ "$FAILED" = true ]; then
  echo -e "${RED}One or more port-forwards failed. Check the output above for errors.${NC}"
  exit 1
fi

# Check /etc/hosts entry (needed so the in-cluster auth hostname resolves locally)
echo ""
if grep -q "$AUTH_HOSTNAME" /etc/hosts 2>/dev/null; then
  echo -e "${GREEN}/etc/hosts already has an entry for $AUTH_HOSTNAME${NC}"
else
  echo -e "${YELLOW}Missing /etc/hosts entry for $AUTH_HOSTNAME${NC}"
  echo ""
  echo "The MCP OAuth flow requires this DNS entry. Run:"
  echo -e "  ${YELLOW}echo '127.0.0.1 $AUTH_HOSTNAME' | sudo tee -a /etc/hosts${NC}"
  echo ""
  read -p "Add it now? (requires sudo password) [y/N] " -n 1 -r
  echo ""
  if [[ $REPLY =~ ^[Yy]$ ]]; then
    echo "127.0.0.1 $AUTH_HOSTNAME" | sudo tee -a /etc/hosts
    echo -e "${GREEN}Added /etc/hosts entry.${NC}"
  else
    echo -e "${YELLOW}Skipped. You'll need to add it manually before connecting MCP clients.${NC}"
  fi
fi

# Verify connectivity
echo ""
echo "Verifying connectivity..."

MCP_STATUS=$(curl -s -o /dev/null -w "%{http_code}" "http://localhost:$MCP_LOCAL_PORT/mcp" 2>/dev/null || echo "000")
AUTH_STATUS=$(curl -s -o /dev/null -w "%{http_code}" "http://localhost:$AUTH_LOCAL_PORT/.well-known/oauth-authorization-server" 2>/dev/null || echo "000")

if [ "$MCP_STATUS" = "401" ]; then
  echo -e "  MCP server:  ${GREEN}reachable${NC} (HTTP 401 — auth required, as expected)"
else
  echo -e "  MCP server:  ${RED}HTTP $MCP_STATUS${NC}"
fi

if [ "$AUTH_STATUS" = "200" ]; then
  echo -e "  Auth server: ${GREEN}reachable${NC} (HTTP 200)"
else
  echo -e "  Auth server: ${RED}HTTP $AUTH_STATUS${NC}"
fi

echo ""
echo -e "${GREEN}=== Port Forwards Running ===${NC}"
echo ""
echo -e "  MCP server:  ${YELLOW}http://localhost:$MCP_LOCAL_PORT/mcp${NC} (PID $MCP_PF_PID)"
echo -e "  Auth server: ${YELLOW}http://localhost:$AUTH_LOCAL_PORT${NC} (PID $AUTH_PF_PID)"
echo ""
echo -e "To stop port-forwards:"
echo -e "  ${YELLOW}kill $MCP_PF_PID $AUTH_PF_PID${NC}"
echo ""
echo -e "Connect your MCP client to: ${YELLOW}http://localhost:$MCP_LOCAL_PORT/mcp${NC}"
echo ""

# Wait for both background processes (keeps the script running)
echo "Press Ctrl+C to stop all port-forwards."
wait
diff --git a/subgraphs/users/keys/jwks.json b/subgraphs/users/keys/jwks.json
index 574ae23..b7c9414 100644
--- a/subgraphs/users/keys/jwks.json
+++ b/subgraphs/users/keys/jwks.json
@@ -3,6 +3,9 @@
{
"kty": "EC",
"crv": "P-256",
+ "alg": "ES256",
+ "use": "sig",
+ "kid": "main-key-2024",
"x": "Ku7NHl_Biej1FB0GMBrN2NRxrUBj2PbBeFPj_OnZPyo",
"y": "HZUWbmSA7PjVskGPGh7NAmKx3AOLLVpbCgg07jvTVV8",
"x5c": [
diff --git a/subgraphs/users/src/index.ts b/subgraphs/users/src/index.ts
index 426b7f9..748b741 100644
--- a/subgraphs/users/src/index.ts
+++ b/subgraphs/users/src/index.ts
@@ -8,20 +8,127 @@ import {
} from "@apollo/server/standalone";
import * as jose from 'jose'
import cors from 'cors'
+import crypto from 'crypto'
import { readFileSync } from "fs";
+import { readFile } from "fs/promises";
+import { createPrivateKey } from "crypto";
import gql from "graphql-tag";
import { buildSubgraphSchema } from "@apollo/subgraph";
import { resolvers } from "./resolvers";
+import { users } from "./resolvers/data.js";
import { DataSourceContext } from "./types/DataSourceContext";
const app = express();
const port = process.env.PORT ?? "4001";
const subgraphName = require("../package.json").name;
-// For demo purposes, we are hosting the JWKS endpoint
const jwks = readFileSync("./keys/jwks.json", "utf-8");
const joseJWKS = jose.createLocalJWKSet(JSON.parse(jwks));
// In-memory stores for OAuth (demo purposes)
// NOTE(review): nothing here is persisted or size-bounded — restarting the
// subgraph drops all dynamic client registrations and pending auth codes.
const registeredClients = new Map();   // presumably client_id -> registered client record (RFC 7591 / static fallback) — confirm against handlers below
const authorizationCodes = new Map();  // presumably auth code -> pending grant details — confirm against the /token handler
+
+// --- Client ID Metadata Documents (draft-ietf-oauth-client-id-metadata-document-00) ---
+
+interface ClientMetadataDocument {
+ client_id: string;
+ client_name: string;
+ redirect_uris: string[];
+ client_uri?: string;
+ logo_uri?: string;
+ grant_types?: string[];
+ response_types?: string[];
+ token_endpoint_auth_method?: string;
+}
+
+const cimdCache = new Map();
+
// Regexes matching private / non-routable IPv4 hostname literals. Used as a
// basic SSRF guard to reject HTTPS client_id URLs that point into internal
// networks (see fetchClientMetadata). Loopback (127.x) is not listed here;
// presumably intentional since localhost is special-cased by the protocol
// checks — TODO confirm.
const PRIVATE_IP_PATTERNS = [
  /^10\./,                       // 10.0.0.0/8
  /^172\.(1[6-9]|2\d|3[01])\./,  // 172.16.0.0/12
  /^192\.168\./,                 // 192.168.0.0/16
  /^169\.254\./,                 // link-local 169.254.0.0/16
  /^0\./,                        // "this network" 0.0.0.0/8
];
+
+function isUrlClientId(clientId: string | undefined): boolean {
+ if (!clientId) return false;
+ try {
+ const url = new URL(clientId);
+ const isHttps = url.protocol === 'https:';
+ const isLocalDev = (url.protocol === 'http:') &&
+ (url.hostname === 'localhost' || url.hostname === '127.0.0.1');
+ return (isHttps || isLocalDev) && url.pathname.length > 1;
+ } catch {
+ return false;
+ }
+}
+
+async function fetchClientMetadata(clientIdUrl: string): Promise {
+ const cached = cimdCache.get(clientIdUrl);
+ if (cached && cached.expires_at > Date.now()) return cached.doc;
+
+ const url = new URL(clientIdUrl);
+
+ if (url.protocol !== 'https:' && url.hostname !== 'localhost' && url.hostname !== '127.0.0.1') {
+ throw new Error('client_id URL must use HTTPS (http allowed only for localhost)');
+ }
+
+ if (url.protocol === 'https:' && PRIVATE_IP_PATTERNS.some(p => p.test(url.hostname))) {
+ throw new Error('client_id URL must not point to a private IP address');
+ }
+
+ const resp = await fetch(clientIdUrl, {
+ headers: { Accept: 'application/json' },
+ signal: AbortSignal.timeout(5000),
+ });
+ if (!resp.ok) throw new Error(`Failed to fetch client metadata: HTTP ${resp.status}`);
+
+ const doc: ClientMetadataDocument = await resp.json();
+
+ if (doc.client_id !== clientIdUrl) {
+ throw new Error('client_id in metadata document does not match the URL');
+ }
+ if (!doc.client_name || typeof doc.client_name !== 'string') {
+ throw new Error('Metadata document missing required field: client_name');
+ }
+ if (!Array.isArray(doc.redirect_uris) || doc.redirect_uris.length === 0) {
+ throw new Error('Metadata document missing required field: redirect_uris');
+ }
+
+ const cacheControl = resp.headers.get('cache-control');
+ const maxAge = cacheControl?.match(/max-age=(\d+)/)?.[1];
+ const ttl = maxAge ? parseInt(maxAge, 10) * 1000 : 5 * 60 * 1000;
+ cimdCache.set(clientIdUrl, { doc, expires_at: Date.now() + ttl });
+
+ return doc;
+}
+
+function validateRedirectUri(redirectUri: string, allowedUris: string[]): boolean {
+ return allowedUris.includes(redirectUri);
+}
+
+function getRedirectHostname(redirectUri: string): string {
+ try { return new URL(redirectUri).hostname; } catch { return redirectUri; }
+}
+
+function escapeHtml(str: string | undefined): string {
+ if (!str) return '';
+ return str
+ .replace(/&/g, '&')
+ .replace(//g, '>')
+ .replace(/"/g, '"')
+ .replace(/'/g, ''');
+}
+
+function getIssuer(req: express.Request): string {
+ const host = req.headers['x-forwarded-host'] || req.headers.host || `localhost:${port}`;
+ const protocol = req.headers['x-forwarded-proto'] || 'http';
+ return `${protocol}://${host}`;
+}
+
const context: ContextFunction<
[StandaloneServerContextFunctionArgument],
DataSourceContext
@@ -43,6 +150,83 @@ const context: ContextFunction<
return context
};
+type OAuthParams = { client_id: string; redirect_uri: string; state: string; scope: string; code_challenge: string; code_challenge_method: string };
+type CimdDisplayInfo = { client_name: string; client_uri?: string; logo_uri?: string; redirect_hostname: string };
+
+function renderLoginPage(res: express.Response, params: OAuthParams, error?: string, cimd?: CimdDisplayInfo) {
+ const errorHtml = error ? `