diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index f048e7a8..c035b3a8 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -12,11 +12,11 @@ concurrency: env: GO_VERSION: 1.23 - NPM_TOKEN: ${{ secrets.NPM_TOKEN }} UV_PUBLISH_TOKEN: ${{ secrets.UV_PUBLISH_TOKEN }} permissions: contents: write + id-token: write # Required for npmjs OIDC discussions: write jobs: @@ -39,6 +39,12 @@ jobs: files: | LICENSE kubernetes-mcp-server-* + # Ensure npm 11.5.1 or later is installed (required for https://docs.npmjs.com/trusted-publishers) + - name: Setup node + uses: actions/setup-node@v6 + with: + node-version: 24 + registry-url: 'https://registry.npmjs.org' - name: Publish npm run: make npm-publish diff --git a/AGENTS.md b/AGENTS.md index 854cfe5d..485ac1ad 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -11,12 +11,15 @@ This MCP server enables AI assistants (like Claude, Gemini, Cursor, and others) - Go package layout follows the standard Go conventions: - `cmd/kubernetes-mcp-server/` – main application entry point using Cobra CLI framework. - `pkg/` – libraries grouped by domain. + - `api/` - API-related functionality, tool definitions, and toolset interfaces. - `config/` – configuration management. - `helm/` - Helm chart operations integration. - `http/` - HTTP server and authorization middleware. - `kubernetes/` - Kubernetes client management, authentication, and access control. - `mcp/` - Model Context Protocol (MCP) server implementation with tool registration and STDIO/HTTP support. - `output/` - output formatting and rendering. + - `toolsets/` - Toolset registration and management for MCP tools. + - `version/` - Version information management. - `.github/` – GitHub-related configuration (Actions workflows, issue templates...). - `docs/` – documentation files. - `npm/` – Node packages that wraps the compiled binaries for distribution through npmjs.com. 
@@ -30,6 +33,21 @@ Implement new functionality in the Go sources under `cmd/` and `pkg/`. The JavaScript (`npm/`) and Python (`python/`) directories only wrap the compiled binary for distribution (npm and PyPI). Most changes will not require touching them unless the version or packaging needs to be updated. +### Adding new MCP tools + +The project uses a toolset-based architecture for organizing MCP tools: + +- **Tool definitions** are created in `pkg/api/` using the `ServerTool` struct. +- **Toolsets** group related tools together (e.g., config tools, core Kubernetes tools, Helm tools). +- **Registration** happens in `pkg/toolsets/` where toolsets are registered at initialization. +- Each toolset lives in its own subdirectory under `pkg/toolsets/` (e.g., `pkg/toolsets/config/`, `pkg/toolsets/core/`, `pkg/toolsets/helm/`). + +When adding a new tool: +1. Define the tool handler function that implements the tool's logic. +2. Create a `ServerTool` struct with the tool definition and handler. +3. Add the tool to an appropriate toolset (or create a new toolset if needed). +4. Register the toolset in `pkg/toolsets/` if it's a new toolset. + ## Building Use the provided Makefile targets: @@ -105,6 +123,45 @@ make lint The `lint` target downloads the specified `golangci-lint` version if it is not already present under `_output/tools/bin/`. 
+## Additional Makefile targets + +Beyond the basic build, test, and lint targets, the Makefile provides additional utilities: + +**Local Development:** +```bash +# Setup a complete local development environment with Kind cluster +make local-env-setup + +# Tear down the local Kind cluster +make local-env-teardown + +# Show Keycloak status and connection info (for OIDC testing) +make keycloak-status + +# Tail Keycloak logs +make keycloak-logs + +# Install required development tools (like Kind) to ./_output/bin/ +make tools +``` + +**Distribution and Publishing:** +```bash +# Copy compiled binaries to each npm package +make npm-copy-binaries + +# Publish the npm packages +make npm-publish + +# Publish the Python packages +make python-publish + +# Update README.md with the latest toolsets +make update-readme-tools +``` + +Run `make help` to see all available targets with descriptions. + ## Dependencies When introducing new modules run `make tidy` so that `go.mod` and `go.sum` remain tidy. diff --git a/CLAUDE.md b/CLAUDE.md new file mode 120000 index 00000000..47dc3e3d --- /dev/null +++ b/CLAUDE.md @@ -0,0 +1 @@ +AGENTS.md \ No newline at end of file diff --git a/Makefile b/Makefile index 7f4e4271..04ff1ac0 100644 --- a/Makefile +++ b/Makefile @@ -16,7 +16,7 @@ LD_FLAGS = -s -w \ COMMON_BUILD_ARGS = -ldflags "$(LD_FLAGS)" GOLANGCI_LINT = $(shell pwd)/_output/tools/bin/golangci-lint -GOLANGCI_LINT_VERSION ?= v2.2.2 +GOLANGCI_LINT_VERSION ?= v2.5.0 # NPM version should not append the -dirty flag NPM_VERSION ?= $(shell echo $(shell git describe --tags --always) | sed 's/^v//') @@ -71,16 +71,14 @@ npm-publish: npm-copy-binaries ## Publish the npm packages $(foreach os,$(OSES),$(foreach arch,$(ARCHS), \ DIRNAME="$(BINARY_NAME)-$(os)-$(arch)"; \ cd npm/$$DIRNAME; \ - echo '//registry.npmjs.org/:_authToken=$(NPM_TOKEN)' >> .npmrc; \ jq '.version = "$(NPM_VERSION)"' package.json > tmp.json && mv tmp.json package.json; \ - npm publish; \ + npm publish --tag latest; \ cd ../..; 
\ )) cp README.md LICENSE ./npm/kubernetes-mcp-server/ - echo '//registry.npmjs.org/:_authToken=$(NPM_TOKEN)' >> ./npm/kubernetes-mcp-server/.npmrc jq '.version = "$(NPM_VERSION)"' ./npm/kubernetes-mcp-server/package.json > tmp.json && mv tmp.json ./npm/kubernetes-mcp-server/package.json; \ jq '.optionalDependencies |= with_entries(.value = "$(NPM_VERSION)")' ./npm/kubernetes-mcp-server/package.json > tmp.json && mv tmp.json ./npm/kubernetes-mcp-server/package.json; \ - cd npm/kubernetes-mcp-server && npm publish + cd npm/kubernetes-mcp-server && npm publish --tag latest .PHONY: python-publish python-publish: ## Publish the python packages @@ -115,3 +113,43 @@ lint: golangci-lint ## Lint the code .PHONY: update-readme-tools update-readme-tools: ## Update the README.md file with the latest toolsets go run ./internal/tools/update-readme/main.go README.md + +##@ Tools + +.PHONY: tools +tools: ## Install all required tools (kind) to ./_output/bin/ + @echo "Checking and installing required tools to ./_output/bin/ ..." + @if [ -f _output/bin/kind ]; then echo "[OK] kind already installed"; else echo "Installing kind..."; $(MAKE) -s kind; fi + @echo "All tools ready!" + +##@ Local Development + +.PHONY: local-env-setup +local-env-setup: ## Setup complete local development environment with Kind cluster + @echo "=========================================" + @echo "Kubernetes MCP Server - Local Setup" + @echo "=========================================" + $(MAKE) tools + $(MAKE) kind-create-cluster + $(MAKE) keycloak-install + $(MAKE) build + @echo "" + @echo "=========================================" + @echo "Local environment ready!" 
+ @echo "=========================================" + @echo "" + @echo "Configuration file generated:" + @echo " _output/config.toml" + @echo "" + @echo "Run the MCP server with:" + @echo " ./$(BINARY_NAME) --port 8008 --config _output/config.toml" + @echo "" + @echo "Or run with MCP inspector:" + @echo " npx @modelcontextprotocol/inspector@latest \$$(pwd)/$(BINARY_NAME) --config _output/config.toml" + +.PHONY: local-env-teardown +local-env-teardown: ## Tear down the local Kind cluster + $(MAKE) kind-delete-cluster + +# Include build configuration files +-include build/*.mk diff --git a/build/keycloak.mk b/build/keycloak.mk new file mode 100644 index 00000000..d541c8b4 --- /dev/null +++ b/build/keycloak.mk @@ -0,0 +1,448 @@ +# Keycloak IdP for development and testing + +KEYCLOAK_NAMESPACE = keycloak +KEYCLOAK_ADMIN_USER = admin +KEYCLOAK_ADMIN_PASSWORD = admin + +.PHONY: keycloak-install +keycloak-install: + @echo "Installing Keycloak (dev mode using official image)..." + @kubectl apply -f dev/config/keycloak/deployment.yaml + @echo "Applying Keycloak ingress (cert-manager will create TLS certificate)..." + @kubectl apply -f dev/config/keycloak/ingress.yaml + @echo "Extracting cert-manager CA certificate..." + @mkdir -p _output/cert-manager-ca + @kubectl get secret selfsigned-ca-secret -n cert-manager -o jsonpath='{.data.ca\.crt}' | base64 -d > _output/cert-manager-ca/ca.crt + @echo "✅ cert-manager CA certificate extracted to _output/cert-manager-ca/ca.crt (bind-mounted to API server)" + @echo "Restarting Kubernetes API server to pick up new CA..." + @docker exec kubernetes-mcp-server-control-plane pkill -f kube-apiserver || \ + podman exec kubernetes-mcp-server-control-plane pkill -f kube-apiserver + @echo "Waiting for API server to restart..." + @sleep 5 + @echo "Waiting for API server to be ready..." 
+ @for i in $$(seq 1 30); do \ + if kubectl get --raw /healthz >/dev/null 2>&1; then \ + echo "✅ Kubernetes API server updated with cert-manager CA"; \ + break; \ + fi; \ + sleep 2; \ + done + @echo "Waiting for Keycloak to be ready..." + @kubectl wait --for=condition=ready pod -l app=keycloak -n $(KEYCLOAK_NAMESPACE) --timeout=120s || true + @echo "Waiting for Keycloak HTTP endpoint to be available..." + @for i in $$(seq 1 30); do \ + STATUS=$$(curl -sk -o /dev/null -w "%{http_code}" https://keycloak.127-0-0-1.sslip.io:8443/realms/master 2>/dev/null || echo "000"); \ + if [ "$$STATUS" = "200" ]; then \ + echo "✅ Keycloak HTTP endpoint ready"; \ + break; \ + fi; \ + echo " Attempt $$i/30: Waiting for Keycloak (status: $$STATUS)..."; \ + sleep 3; \ + done + @echo "" + @echo "Setting up OpenShift realm..." + @$(MAKE) -s keycloak-setup-realm + @echo "" + @echo "✅ Keycloak installed and configured!" + @echo "Access at: https://keycloak.127-0-0-1.sslip.io:8443" + +.PHONY: keycloak-uninstall +keycloak-uninstall: + @kubectl delete -f dev/config/keycloak/deployment.yaml 2>/dev/null || true + +.PHONY: keycloak-status +keycloak-status: ## Show Keycloak status and connection info + @if kubectl get svc -n $(KEYCLOAK_NAMESPACE) keycloak >/dev/null 2>&1; then \ + echo "========================================"; \ + echo "Keycloak Status"; \ + echo "========================================"; \ + echo ""; \ + echo "Status: Installed"; \ + echo ""; \ + echo "Admin Console:"; \ + echo " URL: https://keycloak.127-0-0-1.sslip.io:8443"; \ + echo " Username: $(KEYCLOAK_ADMIN_USER)"; \ + echo " Password: $(KEYCLOAK_ADMIN_PASSWORD)"; \ + echo ""; \ + echo "OIDC Endpoints (openshift realm):"; \ + echo " Discovery: https://keycloak.127-0-0-1.sslip.io:8443/realms/openshift/.well-known/openid-configuration"; \ + echo " Token: https://keycloak.127-0-0-1.sslip.io:8443/realms/openshift/protocol/openid-connect/token"; \ + echo " Authorize: 
https://keycloak.127-0-0-1.sslip.io:8443/realms/openshift/protocol/openid-connect/auth"; \ + echo " UserInfo: https://keycloak.127-0-0-1.sslip.io:8443/realms/openshift/protocol/openid-connect/userinfo"; \ + echo " JWKS: https://keycloak.127-0-0-1.sslip.io:8443/realms/openshift/protocol/openid-connect/certs"; \ + echo ""; \ + echo "========================================"; \ + else \ + echo "Keycloak is not installed. Run: make keycloak-install"; \ + fi + +.PHONY: keycloak-logs +keycloak-logs: ## Tail Keycloak logs + @kubectl logs -n $(KEYCLOAK_NAMESPACE) -l app=keycloak -f --tail=100 + +.PHONY: keycloak-setup-realm +keycloak-setup-realm: + @echo "=========================================" + @echo "Setting up OpenShift Realm for Token Exchange" + @echo "=========================================" + @echo "Using Keycloak at https://keycloak.127-0-0-1.sslip.io:8443" + @echo "" + @echo "Getting admin access token..." + @RESPONSE=$$(curl -sk -X POST "https://keycloak.127-0-0-1.sslip.io:8443/realms/master/protocol/openid-connect/token" \ + -H "Content-Type: application/x-www-form-urlencoded" \ + -d "username=$(KEYCLOAK_ADMIN_USER)" \ + -d "password=$(KEYCLOAK_ADMIN_PASSWORD)" \ + -d "grant_type=password" \ + -d "client_id=admin-cli"); \ + TOKEN=$$(echo "$$RESPONSE" | jq -r '.access_token // empty' 2>/dev/null); \ + if [ -z "$$TOKEN" ] || [ "$$TOKEN" = "null" ]; then \ + echo "❌ Failed to get access token"; \ + echo "Response was: $$RESPONSE" | head -c 200; \ + echo ""; \ + echo "Check if:"; \ + echo " - Keycloak is running (make keycloak-install)"; \ + echo " - Keycloak is accessible at https://keycloak.127-0-0-1.sslip.io:8443"; \ + echo " - Admin credentials are correct: $(KEYCLOAK_ADMIN_USER)/$(KEYCLOAK_ADMIN_PASSWORD)"; \ + exit 1; \ + fi; \ + echo "✅ Successfully obtained access token"; \ + echo ""; \ + echo "Creating OpenShift realm..."; \ + REALM_RESPONSE=$$(curl -sk -w "%{http_code}" -X POST "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms" \ + -H 
"Authorization: Bearer $$TOKEN" \ + -H "Content-Type: application/json" \ + -d @dev/config/keycloak/realm/realm-create.json); \ + REALM_CODE=$$(echo "$$REALM_RESPONSE" | tail -c 4); \ + if [ "$$REALM_CODE" = "201" ] || [ "$$REALM_CODE" = "409" ]; then \ + if [ "$$REALM_CODE" = "201" ]; then echo "✅ OpenShift realm created"; \ + else echo "✅ OpenShift realm already exists"; fi; \ + else \ + echo "❌ Failed to create OpenShift realm (HTTP $$REALM_CODE)"; \ + exit 1; \ + fi; \ + echo ""; \ + echo "Configuring realm events..."; \ + EVENT_CONFIG_RESPONSE=$$(curl -sk -w "HTTPCODE:%{http_code}" -X PUT "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift" \ + -H "Authorization: Bearer $$TOKEN" \ + -H "Content-Type: application/json" \ + -d @dev/config/keycloak/realm/realm-events-config.json); \ + EVENT_CONFIG_CODE=$$(echo "$$EVENT_CONFIG_RESPONSE" | grep -o "HTTPCODE:[0-9]*" | cut -d: -f2); \ + if [ "$$EVENT_CONFIG_CODE" = "204" ]; then \ + echo "✅ User and admin event logging enabled"; \ + else \ + echo "⚠️ Could not configure event logging (HTTP $$EVENT_CONFIG_CODE)"; \ + fi; \ + echo ""; \ + echo "Creating mcp:openshift client scope..."; \ + SCOPE_RESPONSE=$$(curl -sk -w "HTTPCODE:%{http_code}" -X POST "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift/client-scopes" \ + -H "Authorization: Bearer $$TOKEN" \ + -H "Content-Type: application/json" \ + -d @dev/config/keycloak/client-scopes/mcp-openshift.json); \ + SCOPE_CODE=$$(echo "$$SCOPE_RESPONSE" | grep -o "HTTPCODE:[0-9]*" | cut -d: -f2); \ + if [ "$$SCOPE_CODE" = "201" ] || [ "$$SCOPE_CODE" = "409" ]; then \ + if [ "$$SCOPE_CODE" = "201" ]; then echo "✅ mcp:openshift client scope created"; \ + else echo "✅ mcp:openshift client scope already exists"; fi; \ + else \ + echo "❌ Failed to create mcp:openshift scope (HTTP $$SCOPE_CODE)"; \ + exit 1; \ + fi; \ + echo ""; \ + echo "Adding audience mapper to mcp:openshift scope..."; \ + SCOPES_LIST=$$(curl -sk -X GET 
"https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift/client-scopes" \ + -H "Authorization: Bearer $$TOKEN" \ + -H "Accept: application/json"); \ + SCOPE_ID=$$(echo "$$SCOPES_LIST" | jq -r '.[] | select(.name == "mcp:openshift") | .id // empty' 2>/dev/null); \ + if [ -z "$$SCOPE_ID" ]; then \ + echo "❌ Failed to find mcp:openshift scope"; \ + exit 1; \ + fi; \ + MAPPER_RESPONSE=$$(curl -sk -w "HTTPCODE:%{http_code}" -X POST "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift/client-scopes/$$SCOPE_ID/protocol-mappers/models" \ + -H "Authorization: Bearer $$TOKEN" \ + -H "Content-Type: application/json" \ + -d @dev/config/keycloak/mappers/openshift-audience.json); \ + MAPPER_CODE=$$(echo "$$MAPPER_RESPONSE" | grep -o "HTTPCODE:[0-9]*" | cut -d: -f2); \ + if [ "$$MAPPER_CODE" = "201" ] || [ "$$MAPPER_CODE" = "409" ]; then \ + if [ "$$MAPPER_CODE" = "201" ]; then echo "✅ Audience mapper added"; \ + else echo "✅ Audience mapper already exists"; fi; \ + else \ + echo "❌ Failed to create audience mapper (HTTP $$MAPPER_CODE)"; \ + exit 1; \ + fi; \ + echo ""; \ + echo "Creating groups client scope..."; \ + GROUPS_SCOPE_RESPONSE=$$(curl -sk -w "HTTPCODE:%{http_code}" -X POST "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift/client-scopes" \ + -H "Authorization: Bearer $$TOKEN" \ + -H "Content-Type: application/json" \ + -d @dev/config/keycloak/client-scopes/groups.json); \ + GROUPS_SCOPE_CODE=$$(echo "$$GROUPS_SCOPE_RESPONSE" | grep -o "HTTPCODE:[0-9]*" | cut -d: -f2); \ + if [ "$$GROUPS_SCOPE_CODE" = "201" ] || [ "$$GROUPS_SCOPE_CODE" = "409" ]; then \ + if [ "$$GROUPS_SCOPE_CODE" = "201" ]; then echo "✅ groups client scope created"; \ + else echo "✅ groups client scope already exists"; fi; \ + else \ + echo "❌ Failed to create groups scope (HTTP $$GROUPS_SCOPE_CODE)"; \ + exit 1; \ + fi; \ + echo ""; \ + echo "Adding group membership mapper to groups scope..."; \ + SCOPES_LIST=$$(curl -sk -X GET 
"https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift/client-scopes" \ + -H "Authorization: Bearer $$TOKEN" \ + -H "Accept: application/json"); \ + GROUPS_SCOPE_ID=$$(echo "$$SCOPES_LIST" | jq -r '.[] | select(.name == "groups") | .id // empty' 2>/dev/null); \ + if [ -z "$$GROUPS_SCOPE_ID" ]; then \ + echo "❌ Failed to find groups scope"; \ + exit 1; \ + fi; \ + GROUPS_MAPPER_RESPONSE=$$(curl -sk -w "HTTPCODE:%{http_code}" -X POST "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift/client-scopes/$$GROUPS_SCOPE_ID/protocol-mappers/models" \ + -H "Authorization: Bearer $$TOKEN" \ + -H "Content-Type: application/json" \ + -d @dev/config/keycloak/mappers/groups-membership.json); \ + GROUPS_MAPPER_CODE=$$(echo "$$GROUPS_MAPPER_RESPONSE" | grep -o "HTTPCODE:[0-9]*" | cut -d: -f2); \ + if [ "$$GROUPS_MAPPER_CODE" = "201" ] || [ "$$GROUPS_MAPPER_CODE" = "409" ]; then \ + if [ "$$GROUPS_MAPPER_CODE" = "201" ]; then echo "✅ Group membership mapper added"; \ + else echo "✅ Group membership mapper already exists"; fi; \ + else \ + echo "❌ Failed to create group mapper (HTTP $$GROUPS_MAPPER_CODE)"; \ + exit 1; \ + fi; \ + echo ""; \ + echo "Creating mcp-server client scope..."; \ + MCP_SERVER_SCOPE_RESPONSE=$$(curl -sk -w "HTTPCODE:%{http_code}" -X POST "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift/client-scopes" \ + -H "Authorization: Bearer $$TOKEN" \ + -H "Content-Type: application/json" \ + -d @dev/config/keycloak/client-scopes/mcp-server.json); \ + MCP_SERVER_SCOPE_CODE=$$(echo "$$MCP_SERVER_SCOPE_RESPONSE" | grep -o "HTTPCODE:[0-9]*" | cut -d: -f2); \ + if [ "$$MCP_SERVER_SCOPE_CODE" = "201" ] || [ "$$MCP_SERVER_SCOPE_CODE" = "409" ]; then \ + if [ "$$MCP_SERVER_SCOPE_CODE" = "201" ]; then echo "✅ mcp-server client scope created"; \ + else echo "✅ mcp-server client scope already exists"; fi; \ + else \ + echo "❌ Failed to create mcp-server scope (HTTP $$MCP_SERVER_SCOPE_CODE)"; \ + exit 1; \ + fi; \ + echo ""; \ + echo "Adding 
audience mapper to mcp-server scope..."; \ + SCOPES_LIST=$$(curl -sk -X GET "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift/client-scopes" \ + -H "Authorization: Bearer $$TOKEN" \ + -H "Accept: application/json"); \ + MCP_SERVER_SCOPE_ID=$$(echo "$$SCOPES_LIST" | jq -r '.[] | select(.name == "mcp-server") | .id // empty' 2>/dev/null); \ + if [ -z "$$MCP_SERVER_SCOPE_ID" ]; then \ + echo "❌ Failed to find mcp-server scope"; \ + exit 1; \ + fi; \ + MCP_SERVER_MAPPER_RESPONSE=$$(curl -sk -w "HTTPCODE:%{http_code}" -X POST "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift/client-scopes/$$MCP_SERVER_SCOPE_ID/protocol-mappers/models" \ + -H "Authorization: Bearer $$TOKEN" \ + -H "Content-Type: application/json" \ + -d @dev/config/keycloak/mappers/mcp-server-audience.json); \ + MCP_SERVER_MAPPER_CODE=$$(echo "$$MCP_SERVER_MAPPER_RESPONSE" | grep -o "HTTPCODE:[0-9]*" | cut -d: -f2); \ + if [ "$$MCP_SERVER_MAPPER_CODE" = "201" ] || [ "$$MCP_SERVER_MAPPER_CODE" = "409" ]; then \ + if [ "$$MCP_SERVER_MAPPER_CODE" = "201" ]; then echo "✅ mcp-server audience mapper added"; \ + else echo "✅ mcp-server audience mapper already exists"; fi; \ + else \ + echo "❌ Failed to create mcp-server audience mapper (HTTP $$MCP_SERVER_MAPPER_CODE)"; \ + exit 1; \ + fi; \ + echo ""; \ + echo "Creating openshift service client..."; \ + OPENSHIFT_CLIENT_RESPONSE=$$(curl -sk -w "HTTPCODE:%{http_code}" -X POST "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift/clients" \ + -H "Authorization: Bearer $$TOKEN" \ + -H "Content-Type: application/json" \ + -d @dev/config/keycloak/clients/openshift.json); \ + OPENSHIFT_CLIENT_CODE=$$(echo "$$OPENSHIFT_CLIENT_RESPONSE" | grep -o "HTTPCODE:[0-9]*" | cut -d: -f2); \ + if [ "$$OPENSHIFT_CLIENT_CODE" = "201" ] || [ "$$OPENSHIFT_CLIENT_CODE" = "409" ]; then \ + if [ "$$OPENSHIFT_CLIENT_CODE" = "201" ]; then echo "✅ openshift client created"; \ + else echo "✅ openshift client already exists"; fi; \ + else \ + echo "❌ 
Failed to create openshift client (HTTP $$OPENSHIFT_CLIENT_CODE)"; \ + exit 1; \ + fi; \ + echo ""; \ + echo "Adding username mapper to openshift client..."; \ + OPENSHIFT_CLIENTS_LIST=$$(curl -sk -X GET "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift/clients" \ + -H "Authorization: Bearer $$TOKEN" \ + -H "Accept: application/json"); \ + OPENSHIFT_CLIENT_ID=$$(echo "$$OPENSHIFT_CLIENTS_LIST" | jq -r '.[] | select(.clientId == "openshift") | .id // empty' 2>/dev/null); \ + OPENSHIFT_USERNAME_MAPPER_RESPONSE=$$(curl -sk -w "HTTPCODE:%{http_code}" -X POST "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift/clients/$$OPENSHIFT_CLIENT_ID/protocol-mappers/models" \ + -H "Authorization: Bearer $$TOKEN" \ + -H "Content-Type: application/json" \ + -d @dev/config/keycloak/mappers/username.json); \ + OPENSHIFT_USERNAME_MAPPER_CODE=$$(echo "$$OPENSHIFT_USERNAME_MAPPER_RESPONSE" | grep -o "HTTPCODE:[0-9]*" | cut -d: -f2); \ + if [ "$$OPENSHIFT_USERNAME_MAPPER_CODE" = "201" ] || [ "$$OPENSHIFT_USERNAME_MAPPER_CODE" = "409" ]; then \ + if [ "$$OPENSHIFT_USERNAME_MAPPER_CODE" = "201" ]; then echo "✅ Username mapper added to openshift client"; \ + else echo "✅ Username mapper already exists on openshift client"; fi; \ + else \ + echo "❌ Failed to create username mapper (HTTP $$OPENSHIFT_USERNAME_MAPPER_CODE)"; \ + exit 1; \ + fi; \ + echo ""; \ + echo "Creating mcp-client public client..."; \ + MCP_PUBLIC_CLIENT_RESPONSE=$$(curl -sk -w "HTTPCODE:%{http_code}" -X POST "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift/clients" \ + -H "Authorization: Bearer $$TOKEN" \ + -H "Content-Type: application/json" \ + -d @dev/config/keycloak/clients/mcp-client.json); \ + MCP_PUBLIC_CLIENT_CODE=$$(echo "$$MCP_PUBLIC_CLIENT_RESPONSE" | grep -o "HTTPCODE:[0-9]*" | cut -d: -f2); \ + if [ "$$MCP_PUBLIC_CLIENT_CODE" = "201" ] || [ "$$MCP_PUBLIC_CLIENT_CODE" = "409" ]; then \ + if [ "$$MCP_PUBLIC_CLIENT_CODE" = "201" ]; then echo "✅ mcp-client public 
client created"; \ + else echo "✅ mcp-client public client already exists"; fi; \ + else \ + echo "❌ Failed to create mcp-client public client (HTTP $$MCP_PUBLIC_CLIENT_CODE)"; \ + exit 1; \ + fi; \ + echo ""; \ + echo "Adding username mapper to mcp-client..."; \ + MCP_PUBLIC_CLIENTS_LIST=$$(curl -sk -X GET "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift/clients" \ + -H "Authorization: Bearer $$TOKEN" \ + -H "Accept: application/json"); \ + MCP_PUBLIC_CLIENT_ID=$$(echo "$$MCP_PUBLIC_CLIENTS_LIST" | jq -r '.[] | select(.clientId == "mcp-client") | .id // empty' 2>/dev/null); \ + MCP_PUBLIC_USERNAME_MAPPER_RESPONSE=$$(curl -sk -w "HTTPCODE:%{http_code}" -X POST "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift/clients/$$MCP_PUBLIC_CLIENT_ID/protocol-mappers/models" \ + -H "Authorization: Bearer $$TOKEN" \ + -H "Content-Type: application/json" \ + -d @dev/config/keycloak/mappers/username.json); \ + MCP_PUBLIC_USERNAME_MAPPER_CODE=$$(echo "$$MCP_PUBLIC_USERNAME_MAPPER_RESPONSE" | grep -o "HTTPCODE:[0-9]*" | cut -d: -f2); \ + if [ "$$MCP_PUBLIC_USERNAME_MAPPER_CODE" = "201" ] || [ "$$MCP_PUBLIC_USERNAME_MAPPER_CODE" = "409" ]; then \ + if [ "$$MCP_PUBLIC_USERNAME_MAPPER_CODE" = "201" ]; then echo "✅ Username mapper added to mcp-client"; \ + else echo "✅ Username mapper already exists on mcp-client"; fi; \ + else \ + echo "❌ Failed to create username mapper (HTTP $$MCP_PUBLIC_USERNAME_MAPPER_CODE)"; \ + exit 1; \ + fi; \ + echo ""; \ + echo "Creating mcp-server client with token exchange..."; \ + MCP_CLIENT_RESPONSE=$$(curl -sk -w "HTTPCODE:%{http_code}" -X POST "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift/clients" \ + -H "Authorization: Bearer $$TOKEN" \ + -H "Content-Type: application/json" \ + -d @dev/config/keycloak/clients/mcp-server.json); \ + MCP_CLIENT_CODE=$$(echo "$$MCP_CLIENT_RESPONSE" | grep -o "HTTPCODE:[0-9]*" | cut -d: -f2); \ + if [ "$$MCP_CLIENT_CODE" = "201" ] || [ "$$MCP_CLIENT_CODE" = "409" ]; then \ 
+ if [ "$$MCP_CLIENT_CODE" = "201" ]; then echo "✅ mcp-server client created"; \ + else echo "✅ mcp-server client already exists"; fi; \ + else \ + echo "❌ Failed to create mcp-server client (HTTP $$MCP_CLIENT_CODE)"; \ + exit 1; \ + fi; \ + echo ""; \ + echo "Enabling standard token exchange for mcp-server..."; \ + CLIENTS_LIST=$$(curl -sk -X GET "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift/clients" \ + -H "Authorization: Bearer $$TOKEN" \ + -H "Accept: application/json"); \ + MCP_CLIENT_ID=$$(echo "$$CLIENTS_LIST" | jq -r '.[] | select(.clientId == "mcp-server") | .id // empty' 2>/dev/null); \ + if [ -z "$$MCP_CLIENT_ID" ]; then \ + echo "❌ Failed to find mcp-server client"; \ + exit 1; \ + fi; \ + UPDATE_CLIENT_RESPONSE=$$(curl -sk -w "HTTPCODE:%{http_code}" -X PUT "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift/clients/$$MCP_CLIENT_ID" \ + -H "Authorization: Bearer $$TOKEN" \ + -H "Content-Type: application/json" \ + -d @dev/config/keycloak/clients/mcp-server-update.json); \ + UPDATE_CLIENT_CODE=$$(echo "$$UPDATE_CLIENT_RESPONSE" | grep -o "HTTPCODE:[0-9]*" | cut -d: -f2); \ + if [ "$$UPDATE_CLIENT_CODE" = "204" ]; then \ + echo "✅ Standard token exchange enabled for mcp-server client"; \ + else \ + echo "⚠️ Could not enable token exchange (HTTP $$UPDATE_CLIENT_CODE)"; \ + fi; \ + echo ""; \ + echo "Getting mcp-server client secret..."; \ + SECRET_RESPONSE=$$(curl -sk -X GET "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift/clients/$$MCP_CLIENT_ID/client-secret" \ + -H "Authorization: Bearer $$TOKEN" \ + -H "Accept: application/json"); \ + CLIENT_SECRET=$$(echo "$$SECRET_RESPONSE" | jq -r '.value // empty' 2>/dev/null); \ + if [ -z "$$CLIENT_SECRET" ]; then \ + echo "❌ Failed to get client secret"; \ + else \ + echo "✅ Client secret retrieved"; \ + fi; \ + echo ""; \ + echo "Adding username mapper to mcp-server client..."; \ + MCP_USERNAME_MAPPER_RESPONSE=$$(curl -sk -w "HTTPCODE:%{http_code}" -X POST 
"https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift/clients/$$MCP_CLIENT_ID/protocol-mappers/models" \ + -H "Authorization: Bearer $$TOKEN" \ + -H "Content-Type: application/json" \ + -d @dev/config/keycloak/mappers/username.json); \ + MCP_USERNAME_MAPPER_CODE=$$(echo "$$MCP_USERNAME_MAPPER_RESPONSE" | grep -o "HTTPCODE:[0-9]*" | cut -d: -f2); \ + if [ "$$MCP_USERNAME_MAPPER_CODE" = "201" ] || [ "$$MCP_USERNAME_MAPPER_CODE" = "409" ]; then \ + if [ "$$MCP_USERNAME_MAPPER_CODE" = "201" ]; then echo "✅ Username mapper added to mcp-server client"; \ + else echo "✅ Username mapper already exists on mcp-server client"; fi; \ + else \ + echo "❌ Failed to create username mapper (HTTP $$MCP_USERNAME_MAPPER_CODE)"; \ + exit 1; \ + fi; \ + echo ""; \ + echo "Creating test user mcp/mcp..."; \ + USER_RESPONSE=$$(curl -sk -w "%{http_code}" -X POST "https://keycloak.127-0-0-1.sslip.io:8443/admin/realms/openshift/users" \ + -H "Authorization: Bearer $$TOKEN" \ + -H "Content-Type: application/json" \ + -d @dev/config/keycloak/users/mcp.json); \ + USER_CODE=$$(echo "$$USER_RESPONSE" | tail -c 4); \ + if [ "$$USER_CODE" = "201" ] || [ "$$USER_CODE" = "409" ]; then \ + if [ "$$USER_CODE" = "201" ]; then echo "✅ mcp user created"; \ + else echo "✅ mcp user already exists"; fi; \ + else \ + echo "❌ Failed to create mcp user (HTTP $$USER_CODE)"; \ + exit 1; \ + fi; \ + echo ""; \ + echo "Setting up RBAC for mcp user..."; \ + kubectl apply -f dev/config/keycloak/rbac.yaml; \ + echo "✅ RBAC binding created for mcp user"; \ + echo ""; \ + echo "🎉 OpenShift realm setup complete!"; \ + echo ""; \ + echo "========================================"; \ + echo "Configuration Summary"; \ + echo "========================================"; \ + echo "Realm: openshift"; \ + echo "Authorization URL: https://keycloak.127-0-0-1.sslip.io:8443/realms/openshift"; \ + echo "Issuer URL (for config.toml): https://keycloak.127-0-0-1.sslip.io:8443/realms/openshift"; \ + echo ""; \ + echo "Test 
User:"; \ + echo " Username: mcp"; \ + echo " Password: mcp"; \ + echo " Email: mcp@example.com"; \ + echo " RBAC: cluster-admin (full cluster access)"; \ + echo ""; \ + echo "Clients:"; \ + echo " mcp-client (public, for browser-based auth)"; \ + echo " Client ID: mcp-client"; \ + echo " Optional Scopes: mcp-server"; \ + echo " mcp-server (confidential, token exchange enabled)"; \ + echo " Client ID: mcp-server"; \ + echo " Client Secret: $$CLIENT_SECRET"; \ + echo " openshift (service account)"; \ + echo " Client ID: openshift"; \ + echo ""; \ + echo "Client Scopes:"; \ + echo " mcp-server (default) - Audience: mcp-server"; \ + echo " mcp:openshift (optional) - Audience: openshift"; \ + echo " groups (default) - Group membership mapper"; \ + echo ""; \ + echo "TOML Configuration (config.toml):"; \ + echo " require_oauth = true"; \ + echo " oauth_audience = \"mcp-server\""; \ + echo " oauth_scopes = [\"openid\", \"mcp-server\"]"; \ + echo " validate_token = false"; \ + echo " authorization_url = \"https://keycloak.127-0-0-1.sslip.io:8443/realms/openshift\""; \ + echo " sts_client_id = \"mcp-server\""; \ + echo " sts_client_secret = \"$$CLIENT_SECRET\""; \ + echo " sts_audience = \"openshift\""; \ + echo " sts_scopes = [\"mcp:openshift\"]"; \ + echo " certificate_authority = \"_output/cert-manager-ca/ca.crt\""; \ + echo "========================================"; \ + echo ""; \ + echo "Note: The Kubernetes API server is configured with:"; \ + echo " --oidc-issuer-url=https://keycloak.127-0-0-1.sslip.io:8443/realms/openshift"; \ + echo ""; \ + echo "Important: The cert-manager CA certificate was extracted to:"; \ + echo " _output/cert-manager-ca/ca.crt"; \ + echo ""; \ + echo "Writing configuration to _output/config.toml..."; \ + mkdir -p _output; \ + printf '%s\n' \ + 'require_oauth = true' \ + 'oauth_audience = "mcp-server"' \ + 'oauth_scopes = ["openid", "mcp-server"]' \ + 'validate_token = false' \ + 'authorization_url = 
"https://keycloak.127-0-0-1.sslip.io:8443/realms/openshift"' \ + 'sts_client_id = "mcp-server"' \ + "sts_client_secret = \"$$CLIENT_SECRET\"" \ + 'sts_audience = "openshift"' \ + 'sts_scopes = ["mcp:openshift"]' \ + 'certificate_authority = "_output/cert-manager-ca/ca.crt"' \ + > _output/config.toml; \ + echo "✅ Configuration written to _output/config.toml" diff --git a/build/kind.mk b/build/kind.mk new file mode 100644 index 00000000..fe83f1ab --- /dev/null +++ b/build/kind.mk @@ -0,0 +1,61 @@ +# Kind cluster management + +KIND_CLUSTER_NAME ?= kubernetes-mcp-server + +# Detect container engine (docker or podman) +CONTAINER_ENGINE ?= $(shell command -v docker 2>/dev/null || command -v podman 2>/dev/null) + +.PHONY: kind-create-certs +kind-create-certs: + @if [ ! -f _output/cert-manager-ca/ca.crt ]; then \ + echo "Creating placeholder CA certificate for bind mount..."; \ + ./hack/generate-placeholder-ca.sh; \ + else \ + echo "✅ Placeholder CA already exists"; \ + fi + +.PHONY: kind-create-cluster +kind-create-cluster: kind kind-create-certs + @# Set KIND provider for podman on Linux + @if [ "$(shell uname -s)" != "Darwin" ] && echo "$(CONTAINER_ENGINE)" | grep -q "podman"; then \ + export KIND_EXPERIMENTAL_PROVIDER=podman; \ + fi; \ + if $(KIND) get clusters 2>/dev/null | grep -q "^$(KIND_CLUSTER_NAME)$$"; then \ + echo "Kind cluster '$(KIND_CLUSTER_NAME)' already exists, skipping creation"; \ + else \ + echo "Creating Kind cluster '$(KIND_CLUSTER_NAME)'..."; \ + $(KIND) create cluster --name $(KIND_CLUSTER_NAME) --config dev/config/kind/cluster.yaml; \ + echo "Adding ingress-ready label to control-plane node..."; \ + kubectl label node $(KIND_CLUSTER_NAME)-control-plane ingress-ready=true --overwrite; \ + echo "Installing nginx ingress controller..."; \ + kubectl apply -f dev/config/ingress/nginx-ingress.yaml; \ + echo "Waiting for ingress controller to be ready..."; \ + kubectl wait --namespace ingress-nginx --for=condition=ready pod 
--selector=app.kubernetes.io/component=controller --timeout=90s; \ + echo "✅ Ingress controller ready"; \ + echo "Installing cert-manager..."; \ + kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.16.2/cert-manager.yaml; \ + echo "Waiting for cert-manager to be ready..."; \ + kubectl wait --namespace cert-manager --for=condition=available deployment/cert-manager --timeout=120s; \ + kubectl wait --namespace cert-manager --for=condition=available deployment/cert-manager-cainjector --timeout=120s; \ + kubectl wait --namespace cert-manager --for=condition=available deployment/cert-manager-webhook --timeout=120s; \ + echo "✅ cert-manager ready"; \ + echo "Creating cert-manager ClusterIssuer..."; \ + sleep 5; \ + kubectl apply -f dev/config/cert-manager/selfsigned-issuer.yaml; \ + echo "✅ ClusterIssuer created"; \ + echo "Adding /etc/hosts entry for Keycloak in control plane..."; \ + if command -v docker >/dev/null 2>&1 && docker ps --filter "name=$(KIND_CLUSTER_NAME)-control-plane" --format "{{.Names}}" | grep -q "$(KIND_CLUSTER_NAME)-control-plane"; then \ + docker exec $(KIND_CLUSTER_NAME)-control-plane bash -c 'grep -q "keycloak.127-0-0-1.sslip.io" /etc/hosts || echo "127.0.0.1 keycloak.127-0-0-1.sslip.io" >> /etc/hosts'; \ + elif command -v podman >/dev/null 2>&1 && podman ps --filter "name=$(KIND_CLUSTER_NAME)-control-plane" --format "{{.Names}}" | grep -q "$(KIND_CLUSTER_NAME)-control-plane"; then \ + podman exec $(KIND_CLUSTER_NAME)-control-plane bash -c 'grep -q "keycloak.127-0-0-1.sslip.io" /etc/hosts || echo "127.0.0.1 keycloak.127-0-0-1.sslip.io" >> /etc/hosts'; \ + fi; \ + echo "✅ /etc/hosts entry added"; \ + fi + +.PHONY: kind-delete-cluster +kind-delete-cluster: kind + @# Set KIND provider for podman on Linux + @if [ "$(shell uname -s)" != "Darwin" ] && echo "$(CONTAINER_ENGINE)" | grep -q "podman"; then \ + export KIND_EXPERIMENTAL_PROVIDER=podman; \ + fi; \ + $(KIND) delete cluster --name $(KIND_CLUSTER_NAME) diff --git 
a/build/tools.mk b/build/tools.mk new file mode 100644 index 00000000..9c9945a8 --- /dev/null +++ b/build/tools.mk @@ -0,0 +1,20 @@ +# Tools + +# Platform detection +OS := $(shell uname -s | tr '[:upper:]' '[:lower:]') +ARCH := $(shell uname -m | tr '[:upper:]' '[:lower:]') +ifeq ($(ARCH),x86_64) + ARCH = amd64 +endif +ifeq ($(ARCH),aarch64) + ARCH = arm64 +endif + +KIND = _output/bin/kind +KIND_VERSION = v0.30.0 +$(KIND): + @mkdir -p _output/bin + GOBIN=$(PWD)/_output/bin go install sigs.k8s.io/kind@$(KIND_VERSION) + +.PHONY: kind +kind: $(KIND) ## Download kind locally if necessary diff --git a/dev/config/cert-manager/selfsigned-issuer.yaml b/dev/config/cert-manager/selfsigned-issuer.yaml new file mode 100644 index 00000000..8bb27f7a --- /dev/null +++ b/dev/config/cert-manager/selfsigned-issuer.yaml @@ -0,0 +1,31 @@ +apiVersion: cert-manager.io/v1 +kind: ClusterIssuer +metadata: + name: selfsigned-issuer +spec: + selfSigned: {} +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: selfsigned-ca + namespace: cert-manager +spec: + isCA: true + commonName: selfsigned-ca + secretName: selfsigned-ca-secret + privateKey: + algorithm: ECDSA + size: 256 + issuerRef: + name: selfsigned-issuer + kind: ClusterIssuer + group: cert-manager.io +--- +apiVersion: cert-manager.io/v1 +kind: ClusterIssuer +metadata: + name: selfsigned-ca-issuer +spec: + ca: + secretName: selfsigned-ca-secret diff --git a/dev/config/ingress/nginx-ingress.yaml b/dev/config/ingress/nginx-ingress.yaml new file mode 100644 index 00000000..8405740d --- /dev/null +++ b/dev/config/ingress/nginx-ingress.yaml @@ -0,0 +1,386 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: ingress-nginx + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/component: controller + name: 
ingress-nginx + namespace: ingress-nginx +--- +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/component: controller + name: ingress-nginx-controller + namespace: ingress-nginx +data: + allow-snippet-annotations: "true" +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + name: ingress-nginx +rules: + - apiGroups: + - "" + resources: + - configmaps + - endpoints + - nodes + - pods + - secrets + - namespaces + verbs: + - list + - watch + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - list + - watch + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - apiGroups: + - networking.k8s.io + resources: + - ingresses/status + verbs: + - update + - apiGroups: + - networking.k8s.io + resources: + - ingressclasses + verbs: + - get + - list + - watch + - apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - list + - watch + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + name: ingress-nginx +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: ingress-nginx +subjects: + - kind: ServiceAccount + name: ingress-nginx + namespace: ingress-nginx +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/component: controller + name: ingress-nginx + namespace: 
ingress-nginx +rules: + - apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - apiGroups: + - "" + resources: + - configmaps + - pods + - secrets + - endpoints + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - ingresses/status + verbs: + - update + - apiGroups: + - networking.k8s.io + resources: + - ingressclasses + verbs: + - get + - list + - watch + - apiGroups: + - coordination.k8s.io + resources: + - leases + resourceNames: + - ingress-nginx-leader + verbs: + - get + - update + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - list + - watch + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/component: controller + name: ingress-nginx + namespace: ingress-nginx +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: ingress-nginx +subjects: + - kind: ServiceAccount + name: ingress-nginx + namespace: ingress-nginx +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/component: controller + name: ingress-nginx-controller + namespace: ingress-nginx +spec: + type: NodePort + ports: + - name: http + port: 80 + protocol: TCP + targetPort: http + appProtocol: http + - name: https + port: 443 + protocol: TCP + targetPort: https + appProtocol: https + selector: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/component: controller 
+--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/component: controller + name: ingress-nginx-controller + namespace: ingress-nginx +spec: + selector: + matchLabels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/component: controller + replicas: 1 + revisionHistoryLimit: 10 + minReadySeconds: 0 + template: + metadata: + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/component: controller + spec: + dnsPolicy: ClusterFirst + containers: + - name: controller + image: registry.k8s.io/ingress-nginx/controller:v1.11.1 + imagePullPolicy: IfNotPresent + lifecycle: + preStop: + exec: + command: + - /wait-shutdown + args: + - /nginx-ingress-controller + - --election-id=ingress-nginx-leader + - --controller-class=k8s.io/ingress-nginx + - --ingress-class=nginx + - --configmap=$(POD_NAMESPACE)/ingress-nginx-controller + - --watch-ingress-without-class=true + securityContext: + runAsNonRoot: true + runAsUser: 101 + allowPrivilegeEscalation: false + seccompProfile: + type: RuntimeDefault + capabilities: + drop: + - ALL + add: + - NET_BIND_SERVICE + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LD_PRELOAD + value: /usr/local/lib/libmimalloc.so + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + readinessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + ports: + - name: http + containerPort: 80 + protocol: TCP + hostPort: 80 + - name: https + containerPort: 443 + 
protocol: TCP + hostPort: 443 + - name: https-alt + containerPort: 443 + protocol: TCP + hostPort: 8443 + - name: webhook + containerPort: 8443 + protocol: TCP + resources: + requests: + cpu: 100m + memory: 90Mi + nodeSelector: + ingress-ready: "true" + kubernetes.io/os: linux + serviceAccountName: ingress-nginx + terminationGracePeriodSeconds: 0 + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Equal + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + operator: Equal +--- +apiVersion: networking.k8s.io/v1 +kind: IngressClass +metadata: + labels: + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/component: controller + name: nginx +spec: + controller: k8s.io/ingress-nginx diff --git a/dev/config/keycloak/client-scopes/groups.json b/dev/config/keycloak/client-scopes/groups.json new file mode 100644 index 00000000..4eb20b74 --- /dev/null +++ b/dev/config/keycloak/client-scopes/groups.json @@ -0,0 +1,8 @@ +{ + "name": "groups", + "protocol": "openid-connect", + "attributes": { + "display.on.consent.screen": "false", + "include.in.token.scope": "true" + } +} diff --git a/dev/config/keycloak/client-scopes/mcp-openshift.json b/dev/config/keycloak/client-scopes/mcp-openshift.json new file mode 100644 index 00000000..39f55e7b --- /dev/null +++ b/dev/config/keycloak/client-scopes/mcp-openshift.json @@ -0,0 +1,8 @@ +{ + "name": "mcp:openshift", + "protocol": "openid-connect", + "attributes": { + "display.on.consent.screen": "false", + "include.in.token.scope": "true" + } +} diff --git a/dev/config/keycloak/client-scopes/mcp-server.json b/dev/config/keycloak/client-scopes/mcp-server.json new file mode 100644 index 00000000..5ac0440b --- /dev/null +++ b/dev/config/keycloak/client-scopes/mcp-server.json @@ -0,0 +1,8 @@ +{ + "name": "mcp-server", + "protocol": "openid-connect", + "attributes": { + "display.on.consent.screen": "false", + "include.in.token.scope": "true" + 
} +} diff --git a/dev/config/keycloak/clients/mcp-client.json b/dev/config/keycloak/clients/mcp-client.json new file mode 100644 index 00000000..7f2c596e --- /dev/null +++ b/dev/config/keycloak/clients/mcp-client.json @@ -0,0 +1,13 @@ +{ + "clientId": "mcp-client", + "enabled": true, + "publicClient": true, + "standardFlowEnabled": true, + "directAccessGrantsEnabled": true, + "serviceAccountsEnabled": false, + "authorizationServicesEnabled": false, + "redirectUris": ["*"], + "webOrigins": ["*"], + "defaultClientScopes": ["profile", "email"], + "optionalClientScopes": ["mcp-server"] +} diff --git a/dev/config/keycloak/clients/mcp-server-update.json b/dev/config/keycloak/clients/mcp-server-update.json new file mode 100644 index 00000000..2709e75c --- /dev/null +++ b/dev/config/keycloak/clients/mcp-server-update.json @@ -0,0 +1,20 @@ +{ + "clientId": "mcp-server", + "enabled": true, + "publicClient": false, + "standardFlowEnabled": true, + "directAccessGrantsEnabled": true, + "serviceAccountsEnabled": true, + "authorizationServicesEnabled": false, + "redirectUris": ["*"], + "webOrigins": ["*"], + "defaultClientScopes": ["profile", "email", "groups", "mcp-server"], + "optionalClientScopes": ["mcp:openshift"], + "attributes": { + "oauth2.device.authorization.grant.enabled": "false", + "oidc.ciba.grant.enabled": "false", + "backchannel.logout.session.required": "true", + "backchannel.logout.revoke.offline.tokens": "false", + "standard.token.exchange.enabled": "true" + } +} diff --git a/dev/config/keycloak/clients/mcp-server.json b/dev/config/keycloak/clients/mcp-server.json new file mode 100644 index 00000000..873fa5ce --- /dev/null +++ b/dev/config/keycloak/clients/mcp-server.json @@ -0,0 +1,19 @@ +{ + "clientId": "mcp-server", + "enabled": true, + "publicClient": false, + "standardFlowEnabled": true, + "directAccessGrantsEnabled": true, + "serviceAccountsEnabled": true, + "authorizationServicesEnabled": false, + "redirectUris": ["*"], + "webOrigins": ["*"], + 
"defaultClientScopes": ["profile", "email", "groups", "mcp-server"], + "optionalClientScopes": ["mcp:openshift"], + "attributes": { + "oauth2.device.authorization.grant.enabled": "false", + "oidc.ciba.grant.enabled": "false", + "backchannel.logout.session.required": "true", + "backchannel.logout.revoke.offline.tokens": "false" + } +} diff --git a/dev/config/keycloak/clients/openshift.json b/dev/config/keycloak/clients/openshift.json new file mode 100644 index 00000000..2905c437 --- /dev/null +++ b/dev/config/keycloak/clients/openshift.json @@ -0,0 +1,13 @@ +{ + "clientId": "openshift", + "enabled": true, + "publicClient": false, + "standardFlowEnabled": true, + "directAccessGrantsEnabled": true, + "serviceAccountsEnabled": true, + "authorizationServicesEnabled": false, + "redirectUris": ["*"], + "webOrigins": ["*"], + "defaultClientScopes": ["profile", "email", "groups"], + "optionalClientScopes": [] +} diff --git a/dev/config/keycloak/deployment.yaml b/dev/config/keycloak/deployment.yaml new file mode 100644 index 00000000..efcb7e0f --- /dev/null +++ b/dev/config/keycloak/deployment.yaml @@ -0,0 +1,71 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: keycloak +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: keycloak + namespace: keycloak + labels: + app: keycloak +spec: + replicas: 1 + selector: + matchLabels: + app: keycloak + template: + metadata: + labels: + app: keycloak + spec: + containers: + - name: keycloak + image: quay.io/keycloak/keycloak:26.4 + args: ["start-dev"] + env: + - name: KC_BOOTSTRAP_ADMIN_USERNAME + value: "admin" + - name: KC_BOOTSTRAP_ADMIN_PASSWORD + value: "admin" + - name: KC_HOSTNAME + value: "https://keycloak.127-0-0-1.sslip.io:8443" + - name: KC_HTTP_ENABLED + value: "true" + - name: KC_HEALTH_ENABLED + value: "true" + - name: KC_PROXY_HEADERS + value: "xforwarded" + ports: + - name: http + containerPort: 8080 + readinessProbe: + httpGet: + path: /health/ready + port: 9000 + initialDelaySeconds: 30 + 
periodSeconds: 10 + livenessProbe: + httpGet: + path: /health/live + port: 9000 + initialDelaySeconds: 60 + periodSeconds: 30 +--- +apiVersion: v1 +kind: Service +metadata: + name: keycloak + namespace: keycloak + labels: + app: keycloak +spec: + ports: + - name: http + port: 80 + targetPort: 8080 + selector: + app: keycloak + type: ClusterIP diff --git a/dev/config/keycloak/ingress.yaml b/dev/config/keycloak/ingress.yaml new file mode 100644 index 00000000..d172e091 --- /dev/null +++ b/dev/config/keycloak/ingress.yaml @@ -0,0 +1,34 @@ +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: keycloak + namespace: keycloak + labels: + app: keycloak + annotations: + cert-manager.io/cluster-issuer: "selfsigned-ca-issuer" + nginx.ingress.kubernetes.io/ssl-redirect: "true" + nginx.ingress.kubernetes.io/backend-protocol: "HTTP" + # Required for Keycloak 26.2.0+ to include port in issuer URLs + nginx.ingress.kubernetes.io/configuration-snippet: | + proxy_set_header X-Forwarded-Proto https; + proxy_set_header X-Forwarded-Port 8443; + proxy_set_header X-Forwarded-Host $host:8443; +spec: + ingressClassName: nginx + tls: + - hosts: + - keycloak.127-0-0-1.sslip.io + secretName: keycloak-tls-cert + rules: + - host: keycloak.127-0-0-1.sslip.io + http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: keycloak + port: + number: 80 diff --git a/dev/config/keycloak/mappers/groups-membership.json b/dev/config/keycloak/mappers/groups-membership.json new file mode 100644 index 00000000..266a66e9 --- /dev/null +++ b/dev/config/keycloak/mappers/groups-membership.json @@ -0,0 +1,12 @@ +{ + "name": "groups", + "protocol": "openid-connect", + "protocolMapper": "oidc-group-membership-mapper", + "config": { + "claim.name": "groups", + "full.path": "false", + "id.token.claim": "true", + "access.token.claim": "true", + "userinfo.token.claim": "true" + } +} diff --git a/dev/config/keycloak/mappers/mcp-server-audience.json 
b/dev/config/keycloak/mappers/mcp-server-audience.json new file mode 100644 index 00000000..37b7e969 --- /dev/null +++ b/dev/config/keycloak/mappers/mcp-server-audience.json @@ -0,0 +1,10 @@ +{ + "name": "mcp-server-audience", + "protocol": "openid-connect", + "protocolMapper": "oidc-audience-mapper", + "config": { + "included.client.audience": "mcp-server", + "id.token.claim": "true", + "access.token.claim": "true" + } +} diff --git a/dev/config/keycloak/mappers/openshift-audience.json b/dev/config/keycloak/mappers/openshift-audience.json new file mode 100644 index 00000000..74b84b71 --- /dev/null +++ b/dev/config/keycloak/mappers/openshift-audience.json @@ -0,0 +1,10 @@ +{ + "name": "openshift-audience", + "protocol": "openid-connect", + "protocolMapper": "oidc-audience-mapper", + "config": { + "included.client.audience": "openshift", + "id.token.claim": "true", + "access.token.claim": "true" + } +} diff --git a/dev/config/keycloak/mappers/username.json b/dev/config/keycloak/mappers/username.json new file mode 100644 index 00000000..d76ccfa2 --- /dev/null +++ b/dev/config/keycloak/mappers/username.json @@ -0,0 +1,13 @@ +{ + "name": "username", + "protocol": "openid-connect", + "protocolMapper": "oidc-usermodel-property-mapper", + "config": { + "userinfo.token.claim": "true", + "user.attribute": "username", + "id.token.claim": "true", + "access.token.claim": "true", + "claim.name": "preferred_username", + "jsonType.label": "String" + } +} diff --git a/dev/config/keycloak/rbac.yaml b/dev/config/keycloak/rbac.yaml new file mode 100644 index 00000000..6f3f8c75 --- /dev/null +++ b/dev/config/keycloak/rbac.yaml @@ -0,0 +1,20 @@ +# RBAC ClusterRoleBinding for mcp user with OIDC authentication +# +# IMPORTANT: This requires Kubernetes API server to be configured with OIDC: +# --oidc-issuer-url=https://keycloak.127-0-0-1.sslip.io:8443/realms/openshift +# --oidc-username-claim=preferred_username +# +# Without OIDC configuration, this binding will not work. 
+# +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: oidc-mcp-cluster-admin +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +subjects: +- apiGroup: rbac.authorization.k8s.io + kind: User + name: https://keycloak.127-0-0-1.sslip.io:8443/realms/openshift#mcp diff --git a/dev/config/keycloak/realm/realm-create.json b/dev/config/keycloak/realm/realm-create.json new file mode 100644 index 00000000..d651e7dd --- /dev/null +++ b/dev/config/keycloak/realm/realm-create.json @@ -0,0 +1,4 @@ +{ + "realm": "openshift", + "enabled": true +} diff --git a/dev/config/keycloak/realm/realm-events-config.json b/dev/config/keycloak/realm/realm-events-config.json new file mode 100644 index 00000000..72b07a5b --- /dev/null +++ b/dev/config/keycloak/realm/realm-events-config.json @@ -0,0 +1,8 @@ +{ + "realm": "openshift", + "enabled": true, + "eventsEnabled": true, + "eventsListeners": ["jboss-logging"], + "adminEventsEnabled": true, + "adminEventsDetailsEnabled": true +} diff --git a/dev/config/keycloak/users/mcp.json b/dev/config/keycloak/users/mcp.json new file mode 100644 index 00000000..b84bc3f2 --- /dev/null +++ b/dev/config/keycloak/users/mcp.json @@ -0,0 +1,15 @@ +{ + "username": "mcp", + "email": "mcp@example.com", + "firstName": "MCP", + "lastName": "User", + "enabled": true, + "emailVerified": true, + "credentials": [ + { + "type": "password", + "value": "mcp", + "temporary": false + } + ] +} diff --git a/dev/config/kind/cluster.yaml b/dev/config/kind/cluster.yaml new file mode 100644 index 00000000..fda11689 --- /dev/null +++ b/dev/config/kind/cluster.yaml @@ -0,0 +1,30 @@ +kind: Cluster +apiVersion: kind.x-k8s.io/v1alpha4 +nodes: +- role: control-plane + extraMounts: + - hostPath: ./_output/cert-manager-ca/ca.crt + containerPath: /etc/kubernetes/pki/keycloak-ca.crt + readOnly: true + kubeadmConfigPatches: + - | + kind: InitConfiguration + nodeRegistration: + kubeletExtraArgs: + node-labels: 
"ingress-ready=true" + + kind: ClusterConfiguration + apiServer: + extraArgs: + oidc-issuer-url: https://keycloak.127-0-0-1.sslip.io:8443/realms/openshift + oidc-client-id: openshift + oidc-username-claim: preferred_username + oidc-groups-claim: groups + oidc-ca-file: /etc/kubernetes/pki/keycloak-ca.crt + extraPortMappings: + - containerPort: 80 + hostPort: 8000 + protocol: TCP + - containerPort: 443 + hostPort: 8443 + protocol: TCP diff --git a/go.mod b/go.mod index 78ed49c2..de49820d 100644 --- a/go.mod +++ b/go.mod @@ -8,7 +8,7 @@ require ( github.com/fsnotify/fsnotify v1.9.0 github.com/go-jose/go-jose/v4 v4.1.3 github.com/google/jsonschema-go v0.3.0 - github.com/mark3labs/mcp-go v0.41.1 + github.com/mark3labs/mcp-go v0.42.0 github.com/pkg/errors v0.9.1 github.com/spf13/afero v1.15.0 github.com/spf13/cobra v1.10.1 diff --git a/go.sum b/go.sum index 2185419b..52a72c8b 100644 --- a/go.sum +++ b/go.sum @@ -187,8 +187,8 @@ github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhn github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4= github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU= -github.com/mark3labs/mcp-go v0.41.1 h1:w78eWfiQam2i8ICL7AL0WFiq7KHNJQ6UB53ZVtH4KGA= -github.com/mark3labs/mcp-go v0.41.1/go.mod h1:T7tUa2jO6MavG+3P25Oy/jR7iCeJPHImCZHRymCn39g= +github.com/mark3labs/mcp-go v0.42.0 h1:gk/8nYJh8t3yroCAOBhNbYsM9TCKvkM13I5t5Hfu6Ls= +github.com/mark3labs/mcp-go v0.42.0/go.mod h1:YnJfOL382MIWDx1kMY+2zsRHU/q78dBg9aFb8W6Thdw= github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= diff --git a/hack/generate-placeholder-ca.sh 
b/hack/generate-placeholder-ca.sh new file mode 100755 index 00000000..5428304d --- /dev/null +++ b/hack/generate-placeholder-ca.sh @@ -0,0 +1,22 @@ +#!/bin/bash +set -e + +# Generate a placeholder self-signed CA certificate for KIND cluster startup +# This will be replaced with the real cert-manager CA after the cluster is created + +CERT_DIR="_output/cert-manager-ca" +CA_CERT="$CERT_DIR/ca.crt" +CA_KEY="$CERT_DIR/ca.key" + +mkdir -p "$CERT_DIR" + +# Generate a self-signed CA certificate (valid placeholder) +openssl req -x509 -newkey rsa:2048 -nodes \ + -keyout "$CA_KEY" \ + -out "$CA_CERT" \ + -days 365 \ + -subj "/CN=placeholder-ca" \ + 2>/dev/null + +echo "✅ Placeholder CA certificate created at $CA_CERT" +echo "⚠️ This will be replaced with cert-manager CA after cluster creation" diff --git a/internal/test/mcp.go b/internal/test/mcp.go index 8daaae40..b82e3194 100644 --- a/internal/test/mcp.go +++ b/internal/test/mcp.go @@ -1,12 +1,12 @@ package test import ( + "net/http" "net/http/httptest" "testing" "github.com/mark3labs/mcp-go/client" "github.com/mark3labs/mcp-go/mcp" - "github.com/mark3labs/mcp-go/server" "github.com/stretchr/testify/require" "golang.org/x/net/context" ) @@ -17,7 +17,7 @@ type McpClient struct { *client.Client } -func NewMcpClient(t *testing.T, mcpHttpServer *server.StreamableHTTPServer) *McpClient { +func NewMcpClient(t *testing.T, mcpHttpServer http.Handler) *McpClient { require.NotNil(t, mcpHttpServer, "McpHttpServer must be provided") var err error ret := &McpClient{ctx: t.Context()} diff --git a/npm/kubernetes-mcp-server-darwin-amd64/package.json b/npm/kubernetes-mcp-server-darwin-amd64/package.json index f83bf58b..49e05004 100644 --- a/npm/kubernetes-mcp-server-darwin-amd64/package.json +++ b/npm/kubernetes-mcp-server-darwin-amd64/package.json @@ -2,6 +2,10 @@ "name": "kubernetes-mcp-server-darwin-amd64", "version": "0.0.0", "description": "Model Context Protocol (MCP) server for Kubernetes and OpenShift", + "repository": { + "type": 
"git", + "url": "git+https://github.com/containers/kubernetes-mcp-server.git" + }, "os": [ "darwin" ], diff --git a/npm/kubernetes-mcp-server-darwin-arm64/package.json b/npm/kubernetes-mcp-server-darwin-arm64/package.json index d8cbc618..f8e313c2 100644 --- a/npm/kubernetes-mcp-server-darwin-arm64/package.json +++ b/npm/kubernetes-mcp-server-darwin-arm64/package.json @@ -2,6 +2,10 @@ "name": "kubernetes-mcp-server-darwin-arm64", "version": "0.0.0", "description": "Model Context Protocol (MCP) server for Kubernetes and OpenShift", + "repository": { + "type": "git", + "url": "git+https://github.com/containers/kubernetes-mcp-server.git" + }, "os": [ "darwin" ], diff --git a/npm/kubernetes-mcp-server-linux-amd64/package.json b/npm/kubernetes-mcp-server-linux-amd64/package.json index deaa5364..1a519074 100644 --- a/npm/kubernetes-mcp-server-linux-amd64/package.json +++ b/npm/kubernetes-mcp-server-linux-amd64/package.json @@ -2,6 +2,10 @@ "name": "kubernetes-mcp-server-linux-amd64", "version": "0.0.0", "description": "Model Context Protocol (MCP) server for Kubernetes and OpenShift", + "repository": { + "type": "git", + "url": "git+https://github.com/containers/kubernetes-mcp-server.git" + }, "os": [ "linux" ], diff --git a/npm/kubernetes-mcp-server-linux-arm64/package.json b/npm/kubernetes-mcp-server-linux-arm64/package.json index ba2f6475..b861abeb 100644 --- a/npm/kubernetes-mcp-server-linux-arm64/package.json +++ b/npm/kubernetes-mcp-server-linux-arm64/package.json @@ -2,6 +2,10 @@ "name": "kubernetes-mcp-server-linux-arm64", "version": "0.0.0", "description": "Model Context Protocol (MCP) server for Kubernetes and OpenShift", + "repository": { + "type": "git", + "url": "git+https://github.com/containers/kubernetes-mcp-server.git" + }, "os": [ "linux" ], diff --git a/npm/kubernetes-mcp-server-windows-amd64/package.json b/npm/kubernetes-mcp-server-windows-amd64/package.json index 04b5d8ef..306e5047 100644 --- a/npm/kubernetes-mcp-server-windows-amd64/package.json +++ 
b/npm/kubernetes-mcp-server-windows-amd64/package.json @@ -2,6 +2,10 @@ "name": "kubernetes-mcp-server-windows-amd64", "version": "0.0.0", "description": "Model Context Protocol (MCP) server for Kubernetes and OpenShift", + "repository": { + "type": "git", + "url": "git+https://github.com/containers/kubernetes-mcp-server.git" + }, "os": [ "win32" ], diff --git a/npm/kubernetes-mcp-server-windows-arm64/package.json b/npm/kubernetes-mcp-server-windows-arm64/package.json index 38aa06f7..c30c4a30 100644 --- a/npm/kubernetes-mcp-server-windows-arm64/package.json +++ b/npm/kubernetes-mcp-server-windows-arm64/package.json @@ -2,6 +2,10 @@ "name": "kubernetes-mcp-server-windows-arm64", "version": "0.0.0", "description": "Model Context Protocol (MCP) server for Kubernetes and OpenShift", + "repository": { + "type": "git", + "url": "git+https://github.com/containers/kubernetes-mcp-server.git" + }, "os": [ "win32" ], diff --git a/pkg/config/config.go b/pkg/config/config.go index 5fe8e165..5bd00ff3 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -19,7 +19,7 @@ const ( type StaticConfig struct { DeniedResources []GroupVersionKind `toml:"denied_resources"` - LogLevel int `toml:"log_level,omitempty"` + LogLevel int `toml:"log_level,omitzero"` Port string `toml:"port,omitempty"` SSEBaseURL string `toml:"sse_base_url,omitempty"` KubeConfig string `toml:"kubeconfig,omitempty"` @@ -70,13 +70,6 @@ type StaticConfig struct { parsedClusterProviderConfigs map[string]ProviderConfig } -func Default() *StaticConfig { - return &StaticConfig{ - ListOutput: "table", - Toolsets: []string{"core", "config", "helm"}, - } -} - type GroupVersionKind struct { Group string `toml:"group"` Version string `toml:"version"` diff --git a/pkg/config/config_default.go b/pkg/config/config_default.go new file mode 100644 index 00000000..febea70c --- /dev/null +++ b/pkg/config/config_default.go @@ -0,0 +1,43 @@ +package config + +import ( + "bytes" + + "github.com/BurntSushi/toml" +) + +func 
Default() *StaticConfig { + defaultConfig := StaticConfig{ + ListOutput: "table", + Toolsets: []string{"core", "config", "helm"}, + } + overrides := defaultOverrides() + mergedConfig := mergeConfig(defaultConfig, overrides) + return &mergedConfig +} + +// HasDefaultOverrides indicates whether the internal defaultOverrides function +// provides any overrides or an empty StaticConfig. +func HasDefaultOverrides() bool { + overrides := defaultOverrides() + var buf bytes.Buffer + if err := toml.NewEncoder(&buf).Encode(overrides); err != nil { + // If marshaling fails, assume no overrides + return false + } + return len(bytes.TrimSpace(buf.Bytes())) > 0 +} + +// mergeConfig applies non-zero values from override to base using TOML serialization +// and returns the merged StaticConfig. +// In case of any error during marshalling or unmarshalling, it returns the base config unchanged. +func mergeConfig(base, override StaticConfig) StaticConfig { + var overrideBuffer bytes.Buffer + if err := toml.NewEncoder(&overrideBuffer).Encode(override); err != nil { + // If marshaling fails, return base unchanged + return base + } + + _, _ = toml.NewDecoder(&overrideBuffer).Decode(&base) + return base +} diff --git a/pkg/config/config_default_overrides.go b/pkg/config/config_default_overrides.go new file mode 100644 index 00000000..70d065bc --- /dev/null +++ b/pkg/config/config_default_overrides.go @@ -0,0 +1,8 @@ +package config + +func defaultOverrides() StaticConfig { + return StaticConfig{ + // IMPORTANT: this file is used to override default config values in downstream builds. + // This is intentionally left blank. 
+ } +} diff --git a/pkg/config/config_test.go b/pkg/config/config_test.go index d0e87726..afdde191 100644 --- a/pkg/config/config_test.go +++ b/pkg/config/config_test.go @@ -174,6 +174,49 @@ func (s *ConfigSuite) TestReadConfigValidPreservesDefaultsForMissingFields() { }) } +func (s *ConfigSuite) TestMergeConfig() { + base := StaticConfig{ + ListOutput: "table", + Toolsets: []string{"core", "config", "helm"}, + Port: "8080", + } + s.Run("merges override values on top of base", func() { + override := StaticConfig{ + ListOutput: "json", + Port: "9090", + } + + result := mergeConfig(base, override) + + s.Equal("json", result.ListOutput, "ListOutput should be overridden") + s.Equal("9090", result.Port, "Port should be overridden") + }) + + s.Run("preserves base values when override is empty", func() { + override := StaticConfig{} + + result := mergeConfig(base, override) + + s.Equal("table", result.ListOutput, "ListOutput should be preserved from base") + s.Equal([]string{"core", "config", "helm"}, result.Toolsets, "Toolsets should be preserved from base") + s.Equal("8080", result.Port, "Port should be preserved from base") + }) + + s.Run("handles partial overrides", func() { + override := StaticConfig{ + Toolsets: []string{"custom"}, + ReadOnly: true, + } + + result := mergeConfig(base, override) + + s.Equal("table", result.ListOutput, "ListOutput should be preserved from base") + s.Equal([]string{"custom"}, result.Toolsets, "Toolsets should be overridden") + s.Equal("8080", result.Port, "Port should be preserved from base since override doesn't specify it") + s.True(result.ReadOnly, "ReadOnly should be overridden to true") + }) +} + func TestConfig(t *testing.T) { suite.Run(t, new(ConfigSuite)) } diff --git a/pkg/http/authorization.go b/pkg/http/authorization.go index cded7f3e..19f61709 100644 --- a/pkg/http/authorization.go +++ b/pkg/http/authorization.go @@ -108,7 +108,7 @@ func write401(w http.ResponseWriter, wwwAuthenticateHeader, errorType, message s // - If 
ValidateToken is set, the exchanged token is then used against the Kubernetes API Server for TokenReview. // // see TestAuthorizationOidcTokenExchange -func AuthorizationMiddleware(staticConfig *config.StaticConfig, oidcProvider *oidc.Provider, verifier KubernetesApiTokenVerifier) func(http.Handler) http.Handler { +func AuthorizationMiddleware(staticConfig *config.StaticConfig, oidcProvider *oidc.Provider, verifier KubernetesApiTokenVerifier, httpClient *http.Client) func(http.Handler) http.Handler { return func(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.URL.Path == healthEndpoint || slices.Contains(WellKnownEndpoints, r.URL.EscapedPath()) { @@ -159,7 +159,11 @@ func AuthorizationMiddleware(staticConfig *config.StaticConfig, oidcProvider *oi if err == nil && sts.IsEnabled() { var exchangedToken *oauth2.Token // If the token is valid, we can exchange it for a new token with the specified audience and scopes. - exchangedToken, err = sts.ExternalAccountTokenExchange(r.Context(), &oauth2.Token{ + ctx := r.Context() + if httpClient != nil { + ctx = context.WithValue(ctx, oauth2.HTTPClient, httpClient) + } + exchangedToken, err = sts.ExternalAccountTokenExchange(ctx, &oauth2.Token{ AccessToken: claims.Token, TokenType: "Bearer", }) diff --git a/pkg/http/http.go b/pkg/http/http.go index 3f74c09f..8001462c 100644 --- a/pkg/http/http.go +++ b/pkg/http/http.go @@ -24,11 +24,11 @@ const ( sseMessageEndpoint = "/message" ) -func Serve(ctx context.Context, mcpServer *mcp.Server, staticConfig *config.StaticConfig, oidcProvider *oidc.Provider) error { +func Serve(ctx context.Context, mcpServer *mcp.Server, staticConfig *config.StaticConfig, oidcProvider *oidc.Provider, httpClient *http.Client) error { mux := http.NewServeMux() wrappedMux := RequestMiddleware( - AuthorizationMiddleware(staticConfig, oidcProvider, mcpServer)(mux), + AuthorizationMiddleware(staticConfig, oidcProvider, mcpServer, httpClient)(mux), ) 
httpServer := &http.Server{ @@ -44,7 +44,7 @@ func Serve(ctx context.Context, mcpServer *mcp.Server, staticConfig *config.Stat mux.HandleFunc(healthEndpoint, func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) }) - mux.Handle("/.well-known/", WellKnownHandler(staticConfig)) + mux.Handle("/.well-known/", WellKnownHandler(staticConfig, httpClient)) ctx, cancel := context.WithCancel(ctx) defer cancel() diff --git a/pkg/http/http_test.go b/pkg/http/http_test.go index 36e7f883..ab531813 100644 --- a/pkg/http/http_test.go +++ b/pkg/http/http_test.go @@ -89,7 +89,7 @@ func (c *httpContext) beforeEach(t *testing.T) { timeoutCtx, c.timeoutCancel = context.WithTimeout(t.Context(), 10*time.Second) group, gc := errgroup.WithContext(timeoutCtx) cancelCtx, c.StopServer = context.WithCancel(gc) - group.Go(func() error { return Serve(cancelCtx, mcpServer, c.StaticConfig, c.OidcProvider) }) + group.Go(func() error { return Serve(cancelCtx, mcpServer, c.StaticConfig, c.OidcProvider, nil) }) c.WaitForShutdown = group.Wait // Wait for HTTP server to start (using net) for i := 0; i < 10; i++ { @@ -390,6 +390,122 @@ func TestWellKnownReverseProxy(t *testing.T) { }) } +func TestWellKnownHeaderPropagation(t *testing.T) { + cases := []string{ + ".well-known/oauth-authorization-server", + ".well-known/oauth-protected-resource", + ".well-known/openid-configuration", + } + var receivedRequestHeaders http.Header + testServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if !strings.HasPrefix(r.URL.EscapedPath(), "/.well-known/") { + http.NotFound(w, r) + return + } + // Capture headers received from the proxy + receivedRequestHeaders = r.Header.Clone() + // Set response headers that should be propagated back + w.Header().Set("Content-Type", "application/json") + w.Header().Set("Access-Control-Allow-Origin", "https://example.com") + w.Header().Set("Access-Control-Allow-Methods", "GET, POST, OPTIONS") + 
w.Header().Set("Cache-Control", "no-cache") + w.Header().Set("X-Custom-Backend-Header", "backend-value") + _, _ = w.Write([]byte(`{"issuer": "https://example.com"}`)) + })) + t.Cleanup(testServer.Close) + staticConfig := &config.StaticConfig{ + AuthorizationURL: testServer.URL, + RequireOAuth: true, + ValidateToken: true, + ClusterProviderStrategy: config.ClusterProviderKubeConfig, + } + testCaseWithContext(t, &httpContext{StaticConfig: staticConfig}, func(ctx *httpContext) { + for _, path := range cases { + receivedRequestHeaders = nil + req, err := http.NewRequest("GET", fmt.Sprintf("http://%s/%s", ctx.HttpAddress, path), nil) + if err != nil { + t.Fatalf("Failed to create request: %v", err) + } + // Add various headers to test propagation + req.Header.Set("Origin", "https://example.com") + req.Header.Set("User-Agent", "Test-Agent/1.0") + req.Header.Set("Accept", "application/json") + req.Header.Set("Accept-Language", "en-US") + req.Header.Set("X-Custom-Header", "custom-value") + req.Header.Set("Referer", "https://example.com/page") + + resp, err := http.DefaultClient.Do(req) + if err != nil { + t.Fatalf("Failed to get %s endpoint: %v", path, err) + } + t.Cleanup(func() { _ = resp.Body.Close() }) + + t.Run("Well-known proxy propagates Origin header to backend for "+path, func(t *testing.T) { + if receivedRequestHeaders == nil { + t.Fatal("Backend did not receive any headers") + } + if receivedRequestHeaders.Get("Origin") != "https://example.com" { + t.Errorf("Expected Origin header 'https://example.com', got '%s'", receivedRequestHeaders.Get("Origin")) + } + }) + + t.Run("Well-known proxy propagates User-Agent header to backend for "+path, func(t *testing.T) { + if receivedRequestHeaders.Get("User-Agent") != "Test-Agent/1.0" { + t.Errorf("Expected User-Agent header 'Test-Agent/1.0', got '%s'", receivedRequestHeaders.Get("User-Agent")) + } + }) + + t.Run("Well-known proxy propagates Accept header to backend for "+path, func(t *testing.T) { + if 
receivedRequestHeaders.Get("Accept") != "application/json" { + t.Errorf("Expected Accept header 'application/json', got '%s'", receivedRequestHeaders.Get("Accept")) + } + }) + + t.Run("Well-known proxy propagates Accept-Language header to backend for "+path, func(t *testing.T) { + if receivedRequestHeaders.Get("Accept-Language") != "en-US" { + t.Errorf("Expected Accept-Language header 'en-US', got '%s'", receivedRequestHeaders.Get("Accept-Language")) + } + }) + + t.Run("Well-known proxy propagates custom headers to backend for "+path, func(t *testing.T) { + if receivedRequestHeaders.Get("X-Custom-Header") != "custom-value" { + t.Errorf("Expected X-Custom-Header 'custom-value', got '%s'", receivedRequestHeaders.Get("X-Custom-Header")) + } + }) + + t.Run("Well-known proxy propagates Referer header to backend for "+path, func(t *testing.T) { + if receivedRequestHeaders.Get("Referer") != "https://example.com/page" { + t.Errorf("Expected Referer header 'https://example.com/page', got '%s'", receivedRequestHeaders.Get("Referer")) + } + }) + + t.Run("Well-known proxy returns Access-Control-Allow-Origin from backend for "+path, func(t *testing.T) { + if resp.Header.Get("Access-Control-Allow-Origin") != "https://example.com" { + t.Errorf("Expected Access-Control-Allow-Origin header 'https://example.com', got '%s'", resp.Header.Get("Access-Control-Allow-Origin")) + } + }) + + t.Run("Well-known proxy returns Access-Control-Allow-Methods from backend for "+path, func(t *testing.T) { + if resp.Header.Get("Access-Control-Allow-Methods") != "GET, POST, OPTIONS" { + t.Errorf("Expected Access-Control-Allow-Methods header 'GET, POST, OPTIONS', got '%s'", resp.Header.Get("Access-Control-Allow-Methods")) + } + }) + + t.Run("Well-known proxy returns Cache-Control from backend for "+path, func(t *testing.T) { + if resp.Header.Get("Cache-Control") != "no-cache" { + t.Errorf("Expected Cache-Control header 'no-cache', got '%s'", resp.Header.Get("Cache-Control")) + } + }) + + 
t.Run("Well-known proxy returns custom response headers from backend for "+path, func(t *testing.T) { + if resp.Header.Get("X-Custom-Backend-Header") != "backend-value" { + t.Errorf("Expected X-Custom-Backend-Header 'backend-value', got '%s'", resp.Header.Get("X-Custom-Backend-Header")) + } + }) + } + }) +} + func TestWellKnownOverrides(t *testing.T) { cases := []string{ ".well-known/oauth-authorization-server", diff --git a/pkg/http/wellknown.go b/pkg/http/wellknown.go index 0d80221e..01ff3092 100644 --- a/pkg/http/wellknown.go +++ b/pkg/http/wellknown.go @@ -25,19 +25,24 @@ type WellKnown struct { authorizationUrl string scopesSupported []string disableDynamicClientRegistration bool + httpClient *http.Client } var _ http.Handler = &WellKnown{} -func WellKnownHandler(staticConfig *config.StaticConfig) http.Handler { +func WellKnownHandler(staticConfig *config.StaticConfig, httpClient *http.Client) http.Handler { authorizationUrl := staticConfig.AuthorizationURL - if authorizationUrl != "" && strings.HasSuffix("authorizationUrl", "/") { + if authorizationUrl != "" && strings.HasSuffix(authorizationUrl, "/") { authorizationUrl = strings.TrimSuffix(authorizationUrl, "/") } + if httpClient == nil { + httpClient = http.DefaultClient + } return &WellKnown{ authorizationUrl: authorizationUrl, disableDynamicClientRegistration: staticConfig.DisableDynamicClientRegistration, scopesSupported: staticConfig.OAuthScopes, + httpClient: httpClient, } } @@ -51,7 +56,12 @@ func (w WellKnown) ServeHTTP(writer http.ResponseWriter, request *http.Request) http.Error(writer, "Failed to create request: "+err.Error(), http.StatusInternalServerError) return } - resp, err := http.DefaultClient.Do(req.WithContext(request.Context())) + for key, values := range request.Header { + for _, value := range values { + req.Header.Add(key, value) + } + } + resp, err := w.httpClient.Do(req.WithContext(request.Context())) if err != nil { http.Error(writer, "Failed to perform request: "+err.Error(), 
http.StatusInternalServerError) return diff --git a/pkg/kubernetes-mcp-server/cmd/root.go b/pkg/kubernetes-mcp-server/cmd/root.go index 1e91d0c4..db1782ab 100644 --- a/pkg/kubernetes-mcp-server/cmd/root.go +++ b/pkg/kubernetes-mcp-server/cmd/root.go @@ -301,10 +301,11 @@ func (m *MCPServerOptions) Run() error { } var oidcProvider *oidc.Provider + var httpClient *http.Client if m.StaticConfig.AuthorizationURL != "" { ctx := context.Background() if m.StaticConfig.CertificateAuthority != "" { - httpClient := &http.Client{} + httpClient = &http.Client{} caCert, err := os.ReadFile(m.StaticConfig.CertificateAuthority) if err != nil { return fmt.Errorf("failed to read CA certificate from %s: %w", m.StaticConfig.CertificateAuthority, err) @@ -341,7 +342,7 @@ func (m *MCPServerOptions) Run() error { if m.StaticConfig.Port != "" { ctx := context.Background() - return internalhttp.Serve(ctx, mcpServer, m.StaticConfig, oidcProvider) + return internalhttp.Serve(ctx, mcpServer, m.StaticConfig, oidcProvider, httpClient) } if err := mcpServer.ServeStdio(); err != nil && !errors.Is(err, context.Canceled) { diff --git a/pkg/kubernetes/accesscontrol_clientset.go b/pkg/kubernetes/accesscontrol_clientset.go index ed875c64..0ce64c49 100644 --- a/pkg/kubernetes/accesscontrol_clientset.go +++ b/pkg/kubernetes/accesscontrol_clientset.go @@ -39,6 +39,22 @@ func (a *AccessControlClientset) DiscoveryClient() discovery.DiscoveryInterface return a.discoveryClient } +func (a *AccessControlClientset) NodesLogs(ctx context.Context, name, logPath string) (*rest.Request, error) { + gvk := &schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Node"} + if !isAllowed(a.staticConfig, gvk) { + return nil, isNotAllowedError(gvk) + } + + if _, err := a.delegate.CoreV1().Nodes().Get(ctx, name, metav1.GetOptions{}); err != nil { + return nil, fmt.Errorf("failed to get node %s: %w", name, err) + } + + url := []string{"api", "v1", "nodes", name, "proxy", "logs", logPath} + return 
a.delegate.CoreV1().RESTClient(). + Get(). + AbsPath(url...), nil +} + func (a *AccessControlClientset) Pods(namespace string) (corev1.PodInterface, error) { gvk := &schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Pod"} if !isAllowed(a.staticConfig, gvk) { diff --git a/pkg/kubernetes/configuration.go b/pkg/kubernetes/configuration.go index 7b658acb..71fd2dd2 100644 --- a/pkg/kubernetes/configuration.go +++ b/pkg/kubernetes/configuration.go @@ -38,9 +38,6 @@ func (k *Kubernetes) NamespaceOrDefault(namespace string) string { // ConfigurationContextsDefault returns the current context name // TODO: Should be moved to the Provider level ? func (k *Kubernetes) ConfigurationContextsDefault() (string, error) { - if k.manager.inCluster { - return inClusterKubeConfigDefaultContext, nil - } cfg, err := k.manager.clientCmdConfig.RawConfig() if err != nil { return "", err @@ -51,9 +48,6 @@ func (k *Kubernetes) ConfigurationContextsDefault() (string, error) { // ConfigurationContextsList returns the list of available context names // TODO: Should be moved to the Provider level ? 
func (k *Kubernetes) ConfigurationContextsList() (map[string]string, error) { - if k.manager.inCluster { - return map[string]string{inClusterKubeConfigDefaultContext: ""}, nil - } cfg, err := k.manager.clientCmdConfig.RawConfig() if err != nil { return nil, err @@ -77,21 +71,7 @@ func (k *Kubernetes) ConfigurationContextsList() (map[string]string, error) { func (k *Kubernetes) ConfigurationView(minify bool) (runtime.Object, error) { var cfg clientcmdapi.Config var err error - if k.manager.inCluster { - cfg = *clientcmdapi.NewConfig() - cfg.Clusters["cluster"] = &clientcmdapi.Cluster{ - Server: k.manager.cfg.Host, - InsecureSkipTLSVerify: k.manager.cfg.Insecure, - } - cfg.AuthInfos["user"] = &clientcmdapi.AuthInfo{ - Token: k.manager.cfg.BearerToken, - } - cfg.Contexts[inClusterKubeConfigDefaultContext] = &clientcmdapi.Context{ - Cluster: "cluster", - AuthInfo: "user", - } - cfg.CurrentContext = inClusterKubeConfigDefaultContext - } else if cfg, err = k.manager.clientCmdConfig.RawConfig(); err != nil { + if cfg, err = k.manager.clientCmdConfig.RawConfig(); err != nil { return nil, err } if minify { diff --git a/pkg/kubernetes/kubernetes_derived_test.go b/pkg/kubernetes/kubernetes_derived_test.go index 5ad64db1..69d4ef33 100644 --- a/pkg/kubernetes/kubernetes_derived_test.go +++ b/pkg/kubernetes/kubernetes_derived_test.go @@ -47,7 +47,7 @@ users: kubeconfig = "` + strings.ReplaceAll(kubeconfigPath, `\`, `\\`) + `" `))) s.Run("without authorization header returns original manager", func() { - testManager, err := NewManager(testStaticConfig, "") + testManager, err := NewKubeconfigManager(testStaticConfig, "") s.Require().NoErrorf(err, "failed to create test manager: %v", err) s.T().Cleanup(testManager.Close) @@ -58,7 +58,7 @@ users: }) s.Run("with invalid authorization header returns original manager", func() { - testManager, err := NewManager(testStaticConfig, "") + testManager, err := NewKubeconfigManager(testStaticConfig, "") s.Require().NoErrorf(err, "failed to 
create test manager: %v", err) s.T().Cleanup(testManager.Close) @@ -70,7 +70,7 @@ users: }) s.Run("with valid bearer token creates derived manager with correct configuration", func() { - testManager, err := NewManager(testStaticConfig, "") + testManager, err := NewKubeconfigManager(testStaticConfig, "") s.Require().NoErrorf(err, "failed to create test manager: %v", err) s.T().Cleanup(testManager.Close) @@ -138,7 +138,7 @@ users: `))) s.Run("with no authorization header returns oauth token required error", func() { - testManager, err := NewManager(testStaticConfig, "") + testManager, err := NewKubeconfigManager(testStaticConfig, "") s.Require().NoErrorf(err, "failed to create test manager: %v", err) s.T().Cleanup(testManager.Close) @@ -149,7 +149,7 @@ users: }) s.Run("with invalid authorization header returns oauth token required error", func() { - testManager, err := NewManager(testStaticConfig, "") + testManager, err := NewKubeconfigManager(testStaticConfig, "") s.Require().NoErrorf(err, "failed to create test manager: %v", err) s.T().Cleanup(testManager.Close) @@ -161,7 +161,7 @@ users: }) s.Run("with valid bearer token creates derived manager", func() { - testManager, err := NewManager(testStaticConfig, "") + testManager, err := NewKubeconfigManager(testStaticConfig, "") s.Require().NoErrorf(err, "failed to create test manager: %v", err) s.T().Cleanup(testManager.Close) diff --git a/pkg/kubernetes/manager.go b/pkg/kubernetes/manager.go index 9a283a58..d09b8790 100644 --- a/pkg/kubernetes/manager.go +++ b/pkg/kubernetes/manager.go @@ -25,7 +25,6 @@ import ( type Manager struct { cfg *rest.Config clientCmdConfig clientcmd.ClientConfig - inCluster bool discoveryClient discovery.CachedDiscoveryInterface accessControlClientSet *AccessControlClientset accessControlRESTMapper *AccessControlRESTMapper @@ -38,33 +37,77 @@ type Manager struct { var _ helm.Kubernetes = (*Manager)(nil) var _ Openshift = (*Manager)(nil) -func NewManager(config *config.StaticConfig, 
kubeconfigContext string) (*Manager, error) { - k8s := &Manager{ - staticConfig: config, +var ( + ErrorKubeconfigInClusterNotAllowed = errors.New("kubeconfig manager cannot be used in in-cluster deployments") + ErrorInClusterNotInCluster = errors.New("in-cluster manager cannot be used outside of a cluster") +) + +func NewKubeconfigManager(config *config.StaticConfig, kubeconfigContext string) (*Manager, error) { + if IsInCluster(config) { + return nil, ErrorKubeconfigInClusterNotAllowed } + pathOptions := clientcmd.NewDefaultPathOptions() - if k8s.staticConfig.KubeConfig != "" { - pathOptions.LoadingRules.ExplicitPath = k8s.staticConfig.KubeConfig + if config.KubeConfig != "" { + pathOptions.LoadingRules.ExplicitPath = config.KubeConfig } - k8s.clientCmdConfig = clientcmd.NewNonInteractiveDeferredLoadingClientConfig( + clientCmdConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig( pathOptions.LoadingRules, &clientcmd.ConfigOverrides{ ClusterInfo: clientcmdapi.Cluster{Server: ""}, CurrentContext: kubeconfigContext, }) - var err error - if IsInCluster(k8s.staticConfig) { - k8s.cfg, err = InClusterConfig() - k8s.inCluster = true - } else { - k8s.cfg, err = k8s.clientCmdConfig.ClientConfig() + + restConfig, err := clientCmdConfig.ClientConfig() + if err != nil { + return nil, fmt.Errorf("failed to create kubernetes rest config from kubeconfig: %v", err) } - if err != nil || k8s.cfg == nil { - return nil, fmt.Errorf("failed to create kubernetes rest config: %v", err) + + return newManager(config, restConfig, clientCmdConfig) +} + +func NewInClusterManager(config *config.StaticConfig) (*Manager, error) { + if config.KubeConfig != "" { + return nil, fmt.Errorf("kubeconfig file %s cannot be used with the in-cluster deployments: %v", config.KubeConfig, ErrorKubeconfigInClusterNotAllowed) + } + + if !IsInCluster(config) { + return nil, ErrorInClusterNotInCluster + } + + restConfig, err := InClusterConfig() + if err != nil { + return nil, fmt.Errorf("failed to 
create in-cluster kubernetes rest config: %v", err) + } + + // Create a dummy kubeconfig clientcmdapi.Config for in-cluster config to be used in places where clientcmd.ClientConfig is required + clientCmdConfig := clientcmdapi.NewConfig() + clientCmdConfig.Clusters["cluster"] = &clientcmdapi.Cluster{ + Server: restConfig.Host, + InsecureSkipTLSVerify: restConfig.Insecure, + } + clientCmdConfig.AuthInfos["user"] = &clientcmdapi.AuthInfo{ + Token: restConfig.BearerToken, + } + clientCmdConfig.Contexts[inClusterKubeConfigDefaultContext] = &clientcmdapi.Context{ + Cluster: "cluster", + AuthInfo: "user", + } + clientCmdConfig.CurrentContext = inClusterKubeConfigDefaultContext + + return newManager(config, restConfig, clientcmd.NewDefaultClientConfig(*clientCmdConfig, nil)) +} + +func newManager(config *config.StaticConfig, restConfig *rest.Config, clientCmdConfig clientcmd.ClientConfig) (*Manager, error) { + k8s := &Manager{ + staticConfig: config, + cfg: restConfig, + clientCmdConfig: clientCmdConfig, } if k8s.cfg.UserAgent == "" { k8s.cfg.UserAgent = rest.DefaultKubernetesUserAgent() } + var err error // TODO: Won't work because not all client-go clients use the shared context (e.g. 
discovery client uses context.TODO()) //k8s.cfg.Wrap(func(original http.RoundTripper) http.RoundTripper { // return &impersonateRoundTripper{original} @@ -229,7 +272,6 @@ func (m *Manager) Derived(ctx context.Context) (*Kubernetes, error) { derived := &Kubernetes{ manager: &Manager{ clientCmdConfig: clientcmd.NewDefaultClientConfig(clientCmdApiConfig, nil), - inCluster: m.inCluster, cfg: derivedCfg, staticConfig: m.staticConfig, }, diff --git a/pkg/kubernetes/manager_test.go b/pkg/kubernetes/manager_test.go index 696e4f50..63241fa9 100644 --- a/pkg/kubernetes/manager_test.go +++ b/pkg/kubernetes/manager_test.go @@ -34,126 +34,165 @@ func (s *ManagerTestSuite) TearDownTest() { } } -func (s *ManagerTestSuite) TestNewManagerInCluster() { - InClusterConfig = func() (*rest.Config, error) { - return &rest.Config{}, nil - } - s.Run("with default StaticConfig (empty kubeconfig)", func() { - manager, err := NewManager(&config.StaticConfig{}, "") - s.Require().NoError(err) - s.Require().NotNil(manager) - s.Run("behaves as in cluster", func() { - s.True(manager.inCluster, "expected in cluster, got not in cluster") - }) - s.Run("sets default user-agent", func() { - s.Contains(manager.cfg.UserAgent, "("+runtime.GOOS+"/"+runtime.GOARCH+")") - }) - }) - s.Run("with explicit kubeconfig", func() { - manager, err := NewManager(&config.StaticConfig{ - KubeConfig: s.mockServer.KubeconfigFile(s.T()), - }, "") - s.Require().NoError(err) - s.Require().NotNil(manager) - s.Run("behaves as NOT in cluster", func() { - s.False(manager.inCluster, "expected not in cluster, got in cluster") +func (s *ManagerTestSuite) TestNewInClusterManager() { + s.Run("In cluster", func() { + InClusterConfig = func() (*rest.Config, error) { + return &rest.Config{}, nil + } + s.Run("with default StaticConfig (empty kubeconfig)", func() { + manager, err := NewInClusterManager(&config.StaticConfig{}) + s.Require().NoError(err) + s.Require().NotNil(manager) + s.Run("behaves as in cluster", func() { + rawConfig, 
err := manager.clientCmdConfig.RawConfig() + s.Require().NoError(err) + s.Equal("in-cluster", rawConfig.CurrentContext, "expected current context to be 'in-cluster'") + }) + s.Run("sets default user-agent", func() { + s.Contains(manager.cfg.UserAgent, "("+runtime.GOOS+"/"+runtime.GOARCH+")") + }) + }) + s.Run("with explicit kubeconfig", func() { + manager, err := NewInClusterManager(&config.StaticConfig{ + KubeConfig: s.mockServer.KubeconfigFile(s.T()), + }) + s.Run("returns error", func() { + s.Error(err) + s.Nil(manager) + s.Regexp("kubeconfig file .+ cannot be used with the in-cluster deployments", err.Error()) + }) }) }) -} - -func (s *ManagerTestSuite) TestNewManagerLocal() { - InClusterConfig = func() (*rest.Config, error) { - return nil, rest.ErrNotInCluster - } - s.Run("with valid kubeconfig in env", func() { - kubeconfig := s.mockServer.KubeconfigFile(s.T()) - s.Require().NoError(os.Setenv("KUBECONFIG", kubeconfig)) - manager, err := NewManager(&config.StaticConfig{}, "") - s.Require().NoError(err) - s.Require().NotNil(manager) - s.Run("behaves as NOT in cluster", func() { - s.False(manager.inCluster, "expected not in cluster, got in cluster") - }) - s.Run("loads correct config", func() { - s.Contains(manager.clientCmdConfig.ConfigAccess().GetLoadingPrecedence(), kubeconfig, "expected kubeconfig path to match") - }) - s.Run("sets default user-agent", func() { - s.Contains(manager.cfg.UserAgent, "("+runtime.GOOS+"/"+runtime.GOARCH+")") - }) - s.Run("rest config host points to mock server", func() { - s.Equal(s.mockServer.Config().Host, manager.cfg.Host, "expected rest config host to match mock server") - }) - }) - s.Run("with valid kubeconfig in env and explicit kubeconfig in config", func() { - kubeconfigInEnv := s.mockServer.KubeconfigFile(s.T()) - s.Require().NoError(os.Setenv("KUBECONFIG", kubeconfigInEnv)) - kubeconfigExplicit := s.mockServer.KubeconfigFile(s.T()) - manager, err := NewManager(&config.StaticConfig{ - KubeConfig: kubeconfigExplicit, - }, 
"") - s.Require().NoError(err) - s.Require().NotNil(manager) - s.Run("behaves as NOT in cluster", func() { - s.False(manager.inCluster, "expected not in cluster, got in cluster") - }) - s.Run("loads correct config (explicit)", func() { - s.NotContains(manager.clientCmdConfig.ConfigAccess().GetLoadingPrecedence(), kubeconfigInEnv, "expected kubeconfig path to NOT match env") - s.Contains(manager.clientCmdConfig.ConfigAccess().GetLoadingPrecedence(), kubeconfigExplicit, "expected kubeconfig path to match explicit") - }) - s.Run("rest config host points to mock server", func() { - s.Equal(s.mockServer.Config().Host, manager.cfg.Host, "expected rest config host to match mock server") - }) - }) - s.Run("with valid kubeconfig in env and explicit kubeconfig context (valid)", func() { - kubeconfig := s.mockServer.Kubeconfig() - kubeconfig.Contexts["not-the-mock-server"] = clientcmdapi.NewContext() - kubeconfig.Contexts["not-the-mock-server"].Cluster = "not-the-mock-server" - kubeconfig.Clusters["not-the-mock-server"] = clientcmdapi.NewCluster() - kubeconfig.Clusters["not-the-mock-server"].Server = "https://not-the-mock-server:6443" // REST configuration should point to mock server, not this - kubeconfig.CurrentContext = "not-the-mock-server" - kubeconfigFile := test.KubeconfigFile(s.T(), kubeconfig) - s.Require().NoError(os.Setenv("KUBECONFIG", kubeconfigFile)) - manager, err := NewManager(&config.StaticConfig{}, "fake-context") // fake-context is the one mock-server serves - s.Require().NoError(err) - s.Require().NotNil(manager) - s.Run("behaves as NOT in cluster", func() { - s.False(manager.inCluster, "expected not in cluster, got in cluster") - }) - s.Run("loads correct config", func() { - s.Contains(manager.clientCmdConfig.ConfigAccess().GetLoadingPrecedence(), kubeconfigFile, "expected kubeconfig path to match") - }) - s.Run("rest config host points to mock server", func() { - s.Equal(s.mockServer.Config().Host, manager.cfg.Host, "expected rest config host to match 
mock server") - }) - }) - s.Run("with valid kubeconfig in env and explicit kubeconfig context (invalid)", func() { - kubeconfigInEnv := s.mockServer.KubeconfigFile(s.T()) - s.Require().NoError(os.Setenv("KUBECONFIG", kubeconfigInEnv)) - manager, err := NewManager(&config.StaticConfig{}, "i-do-not-exist") + s.Run("Out of cluster", func() { + InClusterConfig = func() (*rest.Config, error) { + return nil, rest.ErrNotInCluster + } + manager, err := NewInClusterManager(&config.StaticConfig{}) s.Run("returns error", func() { s.Error(err) s.Nil(manager) - s.ErrorContains(err, `failed to create kubernetes rest config: context "i-do-not-exist" does not exist`) + s.ErrorIs(err, ErrorInClusterNotInCluster) + s.ErrorContains(err, "in-cluster manager cannot be used outside of a cluster") }) }) - s.Run("with invalid path kubeconfig in env", func() { - s.Require().NoError(os.Setenv("KUBECONFIG", "i-dont-exist")) - manager, err := NewManager(&config.StaticConfig{}, "") - s.Run("returns error", func() { - s.Error(err) - s.Nil(manager) - s.ErrorContains(err, "failed to create kubernetes rest config") +} + +func (s *ManagerTestSuite) TestNewKubeconfigManager() { + s.Run("Out of cluster", func() { + InClusterConfig = func() (*rest.Config, error) { + return nil, rest.ErrNotInCluster + } + s.Run("with valid kubeconfig in env", func() { + kubeconfig := s.mockServer.KubeconfigFile(s.T()) + s.Require().NoError(os.Setenv("KUBECONFIG", kubeconfig)) + manager, err := NewKubeconfigManager(&config.StaticConfig{}, "") + s.Require().NoError(err) + s.Require().NotNil(manager) + s.Run("behaves as NOT in cluster", func() { + rawConfig, err := manager.clientCmdConfig.RawConfig() + s.Require().NoError(err) + s.NotEqual("in-cluster", rawConfig.CurrentContext, "expected current context to NOT be 'in-cluster'") + s.Equal("fake-context", rawConfig.CurrentContext, "expected current context to be 'fake-context' as in kubeconfig") + }) + s.Run("loads correct config", func() { + 
s.Contains(manager.clientCmdConfig.ConfigAccess().GetLoadingPrecedence(), kubeconfig, "expected kubeconfig path to match") + }) + s.Run("sets default user-agent", func() { + s.Contains(manager.cfg.UserAgent, "("+runtime.GOOS+"/"+runtime.GOARCH+")") + }) + s.Run("rest config host points to mock server", func() { + s.Equal(s.mockServer.Config().Host, manager.cfg.Host, "expected rest config host to match mock server") + }) + }) + s.Run("with valid kubeconfig in env and explicit kubeconfig in config", func() { + kubeconfigInEnv := s.mockServer.KubeconfigFile(s.T()) + s.Require().NoError(os.Setenv("KUBECONFIG", kubeconfigInEnv)) + kubeconfigExplicit := s.mockServer.KubeconfigFile(s.T()) + manager, err := NewKubeconfigManager(&config.StaticConfig{ + KubeConfig: kubeconfigExplicit, + }, "") + s.Require().NoError(err) + s.Require().NotNil(manager) + s.Run("behaves as NOT in cluster", func() { + rawConfig, err := manager.clientCmdConfig.RawConfig() + s.Require().NoError(err) + s.NotEqual("in-cluster", rawConfig.CurrentContext, "expected current context to NOT be 'in-cluster'") + s.Equal("fake-context", rawConfig.CurrentContext, "expected current context to be 'fake-context' as in kubeconfig") + }) + s.Run("loads correct config (explicit)", func() { + s.NotContains(manager.clientCmdConfig.ConfigAccess().GetLoadingPrecedence(), kubeconfigInEnv, "expected kubeconfig path to NOT match env") + s.Contains(manager.clientCmdConfig.ConfigAccess().GetLoadingPrecedence(), kubeconfigExplicit, "expected kubeconfig path to match explicit") + }) + s.Run("rest config host points to mock server", func() { + s.Equal(s.mockServer.Config().Host, manager.cfg.Host, "expected rest config host to match mock server") + }) + }) + s.Run("with valid kubeconfig in env and explicit kubeconfig context (valid)", func() { + kubeconfig := s.mockServer.Kubeconfig() + kubeconfig.Contexts["not-the-mock-server"] = clientcmdapi.NewContext() + kubeconfig.Contexts["not-the-mock-server"].Cluster = 
"not-the-mock-server" + kubeconfig.Clusters["not-the-mock-server"] = clientcmdapi.NewCluster() + kubeconfig.Clusters["not-the-mock-server"].Server = "https://not-the-mock-server:6443" // REST configuration should point to mock server, not this + kubeconfig.CurrentContext = "not-the-mock-server" + kubeconfigFile := test.KubeconfigFile(s.T(), kubeconfig) + s.Require().NoError(os.Setenv("KUBECONFIG", kubeconfigFile)) + manager, err := NewKubeconfigManager(&config.StaticConfig{}, "fake-context") // fake-context is the one mock-server serves + s.Require().NoError(err) + s.Require().NotNil(manager) + s.Run("behaves as NOT in cluster", func() { + rawConfig, err := manager.clientCmdConfig.RawConfig() + s.Require().NoError(err) + s.NotEqual("in-cluster", rawConfig.CurrentContext, "expected current context to NOT be 'in-cluster'") + s.Equal("not-the-mock-server", rawConfig.CurrentContext, "expected current context to be 'not-the-mock-server' as in explicit context") + }) + s.Run("loads correct config", func() { + s.Contains(manager.clientCmdConfig.ConfigAccess().GetLoadingPrecedence(), kubeconfigFile, "expected kubeconfig path to match") + }) + s.Run("rest config host points to mock server", func() { + s.Equal(s.mockServer.Config().Host, manager.cfg.Host, "expected rest config host to match mock server") + }) + }) + s.Run("with valid kubeconfig in env and explicit kubeconfig context (invalid)", func() { + kubeconfigInEnv := s.mockServer.KubeconfigFile(s.T()) + s.Require().NoError(os.Setenv("KUBECONFIG", kubeconfigInEnv)) + manager, err := NewKubeconfigManager(&config.StaticConfig{}, "i-do-not-exist") + s.Run("returns error", func() { + s.Error(err) + s.Nil(manager) + s.ErrorContains(err, `failed to create kubernetes rest config from kubeconfig: context "i-do-not-exist" does not exist`) + }) + }) + s.Run("with invalid path kubeconfig in env", func() { + s.Require().NoError(os.Setenv("KUBECONFIG", "i-dont-exist")) + manager, err := NewKubeconfigManager(&config.StaticConfig{}, 
"") + s.Run("returns error", func() { + s.Error(err) + s.Nil(manager) + s.ErrorContains(err, "failed to create kubernetes rest config") + }) + }) + s.Run("with empty kubeconfig in env", func() { + kubeconfigPath := filepath.Join(s.T().TempDir(), "config") + s.Require().NoError(os.WriteFile(kubeconfigPath, []byte(""), 0644)) + s.Require().NoError(os.Setenv("KUBECONFIG", kubeconfigPath)) + manager, err := NewKubeconfigManager(&config.StaticConfig{}, "") + s.Run("returns error", func() { + s.Error(err) + s.Nil(manager) + s.ErrorContains(err, "no configuration has been provided") + }) }) }) - s.Run("with empty kubeconfig in env", func() { - kubeconfigPath := filepath.Join(s.T().TempDir(), "config") - s.Require().NoError(os.WriteFile(kubeconfigPath, []byte(""), 0644)) - s.Require().NoError(os.Setenv("KUBECONFIG", kubeconfigPath)) - manager, err := NewManager(&config.StaticConfig{}, "") + s.Run("In cluster", func() { + InClusterConfig = func() (*rest.Config, error) { + return &rest.Config{}, nil + } + manager, err := NewKubeconfigManager(&config.StaticConfig{}, "") s.Run("returns error", func() { s.Error(err) s.Nil(manager) - s.ErrorContains(err, "no configuration has been provided") + s.ErrorIs(err, ErrorKubeconfigInClusterNotAllowed) + s.ErrorContains(err, "kubeconfig manager cannot be used in in-cluster deployments") }) }) } diff --git a/pkg/kubernetes/nodes.go b/pkg/kubernetes/nodes.go new file mode 100644 index 00000000..76d9cc92 --- /dev/null +++ b/pkg/kubernetes/nodes.go @@ -0,0 +1,36 @@ +package kubernetes + +import ( + "context" + "fmt" +) + +func (k *Kubernetes) NodesLog(ctx context.Context, name string, logPath string, tail int64) (string, error) { + // Use the node proxy API to access logs from the kubelet + // Common log paths: + // - /var/log/kubelet.log - kubelet logs + // - /var/log/kube-proxy.log - kube-proxy logs + // - /var/log/containers/ - container logs + + req, err := k.AccessControlClientset().NodesLogs(ctx, name, logPath) + if err != nil { + 
return "", err + } + + // Query parameters for tail + if tail > 0 { + req.Param("tailLines", fmt.Sprintf("%d", tail)) + } + + result := req.Do(ctx) + if result.Error() != nil { + return "", fmt.Errorf("failed to get node logs: %w", result.Error()) + } + + rawData, err := result.Raw() + if err != nil { + return "", fmt.Errorf("failed to read node log response: %w", err) + } + + return string(rawData), nil +} diff --git a/pkg/kubernetes/provider.go b/pkg/kubernetes/provider.go index 26c8ff05..092c7de8 100644 --- a/pkg/kubernetes/provider.go +++ b/pkg/kubernetes/provider.go @@ -30,12 +30,7 @@ func NewProvider(cfg *config.StaticConfig) (Provider, error) { return nil, err } - m, err := NewManager(cfg, "") - if err != nil { - return nil, err - } - - return factory(m, cfg) + return factory(cfg) } func resolveStrategy(cfg *config.StaticConfig) string { diff --git a/pkg/kubernetes/provider_kubeconfig.go b/pkg/kubernetes/provider_kubeconfig.go index 21b64136..9ab055c8 100644 --- a/pkg/kubernetes/provider_kubeconfig.go +++ b/pkg/kubernetes/provider_kubeconfig.go @@ -2,6 +2,7 @@ package kubernetes import ( "context" + "errors" "fmt" "github.com/containers/kubernetes-mcp-server/pkg/config" @@ -27,11 +28,16 @@ func init() { } // newKubeConfigClusterProvider creates a provider that manages multiple clusters -// via kubeconfig contexts. Returns an error if the manager is in-cluster mode. -func newKubeConfigClusterProvider(m *Manager, cfg *config.StaticConfig) (Provider, error) { - // Handle in-cluster mode - if IsInCluster(cfg) { - return nil, fmt.Errorf("kubeconfig ClusterProviderStrategy is invalid for in-cluster deployments") +// via kubeconfig contexts. +// Internally, it leverages a KubeconfigManager for each context, initializing them +// lazily when requested. 
+func newKubeConfigClusterProvider(cfg *config.StaticConfig) (Provider, error) { + m, err := NewKubeconfigManager(cfg, "") + if err != nil { + if errors.Is(err, ErrorKubeconfigInClusterNotAllowed) { + return nil, fmt.Errorf("kubeconfig ClusterProviderStrategy is invalid for in-cluster deployments: %v", err) + } + return nil, err } rawConfig, err := m.clientCmdConfig.RawConfig() @@ -65,7 +71,7 @@ func (p *kubeConfigClusterProvider) managerForContext(context string) (*Manager, baseManager := p.managers[p.defaultContext] - m, err := NewManager(baseManager.staticConfig, context) + m, err := NewKubeconfigManager(baseManager.staticConfig, context) if err != nil { return nil, err } diff --git a/pkg/kubernetes/provider_registry.go b/pkg/kubernetes/provider_registry.go index 9af5a9ee..b9077f15 100644 --- a/pkg/kubernetes/provider_registry.go +++ b/pkg/kubernetes/provider_registry.go @@ -10,7 +10,7 @@ import ( // ProviderFactory creates a new Provider instance for a given strategy. // Implementations should validate that the Manager is compatible with their strategy // (e.g., kubeconfig provider should reject in-cluster managers). 
-type ProviderFactory func(m *Manager, cfg *config.StaticConfig) (Provider, error) +type ProviderFactory func(cfg *config.StaticConfig) (Provider, error) var providerFactories = make(map[string]ProviderFactory) diff --git a/pkg/kubernetes/provider_registry_test.go b/pkg/kubernetes/provider_registry_test.go index 876e2bab..c94e1ec1 100644 --- a/pkg/kubernetes/provider_registry_test.go +++ b/pkg/kubernetes/provider_registry_test.go @@ -13,18 +13,18 @@ type ProviderRegistryTestSuite struct { func (s *ProviderRegistryTestSuite) TestRegisterProvider() { s.Run("With no pre-existing provider, registers the provider", func() { - RegisterProvider("test-strategy", func(m *Manager, cfg *config.StaticConfig) (Provider, error) { + RegisterProvider("test-strategy", func(cfg *config.StaticConfig) (Provider, error) { return nil, nil }) _, exists := providerFactories["test-strategy"] s.True(exists, "Provider should be registered") }) s.Run("With pre-existing provider, panics", func() { - RegisterProvider("test-pre-existent", func(m *Manager, cfg *config.StaticConfig) (Provider, error) { + RegisterProvider("test-pre-existent", func(cfg *config.StaticConfig) (Provider, error) { return nil, nil }) s.Panics(func() { - RegisterProvider("test-pre-existent", func(m *Manager, cfg *config.StaticConfig) (Provider, error) { + RegisterProvider("test-pre-existent", func(cfg *config.StaticConfig) (Provider, error) { return nil, nil }) }, "Registering a provider with an existing strategy should panic") @@ -39,10 +39,10 @@ func (s *ProviderRegistryTestSuite) TestGetRegisteredStrategies() { }) s.Run("With multiple registered providers, returns sorted list", func() { providerFactories = make(map[string]ProviderFactory) - RegisterProvider("foo-strategy", func(m *Manager, cfg *config.StaticConfig) (Provider, error) { + RegisterProvider("foo-strategy", func(cfg *config.StaticConfig) (Provider, error) { return nil, nil }) - RegisterProvider("bar-strategy", func(m *Manager, cfg *config.StaticConfig) 
(Provider, error) { + RegisterProvider("bar-strategy", func(cfg *config.StaticConfig) (Provider, error) { return nil, nil }) strategies := GetRegisteredStrategies() diff --git a/pkg/kubernetes/provider_single.go b/pkg/kubernetes/provider_single.go index a3de8b4f..3693d639 100644 --- a/pkg/kubernetes/provider_single.go +++ b/pkg/kubernetes/provider_single.go @@ -2,6 +2,7 @@ package kubernetes import ( "context" + "errors" "fmt" "github.com/containers/kubernetes-mcp-server/pkg/config" @@ -24,14 +25,26 @@ func init() { } // newSingleClusterProvider creates a provider that manages a single cluster. -// Validates that the manager is in-cluster when the in-cluster strategy is used. +// When used within a cluster or with an 'in-cluster' strategy, it uses an InClusterManager. +// Otherwise, it uses a KubeconfigManager. func newSingleClusterProvider(strategy string) ProviderFactory { - return func(m *Manager, cfg *config.StaticConfig) (Provider, error) { + return func(cfg *config.StaticConfig) (Provider, error) { if cfg != nil && cfg.KubeConfig != "" && strategy == config.ClusterProviderInCluster { return nil, fmt.Errorf("kubeconfig file %s cannot be used with the in-cluster ClusterProviderStrategy", cfg.KubeConfig) } - if strategy == config.ClusterProviderInCluster && !IsInCluster(cfg) { - return nil, fmt.Errorf("server must be deployed in cluster for the in-cluster ClusterProviderStrategy") + + var m *Manager + var err error + if strategy == config.ClusterProviderInCluster || IsInCluster(cfg) { + m, err = NewInClusterManager(cfg) + } else { + m, err = NewKubeconfigManager(cfg, "") + } + if err != nil { + if errors.Is(err, ErrorInClusterNotInCluster) { + return nil, fmt.Errorf("server must be deployed in cluster for the %s ClusterProviderStrategy: %v", strategy, err) + } + return nil, err } return &singleClusterProvider{ diff --git a/pkg/kubernetes/provider_test.go b/pkg/kubernetes/provider_test.go index b178cb34..32ea5668 100644 --- a/pkg/kubernetes/provider_test.go +++ 
b/pkg/kubernetes/provider_test.go @@ -126,6 +126,15 @@ func (s *ProviderTestSuite) TestNewProviderLocal() { s.NotNil(provider, "Expected provider instance") s.IsType(&kubeConfigClusterProvider{}, provider, "Expected kubeConfigClusterProvider type") }) + s.Run("With cluster_provider_strategy=disabled, returns single-cluster provider", func() { + cfg := test.Must(config.ReadToml([]byte(` + cluster_provider_strategy = "disabled" + `))) + provider, err := NewProvider(cfg) + s.Require().NoError(err, "Expected no error for disabled strategy") + s.NotNil(provider, "Expected provider instance") + s.IsType(&singleClusterProvider{}, provider, "Expected singleClusterProvider type") + }) s.Run("With cluster_provider_strategy=in-cluster, returns error", func() { cfg := test.Must(config.ReadToml([]byte(` cluster_provider_strategy = "in-cluster" @@ -145,7 +154,7 @@ func (s *ProviderTestSuite) TestNewProviderLocal() { s.Regexp("kubeconfig file .+ cannot be used with the in-cluster ClusterProviderStrategy", err.Error()) s.Nilf(provider, "Expected no provider instance, got %v", provider) }) - s.Run("With configured cluster_provider_strategy=non-existent, returns error", func() { + s.Run("With cluster_provider_strategy=non-existent, returns error", func() { cfg := test.Must(config.ReadToml([]byte(` cluster_provider_strategy = "i-do-not-exist" `))) diff --git a/pkg/mcp/events_test.go b/pkg/mcp/events_test.go index 6d771bca..68ca85a8 100644 --- a/pkg/mcp/events_test.go +++ b/pkg/mcp/events_test.go @@ -126,6 +126,7 @@ func (s *EventsSuite) TestEventsListDenied() { s.InitMcpClient() s.Run("events_list (denied)", func() { toolResult, err := s.CallTool("events_list", map[string]interface{}{}) + s.Require().NotNil(toolResult, "toolResult should not be nil") s.Run("has error", func() { s.Truef(toolResult.IsError, "call tool should fail") s.Nilf(err, "call tool should not return error object") diff --git a/pkg/mcp/mcp_middleware_test.go b/pkg/mcp/mcp_middleware_test.go new file mode 100644 
index 00000000..987bfe4f --- /dev/null +++ b/pkg/mcp/mcp_middleware_test.go @@ -0,0 +1,68 @@ +package mcp + +import ( + "regexp" + "strings" + "testing" + + "github.com/mark3labs/mcp-go/client/transport" +) + +func TestToolCallLogging(t *testing.T) { + testCaseWithContext(t, &mcpContext{logLevel: 5}, func(c *mcpContext) { + _, _ = c.callTool("configuration_view", map[string]interface{}{ + "minified": false, + }) + t.Run("Logs tool name", func(t *testing.T) { + expectedLog := "mcp tool call: configuration_view(" + if !strings.Contains(c.logBuffer.String(), expectedLog) { + t.Errorf("Expected log to contain '%s', got: %s", expectedLog, c.logBuffer.String()) + } + }) + t.Run("Logs tool call arguments", func(t *testing.T) { + expected := `"mcp tool call: configuration_view\((.+)\)"` + m := regexp.MustCompile(expected).FindStringSubmatch(c.logBuffer.String()) + if len(m) != 2 { + t.Fatalf("Expected log entry to contain arguments, got %s", c.logBuffer.String()) + } + if m[1] != "map[minified:false]" { + t.Errorf("Expected log arguments to be 'map[minified:false]', got %s", m[1]) + } + }) + }) + before := func(c *mcpContext) { + c.clientOptions = append(c.clientOptions, transport.WithHeaders(map[string]string{ + "Accept-Encoding": "gzip", + "Authorization": "Bearer should-not-be-logged", + "authorization": "Bearer should-not-be-logged", + "a-loggable-header": "should-be-logged", + })) + } + testCaseWithContext(t, &mcpContext{logLevel: 7, before: before}, func(c *mcpContext) { + _, _ = c.callTool("configuration_view", map[string]interface{}{ + "minified": false, + }) + t.Run("Logs tool call headers", func(t *testing.T) { + expectedLog := "mcp tool call headers: A-Loggable-Header: should-be-logged" + if !strings.Contains(c.logBuffer.String(), expectedLog) { + t.Errorf("Expected log to contain '%s', got: %s", expectedLog, c.logBuffer.String()) + } + }) + sensitiveHeaders := []string{ + "Authorization:", + // TODO: Add more sensitive headers as needed + } + t.Run("Does not 
log sensitive headers", func(t *testing.T) { + for _, header := range sensitiveHeaders { + if strings.Contains(c.logBuffer.String(), header) { + t.Errorf("Log should not contain sensitive header '%s', got: %s", header, c.logBuffer.String()) + } + } + }) + t.Run("Does not log sensitive header values", func(t *testing.T) { + if strings.Contains(c.logBuffer.String(), "should-not-be-logged") { + t.Errorf("Log should not contain sensitive header value 'should-not-be-logged', got: %s", c.logBuffer.String()) + } + }) + }) +} diff --git a/pkg/mcp/mcp_tools_test.go b/pkg/mcp/mcp_tools_test.go index 196b93e2..f6b8a8be 100644 --- a/pkg/mcp/mcp_tools_test.go +++ b/pkg/mcp/mcp_tools_test.go @@ -1,180 +1,130 @@ package mcp import ( - "regexp" - "strings" "testing" - "github.com/mark3labs/mcp-go/client/transport" + "github.com/BurntSushi/toml" "github.com/mark3labs/mcp-go/mcp" + "github.com/stretchr/testify/suite" "k8s.io/utils/ptr" - - "github.com/containers/kubernetes-mcp-server/internal/test" - "github.com/containers/kubernetes-mcp-server/pkg/config" ) -func TestUnrestricted(t *testing.T) { - testCase(t, func(c *mcpContext) { - tools, err := c.mcpClient.ListTools(c.ctx, mcp.ListToolsRequest{}) - t.Run("ListTools returns tools", func(t *testing.T) { - if err != nil { - t.Fatalf("call ListTools failed %v", err) - } - }) - t.Run("Destructive tools ARE NOT read only", func(t *testing.T) { - for _, tool := range tools.Tools { - readOnly := ptr.Deref(tool.Annotations.ReadOnlyHint, false) - destructive := ptr.Deref(tool.Annotations.DestructiveHint, false) - if readOnly && destructive { - t.Errorf("Tool %s is read-only and destructive, which is not allowed", tool.Name) - } - } - }) +// McpToolProcessingSuite tests MCP tool processing (isToolApplicable) +type McpToolProcessingSuite struct { + BaseMcpSuite +} + +func (s *McpToolProcessingSuite) TestUnrestricted() { + s.InitMcpClient() + + tools, err := s.ListTools(s.T().Context(), mcp.ListToolsRequest{}) + s.Require().NotNil(tools) + + 
s.Run("ListTools returns tools", func() { + s.NoError(err, "call ListTools failed") + s.NotNilf(tools, "list tools failed") + }) + + s.Run("Destructive tools ARE NOT read only", func() { + for _, tool := range tools.Tools { + readOnly := ptr.Deref(tool.Annotations.ReadOnlyHint, false) + destructive := ptr.Deref(tool.Annotations.DestructiveHint, false) + s.Falsef(readOnly && destructive, "Tool %s is read-only and destructive, which is not allowed", tool.Name) + } }) } -func TestReadOnly(t *testing.T) { - readOnlyServer := func(c *mcpContext) { c.staticConfig = &config.StaticConfig{ReadOnly: true} } - testCaseWithContext(t, &mcpContext{before: readOnlyServer}, func(c *mcpContext) { - tools, err := c.mcpClient.ListTools(c.ctx, mcp.ListToolsRequest{}) - t.Run("ListTools returns tools", func(t *testing.T) { - if err != nil { - t.Fatalf("call ListTools failed %v", err) - } - }) - t.Run("ListTools returns only read-only tools", func(t *testing.T) { - for _, tool := range tools.Tools { - if tool.Annotations.ReadOnlyHint == nil || !*tool.Annotations.ReadOnlyHint { - t.Errorf("Tool %s is not read-only but should be", tool.Name) - } - if tool.Annotations.DestructiveHint != nil && *tool.Annotations.DestructiveHint { - t.Errorf("Tool %s is destructive but should not be in read-only mode", tool.Name) - } - } - }) +func (s *McpToolProcessingSuite) TestReadOnly() { + s.Require().NoError(toml.Unmarshal([]byte(` + read_only = true + `), s.Cfg), "Expected to parse read only server config") + s.InitMcpClient() + + tools, err := s.ListTools(s.T().Context(), mcp.ListToolsRequest{}) + s.Require().NotNil(tools) + + s.Run("ListTools returns tools", func() { + s.NoError(err, "call ListTools failed") + s.NotNilf(tools, "list tools failed") + }) + + s.Run("ListTools returns only read-only tools", func() { + for _, tool := range tools.Tools { + s.Falsef(tool.Annotations.ReadOnlyHint == nil || !*tool.Annotations.ReadOnlyHint, + "Tool %s is not read-only but should be", tool.Name) + 
s.Falsef(tool.Annotations.DestructiveHint != nil && *tool.Annotations.DestructiveHint, + "Tool %s is destructive but should not be in read-only mode", tool.Name) + } }) } -func TestDisableDestructive(t *testing.T) { - disableDestructiveServer := func(c *mcpContext) { c.staticConfig = &config.StaticConfig{DisableDestructive: true} } - testCaseWithContext(t, &mcpContext{before: disableDestructiveServer}, func(c *mcpContext) { - tools, err := c.mcpClient.ListTools(c.ctx, mcp.ListToolsRequest{}) - t.Run("ListTools returns tools", func(t *testing.T) { - if err != nil { - t.Fatalf("call ListTools failed %v", err) - } - }) - t.Run("ListTools does not return destructive tools", func(t *testing.T) { - for _, tool := range tools.Tools { - if tool.Annotations.DestructiveHint != nil && *tool.Annotations.DestructiveHint { - t.Errorf("Tool %s is destructive but should not be", tool.Name) - } - } - }) +func (s *McpToolProcessingSuite) TestDisableDestructive() { + s.Require().NoError(toml.Unmarshal([]byte(` + disable_destructive = true + `), s.Cfg), "Expected to parse disable destructive server config") + s.InitMcpClient() + + tools, err := s.ListTools(s.T().Context(), mcp.ListToolsRequest{}) + s.Require().NotNil(tools) + + s.Run("ListTools returns tools", func() { + s.NoError(err, "call ListTools failed") + s.NotNilf(tools, "list tools failed") + }) + + s.Run("ListTools does not return destructive tools", func() { + for _, tool := range tools.Tools { + s.Falsef(tool.Annotations.DestructiveHint != nil && *tool.Annotations.DestructiveHint, + "Tool %s is destructive but should not be in disable_destructive mode", tool.Name) + } }) } -func TestEnabledTools(t *testing.T) { - enabledToolsServer := test.Must(config.ReadToml([]byte(` +func (s *McpToolProcessingSuite) TestEnabledTools() { + s.Require().NoError(toml.Unmarshal([]byte(` enabled_tools = [ "namespaces_list", "events_list" ] - `))) - testCaseWithContext(t, &mcpContext{staticConfig: enabledToolsServer}, func(c *mcpContext) { - 
tools, err := c.mcpClient.ListTools(c.ctx, mcp.ListToolsRequest{}) - t.Run("ListTools returns tools", func(t *testing.T) { - if err != nil { - t.Fatalf("call ListTools failed %v", err) - } - }) - t.Run("ListTools returns only explicitly enabled tools", func(t *testing.T) { - if len(tools.Tools) != 2 { - t.Fatalf("ListTools should return 2 tools, got %d", len(tools.Tools)) - } - for _, tool := range tools.Tools { - if tool.Name != "namespaces_list" && tool.Name != "events_list" { - t.Errorf("Tool %s is not enabled but should be", tool.Name) - } - } - }) + `), s.Cfg), "Expected to parse enabled tools server config") + s.InitMcpClient() + + tools, err := s.ListTools(s.T().Context(), mcp.ListToolsRequest{}) + s.Require().NotNil(tools) + + s.Run("ListTools returns tools", func() { + s.NoError(err, "call ListTools failed") + s.NotNilf(tools, "list tools failed") }) -} -func TestDisabledTools(t *testing.T) { - testCaseWithContext(t, &mcpContext{ - staticConfig: &config.StaticConfig{ - DisabledTools: []string{"namespaces_list", "events_list"}, - }, - }, func(c *mcpContext) { - tools, err := c.mcpClient.ListTools(c.ctx, mcp.ListToolsRequest{}) - t.Run("ListTools returns tools", func(t *testing.T) { - if err != nil { - t.Fatalf("call ListTools failed %v", err) - } - }) - t.Run("ListTools does not return disabled tools", func(t *testing.T) { - for _, tool := range tools.Tools { - if tool.Name == "namespaces_list" || tool.Name == "events_list" { - t.Errorf("Tool %s is not disabled but should be", tool.Name) - } - } - }) + s.Run("ListTools returns only explicitly enabled tools", func() { + s.Len(tools.Tools, 2, "ListTools should return exactly 2 tools") + for _, tool := range tools.Tools { + s.Falsef(tool.Name != "namespaces_list" && tool.Name != "events_list", + "Tool %s is not enabled but should be", tool.Name) + } }) } -func TestToolCallLogging(t *testing.T) { - testCaseWithContext(t, &mcpContext{logLevel: 5}, func(c *mcpContext) { - _, _ = c.callTool("configuration_view", 
map[string]interface{}{ - "minified": false, - }) - t.Run("Logs tool name", func(t *testing.T) { - expectedLog := "mcp tool call: configuration_view(" - if !strings.Contains(c.logBuffer.String(), expectedLog) { - t.Errorf("Expected log to contain '%s', got: %s", expectedLog, c.logBuffer.String()) - } - }) - t.Run("Logs tool call arguments", func(t *testing.T) { - expected := `"mcp tool call: configuration_view\((.+)\)"` - m := regexp.MustCompile(expected).FindStringSubmatch(c.logBuffer.String()) - if len(m) != 2 { - t.Fatalf("Expected log entry to contain arguments, got %s", c.logBuffer.String()) - } - if m[1] != "map[minified:false]" { - t.Errorf("Expected log arguments to be 'map[minified:false]', got %s", m[1]) - } - }) +func (s *McpToolProcessingSuite) TestDisabledTools() { + s.Require().NoError(toml.Unmarshal([]byte(` + disabled_tools = [ "namespaces_list", "events_list" ] + `), s.Cfg), "Expected to parse disabled tools server config") + s.InitMcpClient() + + tools, err := s.ListTools(s.T().Context(), mcp.ListToolsRequest{}) + s.Require().NotNil(tools) + + s.Run("ListTools returns tools", func() { + s.NoError(err, "call ListTools failed") + s.NotNilf(tools, "list tools failed") }) - before := func(c *mcpContext) { - c.clientOptions = append(c.clientOptions, transport.WithHeaders(map[string]string{ - "Accept-Encoding": "gzip", - "Authorization": "Bearer should-not-be-logged", - "authorization": "Bearer should-not-be-logged", - "a-loggable-header": "should-be-logged", - })) - } - testCaseWithContext(t, &mcpContext{logLevel: 7, before: before}, func(c *mcpContext) { - _, _ = c.callTool("configuration_view", map[string]interface{}{ - "minified": false, - }) - t.Run("Logs tool call headers", func(t *testing.T) { - expectedLog := "mcp tool call headers: A-Loggable-Header: should-be-logged" - if !strings.Contains(c.logBuffer.String(), expectedLog) { - t.Errorf("Expected log to contain '%s', got: %s", expectedLog, c.logBuffer.String()) - } - }) - sensitiveHeaders := 
[]string{ - "Authorization:", - // TODO: Add more sensitive headers as needed + + s.Run("ListTools does not return disabled tools", func() { + for _, tool := range tools.Tools { + s.Falsef(tool.Name == "namespaces_list" || tool.Name == "events_list", + "Tool %s is not disabled but should be", tool.Name) } - t.Run("Does not log sensitive headers", func(t *testing.T) { - for _, header := range sensitiveHeaders { - if strings.Contains(c.logBuffer.String(), header) { - t.Errorf("Log should not contain sensitive header '%s', got: %s", header, c.logBuffer.String()) - } - } - }) - t.Run("Does not log sensitive header values", func(t *testing.T) { - if strings.Contains(c.logBuffer.String(), "should-not-be-logged") { - t.Errorf("Log should not contain sensitive header value 'should-not-be-logged', got: %s", c.logBuffer.String()) - } - }) }) } + +func TestMcpToolProcessing(t *testing.T) { + suite.Run(t, new(McpToolProcessingSuite)) +} diff --git a/pkg/mcp/nodes_test.go b/pkg/mcp/nodes_test.go new file mode 100644 index 00000000..ce2cbc7e --- /dev/null +++ b/pkg/mcp/nodes_test.go @@ -0,0 +1,205 @@ +package mcp + +import ( + "net/http" + "testing" + + "github.com/BurntSushi/toml" + "github.com/containers/kubernetes-mcp-server/internal/test" + "github.com/mark3labs/mcp-go/mcp" + "github.com/stretchr/testify/suite" +) + +type NodesSuite struct { + BaseMcpSuite + mockServer *test.MockServer +} + +func (s *NodesSuite) SetupTest() { + s.BaseMcpSuite.SetupTest() + s.mockServer = test.NewMockServer() + s.Cfg.KubeConfig = s.mockServer.KubeconfigFile(s.T()) +} + +func (s *NodesSuite) TearDownTest() { + s.BaseMcpSuite.TearDownTest() + if s.mockServer != nil { + s.mockServer.Close() + } +} + +func (s *NodesSuite) TestNodesLog() { + s.mockServer.Handle(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + // Get Node response + if req.URL.Path == "/api/v1/nodes/existing-node" { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + _, _ = 
w.Write([]byte(`{ + "apiVersion": "v1", + "kind": "Node", + "metadata": { + "name": "existing-node" + } + }`)) + return + } + // Get Empty Log response + if req.URL.Path == "/api/v1/nodes/existing-node/proxy/logs/empty.log" { + w.Header().Set("Content-Type", "text/plain") + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte(``)) + return + } + // Get Kubelet Log response + if req.URL.Path == "/api/v1/nodes/existing-node/proxy/logs/kubelet.log" { + w.Header().Set("Content-Type", "text/plain") + w.WriteHeader(http.StatusOK) + logContent := "Line 1\nLine 2\nLine 3\nLine 4\nLine 5\n" + if req.URL.Query().Get("tailLines") != "" { + logContent = "Line 4\nLine 5\n" + } + _, _ = w.Write([]byte(logContent)) + return + } + w.WriteHeader(http.StatusNotFound) + })) + s.InitMcpClient() + s.Run("nodes_log(name=nil)", func() { + toolResult, err := s.CallTool("nodes_log", map[string]interface{}{}) + s.Require().NotNil(toolResult, "toolResult should not be nil") + s.Run("has error", func() { + s.Truef(toolResult.IsError, "call tool should fail") + s.Nilf(err, "call tool should not return error object") + }) + s.Run("describes missing name", func() { + expectedMessage := "failed to get node log, missing argument name" + s.Equalf(expectedMessage, toolResult.Content[0].(mcp.TextContent).Text, + "expected descriptive error '%s', got %v", expectedMessage, toolResult.Content[0].(mcp.TextContent).Text) + }) + }) + s.Run("nodes_log(name=inexistent-node)", func() { + toolResult, err := s.CallTool("nodes_log", map[string]interface{}{ + "name": "inexistent-node", + }) + s.Require().NotNil(toolResult, "toolResult should not be nil") + s.Run("has error", func() { + s.Truef(toolResult.IsError, "call tool should fail") + s.Nilf(err, "call tool should not return error object") + }) + s.Run("describes missing node", func() { + expectedMessage := "failed to get node log for inexistent-node: failed to get node inexistent-node: the server could not find the requested resource (get nodes 
inexistent-node)" + s.Equalf(expectedMessage, toolResult.Content[0].(mcp.TextContent).Text, + "expected descriptive error '%s', got %v", expectedMessage, toolResult.Content[0].(mcp.TextContent).Text) + }) + }) + s.Run("nodes_log(name=existing-node, log_path=missing.log)", func() { + toolResult, err := s.CallTool("nodes_log", map[string]interface{}{ + "name": "existing-node", + "log_path": "missing.log", + }) + s.Require().NotNil(toolResult, "toolResult should not be nil") + s.Run("has error", func() { + s.Truef(toolResult.IsError, "call tool should fail") + s.Nilf(err, "call tool should not return error object") + }) + s.Run("describes missing log file", func() { + expectedMessage := "failed to get node log for existing-node: failed to get node logs: the server could not find the requested resource" + s.Equalf(expectedMessage, toolResult.Content[0].(mcp.TextContent).Text, + "expected descriptive error '%s', got %v", expectedMessage, toolResult.Content[0].(mcp.TextContent).Text) + }) + }) + s.Run("nodes_log(name=existing-node, log_path=empty.log)", func() { + toolResult, err := s.CallTool("nodes_log", map[string]interface{}{ + "name": "existing-node", + "log_path": "empty.log", + }) + s.Require().NotNil(toolResult, "toolResult should not be nil") + s.Run("no error", func() { + s.Falsef(toolResult.IsError, "call tool should succeed") + s.Nilf(err, "call tool should not return error object") + }) + s.Run("describes empty log", func() { + expectedMessage := "The node existing-node has not logged any message yet or the log file is empty" + s.Equalf(expectedMessage, toolResult.Content[0].(mcp.TextContent).Text, + "expected descriptive message '%s', got %v", expectedMessage, toolResult.Content[0].(mcp.TextContent).Text) + }) + }) + s.Run("nodes_log(name=existing-node, log_path=kubelet.log)", func() { + toolResult, err := s.CallTool("nodes_log", map[string]interface{}{ + "name": "existing-node", + "log_path": "kubelet.log", + }) + s.Require().NotNil(toolResult, "toolResult 
should not be nil") + s.Run("no error", func() { + s.Falsef(toolResult.IsError, "call tool should succeed") + s.Nilf(err, "call tool should not return error object") + }) + s.Run("returns full log", func() { + expectedMessage := "Line 1\nLine 2\nLine 3\nLine 4\nLine 5\n" + s.Equalf(expectedMessage, toolResult.Content[0].(mcp.TextContent).Text, + "expected log content '%s', got %v", expectedMessage, toolResult.Content[0].(mcp.TextContent).Text) + }) + }) + for _, tailCase := range []interface{}{2, int64(2), float64(2)} { + s.Run("nodes_log(name=existing-node, log_path=kubelet.log, tail=2)", func() { + toolResult, err := s.CallTool("nodes_log", map[string]interface{}{ + "name": "existing-node", + "log_path": "kubelet.log", + "tail": tailCase, + }) + s.Require().NotNil(toolResult, "toolResult should not be nil") + s.Run("no error", func() { + s.Falsef(toolResult.IsError, "call tool should succeed") + s.Nilf(err, "call tool should not return error object") + }) + s.Run("returns tail log", func() { + expectedMessage := "Line 4\nLine 5\n" + s.Equalf(expectedMessage, toolResult.Content[0].(mcp.TextContent).Text, + "expected log content '%s', got %v", expectedMessage, toolResult.Content[0].(mcp.TextContent).Text) + }) + }) + s.Run("nodes_log(name=existing-node, log_path=kubelet.log, tail=-1)", func() { + toolResult, err := s.CallTool("nodes_log", map[string]interface{}{ + "name": "existing-node", + "log_path": "kubelet.log", + "tail": -1, + }) + s.Require().NotNil(toolResult, "toolResult should not be nil") + s.Run("no error", func() { + s.Falsef(toolResult.IsError, "call tool should succeed") + s.Nilf(err, "call tool should not return error object") + }) + s.Run("returns full log", func() { + expectedMessage := "Line 1\nLine 2\nLine 3\nLine 4\nLine 5\n" + s.Equalf(expectedMessage, toolResult.Content[0].(mcp.TextContent).Text, + "expected log content '%s', got %v", expectedMessage, toolResult.Content[0].(mcp.TextContent).Text) + }) + }) + } +} + +func (s *NodesSuite) 
TestNodesLogDenied() { + s.Require().NoError(toml.Unmarshal([]byte(` + denied_resources = [ { version = "v1", kind = "Node" } ] + `), s.Cfg), "Expected to parse denied resources config") + s.InitMcpClient() + s.Run("nodes_log (denied)", func() { + toolResult, err := s.CallTool("nodes_log", map[string]interface{}{ + "name": "does-not-matter", + }) + s.Require().NotNil(toolResult, "toolResult should not be nil") + s.Run("has error", func() { + s.Truef(toolResult.IsError, "call tool should fail") + s.Nilf(err, "call tool should not return error object") + }) + s.Run("describes denial", func() { + expectedMessage := "failed to get node log for does-not-matter: resource not allowed: /v1, Kind=Node" + s.Equalf(expectedMessage, toolResult.Content[0].(mcp.TextContent).Text, + "expected descriptive error '%s', got %v", expectedMessage, toolResult.Content[0].(mcp.TextContent).Text) + }) + }) +} + +func TestNodes(t *testing.T) { + suite.Run(t, new(NodesSuite)) +} diff --git a/pkg/mcp/pods_exec_test.go b/pkg/mcp/pods_exec_test.go index dac6883c..c39cc8d6 100644 --- a/pkg/mcp/pods_exec_test.go +++ b/pkg/mcp/pods_exec_test.go @@ -7,125 +7,132 @@ import ( "strings" "testing" + "github.com/BurntSushi/toml" "github.com/mark3labs/mcp-go/mcp" + "github.com/stretchr/testify/suite" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/containers/kubernetes-mcp-server/internal/test" - "github.com/containers/kubernetes-mcp-server/pkg/config" ) -func TestPodsExec(t *testing.T) { - testCase(t, func(c *mcpContext) { - mockServer := test.NewMockServer() - defer mockServer.Close() - c.withKubeConfig(mockServer.Config()) - mockServer.Handle(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { - if req.URL.Path != "/api/v1/namespaces/default/pods/pod-to-exec/exec" { - return - } - var stdin, stdout bytes.Buffer - ctx, err := test.CreateHTTPStreams(w, req, &test.StreamOptions{ - Stdin: &stdin, - Stdout: &stdout, - }) - if err != nil { - 
w.WriteHeader(http.StatusInternalServerError) - _, _ = w.Write([]byte(err.Error())) - return - } - defer func(conn io.Closer) { _ = conn.Close() }(ctx.Closer) - _, _ = io.WriteString(ctx.StdoutStream, "command:"+strings.Join(req.URL.Query()["command"], " ")+"\n") - _, _ = io.WriteString(ctx.StdoutStream, "container:"+strings.Join(req.URL.Query()["container"], " ")+"\n") - })) - mockServer.Handle(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { - if req.URL.Path != "/api/v1/namespaces/default/pods/pod-to-exec" { - return - } - test.WriteObject(w, &v1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "default", - Name: "pod-to-exec", - }, - Spec: v1.PodSpec{Containers: []v1.Container{{Name: "container-to-exec"}}}, - }) - })) - podsExecNilNamespace, err := c.callTool("pods_exec", map[string]interface{}{ +type PodsExecSuite struct { + BaseMcpSuite + mockServer *test.MockServer +} + +func (s *PodsExecSuite) SetupTest() { + s.BaseMcpSuite.SetupTest() + s.mockServer = test.NewMockServer() + s.Cfg.KubeConfig = s.mockServer.KubeconfigFile(s.T()) +} + +func (s *PodsExecSuite) TearDownTest() { + s.BaseMcpSuite.TearDownTest() + if s.mockServer != nil { + s.mockServer.Close() + } +} + +func (s *PodsExecSuite) TestPodsExec() { + s.mockServer.Handle(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + if req.URL.Path != "/api/v1/namespaces/default/pods/pod-to-exec/exec" { + return + } + var stdin, stdout bytes.Buffer + ctx, err := test.CreateHTTPStreams(w, req, &test.StreamOptions{ + Stdin: &stdin, + Stdout: &stdout, + }) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + _, _ = w.Write([]byte(err.Error())) + return + } + defer func(conn io.Closer) { _ = conn.Close() }(ctx.Closer) + _, _ = io.WriteString(ctx.StdoutStream, "command:"+strings.Join(req.URL.Query()["command"], " ")+"\n") + _, _ = io.WriteString(ctx.StdoutStream, "container:"+strings.Join(req.URL.Query()["container"], " ")+"\n") + })) + 
s.mockServer.Handle(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + if req.URL.Path != "/api/v1/namespaces/default/pods/pod-to-exec" { + return + } + test.WriteObject(w, &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "pod-to-exec", + }, + Spec: v1.PodSpec{Containers: []v1.Container{{Name: "container-to-exec"}}}, + }) + })) + s.InitMcpClient() + + s.Run("pods_exec(name=pod-to-exec, namespace=nil, command=[ls -l]), uses configured namespace", func() { + result, err := s.CallTool("pods_exec", map[string]interface{}{ "name": "pod-to-exec", "command": []interface{}{"ls", "-l"}, }) - t.Run("pods_exec with name and nil namespace returns command output", func(t *testing.T) { - if err != nil { - t.Fatalf("call tool failed %v", err) - } - if podsExecNilNamespace.IsError { - t.Fatalf("call tool failed: %v", podsExecNilNamespace.Content) - } - if !strings.Contains(podsExecNilNamespace.Content[0].(mcp.TextContent).Text, "command:ls -l\n") { - t.Errorf("unexpected result %v", podsExecNilNamespace.Content[0].(mcp.TextContent).Text) - } + s.Require().NotNil(result) + s.Run("returns command output", func() { + s.NoError(err, "call tool failed %v", err) + s.Falsef(result.IsError, "call tool failed: %v", result.Content) + s.Contains(result.Content[0].(mcp.TextContent).Text, "command:ls -l\n", "unexpected result %v", result.Content[0].(mcp.TextContent).Text) }) - podsExecInNamespace, err := c.callTool("pods_exec", map[string]interface{}{ + }) + s.Run("pods_exec(name=pod-to-exec, namespace=default, command=[ls -l])", func() { + result, err := s.CallTool("pods_exec", map[string]interface{}{ "namespace": "default", "name": "pod-to-exec", "command": []interface{}{"ls", "-l"}, }) - t.Run("pods_exec with name and namespace returns command output", func(t *testing.T) { - if err != nil { - t.Fatalf("call tool failed %v", err) - } - if podsExecInNamespace.IsError { - t.Fatalf("call tool failed: %v", podsExecInNamespace.Content) - } - if 
!strings.Contains(podsExecInNamespace.Content[0].(mcp.TextContent).Text, "command:ls -l\n") { - t.Errorf("unexpected result %v", podsExecInNamespace.Content[0].(mcp.TextContent).Text) - } + s.Require().NotNil(result) + s.Run("returns command output", func() { + s.NoError(err, "call tool failed %v", err) + s.Falsef(result.IsError, "call tool failed: %v", result.Content) + s.Contains(result.Content[0].(mcp.TextContent).Text, "command:ls -l\n", "unexpected result %v", result.Content[0].(mcp.TextContent).Text) }) - podsExecInNamespaceAndContainer, err := c.callTool("pods_exec", map[string]interface{}{ + }) + s.Run("pods_exec(name=pod-to-exec, namespace=default, command=[ls -l], container=a-specific-container)", func() { + result, err := s.CallTool("pods_exec", map[string]interface{}{ "namespace": "default", "name": "pod-to-exec", "command": []interface{}{"ls", "-l"}, "container": "a-specific-container", }) - t.Run("pods_exec with name, namespace, and container returns command output", func(t *testing.T) { - if err != nil { - t.Fatalf("call tool failed %v", err) - } - if podsExecInNamespaceAndContainer.IsError { - t.Fatalf("call tool failed") - } - if !strings.Contains(podsExecInNamespaceAndContainer.Content[0].(mcp.TextContent).Text, "command:ls -l\n") { - t.Errorf("unexpected result %v", podsExecInNamespaceAndContainer.Content[0].(mcp.TextContent).Text) - } - if !strings.Contains(podsExecInNamespaceAndContainer.Content[0].(mcp.TextContent).Text, "container:a-specific-container\n") { - t.Errorf("expected container name not found %v", podsExecInNamespaceAndContainer.Content[0].(mcp.TextContent).Text) - } + s.Require().NotNil(result) + s.Run("returns command output", func() { + s.NoError(err, "call tool failed %v", err) + s.Falsef(result.IsError, "call tool failed: %v", result.Content) + s.Contains(result.Content[0].(mcp.TextContent).Text, "command:ls -l\n", "unexpected result %v", result.Content[0].(mcp.TextContent).Text) }) }) } -func TestPodsExecDenied(t *testing.T) { 
- deniedResourcesServer := test.Must(config.ReadToml([]byte(` +func (s *PodsExecSuite) TestPodsExecDenied() { + s.Require().NoError(toml.Unmarshal([]byte(` denied_resources = [ { version = "v1", kind = "Pod" } ] - `))) - testCaseWithContext(t, &mcpContext{staticConfig: deniedResourcesServer}, func(c *mcpContext) { - c.withEnvTest() - podsRun, _ := c.callTool("pods_exec", map[string]interface{}{ + `), s.Cfg), "Expected to parse denied resources config") + s.InitMcpClient() + s.Run("pods_exec (denied)", func() { + toolResult, err := s.CallTool("pods_exec", map[string]interface{}{ "namespace": "default", "name": "pod-to-exec", "command": []interface{}{"ls", "-l"}, "container": "a-specific-container", }) - t.Run("pods_exec has error", func(t *testing.T) { - if !podsRun.IsError { - t.Fatalf("call tool should fail") - } + s.Require().NotNil(toolResult, "toolResult should not be nil") + s.Run("has error", func() { + s.Truef(toolResult.IsError, "call tool should fail") + s.Nilf(err, "call tool should not return error object") }) - t.Run("pods_exec describes denial", func(t *testing.T) { + s.Run("describes denial", func() { expectedMessage := "failed to exec in pod pod-to-exec in namespace default: resource not allowed: /v1, Kind=Pod" - if podsRun.Content[0].(mcp.TextContent).Text != expectedMessage { - t.Fatalf("expected descriptive error '%s', got %v", expectedMessage, podsRun.Content[0].(mcp.TextContent).Text) - } + s.Equalf(expectedMessage, toolResult.Content[0].(mcp.TextContent).Text, + "expected descriptive error '%s', got %v", expectedMessage, toolResult.Content[0].(mcp.TextContent).Text) }) }) } + +func TestPodsExec(t *testing.T) { + suite.Run(t, new(PodsExecSuite)) +} diff --git a/pkg/mcp/pods_top_test.go b/pkg/mcp/pods_top_test.go index 9fd218bb..92f6505a 100644 --- a/pkg/mcp/pods_top_test.go +++ b/pkg/mcp/pods_top_test.go @@ -5,247 +5,246 @@ import ( "regexp" "testing" + "github.com/BurntSushi/toml" "github.com/containers/kubernetes-mcp-server/internal/test" 
"github.com/mark3labs/mcp-go/mcp" - - "github.com/containers/kubernetes-mcp-server/pkg/config" + "github.com/stretchr/testify/suite" ) -func TestPodsTopMetricsUnavailable(t *testing.T) { - testCase(t, func(c *mcpContext) { - mockServer := test.NewMockServer() - defer mockServer.Close() - c.withKubeConfig(mockServer.Config()) - mockServer.Handle(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { - w.Header().Set("Content-Type", "application/json") - // Request Performed by DiscoveryClient to Kube API (Get API Groups legacy -core-) - if req.URL.Path == "/api" { - _, _ = w.Write([]byte(`{"kind":"APIVersions","versions":[],"serverAddressByClientCIDRs":[{"clientCIDR":"0.0.0.0/0"}]}`)) - return - } - // Request Performed by DiscoveryClient to Kube API (Get API Groups) - if req.URL.Path == "/apis" { - _, _ = w.Write([]byte(`{"kind":"APIGroupList","apiVersion":"v1","groups":[]}`)) - return - } - })) - podsTopMetricsApiUnavailable, err := c.callTool("pods_top", map[string]interface{}{}) - t.Run("pods_top with metrics API not available", func(t *testing.T) { - if err != nil { - t.Fatalf("call tool failed %v", err) - } - if !podsTopMetricsApiUnavailable.IsError { - t.Errorf("call tool should have returned an error") - } - if podsTopMetricsApiUnavailable.Content[0].(mcp.TextContent).Text != "failed to get pods top: metrics API is not available" { - t.Errorf("call tool returned unexpected content: %s", podsTopMetricsApiUnavailable.Content[0].(mcp.TextContent).Text) - } - }) +type PodsTopSuite struct { + BaseMcpSuite + mockServer *test.MockServer +} + +func (s *PodsTopSuite) SetupTest() { + s.BaseMcpSuite.SetupTest() + s.mockServer = test.NewMockServer() + s.Cfg.KubeConfig = s.mockServer.KubeconfigFile(s.T()) +} + +func (s *PodsTopSuite) TearDownTest() { + s.BaseMcpSuite.TearDownTest() + if s.mockServer != nil { + s.mockServer.Close() + } +} + +func (s *PodsTopSuite) TestPodsTopMetricsUnavailable() { + s.mockServer.Handle(http.HandlerFunc(func(w 
http.ResponseWriter, req *http.Request) { + w.Header().Set("Content-Type", "application/json") + // Request Performed by DiscoveryClient to Kube API (Get API Groups legacy -core-) + if req.URL.Path == "/api" { + _, _ = w.Write([]byte(`{"kind":"APIVersions","versions":[],"serverAddressByClientCIDRs":[{"clientCIDR":"0.0.0.0/0"}]}`)) + return + } + // Request Performed by DiscoveryClient to Kube API (Get API Groups) + if req.URL.Path == "/apis" { + _, _ = w.Write([]byte(`{"kind":"APIGroupList","apiVersion":"v1","groups":[]}`)) + return + } + })) + s.InitMcpClient() + + s.Run("pods_top with metrics API not available", func() { + result, err := s.CallTool("pods_top", map[string]interface{}{}) + s.NoError(err, "call tool failed %v", err) + s.Require().NoError(err) + s.True(result.IsError, "call tool should have returned an error") + s.Equalf("failed to get pods top: metrics API is not available", result.Content[0].(mcp.TextContent).Text, + "call tool returned unexpected content: %s", result.Content[0].(mcp.TextContent).Text) }) } -func TestPodsTopMetricsAvailable(t *testing.T) { - testCase(t, func(c *mcpContext) { - mockServer := test.NewMockServer() - defer mockServer.Close() - c.withKubeConfig(mockServer.Config()) - mockServer.Handle(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { - println("Request received:", req.Method, req.URL.Path) // TODO: REMOVE LINE - w.Header().Set("Content-Type", "application/json") - // Request Performed by DiscoveryClient to Kube API (Get API Groups legacy -core-) - if req.URL.Path == "/api" { - _, _ = w.Write([]byte(`{"kind":"APIVersions","versions":["metrics.k8s.io/v1beta1"],"serverAddressByClientCIDRs":[{"clientCIDR":"0.0.0.0/0"}]}`)) - return - } - // Request Performed by DiscoveryClient to Kube API (Get API Groups) - if req.URL.Path == "/apis" { - _, _ = w.Write([]byte(`{"kind":"APIGroupList","apiVersion":"v1","groups":[]}`)) - return - } - // Request Performed by DiscoveryClient to Kube API (Get API Resources) - if 
req.URL.Path == "/apis/metrics.k8s.io/v1beta1" { - _, _ = w.Write([]byte(`{"kind":"APIResourceList","apiVersion":"v1","groupVersion":"metrics.k8s.io/v1beta1","resources":[{"name":"pods","singularName":"","namespaced":true,"kind":"PodMetrics","verbs":["get","list"]}]}`)) - return - } - // Pod Metrics from all namespaces - if req.URL.Path == "/apis/metrics.k8s.io/v1beta1/pods" { - if req.URL.Query().Get("labelSelector") == "app=pod-ns-5-42" { - _, _ = w.Write([]byte(`{"kind":"PodMetricsList","apiVersion":"metrics.k8s.io/v1beta1","items":[` + - `{"metadata":{"name":"pod-ns-5-42","namespace":"ns-5"},"containers":[{"name":"container-1","usage":{"cpu":"42m","memory":"42Mi","swap":"42Mi"}}]}` + - `]}`)) - } else { - _, _ = w.Write([]byte(`{"kind":"PodMetricsList","apiVersion":"metrics.k8s.io/v1beta1","items":[` + - `{"metadata":{"name":"pod-1","namespace":"default"},"containers":[{"name":"container-1","usage":{"cpu":"100m","memory":"200Mi","swap":"13Mi"}},{"name":"container-2","usage":{"cpu":"200m","memory":"300Mi","swap":"37Mi"}}]},` + - `{"metadata":{"name":"pod-2","namespace":"ns-1"},"containers":[{"name":"container-1-ns-1","usage":{"cpu":"300m","memory":"400Mi","swap":"42Mi"}}]}` + - `]}`)) - - } - return - } - // Pod Metrics from configured namespace - if req.URL.Path == "/apis/metrics.k8s.io/v1beta1/namespaces/default/pods" { +func (s *PodsTopSuite) TestPodsTopMetricsAvailable() { + s.mockServer.Handle(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + w.Header().Set("Content-Type", "application/json") + // Request Performed by DiscoveryClient to Kube API (Get API Groups legacy -core-) + if req.URL.Path == "/api" { + _, _ = w.Write([]byte(`{"kind":"APIVersions","versions":["metrics.k8s.io/v1beta1"],"serverAddressByClientCIDRs":[{"clientCIDR":"0.0.0.0/0"}]}`)) + return + } + // Request Performed by DiscoveryClient to Kube API (Get API Groups) + if req.URL.Path == "/apis" { + _, _ = 
w.Write([]byte(`{"kind":"APIGroupList","apiVersion":"v1","groups":[]}`)) + return + } + // Request Performed by DiscoveryClient to Kube API (Get API Resources) + if req.URL.Path == "/apis/metrics.k8s.io/v1beta1" { + _, _ = w.Write([]byte(`{"kind":"APIResourceList","apiVersion":"v1","groupVersion":"metrics.k8s.io/v1beta1","resources":[{"name":"pods","singularName":"","namespaced":true,"kind":"PodMetrics","verbs":["get","list"]}]}`)) + return + } + // Pod Metrics from all namespaces + if req.URL.Path == "/apis/metrics.k8s.io/v1beta1/pods" { + if req.URL.Query().Get("labelSelector") == "app=pod-ns-5-42" { _, _ = w.Write([]byte(`{"kind":"PodMetricsList","apiVersion":"metrics.k8s.io/v1beta1","items":[` + - `{"metadata":{"name":"pod-1","namespace":"default"},"containers":[{"name":"container-1","usage":{"cpu":"10m","memory":"20Mi","swap":"13Mi"}},{"name":"container-2","usage":{"cpu":"30m","memory":"40Mi","swap":"37Mi"}}]}` + + `{"metadata":{"name":"pod-ns-5-42","namespace":"ns-5"},"containers":[{"name":"container-1","usage":{"cpu":"42m","memory":"42Mi","swap":"42Mi"}}]}` + `]}`)) - return - } - // Pod Metrics from ns-5 namespace - if req.URL.Path == "/apis/metrics.k8s.io/v1beta1/namespaces/ns-5/pods" { + } else { _, _ = w.Write([]byte(`{"kind":"PodMetricsList","apiVersion":"metrics.k8s.io/v1beta1","items":[` + - `{"metadata":{"name":"pod-ns-5-1","namespace":"ns-5"},"containers":[{"name":"container-1","usage":{"cpu":"10m","memory":"20Mi","swap":"42Mi"}}]}` + + `{"metadata":{"name":"pod-1","namespace":"default"},"containers":[{"name":"container-1","usage":{"cpu":"100m","memory":"200Mi","swap":"13Mi"}},{"name":"container-2","usage":{"cpu":"200m","memory":"300Mi","swap":"37Mi"}}]},` + + `{"metadata":{"name":"pod-2","namespace":"ns-1"},"containers":[{"name":"container-1-ns-1","usage":{"cpu":"300m","memory":"400Mi","swap":"42Mi"}}]}` + `]}`)) - return - } - // Pod Metrics from ns-5 namespace with pod-ns-5-5 pod name - if req.URL.Path == 
"/apis/metrics.k8s.io/v1beta1/namespaces/ns-5/pods/pod-ns-5-5" { - _, _ = w.Write([]byte(`{"kind":"PodMetrics","apiVersion":"metrics.k8s.io/v1beta1",` + - `"metadata":{"name":"pod-ns-5-5","namespace":"ns-5"},` + - `"containers":[{"name":"container-1","usage":{"cpu":"13m","memory":"37Mi","swap":"42Mi"}}]` + - `}`)) - } - })) - podsTopDefaults, err := c.callTool("pods_top", map[string]interface{}{}) - t.Run("pods_top defaults returns pod metrics from all namespaces", func(t *testing.T) { - if err != nil { - t.Fatalf("call tool failed %v", err) - } - textContent := podsTopDefaults.Content[0].(mcp.TextContent).Text - if podsTopDefaults.IsError { - t.Fatalf("call tool failed %s", textContent) - } - expectedHeaders := regexp.MustCompile(`(?m)^\s*NAMESPACE\s+POD\s+NAME\s+CPU\(cores\)\s+MEMORY\(bytes\)\s+SWAP\(bytes\)\s*$`) - if !expectedHeaders.MatchString(textContent) { - t.Errorf("Expected headers '%s' not found in output:\n%s", expectedHeaders.String(), textContent) - } - expectedRows := []string{ - "default\\s+pod-1\\s+container-1\\s+100m\\s+200Mi\\s+13Mi", - "default\\s+pod-1\\s+container-2\\s+200m\\s+300Mi\\s+37Mi", - "ns-1\\s+pod-2\\s+container-1-ns-1\\s+300m\\s+400Mi\\s+42Mi", - } - for _, row := range expectedRows { - if !regexp.MustCompile(row).MatchString(textContent) { - t.Errorf("Expected row '%s' not found in output:\n%s", row, textContent) - } - } - expectedTotal := regexp.MustCompile(`(?m)^\s+600m\s+900Mi\s+92Mi\s*$`) - if !expectedTotal.MatchString(textContent) { - t.Errorf("Expected total row '%s' not found in output:\n%s", expectedTotal.String(), textContent) + } - }) - podsTopConfiguredNamespace, err := c.callTool("pods_top", map[string]interface{}{ + return + } + // Pod Metrics from configured namespace + if req.URL.Path == "/apis/metrics.k8s.io/v1beta1/namespaces/default/pods" { + _, _ = w.Write([]byte(`{"kind":"PodMetricsList","apiVersion":"metrics.k8s.io/v1beta1","items":[` + + 
`{"metadata":{"name":"pod-1","namespace":"default"},"containers":[{"name":"container-1","usage":{"cpu":"10m","memory":"20Mi","swap":"13Mi"}},{"name":"container-2","usage":{"cpu":"30m","memory":"40Mi","swap":"37Mi"}}]}` + + `]}`)) + return + } + // Pod Metrics from ns-5 namespace + if req.URL.Path == "/apis/metrics.k8s.io/v1beta1/namespaces/ns-5/pods" { + _, _ = w.Write([]byte(`{"kind":"PodMetricsList","apiVersion":"metrics.k8s.io/v1beta1","items":[` + + `{"metadata":{"name":"pod-ns-5-1","namespace":"ns-5"},"containers":[{"name":"container-1","usage":{"cpu":"10m","memory":"20Mi","swap":"42Mi"}}]}` + + `]}`)) + return + } + // Pod Metrics from ns-5 namespace with pod-ns-5-5 pod name + if req.URL.Path == "/apis/metrics.k8s.io/v1beta1/namespaces/ns-5/pods/pod-ns-5-5" { + _, _ = w.Write([]byte(`{"kind":"PodMetrics","apiVersion":"metrics.k8s.io/v1beta1",` + + `"metadata":{"name":"pod-ns-5-5","namespace":"ns-5"},` + + `"containers":[{"name":"container-1","usage":{"cpu":"13m","memory":"37Mi","swap":"42Mi"}}]` + + `}`)) + } + })) + s.InitMcpClient() + + s.Run("pods_top(defaults) returns pod metrics from all namespaces", func() { + result, err := s.CallTool("pods_top", map[string]interface{}{}) + s.Require().NotNil(result) + s.NoErrorf(err, "call tool failed %v", err) + textContent := result.Content[0].(mcp.TextContent).Text + s.Falsef(result.IsError, "call tool failed %v", textContent) + + expectedHeaders := regexp.MustCompile(`(?m)^\s*NAMESPACE\s+POD\s+NAME\s+CPU\(cores\)\s+MEMORY\(bytes\)\s+SWAP\(bytes\)\s*$`) + s.Regexpf(expectedHeaders, textContent, "expected headers '%s' not found in output:\n%s", expectedHeaders.String(), textContent) + expectedRows := []string{ + "default\\s+pod-1\\s+container-1\\s+100m\\s+200Mi\\s+13Mi", + "default\\s+pod-1\\s+container-2\\s+200m\\s+300Mi\\s+37Mi", + "ns-1\\s+pod-2\\s+container-1-ns-1\\s+300m\\s+400Mi\\s+42Mi", + } + + for _, row := range expectedRows { + s.Regexpf(row, textContent, "expected row '%s' not found in output:\n%s", row, 
textContent) + } + + expectedTotal := regexp.MustCompile(`(?m)^\s+600m\s+900Mi\s+92Mi\s*$`) + s.Regexpf(expectedTotal, textContent, "expected total row '%s' not found in output:\n%s", expectedTotal.String(), textContent) + }) + + s.Run("pods_top(allNamespaces=false) returns pod metrics from configured namespace", func() { + result, err := s.CallTool("pods_top", map[string]interface{}{ "all_namespaces": false, }) - t.Run("pods_top[allNamespaces=false] returns pod metrics from configured namespace", func(t *testing.T) { - if err != nil { - t.Fatalf("call tool failed %v", err) - } - textContent := podsTopConfiguredNamespace.Content[0].(mcp.TextContent).Text - expectedRows := []string{ - "default\\s+pod-1\\s+container-1\\s+10m\\s+20Mi\\s+13Mi", - "default\\s+pod-1\\s+container-2\\s+30m\\s+40Mi\\s+37Mi", - } - for _, row := range expectedRows { - if !regexp.MustCompile(row).MatchString(textContent) { - t.Errorf("Expected row '%s' not found in output:\n%s", row, textContent) - } - } - expectedTotal := regexp.MustCompile(`(?m)^\s+40m\s+60Mi\s+50Mi\s*$`) - if !expectedTotal.MatchString(textContent) { - t.Errorf("Expected total row '%s' not found in output:\n%s", expectedTotal.String(), textContent) - } - }) - podsTopNamespace, err := c.callTool("pods_top", map[string]interface{}{ + s.Require().NotNil(result) + s.NoErrorf(err, "call tool failed %v", err) + textContent := result.Content[0].(mcp.TextContent).Text + s.Falsef(result.IsError, "call tool failed %v", textContent) + + expectedRows := []string{ + "default\\s+pod-1\\s+container-1\\s+10m\\s+20Mi\\s+13Mi", + "default\\s+pod-1\\s+container-2\\s+30m\\s+40Mi\\s+37Mi", + } + for _, row := range expectedRows { + s.Regexpf(row, textContent, "expected row '%s' not found in output:\n%s", row, textContent) + } + + expectedTotal := regexp.MustCompile(`(?m)^\s+40m\s+60Mi\s+50Mi\s*$`) + s.Regexpf(expectedTotal, textContent, "expected total row '%s' not found in output:\n%s", expectedTotal.String(), textContent) + }) + + 
s.Run("pods_top(namespace=ns-5) returns pod metrics from provided namespace", func() { + result, err := s.CallTool("pods_top", map[string]interface{}{ "namespace": "ns-5", }) - t.Run("pods_top[namespace=ns-5] returns pod metrics from provided namespace", func(t *testing.T) { - if err != nil { - t.Fatalf("call tool failed %v", err) - } - textContent := podsTopNamespace.Content[0].(mcp.TextContent).Text - expectedRow := regexp.MustCompile(`ns-5\s+pod-ns-5-1\s+container-1\s+10m\s+20Mi\s+42Mi`) - if !expectedRow.MatchString(textContent) { - t.Errorf("Expected row '%s' not found in output:\n%s", expectedRow.String(), textContent) - } - expectedTotal := regexp.MustCompile(`(?m)^\s+10m\s+20Mi\s+42Mi\s*$`) - if !expectedTotal.MatchString(textContent) { - t.Errorf("Expected total row '%s' not found in output:\n%s", expectedTotal.String(), textContent) - } - }) - podsTopNamespaceName, err := c.callTool("pods_top", map[string]interface{}{ + s.Require().NotNil(result) + s.NoErrorf(err, "call tool failed %v", err) + textContent := result.Content[0].(mcp.TextContent).Text + s.Falsef(result.IsError, "call tool failed %v", textContent) + + expectedRow := regexp.MustCompile(`ns-5\s+pod-ns-5-1\s+container-1\s+10m\s+20Mi\s+42Mi`) + s.Regexpf(expectedRow, textContent, "expected row '%s' not found in output:\n%s", expectedRow.String(), textContent) + + expectedTotal := regexp.MustCompile(`(?m)^\s+10m\s+20Mi\s+42Mi\s*$`) + s.Regexpf(expectedTotal, textContent, "expected total row '%s' not found in output:\n%s", expectedTotal.String(), textContent) + }) + + s.Run("pods_top(namespace=ns-5,name=pod-ns-5-5) returns pod metrics from provided namespace and name", func() { + result, err := s.CallTool("pods_top", map[string]interface{}{ "namespace": "ns-5", "name": "pod-ns-5-5", }) - t.Run("pods_top[namespace=ns-5,name=pod-ns-5-5] returns pod metrics from provided namespace and name", func(t *testing.T) { - if err != nil { - t.Fatalf("call tool failed %v", err) - } - textContent := 
podsTopNamespaceName.Content[0].(mcp.TextContent).Text - expectedRow := regexp.MustCompile(`ns-5\s+pod-ns-5-5\s+container-1\s+13m\s+37Mi\s+42Mi`) - if !expectedRow.MatchString(textContent) { - t.Errorf("Expected row '%s' not found in output:\n%s", expectedRow.String(), textContent) - } - expectedTotal := regexp.MustCompile(`(?m)^\s+13m\s+37Mi\s+42Mi\s*$`) - if !expectedTotal.MatchString(textContent) { - t.Errorf("Expected total row '%s' not found in output:\n%s", expectedTotal.String(), textContent) - } - }) - podsTopNamespaceLabelSelector, err := c.callTool("pods_top", map[string]interface{}{ + s.Require().NotNil(result) + s.NoErrorf(err, "call tool failed %v", err) + textContent := result.Content[0].(mcp.TextContent).Text + s.Falsef(result.IsError, "call tool failed %v", textContent) + + expectedRow := regexp.MustCompile(`ns-5\s+pod-ns-5-5\s+container-1\s+13m\s+37Mi\s+42Mi`) + s.Regexpf(expectedRow, textContent, "expected row '%s' not found in output:\n%s", expectedRow.String(), textContent) + + expectedTotal := regexp.MustCompile(`(?m)^\s+13m\s+37Mi\s+42Mi\s*$`) + s.Regexpf(expectedTotal, textContent, "expected total row '%s' not found in output:\n%s", expectedTotal.String(), textContent) + }) + + s.Run("pods_top[label_selector=app=pod-ns-5-42] returns pod metrics from pods matching selector", func() { + result, err := s.CallTool("pods_top", map[string]interface{}{ "label_selector": "app=pod-ns-5-42", }) - t.Run("pods_top[label_selector=app=pod-ns-5-42] returns pod metrics from pods matching selector", func(t *testing.T) { - if err != nil { - t.Fatalf("call tool failed %v", err) - } - textContent := podsTopNamespaceLabelSelector.Content[0].(mcp.TextContent).Text - expectedRow := regexp.MustCompile(`ns-5\s+pod-ns-5-42\s+container-1\s+42m\s+42Mi`) - if !expectedRow.MatchString(textContent) { - t.Errorf("Expected row '%s' not found in output:\n%s", expectedRow.String(), textContent) - } - expectedTotal := regexp.MustCompile(`(?m)^\s+42m\s+42Mi\s+42Mi\s*$`) - if 
!expectedTotal.MatchString(textContent) { - t.Errorf("Expected total row '%s' not found in output:\n%s", expectedTotal.String(), textContent) - } - }) + s.Require().NotNil(result) + s.NoErrorf(err, "call tool failed %v", err) + textContent := result.Content[0].(mcp.TextContent).Text + s.Falsef(result.IsError, "call tool failed %v", textContent) + + expectedRow := regexp.MustCompile(`ns-5\s+pod-ns-5-42\s+container-1\s+42m\s+42Mi`) + s.Regexpf(expectedRow, textContent, "expected row '%s' not found in output:\n%s", expectedRow.String(), textContent) + + expectedTotal := regexp.MustCompile(`(?m)^\s+42m\s+42Mi\s+42Mi\s*$`) + s.Regexpf(expectedTotal, textContent, "expected total row '%s' not found in output:\n%s", expectedTotal.String(), textContent) }) } -func TestPodsTopDenied(t *testing.T) { - deniedResourcesServer := test.Must(config.ReadToml([]byte(` +func (s *PodsTopSuite) TestPodsTopDenied() { + s.Require().NoError(toml.Unmarshal([]byte(` denied_resources = [ { group = "metrics.k8s.io", version = "v1beta1" } ] - `))) - testCaseWithContext(t, &mcpContext{staticConfig: deniedResourcesServer}, func(c *mcpContext) { - mockServer := test.NewMockServer() - defer mockServer.Close() - c.withKubeConfig(mockServer.Config()) - mockServer.Handle(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { - w.Header().Set("Content-Type", "application/json") - // Request Performed by DiscoveryClient to Kube API (Get API Groups legacy -core-) - if req.URL.Path == "/api" { - _, _ = w.Write([]byte(`{"kind":"APIVersions","versions":["metrics.k8s.io/v1beta1"],"serverAddressByClientCIDRs":[{"clientCIDR":"0.0.0.0/0"}]}`)) - return - } - // Request Performed by DiscoveryClient to Kube API (Get API Groups) - if req.URL.Path == "/apis" { - _, _ = w.Write([]byte(`{"kind":"APIGroupList","apiVersion":"v1","groups":[]}`)) - return - } - // Request Performed by DiscoveryClient to Kube API (Get API Resources) - if req.URL.Path == "/apis/metrics.k8s.io/v1beta1" { - _, _ = 
w.Write([]byte(`{"kind":"APIResourceList","apiVersion":"v1","groupVersion":"metrics.k8s.io/v1beta1","resources":[{"name":"pods","singularName":"","namespaced":true,"kind":"PodMetrics","verbs":["get","list"]}]}`)) - return - } - })) - podsTop, _ := c.callTool("pods_top", map[string]interface{}{}) - t.Run("pods_run has error", func(t *testing.T) { - if !podsTop.IsError { - t.Fatalf("call tool should fail") - } + `), s.Cfg), "Expected to parse denied resources config") + s.mockServer.Handle(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + w.Header().Set("Content-Type", "application/json") + // Request Performed by DiscoveryClient to Kube API (Get API Groups legacy -core-) + if req.URL.Path == "/api" { + _, _ = w.Write([]byte(`{"kind":"APIVersions","versions":["metrics.k8s.io/v1beta1"],"serverAddressByClientCIDRs":[{"clientCIDR":"0.0.0.0/0"}]}`)) + return + } + // Request Performed by DiscoveryClient to Kube API (Get API Groups) + if req.URL.Path == "/apis" { + _, _ = w.Write([]byte(`{"kind":"APIGroupList","apiVersion":"v1","groups":[]}`)) + return + } + // Request Performed by DiscoveryClient to Kube API (Get API Resources) + if req.URL.Path == "/apis/metrics.k8s.io/v1beta1" { + _, _ = w.Write([]byte(`{"kind":"APIResourceList","apiVersion":"v1","groupVersion":"metrics.k8s.io/v1beta1","resources":[{"name":"pods","singularName":"","namespaced":true,"kind":"PodMetrics","verbs":["get","list"]}]}`)) + return + } + })) + s.InitMcpClient() + + s.Run("pods_top (denied)", func() { + result, err := s.CallTool("pods_top", map[string]interface{}{}) + s.Require().NotNil(result, "toolResult should not be nil") + s.Run("has error", func() { + s.Truef(result.IsError, "call tool should fail") + s.Nilf(err, "call tool should not return error object") }) - t.Run("pods_run describes denial", func(t *testing.T) { + s.Run("describes denial", func() { expectedMessage := "failed to get pods top: resource not allowed: metrics.k8s.io/v1beta1, Kind=PodMetrics" - if 
podsTop.Content[0].(mcp.TextContent).Text != expectedMessage { - t.Fatalf("expected descriptive error '%s', got %v", expectedMessage, podsTop.Content[0].(mcp.TextContent).Text) - } + s.Equalf(expectedMessage, result.Content[0].(mcp.TextContent).Text, + "expected descriptive error '%s', got %v", expectedMessage, result.Content[0].(mcp.TextContent).Text) }) }) } + +func TestPodsTop(t *testing.T) { + suite.Run(t, new(PodsTopSuite)) +} diff --git a/pkg/mcp/testdata/toolsets-core-tools.json b/pkg/mcp/testdata/toolsets-core-tools.json index 43680dae..37345100 100644 --- a/pkg/mcp/testdata/toolsets-core-tools.json +++ b/pkg/mcp/testdata/toolsets-core-tools.json @@ -33,6 +33,40 @@ }, "name": "namespaces_list" }, + { + "annotations": { + "title": "Node: Log", + "readOnlyHint": true, + "destructiveHint": false, + "idempotentHint": false, + "openWorldHint": true + }, + "description": "Get logs from a Kubernetes node (kubelet, kube-proxy, or other system logs). This accesses node logs through the Kubernetes API proxy to the kubelet", + "inputSchema": { + "type": "object", + "properties": { + "log_path": { + "default": "kubelet.log", + "description": "Path to the log file on the node (e.g. 'kubelet.log', 'kube-proxy.log'). 
Default is 'kubelet.log'", + "type": "string" + }, + "name": { + "description": "Name of the node to get logs from", + "type": "string" + }, + "tail": { + "default": 100, + "description": "Number of lines to retrieve from the end of the logs (Optional, 0 means all logs)", + "minimum": 0, + "type": "integer" + } + }, + "required": [ + "name" + ] + }, + "name": "nodes_log" + }, { "annotations": { "title": "Pods: Delete", diff --git a/pkg/mcp/testdata/toolsets-full-tools-multicluster-enum.json b/pkg/mcp/testdata/toolsets-full-tools-multicluster-enum.json index 97af6fb5..7041bd3f 100644 --- a/pkg/mcp/testdata/toolsets-full-tools-multicluster-enum.json +++ b/pkg/mcp/testdata/toolsets-full-tools-multicluster-enum.json @@ -195,6 +195,48 @@ }, "name": "namespaces_list" }, + { + "annotations": { + "title": "Node: Log", + "readOnlyHint": true, + "destructiveHint": false, + "idempotentHint": false, + "openWorldHint": true + }, + "description": "Get logs from a Kubernetes node (kubelet, kube-proxy, or other system logs). This accesses node logs through the Kubernetes API proxy to the kubelet", + "inputSchema": { + "type": "object", + "properties": { + "context": { + "description": "Optional parameter selecting which context to run the tool in. Defaults to fake-context if not set", + "enum": [ + "extra-cluster", + "fake-context" + ], + "type": "string" + }, + "log_path": { + "default": "kubelet.log", + "description": "Path to the log file on the node (e.g. 'kubelet.log', 'kube-proxy.log'). 
Default is 'kubelet.log'", + "type": "string" + }, + "name": { + "description": "Name of the node to get logs from", + "type": "string" + }, + "tail": { + "default": 100, + "description": "Number of lines to retrieve from the end of the logs (Optional, 0 means all logs)", + "minimum": 0, + "type": "integer" + } + }, + "required": [ + "name" + ] + }, + "name": "nodes_log" + }, { "annotations": { "title": "Pods: Delete", diff --git a/pkg/mcp/testdata/toolsets-full-tools-multicluster.json b/pkg/mcp/testdata/toolsets-full-tools-multicluster.json index 861a1b5a..a454f1ef 100644 --- a/pkg/mcp/testdata/toolsets-full-tools-multicluster.json +++ b/pkg/mcp/testdata/toolsets-full-tools-multicluster.json @@ -175,6 +175,44 @@ }, "name": "namespaces_list" }, + { + "annotations": { + "title": "Node: Log", + "readOnlyHint": true, + "destructiveHint": false, + "idempotentHint": false, + "openWorldHint": true + }, + "description": "Get logs from a Kubernetes node (kubelet, kube-proxy, or other system logs). This accesses node logs through the Kubernetes API proxy to the kubelet", + "inputSchema": { + "type": "object", + "properties": { + "context": { + "description": "Optional parameter selecting which context to run the tool in. Defaults to fake-context if not set", + "type": "string" + }, + "log_path": { + "default": "kubelet.log", + "description": "Path to the log file on the node (e.g. 'kubelet.log', 'kube-proxy.log'). 
Default is 'kubelet.log'", + "type": "string" + }, + "name": { + "description": "Name of the node to get logs from", + "type": "string" + }, + "tail": { + "default": 100, + "description": "Number of lines to retrieve from the end of the logs (Optional, 0 means all logs)", + "minimum": 0, + "type": "integer" + } + }, + "required": [ + "name" + ] + }, + "name": "nodes_log" + }, { "annotations": { "title": "Pods: Delete", diff --git a/pkg/mcp/testdata/toolsets-full-tools-openshift.json b/pkg/mcp/testdata/toolsets-full-tools-openshift.json index b5018945..5e5fa4ea 100644 --- a/pkg/mcp/testdata/toolsets-full-tools-openshift.json +++ b/pkg/mcp/testdata/toolsets-full-tools-openshift.json @@ -139,6 +139,40 @@ }, "name": "namespaces_list" }, + { + "annotations": { + "title": "Node: Log", + "readOnlyHint": true, + "destructiveHint": false, + "idempotentHint": false, + "openWorldHint": true + }, + "description": "Get logs from a Kubernetes node (kubelet, kube-proxy, or other system logs). This accesses node logs through the Kubernetes API proxy to the kubelet", + "inputSchema": { + "type": "object", + "properties": { + "log_path": { + "default": "kubelet.log", + "description": "Path to the log file on the node (e.g. 'kubelet.log', 'kube-proxy.log'). 
Default is 'kubelet.log'", + "type": "string" + }, + "name": { + "description": "Name of the node to get logs from", + "type": "string" + }, + "tail": { + "default": 100, + "description": "Number of lines to retrieve from the end of the logs (Optional, 0 means all logs)", + "minimum": 0, + "type": "integer" + } + }, + "required": [ + "name" + ] + }, + "name": "nodes_log" + }, { "annotations": { "title": "Pods: Delete", diff --git a/pkg/mcp/testdata/toolsets-full-tools.json b/pkg/mcp/testdata/toolsets-full-tools.json index 7b9f471d..56a160ed 100644 --- a/pkg/mcp/testdata/toolsets-full-tools.json +++ b/pkg/mcp/testdata/toolsets-full-tools.json @@ -139,6 +139,40 @@ }, "name": "namespaces_list" }, + { + "annotations": { + "title": "Node: Log", + "readOnlyHint": true, + "destructiveHint": false, + "idempotentHint": false, + "openWorldHint": true + }, + "description": "Get logs from a Kubernetes node (kubelet, kube-proxy, or other system logs). This accesses node logs through the Kubernetes API proxy to the kubelet", + "inputSchema": { + "type": "object", + "properties": { + "log_path": { + "default": "kubelet.log", + "description": "Path to the log file on the node (e.g. 'kubelet.log', 'kube-proxy.log'). 
Default is 'kubelet.log'", + "type": "string" + }, + "name": { + "description": "Name of the node to get logs from", + "type": "string" + }, + "tail": { + "default": 100, + "description": "Number of lines to retrieve from the end of the logs (Optional, 0 means all logs)", + "minimum": 0, + "type": "integer" + } + }, + "required": [ + "name" + ] + }, + "name": "nodes_log" + }, { "annotations": { "title": "Pods: Delete", diff --git a/pkg/mcp/toolsets_test.go b/pkg/mcp/toolsets_test.go index 527b1e22..d81392a5 100644 --- a/pkg/mcp/toolsets_test.go +++ b/pkg/mcp/toolsets_test.go @@ -65,6 +65,9 @@ func (s *ToolsetsSuite) TestNoToolsets() { } func (s *ToolsetsSuite) TestDefaultToolsetsTools() { + if configuration.HasDefaultOverrides() { + s.T().Skip("Skipping test because default configuration overrides are present (this is a downstream fork)") + } s.Run("Default configuration toolsets", func() { s.InitMcpClient() tools, err := s.ListTools(s.T().Context(), mcp.ListToolsRequest{}) @@ -82,6 +85,9 @@ func (s *ToolsetsSuite) TestDefaultToolsetsTools() { } func (s *ToolsetsSuite) TestDefaultToolsetsToolsInOpenShift() { + if configuration.HasDefaultOverrides() { + s.T().Skip("Skipping test because default configuration overrides are present (this is a downstream fork)") + } s.Run("Default configuration toolsets in OpenShift", func() { s.Handle(&test.InOpenShiftHandler{}) s.InitMcpClient() @@ -100,6 +106,9 @@ func (s *ToolsetsSuite) TestDefaultToolsetsToolsInOpenShift() { } func (s *ToolsetsSuite) TestDefaultToolsetsToolsInMultiCluster() { + if configuration.HasDefaultOverrides() { + s.T().Skip("Skipping test because default configuration overrides are present (this is a downstream fork)") + } s.Run("Default configuration toolsets in multi-cluster (with 11 clusters)", func() { kubeconfig := s.Kubeconfig() for i := 0; i < 10; i++ { @@ -123,6 +132,9 @@ func (s *ToolsetsSuite) TestDefaultToolsetsToolsInMultiCluster() { } func (s *ToolsetsSuite) 
TestDefaultToolsetsToolsInMultiClusterEnum() { + if configuration.HasDefaultOverrides() { + s.T().Skip("Skipping test because default configuration overrides are present (this is a downstream fork)") + } s.Run("Default configuration toolsets in multi-cluster (with 2 clusters)", func() { kubeconfig := s.Kubeconfig() // Add additional cluster to force multi-cluster behavior with enum parameter diff --git a/pkg/toolsets/core/nodes.go b/pkg/toolsets/core/nodes.go new file mode 100644 index 00000000..6c669398 --- /dev/null +++ b/pkg/toolsets/core/nodes.go @@ -0,0 +1,80 @@ +package core + +import ( + "errors" + "fmt" + + "github.com/google/jsonschema-go/jsonschema" + "k8s.io/utils/ptr" + + "github.com/containers/kubernetes-mcp-server/pkg/api" +) + +func initNodes() []api.ServerTool { + return []api.ServerTool{ + {Tool: api.Tool{ + Name: "nodes_log", + Description: "Get logs from a Kubernetes node (kubelet, kube-proxy, or other system logs). This accesses node logs through the Kubernetes API proxy to the kubelet", + InputSchema: &jsonschema.Schema{ + Type: "object", + Properties: map[string]*jsonschema.Schema{ + "name": { + Type: "string", + Description: "Name of the node to get logs from", + }, + "log_path": { + Type: "string", + Description: "Path to the log file on the node (e.g. 'kubelet.log', 'kube-proxy.log'). 
Default is 'kubelet.log'", + Default: api.ToRawMessage("kubelet.log"), + }, + "tail": { + Type: "integer", + Description: "Number of lines to retrieve from the end of the logs (Optional, 0 means all logs)", + Default: api.ToRawMessage(100), + Minimum: ptr.To(float64(0)), + }, + }, + Required: []string{"name"}, + }, + Annotations: api.ToolAnnotations{ + Title: "Node: Log", + ReadOnlyHint: ptr.To(true), + DestructiveHint: ptr.To(false), + IdempotentHint: ptr.To(false), + OpenWorldHint: ptr.To(true), + }, + }, Handler: nodesLog}, + } +} + +func nodesLog(params api.ToolHandlerParams) (*api.ToolCallResult, error) { + name, ok := params.GetArguments()["name"].(string) + if !ok || name == "" { + return api.NewToolCallResult("", errors.New("failed to get node log, missing argument name")), nil + } + logPath, ok := params.GetArguments()["log_path"].(string) + if !ok || logPath == "" { + logPath = "kubelet.log" + } + tail := params.GetArguments()["tail"] + var tailInt int64 + if tail != nil { + // Convert to int64 - safely handle both float64 (JSON number) and int types + switch v := tail.(type) { + case float64: + tailInt = int64(v) + case int: tailInt = int64(v) + case int64: + tailInt = v + default: + return api.NewToolCallResult("", fmt.Errorf("failed to parse tail parameter: expected integer, got %T", tail)), nil + } + } + ret, err := params.NodesLog(params, name, logPath, tailInt) + if err != nil { + return api.NewToolCallResult("", fmt.Errorf("failed to get node log for %s: %v", name, err)), nil + } else if ret == "" { + ret = fmt.Sprintf("The node %s has not logged any message yet or the log file is empty", name) + } + return api.NewToolCallResult(ret, nil), nil +} diff --git a/pkg/toolsets/core/toolset.go b/pkg/toolsets/core/toolset.go index 9f88c7aa..dfd61f42 100644 --- a/pkg/toolsets/core/toolset.go +++ b/pkg/toolsets/core/toolset.go @@ -24,6 +24,7 @@ func (t *Toolset) GetTools(o internalk8s.Openshift) []api.ServerTool { return slices.Concat( initEvents(), initNamespaces(o), +
initNodes(), initPods(), initResources(o), ) diff --git a/vendor/github.com/mark3labs/mcp-go/client/client.go b/vendor/github.com/mark3labs/mcp-go/client/client.go index 220786b6..929785cd 100644 --- a/vendor/github.com/mark3labs/mcp-go/client/client.go +++ b/vendor/github.com/mark3labs/mcp-go/client/client.go @@ -86,15 +86,10 @@ func (c *Client) Start(ctx context.Context) error { return fmt.Errorf("transport is nil") } - if _, ok := c.transport.(*transport.Stdio); !ok { - // the stdio transport from NewStdioMCPClientWithOptions - // is already started, dont start again. - // - // Start the transport for other transport types - err := c.transport.Start(ctx) - if err != nil { - return err - } + // Start is idempotent - transports handle being called multiple times + err := c.transport.Start(ctx) + if err != nil { + return err } c.transport.SetNotificationHandler(func(notification mcp.JSONRPCNotification) { @@ -502,6 +497,19 @@ func (c *Client) handleSamplingRequestTransport(ctx context.Context, request tra } } + // Fix content parsing - HTTP transport unmarshals TextContent as map[string]any + // Use the helper function to properly handle content from different transports + for i := range params.Messages { + if contentMap, ok := params.Messages[i].Content.(map[string]any); ok { + // Parse the content map into a proper Content type + content, err := mcp.ParseContent(contentMap) + if err != nil { + return nil, fmt.Errorf("failed to parse content for message %d: %w", i, err) + } + params.Messages[i].Content = content + } + } + // Create the MCP request mcpRequest := mcp.CreateMessageRequest{ Request: mcp.Request{ diff --git a/vendor/github.com/mark3labs/mcp-go/client/transport/inprocess.go b/vendor/github.com/mark3labs/mcp-go/client/transport/inprocess.go index f69fa7b5..46765426 100644 --- a/vendor/github.com/mark3labs/mcp-go/client/transport/inprocess.go +++ b/vendor/github.com/mark3labs/mcp-go/client/transport/inprocess.go @@ -19,6 +19,8 @@ type InProcessTransport 
struct { onNotification func(mcp.JSONRPCNotification) notifyMu sync.RWMutex + started bool + startedMu sync.Mutex } type InProcessOption func(*InProcessTransport) @@ -55,10 +57,21 @@ func NewInProcessTransportWithOptions(server *server.MCPServer, opts ...InProces } func (c *InProcessTransport) Start(ctx context.Context) error { + c.startedMu.Lock() + if c.started { + c.startedMu.Unlock() + return nil + } + c.started = true + c.startedMu.Unlock() + // Create and register session if we have handlers if c.samplingHandler != nil || c.elicitationHandler != nil { c.session = server.NewInProcessSessionWithHandlers(c.sessionID, c.samplingHandler, c.elicitationHandler) if err := c.server.RegisterSession(ctx, c.session); err != nil { + c.startedMu.Lock() + c.started = false + c.startedMu.Unlock() return fmt.Errorf("failed to register session: %w", err) } } diff --git a/vendor/github.com/mark3labs/mcp-go/client/transport/oauth.go b/vendor/github.com/mark3labs/mcp-go/client/transport/oauth.go index bb702adf..0fce1d80 100644 --- a/vendor/github.com/mark3labs/mcp-go/client/transport/oauth.go +++ b/vendor/github.com/mark3labs/mcp-go/client/transport/oauth.go @@ -34,6 +34,9 @@ type OAuthConfig struct { AuthServerMetadataURL string // PKCEEnabled enables PKCE for the OAuth flow (recommended for public clients) PKCEEnabled bool + // HTTPClient is an optional HTTP client to use for requests. + // If nil, a default HTTP client with a 30 second timeout will be used. + HTTPClient *http.Client } // TokenStore is an interface for storing and retrieving OAuth tokens. 
@@ -151,10 +154,13 @@ func NewOAuthHandler(config OAuthConfig) *OAuthHandler { if config.TokenStore == nil { config.TokenStore = NewMemoryTokenStore() } + if config.HTTPClient == nil { + config.HTTPClient = &http.Client{Timeout: 30 * time.Second} + } return &OAuthHandler{ config: config, - httpClient: &http.Client{Timeout: 30 * time.Second}, + httpClient: config.HTTPClient, } } diff --git a/vendor/github.com/mark3labs/mcp-go/client/transport/sse.go b/vendor/github.com/mark3labs/mcp-go/client/transport/sse.go index 305c9316..3cc7e98e 100644 --- a/vendor/github.com/mark3labs/mcp-go/client/transport/sse.go +++ b/vendor/github.com/mark3labs/mcp-go/client/transport/sse.go @@ -115,7 +115,7 @@ func NewSSE(baseURL string, options ...ClientOption) (*SSE, error) { // Returns an error if the connection fails or times out waiting for the endpoint. func (c *SSE) Start(ctx context.Context) error { if c.started.Load() { - return fmt.Errorf("has already started") + return nil } ctx, cancel := context.WithCancel(ctx) diff --git a/vendor/github.com/mark3labs/mcp-go/client/transport/stdio.go b/vendor/github.com/mark3labs/mcp-go/client/transport/stdio.go index e4f26857..baa9cec4 100644 --- a/vendor/github.com/mark3labs/mcp-go/client/transport/stdio.go +++ b/vendor/github.com/mark3labs/mcp-go/client/transport/stdio.go @@ -40,6 +40,8 @@ type Stdio struct { ctx context.Context ctxMu sync.RWMutex logger util.Logger + started bool + startedMu sync.Mutex } // StdioOption defines a function that configures a Stdio transport instance. 
@@ -124,12 +126,23 @@ func NewStdioWithOptions( } func (c *Stdio) Start(ctx context.Context) error { + c.startedMu.Lock() + if c.started { + c.startedMu.Unlock() + return nil + } + c.started = true + c.startedMu.Unlock() + // Store the context for use in request handling c.ctxMu.Lock() c.ctx = ctx c.ctxMu.Unlock() if err := c.spawnCommand(ctx); err != nil { + c.startedMu.Lock() + c.started = false + c.startedMu.Unlock() return err } diff --git a/vendor/github.com/mark3labs/mcp-go/client/transport/streamable_http.go b/vendor/github.com/mark3labs/mcp-go/client/transport/streamable_http.go index 9d521813..000237ce 100644 --- a/vendor/github.com/mark3labs/mcp-go/client/transport/streamable_http.go +++ b/vendor/github.com/mark3labs/mcp-go/client/transport/streamable_http.go @@ -162,6 +162,13 @@ func NewStreamableHTTP(serverURL string, options ...StreamableHTTPCOption) (*Str // Start initiates the HTTP connection to the server. func (c *StreamableHTTP) Start(ctx context.Context) error { + // Start is idempotent - check if already initialized + select { + case <-c.initialized: + return nil + default: + } + // For Streamable HTTP, we don't need to establish a persistent connection by default if c.getListeningEnabled { go func() { @@ -594,10 +601,14 @@ func (c *StreamableHTTP) IsOAuthEnabled() bool { func (c *StreamableHTTP) listenForever(ctx context.Context) { c.logger.Infof("listening to server forever") for { - connectCtx, cancel := context.WithCancel(ctx) - err := c.createGETConnectionToServer(connectCtx) - cancel() - + // Use the original context for continuous listening - no per-iteration timeout + // The SSE connection itself will detect disconnections via the underlying HTTP transport, + // and the context cancellation will propagate from the parent to stop listening gracefully. + // We don't add an artificial timeout here because: + // 1. Persistent SSE connections are meant to stay open indefinitely + // 2. 
Network-level timeouts and keep-alives handle connection health + // 3. Context cancellation (user-initiated or system shutdown) provides clean shutdown + err := c.createGETConnectionToServer(ctx) if errors.Is(err, ErrGetMethodNotAllowed) { // server does not support listening c.logger.Errorf("server does not support listening") diff --git a/vendor/github.com/mark3labs/mcp-go/mcp/tools.go b/vendor/github.com/mark3labs/mcp-go/mcp/tools.go index 493e8c77..80ae5091 100644 --- a/vendor/github.com/mark3labs/mcp-go/mcp/tools.go +++ b/vendor/github.com/mark3labs/mcp-go/mcp/tools.go @@ -613,6 +613,11 @@ func (t Tool) MarshalJSON() ([]byte, error) { m["annotations"] = t.Annotations + // Marshal Meta if present + if t.Meta != nil { + m["_meta"] = t.Meta + } + return json.Marshal(m) } @@ -1086,8 +1091,10 @@ func WithObject(name string, opts ...PropertyOption) ToolOption { } } -// WithArray adds an array property to the tool schema. -// It accepts property options to configure the array property's behavior and constraints. +// WithArray returns a ToolOption that adds an array-typed property with the given name to a Tool's input schema. +// It applies provided PropertyOption functions to configure the property's schema, moves a `required` flag +// from the property schema into the Tool's InputSchema.Required slice when present, and registers the resulting +// schema under InputSchema.Properties[name]. func WithArray(name string, opts ...PropertyOption) ToolOption { return func(t *Tool) { schema := map[string]any{ @@ -1108,7 +1115,29 @@ func WithArray(name string, opts ...PropertyOption) ToolOption { } } -// Properties defines the properties for an object schema +// WithAny adds an input property named name with no predefined JSON Schema type to the Tool's input schema. 
+// The returned ToolOption applies the provided PropertyOption functions to the property's schema, moves a property-level +// `required` flag into the Tool's InputSchema.Required list if present, and stores the resulting schema under InputSchema.Properties[name]. +func WithAny(name string, opts ...PropertyOption) ToolOption { + return func(t *Tool) { + schema := map[string]any{} + + for _, opt := range opts { + opt(schema) + } + + // Remove required from property schema and add to InputSchema.required + if required, ok := schema["required"].(bool); ok && required { + delete(schema, "required") + t.InputSchema.Required = append(t.InputSchema.Required, name) + } + + t.InputSchema.Properties[name] = schema + } +} + +// Properties sets the "properties" map for an object schema. +// The returned PropertyOption stores the provided map under the schema's "properties" key. func Properties(props map[string]any) PropertyOption { return func(schema map[string]any) { schema["properties"] = props diff --git a/vendor/github.com/mark3labs/mcp-go/mcp/types.go b/vendor/github.com/mark3labs/mcp-go/mcp/types.go index 69ea73ff..0f97821b 100644 --- a/vendor/github.com/mark3labs/mcp-go/mcp/types.go +++ b/vendor/github.com/mark3labs/mcp-go/mcp/types.go @@ -739,8 +739,9 @@ type ResourceContents interface { } type TextResourceContents struct { - // Meta is a metadata object that is reserved by MCP for storing additional information. - Meta *Meta `json:"_meta,omitempty"` + // Raw per‑resource metadata; pass‑through as defined by MCP. Not the same as mcp.Meta. + // Allows _meta to be used for MCP-UI features for example. Does not assume any specific format. + Meta map[string]any `json:"_meta,omitempty"` // The URI of this resource. URI string `json:"uri"` // The MIME type of this resource, if known. 
@@ -753,8 +754,9 @@ type TextResourceContents struct { func (TextResourceContents) isResourceContents() {} type BlobResourceContents struct { - // Meta is a metadata object that is reserved by MCP for storing additional information. - Meta *Meta `json:"_meta,omitempty"` + // Raw per‑resource metadata; pass‑through as defined by MCP. Not the same as mcp.Meta. + // Allows _meta to be used for MCP-UI features for example. Does not assume any specific format. + Meta map[string]any `json:"_meta,omitempty"` // The URI of this resource. URI string `json:"uri"` // The MIME type of this resource, if known. diff --git a/vendor/github.com/mark3labs/mcp-go/mcp/utils.go b/vendor/github.com/mark3labs/mcp-go/mcp/utils.go index 0a3cde23..904a3dd6 100644 --- a/vendor/github.com/mark3labs/mcp-go/mcp/utils.go +++ b/vendor/github.com/mark3labs/mcp-go/mcp/utils.go @@ -767,8 +767,15 @@ func ParseResourceContents(contentMap map[string]any) (ResourceContents, error) mimeType := ExtractString(contentMap, "mimeType") + meta := ExtractMap(contentMap, "_meta") + + if _, present := contentMap["_meta"]; present && meta == nil { + return nil, fmt.Errorf("_meta must be an object") + } + if text := ExtractString(contentMap, "text"); text != "" { return TextResourceContents{ + Meta: meta, URI: uri, MIMEType: mimeType, Text: text, @@ -777,6 +784,7 @@ func ParseResourceContents(contentMap map[string]any) (ResourceContents, error) if blob := ExtractString(contentMap, "blob"); blob != "" { return BlobResourceContents{ + Meta: meta, URI: uri, MIMEType: mimeType, Blob: blob, @@ -941,3 +949,31 @@ func ParseStringMap(request CallToolRequest, key string, defaultValue map[string func ToBoolPtr(b bool) *bool { return &b } + +// GetTextFromContent extracts text from a Content interface that might be a TextContent struct +// or a map[string]any that was unmarshaled from JSON. This is useful when dealing with content +// that comes from different transport layers that may handle JSON differently. 
+// +// This function uses fallback behavior for non-text content - it returns a string representation +// via fmt.Sprintf for any content that cannot be extracted as text. This is a lossy operation +// intended for convenience in logging and display scenarios. +// +// For strict type validation, use ParseContent() instead, which returns an error for invalid content. +func GetTextFromContent(content any) string { + switch c := content.(type) { + case TextContent: + return c.Text + case map[string]any: + // Handle JSON unmarshaled content + if contentType, exists := c["type"]; exists && contentType == "text" { + if text, exists := c["text"].(string); exists { + return text + } + } + return fmt.Sprintf("%v", content) + case string: + return c + default: + return fmt.Sprintf("%v", content) + } +} diff --git a/vendor/github.com/mark3labs/mcp-go/server/errors.go b/vendor/github.com/mark3labs/mcp-go/server/errors.go index 3864f36f..4668e459 100644 --- a/vendor/github.com/mark3labs/mcp-go/server/errors.go +++ b/vendor/github.com/mark3labs/mcp-go/server/errors.go @@ -13,11 +13,12 @@ var ( ErrToolNotFound = errors.New("tool not found") // Session-related errors - ErrSessionNotFound = errors.New("session not found") - ErrSessionExists = errors.New("session already exists") - ErrSessionNotInitialized = errors.New("session not properly initialized") - ErrSessionDoesNotSupportTools = errors.New("session does not support per-session tools") - ErrSessionDoesNotSupportLogging = errors.New("session does not support setting logging level") + ErrSessionNotFound = errors.New("session not found") + ErrSessionExists = errors.New("session already exists") + ErrSessionNotInitialized = errors.New("session not properly initialized") + ErrSessionDoesNotSupportTools = errors.New("session does not support per-session tools") + ErrSessionDoesNotSupportResources = errors.New("session does not support per-session resources") + ErrSessionDoesNotSupportLogging = errors.New("session does not support 
setting logging level") // Notification-related errors ErrNotificationNotInitialized = errors.New("notification channel not initialized") diff --git a/vendor/github.com/mark3labs/mcp-go/server/server.go b/vendor/github.com/mark3labs/mcp-go/server/server.go index 5234bd6c..f45c0353 100644 --- a/vendor/github.com/mark3labs/mcp-go/server/server.go +++ b/vendor/github.com/mark3labs/mcp-go/server/server.go @@ -2,10 +2,12 @@ package server import ( + "cmp" "context" "encoding/base64" "encoding/json" "fmt" + "maps" "slices" "sort" "sync" @@ -826,21 +828,36 @@ func (s *MCPServer) handleListResources( request mcp.ListResourcesRequest, ) (*mcp.ListResourcesResult, *requestError) { s.resourcesMu.RLock() - resources := make([]mcp.Resource, 0, len(s.resources)) - for _, entry := range s.resources { - resources = append(resources, entry.resource) + resourceMap := make(map[string]mcp.Resource, len(s.resources)) + for uri, entry := range s.resources { + resourceMap[uri] = entry.resource } s.resourcesMu.RUnlock() + // Check if there are session-specific resources + session := ClientSessionFromContext(ctx) + if session != nil { + if sessionWithResources, ok := session.(SessionWithResources); ok { + if sessionResources := sessionWithResources.GetSessionResources(); sessionResources != nil { + // Merge session-specific resources with global resources + for uri, serverResource := range sessionResources { + resourceMap[uri] = serverResource.Resource + } + } + } + } + // Sort the resources by name - sort.Slice(resources, func(i, j int) bool { - return resources[i].Name < resources[j].Name + resourcesList := slices.SortedFunc(maps.Values(resourceMap), func(a, b mcp.Resource) int { + return cmp.Compare(a.Name, b.Name) }) + + // Apply pagination resourcesToReturn, nextCursor, err := listByPagination( ctx, s, request.Params.Cursor, - resources, + resourcesList, ) if err != nil { return nil, &requestError{ @@ -900,9 +917,35 @@ func (s *MCPServer) handleReadResource( request 
mcp.ReadResourceRequest, ) (*mcp.ReadResourceResult, *requestError) { s.resourcesMu.RLock() + + // First check session-specific resources + var handler ResourceHandlerFunc + var ok bool + + session := ClientSessionFromContext(ctx) + if session != nil { + if sessionWithResources, typeAssertOk := session.(SessionWithResources); typeAssertOk { + if sessionResources := sessionWithResources.GetSessionResources(); sessionResources != nil { + resource, sessionOk := sessionResources[request.Params.URI] + if sessionOk { + handler = resource.Handler + ok = true + } + } + } + } + + // If not found in session resources, check global resources + if !ok { + globalResource, rok := s.resources[request.Params.URI] + if rok { + handler = globalResource.handler + ok = true + } + } + + // First try direct resource handlers - if entry, ok := s.resources[request.Params.URI]; ok { - handler := entry.handler + if ok { s.resourcesMu.RUnlock() finalHandler := handler @@ -945,7 +988,17 @@ func (s *MCPServer) handleReadResource( s.resourcesMu.RUnlock() if matched { - contents, err := matchedHandler(ctx, request) + // If a match is found, then we have a final handler and can + // apply middlewares.
+ s.resourceMiddlewareMu.RLock() + finalHandler := ResourceHandlerFunc(matchedHandler) + mw := s.resourceHandlerMiddlewares + // Apply middlewares in reverse order + for i := len(mw) - 1; i >= 0; i-- { + finalHandler = mw[i](finalHandler) + } + s.resourceMiddlewareMu.RUnlock() + contents, err := finalHandler(ctx, request) if err != nil { return nil, &requestError{ id: id, diff --git a/vendor/github.com/mark3labs/mcp-go/server/session.go b/vendor/github.com/mark3labs/mcp-go/server/session.go index 3d11df93..99d6db8d 100644 --- a/vendor/github.com/mark3labs/mcp-go/server/session.go +++ b/vendor/github.com/mark3labs/mcp-go/server/session.go @@ -3,6 +3,7 @@ package server import ( "context" "fmt" + "net/url" "github.com/mark3labs/mcp-go/mcp" ) @@ -39,6 +40,17 @@ type SessionWithTools interface { SetSessionTools(tools map[string]ServerTool) } +// SessionWithResources is an extension of ClientSession that can store session-specific resource data +type SessionWithResources interface { + ClientSession + // GetSessionResources returns the resources specific to this session, if any + // This method must be thread-safe for concurrent access + GetSessionResources() map[string]ServerResource + // SetSessionResources sets resources specific to this session + // This method must be thread-safe for concurrent access + SetSessionResources(resources map[string]ServerResource) +} + // SessionWithClientInfo is an extension of ClientSession that can store client info type SessionWithClientInfo interface { ClientSession @@ -449,3 +461,155 @@ func (s *MCPServer) DeleteSessionTools(sessionID string, names ...string) error return nil } + +// AddSessionResource adds a resource for a specific session +func (s *MCPServer) AddSessionResource(sessionID string, resource mcp.Resource, handler ResourceHandlerFunc) error { + return s.AddSessionResources(sessionID, ServerResource{Resource: resource, Handler: handler}) +} + +// AddSessionResources adds resources for a specific session +func (s 
*MCPServer) AddSessionResources(sessionID string, resources ...ServerResource) error { + sessionValue, ok := s.sessions.Load(sessionID) + if !ok { + return ErrSessionNotFound + } + + session, ok := sessionValue.(SessionWithResources) + if !ok { + return ErrSessionDoesNotSupportResources + } + + // For session resources, we want listChanged enabled by default + s.implicitlyRegisterCapabilities( + func() bool { return s.capabilities.resources != nil }, + func() { s.capabilities.resources = &resourceCapabilities{listChanged: true} }, + ) + + // Get existing resources (this should return a thread-safe copy) + sessionResources := session.GetSessionResources() + + // Create a new map to avoid concurrent modification issues + newSessionResources := make(map[string]ServerResource, len(sessionResources)+len(resources)) + + // Copy existing resources + for k, v := range sessionResources { + newSessionResources[k] = v + } + + // Add new resources with validation + for _, resource := range resources { + // Validate that URI is non-empty + if resource.Resource.URI == "" { + return fmt.Errorf("resource URI cannot be empty") + } + + // Validate that URI conforms to RFC 3986 + if _, err := url.ParseRequestURI(resource.Resource.URI); err != nil { + return fmt.Errorf("invalid resource URI: %w", err) + } + + newSessionResources[resource.Resource.URI] = resource + } + + // Set the resources (this should be thread-safe) + session.SetSessionResources(newSessionResources) + + // It only makes sense to send resource notifications to initialized sessions -- + // if we're not initialized yet the client can't possibly have sent their + // initial resources/list message. + // + // For initialized sessions, honor resources.listChanged, which is specifically + // about whether notifications will be sent or not. 
+ // see + if session.Initialized() && s.capabilities.resources != nil && s.capabilities.resources.listChanged { + // Send notification only to this session + if err := s.SendNotificationToSpecificClient(sessionID, "notifications/resources/list_changed", nil); err != nil { + // Log the error but don't fail the operation + // The resources were successfully added, but notification failed + if s.hooks != nil && len(s.hooks.OnError) > 0 { + hooks := s.hooks + go func(sID string, hooks *Hooks) { + ctx := context.Background() + hooks.onError(ctx, nil, "notification", map[string]any{ + "method": "notifications/resources/list_changed", + "sessionID": sID, + }, fmt.Errorf("failed to send notification after adding resources: %w", err)) + }(sessionID, hooks) + } + } + } + + return nil +} + +// DeleteSessionResources removes resources from a specific session +func (s *MCPServer) DeleteSessionResources(sessionID string, uris ...string) error { + sessionValue, ok := s.sessions.Load(sessionID) + if !ok { + return ErrSessionNotFound + } + + session, ok := sessionValue.(SessionWithResources) + if !ok { + return ErrSessionDoesNotSupportResources + } + + // Get existing resources (this should return a thread-safe copy) + sessionResources := session.GetSessionResources() + if sessionResources == nil { + return nil + } + + // Create a new map to avoid concurrent modification issues + newSessionResources := make(map[string]ServerResource, len(sessionResources)) + + // Copy existing resources except those being deleted + for k, v := range sessionResources { + newSessionResources[k] = v + } + + // Remove specified resources and track if anything was actually deleted + actuallyDeleted := false + for _, uri := range uris { + if _, exists := newSessionResources[uri]; exists { + delete(newSessionResources, uri) + actuallyDeleted = true + } + } + + // Skip no-op write if nothing was actually deleted + if !actuallyDeleted { + return nil + } + + // Set the resources (this should be thread-safe) 
+ session.SetSessionResources(newSessionResources) + + // It only makes sense to send resource notifications to initialized sessions -- + // if we're not initialized yet the client can't possibly have sent their + // initial resources/list message. + // + // For initialized sessions, honor resources.listChanged, which is specifically + // about whether notifications will be sent or not. + // see + // Only send notification if something was actually deleted + if actuallyDeleted && session.Initialized() && s.capabilities.resources != nil && s.capabilities.resources.listChanged { + // Send notification only to this session + if err := s.SendNotificationToSpecificClient(sessionID, "notifications/resources/list_changed", nil); err != nil { + // Log the error but don't fail the operation + // The resources were successfully deleted, but notification failed + if s.hooks != nil && len(s.hooks.OnError) > 0 { + hooks := s.hooks + go func(sID string, hooks *Hooks) { + ctx := context.Background() + hooks.onError(ctx, nil, "notification", map[string]any{ + "method": "notifications/resources/list_changed", + "sessionID": sID, + }, fmt.Errorf("failed to send notification after deleting resources: %w", err)) + }(sessionID, hooks) + } + } + } + + return nil +} diff --git a/vendor/github.com/mark3labs/mcp-go/server/sse.go b/vendor/github.com/mark3labs/mcp-go/server/sse.go index 9c9766cf..250141ce 100644 --- a/vendor/github.com/mark3labs/mcp-go/server/sse.go +++ b/vendor/github.com/mark3labs/mcp-go/server/sse.go @@ -29,6 +29,7 @@ type sseSession struct { initialized atomic.Bool loggingLevel atomic.Value tools sync.Map // stores session-specific tools + resources sync.Map // stores session-specific resources clientInfo atomic.Value // stores session-specific client info clientCapabilities atomic.Value // stores session-specific client capabilities } @@ -75,6 +76,27 @@ func (s *sseSession) GetLogLevel() mcp.LoggingLevel { return level.(mcp.LoggingLevel) } +func (s *sseSession) 
GetSessionResources() map[string]ServerResource { + resources := make(map[string]ServerResource) + s.resources.Range(func(key, value any) bool { + if resource, ok := value.(ServerResource); ok { + resources[key.(string)] = resource + } + return true + }) + return resources +} + +func (s *sseSession) SetSessionResources(resources map[string]ServerResource) { + // Clear existing resources + s.resources.Clear() + + // Set new resources + for name, resource := range resources { + s.resources.Store(name, resource) + } +} + func (s *sseSession) GetSessionTools() map[string]ServerTool { tools := make(map[string]ServerTool) s.tools.Range(func(key, value any) bool { @@ -125,6 +147,7 @@ func (s *sseSession) GetClientCapabilities() mcp.ClientCapabilities { var ( _ ClientSession = (*sseSession)(nil) _ SessionWithTools = (*sseSession)(nil) + _ SessionWithResources = (*sseSession)(nil) _ SessionWithLogging = (*sseSession)(nil) _ SessionWithClientInfo = (*sseSession)(nil) ) diff --git a/vendor/github.com/mark3labs/mcp-go/server/stdio.go b/vendor/github.com/mark3labs/mcp-go/server/stdio.go index d80941c3..80131f06 100644 --- a/vendor/github.com/mark3labs/mcp-go/server/stdio.go +++ b/vendor/github.com/mark3labs/mcp-go/server/stdio.go @@ -606,7 +606,18 @@ func (s *stdioSession) handleSamplingResponse(rawMessage json.RawMessage) bool { if err := json.Unmarshal(response.Result, &result); err != nil { samplingResp.err = fmt.Errorf("failed to unmarshal sampling response: %w", err) } else { - samplingResp.result = &result + // Parse content from map[string]any to proper Content type (TextContent, ImageContent, AudioContent) + if contentMap, ok := result.Content.(map[string]any); ok { + content, err := mcp.ParseContent(contentMap) + if err != nil { + samplingResp.err = fmt.Errorf("failed to parse sampling response content: %w", err) + } else { + result.Content = content + samplingResp.result = &result + } + } else { + samplingResp.result = &result + } } } diff --git 
a/vendor/github.com/mark3labs/mcp-go/server/streamable_http.go b/vendor/github.com/mark3labs/mcp-go/server/streamable_http.go index 056dc876..8af6f147 100644 --- a/vendor/github.com/mark3labs/mcp-go/server/streamable_http.go +++ b/vendor/github.com/mark3labs/mcp-go/server/streamable_http.go @@ -6,6 +6,7 @@ import ( "encoding/json" "fmt" "io" + "maps" "mime" "net/http" "net/http/httptest" @@ -71,6 +72,16 @@ func WithHeartbeatInterval(interval time.Duration) StreamableHTTPOption { } } +// WithDisableStreaming prevents the server from responding to GET requests with +// a streaming response. Instead, it will respond with a 405 Method Not Allowed status. +// This can be useful in scenarios where streaming is not desired or supported. +// The default is false, meaning streaming is enabled. +func WithDisableStreaming(disable bool) StreamableHTTPOption { + return func(s *StreamableHTTPServer) { + s.disableStreaming = disable + } +} + // WithHTTPContextFunc sets a function that will be called to customise the context // to the server using the incoming request. // This can be used to inject context values from headers, for example. 
@@ -131,6 +142,7 @@ func WithTLSCert(certFile, keyFile string) StreamableHTTPOption { type StreamableHTTPServer struct { server *MCPServer sessionTools *sessionToolsStore + sessionResources *sessionResourcesStore sessionRequestIDs sync.Map // sessionId --> last requestID(*atomic.Int64) activeSessions sync.Map // sessionId --> *streamableHttpSession (for sampling responses) @@ -143,6 +155,7 @@ type StreamableHTTPServer struct { listenHeartbeatInterval time.Duration logger util.Logger sessionLogLevels *sessionLogLevelsStore + disableStreaming bool tlsCertFile string tlsKeyFile string @@ -157,6 +170,7 @@ func NewStreamableHTTPServer(server *MCPServer, opts ...StreamableHTTPOption) *S endpointPath: "/mcp", sessionIdManager: &InsecureStatefulSessionIdManager{}, logger: util.DefaultLogger(), + sessionResources: newSessionResourcesStore(), } // Apply all options @@ -309,7 +323,30 @@ func (s *StreamableHTTPServer) handlePost(w http.ResponseWriter, r *http.Request } } - session := newStreamableHttpSession(sessionID, s.sessionTools, s.sessionLogLevels) + // For non-initialize requests, try to reuse existing registered session + var session *streamableHttpSession + if !isInitializeRequest { + if sessionValue, ok := s.server.sessions.Load(sessionID); ok { + if existingSession, ok := sessionValue.(*streamableHttpSession); ok { + session = existingSession + } + } + } + + // Check if a persistent session exists (for sampling support), otherwise create ephemeral session + // Persistent sessions are created by GET (continuous listening) connections + if session == nil { + if sessionInterface, exists := s.activeSessions.Load(sessionID); exists { + if persistentSession, ok := sessionInterface.(*streamableHttpSession); ok { + session = persistentSession + } + } + } + + // Create ephemeral session if no persistent session exists + if session == nil { + session = newStreamableHttpSession(sessionID, s.sessionTools, s.sessionResources, s.sessionLogLevels) + } // Set the client context 
before handling the message ctx := s.server.WithContext(r.Context(), session) @@ -405,11 +442,31 @@ func (s *StreamableHTTPServer) handlePost(w http.ResponseWriter, r *http.Request s.logger.Errorf("Failed to write response: %v", err) } } + + // Register session after successful initialization + // Only register if not already registered (e.g., by a GET connection) + if isInitializeRequest && sessionID != "" { + if _, exists := s.server.sessions.Load(sessionID); !exists { + // Store in activeSessions to prevent duplicate registration from GET + s.activeSessions.Store(sessionID, session) + // Register the session with the MCPServer for notification support + if err := s.server.RegisterSession(ctx, session); err != nil { + s.logger.Errorf("Failed to register POST session: %v", err) + s.activeSessions.Delete(sessionID) + // Don't fail the request, just log the error + } + } + } } func (s *StreamableHTTPServer) handleGet(w http.ResponseWriter, r *http.Request) { // get request is for listening to notifications // https://modelcontextprotocol.io/specification/2025-03-26/basic/transports#listening-for-messages-from-the-server + if s.disableStreaming { + s.logger.Infof("Rejected GET request: streaming is disabled (session: %s)", r.Header.Get(HeaderKeySessionID)) + http.Error(w, "Streaming is disabled on this server", http.StatusMethodNotAllowed) + return + } sessionID := r.Header.Get(HeaderKeySessionID) // the specification didn't say we should validate the session id @@ -420,16 +477,23 @@ func (s *StreamableHTTPServer) handleGet(w http.ResponseWriter, r *http.Request) sessionID = uuid.New().String() } - session := newStreamableHttpSession(sessionID, s.sessionTools, s.sessionLogLevels) - if err := s.server.RegisterSession(r.Context(), session); err != nil { - http.Error(w, fmt.Sprintf("Session registration failed: %v", err), http.StatusBadRequest) - return + // Get or create session atomically to prevent TOCTOU races + // where concurrent GETs could both create and 
register duplicate sessions + var session *streamableHttpSession + newSession := newStreamableHttpSession(sessionID, s.sessionTools, s.sessionResources, s.sessionLogLevels) + actual, loaded := s.activeSessions.LoadOrStore(sessionID, newSession) + session = actual.(*streamableHttpSession) + + if !loaded { + // We created a new session, need to register it + if err := s.server.RegisterSession(r.Context(), session); err != nil { + s.activeSessions.Delete(sessionID) + http.Error(w, fmt.Sprintf("Session registration failed: %v", err), http.StatusBadRequest) + return + } + defer s.server.UnregisterSession(r.Context(), sessionID) + defer s.activeSessions.Delete(sessionID) } - defer s.server.UnregisterSession(r.Context(), sessionID) - - // Register session for sampling response delivery - s.activeSessions.Store(sessionID, session) - defer s.activeSessions.Delete(sessionID) // Set the client context before handling the message w.Header().Set("Content-Type", "text/event-stream") @@ -557,6 +621,7 @@ func (s *StreamableHTTPServer) handleDelete(w http.ResponseWriter, r *http.Reque // remove the session relateddata from the sessionToolsStore s.sessionTools.delete(sessionID) + s.sessionResources.delete(sessionID) s.sessionLogLevels.delete(sessionID) // remove current session's requstID information s.sessionRequestIDs.Delete(sessionID) @@ -626,7 +691,8 @@ func (s *StreamableHTTPServer) handleSamplingResponse(w http.ResponseWriter, r * response.err = fmt.Errorf("sampling error %d: %s", jsonrpcError.Code, jsonrpcError.Message) } } else if responseMessage.Result != nil { - // Parse result + // Store the result to be unmarshaled later + response.result = responseMessage.Result } else { response.err = fmt.Errorf("sampling response has neither result nor error") } @@ -735,6 +801,39 @@ func (s *sessionLogLevelsStore) delete(sessionID string) { delete(s.logs, sessionID) } +type sessionResourcesStore struct { + mu sync.RWMutex + resources map[string]map[string]ServerResource // sessionID 
-> resourceURI -> resource +} + +func newSessionResourcesStore() *sessionResourcesStore { + return &sessionResourcesStore{ + resources: make(map[string]map[string]ServerResource), + } +} + +func (s *sessionResourcesStore) get(sessionID string) map[string]ServerResource { + s.mu.RLock() + defer s.mu.RUnlock() + cloned := make(map[string]ServerResource, len(s.resources[sessionID])) + maps.Copy(cloned, s.resources[sessionID]) + return cloned +} + +func (s *sessionResourcesStore) set(sessionID string, resources map[string]ServerResource) { + s.mu.Lock() + defer s.mu.Unlock() + cloned := make(map[string]ServerResource, len(resources)) + maps.Copy(cloned, resources) + s.resources[sessionID] = cloned +} + +func (s *sessionResourcesStore) delete(sessionID string) { + s.mu.Lock() + defer s.mu.Unlock() + delete(s.resources, sessionID) +} + type sessionToolsStore struct { mu sync.RWMutex tools map[string]map[string]ServerTool // sessionID -> toolName -> tool @@ -749,13 +848,17 @@ func newSessionToolsStore() *sessionToolsStore { func (s *sessionToolsStore) get(sessionID string) map[string]ServerTool { s.mu.RLock() defer s.mu.RUnlock() - return s.tools[sessionID] + cloned := make(map[string]ServerTool, len(s.tools[sessionID])) + maps.Copy(cloned, s.tools[sessionID]) + return cloned } func (s *sessionToolsStore) set(sessionID string, tools map[string]ServerTool) { s.mu.Lock() defer s.mu.Unlock() - s.tools[sessionID] = tools + cloned := make(map[string]ServerTool, len(tools)) + maps.Copy(cloned, tools) + s.tools[sessionID] = cloned } func (s *sessionToolsStore) delete(sessionID string) { @@ -791,6 +894,7 @@ type streamableHttpSession struct { sessionID string notificationChannel chan mcp.JSONRPCNotification // server -> client notifications tools *sessionToolsStore + resources *sessionResourcesStore upgradeToSSE atomic.Bool logLevels *sessionLogLevelsStore @@ -802,11 +906,12 @@ type streamableHttpSession struct { requestIDCounter atomic.Int64 // for generating unique request IDs 
} -func newStreamableHttpSession(sessionID string, toolStore *sessionToolsStore, levels *sessionLogLevelsStore) *streamableHttpSession { +func newStreamableHttpSession(sessionID string, toolStore *sessionToolsStore, resourcesStore *sessionResourcesStore, levels *sessionLogLevelsStore) *streamableHttpSession { s := &streamableHttpSession{ sessionID: sessionID, notificationChannel: make(chan mcp.JSONRPCNotification, 100), tools: toolStore, + resources: resourcesStore, logLevels: levels, samplingRequestChan: make(chan samplingRequestItem, 10), elicitationRequestChan: make(chan elicitationRequestItem, 10), @@ -850,9 +955,18 @@ func (s *streamableHttpSession) SetSessionTools(tools map[string]ServerTool) { s.tools.set(s.sessionID, tools) } +func (s *streamableHttpSession) GetSessionResources() map[string]ServerResource { + return s.resources.get(s.sessionID) +} + +func (s *streamableHttpSession) SetSessionResources(resources map[string]ServerResource) { + s.resources.set(s.sessionID, resources) +} + var ( - _ SessionWithTools = (*streamableHttpSession)(nil) - _ SessionWithLogging = (*streamableHttpSession)(nil) + _ SessionWithTools = (*streamableHttpSession)(nil) + _ SessionWithResources = (*streamableHttpSession)(nil) + _ SessionWithLogging = (*streamableHttpSession)(nil) ) func (s *streamableHttpSession) UpgradeToSSEWhenReceiveNotification() { @@ -900,6 +1014,17 @@ func (s *streamableHttpSession) RequestSampling(ctx context.Context, request mcp if err := json.Unmarshal(response.result, &result); err != nil { return nil, fmt.Errorf("failed to unmarshal sampling response: %v", err) } + + // Parse content from map[string]any to proper Content type (TextContent, ImageContent, AudioContent) + // HTTP transport unmarshals Content as map[string]any, we need to convert it to the proper type + if contentMap, ok := result.Content.(map[string]any); ok { + content, err := mcp.ParseContent(contentMap) + if err != nil { + return nil, fmt.Errorf("failed to parse sampling response 
content: %w", err) + } + result.Content = content + } + return &result, nil case <-ctx.Done(): return nil, ctx.Err() @@ -984,29 +1109,47 @@ func (s *StatelessSessionIdManager) Terminate(sessionID string) (isNotAllowed bo return false, nil } -// InsecureStatefulSessionIdManager generate id with uuid -// It won't validate the id indeed, so it could be fake. +// InsecureStatefulSessionIdManager generate id with uuid and tracks active sessions. +// It validates both format and existence of session IDs. // For more secure session id, use a more complex generator, like a JWT. -type InsecureStatefulSessionIdManager struct{} +type InsecureStatefulSessionIdManager struct { + sessions sync.Map + terminated sync.Map +} const idPrefix = "mcp-session-" func (s *InsecureStatefulSessionIdManager) Generate() string { - return idPrefix + uuid.New().String() + sessionID := idPrefix + uuid.New().String() + s.sessions.Store(sessionID, true) + return sessionID } func (s *InsecureStatefulSessionIdManager) Validate(sessionID string) (isTerminated bool, err error) { - // validate the session id is a valid uuid if !strings.HasPrefix(sessionID, idPrefix) { return false, fmt.Errorf("invalid session id: %s", sessionID) } if _, err := uuid.Parse(sessionID[len(idPrefix):]); err != nil { return false, fmt.Errorf("invalid session id: %s", sessionID) } + if _, exists := s.terminated.Load(sessionID); exists { + return true, nil + } + if _, exists := s.sessions.Load(sessionID); !exists { + return false, fmt.Errorf("session not found: %s", sessionID) + } return false, nil } func (s *InsecureStatefulSessionIdManager) Terminate(sessionID string) (isNotAllowed bool, err error) { + if _, exists := s.terminated.Load(sessionID); exists { + return false, nil + } + if _, exists := s.sessions.Load(sessionID); !exists { + return false, nil + } + s.terminated.Store(sessionID, true) + s.sessions.Delete(sessionID) return false, nil } diff --git a/vendor/modules.txt b/vendor/modules.txt index 881ec391..a8d920a9 
100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -228,8 +228,8 @@ github.com/liggitt/tabwriter github.com/mailru/easyjson/buffer github.com/mailru/easyjson/jlexer github.com/mailru/easyjson/jwriter -# github.com/mark3labs/mcp-go v0.41.1 -## explicit; go 1.23 +# github.com/mark3labs/mcp-go v0.42.0 +## explicit; go 1.23.0 github.com/mark3labs/mcp-go/client github.com/mark3labs/mcp-go/client/transport github.com/mark3labs/mcp-go/mcp