Fix error removing a policy from the policy engine when there are no policies for an existing deployment. #692

name: Operator Integration Test
on:
workflow_dispatch:
pull_request:
branches:
- main
paths:
- 'gateway/**'
- 'kubernetes/**'
- '.github/workflows/operator-integration-test.yml'
env:
DOCKER_REGISTRY: localhost
VERSION: test
KIND_CLUSTER_NAME: operator-test
jobs:
integration-test:
runs-on: ubuntu-24.04
steps:
- name: Checkout code
uses: actions/checkout@v4
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: '1.25'
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
with:
platforms: linux/amd64,linux/arm64
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
driver: docker
- name: Create Kind cluster
uses: helm/kind-action@v1
with:
cluster_name: ${{ env.KIND_CLUSTER_NAME }}
wait: 300s
- name: Build Gateway
run: |
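# Build the gateway images locally; they are loaded into the Kind cluster in a later step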
cd gateway
export DOCKER_REGISTRY=${{ env.DOCKER_REGISTRY }}
make build VERSION=${{ env.VERSION }}
- name: Build Operator image
run: |
cd kubernetes/gateway-operator
IMG=${{ env.DOCKER_REGISTRY }}/gateway-operator:${{ env.VERSION }} make docker-build
- name: Build Mock JWKS image
run: |
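# The mock JWKS server issues test JWTs and serves the matching JWKS used by the jwt-auth policy tests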
cd tests/mock-servers/mock-jwks
docker build -t ${{ env.DOCKER_REGISTRY }}/mock-jwks:${{ env.VERSION }} .
- name: Load images into Kind
run: |
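# Kind nodes cannot pull from the host Docker daemon, so preload the locally built images into the cluster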
kind load docker-image ${{ env.DOCKER_REGISTRY }}/gateway-controller:${{ env.VERSION }} --name ${{ env.KIND_CLUSTER_NAME }}
kind load docker-image ${{ env.DOCKER_REGISTRY }}/gateway-runtime:${{ env.VERSION }} --name ${{ env.KIND_CLUSTER_NAME }}
kind load docker-image ${{ env.DOCKER_REGISTRY }}/gateway-operator:${{ env.VERSION }} --name ${{ env.KIND_CLUSTER_NAME }}
kind load docker-image ${{ env.DOCKER_REGISTRY }}/mock-jwks:${{ env.VERSION }} --name ${{ env.KIND_CLUSTER_NAME }}
- name: Deploy OCI Registry (HTTP)
run: |
# Create namespace
kubectl create namespace registry
# Deploy registry without TLS (plain HTTP for testing)
cat <<EOF | kubectl apply -f -
apiVersion: apps/v1
kind: Deployment
metadata:
name: registry
namespace: registry
spec:
replicas: 1
selector:
matchLabels:
app: registry
template:
metadata:
labels:
app: registry
spec:
containers:
- name: registry
image: registry:2
ports:
- containerPort: 5000
---
apiVersion: v1
kind: Service
metadata:
name: registry
namespace: registry
spec:
selector:
app: registry
ports:
- port: 5000
targetPort: 5000
EOF
kubectl wait --for=condition=available deployment/registry -n registry --timeout=120s
kubectl wait --for=condition=ready pod -l app=registry -n registry --timeout=120s
- name: Package and push Gateway Helm chart to OCI Registry
run: |
cd kubernetes/helm/gateway-helm-chart
helm package . --version 0.0.0-test
# Port forward registry to push chart (plain HTTP)
kubectl port-forward svc/registry -n registry 5000:5000 &
sleep 5
# Push chart to OCI registry (plain HTTP, use insecure flag)
helm push gateway-0.0.0-test.tgz oci://localhost:5000/charts --plain-http
# Kill port forward
pkill -f "kubectl port-forward.*registry" || true
- name: Deploy mock httpbin backend
run: |
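# httpbin serves as the upstream backend for the test APIs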
cat <<EOF | kubectl apply -f -
apiVersion: apps/v1
kind: Deployment
metadata:
name: httpbin
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: httpbin
template:
metadata:
labels:
app: httpbin
spec:
containers:
- name: httpbin
image: kennethreitz/httpbin:latest
ports:
- containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
name: httpbin
namespace: default
spec:
selector:
app: httpbin
ports:
- port: 80
targetPort: 80
EOF
kubectl wait --for=condition=ready pod -l app=httpbin --timeout=120s
- name: Deploy Mock JWKS Service
run: |
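# Deploy the mock JWKS image built earlier; the MockKeyManager issuer in the gateway config points at this service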
cat <<EOF | kubectl apply -f -
apiVersion: apps/v1
kind: Deployment
metadata:
name: mock-jwks
namespace: default
spec:
replicas: 1
selector:
matchLabels:
app: mock-jwks
template:
metadata:
labels:
app: mock-jwks
spec:
containers:
- name: mock-jwks
image: ${{ env.DOCKER_REGISTRY }}/mock-jwks:${{ env.VERSION }}
ports:
- containerPort: 8080
---
apiVersion: v1
kind: Service
metadata:
name: mock-jwks
namespace: default
spec:
selector:
app: mock-jwks
ports:
- port: 8080
targetPort: 8080
EOF
kubectl wait --for=condition=ready pod -l app=mock-jwks --timeout=120s
- name: Install cert-manager
run: |
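# cert-manager provisions the self-signed listener certificate requested by the gateway chart (certificateProvider: cert-manager)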
helm upgrade --install cert-manager oci://quay.io/jetstack/charts/cert-manager \
--version v1.17.2 \
--namespace cert-manager \
--create-namespace \
--set crds.enabled=true \
--wait --timeout 5m
- name: Install Operator Helm chart
run: |
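# Point the operator at the gateway chart pushed to the in-cluster OCI registry; pullPolicy=Never uses the preloaded operator image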
helm upgrade --install gateway-operator ./kubernetes/helm/operator-helm-chart \
--namespace operator \
--create-namespace \
--set image.repository=${{ env.DOCKER_REGISTRY }}/gateway-operator \
--set image.tag=${{ env.VERSION }} \
--set image.pullPolicy=Never \
--set gateway.helm.chartName=oci://registry.registry.svc.cluster.local:5000/charts/gateway \
--set gateway.helm.chartVersion=0.0.0-test \
--set gateway.helm.plainHTTP=true \
--wait --timeout 5m
kubectl wait --for=condition=ready pod -l app.kubernetes.io/name=gateway-operator -n operator --timeout=120s
- name: Create test APIGateway
run: |
# Create ConfigMap with gateway values pointing to test images
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: ConfigMap
metadata:
name: test-gateway-config
namespace: default
data:
values.yaml: |
# Default values for the Open Choreo Gateway chart.
# This file is intended to be exhaustive and well documented; adjust per environment.
nameOverride: ""
fullnameOverride: ""
imagePullSecrets: []
commonLabels: {}
commonAnnotations: {}
serviceAccount:
create: true
annotations: {}
name: ""
## moved to each component's deployment block below
# Component-level configuration
gateway:
# Shared configuration for gateway-controller and policy-engine
# This config is mounted to both components as /configs/config.yaml
config:
gateway_controller:
auth:
# Local basic authentication configuration
basic:
enabled: true
# Local users
users:
# Example user entries:
# - username: "user1"
# password: "$argon2id.." # encrypted hash or plain string depending on implementation
# password_hashed: true
# roles: ["developer", "viewer"]
- username: "admin"
password: "admin"
password_hashed: false
roles: ["admin"]
# Server configuration
server:
# REST API port for gateway management
api_port: 9090
# xDS gRPC port for Envoy communication
xds_port: 18000
# Graceful shutdown timeout
shutdown_timeout: 15s
# Policy xDS Server configuration
policyserver:
# Enable or disable the policy xDS server
enabled: true
# Policy xDS gRPC port for policy distribution
port: 18001
# TLS configuration for secure policy xDS communication
tls:
# Enable or disable TLS
enabled: false
# Path to TLS certificate file (required if TLS is enabled)
cert_file: "./certs/server.crt"
# Path to TLS private key file (required if TLS is enabled)
key_file: "./certs/server.key"
# Storage configuration
storage:
# Storage type: "sqlite", "postgres" (future), or "memory"
# - sqlite: Use SQLite embedded database for persistence
# - postgres: Use PostgreSQL database for persistence (future support)
# - memory: No persistent storage, all configs lost on restart (useful for testing)
type: sqlite
# SQLite configuration (used when type=sqlite)
sqlite:
path: ./data/gateway.db
# Policy configuration
policies:
# Directory containing policy definitions
definitions_path: ./default-policies
# Router (Envoy) configuration
router:
# Gateway host for incoming requests
gateway_host: "*"
# Access logs configuration
access_logs:
# Enable or disable access logs
enabled: true
# Log format: "json" or "text"
# - json: Structured JSON format (recommended for log aggregation)
# - text: Human-readable text format
format: json
# JSON format fields - key-value pairs for structured logging
# Uses Envoy command operators: https://www.envoyproxy.io/docs/envoy/latest/configuration/observability/access_log/usage
json_fields:
start_time: "%START_TIME%"
method: "%REQ(:METHOD)%"
path: "%REQ(X-ENVOY-ORIGINAL-PATH?:PATH)%"
protocol: "%PROTOCOL%"
response_code: "%RESPONSE_CODE%"
response_flags: "%RESPONSE_FLAGS%"
response_flags_long: "%RESPONSE_FLAGS_LONG%"
bytes_received: "%BYTES_RECEIVED%"
bytes_sent: "%BYTES_SENT%"
duration: "%DURATION%"
upstream_service_time: "%RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)%"
x_forwarded_for: "%REQ(X-FORWARDED-FOR)%"
user_agent: "%REQ(USER-AGENT)%"
request_id: "%REQ(X-REQUEST-ID)%"
authority: "%REQ(:AUTHORITY)%"
upstream_host: "%UPSTREAM_HOST%"
upstream_cluster: "%UPSTREAM_CLUSTER%"
# Text format template - used when format is "text"
# Uses Envoy command operators: https://www.envoyproxy.io/docs/envoy/latest/configuration/observability/access_log/usage
text_format: |
[%START_TIME%] "%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% %PROTOCOL%" %RESPONSE_CODE% %RESPONSE_FLAGS% %BYTES_RECEIVED% %BYTES_SENT% %DURATION% "%REQ(X-FORWARDED-FOR)%" "%REQ(USER-AGENT)%" "%REQ(X-REQUEST-ID)%" "%REQ(:AUTHORITY)%" "%UPSTREAM_HOST%"
# Listener port for incoming HTTP traffic (Envoy proxy port)
listener_port: 8080
# HTTPS listener configuration
https_enabled: true # Enable/disable HTTPS listener
https_port: 8443 # HTTPS listener port
# Downstream TLS configuration (for HTTPS listener)
downstream_tls:
# Path to server certificate (PEM format)
cert_path: "./listener-certs/default-listener.crt"
# Path to server private key (PEM format)
key_path: "./listener-certs/default-listener.key"
# Minimum TLS protocol version (TLS1_0, TLS1_1, TLS1_2, TLS1_3)
minimum_protocol_version: TLS1_2
# Maximum TLS protocol version (TLS1_0, TLS1_1, TLS1_2, TLS1_3)
maximum_protocol_version: TLS1_3
# Cipher suites (comma-separated)
ciphers: "ECDHE-ECDSA-AES128-GCM-SHA256,ECDHE-RSA-AES128-GCM-SHA256,ECDHE-ECDSA-AES128-SHA,ECDHE-RSA-AES128-SHA,AES128-GCM-SHA256,AES128-SHA,ECDHE-ECDSA-AES256-GCM-SHA384,ECDHE-RSA-AES256-GCM-SHA384,ECDHE-ECDSA-AES256-SHA,ECDHE-RSA-AES256-SHA,AES256-GCM-SHA384,AES256-SHA"
# Upstream configuration (TLS and timeouts for Envoy upstream)
upstream:
# TLS configuration for upstream connections
tls:
minimum_protocol_version: TLS1_2
maximum_protocol_version: TLS1_3
ciphers: "ECDHE-ECDSA-AES128-GCM-SHA256,ECDHE-RSA-AES128-GCM-SHA256,ECDHE-ECDSA-AES128-SHA,ECDHE-RSA-AES128-SHA,AES128-GCM-SHA256,AES128-SHA,ECDHE-ECDSA-AES256-GCM-SHA384,ECDHE-RSA-AES256-GCM-SHA384,ECDHE-ECDSA-AES256-SHA,ECDHE-RSA-AES256-SHA,AES256-GCM-SHA384,AES256-SHA"
trusted_cert_path: /etc/ssl/certs/ca-certificates.crt
custom_certs_path: ./certificates # Directory containing custom trusted certificates (e.g., self-signed certs)
verify_host_name: true
disable_ssl_verification: false
# Timeout configurations for upstream connections (milliseconds)
timeouts:
route_timeout_in_ms: 60000
route_idle_timeout_in_ms: 300000
connect_timeout_in_ms: 5000
# Policy Engine ext_proc filter configuration
policy_engine:
# Enable or disable policy engine ext_proc filter
enabled: true
# Policy engine host (hostname or IP address)
host: policy-engine
# Policy engine ext_proc port
port: 9001
# Timeout for gRPC service connection (in milliseconds)
timeout_ms: 60000
# Failure mode: false = fail closed (deny requests on error), true = fail open (allow requests on error)
failure_mode_allow: false
# Route cache action: DEFAULT, RETAIN, or CLEAR
# RETAIN: Maintain the route cache across requests (recommended for performance)
route_cache_action: RETAIN
# Allow per-route override of ext_proc configuration
allow_mode_override: true
# Request header processing mode: DEFAULT, SEND, or SKIP
# SEND: Forward request headers to policy engine for processing
request_header_mode: SEND
# Message timeout for policy engine processing (in milliseconds)
message_timeout_ms: 60000
# TLS configuration for policy engine connection
tls:
# Enable TLS for secure communication with policy engine
enabled: false
# Client certificate path (for mutual TLS authentication)
cert_path: ""
# Client private key path (for mutual TLS authentication)
key_path: ""
# CA certificate path (for server certificate validation)
ca_path: ""
# Server name for SNI (optional, defaults to host)
server_name: ""
# Skip server certificate verification (insecure, development only)
skip_verify: false
# Logging configuration
logging:
# Log level: "debug", "info", "warn", or "error"
level: debug
# Log format: "json" or "text"
# - json: Structured JSON format (recommended for production)
# - text: Human-readable text format (recommended for development)
format: json
policy_engine:
server:
# Port for ext_proc gRPC server (receives requests from Envoy)
extproc_port: 9001
# Admin HTTP server configuration
admin:
# Enable admin HTTP server for debugging endpoints
enabled: true
# Port for admin HTTP server
port: 9002
# IP addresses allowed to access the admin API
# Defaults to localhost only for security
allowed_ips:
- "*"
- "127.0.0.1"
config_mode:
# Configuration mode: "file" or "xds"
# - file: Load policy chains from static YAML file
# - xds: Subscribe to xDS server for dynamic configuration updates
mode: xds
# xDS client configuration
xds:
# Enable xDS client
enabled: true
# xDS server address (typically the gateway-controller xDS server)
server_address: gateway-controller:18001
# Node identifier for this policy engine instance
# Used by xDS server to identify this client
node_id: policy-engine-1
# Cluster identifier for this policy engine instance
# Used for grouping multiple policy engine instances
cluster: policy-engine-cluster
# Connection timeout for establishing initial connection
connect_timeout: 10s
# Request timeout for individual xDS requests
request_timeout: 5s
# Initial delay before reconnecting after connection failure
initial_reconnect_delay: 1s
# Maximum delay between reconnection attempts (exponential backoff)
max_reconnect_delay: 60s
# TLS configuration for xDS connection
tls:
enabled: false
# cert_path: /path/to/client-cert.pem
# key_path: /path/to/client-key.pem
# ca_path: /path/to/ca-cert.pem
# File-based configuration (not used in xDS mode)
file_config:
path: ""
# Logging configuration
logging:
# Log level: debug, info, warn, error
level: debug
# Log format: json, text
format: json
policy_configurations:
jwtauth_v010:
keymanagers:
- name: WSO2KeyManager1
issuer: https://api.asgardeo.io/t/tharsanan1995/oauth2/token
jwks:
remote:
uri: https://api.asgardeo.io/t/tharsanan1995/oauth2/jwks
skipTlsVerify: false
- name: WSO2KeyManager2
issuer: https://api.asgardeo.io/t/testorgforsecurity/oauth2/token
jwks:
remote:
uri: https://api.asgardeo.io/t/testorgforsecurity/oauth2/jwks
skipTlsVerify: false
- name: MockKeyManager
issuer: http://mock-jwks.default.svc.cluster.local:8080/token
jwks:
remote:
uri: http://mock-jwks.default.svc.cluster.local:8080/jwks
skipTlsVerify: true
jwkscachettl: "5m"
jwksfetchtimeout: "5s"
jwksfetchretrycount: 3
jwksfetchretryinterval: "2s"
allowedalgorithms:
- RS256
- ES256
leeway: "30s"
authheaderscheme: Bearer
headername: Authorization
onfailurestatuscode: 401
errormessageformat: json
errormessage: "Authentication failed."
validateissuer: true
# metadata for the generated shared ConfigMap (annotations / labels)
configMap:
annotations: {}
labels: {}
controller:
image:
repository: localhost/gateway-controller
tag: test
pullPolicy: Never
imagePullSecrets: []
service:
type: ClusterIP
annotations: {}
labels: {}
ports:
rest: 9090
xds: 18000
policy: 18001
controlPlane:
host: host.docker.internal
port: 8443
token:
value: ""
secretName: ""
key: token
# TLS certificate configuration for downstream (listener) HTTPS
tls:
# Enable TLS certificate management
enabled: true
# Certificate provisioning method: "cert-manager", "secret", or "none"
# - cert-manager: Use cert-manager to automatically provision certificates
# - secret: Use an existing Kubernetes secret containing tls.crt and tls.key
# - none: No TLS configuration (certificates must be provided manually)
certificateProvider: cert-manager
# cert-manager configuration (used when certificateProvider=cert-manager)
certManager:
# Create a Certificate resource
create: true
# Create a self-signed Issuer automatically
# If true, a self-signed Issuer will be created with the release name
# If false, you must provide an existing Issuer/ClusterIssuer via issuerRef
createIssuer: true
# Issuer reference for cert-manager
# Note: If createIssuer=true, the 'name' field is ignored and the chart generates a unique name
# Only set this if createIssuer=false to reference an existing Issuer/ClusterIssuer
issuerRef:
name: selfsigned-issuer # Ignored when createIssuer=true
# Use "Issuer" for namespace-scoped issuer (recommended)
# Use "ClusterIssuer" for cluster-wide issuer
kind: Issuer
# group: cert-manager.io # Optional, defaults to cert-manager.io
# Certificate common name
commonName: localhost
# DNS names for the certificate
dnsNames:
- localhost
- "*.localhost"
# Certificate duration (default: 2160h = 90 days)
duration: 2160h
# Certificate renewal time (default: 720h = 30 days before expiry)
renewBefore: 720h
# Secret configuration (used when certificateProvider=secret)
secret:
# Name of existing secret containing tls.crt and tls.key
name: gateway-tls
# Key names in the secret
certKey: tls.crt
keyKey: tls.key
# Upstream certificate configuration for backend TLS verification
upstreamCerts:
# Enable custom upstream certificates
enabled: false
# Existing secret containing custom CA certificates
# Secret should contain one or more keys with PEM-encoded certificates
secretName: ""
# Existing ConfigMap containing custom CA certificates
# ConfigMap should contain one or more keys with PEM-encoded certificates
configMapName: ""
logging:
level: info
storage:
type: sqlite
sqlitePath: ./data/gateway.db
persistence:
enabled: true
existingClaim: ""
accessModes:
- ReadWriteOnce
size: 100Mi
storageClass: ""
deployment:
enabled: true
replicaCount: 1
volumeMountPath: /app/data
extraEnv: []
env:
xdsServerAddress: ""
extraVolumeMounts: []
extraVolumes: []
labels: {}
annotations: {}
podAnnotations: {}
podLabels: {}
priorityClassName: ""
livenessProbe:
httpGet:
path: /health
port: rest
initialDelaySeconds: 10
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 3
readinessProbe:
httpGet:
path: /health
port: rest
initialDelaySeconds: 5
periodSeconds: 5
timeoutSeconds: 3
failureThreshold: 3
# Resource limits and requests
# Uncomment and adjust based on your workload
resources: {}
# limits:
# cpu: 500m
# memory: 512Mi
# requests:
# cpu: 250m
# memory: 256Mi
podSecurityContext: {}
securityContext: {}
nodeSelector: {}
tolerations: []
affinity: {}
gatewayRuntime:
image:
repository: localhost/gateway-runtime
tag: "test"
pullPolicy: Never
imagePullSecrets: []
service:
type: ClusterIP
annotations: {}
labels: {}
ports:
http: 8080
https: 8443
envoyAdmin: 9901
policyEngineAdmin: 9002
policyEngineMetrics: 9003
deployment:
enabled: true
replicaCount: 1
extraEnv: []
labels: {}
annotations: {}
podAnnotations: {}
podLabels: {}
priorityClassName: ""
livenessProbe:
httpGet:
path: /server_info
port: envoy-admin
initialDelaySeconds: 30
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 3
readinessProbe:
httpGet:
path: /server_info
port: envoy-admin
initialDelaySeconds: 10
periodSeconds: 5
timeoutSeconds: 3
failureThreshold: 6
resources: {}
podSecurityContext: {}
securityContext: {}
nodeSelector: {}
tolerations: []
affinity: {}
EOF
# Create Gateway CR
cat <<EOF | kubectl apply -f -
apiVersion: gateway.api-platform.wso2.com/v1alpha1
kind: APIGateway
metadata:
name: test-gateway
namespace: default
spec:
apiSelector:
scope: Cluster
configRef:
name: test-gateway-config
EOF
- name: Wait for APIGateway to be ready
run: |
echo "Waiting for APIGateway to be programmed..."
kubectl wait --for=condition=Programmed apigateway/test-gateway --timeout=180s
echo "APIGateway status:"
kubectl get apigateway test-gateway -o yaml
- name: Create test RestApi
run: |
cat <<EOF | kubectl apply -f -
apiVersion: gateway.api-platform.wso2.com/v1alpha1
kind: RestApi
metadata:
name: test-api
namespace: default
spec:
displayName: test-api
version: v1.0
context: /test
upstream:
main:
url: http://httpbin.default.svc.cluster.local:80
operations:
- method: GET
path: /get
- method: POST
path: /post
EOF
- name: Wait for API to be programmed
run: |
echo "Waiting for RestApi to be programmed..."
kubectl wait --for=condition=Programmed restapi/test-api --timeout=120s
echo "RestApi status:"
kubectl get restapi test-api -o yaml
- name: Test API invocation
run: |
# Give the gateway runtime time to apply the new routes, then port-forward the router
sleep 10
kubectl port-forward svc/test-gateway-gateway-gateway-runtime 8080:8080 &
sleep 10
echo "Testing API invocation..."
# Test GET request
response=$(curl -s -o /dev/null -w "%{http_code}" http://localhost:8080/test/get)
echo "GET /test/get returned: $response"
if [ "$response" != "200" ]; then
echo "API test failed with status $response"
curl -v http://localhost:8080/test/get || true
exit 1
fi
echo "API invocation test passed!"
- name: Update test RestApi
run: |
echo "Updating test-api to add PUT /put..."
cat <<EOF | kubectl apply -f -
apiVersion: gateway.api-platform.wso2.com/v1alpha1
kind: RestApi
metadata:
name: test-api
namespace: default
spec:
displayName: test-api
version: v1.0
context: /test
upstream:
main:
url: http://httpbin.default.svc.cluster.local:80
operations:
- method: GET
path: /get
- method: POST
path: /post
- method: PUT
path: /put
EOF
- name: Wait for updated API to be programmed
run: |
echo "Waiting for updated RestApi to be programmed..."
# Give it a few seconds to trigger reconciliation
sleep 10
kubectl wait --for=condition=Programmed restapi/test-api --timeout=120s
echo "RestApi status after update:"
kubectl get restapi test-api -o yaml
- name: Test updated API invocation
run: |
echo "Testing updated API invocation (PUT /test/put)..."
# Test PUT request
response=$(curl -s -o /dev/null -w "%{http_code}" -X PUT http://localhost:8080/test/put)
echo "PUT /test/put returned: $response"
if [ "$response" != "200" ]; then
echo "Updated API test failed with status $response"
curl -v -X PUT http://localhost:8080/test/put || true
exit 1
fi
echo "Updated API invocation test passed!"
- name: Create JWT Protected API
run: |
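# The jwt-auth policy restricts this API to tokens from the MockKeyManager issuer configured in the gateway values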
cat <<EOF | kubectl apply -f -
apiVersion: gateway.api-platform.wso2.com/v1alpha1
kind: RestApi
metadata:
name: jwt-api
namespace: default
spec:
displayName: jwt-api
version: v1.0
context: /secure
upstream:
main:
url: http://httpbin.default.svc.cluster.local:80
policies:
- name: jwt-auth
version: v0
params:
issuers:
- MockKeyManager
operations:
- method: GET
path: /get
EOF
- name: Wait for JWT API to be programmed
run: |
echo "Waiting for JWT API to be programmed..."
kubectl wait --for=condition=Programmed restapi/jwt-api --timeout=120s
echo "RestApi status:"
kubectl get restapi jwt-api -o yaml
- name: Test JWT API invocation
run: |
# Obtain a test token from the mock JWKS service via a temporary port-forward
kubectl port-forward svc/mock-jwks 8081:8080 &
PID=$!
sleep 5
TOKEN=$(curl -s http://localhost:8081/token)
echo "Got token: ${TOKEN:0:20}..."
# Kill port forward
kill $PID || true
# The router port-forward on 8080 started in the "Test API invocation" step is still running in the background
echo "Testing JWT API invocation..."
# 1. Test without token (should fail)
status=$(curl -s -o /dev/null -w "%{http_code}" http://localhost:8080/secure/get)
echo "GET /secure/get (no token) returned: $status"
if [ "$status" != "401" ]; then
echo "Expected 401, got $status"
exit 1
fi
# 2. Test with token (should pass)
status=$(curl -s -o /dev/null -w "%{http_code}" -H "Authorization: Bearer $TOKEN" http://localhost:8080/secure/get)
echo "GET /secure/get (with token) returned: $status"
if [ "$status" != "200" ]; then
echo "Expected 200, got $status"
curl -v -H "Authorization: Bearer $TOKEN" http://localhost:8080/secure/get || true
exit 1
fi
echo "JWT API invocation test passed!"
- name: Update JWT RestApi with dummy issuer
run: |
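# DummyKeyManager is not among the configured key managers, so tokens issued by MockKeyManager should now be rejected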
echo "Updating jwt-api to use DummyKeyManager..."
cat <<EOF | kubectl apply -f -
apiVersion: gateway.api-platform.wso2.com/v1alpha1
kind: RestApi
metadata:
name: jwt-api
namespace: default
spec:
displayName: jwt-api
version: v1.0
context: /secure
upstream:
main:
url: http://httpbin.default.svc.cluster.local:80
policies:
- name: jwt-auth
version: v0
params:
issuers:
- DummyKeyManager
operations:
- method: GET
path: /get
EOF
- name: Wait for updated JWT API to be programmed
run: |
echo "Waiting for updated JWT API to be programmed..."
# Give it a few seconds to trigger reconciliation
sleep 10
kubectl wait --for=condition=Programmed restapi/jwt-api --timeout=120s
echo "RestApi status after update:"
kubectl get restapi jwt-api -o yaml
- name: Test updated JWT API invocation
run: |
# Get a token from the mock service again to be sure
kubectl port-forward svc/mock-jwks 8081:8080 &
PID=$!
sleep 5
TOKEN=$(curl -s http://localhost:8081/token)
kill $PID || true
echo "Testing updated JWT API invocation (should fail with 401)..."
# Test with previously valid token (should now fail because issuer is different)
status=$(curl -s -o /dev/null -w "%{http_code}" -H "Authorization: Bearer $TOKEN" http://localhost:8080/secure/get)
echo "GET /secure/get (with token) returned: $status"
if [ "$status" != "401" ]; then
echo "Expected 401, got $status. The dummy issuer configuration did not reject the token."
exit 1
fi
echo "Updated JWT API invocation test passed (correctly rejected)!"
- name: Test Gateway Helm Upgrade
run: |
echo "Updating ConfigMap to trigger Helm Upgrade..."
kubectl get configmap test-gateway-config -o jsonpath='{.data.values\.yaml}' > values.yaml
# Replace annotations to trigger upgrade
sed -i 's/annotations: {}/annotations: {helm-upgrade-test: "true"}/g' values.yaml
kubectl create configmap test-gateway-config --from-file=values.yaml=values.yaml --dry-run=client -o yaml | kubectl apply -f -
echo "Waiting for deployment update..."
# Give the operator a moment to detect and start the upgrade
sleep 5
kubectl wait --for=jsonpath='{.metadata.annotations.helm-upgrade-test}=true' deployment/test-gateway-gateway-gateway-runtime --timeout=180s
echo "Deployment metadata updated. Waiting for rollout to complete..."
# Wait for the deployments to finish rolling out to ensure stability
kubectl rollout status deployment/test-gateway-gateway-controller --timeout=300s
kubectl rollout status deployment/test-gateway-gateway-gateway-runtime --timeout=300s
echo "Waiting for Gateway CR to be Programmed..."
kubectl wait --for=condition=Programmed apigateway/test-gateway --timeout=180s
echo "APIGateway rollout complete. Verifying Controller health..."
# Give it a moment to stabilize
sleep 20
# Port forward the new controller to check health
kubectl port-forward svc/test-gateway-gateway-controller 9091:9090 &
PF_PID=$!
sleep 5
HEALTH_STATUS=$(curl -s -o /dev/null -w "%{http_code}" http://localhost:9091/health)
echo "Controller Health Status: $HEALTH_STATUS"
kill $PF_PID || true
if [ "$HEALTH_STATUS" != "200" ]; then
echo "APIGateway Controller is not healthy after upgrade!"
exit 1
fi
echo "Helm upgrade, rollout, and health check verified."
- name: Multi-namespace API test
run: |
# Re-establish port-forward (previous one died during Helm upgrade rollout)
pkill -f "kubectl port-forward.*8080:8080" || true
sleep 2
kubectl port-forward svc/test-gateway-gateway-gateway-runtime 8080:8080 &
sleep 5
echo "Creating namespace test-ns..."
kubectl create namespace test-ns
echo "Creating RestApi in test-ns..."
cat <<EOF | kubectl apply -f -
apiVersion: gateway.api-platform.wso2.com/v1alpha1
kind: RestApi
metadata:
name: multi-ns-api
namespace: test-ns
spec:
displayName: multi-ns-api
version: v1.0
context: /multi-ns
upstream:
main:
url: http://httpbin.default.svc.cluster.local:80
operations:
- method: GET
path: /get
EOF
echo "Waiting for multi-namespace API to be programmed..."
# Give it a few seconds to trigger reconciliation
sleep 10
kubectl wait --for=condition=Programmed restapi/multi-ns-api -n test-ns --timeout=120s
echo "Testing multi-namespace API invocation..."
response=$(curl -s -o /dev/null -w "%{http_code}" http://localhost:8080/multi-ns/get)
echo "GET /multi-ns/get returned: $response"
if [ "$response" != "200" ]; then
echo "Multi-namespace API test failed with status $response"
curl -v http://localhost:8080/multi-ns/get || true
exit 1
fi
echo "Multi-namespace API invocation test passed!"
echo "Cleaning up multi-namespace API..."
kubectl delete restapi multi-ns-api -n test-ns
kubectl delete namespace test-ns
- name: Delete test RestApi
run: |
echo "Deleting RestApi test-api..."
kubectl delete restapi test-api
echo "Waiting for routes to be removed..."
# Give it some time for the operator to sync and Envoy to update
sleep 15
echo "Verifying API is no longer reachable (expecting 404)..."
status=$(curl -s -o /dev/null -w "%{http_code}" http://localhost:8080/test/get)
echo "GET /test/get returned: $status"
if [ "$status" != "404" ]; then
echo "Expected 404, got $status. API route was not removed correctly."
exit 1
fi
echo "RestApi deletion and route removal verified!"
- name: Delete test APIGateway
run: |
echo "Deleting APIGateway test-gateway..."
kubectl delete apigateway test-gateway
echo "Waiting for resources to be cleaned up..."
# Cleanup can take a bit longer as it involves uninstalling Helm charts
sleep 30
echo "Verifying gateway resources are removed..."
# Check for pods with the gateway labels
pods=$(kubectl get pods -l app.kubernetes.io/instance=test-gateway-gateway --no-headers | wc -l)
echo "Found $pods pods remaining."
if [ "$pods" -ne "0" ]; then
echo "APIGateway pods were not removed!"
kubectl get pods -A
exit 1
fi
# Check for services
services=$(kubectl get svc -l app.kubernetes.io/instance=test-gateway-gateway --no-headers | wc -l)
echo "Found $services services remaining."
if [ "$services" -ne "0" ]; then
echo "APIGateway services were not removed!"
kubectl get svc -A
exit 1
fi
echo "APIGateway deletion and resource cleanup verified!"
- name: Verify Scoped Mode
run: |
echo "=== Starting Scoped Mode Integration Test ==="
# 1. Teardown existing operator (from prior steps)
echo "Uninstalling global operator..."
helm uninstall gateway-operator -n operator
kubectl delete ns operator --ignore-not-found
# 2. Install Scoped Operator
echo "Creating scoped-test namespace..."
kubectl create ns scoped-test --dry-run=client -o yaml | kubectl apply -f -
echo "Installing operator in scoped mode (watchNamespaces={scoped-test})..."
helm install gateway-operator ./kubernetes/helm/operator-helm-chart \
--namespace scoped-test \
--set image.repository=${{ env.DOCKER_REGISTRY }}/gateway-operator \
--set image.tag=${{ env.VERSION }} \
--set image.pullPolicy=Never \
--set gateway.helm.chartName=oci://registry.registry.svc.cluster.local:5000/charts/gateway \
--set gateway.helm.chartVersion=0.0.0-test \
--set gateway.helm.plainHTTP=true \
--set watchNamespaces="{scoped-test}" \
--wait --timeout 5m
echo "Verifying operator pod is running..."
kubectl wait --for=condition=ready pod -l app.kubernetes.io/name=gateway-operator -n scoped-test --timeout=120s
echo "Verifying RBAC (Scoped Mode)..."
# Expect Role/RoleBinding in scoped-test
if ! kubectl get role -n scoped-test gateway-operator-manager-role; then
echo "FAILED: Role not found in scoped-test"
exit 1
fi
if ! kubectl get rolebinding -n scoped-test gateway-operator-manager-rolebinding; then
echo "FAILED: RoleBinding not found in scoped-test"
exit 1
fi
# Expect NO ClusterRole/ClusterRoleBinding
if kubectl get clusterrole gateway-operator-manager-role; then
echo "FAILED: ClusterRole found but should be disabled in scoped mode"
exit 1
fi
if kubectl get clusterrolebinding gateway-operator-manager-binding; then
echo "FAILED: ClusterRoleBinding found but should be disabled in scoped mode"
exit 1
fi
# 3. Positive Test: Deploy Gateway & API in scoped-test
echo "Deploying APIGateway in scoped-test..."
# Configure Gateway to use test images (same configmap as before but in new namespace)
kubectl create configmap test-gateway-config --from-file=values.yaml=<(kubectl get cm test-gateway-config -n default -o jsonpath='{.data.values\.yaml}') -n scoped-test
cat <<EOF | kubectl apply -f -
apiVersion: gateway.api-platform.wso2.com/v1alpha1
kind: APIGateway
metadata:
name: scoped-gateway
namespace: scoped-test
spec:
apiSelector:
scope: Cluster
configRef:
name: test-gateway-config
EOF
echo "Waiting for scoped-gateway to be Ready..."
kubectl wait --for=condition=Programmed apigateway/scoped-gateway -n scoped-test --timeout=180s
echo "Deploying RestApi in scoped-test..."
cat <<EOF | kubectl apply -f -
apiVersion: gateway.api-platform.wso2.com/v1alpha1
kind: RestApi
metadata:
name: scoped-api
namespace: scoped-test
spec:
displayName: scoped-api
version: v1.0
context: /scoped
upstream:
main:
url: http://httpbin.default.svc.cluster.local:80
operations:
- method: GET
path: /get
EOF
echo "Waiting for scoped-api..."
sleep 5 # Give operator time to reconcile
kubectl wait --for=condition=Programmed restapi/scoped-api -n scoped-test --timeout=120s
echo "Invoking scoped API..."
# Wait for the router deployment to be fully ready
echo "Waiting for scoped gateway router deployment rollout..."
kubectl rollout status deployment/scoped-gateway-gateway-gateway-runtime -n scoped-test --timeout=120s
# Additional wait for Envoy to initialize and start listening
echo "Waiting for router to be ready to accept connections..."
sleep 10
# Port forward the scoped gateway router
kubectl port-forward svc/scoped-gateway-gateway-gateway-runtime -n scoped-test 9090:8080 &
PF_PID=$!
sleep 5
# Retry logic for API invocation
MAX_RETRIES=5
RETRY_DELAY=5
CODE=""
for i in $(seq 1 $MAX_RETRIES); do
echo "Attempt $i of $MAX_RETRIES..."
CODE=$(curl -s -o /dev/null -w "%{http_code}" --connect-timeout 5 --max-time 10 http://localhost:9090/scoped/get 2>/dev/null || echo "000")
echo "Scoped API returned: $CODE"
if [ "$CODE" = "200" ]; then
break
fi
if [ "$i" -lt "$MAX_RETRIES" ]; then
echo "Retrying in ${RETRY_DELAY}s..."
sleep $RETRY_DELAY
fi
done
kill $PF_PID || true
if [ "$CODE" != "200" ]; then
echo "FAILED: Scoped API invocation failed after $MAX_RETRIES attempts"
exit 1
fi
# 4. Negative Test: Deploy API in ignored-ns
echo "Creating ignored-ns..."
kubectl create ns ignored-ns --dry-run=client -o yaml | kubectl apply -f -
echo "Deploying RestApi in ignored-ns (should be ignored)..."
cat <<EOF | kubectl apply -f -
apiVersion: gateway.api-platform.wso2.com/v1alpha1
kind: RestApi
metadata:
name: ignored-api
namespace: ignored-ns
spec:
displayName: ignored-api
version: v1.0
context: /ignored-api
upstream:
main:
url: http://httpbin.default.svc.cluster.local:80
operations:
- method: GET
path: /get
EOF
echo "Checking status of ignored-api (should be empty)..."
sleep 10
STATUS=$(kubectl get restapi ignored-api -n ignored-ns -o jsonpath='{.status}')
if [ -n "$STATUS" ] && [ "$STATUS" != "<nil>" ]; then
echo "FAILED: RestApi in ignored-ns has status! Operator is NOT restricted properly."
echo "Status: $STATUS"
exit 1
fi
echo "SUCCESS: RestApi in ignored-ns was ignored (no status update)."
# Cleanup
echo "Cleaning up scoped resources..."
kubectl delete apigateway scoped-gateway -n scoped-test --ignore-not-found
kubectl delete restapi scoped-api -n scoped-test --ignore-not-found
# Wait for resources to be deleted to ensure finalizers are handled
kubectl wait --for=delete apigateway/scoped-gateway -n scoped-test --timeout=60s || true
kubectl wait --for=delete restapi/scoped-api -n scoped-test --timeout=60s || true
helm uninstall gateway-operator -n scoped-test
kubectl delete ns scoped-test ignored-ns --ignore-not-found
- name: Debug on failure - Dump logs
if: failure()
run: |
echo "=== Operator Logs ==="
kubectl logs -n operator -l app.kubernetes.io/name=gateway-operator --tail=200 || true
echo ""
echo "=== Scoped Operator Logs ==="
kubectl logs -n scoped-test -l app.kubernetes.io/name=gateway-operator --tail=200 || true
echo ""
echo "=== APIGateway Status ==="
kubectl describe apigateway test-gateway || true
echo ""
echo "=== Scoped APIGateway Status ==="
kubectl describe apigateway scoped-gateway -n scoped-test || true
echo ""
echo "=== RestApi Status ==="
kubectl describe restapi test-api || true
echo ""
echo "=== Scoped RestApi Status ==="
kubectl describe restapi scoped-api -n scoped-test || true
echo ""
echo "=== All Pods Description ==="
kubectl describe pods --all-namespaces || true
echo ""
echo "=== Gateway Config ConfigMap ==="
kubectl get configmap -l app.kubernetes.io/instance=test-gateway-gateway -o yaml || true
echo ""
echo "=== APIGateway Controller Logs ==="
kubectl logs -l app.kubernetes.io/component=controller --tail=200 || true
echo ""
echo "=== Scoped APIGateway Controller Logs ==="
kubectl logs -n scoped-test -l app.kubernetes.io/component=controller --tail=200 || true
echo ""
echo "=== Gateway Runtime Logs ==="
kubectl logs -l app.kubernetes.io/component=gateway-runtime --tail=200 || true
echo ""
echo "=== Scoped Gateway Runtime Logs ==="
kubectl logs -n scoped-test -l app.kubernetes.io/component=gateway-runtime --tail=200 || true
echo ""
echo "=== All Pods ==="
kubectl get pods -A
echo ""
echo "=== All Services ==="
kubectl get svc -A