Commit ed0058b

Dev env to support user auth (#79)
* [cherry-pick] dev-env to use external-secrets

* dev-env: support user-auth

  This requires a separate, shared Kratos to be spun up for dev. Endpoints change to the following format:

      previously:        <developer-namespace>.<domain>
      auth endpoints:    dev.<domain>
      backend endpoints: <developer-namespace>.dev.<domain>

  Auth cookies are set on dev.<domain>, so dev environments can share a session.
1 parent 1123dad commit ed0058b
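
To illustrate the endpoint layout described above (a rough sketch only; `example.com` stands in for `<domain>`, `alice` for a developer namespace, and the request path is a placeholder):

    # previously:            https://alice.example.com
    # auth endpoints now:    https://dev.example.com           (shared Kratos)
    # backend endpoints now: https://alice.dev.example.com     (per-developer namespace)

    # The session cookie is issued for dev.example.com, so one login can be reused
    # against any backend under *.dev.example.com, e.g.:
    curl -b cookies.txt https://alice.dev.example.com/api/profile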

File tree

5 files changed: +67 -32 lines

doc-site/docs/guides/dev-experience.md
templates/kubernetes/overlays/dev/auth.yml
templates/kubernetes/overlays/dev/external-secret.yml
templates/kubernetes/overlays/dev/ingress.yml
templates/start-dev-env.sh


doc-site/docs/guides/dev-experience.md

Lines changed: 1 addition & 1 deletion
@@ -16,5 +16,5 @@ Now your service will have access to any dependencies within a namespace running
 
 1. Run `start-dev-env.sh` - You will be dropped into a shell that is the same as your local machine, but works as if it were running inside a pod in your k8s cluster
 2. Change code and run the server - As you run your local server, using local code, it will have access to remote dependencies, and will be sent traffic by the load balancer
-3. Test on your cloud environment with real dependencies - `https://<your name>-<DOMAIN>`
+3. Test on your cloud environment with real dependencies - `https://<your name>.dev.<DOMAIN>`
 4. git commit & auto-deploy to Staging through the build pipeline
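
A sketch of how these steps fit together (the developer name `alice`, the domain `example.com`, and the local run command are placeholders, not part of this repo):

    ./start-dev-env.sh                            # step 1: telepresence shell into your dev namespace
    npm start                                     # step 2: run the server locally with your usual command
    curl https://alice.dev.example.com/status     # step 3: hit the cloud URL, now served by your local process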

templates/kubernetes/overlays/dev/auth.yml

Lines changed: 21 additions & 2 deletions
@@ -6,8 +6,13 @@ kind: Rule
 metadata:
   name: public-backend-endpoints
 spec:
+  upstream:
+    # DEV_NAMESPACE is filled in by start-dev-env.sh
+    url: http://<% .Name %>.{{ DEV_NAMESPACE }}
+    stripPath: /{{ DEV_NAMESPACE }}
+    preserveHost: true
   match:
-    url: http://<% index .Params `stagingBackendSubdomain` %><% index .Params `stagingHostRoot` %>/<(status|webhook)\/.*>
+    url: http://{{ DEV_NAMESPACE }}.dev.<% index .Params `stagingHostRoot` %>/<(status|webhook)\/.*>
 ---
 ## Backend User-restricted endpoint
 # pattern: http://<proxy>/<not `status`/`.ory/kratos`>, everything else should be authenticated
@@ -20,5 +25,19 @@ kind: Rule
 metadata:
   name: authenticated-backend-endpoints
 spec:
+  upstream:
+    preserveHost: true
+    url: http://<% .Name %>.{{ DEV_NAMESPACE }}
+    stripPath: /{{ DEV_NAMESPACE }}
   match:
-    url: http://<% index .Params `stagingBackendSubdomain` %><% index .Params `stagingHostRoot` %>/<(?!(status|webhook|\.ory\/kratos)).*>
+    url: http://{{ DEV_NAMESPACE }}.dev.<% index .Params `stagingHostRoot` %>/<(?!(status|webhook|\.ory\/kratos)).*>
+  authenticators:
+    - handler: cookie_session
+      config:
+        check_session_url: http://kratos-development-public.user-auth/sessions/whoami
+  mutators:
+    - handler: id_token
+      config:
+        issuer_url: https://dev.<% index .Params `stagingHostRoot` %>
+    - handler: header
+
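For orientation, a sketch of how these rules behave once start-dev-env.sh has substituted the placeholders; the namespace `alice-myapp`, project `myapp`, host root `example.com`, request path, and session value below are assumptions for illustration only:

    # start-dev-env.sh replaces {{ DEV_NAMESPACE }} via sed, so the public rule effectively becomes:
    #   match:    http://alice-myapp.dev.example.com/<(status|webhook)\/.*>
    #   upstream: http://myapp.alice-myapp        # service "myapp" in namespace "alice-myapp", path prefix stripped
    # The authenticated rule additionally validates the session cookie against
    # http://kratos-development-public.user-auth/sessions/whoami before proxying, e.g.
    # (cookie name assumes the Kratos default):
    curl -b "ory_kratos_session=${SESSION}" https://alice-myapp.dev.example.com/api/profile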

templates/kubernetes/overlays/dev/external-secret.yml

Lines changed: 1 addition & 1 deletion
@@ -8,4 +8,4 @@ metadata:
 spec:
   backendType: secretsManager
   dataFrom:
-    - <% .Name %>/kubernetes/stage/<% .Name %>
+    - <% .Name %>/kubernetes/stage/devenv-<% .Name %>

templates/kubernetes/overlays/dev/ingress.yml

Lines changed: 21 additions & 5 deletions
@@ -1,3 +1,14 @@
+<%- if eq (index .Params `userAuth`) "yes" %>
+## Allows cross namespace ingress -> service
+apiVersion: v1
+kind: Service
+metadata:
+  name: oathkeeper
+spec:
+  type: ExternalName
+  externalName: oathkeeper-<% .Name %>-proxy.user-auth.svc.cluster.local
+---
+<%- end %>
 apiVersion: extensions/v1beta1
 kind: Ingress
 metadata:
@@ -12,7 +23,7 @@ metadata:
     # CORS
     ## to support both frontend origin and 'localhost', need 'configuration-snippet' implementation here, because 'cors-allow-origin' field doesn't support multiple origins yet.
     nginx.ingress.kubernetes.io/configuration-snippet: |
-      if ($http_origin ~* "^https?://((?:<% index .Params `stagingFrontendSubdomain` %><% index .Params `stagingHostRoot` %>)|(?:localhost))") {
+      if ($http_origin ~* "^https?://((?:<% index .Params `stagingFrontendSubdomain` %><% index .Params `stagingHostRoot` %>)|(?:localhost)|(?:127.0.0.1))") {
         set $cors "true";
       }
       if ($request_method = 'OPTIONS') {
@@ -21,15 +32,15 @@ metadata:
 
       if ($cors = "true") {
         add_header 'Access-Control-Allow-Origin' "$http_origin" always;
-        add_header 'Access-Control-Allow-Credentials' 'true';
+        add_header 'Access-Control-Allow-Credentials' 'true' always;
         add_header 'Access-Control-Allow-Methods' 'GET, PUT, POST, DELETE, PATCH, OPTIONS' always;
         add_header 'Access-Control-Allow-Headers' 'DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Authorization' always;
         add_header 'Access-Control-Expose-Headers' 'Content-Length,Content-Range' always;
       }
 
       if ($cors = "trueoptions") {
         add_header 'Access-Control-Allow-Origin' "$http_origin";
-        add_header 'Access-Control-Allow-Credentials' 'true';
+        add_header 'Access-Control-Allow-Credentials' 'true' always;
         add_header 'Access-Control-Allow-Methods' 'GET, PUT, POST, DELETE, PATCH, OPTIONS';
         add_header 'Access-Control-Allow-Headers' 'DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Authorization';
         add_header 'Access-Control-Expose-Headers' 'Content-Length,Content-Range';
@@ -41,14 +52,19 @@ metadata:
 
 spec:
   rules:
-    - host: <% index .Params `stagingBackendSubdomain` %><% index .Params `stagingHostRoot` %>
+    - host: {{ DEV_NAMESPACE }}.dev.<% index .Params `stagingHostRoot` %>
       http:
         paths:
           - path: /(.*)
            backend:
+<%- if eq (index .Params `userAuth`) "yes" %>
+              serviceName: oathkeeper
+              servicePort: 4455
+<%- else %>
              serviceName: <% .Name %>
              servicePort: http
+<%- end %>
   tls:
     - hosts:
-        - <% index .Params `stagingBackendSubdomain` %><% index .Params `stagingHostRoot` %>
+        - {{ DEV_NAMESPACE }}.dev.<% index .Params `stagingHostRoot` %>
       secretName: <% .Name %>-tls-secret
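
The `oathkeeper` ExternalName Service above is only a DNS alias, letting the Ingress in the developer namespace point at the shared proxy in `user-auth`. A quick way to see the aliasing, assuming a placeholder namespace `alice-myapp` and project `myapp`:

    kubectl -n alice-myapp run dns-test --rm -it --image=busybox --restart=Never -- nslookup oathkeeper
    # resolves via CNAME to oathkeeper-myapp-proxy.user-auth.svc.cluster.local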

templates/start-dev-env.sh

Lines changed: 23 additions & 23 deletions
@@ -7,6 +7,7 @@ PROJECT_NAME=<% .Name %>
 ENVIRONMENT=stage
 ACCOUNT_ID=<% index .Params `accountId` %>
 REGION=<% index .Params `region` %>
+CLUSTER_CONTEXT=${PROJECT_NAME}-${ENVIRONMENT}-${REGION}
 
 # common functions
 function usage() {
@@ -52,15 +53,10 @@ DEV_PROJECT_ID=${1:-""}
 echo '[Dev Environment]'
 
 # Validate cluster
-CLUSTER_CONTEXT=${PROJECT_NAME}-${ENVIRONMENT}-${REGION}
 echo " Cluster context: ${CLUSTER_CONTEXT}"
 
 # Validate secret
 NAMESPACE=${PROJECT_NAME}
-SECRET_NAME=${PROJECT_NAME}
-DEV_SECRET_NAME=devenv${PROJECT_NAME}
-DEV_SECRET_JSON=$(kubectl --context ${CLUSTER_CONTEXT} get secret ${DEV_SECRET_NAME} -n ${NAMESPACE} -o json)
-[[ -z "${DEV_SECRET_JSON}" ]] && error_exit "The secret ${DEV_SECRET_NAME} is not existing in namespace '${NAMESPACE}'."
 
 # Check installations
 if ! command_exist kustomize || ! command_exist telepresence; then
@@ -79,10 +75,6 @@ kubectl --context ${CLUSTER_CONTEXT} get namespace ${DEV_NAMESPACE} >& /dev/null
   kubectl --context ${CLUSTER_CONTEXT} create namespace ${DEV_NAMESPACE})
 echo " Namespace: ${DEV_NAMESPACE}"
 
-# Setup dev secret from pre-configed one
-kubectl --context ${CLUSTER_CONTEXT} get secret ${SECRET_NAME} -n ${DEV_NAMESPACE} >& /dev/null || \
-  echo ${DEV_SECRET_JSON} | jq 'del(.metadata["namespace","creationTimestamp","resourceVersion","selfLink","uid"])' | sed "s/${DEV_SECRET_NAME}/${SECRET_NAME}/g" | kubectl --context ${CLUSTER_CONTEXT} apply -n ${DEV_NAMESPACE} -f -
-echo " Secret: ${SECRET_NAME}"
 
 # Setup dev service account from pre-configured one
 SERVICE_ACCOUNT=backend-service
@@ -92,35 +84,39 @@ kubectl --context ${CLUSTER_CONTEXT} get sa ${SERVICE_ACCOUNT} -n ${DEV_NAMESPAC
 # Setup dev k8s manifests, configuration, docker login etc
 CONFIG_ENVIRONMENT="dev"
 EXT_HOSTNAME=<% index .Params `stagingBackendSubdomain` %><% index .Params `stagingHostRoot` %>
-MY_EXT_HOSTNAME=${DEV_NAMESPACE}-${EXT_HOSTNAME}
+MY_EXT_HOSTNAME="${DEV_NAMESPACE}.dev.${EXT_HOSTNAME}"
 ECR_REPO=${ACCOUNT_ID}.dkr.ecr.${REGION}.amazonaws.com/${PROJECT_NAME}
 VERSION_TAG=latest
 DATABASE_NAME=<% index .Params `databaseName` %>
-DEV_DATABASE_NAME=$(echo "dev${MY_USERNAME}" | tr -dc 'A-Za-z0-9')
+DEV_DATABASE_NAME=$(echo "dev_${MY_USERNAME}" | tr -dc 'A-Za-z0-9_')
 echo " Domain: ${MY_EXT_HOSTNAME}"
 echo " Database Name: ${DEV_DATABASE_NAME}"
 
 # Apply migration
 MIGRATION_NAME=${PROJECT_NAME}-migration
 SQL_DIR="${PWD}/database/migration"
+if [ `ls ${SQL_DIR}/*.sql 2>/dev/null | wc -l` -gt 0 ] ; then
 ## launch migration job
-(cd kubernetes/migration && \
-  kubectl --context ${CLUSTER_CONTEXT} -n ${DEV_NAMESPACE} create configmap ${MIGRATION_NAME} $(ls ${SQL_DIR}/*.sql | xargs printf '\-\-from\-file %s ') || error_exit "Failed to apply kubernetes migration configmap" && \
-  cat job.yml | \
-  sed "s|/${DATABASE_NAME}|/${DEV_DATABASE_NAME}|g" | \
-  kubectl --context ${CLUSTER_CONTEXT} -n ${DEV_NAMESPACE} create -f - ) || error_exit "Failed to apply kubernetes migration"
-## confirm migration job done
-if ! kubectl --context ${CLUSTER_CONTEXT} -n ${DEV_NAMESPACE} wait --for=condition=complete --timeout=180s job/${MIGRATION_NAME} ; then
-  echo "${MIGRATION_NAME} run failed:"
-  kubectl --context ${CLUSTER_CONTEXT} -n ${DEV_NAMESPACE} describe job ${MIGRATION_NAME}
-  error_exit "Failed migration. Leaving namespace ${DEV_NAMESPACE} for debugging"
+  (cd kubernetes/migration && \
+    kubectl --context ${CLUSTER_CONTEXT} -n ${DEV_NAMESPACE} create configmap ${MIGRATION_NAME} $(ls ${SQL_DIR}/*.sql | xargs printf '\-\-from\-file %s ') || error_exit "Failed to apply kubernetes migration configmap" && \
    cat job.yml | \
+    sed "s|/${DATABASE_NAME}|/${DEV_DATABASE_NAME}|g" | \
+    kubectl --context ${CLUSTER_CONTEXT} -n ${DEV_NAMESPACE} create -f - ) || error_exit "Failed to apply kubernetes migration"
+  ## confirm migration job done
+  if ! kubectl --context ${CLUSTER_CONTEXT} -n ${DEV_NAMESPACE} wait --for=condition=complete --timeout=180s job/${MIGRATION_NAME} ; then
+    echo "${MIGRATION_NAME} run failed:"
+    kubectl --context ${CLUSTER_CONTEXT} -n ${DEV_NAMESPACE} describe job ${MIGRATION_NAME}
+    error_exit "Failed migration. Leaving namespace ${DEV_NAMESPACE} for debugging"
+  fi
 fi
 
 # Apply manifests
 (cd kubernetes/overlays/${CONFIG_ENVIRONMENT} && \
   kustomize build . | \
   sed "s|${EXT_HOSTNAME}|${MY_EXT_HOSTNAME}|g" | \
-  sed "s|DATABASE_NAME: ${DATABASE_NAME}|DATABASE_NAME: ${DEV_DATABASE_NAME}|g" | \
+  sed "s|{{ DEV_NAMESPACE }}|${DEV_NAMESPACE}|g" | \
+  sed "s|DATABASE_NAME: ${DATABASE_NAME}|DATABASE_NAME: ${DEV_DATABASE_NAME}|g" > kustomizebuild
+  exit 1
   kubectl --context ${CLUSTER_CONTEXT} -n ${DEV_NAMESPACE} apply -f - ) || error_exit "Failed to apply kubernetes manifests"
 
 # Confirm deployment
@@ -155,7 +151,11 @@ echo
 
 # Starting dev environment with telepresence shell
 echo
-telepresence --context ${CLUSTER_CONTEXT} --swap-deployment ${PROJECT_NAME} --namespace ${DEV_NAMESPACE} --expose 80 --run-shell
+telepresence \
+  --context ${CLUSTER_CONTEXT} --namespace ${DEV_NAMESPACE} \
+  intercept ${PROJECT_NAME} \
+  --port 80 \
+  -- bash
 
 # Ending dev environment
 ## delete most of the resources (except ingress related, as we hit the rate limit of the certificate issuer (letsencrypt))
