Skip to content

Commit 8c1012a

Browse files
author
Mladen Rusev
committed
revert test.sh to original; create new test_ci.sh; add mounts to values; add kind cluster setup
1 parent 964f548 commit 8c1012a

File tree

6 files changed

+269
-82
lines changed

6 files changed

+269
-82
lines changed

hack/e2e/test.sh

Lines changed: 0 additions & 70 deletions
Original file line numberDiff line numberDiff line change
@@ -83,19 +83,6 @@ if ! gcloud container clusters get-credentials "${CLUSTER_NAME}"; then
8383
fi
8484
kubectl create ns venafi || true
8585

86-
kubectl apply -n venafi -f - <<EOF
87-
apiVersion: v1
88-
kind: PersistentVolumeClaim
89-
metadata:
90-
name: coverage-pvc
91-
spec:
92-
accessModes:
93-
- ReadWriteOnce
94-
resources:
95-
requests:
96-
storage: 1Gi
97-
EOF
98-
9986
# Pull secret for Venafi OCI registry
10087
# IMPORTANT: we pick the first team as the owning team for the registry and
10188
# workload identity service account as it doesn't matter.
@@ -136,13 +123,10 @@ venctl components kubernetes apply \
136123
--venafi-kubernetes-agent \
137124
--venafi-kubernetes-agent-version "${RELEASE_HELM_CHART_VERSION}" \
138125
--venafi-kubernetes-agent-values-files "${script_dir}/values.venafi-kubernetes-agent.yaml" \
139-
--venafi-kubernetes-agent-values-files "${script_dir}/values.coverage-pvc.yaml" \
140126
--venafi-kubernetes-agent-custom-image-registry "${OCI_BASE}/images" \
141127
--venafi-kubernetes-agent-custom-chart-repository "oci://${OCI_BASE}/charts"
142128

143129
kubectl apply -n venafi -f venafi-components.yaml
144-
kubectl set env deployments/venafi-kubernetes-agent -n venafi GOCOVERDIR=/coverage
145-
kubectl rollout status deployment/venafi-kubernetes-agent -n venafi --timeout=2m
146130

147131
subject="system:serviceaccount:venafi:venafi-components"
148132
audience="https://${VEN_API_HOST}"
@@ -249,57 +233,3 @@ getCertificate() {
249233

250234
# Wait 5 minutes for the certificate to appear.
251235
for ((i=0;;i++)); do if getCertificate; then exit 0; fi; sleep 30; done | timeout -v -- 5m cat
252-
253-
#export AGENT_POD_NAME=$(kubectl get pods -n venafi -l app.kubernetes.io/name=venafi-kubernetes-agent -o jsonpath="{.items[0].metadata.name}")
254-
#
255-
#echo "Sending SIGQUIT to agent pod '${AGENT_POD_NAME}' to trigger graceful shutdown and flush coverage..."
256-
## Use kubectl debug to attach a busybox container to the running pod.
257-
## --target specifies the container to share the process space with.
258-
## --share-processes allows our new container to see and signal the agent process.
259-
## We then run 'kill -s QUIT 1' to signal PID 1 (the agent) to quit gracefully.
260-
#kubectl debug -q -n venafi "${AGENT_POD_NAME}" \
261-
# --image=busybox:1.36 \
262-
# --target=venafi-kubernetes-agent \
263-
# --share-processes \
264-
# -- sh -c 'kill -s QUIT 1'
265-
#
266-
#echo "Waiting for agent pod '${AGENT_POD_NAME}' to terminate gracefully..."
267-
## The pod will now terminate because its main process is exiting.
268-
## We wait for Kubernetes to recognize this and delete the pod object.
269-
#kubectl wait --for=delete pod/${AGENT_POD_NAME} -n venafi --timeout=90s
270-
#
271-
#echo "Scaling down deployment to prevent pod from restarting..."
272-
## Now that the pod is gone and coverage is flushed, we scale the deployment
273-
## to ensure the ReplicaSet controller doesn't create a new one.
274-
#kubectl scale deployment venafi-kubernetes-agent -n venafi --replicas=0
275-
#echo "Waiting for agent pod '${AGENT_POD_NAME}' to terminate as a result of the scale-down..."
276-
#kubectl wait --for=delete pod/${AGENT_POD_NAME} -n venafi --timeout=90s
277-
#echo "Starting helper pod to retrieve coverage files from the PVC..."
278-
#
279-
#kubectl apply -n venafi -f - <<EOF
280-
#apiVersion: v1
281-
#kind: Pod
282-
#metadata:
283-
# name: coverage-helper-pod
284-
#spec:
285-
# containers:
286-
# - name: helper
287-
# image: alpine:latest
288-
# command: ["sleep", "infinity"]
289-
# volumeMounts:
290-
# - name: coverage-storage
291-
# mountPath: /coverage-data
292-
# volumes:
293-
# - name: coverage-storage
294-
# persistentVolumeClaim:
295-
# claimName: coverage-pvc
296-
#EOF
297-
#
298-
#echo "Waiting for the helper pod to be ready..."
299-
#kubectl wait --for=condition=Ready pod/coverage-helper-pod -n venafi --timeout=2m
300-
#
301-
#echo "Copying coverage files from the helper pod..."
302-
#mkdir -p $COVERAGE_HOST_PATH
303-
#kubectl cp -n venafi "coverage-helper-pod:/coverage-data/." $COVERAGE_HOST_PATH
304-
#echo "Coverage files retrieved. Listing contents:"
305-
#ls -la $COVERAGE_HOST_PATH

hack/e2e/test_ci.sh

Lines changed: 230 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,230 @@
1+
#!/usr/bin/env bash
2+
#
3+
# Build and install venafi-kubernetes-agent for VenafiConnection based authentication.
4+
# Wait for it to log a message indicating successful data upload.
5+
#
6+
# This script is designed to be executed by a `make` target that has already
7+
# provisioned a Kubernetes cluster (e.g., via `make kind-cluster`).
8+
# It assumes `kubectl` is pre-configured to point to the correct test cluster.
9+
#
10+
# A VenafiConnection resource is created which uses workload identity federation.
11+
#
12+
# Prerequisites (expected to be available in the execution environment):
13+
# * kubectl, venctl, jq, step, curl, envsubst, docker
14+
15+
set -o nounset
16+
set -o errexit
17+
set -o pipefail
18+
# Commenting out for CI, uncomment for local debugging
19+
#set -o xtrace
20+
21+
script_dir=$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" &>/dev/null && pwd)
22+
root_dir=$(cd "${script_dir}/../.." && pwd)
23+
export TERM=dumb
24+
25+
# Your Venafi Cloud API key.
26+
: ${VEN_API_KEY?}
27+
# Separate API Key for getting a pull secret.
28+
: ${VEN_API_KEY_PULL?}
29+
# The Venafi Cloud zone.
30+
: ${VEN_ZONE?}
31+
# The hostname of the Venafi API server (e.g., api.venafi.cloud).
32+
: ${VEN_API_HOST?}
33+
# The region of the Venafi API server (e.g., "us" or "eu").
34+
: ${VEN_VCP_REGION?}
35+
# The base URL of the OCI registry (e.g., ttl.sh/some-random-uuid).
36+
: ${OCI_BASE?}
37+
38+
REMOTE_AGENT_IMAGE="${OCI_BASE}/venafi-kubernetes-agent-e2e"
39+
40+
cd "${script_dir}"
41+
42+
# Build and PUSH agent image and Helm chart to the anonymous registry
43+
echo ">>> Building and pushing agent to '${REMOTE_AGENT_IMAGE}'..."
44+
pushd "${root_dir}"
45+
> release.env
46+
make release \
47+
OCI_SIGN_ON_PUSH=false \
48+
oci_platforms=linux/amd64 \
49+
oci_preflight_image_name=${REMOTE_AGENT_IMAGE} \
50+
helm_chart_image_name=$OCI_BASE/charts/venafi-kubernetes-agent \
51+
GITHUB_OUTPUT=release.env
52+
source release.env
53+
popd
54+
55+
AGENT_IMAGE_WITH_TAG="${REMOTE_AGENT_IMAGE}:${RELEASE_HELM_CHART_VERSION}"
56+
echo ">>> Successfully pushed image: ${AGENT_IMAGE_WITH_TAG}"
57+
58+
kubectl create ns venafi || true
59+
60+
# Create pull secret for Venafi's OCI registry if it doesn't exist.
61+
if ! kubectl get secret venafi-image-pull-secret -n venafi; then
62+
echo ">>> Creating Venafi OCI registry pull secret..."
63+
venctl iam service-accounts registry create \
64+
--api-key $VEN_API_KEY_PULL \
65+
--no-prompts \
66+
--owning-team "$(curl --fail-with-body -sS "https://${VEN_API_HOST}/v1/teams" -H "tppl-api-key: ${VEN_API_KEY_PULL}" | jq '.teams[0].id' -r)" \
67+
--name "venafi-kubernetes-agent-e2e-registry-${RANDOM}" \
68+
--scopes enterprise-cert-manager,enterprise-venafi-issuer,enterprise-approver-policy \
69+
| jq '{
70+
"apiVersion": "v1",
71+
"kind": "Secret",
72+
"metadata": {
73+
"name": "venafi-image-pull-secret"
74+
},
75+
"type": "kubernetes.io/dockerconfigjson",
76+
"stringData": {
77+
".dockerconfigjson": {
78+
"auths": {
79+
"\(.oci_registry)": {
80+
"username": .username,
81+
"password": .password
82+
}
83+
}
84+
} | tostring
85+
}
86+
}' \
87+
| kubectl create -n venafi -f -
88+
fi
89+
90+
echo ">>> Generating temporary Helm values for the custom agent image..."
91+
cat <<EOF > /tmp/agent-image-values.yaml
92+
image:
93+
repository: ${REMOTE_AGENT_IMAGE}
94+
tag: ${RELEASE_HELM_CHART_VERSION}
95+
pullPolicy: IfNotPresent
96+
EOF
97+
98+
echo ">>> Applying Venafi components to the cluster..."
99+
export VENAFI_KUBERNETES_AGENT_CLIENT_ID="not-used-but-required-by-venctl"
100+
venctl components kubernetes apply \
101+
--region $VEN_VCP_REGION \
102+
--cert-manager \
103+
--venafi-enhanced-issuer \
104+
--approver-policy-enterprise \
105+
--venafi-kubernetes-agent \
106+
--venafi-kubernetes-agent-version "${RELEASE_HELM_CHART_VERSION}" \
107+
--venafi-kubernetes-agent-values-files "${script_dir}/values.venafi-kubernetes-agent.yaml" \
108+
--venafi-kubernetes-agent-values-files "/tmp/agent-image-values.yaml" \
109+
--venafi-kubernetes-agent-custom-chart-repository "oci://${OCI_BASE}/charts"
110+
111+
kubectl apply -n venafi -f venafi-components.yaml
112+
113+
# Configure Workload Identity Federation with Venafi Cloud
114+
echo ">>> Configuring Workload Identity Federation..."
115+
subject="system:serviceaccount:venafi:venafi-components"
116+
audience="https://${VEN_API_HOST}"
117+
issuerURL=$(kubectl get --raw /.well-known/openid-configuration | jq -r '.issuer')
118+
openidDiscoveryURL="${issuerURL}/.well-known/openid-configuration"
119+
jwksURI=$(curl --fail-with-body -sSL ${openidDiscoveryURL} | jq -r '.jwks_uri')
120+
121+
# Create the Venafi agent service account if one does not already exist
122+
echo ">>> Ensuring Venafi Cloud service account exists for the agent..."
123+
while true; do
124+
tenantID=$(curl --fail-with-body -sSL -H "tppl-api-key: $VEN_API_KEY" https://${VEN_API_HOST}/v1/serviceaccounts \
125+
| jq -r '.[] | select(.issuerURL==$issuerURL and .subject == $subject) | .companyId' \
126+
--arg issuerURL "${issuerURL}" \
127+
--arg subject "${subject}")
128+
129+
if [[ "${tenantID}" != "" ]]; then
130+
echo "Service account already exists."
131+
break
132+
fi
133+
134+
echo "Service account not found, creating it..."
135+
jq -n '{
136+
"name": "venafi-kubernetes-agent-e2e-agent-\($random)",
137+
"authenticationType": "rsaKeyFederated",
138+
"scopes": ["kubernetes-discovery-federated", "certificate-issuance"],
139+
"subject": $subject,
140+
"audience": $audience,
141+
"issuerURL": $issuerURL,
142+
"jwksURI": $jwksURI,
143+
"owner": $owningTeamID
144+
}' \
145+
--arg random "${RANDOM}" \
146+
--arg subject "${subject}" \
147+
--arg audience "${audience}" \
148+
--arg issuerURL "${issuerURL}" \
149+
--arg jwksURI "${jwksURI}" \
150+
--arg owningTeamID "$(curl --fail-with-body -sS "https://${VEN_API_HOST}/v1/teams" -H "tppl-api-key: $VEN_API_KEY" | jq '.teams[0].id' -r)" \
151+
| curl "https://${VEN_API_HOST}/v1/serviceaccounts" \
152+
-H "tppl-api-key: $VEN_API_KEY" \
153+
--fail-with-body \
154+
-sSL --json @-
155+
done
156+
157+
# Create the VenafiConnection resource
158+
echo ">>> Applying VenafiConnection resource..."
159+
kubectl apply -n venafi -f - <<EOF
160+
apiVersion: jetstack.io/v1alpha1
161+
kind: VenafiConnection
162+
metadata:
163+
name: venafi-components
164+
spec:
165+
allowReferencesFrom: {}
166+
vcp:
167+
url: https://${VEN_API_HOST}
168+
accessToken:
169+
- serviceAccountToken:
170+
name: venafi-components
171+
audiences:
172+
- ${audience}
173+
- vcpOAuth:
174+
tenantID: ${tenantID}
175+
EOF
176+
177+
# Test certificate issuance
178+
echo ">>> Testing certificate issuance..."
179+
envsubst <application-team-1.yaml | kubectl apply -f -
180+
kubectl -n team-1 wait certificate app-0 --for=condition=Ready --timeout=5m
181+
182+
# Wait for the agent to successfully send data to Venafi Cloud
183+
echo ">>> Waiting for agent log message confirming successful data upload..."
184+
set +o pipefail
185+
kubectl logs deployments/venafi-kubernetes-agent \
186+
--follow \
187+
--namespace venafi \
188+
| timeout 60s jq 'if .msg | test("Data sent successfully") then . | halt_error(0) end'
189+
set -o pipefail
190+
191+
# Create a unique TLS secret and verify its discovery by the agent
192+
echo ">>> Testing discovery of a manually created TLS secret..."
193+
commonname="venafi-kubernetes-agent-e2e.$(uuidgen | tr '[:upper:]' '[:lower:]')"
194+
openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout /tmp/tls.key -out /tmp/tls.crt -subj "/CN=$commonname"
195+
kubectl create secret tls "$commonname" --cert=/tmp/tls.crt --key=/tmp/tls.key -o yaml --dry-run=client | kubectl apply -f -
196+
197+
getCertificate() {
198+
jq -n '{
199+
"expression": {
200+
"field": "subjectCN",
201+
"operator": "MATCH",
202+
"value": $commonname
203+
},
204+
"ordering": {
205+
"orders": [
206+
{ "direction": "DESC", "field": "certificatInstanceModificationDate" }
207+
]
208+
},
209+
"paging": { "pageNumber": 0, "pageSize": 10 }
210+
}' --arg commonname "${commonname}" \
211+
| curl "https://${VEN_API_HOST}/outagedetection/v1/certificatesearch?excludeSupersededInstances=true&ownershipTree=true" \
212+
-fsSL \
213+
-H "tppl-api-key: $VEN_API_KEY" \
214+
--json @- \
215+
| jq 'if .count == 0 then . | halt_error(1) end'
216+
}
217+
218+
# Wait up to 5 minutes for the certificate to appear in the Venafi inventory
219+
echo ">>> Waiting for certificate '${commonname}' to appear in Venafi Cloud inventory..."
220+
for ((i=0;;i++)); do
221+
if getCertificate; then
222+
echo "Successfully found certificate in Venafi Cloud."
223+
exit 0;
224+
fi;
225+
echo "Certificate not found yet, retrying in 30 seconds..."
226+
sleep 30;
227+
done | timeout -v -- 5m cat
228+
229+
echo "!!! Test Failed: Timed out waiting for certificate to appear in Venafi Cloud."
230+
exit 1

hack/e2e/values.coverage-pvc.yaml

Lines changed: 0 additions & 9 deletions
This file was deleted.

hack/e2e/values.venafi-kubernetes-agent.yaml

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -11,3 +11,12 @@ authentication:
1111
extraArgs:
1212
- --logging-format=json
1313
- --log-level=4
14+
15+
volumeMounts:
16+
- name: coverage-volume
17+
mountPath: /coverage
18+
volumes:
19+
- name: coverage-volume
20+
hostPath:
21+
path: /coverage
22+
type: DirectoryOrCreate

make/02_mod.mk

Lines changed: 10 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,14 @@
11
include make/test-unit.mk
22
include make/ark/02_mod.mk
33

4+
$(kind_cluster_config): make/config/kind/cluster.yaml | $(bin_dir)/scratch
5+
@echo "--- COVERAGE_HOST_PATH is $(COVERAGE_HOST_PATH) ---"
6+
mkdir -p $(COVERAGE_HOST_PATH)
7+
@cat $< | \
8+
sed -e 's|{{KIND_IMAGES}}|$(CURDIR)/$(images_tar_dir)|g' | \
9+
sed -e 's|{{COVERAGE_HOST_PATH}}|$(COVERAGE_HOST_PATH)|g' \
10+
> $@
11+
412
GITHUB_OUTPUT ?= /dev/stderr
513
.PHONY: release
614
## Publish all release artifacts (image + helm chart)
@@ -51,9 +59,8 @@ shared_generate_targets += generate-crds-venconn
5159
## Wait for it to log a message indicating successful data upload.
5260
## See `hack/e2e/test.sh` for the full test script.
5361
## @category Testing
54-
test-e2e-gke: | $(NEEDS_HELM) $(NEEDS_STEP) $(NEEDS_VENCTL)
55-
COVERAGE_HOST_PATH="$(COVERAGE_HOST_PATH)" ./hack/e2e/test.sh
56-
#./hack/e2e/test.sh
62+
test-e2e-gke: | kind-cluster $(NEEDS_HELM) $(NEEDS_STEP) $(NEEDS_VENCTL)
63+
COVERAGE_HOST_PATH="$(COVERAGE_HOST_PATH)" ./hack/e2e/test_ci.sh
5764

5865
.PHONY: test-helm
5966
## Run `helm unittest`.

make/config/kind/cluster.yaml

Lines changed: 20 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,20 @@
1+
apiVersion: kind.x-k8s.io/v1alpha4
2+
kind: Cluster
3+
kubeadmConfigPatches:
4+
- |
5+
kind: ClusterConfiguration
6+
metadata:
7+
name: config
8+
etcd:
9+
local:
10+
extraArgs:
11+
unsafe-no-fsync: "true"
12+
networking:
13+
serviceSubnet: 10.0.0.0/16
14+
nodes:
15+
- role: control-plane
16+
extraMounts:
17+
- hostPath: {{KIND_IMAGES}}
18+
containerPath: /mounted_images
19+
- hostPath: {{COVERAGE_HOST_PATH}}
20+
containerPath: /coverage

0 commit comments

Comments
 (0)