@@ -83,6 +83,19 @@ if ! gcloud container clusters get-credentials "${CLUSTER_NAME}"; then
 fi
 kubectl create ns venafi || true
 
+kubectl apply -n venafi -f - <<EOF
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: coverage-pvc
+spec:
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 1Gi
+EOF
+
 # Pull secret for Venafi OCI registry
 # IMPORTANT: we pick the first team as the owning team for the registry and
 # workload identity service account as it doesn't matter.
@@ -127,6 +140,8 @@ venctl components kubernetes apply \
   --venafi-kubernetes-agent-custom-chart-repository "oci://${OCI_BASE}/charts"
 
 kubectl apply -n venafi -f venafi-components.yaml
+kubectl set env deployments/venafi-kubernetes-agent -n venafi GOCOVERDIR=/coverage
+kubectl rollout status deployment/venafi-kubernetes-agent -n venafi --timeout=2m
 
 subject="system:serviceaccount:venafi:venafi-components"
 audience="https://${VEN_API_HOST}"
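Note: GOCOVERDIR only takes effect if the agent binary is built with coverage instrumentation (go build -cover) and /coverage is a writable mount backed by coverage-pvc. That wiring is not shown in this diff; if it is not already handled by the chart values in venafi-components.yaml, a strategic-merge patch along these lines could attach the PVC. This is a sketch only; the container name venafi-kubernetes-agent is an assumption and should be adjusted to the actual pod spec.

# Sketch: mount coverage-pvc at /coverage in the agent pod so GOCOVERDIR has a
# persistent place to write. The container name below is assumed, not taken
# from this diff.
kubectl patch deployment venafi-kubernetes-agent -n venafi --patch '
spec:
  template:
    spec:
      containers:
        - name: venafi-kubernetes-agent
          volumeMounts:
            - name: coverage
              mountPath: /coverage
      volumes:
        - name: coverage
          persistentVolumeClaim:
            claimName: coverage-pvc
'
kubectl rollout status deployment/venafi-kubernetes-agent -n venafi --timeout=2m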
@@ -233,3 +248,57 @@ getCertificate() {
 
 # Wait 5 minutes for the certificate to appear.
 for ((i=0;;i++)); do if getCertificate; then exit 0; fi; sleep 30; done | timeout -v -- 5m cat
+
+# export AGENT_POD_NAME=$(kubectl get pods -n venafi -l app.kubernetes.io/name=venafi-kubernetes-agent -o jsonpath="{.items[0].metadata.name}")
+#
+# echo "Sending SIGQUIT to agent pod '${AGENT_POD_NAME}' to trigger graceful shutdown and flush coverage..."
+# # Use kubectl debug to attach a busybox container to the running pod.
+# # --target specifies the container to share the process space with.
+# # --share-processes allows our new container to see and signal the agent process.
+# # We then run 'kill -s QUIT 1' to signal PID 1 (the agent) to quit gracefully.
+# kubectl debug -q -n venafi "${AGENT_POD_NAME}" \
+#   --image=busybox:1.36 \
+#   --target=venafi-kubernetes-agent \
+#   --share-processes \
+#   -- sh -c 'kill -s QUIT 1'
+#
+# echo "Waiting for agent pod '${AGENT_POD_NAME}' to terminate gracefully..."
+# # The pod will now terminate because its main process is exiting.
+# # We wait for Kubernetes to recognize this and delete the pod object.
+# kubectl wait --for=delete pod/${AGENT_POD_NAME} -n venafi --timeout=90s
+#
+# echo "Scaling down deployment to prevent pod from restarting..."
+# # Now that the pod is gone and coverage is flushed, we scale the deployment
+# # to ensure the ReplicaSet controller doesn't create a new one.
+# kubectl scale deployment venafi-kubernetes-agent -n venafi --replicas=0
+# echo "Waiting for agent pod '${AGENT_POD_NAME}' to terminate as a result of the scale-down..."
+# kubectl wait --for=delete pod/${AGENT_POD_NAME} -n venafi --timeout=90s
+# echo "Starting helper pod to retrieve coverage files from the PVC..."
+#
+# kubectl apply -n venafi -f - <<EOF
+# apiVersion: v1
+# kind: Pod
+# metadata:
+#   name: coverage-helper-pod
+# spec:
+#   containers:
+#     - name: helper
+#       image: alpine:latest
+#       command: ["sleep", "infinity"]
+#       volumeMounts:
+#         - name: coverage-storage
+#           mountPath: /coverage-data
+#   volumes:
+#     - name: coverage-storage
+#       persistentVolumeClaim:
+#         claimName: coverage-pvc
+# EOF
+#
+# echo "Waiting for the helper pod to be ready..."
+# kubectl wait --for=condition=Ready pod/coverage-helper-pod -n venafi --timeout=2m
+#
+# echo "Copying coverage files from the helper pod..."
+# mkdir -p "${COVERAGE_HOST_PATH}"
+# kubectl cp -n venafi "coverage-helper-pod:/coverage-data/." "${COVERAGE_HOST_PATH}"
+# echo "Coverage files retrieved. Listing contents:"
+# ls -la "${COVERAGE_HOST_PATH}"
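
If the commented-out retrieval steps above are re-enabled, the files copied into COVERAGE_HOST_PATH are raw GOCOVERDIR data (covmeta/covcounters files). A minimal sketch of summarising them and converting to a classic text profile, assuming Go 1.20+ is installed on the host:

# Sketch only: inspect the binary coverage data copied above and convert it to
# a text profile that "go tool cover" understands. Assumes Go >= 1.20 and that
# "${COVERAGE_HOST_PATH}" contains the covmeta/covcounters files.
go tool covdata percent -i="${COVERAGE_HOST_PATH}"
go tool covdata textfmt -i="${COVERAGE_HOST_PATH}" -o=coverage.out
go tool cover -func=coverage.out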