Skip to content

Commit 334914e

Browse files
author
Mladen Rusev
committed
don't kill the agent
1 parent 2366d01 commit 334914e

File tree

1 file changed

+53
-53
lines changed

1 file changed

+53
-53
lines changed

hack/e2e/test.sh

Lines changed: 53 additions & 53 deletions
Original file line numberDiff line numberDiff line change
@@ -250,56 +250,56 @@ getCertificate() {
250250
# Wait 5 minutes for the certificate to appear.
251251
for ((i=0;;i++)); do if getCertificate; then exit 0; fi; sleep 30; done | timeout -v -- 5m cat
252252

253-
export AGENT_POD_NAME=$(kubectl get pods -n venafi -l app.kubernetes.io/name=venafi-kubernetes-agent -o jsonpath="{.items[0].metadata.name}")
254-
255-
echo "Sending SIGQUIT to agent pod '${AGENT_POD_NAME}' to trigger graceful shutdown and flush coverage..."
256-
# Use kubectl debug to attach a busybox container to the running pod.
257-
# --target specifies the container to share the process space with.
258-
# --share-processes allows our new container to see and signal the agent process.
259-
# We then run 'kill -s QUIT 1' to signal PID 1 (the agent) to quit gracefully.
260-
kubectl debug -q -n venafi "${AGENT_POD_NAME}" \
261-
--image=busybox:1.36 \
262-
--target=venafi-kubernetes-agent \
263-
--share-processes \
264-
-- sh -c 'kill -s QUIT 1'
265-
266-
echo "Waiting for agent pod '${AGENT_POD_NAME}' to terminate gracefully..."
267-
# The pod will now terminate because its main process is exiting.
268-
# We wait for Kubernetes to recognize this and delete the pod object.
269-
kubectl wait --for=delete pod/${AGENT_POD_NAME} -n venafi --timeout=90s
270-
271-
echo "Scaling down deployment to prevent pod from restarting..."
272-
# Now that the pod is gone and coverage is flushed, we scale the deployment
273-
# to ensure the ReplicaSet controller doesn't create a new one.
274-
kubectl scale deployment venafi-kubernetes-agent -n venafi --replicas=0
275-
echo "Waiting for agent pod '${AGENT_POD_NAME}' to terminate as a result of the scale-down..."
276-
kubectl wait --for=delete pod/${AGENT_POD_NAME} -n venafi --timeout=90s
277-
echo "Starting helper pod to retrieve coverage files from the PVC..."
278-
279-
kubectl apply -n venafi -f - <<EOF
280-
apiVersion: v1
281-
kind: Pod
282-
metadata:
283-
name: coverage-helper-pod
284-
spec:
285-
containers:
286-
- name: helper
287-
image: alpine:latest
288-
command: ["sleep", "infinity"]
289-
volumeMounts:
290-
- name: coverage-storage
291-
mountPath: /coverage-data
292-
volumes:
293-
- name: coverage-storage
294-
persistentVolumeClaim:
295-
claimName: coverage-pvc
296-
EOF
297-
298-
echo "Waiting for the helper pod to be ready..."
299-
kubectl wait --for=condition=Ready pod/coverage-helper-pod -n venafi --timeout=2m
300-
301-
echo "Copying coverage files from the helper pod..."
302-
mkdir -p $COVERAGE_HOST_PATH
303-
kubectl cp -n venafi "coverage-helper-pod:/coverage-data/." $COVERAGE_HOST_PATH
304-
echo "Coverage files retrieved. Listing contents:"
305-
ls -la $COVERAGE_HOST_PATH
253+
#export AGENT_POD_NAME=$(kubectl get pods -n venafi -l app.kubernetes.io/name=venafi-kubernetes-agent -o jsonpath="{.items[0].metadata.name}")
254+
#
255+
#echo "Sending SIGQUIT to agent pod '${AGENT_POD_NAME}' to trigger graceful shutdown and flush coverage..."
256+
## Use kubectl debug to attach a busybox container to the running pod.
257+
## --target specifies the container to share the process space with.
258+
## --share-processes allows our new container to see and signal the agent process.
259+
## We then run 'kill -s QUIT 1' to signal PID 1 (the agent) to quit gracefully.
260+
#kubectl debug -q -n venafi "${AGENT_POD_NAME}" \
261+
# --image=busybox:1.36 \
262+
# --target=venafi-kubernetes-agent \
263+
# --share-processes \
264+
# -- sh -c 'kill -s QUIT 1'
265+
#
266+
#echo "Waiting for agent pod '${AGENT_POD_NAME}' to terminate gracefully..."
267+
## The pod will now terminate because its main process is exiting.
268+
## We wait for Kubernetes to recognize this and delete the pod object.
269+
#kubectl wait --for=delete pod/${AGENT_POD_NAME} -n venafi --timeout=90s
270+
#
271+
#echo "Scaling down deployment to prevent pod from restarting..."
272+
## Now that the pod is gone and coverage is flushed, we scale the deployment
273+
## to ensure the ReplicaSet controller doesn't create a new one.
274+
#kubectl scale deployment venafi-kubernetes-agent -n venafi --replicas=0
275+
#echo "Waiting for agent pod '${AGENT_POD_NAME}' to terminate as a result of the scale-down..."
276+
#kubectl wait --for=delete pod/${AGENT_POD_NAME} -n venafi --timeout=90s
277+
#echo "Starting helper pod to retrieve coverage files from the PVC..."
278+
#
279+
#kubectl apply -n venafi -f - <<EOF
280+
#apiVersion: v1
281+
#kind: Pod
282+
#metadata:
283+
# name: coverage-helper-pod
284+
#spec:
285+
# containers:
286+
# - name: helper
287+
# image: alpine:latest
288+
# command: ["sleep", "infinity"]
289+
# volumeMounts:
290+
# - name: coverage-storage
291+
# mountPath: /coverage-data
292+
# volumes:
293+
# - name: coverage-storage
294+
# persistentVolumeClaim:
295+
# claimName: coverage-pvc
296+
#EOF
297+
#
298+
#echo "Waiting for the helper pod to be ready..."
299+
#kubectl wait --for=condition=Ready pod/coverage-helper-pod -n venafi --timeout=2m
300+
#
301+
#echo "Copying coverage files from the helper pod..."
302+
#mkdir -p $COVERAGE_HOST_PATH
303+
#kubectl cp -n venafi "coverage-helper-pod:/coverage-data/." $COVERAGE_HOST_PATH
304+
#echo "Coverage files retrieved. Listing contents:"
305+
#ls -la $COVERAGE_HOST_PATH

0 commit comments

Comments (0)