diff --git a/acceptance/features/decommissioning.feature b/acceptance/features/decommissioning.feature
index e13c3648e..b83d7a89c 100644
--- a/acceptance/features/decommissioning.feature
+++ b/acceptance/features/decommissioning.feature
@@ -8,6 +8,6 @@ Feature: Decommissioning brokers
     When I physically shutdown a kubernetes node for cluster "decommissioning"
     And cluster "decommissioning" is unhealthy
     And cluster "decommissioning" has only 2 remaining nodes
-    And I prune any kubernetes node that is now in a NotReady status
+    And I prune the kubernetes node that was shut down in the previous step
    Then cluster "decommissioning" should recover
     And cluster "decommissioning" should be stable with 3 nodes
diff --git a/acceptance/steps/cluster.go b/acceptance/steps/cluster.go
index 226afb186..b169d660a 100644
--- a/acceptance/steps/cluster.go
+++ b/acceptance/steps/cluster.go
@@ -143,7 +143,7 @@ func checkClusterHealthCondition(ctx context.Context, t framework.TestingT, clus
 	t.Logf("Cluster %q contains Healthy reason %q!", clusterName, reason)
 }
 
-func shutdownRandomClusterNode(ctx context.Context, t framework.TestingT, clusterName string) {
+func shutdownRandomClusterNode(ctx context.Context, t framework.TestingT, clusterName string) context.Context {
 	var clusterSet appsv1.StatefulSet
 
 	key := t.ResourceKey(clusterName)
@@ -164,9 +164,10 @@ func shutdownRandomClusterNode(ctx context.Context, t framework.TestingT, cluste
 	pod := pods.Items[index]
 
 	t.ShutdownNode(ctx, pod.Spec.NodeName)
+	return context.WithValue(ctx, recordedVariable("ShutdownNodeName"), pod.Spec.NodeName)
 }
 
-func shutdownNodeOfPod(ctx context.Context, t framework.TestingT, podName string) {
+func shutdownNodeOfPod(ctx context.Context, t framework.TestingT, podName string) context.Context {
 	t.ResourceKey(podName)
 
 	var pod corev1.Pod
@@ -183,6 +184,7 @@ func shutdownNodeOfPod(ctx context.Context, t framework.TestingT, podName string
 	require.NoError(t, t.Update(ctx, &node))
 
 	t.ShutdownNode(ctx, pod.Spec.NodeName)
+	return context.WithValue(ctx, recordedVariable("ShutdownNodeName"), pod.Spec.NodeName)
 }
 
 func deleteNotReadyKubernetesNodes(ctx context.Context, t framework.TestingT) {
@@ -198,6 +200,12 @@ func deleteNotReadyKubernetesNodes(ctx context.Context, t framework.TestingT) {
 	}
 }
 
+func deleteKubernetesNodesFromContext(ctx context.Context, t framework.TestingT) {
+	shutdownNodeName := ctx.Value(recordedVariable("ShutdownNodeName"))
+	t.Logf("Deleting Kubernetes node: %q", shutdownNodeName)
+	t.DeleteNode(ctx, shutdownNodeName.(string))
+}
+
 func checkClusterNodeCount(ctx context.Context, t framework.TestingT, clusterName string, nodeCount int32) {
 	var cluster redpandav1alpha2.Redpanda
 	var actualNodeCount int32
diff --git a/acceptance/steps/register.go b/acceptance/steps/register.go
index 4b0949a1f..969be40e3 100644
--- a/acceptance/steps/register.go
+++ b/acceptance/steps/register.go
@@ -109,6 +109,7 @@ func init() {
 	framework.RegisterStep(`^I prune any kubernetes node that is now in a NotReady status$`, deleteNotReadyKubernetesNodes)
 	framework.RegisterStep(`I stop the Node running Pod "([^"]+)"`, shutdownNodeOfPod)
 	framework.RegisterStep(`^cluster "([^"]*)" has only (\d+) remaining nodes$`, checkClusterNodeCount)
+	framework.RegisterStep(`^I prune the kubernetes node that was shut down in the previous step$`, deleteKubernetesNodesFromContext)
 
 	// Operator upgrade scenario steps
 	framework.RegisterStep(`^I install local CRDs from "([^"]*)"`, iInstallLocalCRDs)
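
A minimal sketch of the context-value hand-off this diff introduces, assuming the step framework threads the context.Context returned by one step into the next one. The recordedVariable key type is mirrored locally for illustration, and "worker-2" is a made-up node name; the comma-ok assertion shows how the lookup in deleteKubernetesNodesFromContext could fail with a clear message rather than panic on .(string) if no node name was recorded.

```go
package main

import (
	"context"
	"fmt"
)

// recordedVariable mirrors the typed context key used in cluster.go; a named
// type keeps the key from colliding with other packages' context values.
type recordedVariable string

func main() {
	// What shutdownRandomClusterNode/shutdownNodeOfPod do: record the name of
	// the node that was shut down so a later step can retrieve it.
	ctx := context.WithValue(context.Background(), recordedVariable("ShutdownNodeName"), "worker-2")

	// What deleteKubernetesNodesFromContext does, but with a comma-ok
	// assertion so a missing or mistyped value produces a readable failure.
	name, ok := ctx.Value(recordedVariable("ShutdownNodeName")).(string)
	if !ok {
		fmt.Println("no shutdown node recorded in context")
		return
	}
	fmt.Printf("Deleting Kubernetes node: %q\n", name)
}
```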