/*
Copyright 2025 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package acceptance

import (
	"context"
	"crypto/rand"
	"errors"
	"testing"
	"time"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/rest"

	"sigs.k8s.io/controller-runtime/pkg/client"

	mcmanager "sigs.k8s.io/multicluster-runtime/pkg/manager"
	"sigs.k8s.io/multicluster-runtime/pkg/multicluster"
)

// ClusterGenerator is a function that generates a new cluster.
// The cluster is expected to be available through the provider.
// The return values are the cluster name, the rest.Config to access the
// cluster, and an error if the cluster could not be created.
//
// The context is cancelled when the cluster is to be removed.
//
// The ErrorHandler can be used to report errors from goroutines started
// by the generator.
type ClusterGenerator func(context.Context, ErrorHandler) (string, *rest.Config, error)

// ErrorHandler is a function that handles errors from goroutines
// started by consumers of the acceptance tests.
// The ErrorHandler will automatically ignore nil values and context.Canceled.
type ErrorHandler func(error)

// UnknownClusterName is a random cluster name used to test the
// return of a correct error for non-existing clusters.
// Providers may use this in their tests to verify that their generated
// names do not accidentally collide with this name.
var UnknownClusterName = rand.Text()

// RandomClusterName generates a random cluster name that is not
// UnknownClusterName.
func RandomClusterName() string {
	name := rand.Text()
	if name == UnknownClusterName {
		return RandomClusterName()
	}
	return name
}
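
// Example: a possible ClusterGenerator implementation.
//
// The following is only an illustrative sketch, not part of this package.
// It assumes a provider backed by controller-runtime's envtest
// (sigs.k8s.io/controller-runtime/pkg/envtest); real providers will
// typically plug in kind, cluster-api, or similar instead. The environment
// is torn down when the context is cancelled, and the teardown error is
// reported through the ErrorHandler (which ignores nil and
// context.Canceled):
//
//	func envtestGenerator(ctx context.Context, handle ErrorHandler) (string, *rest.Config, error) {
//		env := &envtest.Environment{}
//		cfg, err := env.Start()
//		if err != nil {
//			return "", nil, err
//		}
//		go func() {
//			<-ctx.Done()
//			handle(env.Stop())
//		}()
//		return RandomClusterName(), cfg, nil
//	}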

// Run runs the acceptance tests.
// The manager must not be started; it will be started and stopped as
// part of the acceptance tests.
// If the provider needs to be started it must implement the
// multicluster.ProviderRunnable interface.
//
// An illustrative invocation is sketched in the comment at the end of
// this file.
//
// TODO multi manager support for controller sharding?
// TODO two executions, one with and one without sharding?
func Run(t testing.TB, clusterGenerator ClusterGenerator, manager mcmanager.Manager) {
	t.Log("Starting acceptance tests")

	t.Log("Creating a cluster before starting the manager")
	clusterBeforeName, clusterBeforeCfg := createCluster(t, clusterGenerator)
	clusterBeforeTest := "created before manager start"
	writeConfigMap(t, clusterBeforeCfg, "before", clusterBeforeTest)

	t.Log("Starting the manager")
	managerCtx, managerCancel := context.WithCancel(t.Context())
	defer managerCancel()
	go func() {
		if err := ignoreCanceled(manager.Start(managerCtx)); err != nil {
			// TODO This _SHOULD_ be t.Errorf _but_ Ginkgo maps their
			// equivalent of testing.TB.Errorf to Ginkgo.Errorf, which is
			// virtually equivalent to Fatalf because it immediately
			// stops execution of the test by panicking.
			// This is caught by Ginkgo later, BUT causes it to discard
			// ALL information about the run and just print
			// boilerplate about using GinkgoRecover.
			//
			// So for now the error is just logged; a separate check
			// will be implemented later.
			t.Logf("Error in manager: %v", err)
		}
	}()

	t.Log("Wait for manager to win the election")
	// TODO: Not sure if this is needed. Then again it doesn't hurt.
	func() {
		timeoutCtx, timeoutCancel := context.WithTimeout(t.Context(), WaitTimeout)
		defer timeoutCancel()
		select {
		case <-manager.Elected():
			t.Log("Manager elected")
		case <-timeoutCtx.Done():
			if errors.Is(timeoutCtx.Err(), context.DeadlineExceeded) {
				t.Fatalf("Manager not elected within timeout")
			}
		}
	}()

	t.Logf("Retrieve cluster %q, created before manager", clusterBeforeName)
	clusterBefore := getCluster(t, manager, clusterBeforeName, clusterBeforeCfg)
	clusterBeforeData := getConfigMap(t, clusterBefore.GetConfig(), "before")
	if clusterBeforeData != clusterBeforeTest {
		t.Errorf("Cluster data mismatch: got %q, want %q", clusterBeforeData, clusterBeforeTest)
	}

	t.Log("Creating a cluster after starting the manager")
	clusterAfterName, clusterAfterCfg := createCluster(t, clusterGenerator)
	clusterAfterTest := "created after manager start"
	writeConfigMap(t, clusterAfterCfg, "after", clusterAfterTest)

	t.Logf("Retrieve cluster %q, created after the manager", clusterAfterName)
	clusterAfter := getCluster(t, manager, clusterAfterName, clusterAfterCfg)
	clusterAfterData := getConfigMap(t, clusterAfter.GetConfig(), "after")
	if clusterAfterData != clusterAfterTest {
		t.Errorf("Cluster data mismatch: got %q, want %q", clusterAfterData, clusterAfterTest)
	}

	// TODO could be subtest/function
	t.Logf("Verify return of %q for unknown cluster", multicluster.ErrClusterNotFound)
	_, err := manager.GetCluster(t.Context(), UnknownClusterName)
	if !errors.Is(err, multicluster.ErrClusterNotFound) {
		t.Errorf("GetCluster(%q) = %v, want ErrClusterNotFound", UnknownClusterName, err)
	}

	// TODO entire index could be its own function
	t.Log("Index configmap data.data field")
	if err := manager.GetFieldIndexer().IndexField(t.Context(), &corev1.ConfigMap{}, "data",
		func(obj client.Object) []string {
			cm, ok := obj.(*corev1.ConfigMap)
			if !ok {
				return nil
			}
			if val, ok := cm.Data["data"]; ok {
				return []string{val}
			}
			return nil
		},
	); err != nil {
		t.Errorf("Failed to index configmap data.data field: %v", err)
	}
	t.Log("Field indexed, retrieving configmap by field")
	cms := &corev1.ConfigMapList{}
	timeoutCtx, timeoutCancel := context.WithTimeout(t.Context(), 10*time.Second) // TODO temporary, odd failure
	defer timeoutCancel()
	if err := clusterBefore.GetCache().List(timeoutCtx, cms, client.MatchingFields{"data": clusterBeforeTest}); err != nil {
		t.Fatalf("Failed to list configmaps in cluster %q: %v", clusterBeforeName, err)
	}
	if len(cms.Items) != 1 {
		t.Errorf("Expected 1 configmap in cluster %q, got %d", clusterBeforeName, len(cms.Items))
	}

	t.Log("Create new cluster after indexing field")
	clusterIndexName, clusterIndexCfg := createCluster(t, clusterGenerator)
	clusterIndexTest := "created after indexing"
	writeConfigMap(t, clusterIndexCfg, "index", clusterIndexTest)

	t.Logf("Retrieve cluster %q, created after indexing field", clusterIndexName)
	clusterIndex := getCluster(t, manager, clusterIndexName, clusterIndexCfg)
	cms = &corev1.ConfigMapList{}
	if err := clusterIndex.GetCache().List(t.Context(), cms, client.MatchingFields{"data": clusterIndexTest}); err != nil {
		t.Fatalf("Failed to list configmaps in cluster %q: %v", clusterIndexName, err)
	}
	if len(cms.Items) != 1 {
		t.Errorf("Expected 1 configmap in cluster %q, got %d", clusterIndexName, len(cms.Items))
	}
	// end index

	// TODO cluster removal can be its own function
	t.Log("Test that a cluster is removed when the backing cluster is gone")
	clusterCtx, clusterCancel := context.WithCancel(t.Context())
	clusterToRemoveName, clusterToRemoveCfg, err := clusterGenerator(clusterCtx, errorHandler(t, "removable cluster"))
	if err != nil {
		t.Fatalf("Failed to create cluster: %v", err)
	}

	t.Logf("Validate that cluster to remove is available, %q", clusterToRemoveName)
	getCluster(t, manager, clusterToRemoveName, clusterToRemoveCfg)

	t.Logf("Cancelling context for cluster %q", clusterToRemoveName)
	clusterCancel()
	eventually(t, func() error {
		_, err := manager.GetCluster(t.Context(), clusterToRemoveName)
		if err == nil {
			return errors.New("cluster still exists")
		}
		return nil
	}, WaitTimeout, PollInterval, "cluster %q not removed", clusterToRemoveName)
	// end cluster removal

	t.Log("Cancelling the manager context")
	managerCancel()
	// TODO check with manager and provider
}
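
// Example: invoking the acceptance tests from a provider's test suite.
//
// The following is only an illustrative sketch. How the provider and the
// multicluster manager are constructed is provider specific, and the
// helpers used below (newTestProvider, newTestManager, newTestGenerator)
// are hypothetical placeholders, not part of this package. Run starts and
// stops the manager itself; providers that need to be started must
// implement multicluster.ProviderRunnable (see the doc comment on Run).
//
//	func TestProviderAcceptance(t *testing.T) {
//		provider := newTestProvider(t)     // hypothetical: the provider under test
//		mgr := newTestManager(t, provider) // hypothetical: an unstarted mcmanager.Manager wired to the provider
//		acceptance.Run(t, newTestGenerator(t, provider), mgr)
//	}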