forked from openshift/compliance-operator
-
Notifications
You must be signed in to change notification settings - Fork 38
Expand file tree
/
Copy pathmain_entry.go
More file actions
218 lines (185 loc) · 6.69 KB
/
main_entry.go
File metadata and controls
218 lines (185 loc) · 6.69 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
package framework
import (
"context"
"flag"
"fmt"
"log"
"os"
"time"
"github.com/go-logr/logr"
ctrllog "sigs.k8s.io/controller-runtime/pkg/log"
compv1alpha1 "github.com/ComplianceAsCode/compliance-operator/pkg/apis/compliance/v1alpha1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// NewFramework parses the test command-line flags, builds the global
// Framework instance, and silences controller-runtime's default logger.
// It terminates the process if the framework cannot be constructed.
func NewFramework() *Framework {
	opts := &frameworkOpts{}
	opts.addToFlagSet(flag.CommandLine)

	// Only register the kubeconfig flag ourselves when nothing else
	// (e.g. another imported package) has already claimed it.
	kubeconfigFlag := flag.Lookup(KubeConfigFlag)
	if kubeconfigFlag == nil {
		flag.StringVar(&opts.kubeconfigPath, KubeConfigFlag, "", "path to kubeconfig")
	}
	flag.Parse()
	// If the flag pre-existed, read its parsed value back into our options.
	if kubeconfigFlag != nil {
		opts.kubeconfigPath = kubeconfigFlag.Value.String()
	}

	framework, err := newFramework(opts)
	if err != nil {
		log.Fatalf("Failed to create framework: %v", err)
	}
	Global = framework

	// This is required because controller-runtime expects its consumers to
	// set a logger through log.SetLogger within 30 seconds of the program's
	// initialization. If not set, the entire debug stack is printed as an
	// error, see: https://github.com/kubernetes-sigs/controller-runtime/blob/ed8be90/pkg/log/log.go#L59
	// Since we have our own logging and don't care about controller-runtime's
	// logger, we configure its logger to do nothing.
	ctrllog.SetLogger(logr.New(ctrllog.NullLogSink{}))
	return framework
}
// CleanUpOnError returns the framework's cleanupOnError setting,
// which presumably controls whether test resources are removed even
// when a test fails — confirm against the flag definition in frameworkOpts.
func (f *Framework) CleanUpOnError() bool {
	return f.cleanupOnError
}
// SetUp prepares the cluster for the e2e suite: it switches to the project
// root, ensures the test namespace exists, creates the operator's global and
// namespaced manifests, initializes metrics resources, waits for the operator
// deployment and the default ProfileBundles to become ready, and creates the
// scan settings and MachineConfigPools the tests rely on. It returns a
// wrapped error describing the first step that failed.
func (f *Framework) SetUp() error {
	log.Printf("switching to %s directory to setup and execute tests", f.projectRoot)
	if err := os.Chdir(f.projectRoot); err != nil {
		return fmt.Errorf("failed to change directory to project root: %w", err)
	}
	if err := f.ensureTestNamespaceExists(); err != nil {
		return fmt.Errorf("unable to create or use namespace %s for testing: %w", f.OperatorNamespace, err)
	}
	log.Printf("creating cluster resources in %s", f.globalManPath)
	// Distinct messages for the global vs namespaced manifest steps so a
	// failure report identifies which one broke.
	if err := f.createFromYAMLFile(&f.globalManPath); err != nil {
		return fmt.Errorf("failed to setup global test resources: %w", err)
	}
	if err := f.addFrameworks(); err != nil {
		return err
	}
	if err := f.replaceNamespaceFromManifest(); err != nil {
		return err
	}
	log.Printf("creating namespaced resources in %s", *f.NamespacedManPath)
	if err := f.createFromYAMLFile(f.NamespacedManPath); err != nil {
		return fmt.Errorf("failed to setup namespaced test resources: %w", err)
	}
	// Use %w (not %v) so callers can unwrap with errors.Is/As, consistent
	// with every other wrap in this method.
	if err := f.initializeMetricsTestResources(); err != nil {
		return fmt.Errorf("failed to initialize cluster resources for metrics: %w", err)
	}
	// The operator deployment can take a long time on a fresh cluster, so
	// poll generously before giving up.
	const (
		retryInterval = time.Second * 5
		timeout       = time.Minute * 30
	)
	if err := f.WaitForDeployment("compliance-operator", 1, retryInterval, timeout); err != nil {
		return fmt.Errorf("timed out waiting for deployment to become available: %w", err)
	}
	// Both default ProfileBundles must parse to a valid datastream before
	// any test runs.
	if err := f.WaitForProfileBundleStatus("rhcos4", compv1alpha1.DataStreamValid); err != nil {
		return err
	}
	if err := f.WaitForProfileBundleStatus("ocp4", compv1alpha1.DataStreamValid); err != nil {
		return err
	}
	if err := f.updateScanSettingsForDebug(); err != nil {
		return fmt.Errorf("failed to set scan setting bindings to debug: %w", err)
	}
	if err := f.ensureE2EScanSettings(); err != nil {
		return fmt.Errorf("failed to configure scan settings for tests: %w", err)
	}
	if err := f.createMachineConfigPool("e2e"); err != nil {
		return fmt.Errorf("failed to create Machine Config Pool %s: %w", "e2e", err)
	}
	if err := f.createInvalidMachineConfigPool("e2e-invalid"); err != nil {
		return fmt.Errorf("failed to create Machine Config Pool %s: %w", "e2e-invalid", err)
	}
	return nil
}
// TearDown performs any tasks necessary to cleanup resources leftover from testing
// and assumes a specific order. All namespaced resources must be cleaned up before
// deleting the cluster-wide resources, like roles, service accounts, or the deployment.
// If we don't properly cleanup resources before deleting CRDs, it leaves resources in a
// terminating state, making them harder to cleanup.
func (f *Framework) TearDown() error {
	// Make sure all scans are cleaned up before we delete the CRDs. Scans should be cleaned up
	// because they're owned by ScanSettingBindings or ScanSuites, which should be cleaned up
	// by each individual test either directly or through deferred cleanup. If the test fails
	// because there are scans that haven't been cleaned up, we could have a bug in the
	// tests.
	err := f.waitForScanCleanup()
	if err != nil {
		return fmt.Errorf("failed waiting for scans to clean up: %w", err)
	}
	log.Printf("cleaning up Profile Bundles")
	err = f.cleanUpProfileBundle("rhcos4")
	if err != nil {
		return fmt.Errorf("failed to cleanup rhcos4 profile bundle: %w", err)
	}
	err = f.cleanUpProfileBundle("ocp4")
	if err != nil {
		return fmt.Errorf("failed to cleanup ocp4 profile bundle: %w", err)
	}
	err = f.deleteScanSettings("e2e-default")
	if err != nil {
		return fmt.Errorf("failed to cleanup e2e-default scan settings: %w", err)
	}
	err = f.deleteScanSettings("e2e-default-auto-apply")
	if err != nil {
		return fmt.Errorf("failed to cleanup e2e-default-auto-apply scan settings: %w", err)
	}
	// Restore the original node labels before removing the pool that
	// targeted them.
	err = f.restoreNodeLabelsForPool("e2e")
	if err != nil {
		return fmt.Errorf("failed to restore node labels for pool e2e: %w", err)
	}
	err = f.cleanUpMachineConfigPool("e2e")
	if err != nil {
		return fmt.Errorf("failed to cleanup machine config pool e2e: %w", err)
	}
	err = f.cleanUpMachineConfigPool("e2e-invalid")
	if err != nil {
		return fmt.Errorf("failed to cleanup machine config pool e2e-invalid: %w", err)
	}
	// Clean up these resources explicitly in this method because it's guaranteed to run
	// after all the tests execute. It's also safer to clean up resources that require
	// a specific cleanup order explicitly than to rely on Go's defer function. Defer
	// is implemented as a stack, and doesn't guarantee safety across go routines
	// (which may be the case with parallel tests), making it possible for some
	// resources to get cleaned up before others. We don't want that to happen with
	// cluster resources like CRDs, because it will orphan custom resource instances
	// that haven't been cleaned up, yet.
	log.Printf("cleaning up namespaced resources in %s\n", f.OperatorNamespace)
	err = f.cleanUpFromYAMLFile(f.NamespacedManPath)
	if err != nil {
		return fmt.Errorf("failed to cleanup namespaced resources: %w", err)
	}
	log.Println("cleaning up cluster resources")
	err = f.cleanUpFromYAMLFile(&f.globalManPath)
	if err != nil {
		return fmt.Errorf("failed to cleanup cluster resources: %w", err)
	}
	log.Printf("cleaning up namespace %s\n", f.OperatorNamespace)
	err = f.KubeClient.CoreV1().Namespaces().Delete(context.TODO(), f.OperatorNamespace, metav1.DeleteOptions{})
	if err != nil {
		return fmt.Errorf("failed to cleanup namespace %s: %w", f.OperatorNamespace, err)
	}
	// Verify namespace deletion completes successfully
	// This ensures that all resources, including those with finalizers, are properly cleaned up
	// and that the operator can be deleted without resources getting stuck in terminating state
	log.Printf("Verifying namespace %s deletion \n", f.OperatorNamespace)
	err = f.waitForNamespaceDeletion(f.OperatorNamespace, time.Second*5, time.Minute*5)
	if err != nil {
		return fmt.Errorf("namespace %s deletion did not complete: %w", f.OperatorNamespace, err)
	}
	log.Printf("Namespace %s successfully deleted\n", f.OperatorNamespace)
	return nil
}