// Copyright (c) 2020-2026 Tigera, Inc. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
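// Package initializer implements the log-storage-initializing-controller, which
// defaults and validates the LogStorage resource and prepares the namespaces
// that the remaining log storage controllers deploy into.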
package initializer

import (
	"context"
	"fmt"

	"github.com/go-logr/logr"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/apimachinery/pkg/runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	logf "sigs.k8s.io/controller-runtime/pkg/log"
	"sigs.k8s.io/controller-runtime/pkg/manager"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"

	operatorv1 "github.com/tigera/operator/api/v1"
	"github.com/tigera/operator/pkg/controller/options"
	"github.com/tigera/operator/pkg/controller/status"
	"github.com/tigera/operator/pkg/controller/utils"
	"github.com/tigera/operator/pkg/ctrlruntime"
	"github.com/tigera/operator/pkg/render"
	"github.com/tigera/operator/pkg/render/logstorage/kibana"
)

var log = logf.Log.WithName("controller_logstorage")
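
// Names under which this controller and the other log storage sub-controllers
// report status via TigeraStatus objects.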
const (
	DefaultElasticsearchStorageClass = "tigera-elasticsearch"
	TigeraStatusName                 = "log-storage"
	defaultEckOperatorMemorySetting  = "512Mi"

	TigeraStatusLogStorageKubeController = "log-storage-kubecontrollers"
	TigeraStatusLogStorageAccess         = "log-storage-access"
	TigeraStatusLogStorageElastic        = "log-storage-elastic"
	TigeraStatusLogStorageSecrets        = "log-storage-secrets"
	TigeraStatusLogStorageUsers          = "log-storage-users"
	TigeraStatusLogStorageESMetrics      = "log-storage-esmetrics"
	TigeraStatusLogStorageDashboards     = "log-storage-dashboards"
)

// Add creates a new LogStorage Controller and adds it to the Manager. The Manager will set fields on the Controller
// and Start it when the Manager is Started.
func Add(mgr manager.Manager, opts options.ControllerOptions) error {
	if !opts.EnterpriseCRDExists {
		return nil
	}

	// Create the reconciler.
	r := &LogStorageInitializer{
		client:      mgr.GetClient(),
		scheme:      mgr.GetScheme(),
		multiTenant: opts.MultiTenant,
		externalES:  opts.ElasticExternal,
		status:      status.New(mgr.GetClient(), TigeraStatusName, opts.KubernetesVersion),
	}
	r.status.Run(opts.ShutdownContext)

	// Create a controller using the reconciler and register it with the manager to receive reconcile calls.
	c, err := ctrlruntime.NewController("log-storage-initializing-controller", mgr, controller.Options{Reconciler: r})
	if err != nil {
		return err
	}

	// Configure watches for the operator.tigera.io APIs this controller cares about.
	if err = c.WatchObject(&operatorv1.LogStorage{}, &handler.EnqueueRequestForObject{}); err != nil {
		return fmt.Errorf("log-storage-initializing-controller failed to watch LogStorage resource: %w", err)
	}
	if err = c.WatchObject(&operatorv1.Installation{}, &handler.EnqueueRequestForObject{}); err != nil {
		return fmt.Errorf("log-storage-initializing-controller failed to watch Installation resource: %w", err)
	}
	return nil
}

var _ reconcile.Reconciler = &LogStorageInitializer{}

// LogStorageInitializer is responsible for performing validation and defaulting on the LogStorage object,
// and creating the base namespaces for other controllers to deploy into. It updates the status of the LogStorage
// object to indicate that it has completed its work to other controllers.
type LogStorageInitializer struct {
	client      client.Client
	scheme      *runtime.Scheme
	status      status.StatusManager
	provider    operatorv1.Provider
	multiTenant bool
	externalES  bool
}

// FillDefaults populates default values onto a LogStorage object.
func FillDefaults(opr *operatorv1.LogStorage) {
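	// Retention defaults are expressed in days: high-volume log types (flows,
	// DNS, BGP) default to 8 days, while audit reports, snapshots, and
	// compliance reports default to 91 days.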
	if opr.Spec.Retention == nil {
		opr.Spec.Retention = &operatorv1.Retention{}
	}
	if opr.Spec.Retention.Flows == nil {
		var fr int32 = 8
		opr.Spec.Retention.Flows = &fr
	}
	if opr.Spec.Retention.AuditReports == nil {
		var arr int32 = 91
		opr.Spec.Retention.AuditReports = &arr
	}
	if opr.Spec.Retention.Snapshots == nil {
		var sr int32 = 91
		opr.Spec.Retention.Snapshots = &sr
	}
	if opr.Spec.Retention.ComplianceReports == nil {
		var crr int32 = 91
		opr.Spec.Retention.ComplianceReports = &crr
	}
	if opr.Spec.Retention.DNSLogs == nil {
		var dlr int32 = 8
		opr.Spec.Retention.DNSLogs = &dlr
	}
	if opr.Spec.Retention.BGPLogs == nil {
		var bgp int32 = 8
		opr.Spec.Retention.BGPLogs = &bgp
	}
	if opr.Spec.Indices == nil {
		opr.Spec.Indices = &operatorv1.Indices{}
	}
	if opr.Spec.Indices.Replicas == nil {
		var replicas int32 = render.DefaultElasticsearchReplicas
		opr.Spec.Indices.Replicas = &replicas
	}
	if opr.Spec.StorageClassName == "" {
		opr.Spec.StorageClassName = DefaultElasticsearchStorageClass
	}
	if opr.Spec.Nodes == nil {
		opr.Spec.Nodes = &operatorv1.Nodes{Count: 1}
	}
	if opr.Spec.ComponentResources == nil {
		limits := corev1.ResourceList{}
		requests := corev1.ResourceList{}
		limits[corev1.ResourceMemory] = resource.MustParse(defaultEckOperatorMemorySetting)
		requests[corev1.ResourceMemory] = resource.MustParse(defaultEckOperatorMemorySetting)
		opr.Spec.ComponentResources = []operatorv1.LogStorageComponentResource{
			{
				ComponentName: operatorv1.ComponentNameECKOperator,
				ResourceRequirements: &corev1.ResourceRequirements{
					Limits:   limits,
					Requests: requests,
				},
			},
		}
	}
}
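
// validateLogStorage checks a defaulted LogStorage spec for configuration
// errors. It returns a fatal error for invalid configurations, and a warning
// string for configurations that are legal but potentially problematic.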
func validateLogStorage(spec *operatorv1.LogStorageSpec) (error, string) {
	err, warning := validateReplicasForNodeCount(spec)
	if err != nil {
		return err, ""
	}
	if err := validateComponentResources(spec); err != nil {
		return err, ""
	}
	return nil, warning
}
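
// validateReplicasForNodeCount verifies that the requested number of index
// replicas can be satisfied by the configured Elasticsearch node count: a
// replica shard is never allocated to the same node as its primary, so the
// node count must exceed the replica count. For example, with
// spec.nodes.count=2, spec.indices.replicas may be at most 1, and that
// combination still triggers the PodDisruptionBudget warning below.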
func validateReplicasForNodeCount(spec *operatorv1.LogStorageSpec) (error, string) {
	if spec.Nodes == nil || spec.Indices == nil || spec.Indices.Replicas == nil {
		return nil, ""
	}
	replicas := int(*spec.Indices.Replicas)
	nodeCount := int(spec.Nodes.Count)
	if replicas > 0 && nodeCount <= replicas {
		return fmt.Errorf("LogStorage spec.indices.replicas (%d) must be less than spec.nodes.count (%d); replica shards cannot be allocated when there are not enough nodes. For a single-node Elasticsearch cluster, set spec.indices.replicas to 0", replicas, nodeCount), ""
	}
	if replicas > 0 && nodeCount == replicas+1 {
		return nil, fmt.Sprintf("LogStorage spec.nodes.count (%d) is only 1 more than spec.indices.replicas (%d); this may prevent voluntary pod evictions (e.g., node repaving) due to PodDisruptionBudget constraints. If this is expected for your environment, no action is needed. Otherwise, consider setting spec.nodes.count to at least %d", nodeCount, replicas, replicas+2)
	}
	return nil, ""
}
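
// validateComponentResources ensures that spec.ComponentResources is set and
// names only the ECKOperator component, currently the sole component whose
// resource requirements may be overridden.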
func validateComponentResources(spec *operatorv1.LogStorageSpec) error {
	if spec.ComponentResources == nil {
		return fmt.Errorf("LogStorage spec.ComponentResources is nil %+v", spec)
	}
	// Currently the only supported component is ECKOperator.
	if len(spec.ComponentResources) > 1 {
		return fmt.Errorf("LogStorage spec.ComponentResources contains unsupported components %+v", spec.ComponentResources)
	}
	if spec.ComponentResources[0].ComponentName != operatorv1.ComponentNameECKOperator {
		return fmt.Errorf("LogStorage spec.ComponentResources.ComponentName %s is not supported", spec.ComponentResources[0].ComponentName)
	}
	return nil
}
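
// Reconcile fetches the default LogStorage instance, verifies that this is not
// a managed cluster, applies defaults, validates the result, renders the
// namespaces that downstream log storage controllers deploy into, and finally
// patches the defaulted spec and a Ready status back to the API server.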
func (r *LogStorageInitializer) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {
	reqLogger := log.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name)
	reqLogger.Info("Reconciling LogStorage")

	ls := &operatorv1.LogStorage{}
	key := utils.DefaultTSEEInstanceKey
	err := r.client.Get(ctx, key, ls)
	if errors.IsNotFound(err) {
		r.status.OnCRNotFound()
		return reconcile.Result{}, nil
	} else if err != nil {
		// An actual error occurred when attempting to query the LogStorage API.
		r.status.SetDegraded(operatorv1.ResourceReadError, "An error occurred while querying LogStorage", err, reqLogger)
		return reconcile.Result{}, err
	}

	// We found the LogStorage instance.
	r.status.OnCRFound()

	// Get the Installation resource.
	_, install, err := utils.GetInstallation(context.Background(), r.client)
	if err != nil {
		if errors.IsNotFound(err) {
			r.status.SetDegraded(operatorv1.ResourceNotFound, "Installation not found", err, reqLogger)
			return reconcile.Result{}, err
		}
		r.status.SetDegraded(operatorv1.ResourceReadError, "An error occurred while querying Installation", err, reqLogger)
		return reconcile.Result{}, err
	}

	// Check if there is a management cluster connection. ManagementClusterConnection is a managed-cluster-only resource.
	if err = r.client.Get(ctx, utils.DefaultTSEEInstanceKey, &operatorv1.ManagementClusterConnection{}); err == nil {
		// LogStorage isn't valid for managed clusters.
		r.setConditionDegraded(ctx, ls, reqLogger)
		r.status.SetDegraded(operatorv1.InvalidConfigurationError, "LogStorage is not valid for a managed cluster", nil, reqLogger)
		return reconcile.Result{}, nil
	} else if !errors.IsNotFound(err) {
		// An actual error occurred when attempting to query the ManagementClusterConnection API.
		r.status.SetDegraded(operatorv1.ResourceReadError, "An error occurred while querying ManagementClusterConnection", err, reqLogger)
		return reconcile.Result{}, err
	}

	// Create a snapshot of the pre-defaulting LogStorage state to use when performing a
	// merge patch to update the resource later.
	preDefaultingPatchFrom := client.MergeFrom(ls.DeepCopy())

	// Default and validate the object.
	FillDefaults(ls)
	err, warning := validateLogStorage(&ls.Spec)
	if err != nil {
		// Invalid - mark it as such and return.
		r.setConditionDegraded(ctx, ls, reqLogger)
		r.status.SetDegraded(operatorv1.ResourceValidationError, "An error occurred while validating LogStorage", err, reqLogger)
		return reconcile.Result{}, err
	}
	if warning != "" {
		reqLogger.Info(warning)
	}

	pullSecrets, err := utils.GetNetworkingPullSecrets(install, r.client)
	if err != nil {
		r.status.SetDegraded(operatorv1.ResourceReadError, "An error occurred while retrieving the pull secrets", err, reqLogger)
		return reconcile.Result{}, err
	}

	// Before we can create secrets, we need to ensure the tigera-elasticsearch namespace exists.
	hdler := utils.NewComponentHandler(reqLogger, r.client, r.scheme, ls)
	components := []render.Component{render.NewSetup(&render.SetUpConfiguration{
		OpenShift:       r.provider.IsOpenShift(),
		Installation:    install,
		PullSecrets:     pullSecrets,
		Namespace:       render.ElasticsearchNamespace,
		PSS:             r.elasticsearchPSS(),
		CreateNamespace: true,
	})}

	// Multi-tenant clusters do not get Kibana, so namespace creation can be skipped.
	if !r.multiTenant {
		components = append(components, render.NewSetup(&render.SetUpConfiguration{
			OpenShift:       r.provider.IsOpenShift(),
			Installation:    install,
			PullSecrets:     pullSecrets,
			Namespace:       kibana.Namespace,
			PSS:             render.PSSBaseline,
			CreateNamespace: true,
		}))
	}
	for _, component := range components {
		if err = hdler.CreateOrUpdateOrDelete(ctx, component, r.status); err != nil {
			r.status.SetDegraded(operatorv1.ResourceUpdateError, "Error creating / updating resource", err, reqLogger)
			return reconcile.Result{}, err
		}
	}

	// Write the LogStorage back to the datastore with its newly applied defaults.
	if err = r.client.Patch(ctx, ls, preDefaultingPatchFrom); err != nil {
		r.status.SetDegraded(operatorv1.ResourcePatchError, "Failed to write defaults", err, reqLogger)
		return reconcile.Result{}, err
	}
	if err = r.setConditionReady(ctx, ls, reqLogger); err != nil {
		r.status.SetDegraded(operatorv1.ResourceUpdateError, "Failed to update LogStorage status", err, reqLogger)
		return reconcile.Result{}, err
	}
	defer r.status.SetMetaData(&ls.ObjectMeta)

	// Mark the status as available.
	r.status.ReadyToMonitor()
	r.status.ClearDegraded()
	return reconcile.Result{}, nil
}
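
// elasticsearchPSS returns the pod security standard to enforce on the
// tigera-elasticsearch namespace: baseline when Elasticsearch runs externally,
// privileged when the operator manages Elasticsearch pods in-cluster.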
func (r *LogStorageInitializer) elasticsearchPSS() render.PodSecurityStandard {
	if r.externalES {
		return render.PSSBaseline
	}
	return render.PSSPrivileged
}
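
// setConditionReady marks the LogStorage object as Ready in its status
// subresource, signalling to the other log storage controllers that defaulting
// and validation have completed.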
func (r *LogStorageInitializer) setConditionReady(ctx context.Context, ls *operatorv1.LogStorage, log logr.Logger) error {
	ls.Status.State = operatorv1.TigeraStatusReady
	if err := r.client.Status().Update(ctx, ls); err != nil {
		log.Error(err, "Failed to update LogStorage status")
		return err
	}
	return nil
}
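
// setConditionDegraded marks the LogStorage object as Degraded. Update errors
// are logged but not returned, since callers are already on a failure path.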
func (r *LogStorageInitializer) setConditionDegraded(ctx context.Context, ls *operatorv1.LogStorage, log logr.Logger) {
	ls.Status.State = operatorv1.TigeraStatusDegraded
	if err := r.client.Status().Update(ctx, ls); err != nil {
		log.Error(err, "Failed to update LogStorage status")
	}
}