
Commit fd477e4

Author: Vivek Reddy (committed)
Commit message: removed commented code
1 parent a24388d · commit fd477e4

File tree

pkg/splunk/enterprise/clustermanager.go
pkg/splunk/enterprise/clustermaster.go
pkg/splunk/enterprise/searchheadcluster.go

3 files changed: 0 additions, 31 deletions


pkg/splunk/enterprise/clustermanager.go

Lines changed: 0 additions & 2 deletions
@@ -350,7 +350,6 @@ func PerformCmBundlePush(ctx context.Context, c splcommon.ControllerClient, cr *
 // Reconciler can be called for multiple reasons. If we are waiting on configMap update to happen,
 // do not increment the Retry Count unless the last check was 5 seconds ago.
 // This helps, to wait for the required time
-//eventPublisher, _ := newK8EventPublisher(c, cr)

 currentEpoch := time.Now().Unix()
 if cr.Status.BundlePushTracker.LastCheckInterval+5 > currentEpoch {
@@ -385,7 +384,6 @@ func PerformCmBundlePush(ctx context.Context, c splcommon.ControllerClient, cr *
 cr.Status.BundlePushTracker.NeedToPushManagerApps = false
 }

-//eventPublisher.Warning(ctx, "BundlePush", fmt.Sprintf("Bundle push failed %s", err.Error()))
 return err
 }

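Note: the context lines around the first removed call show how PerformCmBundlePush throttles its retries: it compares an epoch timestamp stored in the CR status against the current time and only proceeds once at least 5 seconds have passed. A minimal standalone sketch of that pattern, using hypothetical names rather than the operator's actual types, might look like this:

package main

import (
	"fmt"
	"time"
)

// bundlePushTracker loosely mirrors cr.Status.BundlePushTracker: it records the
// Unix time (seconds) of the last retry check. (Hypothetical type for illustration.)
type bundlePushTracker struct {
	LastCheckInterval int64
}

// shouldRetry returns true only when at least 5 seconds have elapsed since the
// previous check, so rapid reconcile re-entries do not burn retry attempts.
func shouldRetry(t *bundlePushTracker) bool {
	currentEpoch := time.Now().Unix()
	if t.LastCheckInterval+5 > currentEpoch {
		return false // too soon; wait for the required time
	}
	t.LastCheckInterval = currentEpoch
	return true
}

func main() {
	tracker := &bundlePushTracker{}
	fmt.Println(shouldRetry(tracker)) // true: first check is allowed
	fmt.Println(shouldRetry(tracker)) // false: called again immediately, still inside the 5-second window
}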
pkg/splunk/enterprise/clustermaster.go

Lines changed: 0 additions & 2 deletions
@@ -334,7 +334,6 @@ func PerformCmasterBundlePush(ctx context.Context, c splcommon.ControllerClient,
 // Reconciler can be called for multiple reasons. If we are waiting on configMap update to happen,
 // do not increment the Retry Count unless the last check was 5 seconds ago.
 // This helps, to wait for the required time
-//eventPublisher, _ := newK8EventPublisher(c, cr)

 currentEpoch := time.Now().Unix()
 if cr.Status.BundlePushTracker.LastCheckInterval+5 > currentEpoch {
@@ -369,7 +368,6 @@ func PerformCmasterBundlePush(ctx context.Context, c splcommon.ControllerClient,
 cr.Status.BundlePushTracker.NeedToPushMasterApps = false
 }

-//eventPublisher.Warning(ctx, "BundlePush", fmt.Sprintf("Bundle push failed %s", err.Error()))
 return err
 }

pkg/splunk/enterprise/searchheadcluster.go

Lines changed: 0 additions & 27 deletions
@@ -273,7 +273,6 @@ var newSearchHeadClusterPodManager = func(client splcommon.ControllerClient, log
 // ApplyShcSecret checks if any of the search heads have a different shc_secret from namespace scoped secret and changes it
 func ApplyShcSecret(ctx context.Context, mgr *searchHeadClusterPodManager, replicas int32, podExecClient splutil.PodExecClientImpl) error {

-//eventPublisher := ctx.Value("eventPublisher").(*K8EventPublisher)

 // Get namespace scoped secret
 namespaceSecret, err := splutil.ApplyNamespaceScopedSecretObject(ctx, mgr.c, mgr.cr.GetNamespace())
@@ -330,7 +329,6 @@ func ApplyShcSecret(ctx context.Context, mgr *searchHeadClusterPodManager, repli

 // If shc secret is different from namespace scoped secret change it
 if shcSecret != nsShcSecret {
-//eventPublisher.Normal(ctx, "ApplyShcSecret", fmt.Sprintf("shcSecret different from namespace scoped secret, changing shc secret for pod %s", shPodName))
 scopedLog.Info("shcSecret different from namespace scoped secret, changing shc secret")
 // If shc secret already changed, ignore
 if i < int32(len(mgr.cr.Status.ShcSecretChanged)) {
@@ -355,7 +353,6 @@ func ApplyShcSecret(ctx context.Context, mgr *searchHeadClusterPodManager, repli
 if err != nil {
 return err
 }
-//eventPublisher.Normal(ctx, "ApplyShcSecret", fmt.Sprintf("Restarted Splunk for pod %s", shPodName))
 scopedLog.Info("Restarted Splunk")

 // Set the shc_secret changed flag to true
@@ -383,7 +380,6 @@ func ApplyShcSecret(ctx context.Context, mgr *searchHeadClusterPodManager, repli
 if err != nil {
 return err
 }
-//eventPublisher.Normal(ctx, "ApplyShcSecret", fmt.Sprintf("admin password changed on the splunk instance of pod %s", shPodName))
 scopedLog.Info("admin password changed on the splunk instance of pod")

 // Get client for Pod and restart splunk instance on pod
@@ -392,7 +388,6 @@ func ApplyShcSecret(ctx context.Context, mgr *searchHeadClusterPodManager, repli
 if err != nil {
 return err
 }
-//eventPublisher.Normal(ctx, "ApplyShcSecret", fmt.Sprintf("Restarted Splunk for pod %s", shPodName))
 scopedLog.Info("Restarted Splunk")

 // Set the adminSecretChanged changed flag to true
@@ -409,7 +404,6 @@ func ApplyShcSecret(ctx context.Context, mgr *searchHeadClusterPodManager, repli
 return err
 }
 mgr.cr.Status.AdminPasswordChangedSecrets[podSecret.GetName()] = true
-//eventPublisher.Normal(ctx, "ApplyShcSecret", fmt.Sprintf("Secret mounted on pod %s added to map", shPodName))
 scopedLog.Info("Secret mounted on pod(to be changed) added to map")
 }
 }
@@ -426,15 +420,13 @@ func ApplyShcSecret(ctx context.Context, mgr *searchHeadClusterPodManager, repli
 for podSecretName := range mgr.cr.Status.AdminPasswordChangedSecrets {
 podSecret, err := splutil.GetSecretByName(ctx, mgr.c, mgr.cr.GetNamespace(), mgr.cr.GetName(), podSecretName)
 if err != nil {
-//eventPublisher.Warning(ctx, "GetSecretByName", fmt.Sprintf("could not read secret %s, reason - %v", podSecretName, err))
 return fmt.Errorf("could not read secret %s, reason - %v", podSecretName, err)
 }
 podSecret.Data["password"] = []byte(nsAdminSecret)
 _, err = splctrl.ApplySecret(ctx, mgr.c, podSecret)
 if err != nil {
 return err
 }
-//eventPublisher.Normal(ctx, "ApplyShcSecret", fmt.Sprintf("admin password changed on the secret mounted on pod %s", podSecretName))
 scopedLog.Info("admin password changed on the secret mounted on pod")
 }
 }
@@ -445,7 +437,6 @@ func ApplyShcSecret(ctx context.Context, mgr *searchHeadClusterPodManager, repli
 // Update for searchHeadClusterPodManager handles all updates for a statefulset of search heads
 func (mgr *searchHeadClusterPodManager) Update(ctx context.Context, c splcommon.ControllerClient, statefulSet *appsv1.StatefulSet, desiredReplicas int32) (enterpriseApi.Phase, error) {

-//eventPublisher := ctx.Value("eventPublisher").(*K8EventPublisher)
 // Assign client
 if mgr.c == nil {
 mgr.c = c
@@ -469,7 +460,6 @@ func (mgr *searchHeadClusterPodManager) Update(ctx context.Context, c splcommon.
 // update CR status with SHC information
 err = mgr.updateStatus(ctx, statefulSet)
 if err != nil || mgr.cr.Status.ReadyReplicas == 0 || !mgr.cr.Status.Initialized || !mgr.cr.Status.CaptainReady {
-//eventPublisher.Normal(ctx, "Update", fmt.Sprintf("Search head cluster is not ready %s", err))
 mgr.log.Info("Search head cluster is not ready", "reason ", err)
 return enterpriseApi.PhasePending, nil
 }
@@ -480,7 +470,6 @@ func (mgr *searchHeadClusterPodManager) Update(ctx context.Context, c splcommon.

 // PrepareScaleDown for searchHeadClusterPodManager prepares search head pod to be removed via scale down event; it returns true when ready
 func (mgr *searchHeadClusterPodManager) PrepareScaleDown(ctx context.Context, n int32) (bool, error) {
-//eventPublisher := ctx.Value("eventPublisher").(*K8EventPublisher)

 // start by quarantining the pod
 result, err := mgr.PrepareRecycle(ctx, n)
@@ -490,7 +479,6 @@ func (mgr *searchHeadClusterPodManager) PrepareScaleDown(ctx context.Context, n

 // pod is quarantined; decommission it
 memberName := GetSplunkStatefulsetPodName(SplunkSearchHead, mgr.cr.GetName(), n)
-//eventPublisher.Normal(ctx, "PrepareScaleDown", fmt.Sprintf("Removing member from search head cluster %s", memberName))
 mgr.log.Info("Removing member from search head cluster", "memberName", memberName)
 c := mgr.getClient(ctx, n)
 err = c.RemoveSearchHeadClusterMember()
@@ -504,14 +492,12 @@ func (mgr *searchHeadClusterPodManager) PrepareScaleDown(ctx context.Context, n

 // PrepareRecycle for searchHeadClusterPodManager prepares search head pod to be recycled for updates; it returns true when ready
 func (mgr *searchHeadClusterPodManager) PrepareRecycle(ctx context.Context, n int32) (bool, error) {
-//eventPublisher := ctx.Value("eventPublisher").(*K8EventPublisher)

 memberName := GetSplunkStatefulsetPodName(SplunkSearchHead, mgr.cr.GetName(), n)

 switch mgr.cr.Status.Members[n].Status {
 case "Up":
 // Detain search head
-//eventPublisher.Normal(ctx, "PrepareRecycle", fmt.Sprintf("Detaining search head cluster member %s", memberName))
 mgr.log.Info("Detaining search head cluster member", "memberName", memberName)
 c := mgr.getClient(ctx, n)
 podExecClient := splutil.GetPodExecClient(mgr.c, mgr.cr, getApplicablePodNameForK8Probes(mgr.cr, n))
@@ -520,7 +506,6 @@ func (mgr *searchHeadClusterPodManager) PrepareRecycle(ctx context.Context, n in
 // During the Recycle, our reconcile loop is entered multiple times. If the Pod is already down,
 // there is a chance of readiness probe failing, in which case, even the podExec will not be successful.
 // So, just log the message, and ignore the error.
-//eventPublisher.Normal(ctx, "SetProbeLevel", fmt.Sprintf("Setting Probe level failed. Probably, the Pod is already down %s", err.Error()))
 mgr.log.Info("Setting Probe level failed. Probably, the Pod is already down", "memberName", memberName)
 }

@@ -530,10 +515,8 @@ func (mgr *searchHeadClusterPodManager) PrepareRecycle(ctx context.Context, n in
 // Wait until active searches have drained
 searchesComplete := mgr.cr.Status.Members[n].ActiveHistoricalSearchCount+mgr.cr.Status.Members[n].ActiveRealtimeSearchCount == 0
 if searchesComplete {
-//eventPublisher.Normal(ctx, "PrepareRecycle", fmt.Sprintf("Detention complete %s", memberName))
 mgr.log.Info("Detention complete", "memberName", memberName)
 } else {
-//eventPublisher.Normal(ctx, "PrepareRecycle", fmt.Sprintf("Waiting for active searches to complete %s", memberName))
 mgr.log.Info("Waiting for active searches to complete", "memberName", memberName)
 }
 return searchesComplete, nil
@@ -549,7 +532,6 @@ func (mgr *searchHeadClusterPodManager) PrepareRecycle(ctx context.Context, n in

 // FinishRecycle for searchHeadClusterPodManager completes recycle event for search head pod; it returns true when complete
 func (mgr *searchHeadClusterPodManager) FinishRecycle(ctx context.Context, n int32) (bool, error) {
-//eventPublisher := ctx.Value("eventPublisher").(*K8EventPublisher)
 memberName := GetSplunkStatefulsetPodName(SplunkSearchHead, mgr.cr.GetName(), n)

 switch mgr.cr.Status.Members[n].Status {
@@ -559,7 +541,6 @@ func (mgr *searchHeadClusterPodManager) FinishRecycle(ctx context.Context, n int

 case "ManualDetention":
 // release from detention
-//eventPublisher.Normal(ctx, "FinishRecycle", fmt.Sprintf("Releasing search head cluster member from detention %s", memberName))
 mgr.log.Info("Releasing search head cluster member from detention", "memberName", memberName)
 c := mgr.getClient(ctx, n)
 return false, c.SetSearchHeadDetention(false)
@@ -571,7 +552,6 @@ func (mgr *searchHeadClusterPodManager) FinishRecycle(ctx context.Context, n int

 // getClient for searchHeadClusterPodManager returns a SplunkClient for the member n
 func (mgr *searchHeadClusterPodManager) getClient(ctx context.Context, n int32) *splclient.SplunkClient {
-//eventPublisher := ctx.Value("eventPublisher").(*K8EventPublisher)
 reqLogger := log.FromContext(ctx)
 scopedLog := reqLogger.WithName("searchHeadClusterPodManager.getClient").WithValues("name", mgr.cr.GetName(), "namespace", mgr.cr.GetNamespace())

@@ -585,7 +565,6 @@ func (mgr *searchHeadClusterPodManager) getClient(ctx context.Context, n int32)
 // Retrieve admin password from Pod
 adminPwd, err := splutil.GetSpecificSecretTokenFromPod(ctx, mgr.c, memberName, mgr.cr.GetNamespace(), "password")
 if err != nil {
-//eventPublisher.Warning(ctx, "GetSpecificSecretTokenFromPod", fmt.Sprintf("Couldn't retrieve the admin password from Pod %s", memberName))
 scopedLog.Error(err, "Couldn't retrieve the admin password from Pod")
 }

@@ -606,7 +585,6 @@ var GetSearchHeadCaptainInfo = func(ctx context.Context, mgr *searchHeadClusterP

 // updateStatus for searchHeadClusterPodManager uses the REST API to update the status for a SearcHead custom resource
 func (mgr *searchHeadClusterPodManager) updateStatus(ctx context.Context, statefulSet *appsv1.StatefulSet) error {
-//eventPublisher := ctx.Value("eventPublisher").(*K8EventPublisher)
 // populate members status using REST API to get search head cluster member info
 mgr.cr.Status.Captain = ""
 mgr.cr.Status.CaptainReady = false
@@ -616,7 +594,6 @@ func (mgr *searchHeadClusterPodManager) updateStatus(ctx context.Context, statef
 }
 gotCaptainInfo := false
 for n := int32(0); n < statefulSet.Status.Replicas; n++ {
-//c := mgr.getClient(ctx, n)
 memberName := GetSplunkStatefulsetPodName(SplunkSearchHead, mgr.cr.GetName(), n)
 memberStatus := enterpriseApi.SearchHeadClusterMemberStatus{Name: memberName}
 memberInfo, err := GetSearchHeadClusterMemberInfo(ctx, mgr, n)
@@ -627,7 +604,6 @@ func (mgr *searchHeadClusterPodManager) updateStatus(ctx context.Context, statef
 memberStatus.ActiveHistoricalSearchCount = memberInfo.ActiveHistoricalSearchCount
 memberStatus.ActiveRealtimeSearchCount = memberInfo.ActiveRealtimeSearchCount
 } else {
-//eventPublisher.Warning(ctx, "GetSearchHeadClusterMemberInfo", fmt.Sprintf("Unable to retrieve search head cluster member info %s", err))
 mgr.log.Error(err, "Unable to retrieve search head cluster member info", "memberName", memberName)
 }

@@ -642,7 +618,6 @@ func (mgr *searchHeadClusterPodManager) updateStatus(ctx context.Context, statef
 mgr.cr.Status.MaintenanceMode = captainInfo.MaintenanceMode
 gotCaptainInfo = true
 } else {
-//eventPublisher.Warning(ctx, "GetSearchHeadCaptainInfo", fmt.Sprintf("Unable to retrieve captain info %s", err))
 mgr.cr.Status.CaptainReady = false
 mgr.log.Error(err, "Unable to retrieve captain info", "memberName", memberName)
 }
@@ -709,15 +684,13 @@ func validateSearchHeadClusterSpec(ctx context.Context, c splcommon.ControllerCl

 // helper function to get the list of SearchHeadCluster types in the current namespace
 func getSearchHeadClusterList(ctx context.Context, c splcommon.ControllerClient, cr splcommon.MetaObject, listOpts []client.ListOption) (enterpriseApi.SearchHeadClusterList, error) {
-//eventPublisher := ctx.Value("eventPublisher").(*K8EventPublisher)
 reqLogger := log.FromContext(ctx)
 scopedLog := reqLogger.WithName("getSearchHeadClusterList").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace())

 objectList := enterpriseApi.SearchHeadClusterList{}

 err := c.List(context.TODO(), &objectList, listOpts...)
 if err != nil {
-//eventPublisher.Warning(ctx, "SearchHeadCluster types not found in namespace", fmt.Sprintf("Couldn't get SearchHeadCluster types %s", err))
 scopedLog.Error(err, "SearchHeadCluster types not found in namespace", "namsespace", cr.GetNamespace())
 return objectList, err
 }

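Note: the PrepareRecycle hunks above gate recycling on a detained member having drained all of its active searches (historical plus realtime counts reaching zero). A minimal standalone sketch of that drain check, with hypothetical names rather than the operator's actual types:

package main

import "fmt"

// memberStatus loosely mirrors the per-member counters kept in the CR status.
// (Hypothetical type for illustration.)
type memberStatus struct {
	Status                      string
	ActiveHistoricalSearchCount int
	ActiveRealtimeSearchCount   int
}

// searchesDrained reports whether a detained member has no searches left to
// finish and can therefore be recycled safely.
func searchesDrained(m memberStatus) bool {
	return m.ActiveHistoricalSearchCount+m.ActiveRealtimeSearchCount == 0
}

func main() {
	busy := memberStatus{Status: "ManualDetention", ActiveHistoricalSearchCount: 2}
	idle := memberStatus{Status: "ManualDetention"}
	fmt.Println(searchesDrained(busy)) // false: still waiting for active searches to complete
	fmt.Println(searchesDrained(idle)) // true: detention complete, safe to recycle
}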