Commit 301462c

remove streams delete and extend unit tests (#2737)
1 parent 4929dd2 commit 301462c

2 files changed: 105 additions, 63 deletions


pkg/cluster/streams.go

Lines changed: 1 addition & 9 deletions
@@ -453,15 +453,6 @@ func (c *Cluster) syncStream(appId string) error {
 		if stream.Spec.ApplicationId != appId {
 			continue
 		}
-		if streamExists {
-			c.logger.Warningf("more than one event stream with applicationId %s found, delete it", appId)
-			if err = c.KubeClient.FabricEventStreams(stream.ObjectMeta.Namespace).Delete(context.TODO(), stream.ObjectMeta.Name, metav1.DeleteOptions{}); err != nil {
-				c.logger.Errorf("could not delete event stream %q with applicationId %s: %v", stream.ObjectMeta.Name, appId, err)
-			} else {
-				c.logger.Infof("redundant event stream %q with applicationId %s has been successfully deleted", stream.ObjectMeta.Name, appId)
-			}
-			continue
-		}
 		streamExists = true
 		desiredStreams := c.generateFabricEventStream(appId)
 		if !reflect.DeepEqual(stream.ObjectMeta.OwnerReferences, desiredStreams.ObjectMeta.OwnerReferences) {
@@ -484,6 +475,7 @@ func (c *Cluster) syncStream(appId string) error {
 			c.Streams[appId] = updatedStream
 			c.logger.Infof("event streams %q with applicationId %s have been successfully updated", updatedStream.Name, appId)
 		}
+		break
 	}

 	if !streamExists {
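
For orientation, the loop in syncStream now syncs the first FabricEventStream whose applicationId matches and then breaks out, instead of deleting any additional matches in place. Below is a minimal, self-contained sketch of that control flow only; the eventStream type and syncFirstMatching helper are simplified stand-ins for illustration, not the operator's actual types, and the owner-reference/spec comparison between the two hunks is elided.

// Simplified stand-ins; not the operator's real types.
package main

import "fmt"

type eventStream struct {
	Name          string
	ApplicationId string
}

// syncFirstMatching mirrors the new loop shape in syncStream: it marks that a
// matching stream exists, syncs the first match (elided here), and then breaks
// instead of deleting further streams with the same application id.
func syncFirstMatching(streams []eventStream, appId string) (string, bool) {
	streamExists := false
	synced := ""
	for _, stream := range streams {
		if stream.ApplicationId != appId {
			continue
		}
		streamExists = true
		// compare owner references and spec, update if they diverge (elided)
		synced = stream.Name
		break // new behavior: stop after the first match, leave extra streams alone
	}
	return synced, streamExists
}

func main() {
	streams := []eventStream{
		{Name: "cluster-streams-1", ApplicationId: "app1"},
		{Name: "cluster-streams-2", ApplicationId: "app1"}, // no longer deleted by syncStream
	}
	synced, found := syncFirstMatching(streams, "app1")
	fmt.Printf("synced=%q exists=%v\n", synced, found)
}

Redundant streams with the same applicationId are thus left untouched by this loop; removal of streams that disappear from the manifest still goes through cleanupRemovedStreams and deleteStreams, which the extended tests below exercise.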

pkg/cluster/streams_test.go

Lines changed: 104 additions & 54 deletions
@@ -90,7 +90,7 @@ var (
 			Namespace: namespace,
 			Labels: map[string]string{
 				"application":  "spilo",
-				"cluster-name": fmt.Sprintf("%s-2", clusterName),
+				"cluster-name": clusterName,
 				"team":         "acid",
 			},
 			OwnerReferences: []metav1.OwnerReference{
@@ -494,14 +494,13 @@ func TestSyncStreams(t *testing.T) {
 			OpConfig: config.Config{
 				PodManagementPolicy: "ordered_ready",
 				Resources: config.Resources{
-					ClusterLabels:         map[string]string{"application": "spilo"},
-					ClusterNameLabel:      "cluster-name",
-					DefaultCPURequest:     "300m",
-					DefaultCPULimit:       "300m",
-					DefaultMemoryRequest:  "300Mi",
-					DefaultMemoryLimit:    "300Mi",
-					EnableOwnerReferences: util.True(),
-					PodRoleLabel:          "spilo-role",
+					ClusterLabels:        map[string]string{"application": "spilo"},
+					ClusterNameLabel:     "cluster-name",
+					DefaultCPURequest:    "300m",
+					DefaultCPULimit:      "300m",
+					DefaultMemoryRequest: "300Mi",
+					DefaultMemoryLimit:   "300Mi",
+					PodRoleLabel:         "spilo-role",
 				},
 			},
 		}, client, pg, logger, eventRecorder)
@@ -514,33 +513,17 @@ func TestSyncStreams(t *testing.T) {
 	err = cluster.syncStream(appId)
 	assert.NoError(t, err)

-	// create a second stream with same spec but with different name
-	createdStream, err := cluster.KubeClient.FabricEventStreams(namespace).Create(
-		context.TODO(), fes, metav1.CreateOptions{})
+	// sync the stream again
+	err = cluster.syncStream(appId)
 	assert.NoError(t, err)
-	assert.Equal(t, createdStream.Spec.ApplicationId, appId)

-	// check that two streams exist
+	// check that only one stream remains after sync
 	listOptions := metav1.ListOptions{
 		LabelSelector: cluster.labelsSet(true).String(),
 	}
 	streams, err := cluster.KubeClient.FabricEventStreams(namespace).List(context.TODO(), listOptions)
 	assert.NoError(t, err)
-	assert.Equalf(t, 2, len(streams.Items), "unexpected number of streams found: got %d, but expected only 2", len(streams.Items))
-
-	// sync the stream which should remove the redundant stream
-	err = cluster.syncStream(appId)
-	assert.NoError(t, err)
-
-	// check that only one stream remains after sync
-	streams, err = cluster.KubeClient.FabricEventStreams(namespace).List(context.TODO(), listOptions)
-	assert.NoError(t, err)
 	assert.Equalf(t, 1, len(streams.Items), "unexpected number of streams found: got %d, but expected only 1", len(streams.Items))
-
-	// check owner references
-	if !reflect.DeepEqual(streams.Items[0].OwnerReferences, cluster.ownerReferences()) {
-		t.Errorf("unexpected owner references, expected %#v, got %#v", cluster.ownerReferences(), streams.Items[0].OwnerReferences)
-	}
 }

 func TestSameStreams(t *testing.T) {
@@ -663,13 +646,14 @@ func TestUpdateStreams(t *testing.T) {
 			OpConfig: config.Config{
 				PodManagementPolicy: "ordered_ready",
 				Resources: config.Resources{
-					ClusterLabels:        map[string]string{"application": "spilo"},
-					ClusterNameLabel:     "cluster-name",
-					DefaultCPURequest:    "300m",
-					DefaultCPULimit:      "300m",
-					DefaultMemoryRequest: "300Mi",
-					DefaultMemoryLimit:   "300Mi",
-					PodRoleLabel:         "spilo-role",
+					ClusterLabels:         map[string]string{"application": "spilo"},
+					ClusterNameLabel:      "cluster-name",
+					DefaultCPURequest:     "300m",
+					DefaultCPULimit:       "300m",
+					DefaultMemoryRequest:  "300Mi",
+					DefaultMemoryLimit:    "300Mi",
+					EnableOwnerReferences: util.True(),
+					PodRoleLabel:          "spilo-role",
 				},
 			},
 		}, client, pg, logger, eventRecorder)
@@ -678,10 +662,31 @@ func TestUpdateStreams(t *testing.T) {
 		context.TODO(), &pg, metav1.CreateOptions{})
 	assert.NoError(t, err)

-	// create the stream
+	// create stream with different owner reference
+	fes.ObjectMeta.Name = fmt.Sprintf("%s-12345", pg.Name)
+	fes.ObjectMeta.Labels["cluster-name"] = pg.Name
+	createdStream, err := cluster.KubeClient.FabricEventStreams(namespace).Create(
+		context.TODO(), fes, metav1.CreateOptions{})
+	assert.NoError(t, err)
+	assert.Equal(t, createdStream.Spec.ApplicationId, appId)
+
+	// sync the stream which should update the owner reference
 	err = cluster.syncStream(appId)
 	assert.NoError(t, err)

+	// check that only one stream exists after sync
+	listOptions := metav1.ListOptions{
+		LabelSelector: cluster.labelsSet(true).String(),
+	}
+	streams, err := cluster.KubeClient.FabricEventStreams(namespace).List(context.TODO(), listOptions)
+	assert.NoError(t, err)
+	assert.Equalf(t, 1, len(streams.Items), "unexpected number of streams found: got %d, but expected only 1", len(streams.Items))
+
+	// compare owner references
+	if !reflect.DeepEqual(streams.Items[0].OwnerReferences, cluster.ownerReferences()) {
+		t.Errorf("unexpected owner references, expected %#v, got %#v", cluster.ownerReferences(), streams.Items[0].OwnerReferences)
+	}
+
 	// change specs of streams and patch CRD
 	for i, stream := range pg.Spec.Streams {
 		if stream.ApplicationId == appId {
@@ -694,10 +699,7 @@ func TestUpdateStreams(t *testing.T) {
 	}

 	// compare stream returned from API with expected stream
-	listOptions := metav1.ListOptions{
-		LabelSelector: cluster.labelsSet(true).String(),
-	}
-	streams := patchPostgresqlStreams(t, cluster, &pg.Spec, listOptions)
+	streams = patchPostgresqlStreams(t, cluster, &pg.Spec, listOptions)
 	result := cluster.generateFabricEventStream(appId)
 	if match, _ := cluster.compareStreams(&streams.Items[0], result); !match {
 		t.Errorf("Malformed FabricEventStream after updating manifest, expected %#v, got %#v", streams.Items[0], result)
@@ -716,9 +718,51 @@ func TestUpdateStreams(t *testing.T) {
 	if match, _ := cluster.compareStreams(&streams.Items[0], result); !match {
 		t.Errorf("Malformed FabricEventStream after disabling event recovery, expected %#v, got %#v", streams.Items[0], result)
 	}
+}

-	mockClient := k8sutil.NewMockKubernetesClient()
-	cluster.KubeClient.CustomResourceDefinitionsGetter = mockClient.CustomResourceDefinitionsGetter
+func patchPostgresqlStreams(t *testing.T, cluster *Cluster, pgSpec *acidv1.PostgresSpec, listOptions metav1.ListOptions) (streams *zalandov1.FabricEventStreamList) {
+	patchData, err := specPatch(pgSpec)
+	assert.NoError(t, err)
+
+	pgPatched, err := cluster.KubeClient.Postgresqls(namespace).Patch(
+		context.TODO(), cluster.Name, types.MergePatchType, patchData, metav1.PatchOptions{}, "spec")
+	assert.NoError(t, err)
+
+	cluster.Postgresql.Spec = pgPatched.Spec
+	err = cluster.syncStream(appId)
+	assert.NoError(t, err)
+
+	streams, err = cluster.KubeClient.FabricEventStreams(namespace).List(context.TODO(), listOptions)
+	assert.NoError(t, err)
+
+	return streams
+}
+
+func TestDeleteStreams(t *testing.T) {
+	pg.Name = fmt.Sprintf("%s-4", pg.Name)
+	var cluster = New(
+		Config{
+			OpConfig: config.Config{
+				PodManagementPolicy: "ordered_ready",
+				Resources: config.Resources{
+					ClusterLabels:        map[string]string{"application": "spilo"},
+					ClusterNameLabel:     "cluster-name",
+					DefaultCPURequest:    "300m",
+					DefaultCPULimit:      "300m",
+					DefaultMemoryRequest: "300Mi",
+					DefaultMemoryLimit:   "300Mi",
+					PodRoleLabel:         "spilo-role",
+				},
+			},
+		}, client, pg, logger, eventRecorder)
+
+	_, err := cluster.KubeClient.Postgresqls(namespace).Create(
+		context.TODO(), &pg, metav1.CreateOptions{})
+	assert.NoError(t, err)
+
+	// create the stream
+	err = cluster.syncStream(appId)
+	assert.NoError(t, err)

 	// remove streams from manifest
 	pg.Spec.Streams = nil
@@ -729,26 +773,32 @@ func TestUpdateStreams(t *testing.T) {
 	appIds := getDistinctApplicationIds(pgUpdated.Spec.Streams)
 	cluster.cleanupRemovedStreams(appIds)

-	streams, err = cluster.KubeClient.FabricEventStreams(namespace).List(context.TODO(), listOptions)
-	if len(streams.Items) > 0 || err != nil {
-		t.Errorf("stream resource has not been removed or unexpected error %v", err)
+	// check that streams have been deleted
+	listOptions := metav1.ListOptions{
+		LabelSelector: cluster.labelsSet(true).String(),
 	}
-}
-
-func patchPostgresqlStreams(t *testing.T, cluster *Cluster, pgSpec *acidv1.PostgresSpec, listOptions metav1.ListOptions) (streams *zalandov1.FabricEventStreamList) {
-	patchData, err := specPatch(pgSpec)
+	streams, err := cluster.KubeClient.FabricEventStreams(namespace).List(context.TODO(), listOptions)
 	assert.NoError(t, err)
+	assert.Equalf(t, 0, len(streams.Items), "unexpected number of streams found: got %d, but expected none", len(streams.Items))

-	pgPatched, err := cluster.KubeClient.Postgresqls(namespace).Patch(
-		context.TODO(), cluster.Name, types.MergePatchType, patchData, metav1.PatchOptions{}, "spec")
+	// create stream to test deleteStreams code
+	fes.ObjectMeta.Name = fmt.Sprintf("%s-12345", pg.Name)
+	fes.ObjectMeta.Labels["cluster-name"] = pg.Name
+	_, err = cluster.KubeClient.FabricEventStreams(namespace).Create(
+		context.TODO(), fes, metav1.CreateOptions{})
 	assert.NoError(t, err)

-	cluster.Postgresql.Spec = pgPatched.Spec
+	// sync it once to cluster struct
 	err = cluster.syncStream(appId)
 	assert.NoError(t, err)

+	// we need a mock client because deleteStreams checks for CRD existence
+	mockClient := k8sutil.NewMockKubernetesClient()
+	cluster.KubeClient.CustomResourceDefinitionsGetter = mockClient.CustomResourceDefinitionsGetter
+	cluster.deleteStreams()
+
+	// check that streams have been deleted
 	streams, err = cluster.KubeClient.FabricEventStreams(namespace).List(context.TODO(), listOptions)
 	assert.NoError(t, err)
-
-	return streams
+	assert.Equalf(t, 0, len(streams.Items), "unexpected number of streams found: got %d, but expected none", len(streams.Items))
 }
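
The new TestDeleteStreams swaps in a mock CustomResourceDefinitionsGetter before calling deleteStreams, because the delete path first checks that the event stream CRD exists. The following is a minimal, self-contained sketch of that injection pattern; crdGetter, mockCRDGetter, and the cluster/deleteStreams shown here are illustrative stand-ins, not the operator's real k8sutil or Cluster types.

package main

import "fmt"

// crdGetter abstracts the "does the CRD exist?" check that the delete path
// performs before removing resources. Illustrative only.
type crdGetter interface {
	CRDExists(name string) bool
}

// mockCRDGetter plays the role the mock Kubernetes client plays in the test:
// it always reports the CRD as present.
type mockCRDGetter struct{}

func (mockCRDGetter) CRDExists(string) bool { return true }

type cluster struct {
	crds    crdGetter
	streams map[string]string // appId -> stream resource name
}

// deleteStreams removes all tracked streams once the CRD check passes.
func (c *cluster) deleteStreams() {
	if !c.crds.CRDExists("fabriceventstreams") { // placeholder CRD name
		return
	}
	for appId := range c.streams {
		delete(c.streams, appId)
	}
}

func main() {
	c := &cluster{
		crds:    mockCRDGetter{}, // injected mock, mirroring the test setup
		streams: map[string]string{"app1": "acid-test-cluster-12345"},
	}
	c.deleteStreams()
	fmt.Printf("remaining streams: %d\n", len(c.streams))
}

Injecting the getter through an interface field keeps the production code path unchanged while letting the test control the answer to the CRD-existence check.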
