@@ -450,26 +450,34 @@ func getEmptyDeploymentOptions() deploymentOptions {
 // TestPrepareScaleDownShardedCluster tests the scale down operation for config servers and mongods per shard. It checks
 // that all members that will be removed are marked as unvoted
 func TestPrepareScaleDownShardedCluster_ConfigMongodsUp(t *testing.T) {
-	t.Skip("This test is too fragile to be executed; it's based on status and not deployment state and test internal interactions that are no longer true. Either we rewrite it to full Reconcile or remove it.")
 	ctx := context.Background()
+
+	initialState := MultiClusterShardedScalingStep{
+		shardCount: 2,
+		configServerDistribution: map[string]int{
+			multicluster.LegacyCentralClusterName: 3,
+		},
+		shardDistribution: map[string]int{
+			multicluster.LegacyCentralClusterName: 4,
+		},
+	}
+
 	scBeforeScale := test.DefaultClusterBuilder().
-		SetConfigServerCountStatus(3).
 		SetConfigServerCountSpec(3).
-		SetMongodsPerShardCountStatus(4).
 		SetMongodsPerShardCountSpec(4).
 		Build()
 
-	omConnectionFactory := om.NewCachedOMConnectionFactoryWithInitializedConnection(om.NewMockedOmConnection(createDeploymentFromShardedCluster(t, scBeforeScale)))
-	_, reconcileHelper, _, _, _ := defaultClusterReconciler(ctx, scBeforeScale, nil)
-
-	// TODO prepareScaleDownShardedCluster is getting data from deployment state so modify it instead of passing state in MongoDB object
 	scAfterScale := test.DefaultClusterBuilder().
-		SetConfigServerCountStatus(3).
 		SetConfigServerCountSpec(2).
-		SetMongodsPerShardCountStatus(4).
 		SetMongodsPerShardCountSpec(3).
 		Build()
 
+	omConnectionFactory := om.NewCachedOMConnectionFactoryWithInitializedConnection(om.NewMockedOmConnection(createDeploymentFromShardedCluster(t, scBeforeScale)))
+	kubeClient, _ := mock.NewDefaultFakeClient(scAfterScale)
+	// Store the initial scaling status in the state configmap
+	assert.NoError(t, createMockStateConfigMap(kubeClient, mock.TestNamespace, scBeforeScale.Name, initialState))
+	_, reconcileHelper, err := newShardedClusterReconcilerFromResource(ctx, scAfterScale, nil, kubeClient, omConnectionFactory)
+	assert.NoError(t, err)
 	assert.NoError(t, reconcileHelper.prepareScaleDownShardedCluster(omConnectionFactory.GetConnection(), zap.S()))
 
 	// create the expected deployment from the sharded cluster that has not yet scaled
@@ -493,16 +501,31 @@ func TestPrepareScaleDownShardedCluster_ConfigMongodsUp(t *testing.T) {
 // TestPrepareScaleDownShardedCluster_ShardsUpMongodsDown checks the situation when shards count increases and mongods
 // count per shard is decreased - scale down operation is expected to be called only for existing shards
 func TestPrepareScaleDownShardedCluster_ShardsUpMongodsDown(t *testing.T) {
-	t.Skip("This test is too fragile to be executed; it's based on status and not deployment state and test internal interactions that are no longer true. Either we rewrite it to full Reconcile or remove it.")
 	ctx := context.Background()
+
+	initialState := MultiClusterShardedScalingStep{
+		shardCount: 4,
+		shardDistribution: map[string]int{
+			multicluster.LegacyCentralClusterName: 4,
+		},
+	}
+
 	scBeforeScale := test.DefaultClusterBuilder().
-		SetShardCountStatus(4).
 		SetShardCountSpec(4).
-		SetMongodsPerShardCountStatus(4).
 		SetMongodsPerShardCountSpec(4).
 		Build()
 
-	_, reconcileHelper, _, omConnectionFactory, _ := defaultClusterReconciler(ctx, scBeforeScale, nil)
+	scAfterScale := test.DefaultClusterBuilder().
+		SetShardCountSpec(2).
+		SetMongodsPerShardCountSpec(3).
+		Build()
+
+	omConnectionFactory := om.NewCachedOMConnectionFactoryWithInitializedConnection(om.NewMockedOmConnection(createDeploymentFromShardedCluster(t, scBeforeScale)))
+	kubeClient, _ := mock.NewDefaultFakeClient(scAfterScale)
+	assert.NoError(t, createMockStateConfigMap(kubeClient, mock.TestNamespace, scBeforeScale.Name, initialState))
+	_, reconcileHelper, err := newShardedClusterReconcilerFromResource(ctx, scAfterScale, nil, kubeClient, omConnectionFactory)
+	assert.NoError(t, err)
+
 	omConnectionFactory.SetPostCreateHook(func(connection om.Connection) {
 		deployment := createDeploymentFromShardedCluster(t, scBeforeScale)
 		if _, err := connection.UpdateDeployment(deployment); err != nil {
@@ -512,17 +535,6 @@ func TestPrepareScaleDownShardedCluster_ShardsUpMongodsDown(t *testing.T) {
 		connection.(*om.MockedOmConnection).CleanHistory()
 	})
 
-	// TODO prepareScaleDownShardedCluster is getting data from deployment state so modify it instead of passing state in MongoDB object
-	scAfterScale := test.DefaultClusterBuilder().
-		SetShardCountStatus(4).
-		SetShardCountSpec(2).
-		SetMongodsPerShardCountStatus(4).
-		SetMongodsPerShardCountSpec(3).
-		Build()
-
-	// necessary otherwise next omConnectionFactory.GetConnection() will return nil as the connectionFactoryFunc hasn't been called yet
-	initializeOMConnection(t, ctx, reconcileHelper, scAfterScale, zap.S(), omConnectionFactory)
-
 	assert.NoError(t, reconcileHelper.prepareScaleDownShardedCluster(omConnectionFactory.GetConnection(), zap.S()))
 
 	// expected change of state: rs members are marked unvoted only for two shards (old state)
@@ -589,17 +601,34 @@ func initializeOMConnection(t *testing.T, ctx context.Context, reconcileHelper *
 // TestUpdateOmDeploymentShardedCluster_HostsRemovedFromMonitoring verifies that if scale down operation was performed -
 // hosts are removed
 func TestUpdateOmDeploymentShardedCluster_HostsRemovedFromMonitoring(t *testing.T) {
-	t.Skip("This test is too fragile to be executed; it's based on status and not deployment state and test internal interactions that are no longer true. Either we rewrite it to full Reconcile or remove it.")
 	ctx := context.Background()
-	// TODO use deployment state instead of status
+
+	initialState := MultiClusterShardedScalingStep{
+		mongosDistribution: map[string]int{
+			multicluster.LegacyCentralClusterName: 2,
+		},
+		configServerDistribution: map[string]int{
+			multicluster.LegacyCentralClusterName: 4,
+		},
+	}
+
 	sc := test.DefaultClusterBuilder().
-		SetMongosCountStatus(2).
 		SetMongosCountSpec(2).
-		SetConfigServerCountStatus(4).
 		SetConfigServerCountSpec(4).
 		Build()
 
-	_, reconcileHelper, _, omConnectionFactory, _ := defaultClusterReconciler(ctx, sc, nil)
+	// we need to create a different sharded cluster that is currently in the process of scaling down
+	scScaledDown := test.DefaultClusterBuilder().
+		SetMongosCountSpec(1).
+		SetConfigServerCountSpec(3).
+		Build()
+
+	omConnectionFactory := om.NewCachedOMConnectionFactoryWithInitializedConnection(om.NewMockedOmConnection(createDeploymentFromShardedCluster(t, sc)))
+	kubeClient, _ := mock.NewDefaultFakeClient(sc)
+	assert.NoError(t, createMockStateConfigMap(kubeClient, mock.TestNamespace, sc.Name, initialState))
+	_, reconcileHelper, err := newShardedClusterReconcilerFromResource(ctx, scScaledDown, nil, kubeClient, omConnectionFactory)
+	assert.NoError(t, err)
+
 	omConnectionFactory.SetPostCreateHook(func(connection om.Connection) {
 		// the initial deployment we create should have all processes
 		deployment := createDeploymentFromShardedCluster(t, sc)
@@ -609,20 +638,6 @@ func TestUpdateOmDeploymentShardedCluster_HostsRemovedFromMonitoring(t *testing.
 		connection.(*om.MockedOmConnection).AddHosts(deployment.GetAllHostnames())
 		connection.(*om.MockedOmConnection).CleanHistory()
 	})
-	// necessary otherwise next omConnectionFactory.GetConnection() will return nil as the connectionFactoryFunc hasn't been called yet
-	initializeOMConnection(t, ctx, reconcileHelper, sc, zap.S(), omConnectionFactory)
-
-	// we need to create a different sharded cluster that is currently in the process of scaling down
-	// TODO use deployment state instead of status
-	scScaledDown := test.DefaultClusterBuilder().
-		SetMongosCountStatus(2).
-		SetMongosCountSpec(1).
-		SetConfigServerCountStatus(4).
-		SetConfigServerCountSpec(3).
-		Build()
-
-	// necessary otherwise next omConnectionFactory.GetConnection() will return nil as the connectionFactoryFunc hasn't been called yet
-	initializeOMConnection(t, ctx, reconcileHelper, scScaledDown, zap.S(), omConnectionFactory)
 
 	// updateOmDeploymentShardedCluster checks an element from ac.Auth.DeploymentAuthMechanisms
 	// so we need to ensure it has a non-nil value. An empty list implies no authentication
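
Note on the new setup pattern: each reconnected test now seeds the reconciler's deployment state before calling `newShardedClusterReconcilerFromResource`, instead of faking status fields on the MongoDB resource. The `createMockStateConfigMap` helper itself is not part of this diff; below is a minimal sketch of what such a helper could look like, assuming the reconciler reads a JSON-serialized scaling step from a ConfigMap. The ConfigMap name suffix (`"-state"`), the `"state"` data key, and the JSON field names are illustrative assumptions, not the repository's confirmed conventions.

```go
// Sketch only: the real createMockStateConfigMap is defined elsewhere in the repository
// and may differ. This version stores the initial scaling state as JSON in a ConfigMap
// named "<resource>-state" under a "state" key (both assumed for illustration).
package sketch

import (
	"context"
	"encoding/json"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// scalingStep mirrors the MultiClusterShardedScalingStep fields used in the tests above.
type scalingStep struct {
	ShardCount               int            `json:"shardCount,omitempty"`
	ShardDistribution        map[string]int `json:"shardDistribution,omitempty"`
	ConfigServerDistribution map[string]int `json:"configServerDistribution,omitempty"`
	MongosDistribution       map[string]int `json:"mongosDistribution,omitempty"`
}

// createMockStateConfigMap serializes the initial scaling state and creates the ConfigMap
// the reconciler is assumed to read its deployment state from.
func createMockStateConfigMap(c client.Client, namespace, resourceName string, state scalingStep) error {
	payload, err := json.Marshal(state)
	if err != nil {
		return err
	}
	cm := &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:      resourceName + "-state", // assumed naming convention
			Namespace: namespace,
		},
		Data: map[string]string{"state": string(payload)},
	}
	return c.Create(context.Background(), cm)
}
```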