diff --git a/RELEASE_NOTES.md b/RELEASE_NOTES.md
index adb4462b8..f2efdf157 100644
--- a/RELEASE_NOTES.md
+++ b/RELEASE_NOTES.md
@@ -6,6 +6,7 @@
 ## Other Changes
 * Optional permissions for `PersistentVolumeClaim` moved to a separate role. When managing the operator with Helm, it is possible to disable permissions for `PersistentVolumeClaim` resources by setting the `operator.enablePVCResize` value to `false` (`true` by default). Previously, when enabled, these permissions were part of the primary operator role; with this change they are defined in a separate role.
 * `subresourceEnabled` Helm value was removed. This setting used to be `true` by default and made it possible to exclude subresource permissions from the operator role by specifying `false` as the value. We are removing this configuration option, making the operator roles always have subresource permissions. This setting was introduced as a temporary solution for [this](https://bugzilla.redhat.com/show_bug.cgi?id=1803171) OpenShift issue. The issue has since been resolved and the setting is no longer needed.
+* The `MDB_ASSUME_ENTERPRISE_IMAGE` environment variable has been removed. This undocumented environment variable, when set to `true`, forced the `-ent` suffix for the database image version in static architecture. If you are mirroring images and were using this variable, ensure that you do not rename the server image. The name must contain `mongodb-enterprise-server`; otherwise, the operator will not function correctly.
diff --git a/changelog/20250722_other_MDB_ASSUME_ENTERPRISE_IMAGE_env_var_removed.md b/changelog/20250722_other_MDB_ASSUME_ENTERPRISE_IMAGE_env_var_removed.md
new file mode 100644
index 000000000..93f8abec3
--- /dev/null
+++ b/changelog/20250722_other_MDB_ASSUME_ENTERPRISE_IMAGE_env_var_removed.md
@@ -0,0 +1,7 @@
+---
+title: MDB_ASSUME_ENTERPRISE_IMAGE environment variable has been removed
+kind: other
+date: 2025-07-22
+---
+
+* The `MDB_ASSUME_ENTERPRISE_IMAGE` environment variable has been removed. This undocumented environment variable, when set to `true`, forced the `-ent` suffix for the database image version in static architecture. If you are mirroring images and were using this variable, ensure that you do not rename the server image. The name must contain `mongodb-enterprise-server`; otherwise, the operator will not function correctly.
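The image-name requirement called out in the release note is easier to see in code. Below is a minimal, hypothetical Go sketch, not the operator's actual implementation: the function name, the `staticArchitecture` parameter, and the registry paths are invented for illustration. The real logic lives in `architectures.GetMongoVersionForAutomationConfig`, whose reduced signature (without `forceEnterprise`) appears later in this patch.

```go
package main

import (
	"fmt"
	"strings"
)

// enterpriseImageMarker is the substring the release note says a mirrored
// image name must keep; renaming the image breaks this detection.
const enterpriseImageMarker = "mongodb-enterprise-server"

// mongoVersionForAutomationConfig sketches how the process version can be
// derived from the image name alone, now that MDB_ASSUME_ENTERPRISE_IMAGE is
// gone: in static architecture the "-ent" suffix is applied only when the
// image is recognizably the enterprise server image.
func mongoVersionForAutomationConfig(mongoDBImage, version string, staticArchitecture bool) string {
	if !staticArchitecture || strings.HasSuffix(version, "-ent") {
		return version
	}
	if strings.Contains(mongoDBImage, enterpriseImageMarker) {
		return version + "-ent"
	}
	return version
}

func main() {
	// A mirror that keeps the marker in its path still gets the enterprise suffix.
	fmt.Println(mongoVersionForAutomationConfig("registry.internal/mongodb-enterprise-server", "7.0.5", true)) // 7.0.5-ent
	// A renamed mirror silently loses it, which is why renaming is unsupported.
	fmt.Println(mongoVersionForAutomationConfig("registry.internal/mongo-server", "7.0.5", true)) // 7.0.5
}
```

Under this assumption, a mirrored image that keeps `mongodb-enterprise-server` in its path continues to receive the enterprise version suffix, while a renamed mirror would silently fall back to the community version string.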
diff --git a/controllers/om/deployment/om_deployment_test.go b/controllers/om/deployment/om_deployment_test.go
index 9826188aa..801cd1f9c 100644
--- a/controllers/om/deployment/om_deployment_test.go
+++ b/controllers/om/deployment/om_deployment_test.go
@@ -24,14 +24,14 @@ func init() {
 func TestPrepareScaleDown_OpsManagerRemovedMember(t *testing.T) {
     // This is deployment with 2 members (emulating that OpsManager removed the 3rd one)
     rs := mdbv1.NewReplicaSetBuilder().SetName("bam").SetMembers(2).Build()
-    oldDeployment := CreateFromReplicaSet("fake-mongoDBImage", false, rs)
+    oldDeployment := CreateFromReplicaSet("fake-mongoDBImage", rs)
     mockedOmConnection := om.NewMockedOmConnection(oldDeployment)

     // We try to prepare two members for scale down, but one of them will fail (bam-2)
     rsWithThreeMembers := map[string][]string{"bam": {"bam-1", "bam-2"}}

     assert.NoError(t, replicaset.PrepareScaleDownFromMap(mockedOmConnection, rsWithThreeMembers, rsWithThreeMembers["bam"], zap.S()))

-    expectedDeployment := CreateFromReplicaSet("fake-mongoDBImage", false, rs)
+    expectedDeployment := CreateFromReplicaSet("fake-mongoDBImage", rs)

     assert.NoError(t, expectedDeployment.MarkRsMembersUnvoted("bam", []string{"bam-1"}))
diff --git a/controllers/om/deployment/testing_utils.go b/controllers/om/deployment/testing_utils.go
index 922d25024..e5ff028d9 100644
--- a/controllers/om/deployment/testing_utils.go
+++ b/controllers/om/deployment/testing_utils.go
@@ -18,7 +18,7 @@ import (
 // different packages. And test files are only compiled
 // when testing that specific package
 // https://github.com/golang/go/issues/10184#issuecomment-84465873
-func CreateFromReplicaSet(mongoDBImage string, forceEnterprise bool, rs *mdb.MongoDB) om.Deployment {
+func CreateFromReplicaSet(mongoDBImage string, rs *mdb.MongoDB) om.Deployment {
     sts := construct.DatabaseStatefulSet(*rs, construct.ReplicaSetOptions(
         func(options *construct.DatabaseStatefulSetOptions) {
             options.PodVars = &env.PodEnvVars{ProjectID: "abcd"}
@@ -32,7 +32,7 @@ func CreateFromReplicaSet(mongoDBImage string, forceEnterprise bool, rs *mdb.Mon
     }

     d.MergeReplicaSet(
-        replicaset.BuildFromStatefulSet(mongoDBImage, forceEnterprise, sts, rs.GetSpec(), rs.Status.FeatureCompatibilityVersion),
+        replicaset.BuildFromStatefulSet(mongoDBImage, sts, rs.GetSpec(), rs.Status.FeatureCompatibilityVersion),
         rs.Spec.AdditionalMongodConfig.ToMap(),
         lastConfig.ToMap(),
         zap.S(),
diff --git a/controllers/om/deployment_test.go b/controllers/om/deployment_test.go
index 8f0808dae..068205c75 100644
--- a/controllers/om/deployment_test.go
+++ b/controllers/om/deployment_test.go
@@ -62,7 +62,7 @@ func TestMergeReplicaSet(t *testing.T) {
     // Now the deployment "gets updated" from external - new node is added and one is removed - this should be fixed
     // by merge
-    newProcess := NewMongodProcess("foo", "bar", "fake-mongoDBImage", false, &mdbv1.AdditionalMongodConfig{}, &mdbv1.NewStandaloneBuilder().Build().Spec, "", nil, "")
+    newProcess := NewMongodProcess("foo", "bar", "fake-mongoDBImage", &mdbv1.AdditionalMongodConfig{}, &mdbv1.NewStandaloneBuilder().Build().Spec, "", nil, "")

     d.getProcesses()[0]["processType"] = ProcessTypeMongos              // this will be overridden
     d.getProcesses()[1].EnsureNetConfig()["MaxIncomingConnections"] = 20 // this will be left as-is
@@ -741,7 +741,7 @@ func buildRsByProcesses(rsName string, processes []Process) ReplicaSetWithProces

 func createStandalone() Process {
-    return NewMongodProcess("merchantsStandalone", "mongo1.some.host", "fake-mongoDBImage", false, &mdbv1.AdditionalMongodConfig{}, defaultMongoDBVersioned("3.6.3"), "", nil, "")
+    return NewMongodProcess("merchantsStandalone", "mongo1.some.host", "fake-mongoDBImage", &mdbv1.AdditionalMongodConfig{}, defaultMongoDBVersioned("3.6.3"), "", nil, "")
 }

 func createMongosProcesses(num int, name, clusterName string) []Process {
@@ -749,7 +749,7 @@ func createMongosProcesses(num int, name, clusterName string) []Process {
     for i := 0; i < num; i++ {
         idx := strconv.Itoa(i)
-        mongosProcesses[i] = NewMongosProcess(name+idx, "mongoS"+idx+".some.host", "fake-mongoDBImage", false, &mdbv1.AdditionalMongodConfig{}, defaultMongoDBVersioned("3.6.3"), "", nil, "")
+        mongosProcesses[i] = NewMongosProcess(name+idx, "mongoS"+idx+".some.host", "fake-mongoDBImage", &mdbv1.AdditionalMongodConfig{}, defaultMongoDBVersioned("3.6.3"), "", nil, "")
         if clusterName != "" {
             mongosProcesses[i].setCluster(clusterName)
         }
@@ -765,7 +765,7 @@ func createReplicaSetProcessesCount(count int, rsName string) []Process {
     rsMembers := make([]Process, count)

     for i := 0; i < count; i++ {
-        rsMembers[i] = NewMongodProcess(fmt.Sprintf("%s-%d", rsName, i), fmt.Sprintf("%s-%d.some.host", rsName, i), "fake-mongoDBImage", false, &mdbv1.AdditionalMongodConfig{}, defaultMongoDBVersioned("3.6.3"), "", nil, "")
+        rsMembers[i] = NewMongodProcess(fmt.Sprintf("%s-%d", rsName, i), fmt.Sprintf("%s-%d.some.host", rsName, i), "fake-mongoDBImage", &mdbv1.AdditionalMongodConfig{}, defaultMongoDBVersioned("3.6.3"), "", nil, "")
         // Note that we don't specify the replicaset config for process
     }
     return rsMembers
@@ -775,7 +775,7 @@ func createReplicaSetProcessesCountEnt(count int, rsName string) []Process {
     rsMembers := make([]Process, count)

     for i := 0; i < count; i++ {
-        rsMembers[i] = NewMongodProcess(fmt.Sprintf("%s-%d", rsName, i), fmt.Sprintf("%s-%d.some.host", rsName, i), "fake-mongoDBImage", false, &mdbv1.AdditionalMongodConfig{}, defaultMongoDBVersioned("3.6.3-ent"), "", nil, "")
+        rsMembers[i] = NewMongodProcess(fmt.Sprintf("%s-%d", rsName, i), fmt.Sprintf("%s-%d.some.host", rsName, i), "fake-mongoDBImage", &mdbv1.AdditionalMongodConfig{}, defaultMongoDBVersioned("3.6.3-ent"), "", nil, "")
         // Note that we don't specify the replicaset config for process
     }
     return rsMembers
diff --git a/controllers/om/depshardedcluster_test.go b/controllers/om/depshardedcluster_test.go
index e9f025dd0..5b0fded94 100644
--- a/controllers/om/depshardedcluster_test.go
+++ b/controllers/om/depshardedcluster_test.go
@@ -109,7 +109,7 @@ func TestMergeShardedCluster_ReplicaSetsModified(t *testing.T) {
     // These OM changes must be overridden
     (*d.getReplicaSetByName("cluster-0"))["protocolVersion"] = util.Int32Ref(2)
     (*d.getReplicaSetByName("configSrv")).addMember(
-        NewMongodProcess("foo", "bar", "fake-mongoDBImage", false, &mdbv1.AdditionalMongodConfig{}, mdbv1.NewStandaloneBuilder().Build().GetSpec(), "", nil, ""), "", automationconfig.MemberOptions{},
+        NewMongodProcess("foo", "bar", "fake-mongoDBImage", &mdbv1.AdditionalMongodConfig{}, mdbv1.NewStandaloneBuilder().Build().GetSpec(), "", nil, ""), "", automationconfig.MemberOptions{},
     )
     (*d.getReplicaSetByName("cluster-2")).setMembers(d.getReplicaSetByName("cluster-2").Members()[0:2])
diff --git a/controllers/om/process.go b/controllers/om/process.go
index cdc5cf1a5..751e9c339 100644
--- a/controllers/om/process.go
+++ b/controllers/om/process.go
@@ -93,13 +93,13 @@ func NewProcessFromInterface(i interface{}) Process {
     return i.(map[string]interface{})
 }

-func NewMongosProcess(name, hostName, mongoDBImage string, forceEnterprise bool, additionalMongodConfig *mdbv1.AdditionalMongodConfig, spec mdbv1.DbSpec, certificateFilePath string, annotations map[string]string, fcv string) Process {
+func NewMongosProcess(name, hostName, mongoDBImage string, additionalMongodConfig *mdbv1.AdditionalMongodConfig, spec mdbv1.DbSpec, certificateFilePath string, annotations map[string]string, fcv string) Process {
     if additionalMongodConfig == nil {
         additionalMongodConfig = mdbv1.NewEmptyAdditionalMongodConfig()
     }

     architecture := architectures.GetArchitecture(annotations)
-    processVersion := architectures.GetMongoVersionForAutomationConfig(mongoDBImage, spec.GetMongoDBVersion(), forceEnterprise, architecture)
+    processVersion := architectures.GetMongoVersionForAutomationConfig(mongoDBImage, spec.GetMongoDBVersion(), architecture)
     p := createProcess(
         WithName(name),
         WithHostname(hostName),
@@ -119,13 +119,13 @@ func NewMongosProcess(name, hostName, mongoDBImage string, forceEnterprise bool,
     return p
 }

-func NewMongodProcess(name, hostName, mongoDBImage string, forceEnterprise bool, additionalConfig *mdbv1.AdditionalMongodConfig, spec mdbv1.DbSpec, certificateFilePath string, annotations map[string]string, fcv string) Process {
+func NewMongodProcess(name, hostName, mongoDBImage string, additionalConfig *mdbv1.AdditionalMongodConfig, spec mdbv1.DbSpec, certificateFilePath string, annotations map[string]string, fcv string) Process {
     if additionalConfig == nil {
         additionalConfig = mdbv1.NewEmptyAdditionalMongodConfig()
     }

     architecture := architectures.GetArchitecture(annotations)
-    processVersion := architectures.GetMongoVersionForAutomationConfig(mongoDBImage, spec.GetMongoDBVersion(), forceEnterprise, architecture)
+    processVersion := architectures.GetMongoVersionForAutomationConfig(mongoDBImage, spec.GetMongoDBVersion(), architecture)
     p := createProcess(
         WithName(name),
         WithHostname(hostName),
diff --git a/controllers/om/process/om_process.go b/controllers/om/process/om_process.go
index da02f70cf..6f0b20c07 100644
--- a/controllers/om/process/om_process.go
+++ b/controllers/om/process/om_process.go
@@ -13,7 +13,7 @@ import (
     "github.com/mongodb/mongodb-kubernetes/pkg/util"
 )

-func CreateMongodProcessesWithLimit(mongoDBImage string, forceEnterprise bool, set appsv1.StatefulSet, dbSpec mdbv1.DbSpec, limit int, fcv string) []om.Process {
+func CreateMongodProcessesWithLimit(mongoDBImage string, set appsv1.StatefulSet, dbSpec mdbv1.DbSpec, limit int, fcv string) []om.Process {
     hostnames, names := dns.GetDnsForStatefulSetReplicasSpecified(set, dbSpec.GetClusterDomain(), limit, dbSpec.GetExternalDomain())
     processes := make([]om.Process, len(hostnames))

@@ -23,14 +23,14 @@ func CreateMongodProcessesWithLimit(mongoDBImage string, forceEnterprise bool, s
     }

     for idx, hostname := range hostnames {
-        processes[idx] = om.NewMongodProcess(names[idx], hostname, mongoDBImage, forceEnterprise, dbSpec.GetAdditionalMongodConfig(), dbSpec, certificateFileName, set.Annotations, fcv)
+        processes[idx] = om.NewMongodProcess(names[idx], hostname, mongoDBImage, dbSpec.GetAdditionalMongodConfig(), dbSpec, certificateFileName, set.Annotations, fcv)
     }

     return processes
 }

 // CreateMongodProcessesWithLimitMulti creates the process array for automationConfig based on MultiCluster CR spec
-func CreateMongodProcessesWithLimitMulti(mongoDBImage string, forceEnterprise bool, mrs mdbmultiv1.MongoDBMultiCluster, certFileName string) ([]om.Process, error) {
+func CreateMongodProcessesWithLimitMulti(mongoDBImage string, mrs mdbmultiv1.MongoDBMultiCluster, certFileName string) ([]om.Process, error) {
     hostnames := make([]string, 0)
     clusterNums := make([]int, 0)
     podNum := make([]int, 0)
@@ -50,7 +50,7 @@ func CreateMongodProcessesWithLimitMulti(mongoDBImage string, forceEnterprise bo
     processes := make([]om.Process, len(hostnames))
     for idx := range hostnames {
-        processes[idx] = om.NewMongodProcess(fmt.Sprintf("%s-%d-%d", mrs.Name, clusterNums[idx], podNum[idx]), hostnames[idx], mongoDBImage, forceEnterprise, mrs.Spec.GetAdditionalMongodConfig(), &mrs.Spec, certFileName, mrs.Annotations, mrs.CalculateFeatureCompatibilityVersion())
+        processes[idx] = om.NewMongodProcess(fmt.Sprintf("%s-%d-%d", mrs.Name, clusterNums[idx], podNum[idx]), hostnames[idx], mongoDBImage, mrs.Spec.GetAdditionalMongodConfig(), &mrs.Spec, certFileName, mrs.Annotations, mrs.CalculateFeatureCompatibilityVersion())
     }

     return processes, nil
diff --git a/controllers/om/process_test.go b/controllers/om/process_test.go
index ba3df1977..087f108fd 100644
--- a/controllers/om/process_test.go
+++ b/controllers/om/process_test.go
@@ -17,7 +17,7 @@ func TestCreateMongodProcess(t *testing.T) {
     mongoDBImage := "mongodb/mongodb-enterprise-server"
     t.Run("Create AgentLoggingMongodConfig", func(t *testing.T) {
         spec := defaultMongoDBVersioned("4.0.5")
-        process := NewMongodProcess("trinity", "trinity-0.trinity-svc.svc.cluster.local", mongoDBImage, false, spec.GetAdditionalMongodConfig(), spec, "", nil, "4.0")
+        process := NewMongodProcess("trinity", "trinity-0.trinity-svc.svc.cluster.local", mongoDBImage, spec.GetAdditionalMongodConfig(), spec, "", nil, "4.0")

         assert.Equal(t, "trinity", process.Name())
         assert.Equal(t, "trinity-0.trinity-svc.svc.cluster.local", process.HostName())
@@ -40,7 +40,7 @@ func TestCreateMongodProcess(t *testing.T) {
             AddOption("storage.dbPath", "/some/other/data") // this will be overridden

         rs := mdbv1.NewReplicaSetBuilder().SetAdditionalConfig(config).Build()
-        process := NewMongodProcess("trinity", "trinity-0.trinity-svc.svc.cluster.local", mongoDBImage, false, rs.Spec.AdditionalMongodConfig, rs.GetSpec(), "", nil, "")
+        process := NewMongodProcess("trinity", "trinity-0.trinity-svc.svc.cluster.local", mongoDBImage, rs.Spec.AdditionalMongodConfig, rs.GetSpec(), "", nil, "")

         assert.Equal(t, "inMemory", maputil.ReadMapValueAsInterface(process.Args(), "storage", "engine"))
         assert.Equal(t, 500, maputil.ReadMapValueAsInterface(process.Args(), "setParameter", "connPoolMaxConnsPerHost"))
@@ -53,7 +53,7 @@ func TestCreateMongodProcessStatic(t *testing.T) {
     mongoDBImage := "mongodb/mongodb-enterprise-server"
     t.Run("Create AgentLoggingMongodConfig", func(t *testing.T) {
         spec := defaultMongoDBVersioned("4.0.5")
-        process := NewMongodProcess("trinity", "trinity-0.trinity-svc.svc.cluster.local", mongoDBImage, false, spec.GetAdditionalMongodConfig(), spec, "", map[string]string{}, "4.0")
+        process := NewMongodProcess("trinity", "trinity-0.trinity-svc.svc.cluster.local", mongoDBImage, spec.GetAdditionalMongodConfig(), spec, "", map[string]string{}, "4.0")

         assert.Equal(t, "trinity", process.Name())
         assert.Equal(t, "trinity-0.trinity-svc.svc.cluster.local", process.HostName())
@@ -75,7 +75,7 @@ func TestCreateMongodProcessStatic(t *testing.T) {
             AddOption("storage.dbPath", "/some/other/data") // this will be overridden

         rs := mdbv1.NewReplicaSetBuilder().SetAdditionalConfig(config).Build()
-        process := NewMongodProcess("trinity", "trinity-0.trinity-svc.svc.cluster.local", mongoDBImage, false, rs.Spec.AdditionalMongodConfig, rs.GetSpec(), "", nil, "")
+        process := NewMongodProcess("trinity", "trinity-0.trinity-svc.svc.cluster.local", mongoDBImage, rs.Spec.AdditionalMongodConfig, rs.GetSpec(), "", nil, "")

         assert.Equal(t, "inMemory", maputil.ReadMapValueAsInterface(process.Args(), "storage", "engine"))
         assert.Equal(t, 500, maputil.ReadMapValueAsInterface(process.Args(), "setParameter", "connPoolMaxConnsPerHost"))
@@ -151,7 +151,7 @@ func TestConfigureX509_Process(t *testing.T) {
             },
         },
     }
-    process := NewMongodProcess("trinity", "trinity-0.trinity-svc.svc.cluster.local", "fake-mongoDBImage", false, &mdbv1.AdditionalMongodConfig{}, mdb.GetSpec(), "", nil, "")
+    process := NewMongodProcess("trinity", "trinity-0.trinity-svc.svc.cluster.local", "fake-mongoDBImage", &mdbv1.AdditionalMongodConfig{}, mdb.GetSpec(), "", nil, "")

     process.ConfigureClusterAuthMode("", "") // should not update fields
     assert.NotContains(t, process.security(), "clusterAuthMode")
@@ -166,13 +166,13 @@ func TestCreateMongodProcess_SSL(t *testing.T) {
     additionalConfig := mdbv1.NewAdditionalMongodConfig("net.ssl.mode", string(tls.Prefer))
     mdb := mdbv1.NewStandaloneBuilder().SetVersion("3.6.4").SetFCVersion("3.6").SetAdditionalConfig(additionalConfig).Build()

-    process := NewMongodProcess("trinity", "trinity-0.trinity-svc.svc.cluster.local", "fake-mongoDBImage", false, additionalConfig, mdb.GetSpec(), "", nil, "")
+    process := NewMongodProcess("trinity", "trinity-0.trinity-svc.svc.cluster.local", "fake-mongoDBImage", additionalConfig, mdb.GetSpec(), "", nil, "")
     assert.Equal(t, map[string]interface{}{"mode": string(tls.Disabled)}, process.TLSConfig())

     mdb = mdbv1.NewStandaloneBuilder().SetVersion("3.6.4").SetFCVersion("3.6").SetAdditionalConfig(additionalConfig).
         SetSecurityTLSEnabled().Build()

-    process = NewMongodProcess("trinity", "trinity-0.trinity-svc.svc.cluster.local", "fake-mongoDBImage", false, additionalConfig, mdb.GetSpec(), "", nil, "")
+    process = NewMongodProcess("trinity", "trinity-0.trinity-svc.svc.cluster.local", "fake-mongoDBImage", additionalConfig, mdb.GetSpec(), "", nil, "")

     assert.Equal(t, map[string]interface{}{
         "mode": string(tls.Prefer),
@@ -184,7 +184,7 @@ func TestCreateMongosProcess_SSL(t *testing.T) {
     additionalConfig := mdbv1.NewAdditionalMongodConfig("net.ssl.mode", string(tls.Allow))
     mdb := mdbv1.NewStandaloneBuilder().SetVersion("3.6.4").SetFCVersion("3.6").SetAdditionalConfig(additionalConfig).
         SetSecurityTLSEnabled().Build()

-    process := NewMongosProcess("trinity", "trinity-0.trinity-svc.svc.cluster.local", "fake-mongoDBImage", false, additionalConfig, mdb.GetSpec(), "", nil, "")
+    process := NewMongosProcess("trinity", "trinity-0.trinity-svc.svc.cluster.local", "fake-mongoDBImage", additionalConfig, mdb.GetSpec(), "", nil, "")

     assert.Equal(t, map[string]interface{}{"mode": string(tls.Allow), "certificateKeyFile": "/mongodb-automation/server.pem"}, process.TLSConfig())
 }
@@ -207,14 +207,14 @@ func TestCreateMongodMongosProcess_TLSModeForDifferentSpecs(t *testing.T) {
     additionalConfig := mdbv1.NewAdditionalMongodConfig("net.tls.mode", string(tls.Allow))

     // standalone spec
-    assertTLSConfig(NewMongodProcess(name, host, "fake-mongoDBImage", false, additionalConfig, getSpec(mdbv1.NewStandaloneBuilder()), "", nil, ""))
+    assertTLSConfig(NewMongodProcess(name, host, "fake-mongoDBImage", additionalConfig, getSpec(mdbv1.NewStandaloneBuilder()), "", nil, ""))
     // replica set spec
-    assertTLSConfig(NewMongodProcess(name, host, "fake-mongoDBImage", false, additionalConfig, getSpec(mdbv1.NewReplicaSetBuilder()), "", nil, ""))
+    assertTLSConfig(NewMongodProcess(name, host, "fake-mongoDBImage", additionalConfig, getSpec(mdbv1.NewReplicaSetBuilder()), "", nil, ""))
     // sharded cluster spec
-    assertTLSConfig(NewMongosProcess(name, host, "fake-mongoDBImage", false, additionalConfig, getSpec(mdbv1.NewClusterBuilder()), "", nil, ""))
-    assertTLSConfig(NewMongodProcess(name, host, "fake-mongoDBImage", false, additionalConfig, getSpec(mdbv1.NewClusterBuilder()), "", nil, ""))
+    assertTLSConfig(NewMongosProcess(name, host, "fake-mongoDBImage", additionalConfig, getSpec(mdbv1.NewClusterBuilder()), "", nil, ""))
+    assertTLSConfig(NewMongodProcess(name, host, "fake-mongoDBImage", additionalConfig, getSpec(mdbv1.NewClusterBuilder()), "", nil, ""))
 }

 // TestMergeMongodProcess_SSL verifies that merging for the process SSL settings keeps the Operator "owned" properties
@@ -227,8 +227,8 @@ func TestMergeMongodProcess_SSL(t *testing.T) {
     omMdb := mdbv1.NewStandaloneBuilder().SetVersion("3.6.4").SetFCVersion("3.6").
         SetAdditionalConfig(mdbv1.NewEmptyAdditionalMongodConfig()).Build()

-    operatorProcess := NewMongodProcess("trinity", "trinity-0.trinity-svc.svc.cluster.local", "fake-mongoDBImage", false, &mdbv1.AdditionalMongodConfig{}, operatorMdb.GetSpec(), "", nil, "")
-    omProcess := NewMongodProcess("trinity", "trinity-0.trinity-svc.svc.cluster.local", "fake-mongoDBImage", false, &mdbv1.AdditionalMongodConfig{}, omMdb.GetSpec(), "", nil, "")
+    operatorProcess := NewMongodProcess("trinity", "trinity-0.trinity-svc.svc.cluster.local", "fake-mongoDBImage", &mdbv1.AdditionalMongodConfig{}, operatorMdb.GetSpec(), "", nil, "")
+    omProcess := NewMongodProcess("trinity", "trinity-0.trinity-svc.svc.cluster.local", "fake-mongoDBImage", &mdbv1.AdditionalMongodConfig{}, omMdb.GetSpec(), "", nil, "")

     omProcess.EnsureTLSConfig()["mode"] = "allowTLS"                      // this will be overridden
     omProcess.EnsureTLSConfig()["PEMKeyFile"] = "/var/mongodb/server.pem" // this will be overridden
     omProcess.EnsureTLSConfig()["sslOnNormalPorts"] = "true"              // this will be left as-is
@@ -248,11 +248,11 @@ func TestMergeMongodProcess_SSL(t *testing.T) {
 func TestMergeMongodProcess_MongodbOptions(t *testing.T) {
     omMdb := mdbv1.NewStandaloneBuilder().SetAdditionalConfig(
         mdbv1.NewAdditionalMongodConfig("storage.wiredTiger.engineConfig.cacheSizeGB", 3)).Build()
-    omProcess := NewMongodProcess("trinity", "trinity-0.trinity-svc.svc.cluster.local", "fake-mongoDBImage", false, omMdb.Spec.AdditionalMongodConfig, omMdb.GetSpec(), "", nil, "")
+    omProcess := NewMongodProcess("trinity", "trinity-0.trinity-svc.svc.cluster.local", "fake-mongoDBImage", omMdb.Spec.AdditionalMongodConfig, omMdb.GetSpec(), "", nil, "")

     operatorMdb := mdbv1.NewStandaloneBuilder().SetAdditionalConfig(
         mdbv1.NewAdditionalMongodConfig("storage.wiredTiger.engineConfig.directoryForIndexes", "/some/dir")).Build()
-    operatorProcess := NewMongodProcess("trinity", "trinity-0.trinity-svc.svc.cluster.local", "fake-mongoDBImage", false, operatorMdb.Spec.AdditionalMongodConfig, operatorMdb.GetSpec(), "", nil, "")
+    operatorProcess := NewMongodProcess("trinity", "trinity-0.trinity-svc.svc.cluster.local", "fake-mongoDBImage", operatorMdb.Spec.AdditionalMongodConfig, operatorMdb.GetSpec(), "", nil, "")

     omProcess.mergeFrom(operatorProcess, nil, nil)
@@ -288,7 +288,7 @@ func TestMergeMongodProcess_AdditionalMongodConfig_CanBeRemoved(t *testing.T) {
     prevAdditionalConfig.AddOption("some.other.option2", "value2")

     omMdb := mdbv1.NewStandaloneBuilder().SetAdditionalConfig(prevAdditionalConfig).Build()
-    omProcess := NewMongodProcess("trinity", "trinity-0.trinity-svc.svc.cluster.local", "fake-mongoDBImage", false, omMdb.Spec.AdditionalMongodConfig, omMdb.GetSpec(), "", nil, "")
+    omProcess := NewMongodProcess("trinity", "trinity-0.trinity-svc.svc.cluster.local", "fake-mongoDBImage", omMdb.Spec.AdditionalMongodConfig, omMdb.GetSpec(), "", nil, "")

     specAdditionalConfig := mdbv1.NewEmptyAdditionalMongodConfig()
     // we are changing the cacheSize to 4
@@ -297,7 +297,7 @@ func TestMergeMongodProcess_AdditionalMongodConfig_CanBeRemoved(t *testing.T) {
     specAdditionalConfig.AddOption("some.other.option", "value")

     operatorMdb := mdbv1.NewStandaloneBuilder().SetAdditionalConfig(specAdditionalConfig).Build()
-    operatorProcess := NewMongodProcess("trinity", "trinity-0.trinity-svc.svc.cluster.local", "fake-mongoDBImage", false, operatorMdb.Spec.AdditionalMongodConfig, operatorMdb.GetSpec(), "", nil, "")
+    operatorProcess := NewMongodProcess("trinity", "trinity-0.trinity-svc.svc.cluster.local", "fake-mongoDBImage", operatorMdb.Spec.AdditionalMongodConfig, operatorMdb.GetSpec(), "", nil, "")

     omProcess.mergeFrom(operatorProcess, specAdditionalConfig.ToMap(), prevAdditionalConfig.ToMap())
diff --git a/controllers/om/replicaset/om_replicaset.go b/controllers/om/replicaset/om_replicaset.go
index 09ce05162..5cc166d1c 100644
--- a/controllers/om/replicaset/om_replicaset.go
+++ b/controllers/om/replicaset/om_replicaset.go
@@ -15,15 +15,15 @@ import (

 // BuildFromStatefulSet returns a replica set that can be set in the Automation Config
 // based on the given StatefulSet and MongoDB resource.
-func BuildFromStatefulSet(mongoDBImage string, forceEnterprise bool, set appsv1.StatefulSet, dbSpec mdbv1.DbSpec, fcv string) om.ReplicaSetWithProcesses {
-    return BuildFromStatefulSetWithReplicas(mongoDBImage, forceEnterprise, set, dbSpec, int(*set.Spec.Replicas), fcv)
+func BuildFromStatefulSet(mongoDBImage string, set appsv1.StatefulSet, dbSpec mdbv1.DbSpec, fcv string) om.ReplicaSetWithProcesses {
+    return BuildFromStatefulSetWithReplicas(mongoDBImage, set, dbSpec, int(*set.Spec.Replicas), fcv)
 }

 // BuildFromStatefulSetWithReplicas returns a replica set that can be set in the Automation Config
 // based on the given StatefulSet and MongoDB spec. The amount of members is set by the replicas
 // parameter.
-func BuildFromStatefulSetWithReplicas(mongoDBImage string, forceEnterprise bool, set appsv1.StatefulSet, dbSpec mdbv1.DbSpec, replicas int, fcv string) om.ReplicaSetWithProcesses {
-    members := process.CreateMongodProcessesWithLimit(mongoDBImage, forceEnterprise, set, dbSpec, replicas, fcv)
+func BuildFromStatefulSetWithReplicas(mongoDBImage string, set appsv1.StatefulSet, dbSpec mdbv1.DbSpec, replicas int, fcv string) om.ReplicaSetWithProcesses {
+    members := process.CreateMongodProcessesWithLimit(mongoDBImage, set, dbSpec, replicas, fcv)
     replicaSet := om.NewReplicaSet(set.Name, dbSpec.GetMongoDBVersion())
     rsWithProcesses := om.NewReplicaSetWithProcesses(replicaSet, members, dbSpec.GetMemberOptions())
     rsWithProcesses.SetHorizons(dbSpec.GetHorizonConfig())
diff --git a/controllers/om/replicaset_test.go b/controllers/om/replicaset_test.go
index 738ef023d..ca08ad537 100644
--- a/controllers/om/replicaset_test.go
+++ b/controllers/om/replicaset_test.go
@@ -17,7 +17,7 @@ func makeMinimalRsWithProcesses() ReplicaSetWithProcesses {
     processes := make([]Process, 3)
     memberOptions := make([]automationconfig.MemberOptions, 3)
     for i := range processes {
-        proc := NewMongodProcess("my-test-repl-"+strconv.Itoa(i), "my-test-repl-"+strconv.Itoa(i), "fake-mongoDBImage", false, &mdbv1.AdditionalMongodConfig{}, &mdb.Spec, "", nil, "")
+        proc := NewMongodProcess("my-test-repl-"+strconv.Itoa(i), "my-test-repl-"+strconv.Itoa(i), "fake-mongoDBImage", &mdbv1.AdditionalMongodConfig{}, &mdb.Spec, "", nil, "")
         processes[i] = proc
         replicaSetWithProcesses.addMember(proc, "", memberOptions[i])
     }
diff --git a/controllers/operator/authentication_test.go b/controllers/operator/authentication_test.go
index 6b47167de..3cd5cbad3 100644
--- a/controllers/operator/authentication_test.go
+++ b/controllers/operator/authentication_test.go
@@ -47,7 +47,7 @@ func TestX509CanBeEnabled_WhenThereAreOnlyTlsDeployments_ReplicaSet(t *testing.T
     addKubernetesTlsResources(ctx, kubeClient, rs)

-    reconciler := newReplicaSetReconciler(ctx, kubeClient, nil, "", "", false, false, omConnectionFactory.GetConnectionFunc)
+    reconciler := newReplicaSetReconciler(ctx, kubeClient, nil, "", "", false, omConnectionFactory.GetConnectionFunc)

     checkReconcileSuccessful(ctx, t, reconciler, rs, kubeClient)
 }
@@ -57,7 +57,7 @@ func TestX509ClusterAuthentication_CanBeEnabled_IfX509AuthenticationIsEnabled_Re
     kubeClient, omConnectionFactory := mock.NewDefaultFakeClient(rs)
     addKubernetesTlsResources(ctx, kubeClient, rs)

-    reconciler := newReplicaSetReconciler(ctx, kubeClient, nil, "", "", false, false, omConnectionFactory.GetConnectionFunc)
+    reconciler := newReplicaSetReconciler(ctx, kubeClient, nil, "", "", false, omConnectionFactory.GetConnectionFunc)

     checkReconcileSuccessful(ctx, t, reconciler, rs, kubeClient)
 }
@@ -90,7 +90,7 @@ func TestUpdateOmAuthentication_NoAuthenticationEnabled(t *testing.T) {
     processNames := []string{"my-rs-0", "my-rs-1", "my-rs-2"}

     kubeClient, omConnectionFactory := mock.NewDefaultFakeClient(rs)
-    r := newReplicaSetReconciler(ctx, kubeClient, nil, "", "", false, false, omConnectionFactory.GetConnectionFunc)
+    r := newReplicaSetReconciler(ctx, kubeClient, nil, "", "", false, omConnectionFactory.GetConnectionFunc)
     r.updateOmAuthentication(ctx, conn, processNames, rs, "", "", "", false, zap.S())

     ac, _ := conn.ReadAutomationConfig()
@@ -103,7 +103,7 @@ func TestUpdateOmAuthentication_EnableX509_TlsNotEnabled(t *testing.T) {
     ctx := context.Background()
     rs := DefaultReplicaSetBuilder().SetName("my-rs").SetMembers(3).Build()
     // deployment with existing non-tls non-x509 replica set
-    conn := om.NewMockedOmConnection(deployment.CreateFromReplicaSet("fake-mongoDBImage", false, rs))
+    conn := om.NewMockedOmConnection(deployment.CreateFromReplicaSet("fake-mongoDBImage", rs))

     // configure X509 authentication & tls
     rs.Spec.Security.Authentication.Modes = []mdbv1.AuthMode{"X509"}
@@ -111,7 +111,7 @@
     rs.Spec.Security.TLSConfig.Enabled = true

     kubeClient, omConnectionFactory := mock.NewDefaultFakeClient(rs)
-    r := newReplicaSetReconciler(ctx, kubeClient, nil, "", "", false, false, omConnectionFactory.GetConnectionFunc)
+    r := newReplicaSetReconciler(ctx, kubeClient, nil, "", "", false, omConnectionFactory.GetConnectionFunc)
     status, isMultiStageReconciliation := r.updateOmAuthentication(ctx, conn, []string{"my-rs-0", "my-rs-1", "my-rs-2"}, rs, "", "", "", false, zap.S())

     assert.True(t, status.IsOK(), "configuring both options at once should not result in a failed status")
@@ -121,9 +121,9 @@
 func TestUpdateOmAuthentication_EnableX509_WithTlsAlreadyEnabled(t *testing.T) {
     ctx := context.Background()
     rs := DefaultReplicaSetBuilder().SetName("my-rs").SetMembers(3).EnableTLS().Build()
-    omConnectionFactory := om.NewCachedOMConnectionFactoryWithInitializedConnection(om.NewMockedOmConnection(deployment.CreateFromReplicaSet("fake-mongoDBImage", false, rs)))
+    omConnectionFactory := om.NewCachedOMConnectionFactoryWithInitializedConnection(om.NewMockedOmConnection(deployment.CreateFromReplicaSet("fake-mongoDBImage", rs)))
     kubeClient := mock.NewDefaultFakeClientWithOMConnectionFactory(omConnectionFactory, rs)
-    r := newReplicaSetReconciler(ctx, kubeClient, nil, "", "", false, false, omConnectionFactory.GetConnectionFunc)
+    r := newReplicaSetReconciler(ctx, kubeClient, nil, "", "", false, omConnectionFactory.GetConnectionFunc)
     status, isMultiStageReconciliation := r.updateOmAuthentication(ctx, omConnectionFactory.GetConnection(), []string{"my-rs-0", "my-rs-1", "my-rs-2"}, rs, "", "", "", false, zap.S())

     assert.True(t, status.IsOK(), "configuring x509 when tls has already been enabled should not result in a failed status")
@@ -136,9 +136,9 @@ func TestUpdateOmAuthentication_AuthenticationIsNotConfigured_IfAuthIsNotSet(t *
     rs.Spec.Security.Authentication = nil

-    omConnectionFactory := om.NewCachedOMConnectionFactoryWithInitializedConnection(om.NewMockedOmConnection(deployment.CreateFromReplicaSet("fake-mongoDBImage", false, rs)))
+    omConnectionFactory := om.NewCachedOMConnectionFactoryWithInitializedConnection(om.NewMockedOmConnection(deployment.CreateFromReplicaSet("fake-mongoDBImage", rs)))
     kubeClient := mock.NewDefaultFakeClientWithOMConnectionFactory(omConnectionFactory, rs)
-    r := newReplicaSetReconciler(ctx, kubeClient, nil, "", "", false, false, omConnectionFactory.GetConnectionFunc)
+    r := newReplicaSetReconciler(ctx, kubeClient, nil, "", "", false, omConnectionFactory.GetConnectionFunc)
     status, _ := r.updateOmAuthentication(ctx, omConnectionFactory.GetConnection(), []string{"my-rs-0", "my-rs-1", "my-rs-2"}, rs, "", "", "", false, zap.S())

     assert.True(t, status.IsOK(), "no authentication should have been configured")
@@ -161,7 +161,7 @@ func TestUpdateOmAuthentication_DoesNotDisableAuth_IfAuthIsNotSet(t *testing.T)
         Build()

     kubeClient, omConnectionFactory := mock.NewDefaultFakeClient(rs)
-    reconciler := newReplicaSetReconciler(ctx, kubeClient, nil, "", "", false, false, omConnectionFactory.GetConnectionFunc)
+    reconciler := newReplicaSetReconciler(ctx, kubeClient, nil, "", "", false, omConnectionFactory.GetConnectionFunc)

     addKubernetesTlsResources(ctx, kubeClient, rs)
@@ -174,7 +174,7 @@
     rs.Spec.Security.Authentication = nil

-    reconciler = newReplicaSetReconciler(ctx, kubeClient, nil, "", "", false, false, omConnectionFactory.GetConnectionFunc)
+    reconciler = newReplicaSetReconciler(ctx, kubeClient, nil, "", "", false, omConnectionFactory.GetConnectionFunc)

     checkReconcileSuccessful(ctx, t, reconciler, rs, kubeClient)
@@ -196,7 +196,7 @@ func TestCanConfigureAuthenticationDisabled_WithNoModes(t *testing.T) {
         Build()

     kubeClient, omConnectionFactory := mock.NewDefaultFakeClient(rs)
-    reconciler := newReplicaSetReconciler(ctx, kubeClient, nil, "", "", false, false, omConnectionFactory.GetConnectionFunc)
+    reconciler := newReplicaSetReconciler(ctx, kubeClient, nil, "", "", false, omConnectionFactory.GetConnectionFunc)

     addKubernetesTlsResources(ctx, kubeClient, rs)
@@ -208,7 +208,7 @@ func TestUpdateOmAuthentication_EnableX509_FromEmptyDeployment(t *testing.T) {
     rs := DefaultReplicaSetBuilder().SetName("my-rs").SetMembers(3).EnableTLS().EnableAuth().EnableX509().Build()
     omConnectionFactory := om.NewCachedOMConnectionFactoryWithInitializedConnection(om.NewMockedOmConnection(om.NewDeployment()))
     kubeClient := mock.NewDefaultFakeClientWithOMConnectionFactory(omConnectionFactory, rs)
-    r := newReplicaSetReconciler(ctx, kubeClient, nil, "", "", false, false, omConnectionFactory.GetConnectionFunc)
+    r := newReplicaSetReconciler(ctx, kubeClient, nil, "", "", false, omConnectionFactory.GetConnectionFunc)
     createAgentCSRs(t, ctx, 1, r.client, certsv1.CertificateApproved)

     status, isMultiStageReconciliation := r.updateOmAuthentication(ctx, omConnectionFactory.GetConnection(), []string{"my-rs-0", "my-rs-1", "my-rs-2"}, rs, "", "", "", false, zap.S())
@@ -228,7 +228,7 @@ func TestX509AgentUserIsCorrectlyConfigured(t *testing.T) {
     // configure x509/tls resources
     addKubernetesTlsResources(ctx, kubeClient, rs)
-    reconciler := newReplicaSetReconciler(ctx, kubeClient, nil, "", "", false, false, omConnectionFactory.GetConnectionFunc)
+    reconciler := newReplicaSetReconciler(ctx, kubeClient, nil, "", "", false, omConnectionFactory.GetConnectionFunc)

     checkReconcileSuccessful(ctx, t, reconciler, rs, kubeClient)
@@ -264,7 +264,7 @@ func TestScramAgentUserIsCorrectlyConfigured(t *testing.T) {
     assert.NoError(t, err)

-    reconciler := newReplicaSetReconciler(ctx, kubeClient, nil, "", "", false, false, omConnectionFactory.GetConnectionFunc)
+    reconciler := newReplicaSetReconciler(ctx, kubeClient, nil, "", "", false, omConnectionFactory.GetConnectionFunc)

     checkReconcileSuccessful(ctx, t, reconciler, rs, kubeClient)
@@ -294,7 +294,7 @@ func TestScramAgentUser_IsNotOverridden(t *testing.T) {
         }
     })

-    reconciler := newReplicaSetReconciler(ctx, kubeClient, nil, "", "", false, false, omConnectionFactory.GetConnectionFunc)
+    reconciler := newReplicaSetReconciler(ctx, kubeClient, nil, "", "", false, omConnectionFactory.GetConnectionFunc)

     checkReconcileSuccessful(ctx, t, reconciler, rs, kubeClient)
@@ -313,7 +313,7 @@ func TestX509InternalClusterAuthentication_CanBeEnabledWithScram_ReplicaSet(t *t
         Build()

     kubeClient, omConnectionFactory := mock.NewDefaultFakeClient(rs)
-    r := newReplicaSetReconciler(ctx, kubeClient, nil, "", "", false, false, omConnectionFactory.GetConnectionFunc)
+    r := newReplicaSetReconciler(ctx, kubeClient, nil, "", "", false, omConnectionFactory.GetConnectionFunc)
     addKubernetesTlsResources(ctx, r.client, rs)

     checkReconcileSuccessful(ctx, t, r, rs, kubeClient)
@@ -366,7 +366,7 @@ func TestConfigureLdapDeploymentAuthentication_WithScramAgentAuthentication(t *t
         Build()

     kubeClient, omConnectionFactory := mock.NewDefaultFakeClient(rs)
-    r := newReplicaSetReconciler(ctx, kubeClient, nil, "", "", false, false, omConnectionFactory.GetConnectionFunc)
+    r := newReplicaSetReconciler(ctx, kubeClient, nil, "", "", false, omConnectionFactory.GetConnectionFunc)
     data := map[string]string{
         "password": "LITZTOd6YiCV8j",
     }
@@ -423,7 +423,7 @@ func TestConfigureLdapDeploymentAuthentication_WithCustomRole(t *testing.T) {
         Build()

     kubeClient, omConnectionFactory := mock.NewDefaultFakeClient(rs)
-    r := newReplicaSetReconciler(ctx, kubeClient, nil, "", "", false, false, omConnectionFactory.GetConnectionFunc)
+    r := newReplicaSetReconciler(ctx, kubeClient, nil, "", "", false, omConnectionFactory.GetConnectionFunc)
     data := map[string]string{
         "password": "LITZTOd6YiCV8j",
     }
@@ -477,7 +477,7 @@ func TestConfigureLdapDeploymentAuthentication_WithAuthzQueryTemplate_AndUserToD
         Build()

     kubeClient, omConnectionFactory := mock.NewDefaultFakeClient(rs)
-    r := newReplicaSetReconciler(ctx, kubeClient, nil, "", "", false, false, omConnectionFactory.GetConnectionFunc)
+    r := newReplicaSetReconciler(ctx, kubeClient, nil, "", "", false, omConnectionFactory.GetConnectionFunc)
     data := map[string]string{
         "password": "LITZTOd6YiCV8j",
     }
@@ -740,7 +740,7 @@ func TestInvalidPEM_SecretDoesNotContainKey(t *testing.T) {
         Build()

     kubeClient, omConnectionFactory := mock.NewDefaultFakeClient(rs)
-    reconciler := newReplicaSetReconciler(ctx, kubeClient, nil, "", "", false, false, omConnectionFactory.GetConnectionFunc)
+    reconciler := newReplicaSetReconciler(ctx, kubeClient, nil, "", "", false, omConnectionFactory.GetConnectionFunc)
     addKubernetesTlsResources(ctx, kubeClient, rs)

     // Replace the secret with an empty one
@@ -795,7 +795,7 @@ func Test_NoExternalDomainPresent(t *testing.T) {
     rs.Spec.ExternalAccessConfiguration = &mdbv1.ExternalAccessConfiguration{ExternalDomain: ptr.To("foo")}

     kubeClient, omConnectionFactory := mock.NewDefaultFakeClient(rs)
-    reconciler := newReplicaSetReconciler(ctx, kubeClient, nil, "", "", false, false, omConnectionFactory.GetConnectionFunc)
+    reconciler := newReplicaSetReconciler(ctx, kubeClient, nil, "", "", false, omConnectionFactory.GetConnectionFunc)
     addKubernetesTlsResources(ctx, kubeClient, rs)

     secret := &corev1.Secret{}
diff --git a/controllers/operator/mongodbmultireplicaset_controller.go b/controllers/operator/mongodbmultireplicaset_controller.go
index fc2ccde71..92f4943ff 100644
--- a/controllers/operator/mongodbmultireplicaset_controller.go
+++ b/controllers/operator/mongodbmultireplicaset_controller.go
@@ -80,7 +80,6 @@ type ReconcileMongoDbMultiReplicaSet struct {
     omConnectionFactory           om.ConnectionFactory
     memberClusterClientsMap       map[string]kubernetesClient.Client // holds the client for each of the member clusters (where the MongoDB ReplicaSet is deployed)
     memberClusterSecretClientsMap map[string]secrets.SecretClient
-    forceEnterprise               bool
     enableClusterMongoDBRoles     bool

     imageUrls images.ImageUrls
@@ -90,7 +89,7 @@ type ReconcileMongoDbMultiReplicaSet struct {

 var _ reconcile.Reconciler = &ReconcileMongoDbMultiReplicaSet{}

-func newMultiClusterReplicaSetReconciler(ctx context.Context, kubeClient client.Client, imageUrls images.ImageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion string, forceEnterprise bool, enableClusterMongoDBRoles bool, omFunc om.ConnectionFactory, memberClustersMap map[string]client.Client) *ReconcileMongoDbMultiReplicaSet {
+func newMultiClusterReplicaSetReconciler(ctx context.Context, kubeClient client.Client, imageUrls images.ImageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion string, enableClusterMongoDBRoles bool, omFunc om.ConnectionFactory, memberClustersMap map[string]client.Client) *ReconcileMongoDbMultiReplicaSet {
     clientsMap := make(map[string]kubernetesClient.Client)
     secretClientsMap := make(map[string]secrets.SecretClient)
@@ -108,7 +107,6 @@ func newMultiClusterReplicaSetReconciler(ctx context.Context, kubeClient client.
         omConnectionFactory:           omFunc,
         memberClusterClientsMap:       clientsMap,
         memberClusterSecretClientsMap: secretClientsMap,
-        forceEnterprise:               forceEnterprise,
         imageUrls:                         imageUrls,
         initDatabaseNonStaticImageVersion: initDatabaseNonStaticImageVersion,
         databaseNonStaticImageVersion:     databaseNonStaticImageVersion,
@@ -746,7 +744,7 @@ func (r *ReconcileMongoDbMultiReplicaSet) updateOmDeploymentRs(ctx context.Conte
         }
     }

-    processes, err := process.CreateMongodProcessesWithLimitMulti(r.imageUrls[mcoConstruct.MongodbImageEnv], r.forceEnterprise, mrs, certificateFileName)
+    processes, err := process.CreateMongodProcessesWithLimitMulti(r.imageUrls[mcoConstruct.MongodbImageEnv], mrs, certificateFileName)
     if err != nil && !isRecovering {
         return err
     }
@@ -1110,9 +1108,9 @@ func (r *ReconcileMongoDbMultiReplicaSet) reconcileOMCAConfigMap(ctx context.Con

 // AddMultiReplicaSetController creates a new MongoDbMultiReplicaset Controller and adds it to the Manager. The Manager will set fields on the Controller
 // and Start it when the Manager is Started.
-func AddMultiReplicaSetController(ctx context.Context, mgr manager.Manager, imageUrls images.ImageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion string, forceEnterprise bool, enableClusterMongoDBRoles bool, memberClustersMap map[string]cluster.Cluster) error {
+func AddMultiReplicaSetController(ctx context.Context, mgr manager.Manager, imageUrls images.ImageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion string, enableClusterMongoDBRoles bool, memberClustersMap map[string]cluster.Cluster) error {
     // Create a new controller
-    reconciler := newMultiClusterReplicaSetReconciler(ctx, mgr.GetClient(), imageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion, forceEnterprise, enableClusterMongoDBRoles, om.NewOpsManagerConnection, multicluster.ClustersMapToClientMap(memberClustersMap))
+    reconciler := newMultiClusterReplicaSetReconciler(ctx, mgr.GetClient(), imageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion, enableClusterMongoDBRoles, om.NewOpsManagerConnection, multicluster.ClustersMapToClientMap(memberClustersMap))
     c, err := controller.New(util.MongoDbMultiClusterController, mgr, controller.Options{Reconciler: reconciler, MaxConcurrentReconciles: env.ReadIntOrDefault(util.MaxConcurrentReconcilesEnv, 1)}) // nolint:forbidigo
     if err != nil {
         return err
diff --git a/controllers/operator/mongodbmultireplicaset_controller_test.go b/controllers/operator/mongodbmultireplicaset_controller_test.go
index 806ace443..1b79676fa 100644
--- a/controllers/operator/mongodbmultireplicaset_controller_test.go
+++ b/controllers/operator/mongodbmultireplicaset_controller_test.go
@@ -758,7 +758,7 @@ func TestMultiReplicaSetRace(t *testing.T) {
     omConnectionFactory := om.NewDefaultCachedOMConnectionFactory().WithResourceToProjectMapping(resourceToProjectMapping)
     memberClusterMap := getFakeMultiClusterMapWithConfiguredInterceptor(clusters, omConnectionFactory, true, true)

-    reconciler := newMultiClusterReplicaSetReconciler(ctx, fakeClient, nil, "fake-initDatabaseNonStaticImageVersion", "fake-databaseNonStaticImageVersion", false, false, omConnectionFactory.GetConnectionFunc, memberClusterMap)
+    reconciler := newMultiClusterReplicaSetReconciler(ctx, fakeClient, nil, "fake-initDatabaseNonStaticImageVersion", "fake-databaseNonStaticImageVersion", false, omConnectionFactory.GetConnectionFunc, memberClusterMap)

     testConcurrentReconciles(ctx, t, fakeClient, reconciler, rs1, rs2, rs3)
 }
@@ -1514,7 +1514,7 @@ func calculateHostNamesForExternalDomains(m *mdbmulti.MongoDBMultiCluster) []str
 func multiReplicaSetReconciler(ctx context.Context, imageUrls images.ImageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion string, m *mdbmulti.MongoDBMultiCluster) (*ReconcileMongoDbMultiReplicaSet, kubernetesClient.Client, map[string]client.Client, *om.CachedOMConnectionFactory) {
     kubeClient, omConnectionFactory := mock.NewDefaultFakeClient(m)
     memberClusterMap := getFakeMultiClusterMap(omConnectionFactory)
-    return newMultiClusterReplicaSetReconciler(ctx, kubeClient, imageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion, false, false, omConnectionFactory.GetConnectionFunc, memberClusterMap), kubeClient, memberClusterMap, omConnectionFactory
+    return newMultiClusterReplicaSetReconciler(ctx, kubeClient, imageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion, false, omConnectionFactory.GetConnectionFunc, memberClusterMap), kubeClient, memberClusterMap, omConnectionFactory
 }

 func getFakeMultiClusterMap(omConnectionFactory *om.CachedOMConnectionFactory) map[string]client.Client {
diff --git a/controllers/operator/mongodbreplicaset_controller.go b/controllers/operator/mongodbreplicaset_controller.go
index c16ed89c6..aaabd4aa8 100644
--- a/controllers/operator/mongodbreplicaset_controller.go
+++ b/controllers/operator/mongodbreplicaset_controller.go
@@ -61,7 +61,6 @@ type ReconcileMongoDbReplicaSet struct {
     *ReconcileCommonController
     omConnectionFactory om.ConnectionFactory
     imageUrls           images.ImageUrls
-    forceEnterprise     bool
     enableClusterMongoDBRoles bool

     initDatabaseNonStaticImageVersion string
@@ -70,12 +69,11 @@ type ReconcileMongoDbReplicaSet struct {

 var _ reconcile.Reconciler = &ReconcileMongoDbReplicaSet{}

-func newReplicaSetReconciler(ctx context.Context, kubeClient client.Client, imageUrls images.ImageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion string, forceEnterprise bool, enableClusterMongoDBRoles bool, omFunc om.ConnectionFactory) *ReconcileMongoDbReplicaSet {
+func newReplicaSetReconciler(ctx context.Context, kubeClient client.Client, imageUrls images.ImageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion string, enableClusterMongoDBRoles bool, omFunc om.ConnectionFactory) *ReconcileMongoDbReplicaSet {
     return &ReconcileMongoDbReplicaSet{
         ReconcileCommonController: NewReconcileCommonController(ctx, kubeClient),
         omConnectionFactory:       omFunc,
         imageUrls:                 imageUrls,
-        forceEnterprise:           forceEnterprise,
         enableClusterMongoDBRoles: enableClusterMongoDBRoles,

         initDatabaseNonStaticImageVersion: initDatabaseNonStaticImageVersion,
@@ -349,9 +347,9 @@ func (r *ReconcileMongoDbReplicaSet) reconcileHostnameOverrideConfigMap(ctx cont

 // AddReplicaSetController creates a new MongoDbReplicaset Controller and adds it to the Manager. The Manager will set fields on the Controller
 // and Start it when the Manager is Started.
-func AddReplicaSetController(ctx context.Context, mgr manager.Manager, imageUrls images.ImageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion string, forceEnterprise bool, enableClusterMongoDBRoles bool) error {
+func AddReplicaSetController(ctx context.Context, mgr manager.Manager, imageUrls images.ImageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion string, enableClusterMongoDBRoles bool) error {
     // Create a new controller
-    reconciler := newReplicaSetReconciler(ctx, mgr.GetClient(), imageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion, forceEnterprise, enableClusterMongoDBRoles, om.NewOpsManagerConnection)
+    reconciler := newReplicaSetReconciler(ctx, mgr.GetClient(), imageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion, enableClusterMongoDBRoles, om.NewOpsManagerConnection)
     c, err := controller.New(util.MongoDbReplicaSetController, mgr, controller.Options{Reconciler: reconciler, MaxConcurrentReconciles: env.ReadIntOrDefault(util.MaxConcurrentReconcilesEnv, 1)}) // nolint:forbidigo
     if err != nil {
         return err
@@ -427,7 +425,7 @@ func (r *ReconcileMongoDbReplicaSet) updateOmDeploymentRs(ctx context.Context, c
     // If the current operation is to disable TLS, then we should keep the current members of the Replica Set,
     // that is, do not scale them up or down until TLS disabling has completed.
-    shouldLockMembers, err := updateOmDeploymentDisableTLSConfiguration(conn, r.imageUrls[mcoConstruct.MongodbImageEnv], r.forceEnterprise, membersNumberBefore, rs, set, log, caFilePath)
+    shouldLockMembers, err := updateOmDeploymentDisableTLSConfiguration(conn, r.imageUrls[mcoConstruct.MongodbImageEnv], membersNumberBefore, rs, set, log, caFilePath)
     if err != nil && !isRecovering {
         return workflow.Failed(err)
     }
@@ -441,7 +439,7 @@ func (r *ReconcileMongoDbReplicaSet) updateOmDeploymentRs(ctx context.Context, c
         updatedMembers = int(*set.Spec.Replicas)
     }

-    replicaSet := replicaset.BuildFromStatefulSetWithReplicas(r.imageUrls[mcoConstruct.MongodbImageEnv], r.forceEnterprise, set, rs.GetSpec(), updatedMembers, rs.CalculateFeatureCompatibilityVersion())
+    replicaSet := replicaset.BuildFromStatefulSetWithReplicas(r.imageUrls[mcoConstruct.MongodbImageEnv], set, rs.GetSpec(), updatedMembers, rs.CalculateFeatureCompatibilityVersion())
     processNames := replicaSet.GetProcessNames()

     internalClusterPath := ""
@@ -510,7 +508,7 @@ func (r *ReconcileMongoDbReplicaSet) updateOmDeploymentRs(ctx context.Context, c
 // updateOmDeploymentDisableTLSConfiguration checks if the TLS configuration needs
 // to be disabled, in which case it will disable it and inform the calling
 // function.
-func updateOmDeploymentDisableTLSConfiguration(conn om.Connection, mongoDBImage string, forceEnterprise bool, membersNumberBefore int, rs *mdbv1.MongoDB, set appsv1.StatefulSet, log *zap.SugaredLogger, caFilePath string) (bool, error) {
+func updateOmDeploymentDisableTLSConfiguration(conn om.Connection, mongoDBImage string, membersNumberBefore int, rs *mdbv1.MongoDB, set appsv1.StatefulSet, log *zap.SugaredLogger, caFilePath string) (bool, error) {
     tlsConfigWasDisabled := false

     err := conn.ReadUpdateDeployment(
@@ -524,7 +522,7 @@ func updateOmDeploymentDisableTLSConfiguration(conn om.Connection, mongoDBImage
             // configure as many agents/Pods as we currently have, no more (in case
             // there's a scale up change at the same time).
- replicaSet := replicaset.BuildFromStatefulSetWithReplicas(mongoDBImage, forceEnterprise, set, rs.GetSpec(), membersNumberBefore, rs.CalculateFeatureCompatibilityVersion()) + replicaSet := replicaset.BuildFromStatefulSetWithReplicas(mongoDBImage, set, rs.GetSpec(), membersNumberBefore, rs.CalculateFeatureCompatibilityVersion()) lastConfig, err := rs.GetLastAdditionalMongodConfigByType(mdbv1.ReplicaSetConfig) if err != nil { diff --git a/controllers/operator/mongodbreplicaset_controller_test.go b/controllers/operator/mongodbreplicaset_controller_test.go index 4c9abb64c..238a158e3 100644 --- a/controllers/operator/mongodbreplicaset_controller_test.go +++ b/controllers/operator/mongodbreplicaset_controller_test.go @@ -67,7 +67,7 @@ func TestCreateReplicaSet(t *testing.T) { assert.Equal(t, *sts.Spec.Replicas, int32(3)) connection := omConnectionFactory.GetConnection() - connection.(*om.MockedOmConnection).CheckDeployment(t, deployment.CreateFromReplicaSet("fake-mongoDBImage", false, rs), "auth", "ssl") + connection.(*om.MockedOmConnection).CheckDeployment(t, deployment.CreateFromReplicaSet("fake-mongoDBImage", rs), "auth", "ssl") connection.(*om.MockedOmConnection).CheckNumberOfUpdateRequests(t, 2) } @@ -92,7 +92,7 @@ func TestReplicaSetRace(t *testing.T) { Get: mock.GetFakeClientInterceptorGetFunc(omConnectionFactory, true, true), }).Build() - reconciler := newReplicaSetReconciler(ctx, fakeClient, nil, "fake-initDatabaseNonStaticImageVersion", "fake-databaseNonStaticImageVersion", false, false, omConnectionFactory.GetConnectionFunc) + reconciler := newReplicaSetReconciler(ctx, fakeClient, nil, "fake-initDatabaseNonStaticImageVersion", "fake-databaseNonStaticImageVersion", false, omConnectionFactory.GetConnectionFunc) testConcurrentReconciles(ctx, t, fakeClient, reconciler, rs, rs2, rs3) } @@ -376,28 +376,28 @@ func TestCreateReplicaSet_TLS(t *testing.T) { func TestUpdateDeploymentTLSConfiguration(t *testing.T) { rsWithTLS := mdbv1.NewReplicaSetBuilder().SetSecurityTLSEnabled().Build() rsNoTLS := mdbv1.NewReplicaSetBuilder().Build() - deploymentWithTLS := deployment.CreateFromReplicaSet("fake-mongoDBImage", false, rsWithTLS) - deploymentNoTLS := deployment.CreateFromReplicaSet("fake-mongoDBImage", false, rsNoTLS) + deploymentWithTLS := deployment.CreateFromReplicaSet("fake-mongoDBImage", rsWithTLS) + deploymentNoTLS := deployment.CreateFromReplicaSet("fake-mongoDBImage", rsNoTLS) stsWithTLS := construct.DatabaseStatefulSet(*rsWithTLS, construct.ReplicaSetOptions(construct.GetPodEnvOptions()), zap.S()) stsNoTLS := construct.DatabaseStatefulSet(*rsNoTLS, construct.ReplicaSetOptions(construct.GetPodEnvOptions()), zap.S()) // TLS Disabled -> TLS Disabled - shouldLockMembers, err := updateOmDeploymentDisableTLSConfiguration(om.NewMockedOmConnection(deploymentNoTLS), "fake-mongoDBImage", false, 3, rsNoTLS, stsNoTLS, zap.S(), util.CAFilePathInContainer) + shouldLockMembers, err := updateOmDeploymentDisableTLSConfiguration(om.NewMockedOmConnection(deploymentNoTLS), "fake-mongoDBImage", 3, rsNoTLS, stsNoTLS, zap.S(), util.CAFilePathInContainer) assert.NoError(t, err) assert.False(t, shouldLockMembers) // TLS Disabled -> TLS Enabled - shouldLockMembers, err = updateOmDeploymentDisableTLSConfiguration(om.NewMockedOmConnection(deploymentNoTLS), "fake-mongoDBImage", false, 3, rsWithTLS, stsWithTLS, zap.S(), util.CAFilePathInContainer) + shouldLockMembers, err = updateOmDeploymentDisableTLSConfiguration(om.NewMockedOmConnection(deploymentNoTLS), "fake-mongoDBImage", 3, rsWithTLS, stsWithTLS, zap.S(), 
util.CAFilePathInContainer) assert.NoError(t, err) assert.False(t, shouldLockMembers) // TLS Enabled -> TLS Enabled - shouldLockMembers, err = updateOmDeploymentDisableTLSConfiguration(om.NewMockedOmConnection(deploymentWithTLS), "fake-mongoDBImage", false, 3, rsWithTLS, stsWithTLS, zap.S(), util.CAFilePathInContainer) + shouldLockMembers, err = updateOmDeploymentDisableTLSConfiguration(om.NewMockedOmConnection(deploymentWithTLS), "fake-mongoDBImage", 3, rsWithTLS, stsWithTLS, zap.S(), util.CAFilePathInContainer) assert.NoError(t, err) assert.False(t, shouldLockMembers) // TLS Enabled -> TLS Disabled - shouldLockMembers, err = updateOmDeploymentDisableTLSConfiguration(om.NewMockedOmConnection(deploymentWithTLS), "fake-mongoDBImage", false, 3, rsNoTLS, stsNoTLS, zap.S(), util.CAFilePathInContainer) + shouldLockMembers, err = updateOmDeploymentDisableTLSConfiguration(om.NewMockedOmConnection(deploymentWithTLS), "fake-mongoDBImage", 3, rsNoTLS, stsNoTLS, zap.S(), util.CAFilePathInContainer) assert.NoError(t, err) assert.True(t, shouldLockMembers) } @@ -410,7 +410,7 @@ func TestCreateDeleteReplicaSet(t *testing.T) { omConnectionFactory := om.NewCachedOMConnectionFactory(omConnectionFactoryFuncSettingVersion()) fakeClient := mock.NewDefaultFakeClientWithOMConnectionFactory(omConnectionFactory, rs) - reconciler := newReplicaSetReconciler(ctx, fakeClient, nil, "fake-initDatabaseNonStaticImageVersion", "fake-databaseNonStaticImageVersion", false, false, omConnectionFactory.GetConnectionFunc) + reconciler := newReplicaSetReconciler(ctx, fakeClient, nil, "fake-initDatabaseNonStaticImageVersion", "fake-databaseNonStaticImageVersion", false, omConnectionFactory.GetConnectionFunc) checkReconcileSuccessful(ctx, t, reconciler, rs, fakeClient) omConn := omConnectionFactory.GetConnection() @@ -549,7 +549,7 @@ func TestFeatureControlPolicyAndTagAddedWithNewerOpsManager(t *testing.T) { omConnectionFactory := om.NewCachedOMConnectionFactory(omConnectionFactoryFuncSettingVersion()) fakeClient := mock.NewDefaultFakeClientWithOMConnectionFactory(omConnectionFactory, rs) - reconciler := newReplicaSetReconciler(ctx, fakeClient, nil, "fake-initDatabaseNonStaticImageVersion", "fake-databaseNonStaticImageVersion", false, false, omConnectionFactory.GetConnectionFunc) + reconciler := newReplicaSetReconciler(ctx, fakeClient, nil, "fake-initDatabaseNonStaticImageVersion", "fake-databaseNonStaticImageVersion", false, omConnectionFactory.GetConnectionFunc) checkReconcileSuccessful(ctx, t, reconciler, rs, fakeClient) @@ -573,7 +573,7 @@ func TestFeatureControlPolicyNoAuthNewerOpsManager(t *testing.T) { omConnectionFactory := om.NewCachedOMConnectionFactory(omConnectionFactoryFuncSettingVersion()) fakeClient := mock.NewDefaultFakeClientWithOMConnectionFactory(omConnectionFactory, rs) - reconciler := newReplicaSetReconciler(ctx, fakeClient, nil, "fake-initDatabaseNonStaticImageVersion", "fake-databaseNonStaticImageVersion", false, false, omConnectionFactory.GetConnectionFunc) + reconciler := newReplicaSetReconciler(ctx, fakeClient, nil, "fake-initDatabaseNonStaticImageVersion", "fake-databaseNonStaticImageVersion", false, omConnectionFactory.GetConnectionFunc) checkReconcileSuccessful(ctx, t, reconciler, rs, fakeClient) @@ -983,7 +983,7 @@ func assertCorrectNumberOfMembersAndProcesses(ctx context.Context, t *testing.T, func defaultReplicaSetReconciler(ctx context.Context, imageUrls images.ImageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion string, rs *mdbv1.MongoDB) (*ReconcileMongoDbReplicaSet, 
kubernetesClient.Client, *om.CachedOMConnectionFactory) {
 	kubeClient, omConnectionFactory := mock.NewDefaultFakeClient(rs)
-	return newReplicaSetReconciler(ctx, kubeClient, imageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion, false, false, omConnectionFactory.GetConnectionFunc), kubeClient, omConnectionFactory
+	return newReplicaSetReconciler(ctx, kubeClient, imageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion, false, omConnectionFactory.GetConnectionFunc), kubeClient, omConnectionFactory
 }
 
 // newDefaultPodSpec creates pod spec with default values, sets only the topology key and persistence sizes,
diff --git a/controllers/operator/mongodbshardedcluster_controller.go b/controllers/operator/mongodbshardedcluster_controller.go
index fb98984ef..97dc9801d 100644
--- a/controllers/operator/mongodbshardedcluster_controller.go
+++ b/controllers/operator/mongodbshardedcluster_controller.go
@@ -79,19 +79,17 @@ type ReconcileMongoDbShardedCluster struct {
 	omConnectionFactory om.ConnectionFactory
 	memberClustersMap map[string]client.Client
 	imageUrls images.ImageUrls
-	forceEnterprise bool
 	enableClusterMongoDBRoles bool
 
 	initDatabaseNonStaticImageVersion string
 	databaseNonStaticImageVersion string
 }
 
-func newShardedClusterReconciler(ctx context.Context, kubeClient client.Client, imageUrls images.ImageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion string, forceEnterprise bool, enableClusterMongoDBRoles bool, memberClusterMap map[string]client.Client, omFunc om.ConnectionFactory) *ReconcileMongoDbShardedCluster {
+func newShardedClusterReconciler(ctx context.Context, kubeClient client.Client, imageUrls images.ImageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion string, enableClusterMongoDBRoles bool, memberClusterMap map[string]client.Client, omFunc om.ConnectionFactory) *ReconcileMongoDbShardedCluster {
 	return &ReconcileMongoDbShardedCluster{
 		ReconcileCommonController: NewReconcileCommonController(ctx, kubeClient),
 		omConnectionFactory: omFunc,
 		memberClustersMap: memberClusterMap,
-		forceEnterprise: forceEnterprise,
 		imageUrls: imageUrls,
 		enableClusterMongoDBRoles: enableClusterMongoDBRoles,
@@ -631,7 +629,6 @@ type ShardedClusterReconcileHelper struct {
 	commonController *ReconcileCommonController
 	omConnectionFactory om.ConnectionFactory
 	imageUrls images.ImageUrls
-	forceEnterprise bool
 	enableClusterMongoDBRoles bool
 
 	automationAgentVersion string
@@ -671,7 +668,7 @@ func NewReadOnlyClusterReconcilerHelper(
 	globalMemberClustersMap map[string]client.Client,
 	log *zap.SugaredLogger,
 ) (*ShardedClusterReconcileHelper, error) {
-	return newShardedClusterReconcilerHelper(ctx, reconciler, nil, "", "", false, false,
+	return newShardedClusterReconcilerHelper(ctx, reconciler, nil, "", "", false,
 		sc, globalMemberClustersMap, nil, log, true)
 }
 
@@ -681,7 +678,6 @@ func NewShardedClusterReconcilerHelper(
 	imageUrls images.ImageUrls,
 	initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion string,
-	forceEnterprise bool,
 	enableClusterMongoDBRoles bool,
 	sc *mdbv1.MongoDB,
 	globalMemberClustersMap map[string]client.Client,
@@ -689,7 +685,7 @@ func NewShardedClusterReconcilerHelper(
 	log *zap.SugaredLogger,
 ) (*ShardedClusterReconcileHelper, error) {
 	return newShardedClusterReconcilerHelper(ctx, reconciler, imageUrls, initDatabaseNonStaticImageVersion,
-		databaseNonStaticImageVersion, forceEnterprise, enableClusterMongoDBRoles, sc, globalMemberClustersMap, omConnectionFactory, log, false)
+		databaseNonStaticImageVersion, 
enableClusterMongoDBRoles, sc, globalMemberClustersMap, omConnectionFactory, log, false) } func newShardedClusterReconcilerHelper( @@ -698,7 +694,6 @@ func newShardedClusterReconcilerHelper( imageUrls images.ImageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion string, - forceEnterprise bool, enableClusterMongoDBRoles bool, sc *mdbv1.MongoDB, globalMemberClustersMap map[string]client.Client, @@ -717,7 +712,6 @@ func newShardedClusterReconcilerHelper( commonController: reconciler, omConnectionFactory: omConnectionFactory, imageUrls: imageUrls, - forceEnterprise: forceEnterprise, enableClusterMongoDBRoles: enableClusterMongoDBRoles, initDatabaseNonStaticImageVersion: initDatabaseNonStaticImageVersion, @@ -849,7 +843,7 @@ func (r *ReconcileMongoDbShardedCluster) Reconcile(ctx context.Context, request return reconcileResult, err } - reconcilerHelper, err := NewShardedClusterReconcilerHelper(ctx, r.ReconcileCommonController, r.imageUrls, r.initDatabaseNonStaticImageVersion, r.databaseNonStaticImageVersion, r.forceEnterprise, r.enableClusterMongoDBRoles, sc, r.memberClustersMap, r.omConnectionFactory, log) + reconcilerHelper, err := NewShardedClusterReconcilerHelper(ctx, r.ReconcileCommonController, r.imageUrls, r.initDatabaseNonStaticImageVersion, r.databaseNonStaticImageVersion, r.enableClusterMongoDBRoles, sc, r.memberClustersMap, r.omConnectionFactory, log) if err != nil { return r.updateStatus(ctx, sc, workflow.Failed(xerrors.Errorf("Failed to initialize sharded cluster reconciler: %w", err)), log) } @@ -858,7 +852,7 @@ func (r *ReconcileMongoDbShardedCluster) Reconcile(ctx context.Context, request // OnDelete tries to complete a Deletion reconciliation event func (r *ReconcileMongoDbShardedCluster) OnDelete(ctx context.Context, obj runtime.Object, log *zap.SugaredLogger) error { - reconcilerHelper, err := NewShardedClusterReconcilerHelper(ctx, r.ReconcileCommonController, r.imageUrls, r.initDatabaseNonStaticImageVersion, r.databaseNonStaticImageVersion, r.forceEnterprise, r.enableClusterMongoDBRoles, obj.(*mdbv1.MongoDB), r.memberClustersMap, r.omConnectionFactory, log) + reconcilerHelper, err := NewShardedClusterReconcilerHelper(ctx, r.ReconcileCommonController, r.imageUrls, r.initDatabaseNonStaticImageVersion, r.databaseNonStaticImageVersion, r.enableClusterMongoDBRoles, obj.(*mdbv1.MongoDB), r.memberClustersMap, r.omConnectionFactory, log) if err != nil { return err } @@ -1640,9 +1634,9 @@ func logDiffOfProcessNames(acProcesses []string, healthyProcesses []string, log } } -func AddShardedClusterController(ctx context.Context, mgr manager.Manager, imageUrls images.ImageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion string, forceEnterprise bool, enableClusterMongoDBRoles bool, memberClustersMap map[string]cluster.Cluster) error { +func AddShardedClusterController(ctx context.Context, mgr manager.Manager, imageUrls images.ImageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion string, enableClusterMongoDBRoles bool, memberClustersMap map[string]cluster.Cluster) error { // Create a new controller - reconciler := newShardedClusterReconciler(ctx, mgr.GetClient(), imageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion, forceEnterprise, enableClusterMongoDBRoles, multicluster.ClustersMapToClientMap(memberClustersMap), om.NewOpsManagerConnection) + reconciler := newShardedClusterReconciler(ctx, mgr.GetClient(), imageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion, 
enableClusterMongoDBRoles, multicluster.ClustersMapToClientMap(memberClustersMap), om.NewOpsManagerConnection) options := controller.Options{Reconciler: reconciler, MaxConcurrentReconciles: env.ReadIntOrDefault(util.MaxConcurrentReconcilesEnv, 1)} // nolint:forbidigo c, err := controller.New(util.MongoDbShardedClusterController, mgr, options) if err != nil { @@ -2179,7 +2173,7 @@ func (r *ShardedClusterReconcileHelper) createDesiredMongosProcesses(certificate for _, memberCluster := range r.mongosMemberClusters { hostnames, podNames := r.getMongosHostnames(memberCluster, scale.ReplicasThisReconciliation(r.GetMongosScaler(memberCluster))) for i := range hostnames { - process := om.NewMongosProcess(podNames[i], hostnames[i], r.imageUrls[mcoConstruct.MongodbImageEnv], r.forceEnterprise, r.sc.Spec.MongosSpec.GetAdditionalMongodConfig(), r.sc.GetSpec(), certificateFilePath, r.sc.Annotations, r.sc.CalculateFeatureCompatibilityVersion()) + process := om.NewMongosProcess(podNames[i], hostnames[i], r.imageUrls[mcoConstruct.MongodbImageEnv], r.sc.Spec.MongosSpec.GetAdditionalMongodConfig(), r.sc.GetSpec(), certificateFilePath, r.sc.Annotations, r.sc.CalculateFeatureCompatibilityVersion()) processes = append(processes, process) } } @@ -2193,7 +2187,7 @@ func (r *ShardedClusterReconcileHelper) createDesiredConfigSrvProcessesAndMember for _, memberCluster := range r.configSrvMemberClusters { hostnames, podNames := r.getConfigSrvHostnames(memberCluster, scale.ReplicasThisReconciliation(r.GetConfigSrvScaler(memberCluster))) for i := range hostnames { - process := om.NewMongodProcess(podNames[i], hostnames[i], r.imageUrls[mcoConstruct.MongodbImageEnv], r.forceEnterprise, r.sc.Spec.ConfigSrvSpec.GetAdditionalMongodConfig(), r.sc.GetSpec(), certificateFilePath, r.sc.Annotations, r.sc.CalculateFeatureCompatibilityVersion()) + process := om.NewMongodProcess(podNames[i], hostnames[i], r.imageUrls[mcoConstruct.MongodbImageEnv], r.sc.Spec.ConfigSrvSpec.GetAdditionalMongodConfig(), r.sc.GetSpec(), certificateFilePath, r.sc.Annotations, r.sc.CalculateFeatureCompatibilityVersion()) processes = append(processes, process) } @@ -2210,7 +2204,7 @@ func (r *ShardedClusterReconcileHelper) createDesiredShardProcessesAndMemberOpti for _, memberCluster := range r.shardsMemberClustersMap[shardIdx] { hostnames, podNames := r.getShardHostnames(shardIdx, memberCluster, scale.ReplicasThisReconciliation(r.GetShardScaler(shardIdx, memberCluster))) for i := range hostnames { - process := om.NewMongodProcess(podNames[i], hostnames[i], r.imageUrls[mcoConstruct.MongodbImageEnv], r.forceEnterprise, r.desiredShardsConfiguration[shardIdx].GetAdditionalMongodConfig(), r.sc.GetSpec(), certificateFilePath, r.sc.Annotations, r.sc.CalculateFeatureCompatibilityVersion()) + process := om.NewMongodProcess(podNames[i], hostnames[i], r.imageUrls[mcoConstruct.MongodbImageEnv], r.desiredShardsConfiguration[shardIdx].GetAdditionalMongodConfig(), r.sc.GetSpec(), certificateFilePath, r.sc.Annotations, r.sc.CalculateFeatureCompatibilityVersion()) processes = append(processes, process) } specMemberOptions := r.desiredShardsConfiguration[shardIdx].GetClusterSpecItem(memberCluster.Name).MemberConfig @@ -2220,20 +2214,20 @@ func (r *ShardedClusterReconcileHelper) createDesiredShardProcessesAndMemberOpti return processes, memberOptions } -func createConfigSrvProcesses(mongoDBImage string, forceEnterprise bool, set appsv1.StatefulSet, mdb *mdbv1.MongoDB, certificateFilePath string) []om.Process { - return createMongodProcessForShardedCluster(mongoDBImage, 
forceEnterprise, set, mdb.Spec.ConfigSrvSpec.GetAdditionalMongodConfig(), mdb, certificateFilePath) +func createConfigSrvProcesses(mongoDBImage string, set appsv1.StatefulSet, mdb *mdbv1.MongoDB, certificateFilePath string) []om.Process { + return createMongodProcessForShardedCluster(mongoDBImage, set, mdb.Spec.ConfigSrvSpec.GetAdditionalMongodConfig(), mdb, certificateFilePath) } -func createShardProcesses(mongoDBImage string, forceEnterprise bool, set appsv1.StatefulSet, mdb *mdbv1.MongoDB, certificateFilePath string) []om.Process { - return createMongodProcessForShardedCluster(mongoDBImage, forceEnterprise, set, mdb.Spec.ShardSpec.GetAdditionalMongodConfig(), mdb, certificateFilePath) +func createShardProcesses(mongoDBImage string, set appsv1.StatefulSet, mdb *mdbv1.MongoDB, certificateFilePath string) []om.Process { + return createMongodProcessForShardedCluster(mongoDBImage, set, mdb.Spec.ShardSpec.GetAdditionalMongodConfig(), mdb, certificateFilePath) } -func createMongodProcessForShardedCluster(mongoDBImage string, forceEnterprise bool, set appsv1.StatefulSet, additionalMongodConfig *mdbv1.AdditionalMongodConfig, mdb *mdbv1.MongoDB, certificateFilePath string) []om.Process { +func createMongodProcessForShardedCluster(mongoDBImage string, set appsv1.StatefulSet, additionalMongodConfig *mdbv1.AdditionalMongodConfig, mdb *mdbv1.MongoDB, certificateFilePath string) []om.Process { hostnames, names := dns.GetDnsForStatefulSet(set, mdb.Spec.GetClusterDomain(), nil) processes := make([]om.Process, len(hostnames)) for idx, hostname := range hostnames { - processes[idx] = om.NewMongodProcess(names[idx], hostname, mongoDBImage, forceEnterprise, additionalMongodConfig, &mdb.Spec, certificateFilePath, mdb.Annotations, mdb.CalculateFeatureCompatibilityVersion()) + processes[idx] = om.NewMongodProcess(names[idx], hostname, mongoDBImage, additionalMongodConfig, &mdb.Spec, certificateFilePath, mdb.Annotations, mdb.CalculateFeatureCompatibilityVersion()) } return processes diff --git a/controllers/operator/mongodbshardedcluster_controller_multi_test.go b/controllers/operator/mongodbshardedcluster_controller_multi_test.go index 158665f3b..0d23fe0f4 100644 --- a/controllers/operator/mongodbshardedcluster_controller_multi_test.go +++ b/controllers/operator/mongodbshardedcluster_controller_multi_test.go @@ -42,9 +42,9 @@ import ( "github.com/mongodb/mongodb-kubernetes/pkg/util" ) -func newShardedClusterReconcilerForMultiCluster(ctx context.Context, forceEnterprise bool, sc *mdbv1.MongoDB, globalMemberClustersMap map[string]client.Client, kubeClient kubernetesClient.Client, omConnectionFactory *om.CachedOMConnectionFactory) (*ReconcileMongoDbShardedCluster, *ShardedClusterReconcileHelper, error) { - r := newShardedClusterReconciler(ctx, kubeClient, nil, "fake-initDatabaseNonStaticImageVersion", "fake-databaseNonStaticImageVersion", false, false, globalMemberClustersMap, omConnectionFactory.GetConnectionFunc) - reconcileHelper, err := NewShardedClusterReconcilerHelper(ctx, r.ReconcileCommonController, nil, "fake-initDatabaseNonStaticImageVersion", "fake-databaseNonStaticImageVersion", forceEnterprise, false, sc, globalMemberClustersMap, omConnectionFactory.GetConnectionFunc, zap.S()) +func newShardedClusterReconcilerForMultiCluster(ctx context.Context, sc *mdbv1.MongoDB, globalMemberClustersMap map[string]client.Client, kubeClient kubernetesClient.Client, omConnectionFactory *om.CachedOMConnectionFactory) (*ReconcileMongoDbShardedCluster, *ShardedClusterReconcileHelper, error) { + r := 
newShardedClusterReconciler(ctx, kubeClient, nil, "fake-initDatabaseNonStaticImageVersion", "fake-databaseNonStaticImageVersion", false, globalMemberClustersMap, omConnectionFactory.GetConnectionFunc) + reconcileHelper, err := NewShardedClusterReconcilerHelper(ctx, r.ReconcileCommonController, nil, "fake-initDatabaseNonStaticImageVersion", "fake-databaseNonStaticImageVersion", false, sc, globalMemberClustersMap, omConnectionFactory.GetConnectionFunc, zap.S()) if err != nil { return nil, nil, err } @@ -369,7 +369,7 @@ func BlockReconcileScalingBothWaysCase(t *testing.T, tc BlockReconcileScalingBot require.NoError(t, err) // Checking that we don't scale both ways is done when we initiate the reconciler, not in the reconcile loop. - reconciler, _, err := newShardedClusterReconcilerForMultiCluster(ctx, false, sc, memberClusterMap, kubeClient, omConnectionFactory) + reconciler, _, err := newShardedClusterReconcilerForMultiCluster(ctx, sc, memberClusterMap, kubeClient, omConnectionFactory) require.NoError(t, err) // The validation happens at the beginning of the reconciliation loop. We expect to fail immediately when scaling is // invalid, or stay in pending phase otherwise. @@ -411,7 +411,7 @@ func TestReconcileCreateMultiClusterShardedClusterWithExternalDomain(t *testing. kubeClient := kubernetesClient.NewClient(fakeClient) memberClusterMap := getFakeMultiClusterMapWithConfiguredInterceptor(memberClusters.ClusterNames, omConnectionFactory, true, true) - reconciler, reconcilerHelper, err := newShardedClusterReconcilerForMultiCluster(ctx, false, sc, memberClusterMap, kubeClient, omConnectionFactory) + reconciler, reconcilerHelper, err := newShardedClusterReconcilerForMultiCluster(ctx, sc, memberClusterMap, kubeClient, omConnectionFactory) require.NoError(t, err) clusterMapping := reconcilerHelper.deploymentState.ClusterMapping omConnectionFactory.SetPostCreateHook(func(connection om.Connection) { @@ -482,7 +482,7 @@ func TestReconcileCreateMultiClusterShardedClusterWithExternalAccessAndOnlyTopLe kubeClient := kubernetesClient.NewClient(fakeClient) memberClusterMap := getFakeMultiClusterMapWithConfiguredInterceptor(memberClusters.ClusterNames, omConnectionFactory, true, true) - reconciler, reconcilerHelper, err := newShardedClusterReconcilerForMultiCluster(ctx, false, sc, memberClusterMap, kubeClient, omConnectionFactory) + reconciler, reconcilerHelper, err := newShardedClusterReconcilerForMultiCluster(ctx, sc, memberClusterMap, kubeClient, omConnectionFactory) clusterMapping := reconcilerHelper.deploymentState.ClusterMapping omConnectionFactory.SetPostCreateHook(func(connection om.Connection) { allHostnames, _ := generateAllHosts(sc, memberClusters.MongosDistribution, clusterMapping, memberClusters.ConfigServerDistribution, memberClusters.ShardDistribution, test.ClusterLocalDomains, test.SingleExternalClusterDomains) @@ -549,7 +549,7 @@ func TestReconcileCreateMultiClusterShardedClusterWithExternalAccessAndNoExterna kubeClient := kubernetesClient.NewClient(fakeClient) memberClusterMap := getFakeMultiClusterMapWithConfiguredInterceptor(memberClusters.ClusterNames, omConnectionFactory, true, true) - reconciler, reconcilerHelper, err := newShardedClusterReconcilerForMultiCluster(ctx, false, sc, memberClusterMap, kubeClient, omConnectionFactory) + reconciler, reconcilerHelper, err := newShardedClusterReconcilerForMultiCluster(ctx, sc, memberClusterMap, kubeClient, omConnectionFactory) clusterMapping := reconcilerHelper.deploymentState.ClusterMapping 
omConnectionFactory.SetPostCreateHook(func(connection om.Connection) { allHostnames, _ := generateAllHosts(sc, memberClusters.MongosDistribution, clusterMapping, memberClusters.ConfigServerDistribution, memberClusters.ShardDistribution, test.ClusterLocalDomains, test.NoneExternalClusterDomains) @@ -616,7 +616,7 @@ func TestReconcileCreateMultiClusterShardedCluster(t *testing.T) { kubeClient := kubernetesClient.NewClient(fakeClient) memberClusterMap := getFakeMultiClusterMapWithConfiguredInterceptor(memberClusters.ClusterNames, omConnectionFactory, true, true) - reconciler, reconcilerHelper, err := newShardedClusterReconcilerForMultiCluster(ctx, false, sc, memberClusterMap, kubeClient, omConnectionFactory) + reconciler, reconcilerHelper, err := newShardedClusterReconcilerForMultiCluster(ctx, sc, memberClusterMap, kubeClient, omConnectionFactory) clusterMapping := reconcilerHelper.deploymentState.ClusterMapping omConnectionFactory.SetPostCreateHook(func(connection om.Connection) { allHostnames, _ := generateAllHosts(sc, memberClusters.MongosDistribution, clusterMapping, memberClusters.ConfigServerDistribution, memberClusters.ShardDistribution, test.ClusterLocalDomains, test.NoneExternalClusterDomains) @@ -814,7 +814,7 @@ func TestReconcileMultiClusterShardedClusterCertsAndSecretsReplication(t *testin memberClusterMap := getFakeMultiClusterMapWithConfiguredInterceptor(memberClusterNames, omConnectionFactory, true, false) ctx := context.Background() - reconciler, reconcilerHelper, err := newShardedClusterReconcilerForMultiCluster(ctx, false, sc, memberClusterMap, kubeClient, omConnectionFactory) + reconciler, reconcilerHelper, err := newShardedClusterReconcilerForMultiCluster(ctx, sc, memberClusterMap, kubeClient, omConnectionFactory) clusterMapping := reconcilerHelper.deploymentState.ClusterMapping omConnectionFactory.SetPostCreateHook(func(connection om.Connection) { allHostnames, _ := generateAllHosts(sc, mongosDistribution, clusterMapping, configSrvDistribution, shardDistribution, test.ClusterLocalDomains, test.NoneExternalClusterDomains) @@ -985,7 +985,7 @@ func TestReconcileForComplexMultiClusterYaml(t *testing.T) { kubeClient, omConnectionFactory := mock.NewDefaultFakeClient(sc) memberClusterMap := getFakeMultiClusterMapWithClusters(memberClusterNames, omConnectionFactory) - reconciler, reconcilerHelper, err := newShardedClusterReconcilerForMultiCluster(ctx, false, sc, memberClusterMap, kubeClient, omConnectionFactory) + reconciler, reconcilerHelper, err := newShardedClusterReconcilerForMultiCluster(ctx, sc, memberClusterMap, kubeClient, omConnectionFactory) clusterMapping := reconcilerHelper.deploymentState.ClusterMapping require.NoError(t, err) @@ -1076,7 +1076,7 @@ func TestMigrateToNewDeploymentState(t *testing.T) { kubeClient, omConnectionFactory := mock.NewDefaultFakeClient(sc) memberClusterMap := getFakeMultiClusterMapWithClusters([]string{multicluster.LegacyCentralClusterName}, omConnectionFactory) - reconciler, _, err := newShardedClusterReconcilerForMultiCluster(ctx, false, sc, memberClusterMap, kubeClient, omConnectionFactory) + reconciler, _, err := newShardedClusterReconcilerForMultiCluster(ctx, sc, memberClusterMap, kubeClient, omConnectionFactory) require.NoError(t, err) // Migration is performed at reconciliation, when needed @@ -1130,7 +1130,7 @@ func testDesiredConfigurationFromYAML[T *mdbv1.ShardedClusterComponentSpec | map kubeClient, omConnectionFactory := mock.NewDefaultFakeClient(sc) memberClusterMap := getFakeMultiClusterMapWithClusters(memberClusterNames, 
omConnectionFactory) - _, reconcilerHelper, err := newShardedClusterReconcilerForMultiCluster(ctx, false, sc, memberClusterMap, kubeClient, omConnectionFactory) + _, reconcilerHelper, err := newShardedClusterReconcilerForMultiCluster(ctx, sc, memberClusterMap, kubeClient, omConnectionFactory) require.NoError(t, err) var actual interface{} @@ -1249,11 +1249,11 @@ func TestMultiClusterShardedSetRace(t *testing.T) { globalMemberClustersMap := getFakeMultiClusterMapWithConfiguredInterceptor(memberClusterNames, omConnectionFactory, true, false) ctx := context.Background() - reconciler := newShardedClusterReconciler(ctx, kubeClient, nil, "fake-initDatabaseNonStaticImageVersion", "fake-databaseNonStaticImageVersion", false, false, globalMemberClustersMap, omConnectionFactory.GetConnectionFunc) + reconciler := newShardedClusterReconciler(ctx, kubeClient, nil, "fake-initDatabaseNonStaticImageVersion", "fake-databaseNonStaticImageVersion", false, globalMemberClustersMap, omConnectionFactory.GetConnectionFunc) - allHostnames := generateHostsForCluster(ctx, reconciler, false, sc, mongosDistribution, configSrvDistribution, shardDistribution) - allHostnames1 := generateHostsForCluster(ctx, reconciler, false, sc1, mongosDistribution, configSrvDistribution, shardDistribution) - allHostnames2 := generateHostsForCluster(ctx, reconciler, false, sc2, mongosDistribution, configSrvDistribution, shardDistribution) + allHostnames := generateHostsForCluster(ctx, reconciler, sc, mongosDistribution, configSrvDistribution, shardDistribution) + allHostnames1 := generateHostsForCluster(ctx, reconciler, sc1, mongosDistribution, configSrvDistribution, shardDistribution) + allHostnames2 := generateHostsForCluster(ctx, reconciler, sc2, mongosDistribution, configSrvDistribution, shardDistribution) projectHostMapping := map[string][]string{ projectName: allHostnames, @@ -1645,7 +1645,7 @@ func TestMultiClusterShardedMongosDeadlock(t *testing.T) { // TODO: statuses in OM mock // TODO: OM mock: set agent ready depending on a clusterDown parameter ? 
+ set mongos not ready if anything is not ready - reconciler, reconcilerHelper, err := newShardedClusterReconcilerForMultiCluster(ctx, false, sc, memberClusterMap, kubeClient, omConnectionFactory) + reconciler, reconcilerHelper, err := newShardedClusterReconcilerForMultiCluster(ctx, sc, memberClusterMap, kubeClient, omConnectionFactory) require.NoError(t, err) clusterMapping := reconcilerHelper.deploymentState.ClusterMapping @@ -1952,7 +1952,7 @@ func MultiClusterShardedScalingWithOverridesTestCase(t *testing.T, tc MultiClust for _, scalingStep := range tc.scalingSteps { t.Run(scalingStep.name, func(t *testing.T) { - reconciler, reconcilerHelper, err := newShardedClusterReconcilerForMultiCluster(ctx, false, sc, memberClusterMap, kubeClient, omConnectionFactory) + reconciler, reconcilerHelper, err := newShardedClusterReconcilerForMultiCluster(ctx, sc, memberClusterMap, kubeClient, omConnectionFactory) require.NoError(t, err) clusterMapping := reconcilerHelper.deploymentState.ClusterMapping @@ -2295,7 +2295,7 @@ func TestMultiClusterShardedScaling(t *testing.T) { memberClusterClients = append(memberClusterClients, c) } - reconciler, reconcilerHelper, err := newShardedClusterReconcilerForMultiCluster(ctx, false, sc, memberClusterMap, kubeClient, omConnectionFactory) + reconciler, reconcilerHelper, err := newShardedClusterReconcilerForMultiCluster(ctx, sc, memberClusterMap, kubeClient, omConnectionFactory) require.NoError(t, err) clusterMapping := reconcilerHelper.deploymentState.ClusterMapping addAllHostsWithDistribution := func(connection om.Connection, mongosDistribution map[string]int, clusterMapping map[string]int, configSrvDistribution map[string]int, shardDistribution []map[string]int) { @@ -2341,7 +2341,7 @@ func TestMultiClusterShardedScaling(t *testing.T) { err = kubeClient.Update(ctx, sc) require.NoError(t, err) - reconciler, reconcilerHelper, err = newShardedClusterReconcilerForMultiCluster(ctx, false, sc, memberClusterMap, kubeClient, omConnectionFactory) + reconciler, reconcilerHelper, err = newShardedClusterReconcilerForMultiCluster(ctx, sc, memberClusterMap, kubeClient, omConnectionFactory) require.NoError(t, err) clusterMapping = reconcilerHelper.deploymentState.ClusterMapping addAllHostsWithDistribution(omConnectionFactory.GetConnection(), mongosDistribution, clusterMapping, configSrvDistribution, shardDistribution) @@ -2368,7 +2368,7 @@ func TestMultiClusterShardedScaling(t *testing.T) { err = kubeClient.Update(ctx, sc) require.NoError(t, err) - reconciler, reconcilerHelper, err = newShardedClusterReconcilerForMultiCluster(ctx, false, sc, memberClusterMap, kubeClient, omConnectionFactory) + reconciler, reconcilerHelper, err = newShardedClusterReconcilerForMultiCluster(ctx, sc, memberClusterMap, kubeClient, omConnectionFactory) require.NoError(t, err) clusterMapping = reconcilerHelper.deploymentState.ClusterMapping addAllHostsWithDistribution(omConnectionFactory.GetConnection(), mongosDistribution, clusterMapping, configSrvDistribution, shardDistribution) @@ -2409,8 +2409,8 @@ func reconcileUntilSuccessful(ctx context.Context, t *testing.T, reconciler reco } } -func generateHostsForCluster(ctx context.Context, reconciler *ReconcileMongoDbShardedCluster, forceEnterprise bool, sc *mdbv1.MongoDB, mongosDistribution map[string]int, configSrvDistribution map[string]int, shardDistribution []map[string]int) []string { - reconcileHelper, _ := NewShardedClusterReconcilerHelper(ctx, reconciler.ReconcileCommonController, nil, "fake-initDatabaseNonStaticImageVersion", 
"fake-databaseNonStaticImageVersion", forceEnterprise, false, sc, reconciler.memberClustersMap, reconciler.omConnectionFactory, zap.S()) +func generateHostsForCluster(ctx context.Context, reconciler *ReconcileMongoDbShardedCluster, sc *mdbv1.MongoDB, mongosDistribution map[string]int, configSrvDistribution map[string]int, shardDistribution []map[string]int) []string { + reconcileHelper, _ := NewShardedClusterReconcilerHelper(ctx, reconciler.ReconcileCommonController, nil, "fake-initDatabaseNonStaticImageVersion", "fake-databaseNonStaticImageVersion", false, sc, reconciler.memberClustersMap, reconciler.omConnectionFactory, zap.S()) allHostnames, _ := generateAllHosts(sc, mongosDistribution, reconcileHelper.deploymentState.ClusterMapping, configSrvDistribution, shardDistribution, test.ClusterLocalDomains, test.NoneExternalClusterDomains) return allHostnames } @@ -3492,7 +3492,7 @@ func TestMultiClusterShardedServiceCreation_WithExternalName(t *testing.T) { kubeClient, omConnectionFactory := mock.NewDefaultFakeClient(sc) memberClusterMap := getFakeMultiClusterMapWithClusters(memberClusters, omConnectionFactory) - reconciler, reconcileHelper, err := newShardedClusterReconcilerForMultiCluster(ctx, false, sc, memberClusterMap, kubeClient, omConnectionFactory) + reconciler, reconcileHelper, err := newShardedClusterReconcilerForMultiCluster(ctx, sc, memberClusterMap, kubeClient, omConnectionFactory) require.NoError(t, err) mongosDistribution := clusterSpecListToDistribution(tc.mongosClusterSpecList) diff --git a/controllers/operator/mongodbshardedcluster_controller_test.go b/controllers/operator/mongodbshardedcluster_controller_test.go index 8404ebabb..0bc5c3622 100644 --- a/controllers/operator/mongodbshardedcluster_controller_test.go +++ b/controllers/operator/mongodbshardedcluster_controller_test.go @@ -197,7 +197,7 @@ func TestShardedClusterRace(t *testing.T) { WithObjects(mock.GetDefaultResources()...). 
Build() - reconciler := newShardedClusterReconciler(ctx, fakeClient, nil, "fake-initDatabaseNonStaticImageVersion", "fake-databaseNonStaticImageVersion", false, false, nil, omConnectionFactory.GetConnectionFunc) + reconciler := newShardedClusterReconciler(ctx, fakeClient, nil, "fake-initDatabaseNonStaticImageVersion", "fake-databaseNonStaticImageVersion", false, nil, omConnectionFactory.GetConnectionFunc) testConcurrentReconciles(ctx, t, fakeClient, reconciler, sc1, sc2, sc3) } @@ -1052,7 +1052,7 @@ func TestFeatureControlsNoAuth(t *testing.T) { sc := test.DefaultClusterBuilder().RemoveAuth().Build() omConnectionFactory := om.NewCachedOMConnectionFactory(omConnectionFactoryFuncSettingVersion()) fakeClient := mock.NewDefaultFakeClientWithOMConnectionFactory(omConnectionFactory, sc) - reconciler := newShardedClusterReconciler(ctx, fakeClient, nil, "fake-initDatabaseNonStaticImageVersion", "fake-databaseNonStaticImageVersion", false, false, nil, omConnectionFactory.GetConnectionFunc) + reconciler := newShardedClusterReconciler(ctx, fakeClient, nil, "fake-initDatabaseNonStaticImageVersion", "fake-databaseNonStaticImageVersion", false, nil, omConnectionFactory.GetConnectionFunc) checkReconcileSuccessful(ctx, t, reconciler, sc, fakeClient) @@ -1253,7 +1253,7 @@ func TestFeatureControlsAuthEnabled(t *testing.T) { sc := test.DefaultClusterBuilder().Build() omConnectionFactory := om.NewCachedOMConnectionFactory(omConnectionFactoryFuncSettingVersion()) fakeClient := mock.NewDefaultFakeClientWithOMConnectionFactory(omConnectionFactory, sc) - reconciler := newShardedClusterReconciler(ctx, fakeClient, nil, "fake-initDatabaseNonStaticImageVersion", "fake-databaseNonStaticImageVersion", false, false, nil, omConnectionFactory.GetConnectionFunc) + reconciler := newShardedClusterReconciler(ctx, fakeClient, nil, "fake-initDatabaseNonStaticImageVersion", "fake-databaseNonStaticImageVersion", false, nil, omConnectionFactory.GetConnectionFunc) checkReconcileSuccessful(ctx, t, reconciler, sc, fakeClient) @@ -1620,12 +1620,12 @@ func assertPodSpecSts(t *testing.T, sts *appsv1.StatefulSet, nodeName, hostName } } -func createMongosProcesses(mongoDBImage string, forceEnterprise bool, set appsv1.StatefulSet, mdb *mdbv1.MongoDB, certificateFilePath string) []om.Process { +func createMongosProcesses(mongoDBImage string, set appsv1.StatefulSet, mdb *mdbv1.MongoDB, certificateFilePath string) []om.Process { hostnames, names := dns.GetDnsForStatefulSet(set, mdb.Spec.GetClusterDomain(), nil) processes := make([]om.Process, len(hostnames)) for idx, hostname := range hostnames { - processes[idx] = om.NewMongosProcess(names[idx], hostname, mongoDBImage, forceEnterprise, mdb.Spec.MongosSpec.GetAdditionalMongodConfig(), mdb.GetSpec(), certificateFilePath, mdb.Annotations, mdb.CalculateFeatureCompatibilityVersion()) + processes[idx] = om.NewMongosProcess(names[idx], hostname, mongoDBImage, mdb.Spec.MongosSpec.GetAdditionalMongodConfig(), mdb.GetSpec(), certificateFilePath, mdb.Annotations, mdb.CalculateFeatureCompatibilityVersion()) } return processes @@ -1644,7 +1644,7 @@ func createDeploymentFromShardedCluster(t *testing.T, updatable v1.CustomResourc construct.GetPodEnvOptions(), ) shardSts := construct.DatabaseStatefulSet(*sh, shardOptions, zap.S()) - shards[i], _ = buildReplicaSetFromProcesses(shardSts.Name, createShardProcesses("fake-mongoDBImage", false, shardSts, sh, ""), sh, sh.Spec.GetMemberOptions(), om.NewDeployment()) + shards[i], _ = buildReplicaSetFromProcesses(shardSts.Name, 
createShardProcesses("fake-mongoDBImage", shardSts, sh, ""), sh, sh.Spec.GetMemberOptions(), om.NewDeployment()) } desiredMongosConfig := createMongosSpec(sh) @@ -1655,7 +1655,7 @@ func createDeploymentFromShardedCluster(t *testing.T, updatable v1.CustomResourc construct.GetPodEnvOptions(), ) mongosSts := construct.DatabaseStatefulSet(*sh, mongosOptions, zap.S()) - mongosProcesses := createMongosProcesses("fake-mongoDBImage", false, mongosSts, sh, util.PEMKeyFilePathInContainer) + mongosProcesses := createMongosProcesses("fake-mongoDBImage", mongosSts, sh, util.PEMKeyFilePathInContainer) desiredConfigSrvConfig := createConfigSrvSpec(sh) configServerOptions := construct.ConfigServerOptions( @@ -1665,7 +1665,7 @@ func createDeploymentFromShardedCluster(t *testing.T, updatable v1.CustomResourc construct.GetPodEnvOptions(), ) configSvrSts := construct.DatabaseStatefulSet(*sh, configServerOptions, zap.S()) - configRs, _ := buildReplicaSetFromProcesses(configSvrSts.Name, createConfigSrvProcesses("fake-mongoDBImage", false, configSvrSts, sh, ""), sh, sh.Spec.GetMemberOptions(), om.NewDeployment()) + configRs, _ := buildReplicaSetFromProcesses(configSvrSts.Name, createConfigSrvProcesses("fake-mongoDBImage", configSvrSts, sh, ""), sh, sh.Spec.GetMemberOptions(), om.NewDeployment()) d := om.NewDeployment() _, err := d.MergeShardedCluster(om.DeploymentShardedClusterMergeOptions{ @@ -1690,8 +1690,8 @@ func defaultClusterReconciler(ctx context.Context, imageUrls images.ImageUrls, i } func newShardedClusterReconcilerFromResource(ctx context.Context, imageUrls images.ImageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion string, sc *mdbv1.MongoDB, globalMemberClustersMap map[string]client.Client, kubeClient kubernetesClient.Client, omConnectionFactory *om.CachedOMConnectionFactory) (*ReconcileMongoDbShardedCluster, *ShardedClusterReconcileHelper, error) { - r := newShardedClusterReconciler(ctx, kubeClient, imageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion, false, false, globalMemberClustersMap, omConnectionFactory.GetConnectionFunc) - reconcileHelper, err := NewShardedClusterReconcilerHelper(ctx, r.ReconcileCommonController, imageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion, false, false, sc, globalMemberClustersMap, omConnectionFactory.GetConnectionFunc, zap.S()) + r := newShardedClusterReconciler(ctx, kubeClient, imageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion, false, globalMemberClustersMap, omConnectionFactory.GetConnectionFunc) + reconcileHelper, err := NewShardedClusterReconcilerHelper(ctx, r.ReconcileCommonController, imageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion, false, sc, globalMemberClustersMap, omConnectionFactory.GetConnectionFunc, zap.S()) if err != nil { return nil, nil, err } diff --git a/controllers/operator/mongodbstandalone_controller.go b/controllers/operator/mongodbstandalone_controller.go index 1078fd6b8..f70842adb 100644 --- a/controllers/operator/mongodbstandalone_controller.go +++ b/controllers/operator/mongodbstandalone_controller.go @@ -51,9 +51,9 @@ import ( // AddStandaloneController creates a new MongoDbStandalone Controller and adds it to the Manager. The Manager will set fields on the Controller // and Start it when the Manager is Started. 
-func AddStandaloneController(ctx context.Context, mgr manager.Manager, imageUrls images.ImageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion string, forceEnterprise bool, enableClusterMongoDBRoles bool) error { +func AddStandaloneController(ctx context.Context, mgr manager.Manager, imageUrls images.ImageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion string, enableClusterMongoDBRoles bool) error { // Create a new controller - reconciler := newStandaloneReconciler(ctx, mgr.GetClient(), imageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion, forceEnterprise, enableClusterMongoDBRoles, om.NewOpsManagerConnection) + reconciler := newStandaloneReconciler(ctx, mgr.GetClient(), imageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion, enableClusterMongoDBRoles, om.NewOpsManagerConnection) c, err := controller.New(util.MongoDbStandaloneController, mgr, controller.Options{Reconciler: reconciler, MaxConcurrentReconciles: env.ReadIntOrDefault(util.MaxConcurrentReconcilesEnv, 1)}) // nolint:forbidigo if err != nil { return err @@ -113,12 +113,11 @@ func AddStandaloneController(ctx context.Context, mgr manager.Manager, imageUrls return nil } -func newStandaloneReconciler(ctx context.Context, kubeClient client.Client, imageUrls images.ImageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion string, forceEnterprise bool, enableClusterMongoDBRoles bool, omFunc om.ConnectionFactory) *ReconcileMongoDbStandalone { +func newStandaloneReconciler(ctx context.Context, kubeClient client.Client, imageUrls images.ImageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion string, enableClusterMongoDBRoles bool, omFunc om.ConnectionFactory) *ReconcileMongoDbStandalone { return &ReconcileMongoDbStandalone{ ReconcileCommonController: NewReconcileCommonController(ctx, kubeClient), omConnectionFactory: omFunc, imageUrls: imageUrls, - forceEnterprise: forceEnterprise, enableClusterMongoDBRoles: enableClusterMongoDBRoles, initDatabaseNonStaticImageVersion: initDatabaseNonStaticImageVersion, @@ -131,7 +130,6 @@ type ReconcileMongoDbStandalone struct { *ReconcileCommonController omConnectionFactory om.ConnectionFactory imageUrls images.ImageUrls - forceEnterprise bool enableClusterMongoDBRoles bool initDatabaseNonStaticImageVersion string @@ -325,7 +323,7 @@ func (r *ReconcileMongoDbStandalone) updateOmDeployment(ctx context.Context, con return status } - standaloneOmObject := createProcess(r.imageUrls[mcoConstruct.MongodbImageEnv], r.forceEnterprise, set, util.DatabaseContainerName, s) + standaloneOmObject := createProcess(r.imageUrls[mcoConstruct.MongodbImageEnv], set, util.DatabaseContainerName, s) err := conn.ReadUpdateDeployment( func(d om.Deployment) error { excessProcesses := d.GetNumberOfExcessProcesses(s.Name) @@ -419,8 +417,8 @@ func (r *ReconcileMongoDbStandalone) OnDelete(ctx context.Context, obj runtime.O return nil } -func createProcess(mongoDBImage string, forceEnterprise bool, set appsv1.StatefulSet, containerName string, s *mdbv1.MongoDB) om.Process { +func createProcess(mongoDBImage string, set appsv1.StatefulSet, containerName string, s *mdbv1.MongoDB) om.Process { hostnames, _ := dns.GetDnsForStatefulSet(set, s.Spec.GetClusterDomain(), nil) - process := om.NewMongodProcess(s.Name, hostnames[0], mongoDBImage, forceEnterprise, s.Spec.GetAdditionalMongodConfig(), s.GetSpec(), "", s.Annotations, s.CalculateFeatureCompatibilityVersion()) + process := om.NewMongodProcess(s.Name, 
hostnames[0], mongoDBImage, s.Spec.GetAdditionalMongodConfig(), s.GetSpec(), "", s.Annotations, s.CalculateFeatureCompatibilityVersion())
 	return process
 }
diff --git a/controllers/operator/mongodbstandalone_controller_test.go b/controllers/operator/mongodbstandalone_controller_test.go
index 1663b24bd..59f34c117 100644
--- a/controllers/operator/mongodbstandalone_controller_test.go
+++ b/controllers/operator/mongodbstandalone_controller_test.go
@@ -37,7 +37,7 @@ import (
 func TestCreateOmProcess(t *testing.T) {
 	const mongodbImage = "quay.io/mongodb/mongodb-enterprise-server"
 	sts := construct.DatabaseStatefulSet(*DefaultReplicaSetBuilder().SetName("dublin").Build(), construct.StandaloneOptions(construct.GetPodEnvOptions()), zap.S())
-	process := createProcess(mongodbImage, false, sts, util.AgentContainerName, DefaultStandaloneBuilder().Build())
+	process := createProcess(mongodbImage, sts, util.AgentContainerName, DefaultStandaloneBuilder().Build())
 	// Note that for standalone the name of the process is the name of the statefulset - not the pod inside it.
 	assert.Equal(t, "dublin", process.Name())
 	assert.Equal(t, "dublin-0.dublin-svc.my-namespace.svc.cluster.local", process.HostName())
@@ -49,7 +49,7 @@ func TestCreateOmProcesStatic(t *testing.T) {
 	t.Setenv(architectures.DefaultEnvArchitecture, string(architectures.Static))
 
 	sts := construct.DatabaseStatefulSet(*DefaultReplicaSetBuilder().SetName("dublin").Build(), construct.StandaloneOptions(construct.GetPodEnvOptions()), zap.S())
-	process := createProcess(mongodbImage, false, sts, util.AgentContainerName, DefaultStandaloneBuilder().Build())
+	process := createProcess(mongodbImage, sts, util.AgentContainerName, DefaultStandaloneBuilder().Build())
 	// Note that for standalone the name of the process is the name of the statefulset - not the pod inside it. 
assert.Equal(t, "dublin", process.Name()) assert.Equal(t, "dublin-0.dublin-svc.my-namespace.svc.cluster.local", process.HostName()) @@ -151,7 +151,7 @@ func TestOnAddStandaloneWithDelay(t *testing.T) { }, }) - reconciler := newStandaloneReconciler(ctx, kubeClient, nil, "fake-initDatabaseNonStaticImageVersion", "fake-databaseNonStaticImageVersion", false, false, omConnectionFactory.GetConnectionFunc) + reconciler := newStandaloneReconciler(ctx, kubeClient, nil, "fake-initDatabaseNonStaticImageVersion", "fake-databaseNonStaticImageVersion", false, omConnectionFactory.GetConnectionFunc) checkReconcilePending(ctx, t, reconciler, st, "StatefulSet not ready", kubeClient, 3) // this affects Get interceptor func, blocking automatically marking sts as ready @@ -330,7 +330,7 @@ func TestStandaloneAgentVersionMapping(t *testing.T) { func defaultStandaloneReconciler(ctx context.Context, imageUrls images.ImageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion string, omConnectionFactoryFunc om.ConnectionFactory, rs *mdbv1.MongoDB) (*ReconcileMongoDbStandalone, kubernetesClient.Client, *om.CachedOMConnectionFactory) { omConnectionFactory := om.NewCachedOMConnectionFactory(omConnectionFactoryFunc) kubeClient := mock.NewDefaultFakeClientWithOMConnectionFactory(omConnectionFactory, rs) - return newStandaloneReconciler(ctx, kubeClient, imageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion, false, false, omConnectionFactory.GetConnectionFunc), kubeClient, omConnectionFactory + return newStandaloneReconciler(ctx, kubeClient, imageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion, false, omConnectionFactory.GetConnectionFunc), kubeClient, omConnectionFactory } // TODO remove in favor of '/api/mongodbbuilder.go' @@ -404,7 +404,7 @@ func createDeploymentFromStandalone(st *mdbv1.MongoDB) om.Deployment { d := om.NewDeployment() sts := construct.DatabaseStatefulSet(*st, construct.StandaloneOptions(construct.GetPodEnvOptions()), zap.S()) hostnames, _ := dns.GetDnsForStatefulSet(sts, st.Spec.GetClusterDomain(), nil) - process := om.NewMongodProcess(st.Name, hostnames[0], "fake-mongoDBImage", false, st.Spec.AdditionalMongodConfig, st.GetSpec(), "", nil, st.Status.FeatureCompatibilityVersion) + process := om.NewMongodProcess(st.Name, hostnames[0], "fake-mongoDBImage", st.Spec.AdditionalMongodConfig, st.GetSpec(), "", nil, st.Status.FeatureCompatibilityVersion) lastConfig, err := st.GetLastAdditionalMongodConfigByType(mdbv1.StandaloneConfig) if err != nil { diff --git a/main.go b/main.go index 484c1b061..66bb128b7 100644 --- a/main.go +++ b/main.go @@ -49,7 +49,6 @@ import ( "github.com/mongodb/mongodb-kubernetes/pkg/pprof" "github.com/mongodb/mongodb-kubernetes/pkg/telemetry" "github.com/mongodb/mongodb-kubernetes/pkg/util" - "github.com/mongodb/mongodb-kubernetes/pkg/util/architectures" "github.com/mongodb/mongodb-kubernetes/pkg/util/env" "github.com/mongodb/mongodb-kubernetes/pkg/util/stringutil" "github.com/mongodb/mongodb-kubernetes/pkg/webhook" @@ -123,7 +122,6 @@ func main() { initializeEnvironment() imageUrls := images.LoadImageUrlsFromEnv() - forceEnterprise := env.ReadBoolOrDefault(architectures.MdbAssumeEnterpriseImage, false) initDatabaseNonStaticImageVersion := env.ReadOrDefault(construct.InitDatabaseVersionEnv, "latest") databaseNonStaticImageVersion := env.ReadOrDefault(construct.DatabaseVersionEnv, "latest") initAppdbVersion := env.ReadOrDefault(construct.InitAppdbVersionEnv, "latest") @@ -230,7 +228,7 @@ func main() { // Setup all 
Controllers if slices.Contains(crds, mongoDBCRDPlural) { - if err := setupMongoDBCRD(ctx, mgr, imageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion, forceEnterprise, enableClusterMongoDBRoles, memberClusterObjectsMap); err != nil { + if err := setupMongoDBCRD(ctx, mgr, imageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion, enableClusterMongoDBRoles, memberClusterObjectsMap); err != nil { log.Fatal(err) } } @@ -245,7 +243,7 @@ func main() { } } if slices.Contains(crds, mongoDBMultiClusterCRDPlural) { - if err := setupMongoDBMultiClusterCRD(ctx, mgr, imageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion, forceEnterprise, enableClusterMongoDBRoles, memberClusterObjectsMap); err != nil { + if err := setupMongoDBMultiClusterCRD(ctx, mgr, imageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion, enableClusterMongoDBRoles, memberClusterObjectsMap); err != nil { log.Fatal(err) } } @@ -307,14 +305,14 @@ func main() { } } -func setupMongoDBCRD(ctx context.Context, mgr manager.Manager, imageUrls images.ImageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion string, forceEnterprise bool, enableClusterMongoDBRoles bool, memberClusterObjectsMap map[string]runtime_cluster.Cluster) error { - if err := operator.AddStandaloneController(ctx, mgr, imageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion, forceEnterprise, enableClusterMongoDBRoles); err != nil { +func setupMongoDBCRD(ctx context.Context, mgr manager.Manager, imageUrls images.ImageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion string, enableClusterMongoDBRoles bool, memberClusterObjectsMap map[string]runtime_cluster.Cluster) error { + if err := operator.AddStandaloneController(ctx, mgr, imageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion, enableClusterMongoDBRoles); err != nil { return err } - if err := operator.AddReplicaSetController(ctx, mgr, imageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion, forceEnterprise, enableClusterMongoDBRoles); err != nil { + if err := operator.AddReplicaSetController(ctx, mgr, imageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion, enableClusterMongoDBRoles); err != nil { return err } - if err := operator.AddShardedClusterController(ctx, mgr, imageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion, forceEnterprise, enableClusterMongoDBRoles, memberClusterObjectsMap); err != nil { + if err := operator.AddShardedClusterController(ctx, mgr, imageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion, enableClusterMongoDBRoles, memberClusterObjectsMap); err != nil { return err } return ctrl.NewWebhookManagedBy(mgr).For(&mdbv1.MongoDB{}).Complete() @@ -331,8 +329,8 @@ func setupMongoDBUserCRD(ctx context.Context, mgr manager.Manager, memberCluster return operator.AddMongoDBUserController(ctx, mgr, memberClusterObjectsMap) } -func setupMongoDBMultiClusterCRD(ctx context.Context, mgr manager.Manager, imageUrls images.ImageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion string, forceEnterprise bool, enableClusterMongoDBRoles bool, memberClusterObjectsMap map[string]runtime_cluster.Cluster) error { - if err := operator.AddMultiReplicaSetController(ctx, mgr, imageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion, forceEnterprise, enableClusterMongoDBRoles, memberClusterObjectsMap); err != nil { +func 
setupMongoDBMultiClusterCRD(ctx context.Context, mgr manager.Manager, imageUrls images.ImageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion string, enableClusterMongoDBRoles bool, memberClusterObjectsMap map[string]runtime_cluster.Cluster) error { + if err := operator.AddMultiReplicaSetController(ctx, mgr, imageUrls, initDatabaseNonStaticImageVersion, databaseNonStaticImageVersion, enableClusterMongoDBRoles, memberClusterObjectsMap); err != nil { return err } return ctrl.NewWebhookManagedBy(mgr).For(&mdbmultiv1.MongoDBMultiCluster{}).Complete() diff --git a/pkg/util/architectures/static.go b/pkg/util/architectures/static.go index dc398dabe..56ab8fd83 100644 --- a/pkg/util/architectures/static.go +++ b/pkg/util/architectures/static.go @@ -33,12 +33,6 @@ const ( DefaultEnvArchitecture = "MDB_DEFAULT_ARCHITECTURE" Static DefaultArchitecture = "static" NonStatic DefaultArchitecture = "non-static" - // MdbAssumeEnterpriseImage allows the customer to override the version image detection used by the operator to - // set up the automation config. - // true: always append the -ent suffix and assume enterprise - // false: do not append the -ent suffix and assume community - // default: false - MdbAssumeEnterpriseImage = "MDB_ASSUME_ENTERPRISE_IMAGE" // MdbAgentImageRepo contains the repository containing the agent image for the database MdbAgentImageRepo = "MDB_AGENT_IMAGE_REPOSITORY" MdbAgentImageRepoDefault = "quay.io/mongodb/mongodb-agent-ubi" @@ -74,12 +68,12 @@ func GetArchitecture(annotations map[string]string) DefaultArchitecture { // GetMongoVersionForAutomationConfig returns the required version with potentially the suffix -ent. // If we are in static containers architecture, we need the -ent suffix in case we are running the ea image. // If not, the agent will try to change the version to reflect the non-enterprise image. 
-func GetMongoVersionForAutomationConfig(mongoDBImage, version string, forceEnterprise bool, architecture DefaultArchitecture) string { +func GetMongoVersionForAutomationConfig(mongoDBImage, version string, architecture DefaultArchitecture) string { if architecture != Static { return version } // the image repo should be either mongodb / mongodb-enterprise-server or mongodb / mongodb-community-server - if strings.Contains(mongoDBImage, util.OfficialEnterpriseServerImageUrl) || forceEnterprise { + if strings.Contains(mongoDBImage, util.OfficialEnterpriseServerImageUrl) { if !strings.HasSuffix(version, "-ent") { version = version + "-ent" } diff --git a/pkg/util/architectures/static_test.go b/pkg/util/architectures/static_test.go index 2f11a0015..bd91fafad 100644 --- a/pkg/util/architectures/static_test.go +++ b/pkg/util/architectures/static_test.go @@ -71,49 +71,37 @@ func TestIsRunningStaticArchitecture(t *testing.T) { func TestGetMongoVersion(t *testing.T) { tests := []struct { - name string - mongoDBImage string - version string - forceEnterprise bool - architecture DefaultArchitecture - want string + name string + mongoDBImage string + version string + architecture DefaultArchitecture + want string }{ { - name: "nothing", - mongoDBImage: "", - version: "8.0.0", - forceEnterprise: false, - architecture: NonStatic, - want: "8.0.0", + name: "nothing", + mongoDBImage: "", + version: "8.0.0", + architecture: NonStatic, + want: "8.0.0", }, { - name: "enterprise repo", - mongoDBImage: "quay.io/mongodb/mongodb-enterprise-server", - version: "8.0.0", - forceEnterprise: false, - architecture: Static, - want: "8.0.0-ent", + name: "enterprise repo", + mongoDBImage: "quay.io/mongodb/mongodb-enterprise-server", + version: "8.0.0", + architecture: Static, + want: "8.0.0-ent", }, { - name: "community repo", - mongoDBImage: "quay.io/mongodb/mongodb-community-server", - version: "8.0.0", - forceEnterprise: false, - architecture: NonStatic, - want: "8.0.0", - }, - { - name: "enterprise repo forced", - mongoDBImage: "quay.io/mongodb/mongodb-private-server", - version: "8.0.0", - forceEnterprise: true, - architecture: Static, - want: "8.0.0-ent", + name: "community repo", + mongoDBImage: "quay.io/mongodb/mongodb-community-server", + version: "8.0.0", + architecture: NonStatic, + want: "8.0.0", }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - if got := GetMongoVersionForAutomationConfig(tt.mongoDBImage, tt.version, tt.forceEnterprise, tt.architecture); got != tt.want { + if got := GetMongoVersionForAutomationConfig(tt.mongoDBImage, tt.version, tt.architecture); got != tt.want { t.Errorf("GetMongoVersionForAutomationConfig() = %v, want %v", got, tt.want) } })
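
A note on the behavioral pivot of this change: after the edits to pkg/util/architectures/static.go above, GetMongoVersionForAutomationConfig derives the -ent suffix from the image name alone, and only in static architecture; there is no flag left to force enterprise detection. The following self-contained Go sketch (not part of the diff) mirrors that post-change logic so it can be exercised in isolation. The constant value is an assumption standing in for util.OfficialEnterpriseServerImageUrl, whose definition is not shown here, and the lowercase function name marks this as an illustration rather than the operator's exported API.

package main

import (
	"fmt"
	"strings"
)

type DefaultArchitecture string

const (
	Static    DefaultArchitecture = "static"
	NonStatic DefaultArchitecture = "non-static"

	// Assumed stand-in for util.OfficialEnterpriseServerImageUrl: the substring
	// the operator matches to recognize the official enterprise server image.
	officialEnterpriseServerImageURL = "mongodb-enterprise-server"
)

// getMongoVersionForAutomationConfig mirrors the post-change logic: the -ent
// suffix is applied only in static architecture, and only when the image name
// identifies the official enterprise server image.
func getMongoVersionForAutomationConfig(mongoDBImage, version string, architecture DefaultArchitecture) string {
	if architecture != Static {
		return version
	}
	if strings.Contains(mongoDBImage, officialEnterpriseServerImageURL) && !strings.HasSuffix(version, "-ent") {
		return version + "-ent"
	}
	return version
}

func main() {
	// Cases mirroring the entries kept in TestGetMongoVersion:
	fmt.Println(getMongoVersionForAutomationConfig("quay.io/mongodb/mongodb-enterprise-server", "8.0.0", Static))   // 8.0.0-ent
	fmt.Println(getMongoVersionForAutomationConfig("quay.io/mongodb/mongodb-community-server", "8.0.0", NonStatic)) // 8.0.0
	// The deleted "enterprise repo forced" case: an image name that does not
	// contain the enterprise substring no longer gains -ent, because there is
	// no forceEnterprise flag left to override the image-name check.
	fmt.Println(getMongoVersionForAutomationConfig("quay.io/mongodb/mongodb-private-server", "8.0.0", Static)) // 8.0.0
}

This is also why the "enterprise repo forced" table entry in static_test.go is deleted above rather than rewritten: with the flag gone, that input has no remaining way to produce 8.0.0-ent.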