diff --git a/.evergreen.yml b/.evergreen.yml index 209bf152a..fe222b34a 100644 --- a/.evergreen.yml +++ b/.evergreen.yml @@ -462,9 +462,6 @@ tasks: skip_tags: ubuntu,release - name: build_agent_images_ubi - depends_on: - - name: build_init_database_image_ubi - variant: init_test_run commands: - func: clone - func: setup_building_host diff --git a/changelog/20250806_fix_changing_container_setup_of_static_architecture.md b/changelog/20250806_fix_changing_container_setup_of_static_architecture.md new file mode 100644 index 000000000..928a9799b --- /dev/null +++ b/changelog/20250806_fix_changing_container_setup_of_static_architecture.md @@ -0,0 +1,8 @@ +--- +title: Changing container setup of static architecture +kind: fix +date: 2025-08-06 +--- + +* This change fixes the complex and difficult-to-maintain architecture for StatefulSet containers, which relied on an "agent matrix" mapping operator versions to agent versions and therefore produced a very large number of images. +* We solve this by shifting to a 3-container setup. The new design eliminates the operator-version/agent-version matrix by adding one additional container that holds all required binaries, mirroring what we already do with the mongodb-database container. diff --git a/controllers/operator/common_controller.go b/controllers/operator/common_controller.go index 3fb753d47..bf9cf2ec0 100644 --- a/controllers/operator/common_controller.go +++ b/controllers/operator/common_controller.go @@ -47,7 +47,6 @@ import ( "github.com/mongodb/mongodb-kubernetes/pkg/util/architectures" "github.com/mongodb/mongodb-kubernetes/pkg/util/env" "github.com/mongodb/mongodb-kubernetes/pkg/util/stringutil" - "github.com/mongodb/mongodb-kubernetes/pkg/util/versionutil" "github.com/mongodb/mongodb-kubernetes/pkg/vault" ) @@ -684,9 +683,7 @@ func (r *ReconcileCommonController) getAgentVersion(conn om.Connection, omVersio return "", err } else { log.Debugf("Using agent version %s", agentVersion) - currentOperatorVersion := versionutil.StaticContainersOperatorVersion() - log.Debugf("Using Operator version: %s", currentOperatorVersion) - return agentVersion + "_" + currentOperatorVersion, nil + return agentVersion, nil } } diff --git a/controllers/operator/construct/appdb_construction.go b/controllers/operator/construct/appdb_construction.go index 6056e25af..d6c296db0 100644 --- a/controllers/operator/construct/appdb_construction.go +++ b/controllers/operator/construct/appdb_construction.go @@ -122,6 +122,8 @@ func appDbPodSpec(initContainerImage string, om om.MongoDBOpsManager) podtemplat construct.AgentName, container.WithResourceRequirements(buildRequirementsFromPodSpec(*appdbPodSpec)), ) + scriptsVolumeMount := statefulset.CreateVolumeMount("agent-scripts", "/opt/scripts", statefulset.WithReadOnly(false)) + hooksVolumeMount := statefulset.CreateVolumeMount("hooks", "/hooks", statefulset.WithReadOnly(false)) initUpdateFunc := podtemplatespec.NOOP() if !architectures.IsRunningStaticArchitecture(om.Annotations) { @@ -130,8 +132,6 @@ func appDbPodSpec(initContainerImage string, om om.MongoDBOpsManager) podtemplat // volumes of different containers.
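Not part of the patch: a minimal sketch of the 3-container pod layout the changelog above describes, assuming illustrative container name strings (the operator itself uses util.AgentContainerName, util.DatabaseContainerName and util.AgentContainerUtilitiesName); images and commands mirror buildStaticArchitecturePodTemplateSpec later in this diff.

```go
package sketch

import corev1 "k8s.io/api/core/v1"

// staticPodContainers shows the shape of the pod in static architecture:
// one agent container, one mongod container, and one "agent utilities"
// container that ships the version-dependent binaries, removing the need
// for a combined agent-version/operator-version image matrix.
func staticPodContainers(agentImage, mongodImage, utilitiesImage string) []corev1.Container {
	return []corev1.Container{
		{
			// Runs the automation agent; the shim copies scripts into the shared
			// "database-scripts" volume and then execs agent-launcher.sh.
			Name:    "mongodb-agent", // illustrative stand-in for util.AgentContainerName
			Image:   agentImage,
			Command: []string{"/usr/local/bin/agent-launcher-shim.sh"},
		},
		{
			// mongod is started by the agent, so this container only tails its log.
			Name:    "mongod", // illustrative stand-in for util.DatabaseContainerName
			Image:   mongodImage,
			Command: []string{"bash", "-c", "tail -F -n0 ${MDB_LOG_FILE_MONGODB} mongodb_marker"},
		},
		{
			// Holds the extra binaries and scripts shipped with the init-database
			// image and simply stays alive so they remain available in the pod.
			Name:    "agent-utilities", // illustrative stand-in for util.AgentContainerUtilitiesName
			Image:   utilitiesImage,
			Command: []string{"bash", "-c", "touch /tmp/agent-utilities-holder_marker && tail -F -n0 /tmp/agent-utilities-holder_marker"},
		},
	}
}
```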
initUpdateFunc = func(templateSpec *corev1.PodTemplateSpec) { templateSpec.Spec.InitContainers = []corev1.Container{} - scriptsVolumeMount := statefulset.CreateVolumeMount("agent-scripts", "/opt/scripts", statefulset.WithReadOnly(false)) - hooksVolumeMount := statefulset.CreateVolumeMount("hooks", "/hooks", statefulset.WithReadOnly(false)) podtemplatespec.WithInitContainer(InitAppDbContainerName, buildAppDBInitContainer(initContainerImage, []corev1.VolumeMount{scriptsVolumeMount, hooksVolumeMount}))(templateSpec) } } @@ -233,6 +233,12 @@ func CAConfigMapName(appDb om.AppDBSpec, log *zap.SugaredLogger) string { // and volumemounts for TLS. func tlsVolumes(appDb om.AppDBSpec, podVars *env.PodEnvVars, log *zap.SugaredLogger) podtemplatespec.Modification { volumesToAdd, volumeMounts := getTLSVolumesAndVolumeMounts(appDb, podVars, log) + + // Add agent API key volume mount if not using vault and monitoring is enabled + if !vault.IsVaultSecretBackend() && ShouldEnableMonitoring(podVars) { + volumeMounts = append(volumeMounts, statefulset.CreateVolumeMount(AgentAPIKeyVolumeName, AgentAPIKeySecretPath)) + } + volumesFunc := func(spec *corev1.PodTemplateSpec) { for _, v := range volumesToAdd { podtemplatespec.WithVolume(v)(spec) @@ -380,7 +386,7 @@ func AppDbStatefulSet(opsManager om.MongoDBOpsManager, podVars *env.PodEnvVars, externalDomain := appDb.GetExternalDomainForMemberCluster(scaler.MemberClusterName()) if ShouldEnableMonitoring(podVars) { - monitoringModification = addMonitoringContainer(*appDb, *podVars, opts, externalDomain, log) + monitoringModification = addMonitoringContainer(*appDb, *podVars, opts, externalDomain, architectures.IsRunningStaticArchitecture(opsManager.Annotations), log) } else { // Otherwise, let's remove for now every podTemplateSpec related to monitoring // We will apply them when enabling monitoring @@ -390,7 +396,7 @@ func AppDbStatefulSet(opsManager om.MongoDBOpsManager, podVars *env.PodEnvVars, } // We copy the Automation Agent command from community and add the agent startup parameters - automationAgentCommand := construct.AutomationAgentCommand(true, opsManager.Spec.AppDB.GetAgentLogLevel(), opsManager.Spec.AppDB.GetAgentLogFile(), opsManager.Spec.AppDB.GetAgentMaxLogFileDurationHours()) + automationAgentCommand := construct.AutomationAgentCommand(architectures.IsRunningStaticArchitecture(opsManager.Annotations), true, opsManager.Spec.AppDB.GetAgentLogLevel(), opsManager.Spec.AppDB.GetAgentLogFile(), opsManager.Spec.AppDB.GetAgentMaxLogFileDurationHours()) idx := len(automationAgentCommand) - 1 automationAgentCommand[idx] += appDb.AutomationAgent.StartupParameters.ToCommandLineArgs() @@ -403,13 +409,10 @@ func AppDbStatefulSet(opsManager om.MongoDBOpsManager, podVars *env.PodEnvVars, MountPath: "/var/lib/automation/config/acVersion", } - // Here we ask to craete init containers which also creates required volumens. + // Here we ask to create init containers which also creates required volumes. // Note that we provide empty images for init containers. They are not important - // at this stage beucase later we will define our own init containers for non-static architecture. 
- mod := construct.BuildMongoDBReplicaSetStatefulSetModificationFunction(&opsManager.Spec.AppDB, scaler, opts.MongodbImage, opts.AgentImage, "", "", true) - if architectures.IsRunningStaticArchitecture(opsManager.Annotations) { - mod = construct.BuildMongoDBReplicaSetStatefulSetModificationFunction(&opsManager.Spec.AppDB, scaler, opts.MongodbImage, opts.AgentImage, "", "", false) - } + // at this stage because later we will define our own init containers for non-static architecture. + mod := construct.BuildMongoDBReplicaSetStatefulSetModificationFunction(&opsManager.Spec.AppDB, scaler, opts.MongodbImage, opts.AgentImage, "", "", !architectures.IsRunningStaticArchitecture(opsManager.Annotations), opts.InitAppDBImage) sts := statefulset.New( mod, @@ -493,7 +496,7 @@ func getVolumeMountIndexByName(mounts []corev1.VolumeMount, name string) int { // addMonitoringContainer returns a podtemplatespec modification that adds the monitoring container to the AppDB Statefulset. // Note that this replicates some code from the functions that do this for the base AppDB Statefulset. After many iterations // this was deemed to be an acceptable compromise to make code clearer and more maintainable. -func addMonitoringContainer(appDB om.AppDBSpec, podVars env.PodEnvVars, opts AppDBStatefulSetOptions, externalDomain *string, log *zap.SugaredLogger) podtemplatespec.Modification { +func addMonitoringContainer(appDB om.AppDBSpec, podVars env.PodEnvVars, opts AppDBStatefulSetOptions, externalDomain *string, isStatic bool, log *zap.SugaredLogger) podtemplatespec.Modification { var monitoringAcVolume corev1.Volume var monitoringACFunc podtemplatespec.Modification @@ -516,7 +519,7 @@ func addMonitoringContainer(appDB om.AppDBSpec, podVars env.PodEnvVars, opts App } // Construct the command by concatenating: // 1. 
The base command - from community - command := construct.MongodbUserCommandWithAPIKeyExport + command := construct.GetMongodbUserCommandWithAPIKeyExport(isStatic) command += "agent/mongodb-agent" command += " -healthCheckFilePath=" + monitoringAgentHealthStatusFilePathValue command += " -serveStatusPort=5001" diff --git a/controllers/operator/construct/construction_test.go b/controllers/operator/construct/construction_test.go index 6108b1c8e..c5dd99979 100644 --- a/controllers/operator/construct/construction_test.go +++ b/controllers/operator/construct/construction_test.go @@ -28,20 +28,20 @@ func TestBuildStatefulSet_PersistentFlagStatic(t *testing.T) { mdb := mdbv1.NewReplicaSetBuilder().SetPersistent(nil).Build() set := DatabaseStatefulSet(*mdb, ReplicaSetOptions(GetPodEnvOptions()), zap.S()) assert.Len(t, set.Spec.VolumeClaimTemplates, 1) - assert.Len(t, set.Spec.Template.Spec.Containers[0].VolumeMounts, 7) + assert.Len(t, set.Spec.Template.Spec.Containers[0].VolumeMounts, 8) assert.Len(t, set.Spec.Template.Spec.Containers[1].VolumeMounts, 7) mdb = mdbv1.NewReplicaSetBuilder().SetPersistent(util.BooleanRef(true)).Build() set = DatabaseStatefulSet(*mdb, ReplicaSetOptions(GetPodEnvOptions()), zap.S()) assert.Len(t, set.Spec.VolumeClaimTemplates, 1) - assert.Len(t, set.Spec.Template.Spec.Containers[0].VolumeMounts, 7) + assert.Len(t, set.Spec.Template.Spec.Containers[0].VolumeMounts, 8) assert.Len(t, set.Spec.Template.Spec.Containers[1].VolumeMounts, 7) // If no persistence is set then we still mount init scripts mdb = mdbv1.NewReplicaSetBuilder().SetPersistent(util.BooleanRef(false)).Build() set = DatabaseStatefulSet(*mdb, ReplicaSetOptions(GetPodEnvOptions()), zap.S()) assert.Len(t, set.Spec.VolumeClaimTemplates, 0) - assert.Len(t, set.Spec.Template.Spec.Containers[0].VolumeMounts, 7) + assert.Len(t, set.Spec.Template.Spec.Containers[0].VolumeMounts, 8) assert.Len(t, set.Spec.Template.Spec.Containers[1].VolumeMounts, 7) } @@ -111,6 +111,7 @@ func TestBuildStatefulSet_PersistentVolumeClaimSingleStatic(t *testing.T) { {Name: util.PvcNameData, MountPath: util.PvcMountPathData, SubPath: util.PvcNameData}, {Name: util.PvcNameData, MountPath: util.PvcMountPathJournal, SubPath: util.PvcNameJournal}, {Name: util.PvcNameData, MountPath: util.PvcMountPathLogs, SubPath: util.PvcNameLogs}, + {Name: PvcNameDatabaseScripts, MountPath: PvcMountPathScripts, ReadOnly: false}, }) } diff --git a/controllers/operator/construct/database_construction.go b/controllers/operator/construct/database_construction.go index 7194c643c..d4ae74ab7 100644 --- a/controllers/operator/construct/database_construction.go +++ b/controllers/operator/construct/database_construction.go @@ -105,10 +105,10 @@ type DatabaseStatefulSetOptions struct { StsType StsType AdditionalMongodConfig *mdbv1.AdditionalMongodConfig - InitDatabaseNonStaticImage string - DatabaseNonStaticImage string - MongodbImage string - AgentImage string + InitDatabaseImage string + DatabaseNonStaticImage string + MongodbImage string + AgentImage string Annotations map[string]string VaultConfig vault.VaultConfiguration @@ -366,7 +366,8 @@ func DatabaseStatefulSetHelper(mdb databaseStatefulSetSource, stsOpts *DatabaseS stsOpts.ExtraEnvs = extraEnvs templateFunc := buildMongoDBPodTemplateSpec(*stsOpts, mdb) - return statefulset.New(buildDatabaseStatefulSetConfigurationFunction(mdb, templateFunc, *stsOpts, log)) + sts := statefulset.New(buildDatabaseStatefulSetConfigurationFunction(mdb, templateFunc, *stsOpts, log)) + return sts } // 
buildVaultDatabaseSecretsToInject fully constructs the DatabaseSecretsToInject required to @@ -487,25 +488,44 @@ func buildDatabaseStatefulSetConfigurationFunction(mdb databaseStatefulSetSource shareProcessNs := statefulset.NOOP() secondContainerModification := podtemplatespec.NOOP() + var databaseImage string + var staticMods []podtemplatespec.Modification if architectures.IsRunningStaticArchitecture(mdb.GetAnnotations()) { shareProcessNs = func(sts *appsv1.StatefulSet) { - a := true - sts.Spec.Template.Spec.ShareProcessNamespace = &a + sts.Spec.Template.Spec.ShareProcessNamespace = ptr.To(true) } - secondContainerModification = podtemplatespec.WithContainerByIndex(1, container.WithVolumeMounts(volumeMounts)) - } - - var databaseImage string - if architectures.IsRunningStaticArchitecture(mdb.GetAnnotations()) { + // Add volume mounts to all containers in static architecture + // This runs after all containers have been added to the spec + staticMods = append(staticMods, func(spec *corev1.PodTemplateSpec) { + for i := range spec.Spec.Containers { + container.WithVolumeMounts(volumeMounts)(&spec.Spec.Containers[i]) + } + }) databaseImage = opts.AgentImage } else { databaseImage = opts.DatabaseNonStaticImage } + podTemplateModifications := []podtemplatespec.Modification{ + podTemplateAnnotationFunc, + podtemplatespec.WithAffinity(podAffinity, PodAntiAffinityLabelKey, 100), + podtemplatespec.WithTerminationGracePeriodSeconds(util.DefaultPodTerminationPeriodSeconds), + podtemplatespec.WithPodLabels(podLabels), + podtemplatespec.WithContainerByIndex(0, sharedDatabaseContainerFunc(databaseImage, *opts.PodSpec, volumeMounts, configureContainerSecurityContext, opts.ServicePort)), + secondContainerModification, + volumesFunc, + configurePodSpecSecurityContext, + configureImagePullSecrets, + podTemplateSpecFunc, + } + podTemplateModifications = append(podTemplateModifications, staticMods...) 
+ return statefulset.Apply( + // StatefulSet metadata statefulset.WithLabels(ssLabels), statefulset.WithName(stsName), statefulset.WithNamespace(mdb.GetNamespace()), + // StatefulSet spec statefulset.WithMatchLabels(podLabels), statefulset.WithServiceName(opts.ServiceName), statefulset.WithReplicas(opts.Replicas), @@ -513,18 +533,7 @@ func buildDatabaseStatefulSetConfigurationFunction(mdb databaseStatefulSetSource annotationFunc, volumeClaimFuncs, shareProcessNs, - statefulset.WithPodSpecTemplate(podtemplatespec.Apply( - podTemplateAnnotationFunc, - podtemplatespec.WithAffinity(podAffinity, PodAntiAffinityLabelKey, 100), - podtemplatespec.WithTerminationGracePeriodSeconds(util.DefaultPodTerminationPeriodSeconds), - podtemplatespec.WithPodLabels(podLabels), - podtemplatespec.WithContainerByIndex(0, sharedDatabaseContainerFunc(databaseImage, *opts.PodSpec, volumeMounts, configureContainerSecurityContext, opts.ServicePort)), - secondContainerModification, - volumesFunc, - configurePodSpecSecurityContext, - configureImagePullSecrets, - podTemplateSpecFunc, - )), + statefulset.WithPodSpecTemplate(podtemplatespec.Apply(podTemplateModifications...)), ) } @@ -666,6 +675,88 @@ func getVolumesAndVolumeMounts(mdb databaseStatefulSetSource, databaseOpts Datab // buildMongoDBPodTemplateSpec constructs the podTemplateSpec for the MongoDB resource func buildMongoDBPodTemplateSpec(opts DatabaseStatefulSetOptions, mdb databaseStatefulSetSource) podtemplatespec.Modification { + var modifications podtemplatespec.Modification + if architectures.IsRunningStaticArchitecture(mdb.GetAnnotations()) { + modifications = buildStaticArchitecturePodTemplateSpec(opts, mdb) + } else { + modifications = buildNonStaticArchitecturePodTemplateSpec(opts, mdb) + } + sharedModifications := sharedDatabaseConfiguration(opts) + return podtemplatespec.Apply(sharedModifications, modifications) +} + +// buildStaticArchitecturePodTemplateSpec constructs the podTemplateSpec for static architecture +func buildStaticArchitecturePodTemplateSpec(opts DatabaseStatefulSetOptions, mdb databaseStatefulSetSource) podtemplatespec.Modification { + // scripts volume is needed for agent-launcher-shim.sh to copy scripts + scriptsVolume := statefulset.CreateVolumeFromEmptyDir("database-scripts") + databaseScriptsVolumeMount := databaseScriptsVolumeMount(false) // writable for shim script + + volumes := []corev1.Volume{scriptsVolume} + volumeMounts := []corev1.VolumeMount{databaseScriptsVolumeMount} + + _, configureContainerSecurityContext := podtemplatespec.WithDefaultSecurityContextsModifications() + + agentContainerModifications := []func(*corev1.Container){container.Apply( + container.WithName(util.AgentContainerName), + container.WithImage(opts.AgentImage), + container.WithEnvs(databaseEnvVars(opts)...), + container.WithArgs([]string{}), + container.WithImagePullPolicy(corev1.PullPolicy(env.ReadOrPanic(util.AutomationAgentImagePullPolicy))), // nolint:forbidigo + container.WithLivenessProbe(DatabaseLivenessProbe()), + container.WithEnvs(startupParametersToAgentFlag(opts.AgentConfig.StartupParameters)), + container.WithEnvs(logConfigurationToEnvVars(opts.AgentConfig.StartupParameters, opts.AdditionalMongodConfig)...), + container.WithEnvs(staticContainersEnvVars(mdb)...), + container.WithEnvs(readinessEnvironmentVariablesToEnvVars(opts.AgentConfig.ReadinessProbe.EnvironmentVariables)...), + container.WithCommand([]string{"/usr/local/bin/agent-launcher-shim.sh"}), + container.WithVolumeMounts(volumeMounts), + configureContainerSecurityContext, + )} 
+ + mongodContainerModifications := []func(*corev1.Container){container.Apply( + container.WithName(util.DatabaseContainerName), + container.WithResourceRequirements(buildRequirementsFromPodSpec(*opts.PodSpec)), + container.WithImage(opts.MongodbImage), + container.WithEnvs(databaseEnvVars(opts)...), + container.WithCommand([]string{"bash", "-c", "tail -F -n0 ${MDB_LOG_FILE_MONGODB} mongodb_marker"}), + configureContainerSecurityContext, + )} + + agentUtilitiesHolderModifications := []func(*corev1.Container){container.Apply( + container.WithName(util.AgentContainerUtilitiesName), + container.WithArgs([]string{""}), + container.WithImage(opts.InitDatabaseImage), + container.WithEnvs(databaseEnvVars(opts)...), + container.WithCommand([]string{"bash", "-c", "touch /tmp/agent-utilities-holder_marker && tail -F -n0 /tmp/agent-utilities-holder_marker"}), + configureContainerSecurityContext, + )} + + if opts.HostNameOverrideConfigmapName != "" { + volumes = append(volumes, statefulset.CreateVolumeFromConfigMap(opts.HostNameOverrideConfigmapName, opts.HostNameOverrideConfigmapName)) + hostnameOverrideModification := container.WithVolumeMounts([]corev1.VolumeMount{ + { + Name: opts.HostNameOverrideConfigmapName, + MountPath: "/opt/scripts/config", + }, + }) + agentContainerModifications = append(agentContainerModifications, hostnameOverrideModification) + mongodContainerModifications = append(mongodContainerModifications, hostnameOverrideModification) + agentUtilitiesHolderModifications = append(agentUtilitiesHolderModifications, hostnameOverrideModification) + } + + mods := []podtemplatespec.Modification{ + podtemplatespec.WithServiceAccount(util.MongoDBServiceAccount), + podtemplatespec.WithServiceAccount(getServiceAccountName(opts)), + podtemplatespec.WithVolumes(volumes), + podtemplatespec.WithContainerByIndex(0, agentContainerModifications...), + podtemplatespec.WithContainerByIndex(1, mongodContainerModifications...), + podtemplatespec.WithContainerByIndex(2, agentUtilitiesHolderModifications...), + } + + return podtemplatespec.Apply(mods...) 
+} + +// buildNonStaticArchitecturePodTemplateSpec constructs the podTemplateSpec for non-static architecture +func buildNonStaticArchitecturePodTemplateSpec(opts DatabaseStatefulSetOptions, mdb databaseStatefulSetSource) podtemplatespec.Modification { // scripts volume is shared by the init container and the AppDB, so the startup // script can be copied over scriptsVolume := statefulset.CreateVolumeFromEmptyDir("database-scripts") @@ -674,71 +765,41 @@ func buildMongoDBPodTemplateSpec(opts DatabaseStatefulSetOptions, mdb databaseSt volumes := []corev1.Volume{scriptsVolume} volumeMounts := []corev1.VolumeMount{databaseScriptsVolumeMount} - initContainerModifications := []func(*corev1.Container){buildDatabaseInitContainer(opts.InitDatabaseNonStaticImage)} + initContainerModifications := []func(*corev1.Container){buildDatabaseInitContainer(opts.InitDatabaseImage)} + databaseContainerModifications := []func(*corev1.Container){container.Apply( container.WithName(util.DatabaseContainerName), container.WithImage(opts.DatabaseNonStaticImage), container.WithEnvs(databaseEnvVars(opts)...), container.WithCommand([]string{"/opt/scripts/agent-launcher.sh"}), container.WithVolumeMounts(volumeMounts), + container.WithImagePullPolicy(corev1.PullPolicy(env.ReadOrPanic(util.AutomationAgentImagePullPolicy))), // nolint:forbidigo + container.WithLivenessProbe(DatabaseLivenessProbe()), + container.WithEnvs(startupParametersToAgentFlag(opts.AgentConfig.StartupParameters)), + container.WithEnvs(logConfigurationToEnvVars(opts.AgentConfig.StartupParameters, opts.AdditionalMongodConfig)...), + container.WithEnvs(staticContainersEnvVars(mdb)...), + container.WithEnvs(readinessEnvironmentVariablesToEnvVars(opts.AgentConfig.ReadinessProbe.EnvironmentVariables)...), )} - _, containerSecurityContext := podtemplatespec.WithDefaultSecurityContextsModifications() - - staticContainerMongodContainerModification := podtemplatespec.NOOP() - if architectures.IsRunningStaticArchitecture(mdb.GetAnnotations()) { - // we don't use initContainers therefore, we reset it here - initContainerModifications = []func(*corev1.Container){} - mongodModification := []func(*corev1.Container){container.Apply( - container.WithName(util.DatabaseContainerName), - container.WithArgs([]string{""}), - container.WithImage(opts.MongodbImage), - container.WithEnvs(databaseEnvVars(opts)...), - container.WithCommand([]string{"bash", "-c", "tail -F -n0 ${MDB_LOG_FILE_MONGODB} mongodb_marker"}), - containerSecurityContext, - )} - staticContainerMongodContainerModification = podtemplatespec.WithContainerByIndex(1, mongodModification...) - - // We are not setting the database-scripts volume on purpose, - // since we don't need to copy things from the init container over. 
- databaseContainerModifications = []func(*corev1.Container){container.Apply( - container.WithName(util.AgentContainerName), - container.WithImage(opts.AgentImage), - container.WithEnvs(databaseEnvVars(opts)...), - containerSecurityContext, - )} - } - if opts.HostNameOverrideConfigmapName != "" { volumes = append(volumes, statefulset.CreateVolumeFromConfigMap(opts.HostNameOverrideConfigmapName, opts.HostNameOverrideConfigmapName)) - modification := container.WithVolumeMounts([]corev1.VolumeMount{ + hostnameOverrideModification := container.WithVolumeMounts([]corev1.VolumeMount{ { Name: opts.HostNameOverrideConfigmapName, MountPath: "/opt/scripts/config", }, }) - - // we only need to add the volume modification if we actually use an init container - if len(initContainerModifications) > 0 { - initContainerModifications = append(initContainerModifications, modification) - } - - databaseContainerModifications = append(databaseContainerModifications, modification) + initContainerModifications = append(initContainerModifications, hostnameOverrideModification) + databaseContainerModifications = append(databaseContainerModifications, hostnameOverrideModification) } - serviceAccountName := getServiceAccountName(opts) - mods := []podtemplatespec.Modification{ - sharedDatabaseConfiguration(opts, mdb), + sharedDatabaseConfiguration(opts), podtemplatespec.WithServiceAccount(util.MongoDBServiceAccount), - podtemplatespec.WithServiceAccount(serviceAccountName), + podtemplatespec.WithServiceAccount(getServiceAccountName(opts)), podtemplatespec.WithVolumes(volumes), podtemplatespec.WithContainerByIndex(0, databaseContainerModifications...), - staticContainerMongodContainerModification, - } - - if len(initContainerModifications) > 0 { - mods = append(mods, podtemplatespec.WithInitContainerByIndex(0, initContainerModifications...)) + podtemplatespec.WithInitContainerByIndex(0, initContainerModifications...), } return podtemplatespec.Apply(mods...) 
@@ -761,56 +822,14 @@ func getServiceAccountName(opts DatabaseStatefulSetOptions) string { // sharedDatabaseConfiguration is a function which applies all the shared configuration // between the appDb and MongoDB resources -func sharedDatabaseConfiguration(opts DatabaseStatefulSetOptions, mdb databaseStatefulSetSource) podtemplatespec.Modification { - configurePodSpecSecurityContext, configureContainerSecurityContext := podtemplatespec.WithDefaultSecurityContextsModifications() +func sharedDatabaseConfiguration(opts DatabaseStatefulSetOptions) podtemplatespec.Modification { + configurePodSpecSecurityContext, _ := podtemplatespec.WithDefaultSecurityContextsModifications() pullSecretsConfigurationFunc := podtemplatespec.NOOP() if pullSecrets, ok := env.Read(util.ImagePullSecrets); ok { // nolint:forbidigo pullSecretsConfigurationFunc = podtemplatespec.WithImagePullSecrets(pullSecrets) } - agentModification := podtemplatespec.WithContainerByIndex(0, - container.Apply( - container.WithResourceRequirements(buildRequirementsFromPodSpec(*opts.PodSpec)), - container.WithPorts([]corev1.ContainerPort{{ContainerPort: opts.ServicePort}}), - container.WithImagePullPolicy(corev1.PullPolicy(env.ReadOrPanic(util.AutomationAgentImagePullPolicy))), // nolint:forbidigo - container.WithLivenessProbe(DatabaseLivenessProbe()), - container.WithEnvs(startupParametersToAgentFlag(opts.AgentConfig.StartupParameters)), - container.WithEnvs(logConfigurationToEnvVars(opts.AgentConfig.StartupParameters, opts.AdditionalMongodConfig)...), - container.WithEnvs(readinessEnvironmentVariablesToEnvVars(opts.AgentConfig.ReadinessProbe.EnvironmentVariables)...), - configureContainerSecurityContext, - ), - ) - - staticMongodModification := podtemplatespec.NOOP() - if architectures.IsRunningStaticArchitecture(mdb.GetAnnotations()) { - // The mongod - staticMongodModification = podtemplatespec.WithContainerByIndex(1, - container.Apply( - container.WithArgs([]string{"tail -F -n0 \"${MDB_LOG_FILE_MONGODB}\""}), - container.WithResourceRequirements(buildRequirementsFromPodSpec(*opts.PodSpec)), - container.WithPorts([]corev1.ContainerPort{{ContainerPort: opts.ServicePort}}), - container.WithImagePullPolicy(corev1.PullPolicy(env.ReadOrPanic(util.AutomationAgentImagePullPolicy))), // nolint:forbidigo - container.WithEnvs(startupParametersToAgentFlag(opts.AgentConfig.StartupParameters)), - container.WithEnvs(logConfigurationToEnvVars(opts.AgentConfig.StartupParameters, opts.AdditionalMongodConfig)...), - configureContainerSecurityContext, - ), - ) - agentModification = podtemplatespec.WithContainerByIndex(0, - container.Apply( - container.WithImagePullPolicy(corev1.PullPolicy(env.ReadOrPanic(util.AutomationAgentImagePullPolicy))), // nolint:forbidigo - container.WithLivenessProbe(DatabaseLivenessProbe()), - container.WithEnvs(startupParametersToAgentFlag(opts.AgentConfig.StartupParameters)), - container.WithEnvs(logConfigurationToEnvVars(opts.AgentConfig.StartupParameters, opts.AdditionalMongodConfig)...), - container.WithEnvs(staticContainersEnvVars(mdb)...), - container.WithEnvs(readinessEnvironmentVariablesToEnvVars(opts.AgentConfig.ReadinessProbe.EnvironmentVariables)...), - container.WithArgs([]string{}), - container.WithCommand([]string{"/opt/scripts/agent-launcher.sh"}), - configureContainerSecurityContext, - ), - ) - } - return podtemplatespec.Apply( podtemplatespec.WithPodLabels(defaultPodLabels(opts.ServiceName, opts.Name)), podtemplatespec.WithTerminationGracePeriodSeconds(util.DefaultPodTerminationPeriodSeconds), @@ -818,10 
+837,6 @@ func sharedDatabaseConfiguration(opts DatabaseStatefulSetOptions, mdb databaseSt configurePodSpecSecurityContext, podtemplatespec.WithAffinity(opts.Name, PodAntiAffinityLabelKey, 100), podtemplatespec.WithTopologyKey(opts.PodSpec.GetTopologyKeyOrDefault(), 0), - // The Agent - agentModification, - // AgentLoggingMongodConfig if static container - staticMongodModification, ) } diff --git a/controllers/operator/database_statefulset_options.go b/controllers/operator/database_statefulset_options.go index 0526cb865..d60bb1747 100644 --- a/controllers/operator/database_statefulset_options.go +++ b/controllers/operator/database_statefulset_options.go @@ -100,10 +100,10 @@ func WithDefaultConfigSrvStorageSize() func(options *construct.DatabaseStatefulS } } -// WithInitDatabaseNonStaticImage sets the InitDatabaseNonStaticImage field. +// WithInitDatabaseNonStaticImage sets the InitDatabaseImage field. func WithInitDatabaseNonStaticImage(image string) func(*construct.DatabaseStatefulSetOptions) { return func(opts *construct.DatabaseStatefulSetOptions) { - opts.InitDatabaseNonStaticImage = image + opts.InitDatabaseImage = image } } diff --git a/controllers/operator/mongodbmultireplicaset_controller_test.go b/controllers/operator/mongodbmultireplicaset_controller_test.go index 806ace443..21a6b1c99 100644 --- a/controllers/operator/mongodbmultireplicaset_controller_test.go +++ b/controllers/operator/mongodbmultireplicaset_controller_test.go @@ -163,11 +163,10 @@ func TestMultiReplicaSetClusterReconcileContainerImagesWithStaticArchitecture(t require.NoError(t, err) assert.Len(t, sts.Spec.Template.Spec.InitContainers, 0) - require.Len(t, sts.Spec.Template.Spec.Containers, 2) + require.Len(t, sts.Spec.Template.Spec.Containers, 3) - // Version from OM + operator version - assert.Equal(t, "quay.io/mongodb/mongodb-agent-ubi:12.0.30.7791-1_9.9.9-test", sts.Spec.Template.Spec.Containers[0].Image) - assert.Equal(t, "quay.io/mongodb/mongodb-enterprise-server:@sha256:MONGODB_DATABASE", sts.Spec.Template.Spec.Containers[1].Image) + // Version from OM + VerifyStaticContainers(t, sts.Spec.Template.Spec.Containers) }) } } diff --git a/controllers/operator/mongodbopsmanager_controller_test.go b/controllers/operator/mongodbopsmanager_controller_test.go index 83b26b4df..07291dabb 100644 --- a/controllers/operator/mongodbopsmanager_controller_test.go +++ b/controllers/operator/mongodbopsmanager_controller_test.go @@ -589,13 +589,10 @@ func TestOpsManagerReconcileContainerImagesWithStaticArchitecture(t *testing.T) require.NoError(t, err) require.Len(t, appDBSts.Spec.Template.Spec.InitContainers, 0) - require.Len(t, appDBSts.Spec.Template.Spec.Containers, 3) - - // Version from the mapping file (agent version + operator version) - assert.Contains(t, appDBSts.Spec.Template.Spec.Containers[0].Image, "-1_9.9.9-test") + require.Len(t, appDBSts.Spec.Template.Spec.Containers, 4) assert.Equal(t, "quay.io/mongodb/mongodb-enterprise-appdb-database-ubi@sha256:MONGODB_SHA", appDBSts.Spec.Template.Spec.Containers[1].Image) // In static architecture this container is a copy of agent container - assert.Equal(t, appDBSts.Spec.Template.Spec.Containers[0].Image, appDBSts.Spec.Template.Spec.Containers[2].Image) + assert.Equal(t, appDBSts.Spec.Template.Spec.Containers[0].Image, appDBSts.Spec.Template.Spec.Containers[3].Image) } func TestOpsManagerConnectionString_IsPassedAsSecretRef(t *testing.T) { diff --git a/controllers/operator/mongodbreplicaset_controller_test.go b/controllers/operator/mongodbreplicaset_controller_test.go 
index 4c9abb64c..64c3f724c 100644 --- a/controllers/operator/mongodbreplicaset_controller_test.go +++ b/controllers/operator/mongodbreplicaset_controller_test.go @@ -148,11 +148,29 @@ func TestReplicaSetClusterReconcileContainerImagesWithStaticArchitecture(t *test assert.NoError(t, err) assert.Len(t, sts.Spec.Template.Spec.InitContainers, 0) - require.Len(t, sts.Spec.Template.Spec.Containers, 2) + require.Len(t, sts.Spec.Template.Spec.Containers, 3) - // Version from OM + operator version - assert.Equal(t, "quay.io/mongodb/mongodb-agent-ubi:12.0.30.7791-1_9.9.9-test", sts.Spec.Template.Spec.Containers[0].Image) - assert.Equal(t, "quay.io/mongodb/mongodb-enterprise-server:@sha256:MONGODB_DATABASE", sts.Spec.Template.Spec.Containers[1].Image) + // Version from OM + VerifyStaticContainers(t, sts.Spec.Template.Spec.Containers) +} + +func VerifyStaticContainers(t *testing.T, containers []corev1.Container) { + agentContainerImage := findContainerImage(containers, util.AgentContainerName) + require.NotEmpty(t, agentContainerImage, "Agent container not found") + assert.Equal(t, "quay.io/mongodb/mongodb-agent-ubi:12.0.30.7791-1", agentContainerImage) + + mongoContainerImage := findContainerImage(containers, util.DatabaseContainerName) + require.NotEmpty(t, mongoContainerImage, "MongoDB container not found") + assert.Equal(t, "quay.io/mongodb/mongodb-enterprise-server:@sha256:MONGODB_DATABASE", mongoContainerImage) +} + +func findContainerImage(containers []corev1.Container, containerName string) string { + for _, container := range containers { + if container.Name == containerName { + return container.Image + } + } + return "" } func buildReplicaSetWithCustomProjectName(rsName string) (*mdbv1.MongoDB, *corev1.ConfigMap, string) { @@ -538,9 +556,9 @@ func TestReplicaSetCustomPodSpecTemplateStatic(t *testing.T) { assertPodSpecSts(t, &statefulSet, podSpec.NodeName, podSpec.Hostname, podSpec.RestartPolicy) podSpecTemplate := statefulSet.Spec.Template.Spec - assert.Len(t, podSpecTemplate.Containers, 3, "Should have 3 containers now") - assert.Equal(t, util.AgentContainerName, podSpecTemplate.Containers[0].Name, "Database container should always be first") - assert.Equal(t, "my-custom-container", podSpecTemplate.Containers[2].Name, "Custom container should be second") + assert.Len(t, podSpecTemplate.Containers, 4, "Should have 4 containers now") + assert.Equal(t, util.AgentContainerName, podSpecTemplate.Containers[0].Name, "Agent container should be first alphabetically") + assert.Equal(t, "my-custom-container", podSpecTemplate.Containers[len(podSpecTemplate.Containers)-1].Name, "Custom container should be last") } func TestFeatureControlPolicyAndTagAddedWithNewerOpsManager(t *testing.T) { diff --git a/controllers/operator/mongodbshardedcluster_controller_test.go b/controllers/operator/mongodbshardedcluster_controller_test.go index 8404ebabb..3fc1547e4 100644 --- a/controllers/operator/mongodbshardedcluster_controller_test.go +++ b/controllers/operator/mongodbshardedcluster_controller_test.go @@ -319,11 +319,10 @@ func TestShardedClusterReconcileContainerImagesWithStaticArchitecture(t *testing assert.NoError(t, err) assert.Len(t, sts.Spec.Template.Spec.InitContainers, 0) - require.Len(t, sts.Spec.Template.Spec.Containers, 2) + require.Len(t, sts.Spec.Template.Spec.Containers, 3) - // Version from OM + operator version - assert.Equal(t, "quay.io/mongodb/mongodb-agent-ubi:12.0.30.7791-1_9.9.9-test", sts.Spec.Template.Spec.Containers[0].Image) - assert.Equal(t,
"quay.io/mongodb/mongodb-enterprise-server:@sha256:MONGODB_DATABASE", sts.Spec.Template.Spec.Containers[1].Image) + // Version from OM + VerifyStaticContainers(t, sts.Spec.Template.Spec.Containers) }) } } @@ -928,22 +927,22 @@ func TestShardedCustomPodSpecTemplate(t *testing.T) { assertPodSpecSts(t, &statefulSetScConfig, configSrvPodSpec.NodeName, configSrvPodSpec.Hostname, configSrvPodSpec.RestartPolicy) podSpecTemplateSc0 := statefulSetSc0.Spec.Template.Spec - assert.Len(t, podSpecTemplateSc0.Containers, 2, "Should have 2 containers now") + assert.Len(t, podSpecTemplateSc0.Containers, 2, "Should still have 2 containers") assert.Equal(t, util.DatabaseContainerName, podSpecTemplateSc0.Containers[0].Name, "Database container should always be first") assert.Equal(t, "my-custom-container-sc", podSpecTemplateSc0.Containers[1].Name, "Custom container should be second") podSpecTemplateSc1 := statefulSetSc1.Spec.Template.Spec - assert.Len(t, podSpecTemplateSc1.Containers, 2, "Should have 2 containers now") + assert.Len(t, podSpecTemplateSc1.Containers, 2, "Should still have 2 containers") assert.Equal(t, util.DatabaseContainerName, podSpecTemplateSc1.Containers[0].Name, "Database container should always be first") assert.Equal(t, "my-custom-container-sc", podSpecTemplateSc1.Containers[1].Name, "Custom container should be second") podSpecTemplateMongoS := statefulSetMongoS.Spec.Template.Spec - assert.Len(t, podSpecTemplateMongoS.Containers, 2, "Should have 2 containers now") + assert.Len(t, podSpecTemplateMongoS.Containers, 2, "Should still have 2 containers") assert.Equal(t, util.DatabaseContainerName, podSpecTemplateMongoS.Containers[0].Name, "Database container should always be first") assert.Equal(t, "my-custom-container-mongos", podSpecTemplateMongoS.Containers[1].Name, "Custom container should be second") podSpecTemplateScConfig := statefulSetScConfig.Spec.Template.Spec - assert.Len(t, podSpecTemplateScConfig.Containers, 2, "Should have 2 containers now") + assert.Len(t, podSpecTemplateScConfig.Containers, 2, "Should still have 2 containers") assert.Equal(t, util.DatabaseContainerName, podSpecTemplateScConfig.Containers[0].Name, "Database container should always be first") assert.Equal(t, "my-custom-container-config", podSpecTemplateScConfig.Containers[1].Name, "Custom container should be second") } @@ -1027,24 +1026,24 @@ func TestShardedCustomPodStaticSpecTemplate(t *testing.T) { assertPodSpecSts(t, &statefulSetScConfig, configSrvPodSpec.NodeName, configSrvPodSpec.Hostname, configSrvPodSpec.RestartPolicy) podSpecTemplateSc0 := statefulSetSc0.Spec.Template.Spec - assert.Len(t, podSpecTemplateSc0.Containers, 3, "Should have 2 containers now") - assert.Equal(t, util.AgentContainerName, podSpecTemplateSc0.Containers[0].Name, "Database container should always be first") - assert.Equal(t, "my-custom-container-sc", podSpecTemplateSc0.Containers[2].Name, "Custom container should be second") + assert.Len(t, podSpecTemplateSc0.Containers, 4, "Should have 4 containers (3 base + 1 custom)") + assert.Equal(t, util.AgentContainerName, podSpecTemplateSc0.Containers[0].Name, "Agent container should be first alphabetically") + assert.Equal(t, "my-custom-container-sc", podSpecTemplateSc0.Containers[len(podSpecTemplateSc0.Containers)-1].Name, "Custom container should be last") podSpecTemplateSc1 := statefulSetSc1.Spec.Template.Spec - assert.Len(t, podSpecTemplateSc1.Containers, 3, "Should have 2 containers now") - assert.Equal(t, util.AgentContainerName, podSpecTemplateSc1.Containers[0].Name, "Database container
should always be first") - assert.Equal(t, "my-custom-container-sc", podSpecTemplateSc1.Containers[2].Name, "Custom container should be second") + assert.Len(t, podSpecTemplateSc1.Containers, 4, "Should have 4 containers (3 base + 1 custom)") + assert.Equal(t, util.AgentContainerName, podSpecTemplateSc1.Containers[0].Name, "Agent container should be first alphabetically") + assert.Equal(t, "my-custom-container-sc", podSpecTemplateSc1.Containers[len(podSpecTemplateSc1.Containers)-1].Name, "Custom container should be last") podSpecTemplateMongoS := statefulSetMongoS.Spec.Template.Spec - assert.Len(t, podSpecTemplateMongoS.Containers, 3, "Should have 2 containers now") - assert.Equal(t, util.AgentContainerName, podSpecTemplateMongoS.Containers[0].Name, "Database container should always be first") - assert.Equal(t, "my-custom-container-mongos", podSpecTemplateMongoS.Containers[2].Name, "Custom container should be second") + assert.Len(t, podSpecTemplateMongoS.Containers, 4, "Should have 4 containers (3 base + 1 custom)") + assert.Equal(t, util.AgentContainerName, podSpecTemplateMongoS.Containers[0].Name, "Agent container should be first alphabetically") + assert.Equal(t, "my-custom-container-mongos", podSpecTemplateMongoS.Containers[len(podSpecTemplateMongoS.Containers)-1].Name, "Custom container should be last") podSpecTemplateScConfig := statefulSetScConfig.Spec.Template.Spec - assert.Len(t, podSpecTemplateScConfig.Containers, 3, "Should have 2 containers now") - assert.Equal(t, util.AgentContainerName, podSpecTemplateScConfig.Containers[0].Name, "Database container should always be first") - assert.Equal(t, "my-custom-container-config", podSpecTemplateScConfig.Containers[2].Name, "Custom container should be second") + assert.Len(t, podSpecTemplateScConfig.Containers, 4, "Should have 4 containers (3 base + 1 custom)") + assert.Equal(t, util.AgentContainerName, podSpecTemplateScConfig.Containers[0].Name, "Agent container should be first alphabetically") + assert.Equal(t, "my-custom-container-config", podSpecTemplateScConfig.Containers[len(podSpecTemplateScConfig.Containers)-1].Name, "Custom container should be last") } func TestFeatureControlsNoAuth(t *testing.T) { diff --git a/controllers/operator/mongodbstandalone_controller_test.go b/controllers/operator/mongodbstandalone_controller_test.go index 1663b24bd..f6438ed68 100644 --- a/controllers/operator/mongodbstandalone_controller_test.go +++ b/controllers/operator/mongodbstandalone_controller_test.go @@ -128,11 +128,10 @@ func TestStandaloneClusterReconcileContainerImagesWithStaticArchitecture(t *test assert.NoError(t, err) assert.Len(t, sts.Spec.Template.Spec.InitContainers, 0) - require.Len(t, sts.Spec.Template.Spec.Containers, 2) + require.Len(t, sts.Spec.Template.Spec.Containers, 3) - // Version from OM + operator version - assert.Equal(t, "quay.io/mongodb/mongodb-agent-ubi:12.0.30.7791-1_9.9.9-test", sts.Spec.Template.Spec.Containers[0].Image) - assert.Equal(t, "quay.io/mongodb/mongodb-enterprise-server:@sha256:MONGODB_DATABASE", sts.Spec.Template.Spec.Containers[1].Image) + // Version from OM + VerifyStaticContainers(t, sts.Spec.Template.Spec.Containers) } // TestOnAddStandaloneWithDelay checks the reconciliation on standalone creation with some "delay" in getting diff --git a/docker/mongodb-agent-non-matrix/Dockerfile b/docker/mongodb-agent-non-matrix/Dockerfile deleted file mode 100644 index dadfc7099..000000000 --- a/docker/mongodb-agent-non-matrix/Dockerfile +++ /dev/null @@ -1,75 +0,0 @@ -FROM scratch AS base - -ARG agent_version 
-ARG agent_distro -ARG tools_version -ARG tools_distro - -ADD https://mciuploads.s3.amazonaws.com/mms-automation/mongodb-mms-build-agent/builds/automation-agent/prod/mongodb-mms-automation-agent-${agent_version}.${agent_distro}.tar.gz /data/mongodb-agent.tar.gz -ADD https://downloads.mongodb.org/tools/db/mongodb-database-tools-${tools_distro}-${tools_version}.tgz /data/mongodb-tools.tgz - -COPY ./docker/mongodb-kubernetes-init-database/content/LICENSE /data/LICENSE - -FROM registry.access.redhat.com/ubi9/ubi-minimal - -ARG version - -LABEL name="MongoDB Agent" \ - version="${version}" \ - summary="MongoDB Agent" \ - description="MongoDB Agent" \ - vendor="MongoDB" \ - release="1" \ - maintainer="support@mongodb.com" - -# Replace libcurl-minimal and curl-minimal with the full versions -# https://bugzilla.redhat.com/show_bug.cgi?id=1994521 -RUN microdnf install -y libssh libpsl libbrotli \ - && microdnf download curl libcurl \ - && rpm -Uvh --nodeps --replacefiles "*curl*$( uname -i ).rpm" \ - && microdnf remove -y libcurl-minimal curl-minimal - -RUN microdnf install -y --disableplugin=subscription-manager --setopt=install_weak_deps=0 nss_wrapper -# Copy-pasted from https://www.mongodb.com/docs/manual/tutorial/install-mongodb-enterprise-on-red-hat-tarball/ -RUN microdnf install -y --disableplugin=subscription-manager \ - cyrus-sasl cyrus-sasl-gssapi cyrus-sasl-plain krb5-libs openldap openssl xz-libs -# Dependencies for the Agent -RUN microdnf install -y --disableplugin=subscription-manager --setopt=install_weak_deps=0 \ - net-snmp \ - net-snmp-agent-libs -RUN microdnf install -y --disableplugin=subscription-manager \ - hostname tar gzip procps jq \ - && microdnf upgrade -y \ - && rm -rf /var/lib/apt/lists/* - -RUN mkdir -p /agent \ - && mkdir -p /var/lib/mongodb-mms-automation \ - && mkdir -p /var/log/mongodb-mms-automation/ \ - && chmod -R +wr /var/log/mongodb-mms-automation/ \ - # ensure that the agent user can write the logs in OpenShift - && touch /var/log/mongodb-mms-automation/readiness.log \ - && chmod ugo+rw /var/log/mongodb-mms-automation/readiness.log - - -COPY --from=base /data/mongodb-agent.tar.gz /agent -COPY --from=base /data/mongodb-tools.tgz /agent -COPY --from=base /data/LICENSE /licenses/LICENSE - -# Copy scripts to a safe location that won't be overwritten by volume mount -COPY --from=base /opt/scripts/agent-launcher-shim.sh /usr/local/bin/agent-launcher-shim.sh -COPY --from=base /opt/scripts/setup-agent-files.sh /usr/local/bin/setup-agent-files.sh -COPY --from=base /opt/scripts/dummy-probe.sh /usr/local/bin/dummy-probe.sh -COPY --from=base /opt/scripts/dummy-readinessprobe.sh /usr/local/bin/dummy-readinessprobe - -RUN tar xfz /agent/mongodb-agent.tar.gz \ - && mv mongodb-mms-automation-agent-*/mongodb-mms-automation-agent /agent/mongodb-agent \ - && chmod +x /agent/mongodb-agent \ - && mkdir -p /var/lib/automation/config \ - && chmod -R +r /var/lib/automation/config \ - && rm /agent/mongodb-agent.tar.gz \ - && rm -r mongodb-mms-automation-agent-* - -RUN tar xfz /agent/mongodb-tools.tgz --directory /var/lib/mongodb-mms-automation/ && rm /agent/mongodb-tools.tgz - -USER 2000 -CMD ["/agent/mongodb-agent", "-cluster=/var/lib/automation/config/automation-config.json"] diff --git a/docker/mongodb-agent-non-matrix/Dockerfile.builder b/docker/mongodb-agent-non-matrix/Dockerfile.builder deleted file mode 100644 index ac4dd31f0..000000000 --- a/docker/mongodb-agent-non-matrix/Dockerfile.builder +++ /dev/null @@ -1,15 +0,0 @@ -FROM scratch - -ARG agent_version -ARG agent_distro 
-ARG tools_distro -ARG tools_version - -ADD https://mciuploads.s3.amazonaws.com/mms-automation/mongodb-mms-build-agent/builds/automation-agent/prod/mongodb-mms-automation-agent-${agent_version}.${agent_distro}.tar.gz /data/mongodb-agent.tar.gz -ADD https://downloads.mongodb.org/tools/db/mongodb-database-tools-${tools_distro}-${tools_version}.tgz /data/mongodb-tools.tgz - -COPY ./docker/mongodb-kubernetes-init-database/content/LICENSE /data/LICENSE -COPY ./docker/mongodb-agent/agent-launcher-shim.sh /opt/scripts/agent-launcher-shim.sh -COPY ./docker/mongodb-agent/setup-agent-files.sh /opt/scripts/setup-agent-files.sh -COPY ./docker/mongodb-agent/dummy-probe.sh /opt/scripts/dummy-probe.sh -COPY ./docker/mongodb-agent/dummy-readinessprobe.sh /opt/scripts/dummy-readinessprobe.sh diff --git a/docker/mongodb-agent-non-matrix/Dockerfile.old b/docker/mongodb-agent-non-matrix/Dockerfile.old deleted file mode 100644 index e1c1caff2..000000000 --- a/docker/mongodb-agent-non-matrix/Dockerfile.old +++ /dev/null @@ -1,60 +0,0 @@ -ARG imagebase -FROM ${imagebase} as base - -FROM registry.access.redhat.com/ubi9/ubi-minimal - -ARG version - -LABEL name="MongoDB Agent" \ - version="${version}" \ - summary="MongoDB Agent" \ - description="MongoDB Agent" \ - vendor="MongoDB" \ - release="1" \ - maintainer="support@mongodb.com" - -# Replace libcurl-minimal and curl-minimal with the full versions -# https://bugzilla.redhat.com/show_bug.cgi?id=1994521 -RUN microdnf install -y libssh libpsl libbrotli \ - && microdnf download curl libcurl \ - && rpm -Uvh --nodeps --replacefiles "*curl*$( uname -i ).rpm" \ - && microdnf remove -y libcurl-minimal curl-minimal - -RUN microdnf install -y --disableplugin=subscription-manager --setopt=install_weak_deps=0 nss_wrapper -# Copy-pasted from https://www.mongodb.com/docs/manual/tutorial/install-mongodb-enterprise-on-red-hat-tarball/ -RUN microdnf install -y --disableplugin=subscription-manager \ - cyrus-sasl cyrus-sasl-gssapi cyrus-sasl-plain krb5-libs openldap openssl xz-libs -# Dependencies for the Agent -RUN microdnf install -y --disableplugin=subscription-manager --setopt=install_weak_deps=0 \ - net-snmp \ - net-snmp-agent-libs -RUN microdnf install -y --disableplugin=subscription-manager \ - hostname tar gzip procps jq \ - && microdnf upgrade -y \ - && rm -rf /var/lib/apt/lists/* - -RUN mkdir -p /agent \ - && mkdir -p /var/lib/mongodb-mms-automation \ - && mkdir -p /var/log/mongodb-mms-automation/ \ - && chmod -R +wr /var/log/mongodb-mms-automation/ \ - # ensure that the agent user can write the logs in OpenShift - && touch /var/log/mongodb-mms-automation/readiness.log \ - && chmod ugo+rw /var/log/mongodb-mms-automation/readiness.log - - -COPY --from=base /data/mongodb-agent.tar.gz /agent -COPY --from=base /data/mongodb-tools.tgz /agent -COPY --from=base /data/LICENSE /licenses/LICENSE - -RUN tar xfz /agent/mongodb-agent.tar.gz \ - && mv mongodb-mms-automation-agent-*/mongodb-mms-automation-agent /agent/mongodb-agent \ - && chmod +x /agent/mongodb-agent \ - && mkdir -p /var/lib/automation/config \ - && chmod -R +r /var/lib/automation/config \ - && rm /agent/mongodb-agent.tar.gz \ - && rm -r mongodb-mms-automation-agent-* - -RUN tar xfz /agent/mongodb-tools.tgz --directory /var/lib/mongodb-mms-automation/ && rm /agent/mongodb-tools.tgz - -USER 2000 -CMD ["/agent/mongodb-agent", "-cluster=/var/lib/automation/config/automation-config.json"] diff --git a/docker/mongodb-agent-non-matrix/README.md b/docker/mongodb-agent-non-matrix/README.md deleted file mode 100644 index 
79dc0d2d5..000000000 --- a/docker/mongodb-agent-non-matrix/README.md +++ /dev/null @@ -1,17 +0,0 @@ -### Building locally - -For building the MongoDB Agent (non-static) image locally use the example command: - -TODO: What to do with label quay.expires-after=48h? -```bash -AGENT_VERSION="108.0.7.8810-1" -TOOLS_VERSION="100.12.0" -AGENT_DISTRO="rhel9_x86_64" -TOOLS_DISTRO="rhel93-x86_64" -docker buildx build --load --progress plain . -f docker/mongodb-agent/Dockerfile -t "mongodb-agent:${AGENT_VERSION}" \ - --build-arg version="${VERSION}" \ - --build-arg agent_version="${AGENT_VERSION}" \ - --build-arg tools_version="${TOOLS_VERSION}" \ - --build-arg agent_distro="${AGENT_DISTRO}" \ - --build-arg tools_distro="${TOOLS_DISTRO}" -``` diff --git a/docker/mongodb-agent/Dockerfile b/docker/mongodb-agent/Dockerfile index 5ec4e127b..cd5eccf08 100644 --- a/docker/mongodb-agent/Dockerfile +++ b/docker/mongodb-agent/Dockerfile @@ -1,40 +1,19 @@ -# the init database image gets supplied by pipeline.py and corresponds to the operator version we want to release -# the agent with. This enables us to release the agent for older operator. -ARG init_database_image -FROM ${init_database_image} AS init_database - -FROM public.ecr.aws/docker/library/golang:1.24 AS dependency_downloader - -WORKDIR /go/src/github.com/mongodb/mongodb-kubernetes/ - -COPY go.mod go.sum ./ - -RUN go mod download - -FROM public.ecr.aws/docker/library/golang:1.24 AS readiness_builder - -WORKDIR /go/src/github.com/mongodb/mongodb-kubernetes/ - -COPY --from=dependency_downloader /go/pkg /go/pkg -COPY . /go/src/github.com/mongodb/mongodb-kubernetes - -RUN CGO_ENABLED=0 GOFLAGS=-buildvcs=false go build -o /readinessprobe ./mongodb-community-operator/cmd/readiness/main.go -RUN CGO_ENABLED=0 GOFLAGS=-buildvcs=false go build -o /version-upgrade-hook ./mongodb-community-operator/cmd/versionhook/main.go - FROM scratch AS base -ARG mongodb_tools_url_ubi -ARG mongodb_agent_url_ubi -COPY --from=readiness_builder /readinessprobe /data/ -COPY --from=readiness_builder /version-upgrade-hook /data/ +ARG agent_version +ARG agent_distro +ARG tools_version +ARG tools_distro -ADD ${mongodb_tools_url_ubi} /data/mongodb_tools_ubi.tgz -ADD ${mongodb_agent_url_ubi} /data/mongodb_agent_ubi.tgz +ADD https://mciuploads.s3.amazonaws.com/mms-automation/mongodb-mms-build-agent/builds/automation-agent/prod/mongodb-mms-automation-agent-${agent_version}.${agent_distro}.tar.gz /data/mongodb-agent.tar.gz +ADD https://downloads.mongodb.org/tools/db/mongodb-database-tools-${tools_distro}-${tools_version}.tgz /data/mongodb-tools.tgz -COPY --from=init_database /probes/probe.sh /data/probe.sh -COPY --from=init_database /scripts/agent-launcher-lib.sh /data/ -COPY --from=init_database /scripts/agent-launcher.sh /data/ -COPY --from=init_database /licenses/LICENSE /data/ +COPY ./docker/mongodb-kubernetes-init-database/content/LICENSE /data/LICENSE +COPY ./docker/mongodb-kubernetes-init-database/content/LICENSE /data/LICENSE +COPY ./docker/mongodb-agent/agent-launcher-shim.sh /opt/scripts/agent-launcher-shim.sh +COPY ./docker/mongodb-agent/setup-agent-files.sh /opt/scripts/setup-agent-files.sh +COPY ./docker/mongodb-agent/dummy-probe.sh /opt/scripts/dummy-probe.sh +COPY ./docker/mongodb-agent/dummy-readinessprobe.sh /opt/scripts/dummy-readinessprobe.sh FROM registry.access.redhat.com/ubi9/ubi-minimal @@ -48,13 +27,6 @@ LABEL name="MongoDB Agent" \ release="1" \ maintainer="support@mongodb.com" -COPY --from=base /data/probe.sh /opt/scripts/probe.sh -COPY --from=base 
/data/readinessprobe /opt/scripts/readinessprobe -COPY --from=base /data/version-upgrade-hook /opt/scripts/version-upgrade-hook -COPY --from=base /data/agent-launcher-lib.sh /opt/scripts/agent-launcher-lib.sh -COPY --from=base /data/agent-launcher.sh /opt/scripts/agent-launcher.sh -COPY --from=base /data/LICENSE /licenses/LICENSE - # Replace libcurl-minimal and curl-minimal with the full versions # https://bugzilla.redhat.com/show_bug.cgi?id=1994521 RUN microdnf install -y libssh libpsl libbrotli \ @@ -75,25 +47,34 @@ RUN microdnf install -y --disableplugin=subscription-manager \ && microdnf upgrade -y \ && rm -rf /var/lib/apt/lists/* +RUN mkdir -p /agent \ + && mkdir -p /var/lib/mongodb-mms-automation \ + && mkdir -p /var/log/mongodb-mms-automation/ \ + && chmod -R +wr /var/log/mongodb-mms-automation/ \ + # ensure that the agent user can write the logs in OpenShift + && touch /var/log/mongodb-mms-automation/readiness.log \ + && chmod ugo+rw /var/log/mongodb-mms-automation/readiness.log -COPY --from=base /data/mongodb_tools_ubi.tgz /tools/mongodb_tools.tgz -COPY --from=base /data/mongodb_agent_ubi.tgz /agent/mongodb_agent.tgz -RUN tar xfz /tools/mongodb_tools.tgz -RUN mv mongodb-database-tools-*/bin/* /tools -RUN chmod +x /tools/* -RUN rm /tools/mongodb_tools.tgz -RUN rm -rf /mongodb-database-tools-* +COPY --from=base /data/mongodb-agent.tar.gz /agent +COPY --from=base /data/mongodb-tools.tgz /agent +COPY --from=base /data/LICENSE /licenses/LICENSE -RUN tar xfz /agent/mongodb_agent.tgz -RUN mv mongodb-mms-automation-agent-*/mongodb-mms-automation-agent /agent/mongodb-agent -RUN chmod +x /agent/mongodb-agent -RUN rm /agent/mongodb_agent.tgz -RUN rm -rf mongodb-mms-automation-agent-* +# Copy scripts to a safe location that won't be overwritten by volume mount +COPY --from=base /opt/scripts/agent-launcher-shim.sh /usr/local/bin/agent-launcher-shim.sh +COPY --from=base /opt/scripts/setup-agent-files.sh /usr/local/bin/setup-agent-files.sh +COPY --from=base /opt/scripts/dummy-probe.sh /usr/local/bin/dummy-probe.sh +COPY --from=base /opt/scripts/dummy-readinessprobe.sh /usr/local/bin/dummy-readinessprobe -RUN mkdir -p /var/lib/automation/config -RUN chmod -R +r /var/lib/automation/config +RUN tar xfz /agent/mongodb-agent.tar.gz \ + && mv mongodb-mms-automation-agent-*/mongodb-mms-automation-agent /agent/mongodb-agent \ + && chmod +x /agent/mongodb-agent \ + && mkdir -p /var/lib/automation/config \ + && chmod -R +r /var/lib/automation/config \ + && rm /agent/mongodb-agent.tar.gz \ + && rm -r mongodb-mms-automation-agent-* -USER 2000 +RUN tar xfz /agent/mongodb-tools.tgz --directory /var/lib/mongodb-mms-automation/ && rm /agent/mongodb-tools.tgz -HEALTHCHECK --timeout=30s CMD ls /opt/scripts/readinessprobe || exit 1 +USER 2000 +CMD ["/agent/mongodb-agent", "-cluster=/var/lib/automation/config/automation-config.json"] diff --git a/docker/mongodb-agent/Dockerfile.builder b/docker/mongodb-agent/Dockerfile.builder index bfdf4c969..ac4dd31f0 100644 --- a/docker/mongodb-agent/Dockerfile.builder +++ b/docker/mongodb-agent/Dockerfile.builder @@ -1,37 +1,15 @@ -# the init database image gets supplied by pipeline.py and corresponds to the operator version we want to release -# the agent with. This enables us to release the agent for older operator. 
-ARG init_database_image -FROM ${init_database_image} as init_database - -FROM public.ecr.aws/docker/library/golang:1.24 as dependency_downloader - -WORKDIR /go/src/github.com/mongodb/mongodb-kubernetes/ - -COPY go.mod go.sum ./ - -RUN go mod download - -FROM public.ecr.aws/docker/library/golang:1.24 as readiness_builder - -WORKDIR /go/src/github.com/mongodb/mongodb-kubernetes/ - -COPY --from=dependency_downloader /go/pkg /go/pkg -COPY . /go/src/github.com/mongodb/mongodb-kubernetes - -RUN CGO_ENABLED=0 GOFLAGS=-buildvcs=false go build -o /readinessprobe ./mongodb-community-operator/cmd/readiness/main.go -RUN CGO_ENABLED=0 GOFLAGS=-buildvcs=false go build -o /version-upgrade-hook ./mongodb-community-operator/cmd/versionhook/main.go - FROM scratch -ARG mongodb_tools_url_ubi -ARG mongodb_agent_url_ubi -COPY --from=readiness_builder /readinessprobe /data/ -COPY --from=readiness_builder /version-upgrade-hook /data/ +ARG agent_version +ARG agent_distro +ARG tools_distro +ARG tools_version -ADD ${mongodb_tools_url_ubi} /data/mongodb_tools_ubi.tgz -ADD ${mongodb_agent_url_ubi} /data/mongodb_agent_ubi.tgz +ADD https://mciuploads.s3.amazonaws.com/mms-automation/mongodb-mms-build-agent/builds/automation-agent/prod/mongodb-mms-automation-agent-${agent_version}.${agent_distro}.tar.gz /data/mongodb-agent.tar.gz +ADD https://downloads.mongodb.org/tools/db/mongodb-database-tools-${tools_distro}-${tools_version}.tgz /data/mongodb-tools.tgz -COPY --from=init_database /probes/probe.sh /data/probe.sh -COPY --from=init_database /scripts/agent-launcher-lib.sh /data/ -COPY --from=init_database /scripts/agent-launcher.sh /data/ -COPY --from=init_database /licenses/LICENSE /data/ +COPY ./docker/mongodb-kubernetes-init-database/content/LICENSE /data/LICENSE +COPY ./docker/mongodb-agent/agent-launcher-shim.sh /opt/scripts/agent-launcher-shim.sh +COPY ./docker/mongodb-agent/setup-agent-files.sh /opt/scripts/setup-agent-files.sh +COPY ./docker/mongodb-agent/dummy-probe.sh /opt/scripts/dummy-probe.sh +COPY ./docker/mongodb-agent/dummy-readinessprobe.sh /opt/scripts/dummy-readinessprobe.sh diff --git a/docker/mongodb-agent/Dockerfile.old b/docker/mongodb-agent/Dockerfile.old index 08d8746d8..80d5c8da6 100644 --- a/docker/mongodb-agent/Dockerfile.old +++ b/docker/mongodb-agent/Dockerfile.old @@ -13,13 +13,6 @@ LABEL name="MongoDB Agent" \ release="1" \ maintainer="support@mongodb.com" -COPY --from=base /data/probe.sh /opt/scripts/probe.sh -COPY --from=base /data/readinessprobe /opt/scripts/readinessprobe -COPY --from=base /data/version-upgrade-hook /opt/scripts/version-upgrade-hook -COPY --from=base /data/agent-launcher-lib.sh /opt/scripts/agent-launcher-lib.sh -COPY --from=base /data/agent-launcher.sh /opt/scripts/agent-launcher.sh -COPY --from=base /data/LICENSE /licenses/LICENSE - # Replace libcurl-minimal and curl-minimal with the full versions # https://bugzilla.redhat.com/show_bug.cgi?id=1994521 RUN microdnf install -y libssh libpsl libbrotli \ @@ -40,25 +33,33 @@ RUN microdnf install -y --disableplugin=subscription-manager \ && microdnf upgrade -y \ && rm -rf /var/lib/apt/lists/* +RUN mkdir -p /agent \ + && mkdir -p /var/lib/mongodb-mms-automation \ + && mkdir -p /var/log/mongodb-mms-automation/ \ + && chmod -R +wr /var/log/mongodb-mms-automation/ \ + # ensure that the agent user can write the logs in OpenShift + && touch /var/log/mongodb-mms-automation/readiness.log \ + && chmod ugo+rw /var/log/mongodb-mms-automation/readiness.log -COPY --from=base /data/mongodb_tools_ubi.tgz /tools/mongodb_tools.tgz -COPY 
--from=base /data/mongodb_agent_ubi.tgz /agent/mongodb_agent.tgz +COPY --from=base /data/mongodb-agent.tar.gz /agent +COPY --from=base /data/mongodb-tools.tgz /agent +COPY --from=base /data/LICENSE /licenses/LICENSE -RUN tar xfz /tools/mongodb_tools.tgz -RUN mv mongodb-database-tools-*/bin/* /tools -RUN chmod +x /tools/* -RUN rm /tools/mongodb_tools.tgz -RUN rm -rf /mongodb-database-tools-* +# Copy scripts to a safe location that won't be overwritten by volume mount +COPY --from=base /opt/scripts/agent-launcher-shim.sh /usr/local/bin/agent-launcher-shim.sh +COPY --from=base /opt/scripts/setup-agent-files.sh /usr/local/bin/setup-agent-files.sh +COPY --from=base /opt/scripts/dummy-probe.sh /usr/local/bin/dummy-probe.sh +COPY --from=base /opt/scripts/dummy-readinessprobe.sh /usr/local/bin/dummy-readinessprobe -RUN tar xfz /agent/mongodb_agent.tgz -RUN mv mongodb-mms-automation-agent-*/mongodb-mms-automation-agent /agent/mongodb-agent -RUN chmod +x /agent/mongodb-agent -RUN rm /agent/mongodb_agent.tgz -RUN rm -rf mongodb-mms-automation-agent-* +RUN tar xfz /agent/mongodb-agent.tar.gz \ + && mv mongodb-mms-automation-agent-*/mongodb-mms-automation-agent /agent/mongodb-agent \ + && chmod +x /agent/mongodb-agent \ + && mkdir -p /var/lib/automation/config \ + && chmod -R +r /var/lib/automation/config \ + && rm /agent/mongodb-agent.tar.gz \ + && rm -r mongodb-mms-automation-agent-* -RUN mkdir -p /var/lib/automation/config -RUN chmod -R +r /var/lib/automation/config +RUN tar xfz /agent/mongodb-tools.tgz --directory /var/lib/mongodb-mms-automation/ && rm /agent/mongodb-tools.tgz USER 2000 - -HEALTHCHECK --timeout=30s CMD ls /opt/scripts/readinessprobe || exit 1 +CMD ["/agent/mongodb-agent", "-cluster=/var/lib/automation/config/automation-config.json"] diff --git a/docker/mongodb-agent-non-matrix/agent-launcher-shim.sh b/docker/mongodb-agent/agent-launcher-shim.sh old mode 100644 new mode 100755 similarity index 70% rename from docker/mongodb-agent-non-matrix/agent-launcher-shim.sh rename to docker/mongodb-agent/agent-launcher-shim.sh index fda61405b..4d735bad6 --- a/docker/mongodb-agent-non-matrix/agent-launcher-shim.sh +++ b/docker/mongodb-agent/agent-launcher-shim.sh @@ -3,6 +3,9 @@ set -e SCRIPTS_DIR="/opt/scripts" +# Note: Signal handling is now managed by agent-launcher.sh which becomes PID 1 +# after exec. The cleanup function in agent-launcher.sh includes lock file cleanup. + # Function to start the agent launcher start_agent_launcher() { echo "Starting agent launcher..." @@ -11,6 +14,7 @@ start_agent_launcher() { if [[ -f "$SCRIPTS_DIR/agent-launcher.sh" ]]; then echo "Found agent-launcher.sh, executing..." 
+ echo "Note: agent-launcher.sh will become PID 1 and handle all signal processing including cleanup" exec "$SCRIPTS_DIR/agent-launcher.sh" else echo "ERROR: agent-launcher.sh not found" diff --git a/docker/mongodb-agent-non-matrix/dummy-probe.sh b/docker/mongodb-agent/dummy-probe.sh similarity index 100% rename from docker/mongodb-agent-non-matrix/dummy-probe.sh rename to docker/mongodb-agent/dummy-probe.sh diff --git a/docker/mongodb-agent-non-matrix/dummy-readinessprobe.sh b/docker/mongodb-agent/dummy-readinessprobe.sh similarity index 100% rename from docker/mongodb-agent-non-matrix/dummy-readinessprobe.sh rename to docker/mongodb-agent/dummy-readinessprobe.sh diff --git a/docker/mongodb-agent-non-matrix/setup-agent-files.sh b/docker/mongodb-agent/setup-agent-files.sh old mode 100644 new mode 100755 similarity index 100% rename from docker/mongodb-agent-non-matrix/setup-agent-files.sh rename to docker/mongodb-agent/setup-agent-files.sh diff --git a/docker/mongodb-kubernetes-tests/kubetester/kubetester.py b/docker/mongodb-kubernetes-tests/kubetester/kubetester.py index c2147bedc..2b0c5c596 100644 --- a/docker/mongodb-kubernetes-tests/kubetester/kubetester.py +++ b/docker/mongodb-kubernetes-tests/kubetester/kubetester.py @@ -69,6 +69,13 @@ def is_default_architecture_static() -> bool: return os.getenv("MDB_DEFAULT_ARCHITECTURE", "non-static") == "static" +def assert_container_count_with_static(current_container_count: int, expected_counter_without_static: int): + if is_default_architecture_static(): + assert current_container_count == expected_counter_without_static + 1 + else: + assert current_container_count == expected_counter_without_static + + def get_default_architecture() -> str: return "static" if is_default_architecture_static() else "non-static" diff --git a/docker/mongodb-kubernetes-tests/tests/opsmanager/om_jvm_params.py b/docker/mongodb-kubernetes-tests/tests/opsmanager/om_jvm_params.py index 69616ae9b..6cdfa8aa4 100644 --- a/docker/mongodb-kubernetes-tests/tests/opsmanager/om_jvm_params.py +++ b/docker/mongodb-kubernetes-tests/tests/opsmanager/om_jvm_params.py @@ -8,7 +8,7 @@ from kubetester.opsmanager import MongoDBOpsManager from kubetester.phase import Phase from pytest import fixture, mark -from tests.conftest import assert_log_rotation_process, is_multi_cluster +from tests.conftest import is_multi_cluster from tests.opsmanager.withMonitoredAppDB.conftest import enable_multi_cluster_deployment OM_CONF_PATH_DIR = "mongodb-ops-manager/conf/mms.conf" diff --git a/docker/mongodb-kubernetes-tests/tests/opsmanager/withMonitoredAppDB/om_ops_manager_pod_spec.py b/docker/mongodb-kubernetes-tests/tests/opsmanager/withMonitoredAppDB/om_ops_manager_pod_spec.py index 8d593eedd..3258ebbd1 100644 --- a/docker/mongodb-kubernetes-tests/tests/opsmanager/withMonitoredAppDB/om_ops_manager_pod_spec.py +++ b/docker/mongodb-kubernetes-tests/tests/opsmanager/withMonitoredAppDB/om_ops_manager_pod_spec.py @@ -9,6 +9,7 @@ from kubernetes import client from kubetester import try_load from kubetester.custom_podspec import assert_volume_mounts_are_equal +from kubetester.kubetester import assert_container_count_with_static from kubetester.kubetester import fixture as yaml_fixture from kubetester.kubetester import is_default_architecture_static from kubetester.opsmanager import MongoDBOpsManager @@ -79,19 +80,23 @@ def test_backup_1_pod_becomes_ready(self, ops_manager: MongoDBOpsManager): def test_appdb_pod_template_containers(self, ops_manager: MongoDBOpsManager): appdb_sts = 
ops_manager.read_appdb_statefulset() - assert len(appdb_sts.spec.template.spec.containers) == 4 + assert_container_count_with_static(len(appdb_sts.spec.template.spec.containers), 4) assert appdb_sts.spec.template.spec.service_account_name == APPDB_SA_NAME - appdb_agent_container = appdb_sts.spec.template.spec.containers[2] - assert appdb_agent_container.name == "mongodb-agent" + containers_by_name = {container.name: container for container in appdb_sts.spec.template.spec.containers} + + assert "mongodb-agent" in containers_by_name, "mongodb-agent container not found" + assert "appdb-sidecar" in containers_by_name, "appdb-sidecar container not found" + + appdb_agent_container = containers_by_name["mongodb-agent"] assert appdb_agent_container.resources.limits["cpu"] == "750m" assert appdb_agent_container.resources.limits["memory"] == "850M" - assert appdb_sts.spec.template.spec.containers[0].name == "appdb-sidecar" - assert appdb_sts.spec.template.spec.containers[0].image == "busybox" - assert appdb_sts.spec.template.spec.containers[0].command == ["sleep"] - assert appdb_sts.spec.template.spec.containers[0].args == ["infinity"] + appdb_sidecar_container = containers_by_name["appdb-sidecar"] + assert appdb_sidecar_container.image == "busybox" + assert appdb_sidecar_container.command == ["sleep"] + assert appdb_sidecar_container.args == ["infinity"] def test_appdb_persistence(self, ops_manager: MongoDBOpsManager, namespace: str): # appdb pod volume claim template @@ -362,16 +367,15 @@ def test_backup_1_pod_becomes_ready(self, ops_manager: MongoDBOpsManager): def test_appdb_pod_template(self, ops_manager: MongoDBOpsManager): appdb_sts = ops_manager.read_appdb_statefulset() - assert len(appdb_sts.spec.template.spec.containers) == 4 - - appdb_mongod_container = appdb_sts.spec.template.spec.containers[1] - assert appdb_mongod_container.name == "mongod" + assert_container_count_with_static(len(appdb_sts.spec.template.spec.containers), 4) - appdb_agent_container = appdb_sts.spec.template.spec.containers[2] - assert appdb_agent_container.name == "mongodb-agent" + # Find each container by name instead of position + containers_by_name = {c.name: c for c in appdb_sts.spec.template.spec.containers} - appdb_agent_monitoring_container = appdb_sts.spec.template.spec.containers[3] - assert appdb_agent_monitoring_container.name == "mongodb-agent-monitoring" + # Check that all required containers exist + assert "mongod" in containers_by_name, "mongod container not found" + assert "mongodb-agent" in containers_by_name, "mongodb-agent container not found" + assert "mongodb-agent-monitoring" in containers_by_name, "mongodb-agent-monitoring container not found" assert appdb_sts.spec.template.metadata.annotations == {"annotation1": "val"} diff --git a/docker/mongodb-kubernetes-tests/tests/shardedcluster/sharded_cluster_custom_podspec.py b/docker/mongodb-kubernetes-tests/tests/shardedcluster/sharded_cluster_custom_podspec.py index 909bb0761..efffcc770 100644 --- a/docker/mongodb-kubernetes-tests/tests/shardedcluster/sharded_cluster_custom_podspec.py +++ b/docker/mongodb-kubernetes-tests/tests/shardedcluster/sharded_cluster_custom_podspec.py @@ -101,18 +101,24 @@ def test_stateful_sets_spec_updated(sc: MongoDB): if is_default_architecture_static(): containers = shard0_sts.spec.template.spec.containers - assert len(containers) == 3 - assert containers[0].name == "mongodb-agent" - assert containers[1].name == "mongodb-enterprise-database" - assert containers[2].name == "sharded-cluster-sidecar-override" + 
container_names = [container.name for container in containers] + + assert len(containers) == 4 + assert "mongodb-agent" in container_names + assert "mongodb-enterprise-database" in container_names + assert "mongodb-agent-operator-utilities" in container_names + assert "sharded-cluster-sidecar-override" in container_names containers = shard1_sts.spec.template.spec.containers - assert len(containers) == 3 - assert containers[0].name == "mongodb-agent" - assert containers[1].name == "mongodb-enterprise-database" - assert containers[2].name == "sharded-cluster-sidecar" + container_names = [container.name for container in containers] + + assert len(containers) == 4 + assert "mongodb-agent" in container_names + assert "mongodb-enterprise-database" in container_names + assert "mongodb-agent-operator-utilities" in container_names + assert "sharded-cluster-sidecar" in container_names - resources = containers[2].resources + resources = containers[3].resources else: containers = shard1_sts.spec.template.spec.containers assert len(containers) == 2 diff --git a/docker/mongodb-kubernetes-tests/tests/standalone/standalone_custom_podspec.py b/docker/mongodb-kubernetes-tests/tests/standalone/standalone_custom_podspec.py index 66080f0e5..5aebc1ea7 100644 --- a/docker/mongodb-kubernetes-tests/tests/standalone/standalone_custom_podspec.py +++ b/docker/mongodb-kubernetes-tests/tests/standalone/standalone_custom_podspec.py @@ -26,16 +26,18 @@ def test_stateful_set_spec_updated(standalone, namespace): assert_stateful_set_podspec(sts.spec.template.spec, weight=50, topology_key="mykey", grace_period_seconds=10) containers = sts.spec.template.spec.containers + container_names = [container.name for container in containers] if is_default_architecture_static(): - assert len(containers) == 3 - assert containers[0].name == "mongodb-agent" - assert containers[1].name == "mongodb-enterprise-database" - assert containers[2].name == "standalone-sidecar" + assert len(containers) == 4 + assert "mongodb-agent" in container_names + assert "mongodb-enterprise-database" in container_names + assert "mongodb-agent-operator-utilities" in container_names + assert "standalone-sidecar" in container_names else: assert len(containers) == 2 - assert containers[0].name == "mongodb-enterprise-database" - assert containers[1].name == "standalone-sidecar" + assert "standalone-sidecar" in container_names + assert "mongodb-enterprise-database" in container_names labels = sts.spec.template.metadata.labels assert labels["label1"] == "value1" diff --git a/docker/mongodb-kubernetes-tests/tests/vaultintegration/mongodb_deployment_vault.py b/docker/mongodb-kubernetes-tests/tests/vaultintegration/mongodb_deployment_vault.py index d9dd72b65..b6ad82285 100644 --- a/docker/mongodb-kubernetes-tests/tests/vaultintegration/mongodb_deployment_vault.py +++ b/docker/mongodb-kubernetes-tests/tests/vaultintegration/mongodb_deployment_vault.py @@ -357,7 +357,7 @@ def test_mdb_created(replica_set: MongoDB, namespace: str): for pod_name in get_pods(MDB_RESOURCE + "-{}", 3): pod = client.CoreV1Api().read_namespaced_pod(pod_name, namespace) if is_default_architecture_static(): - assert len(pod.spec.containers) == 3 + assert len(pod.spec.containers) == 4 else: assert len(pod.spec.containers) == 2 diff --git a/docker/mongodb-kubernetes-tests/tests/vaultintegration/om_backup_vault.py b/docker/mongodb-kubernetes-tests/tests/vaultintegration/om_backup_vault.py index 991cd5a82..ca555b181 100644 --- a/docker/mongodb-kubernetes-tests/tests/vaultintegration/om_backup_vault.py 
+++ b/docker/mongodb-kubernetes-tests/tests/vaultintegration/om_backup_vault.py @@ -15,7 +15,7 @@ from kubetester.awss3client import AwsS3Client, s3_endpoint from kubetester.certs import create_mongodb_tls_certs, create_ops_manager_tls_certs from kubetester.http import https_endpoint_is_reachable -from kubetester.kubetester import KubernetesTester +from kubetester.kubetester import KubernetesTester, assert_container_count_with_static from kubetester.kubetester import fixture as yaml_fixture from kubetester.kubetester import get_pods from kubetester.mongodb import MongoDB @@ -452,7 +452,7 @@ def test_appdb_reached_running_and_pod_count(ops_manager: MongoDBOpsManager, nam # check AppDB has 4 containers(+1 because of vault-agent) for pod_name in get_pods(ops_manager.name + "-db-{}", 3): pod = client.CoreV1Api().read_namespaced_pod(pod_name, namespace) - assert len(pod.spec.containers) == 4 + assert_container_count_with_static(len(pod.spec.containers), 4) @mark.e2e_vault_setup_om_backup diff --git a/docker/mongodb-kubernetes-tests/tests/vaultintegration/om_deployment_vault.py b/docker/mongodb-kubernetes-tests/tests/vaultintegration/om_deployment_vault.py index 7d3c87ba6..75bb5b3a2 100644 --- a/docker/mongodb-kubernetes-tests/tests/vaultintegration/om_deployment_vault.py +++ b/docker/mongodb-kubernetes-tests/tests/vaultintegration/om_deployment_vault.py @@ -12,9 +12,9 @@ read_secret, ) from kubetester.certs import create_mongodb_tls_certs, create_ops_manager_tls_certs -from kubetester.kubetester import KubernetesTester +from kubetester.kubetester import KubernetesTester, assert_container_count_with_static from kubetester.kubetester import fixture as yaml_fixture -from kubetester.kubetester import get_pods +from kubetester.kubetester import get_pods, is_default_architecture_static from kubetester.operator import Operator from kubetester.opsmanager import MongoDBOpsManager from kubetester.phase import Phase @@ -277,7 +277,7 @@ def test_appdb_reached_running_and_pod_count(ops_manager: MongoDBOpsManager, nam # check AppDB has 4 containers(+1 because of vault-agent) for pod_name in get_pods(ops_manager.name + "-db-{}", 3): pod = client.CoreV1Api().read_namespaced_pod(pod_name, namespace) - assert len(pod.spec.containers) == 4 + assert_container_count_with_static(len(pod.spec.containers), 4) @mark.e2e_vault_setup_om diff --git a/docker/mongodb-kubernetes-tests/tests/vaultintegration/vault_tls.py b/docker/mongodb-kubernetes-tests/tests/vaultintegration/vault_tls.py index 86ea083b9..2c6c9dc7d 100644 --- a/docker/mongodb-kubernetes-tests/tests/vaultintegration/vault_tls.py +++ b/docker/mongodb-kubernetes-tests/tests/vaultintegration/vault_tls.py @@ -313,7 +313,7 @@ def test_mdb_created(replica_set: MongoDB, namespace: str): for pod_name in get_pods(MDB_RESOURCE + "-{}", 3): pod = client.CoreV1Api().read_namespaced_pod(pod_name, namespace) if is_default_architecture_static(): - assert len(pod.spec.containers) == 3 + assert len(pod.spec.containers) == 4 else: assert len(pod.spec.containers) == 2 diff --git a/inventories/agent.yaml b/inventories/agent.yaml index 42f5eaa21..876e4027b 100644 --- a/inventories/agent.yaml +++ b/inventories/agent.yaml @@ -3,58 +3,62 @@ vars: s3_bucket: s3://enterprise-operator-dockerfiles/dockerfiles/mongodb-agent images: -- name: mongodb-agent - vars: - context: . 
- template_context: docker/mongodb-agent - platform: linux/amd64 - - stages: - - name: mongodb-agent-build-context - task_type: docker_build - dockerfile: docker/mongodb-agent/Dockerfile.builder - buildargs: - mongodb_tools_url_ubi: $(inputs.params.mongodb_tools_url_ubi) - mongodb_agent_url_ubi: $(inputs.params.mongodb_agent_url_ubi) - init_database_image: $(inputs.params.init_database_image) - output: - - registry: $(inputs.params.registry)/mongodb-agent-ubi - tag: $(inputs.params.version)-context - - - name: mongodb-agent-build-context-release - task_type: docker_build - tags: ["release"] - dockerfile: docker/mongodb-agent/Dockerfile.builder - buildargs: - mongodb_tools_url_ubi: $(inputs.params.mongodb_tools_url_ubi) - mongodb_agent_url_ubi: $(inputs.params.mongodb_agent_url_ubi) - init_database_image: $(inputs.params.init_database_image) - output: - - registry: $(inputs.params.quay_registry) - tag: $(inputs.params.version)-context - - - name: mongodb-agent-build-ubi - task_type: docker_build - buildargs: - imagebase: $(inputs.params.registry)/mongodb-agent-ubi:$(inputs.params.version)-context - version: $(inputs.params.version) - dockerfile: docker/mongodb-agent/Dockerfile.old - output: - - registry: $(inputs.params.registry)/mongodb-agent-ubi - tag: $(inputs.params.version) - - - name: master-latest - task_type: tag_image - tags: [ "master" ] - source: - registry: $(inputs.params.registry)/mongodb-agent-ubi - tag: $(inputs.params.version) - destination: - - registry: $(inputs.params.registry)/mongodb-agent-ubi - tag: $(inputs.params.agent_version)_latest - - - name: mongodb-agent-template-ubi - task_type: dockerfile_template - tags: ["release"] - output: - - dockerfile: $(inputs.params.s3_bucket)/$(inputs.params.version)/ubi/Dockerfile + - name: mongodb-agent-ubi + vars: + context: . 
+ template_context: docker/mongodb-agent + + platform: linux/$(inputs.params.architecture) + stages: + - name: mongodb-agent-context + task_type: docker_build + dockerfile: docker/mongodb-agent/Dockerfile.builder + tags: [ "ubi" ] + buildargs: + agent_version: $(inputs.params.version) + tools_version: $(inputs.params.tools_version) + agent_distro: $(inputs.params.agent_distro) + tools_distro: $(inputs.params.tools_distro) + + labels: + quay.expires-after: 48h + + output: + - registry: $(inputs.params.registry)/mongodb-agent-ubi + tag: $(inputs.params.version)-context-$(inputs.params.architecture) + + - name: mongodb-agent-build-context-release + task_type: docker_build + tags: ["release"] + dockerfile: docker/mongodb-agent/Dockerfile.builder + buildargs: + agent_version: $(inputs.params.version) + tools_version: $(inputs.params.tools_version) + agent_distro: $(inputs.params.agent_distro) + tools_distro: $(inputs.params.tools_distro) + output: + - registry: $(inputs.params.quay_registry) + tag: $(inputs.params.version)-context-$(inputs.params.architecture) + + - name: mongodb-agent-build + task_type: docker_build + tags: [ "ubi" ] + buildargs: + imagebase: $(inputs.params.registry)/mongodb-agent-ubi:$(inputs.params.version)-context-$(inputs.params.architecture) + version: $(inputs.params.version) + dockerfile: docker/mongodb-agent/Dockerfile.old + + labels: + quay.expires-after: 48h + + output: + - registry: $(inputs.params.registry)/mongodb-agent-ubi + tag: $(inputs.params.version)-$(inputs.params.architecture) + - registry: $(inputs.params.registry)/mongodb-agent-ubi + tag: latest-$(inputs.params.architecture) + + - name: mongodb-agent-template-ubi + task_type: dockerfile_template + tags: ["release"] + output: + - dockerfile: $(inputs.params.s3_bucket)/$(inputs.params.version)/ubi/Dockerfile diff --git a/inventories/agent_non_matrix.yaml b/inventories/agent_non_matrix.yaml deleted file mode 100644 index 08531d9ed..000000000 --- a/inventories/agent_non_matrix.yaml +++ /dev/null @@ -1,64 +0,0 @@ -vars: - quay_registry: quay.io/mongodb/mongodb-agent-ubi - s3_bucket: s3://enterprise-operator-dockerfiles/dockerfiles/mongodb-agent - -images: - - name: mongodb-agent - vars: - context: . 
- template_context: docker/mongodb-agent-non-matrix - - platform: linux/$(inputs.params.architecture) - stages: - - name: mongodb-agent-context - task_type: docker_build - dockerfile: docker/mongodb-agent-non-matrix/Dockerfile.builder - tags: [ "ubi" ] - buildargs: - agent_version: $(inputs.params.version) - tools_version: $(inputs.params.tools_version) - agent_distro: $(inputs.params.agent_distro) - tools_distro: $(inputs.params.tools_distro) - - labels: - quay.expires-after: 48h - - output: - - registry: $(inputs.params.registry)/mongodb-agent-ubi - tag: $(inputs.params.version)-context-$(inputs.params.architecture) - - - name: mongodb-agent-build-context-release - task_type: docker_build - tags: ["release"] - dockerfile: docker/mongodb-agent-non-matrix/Dockerfile.builder - buildargs: - agent_version: $(inputs.params.version) - tools_version: $(inputs.params.tools_version) - agent_distro: $(inputs.params.agent_distro) - tools_distro: $(inputs.params.tools_distro) - output: - - registry: $(inputs.params.quay_registry) - tag: $(inputs.params.version)-context-$(inputs.params.architecture) - - - name: mongodb-agent-build - task_type: docker_build - tags: [ "ubi" ] - buildargs: - imagebase: $(inputs.params.registry)/mongodb-agent-ubi:$(inputs.params.version)-context-$(inputs.params.architecture) - version: $(inputs.params.version) - dockerfile: docker/mongodb-agent-non-matrix/Dockerfile.old - - labels: - quay.expires-after: 48h - - output: - - registry: $(inputs.params.registry)/mongodb-agent-ubi - tag: $(inputs.params.version)-$(inputs.params.architecture) - - registry: $(inputs.params.registry)/mongodb-agent-ubi - tag: latest-$(inputs.params.architecture) - - - name: mongodb-agent-template-ubi - task_type: dockerfile_template - tags: ["release"] - output: - - dockerfile: $(inputs.params.s3_bucket)/$(inputs.params.version)/ubi/Dockerfile diff --git a/lib/sonar/builders/docker.py b/lib/sonar/builders/docker.py index 80a5daf8b..14c2bf91a 100644 --- a/lib/sonar/builders/docker.py +++ b/lib/sonar/builders/docker.py @@ -1,4 +1,5 @@ import random +import shutil import subprocess from typing import Dict, Optional @@ -101,7 +102,12 @@ def get_docker_build_cli_args( labels=Optional[Dict[str, str]], platform=Optional[str], ): - args = ["docker", "buildx", "build", "--load", "--progress", "plain", path, "-f", dockerfile, "-t", tag] + # Find docker executable dynamically to work across different environments + docker_cmd = shutil.which("docker") + if docker_cmd is None: + raise Exception("Docker executable not found in PATH") + + args = [docker_cmd, "buildx", "build", "--load", "--progress", "plain", path, "-f", dockerfile, "-t", tag] if buildargs is not None: for k, v in buildargs.items(): args.append("--build-arg") @@ -174,13 +180,17 @@ def check_registry_image_exists(repository, tag): @TRACER.start_as_current_span("docker_push") def docker_push(registry: str, tag: str): + docker_cmd = shutil.which("docker") + if docker_cmd is None: + raise Exception("Docker executable not found in PATH") + def inner_docker_push(should_raise=False): # We can't use docker-py here # as it doesn't support DOCKER_CONTENT_TRUST # env variable, which could be needed cp = subprocess.run( - ["docker", "push", f"{registry}:{tag}"], + [docker_cmd, "push", f"{registry}:{tag}"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) diff --git a/lib/sonar/test/test_build.py b/lib/sonar/test/test_build.py index 99d98b709..bb1bd848b 100644 --- a/lib/sonar/test/test_build.py +++ b/lib/sonar/test/test_build.py @@ -111,12 +111,13 @@ def 
test_platform_is_passed_to_docker_build(_docker_build, _docker_tag): _docker_build.assert_called() -def test_get_docker_build_cli_args(): - assert "docker buildx build --load --progress plain . -f dockerfile -t image:latest" == " ".join( +@patch("sonar.builders.docker.shutil.which", return_value="/mock/path/to/docker") +def test_get_docker_build_cli_args(mock_which): + assert "/mock/path/to/docker buildx build --load --progress plain . -f dockerfile -t image:latest" == " ".join( get_docker_build_cli_args(".", "dockerfile", "image:latest", None, None, None) ) assert ( - "docker buildx build --load --progress plain . -f dockerfile -t image:latest --build-arg a=1 --build-arg long_arg=long_value --label l1=v1 --label l2=v2 --platform linux/amd64" + "/mock/path/to/docker buildx build --load --progress plain . -f dockerfile -t image:latest --build-arg a=1 --build-arg long_arg=long_value --label l1=v1 --label l2=v2 --platform linux/amd64" == " ".join( get_docker_build_cli_args( ".", diff --git a/lib/sonar/test/test_docker.py b/lib/sonar/test/test_docker.py index e36840df0..370ed667c 100644 --- a/lib/sonar/test/test_docker.py +++ b/lib/sonar/test/test_docker.py @@ -1,5 +1,5 @@ from types import SimpleNamespace -from unittest.mock import Mock, call +from unittest.mock import Mock, call, patch import pytest from pytest_mock import MockerFixture @@ -8,7 +8,8 @@ from ..builders.docker import docker_push -def test_docker_push_is_retried(mocker: MockerFixture): +@patch("sonar.builders.docker.shutil.which", return_value="/mock/path/to/docker") +def test_docker_push_is_retried(mock_which, mocker: MockerFixture): a = SimpleNamespace(returncode=1, stderr="some-error") sp = mocker.patch("sonar.builders.docker.subprocess") sp.PIPE = "|PIPE|" @@ -20,16 +21,16 @@ def test_docker_push_is_retried(mocker: MockerFixture): # docker push is called 4 times, the last time it is called, it raises an exception sp.run.assert_has_calls( [ - call(["docker", "push", "reg:tag"], stdout="|PIPE|", stderr="|PIPE|"), - call(["docker", "push", "reg:tag"], stdout="|PIPE|", stderr="|PIPE|"), - call(["docker", "push", "reg:tag"], stdout="|PIPE|", stderr="|PIPE|"), - call(["docker", "push", "reg:tag"], stdout="|PIPE|", stderr="|PIPE|"), + call(["/mock/path/to/docker", "push", "reg:tag"], stdout="|PIPE|", stderr="|PIPE|"), + call(["/mock/path/to/docker", "push", "reg:tag"], stdout="|PIPE|", stderr="|PIPE|"), + call(["/mock/path/to/docker", "push", "reg:tag"], stdout="|PIPE|", stderr="|PIPE|"), + call(["/mock/path/to/docker", "push", "reg:tag"], stdout="|PIPE|", stderr="|PIPE|"), ] ) -def test_docker_push_is_retried_and_works(mocker: MockerFixture): - +@patch("sonar.builders.docker.shutil.which", return_value="/mock/path/to/docker") +def test_docker_push_is_retried_and_works(mock_which, mocker: MockerFixture): ok = SimpleNamespace(returncode=0) sp = mocker.patch("sonar.builders.docker.subprocess") sp.PIPE = "|PIPE|" @@ -39,7 +40,7 @@ def test_docker_push_is_retried_and_works(mocker: MockerFixture): docker_push("reg", "tag") sp.run.assert_called_once_with( - ["docker", "push", "reg:tag"], + ["/mock/path/to/docker", "push", "reg:tag"], stdout="|PIPE|", stderr="|PIPE|", ) diff --git a/mongodb-community-operator/controllers/construct/build_statefulset_test.go b/mongodb-community-operator/controllers/construct/build_statefulset_test.go index 42aaa27f8..c96e18741 100644 --- a/mongodb-community-operator/controllers/construct/build_statefulset_test.go +++ b/mongodb-community-operator/controllers/construct/build_statefulset_test.go @@ -34,7 
+34,7 @@ func newTestReplicaSet() mdbv1.MongoDBCommunity { func TestMultipleCalls_DoNotCauseSideEffects(t *testing.T) { mdb := newTestReplicaSet() - stsFunc := BuildMongoDBReplicaSetStatefulSetModificationFunction(&mdb, &mdb, "fake-mongodbImage", "fake-agentImage", "fake-versionUpgradeHookImage", "fake-readinessProbeImage", true) + stsFunc := BuildMongoDBReplicaSetStatefulSetModificationFunction(&mdb, &mdb, "fake-mongodbImage", "fake-agentImage", "fake-versionUpgradeHookImage", "fake-readinessProbeImage", true, "") sts := &appsv1.StatefulSet{} t.Run("1st Call", func(t *testing.T) { @@ -55,7 +55,7 @@ func TestManagedSecurityContext(t *testing.T) { t.Setenv(podtemplatespec.ManagedSecurityContextEnv, "true") mdb := newTestReplicaSet() - stsFunc := BuildMongoDBReplicaSetStatefulSetModificationFunction(&mdb, &mdb, "fake-mongodbImage", "fake-agentImage", "fake-versionUpgradeHookImage", "fake-readinessProbeImage", true) + stsFunc := BuildMongoDBReplicaSetStatefulSetModificationFunction(&mdb, &mdb, "fake-mongodbImage", "fake-agentImage", "fake-versionUpgradeHookImage", "fake-readinessProbeImage", true, "") sts := &appsv1.StatefulSet{} stsFunc(sts) @@ -65,7 +65,7 @@ func TestManagedSecurityContext(t *testing.T) { func TestMongod_Container(t *testing.T) { const mongodbImageMock = "fake-mongodbImage" - c := container.New(mongodbContainer(mongodbImageMock, []corev1.VolumeMount{}, mdbv1.NewMongodConfiguration())) + c := container.New(mongodbContainer(mongodbImageMock, []corev1.VolumeMount{}, mdbv1.NewMongodConfiguration(), false)) t.Run("Has correct Env vars", func(t *testing.T) { assert.Len(t, c.Env, 1) @@ -83,14 +83,14 @@ func TestMongod_Container(t *testing.T) { } func TestMongoDBAgentCommand(t *testing.T) { - cmd := AutomationAgentCommand(false, mdbv1.LogLevelInfo, "testfile", 24) + cmd := AutomationAgentCommand(false, false, mdbv1.LogLevelInfo, "testfile", 24) baseCmd := MongodbUserCommand + BaseAgentCommand() + " -cluster=" + clusterFilePath + automationAgentOptions assert.Len(t, cmd, 3) assert.Equal(t, cmd[0], "/bin/bash") assert.Equal(t, cmd[1], "-c") assert.Equal(t, cmd[2], baseCmd+" -logFile testfile -logLevel INFO -maxLogFileDurationHrs 24") - cmd = AutomationAgentCommand(false, mdbv1.LogLevelInfo, "/dev/stdout", 24) + cmd = AutomationAgentCommand(false, false, mdbv1.LogLevelInfo, "/dev/stdout", 24) assert.Len(t, cmd, 3) assert.Equal(t, cmd[0], "/bin/bash") assert.Equal(t, cmd[1], "-c") diff --git a/mongodb-community-operator/controllers/construct/mongodbstatefulset.go b/mongodb-community-operator/controllers/construct/mongodbstatefulset.go index bf0a16807..e0243e910 100644 --- a/mongodb-community-operator/controllers/construct/mongodbstatefulset.go +++ b/mongodb-community-operator/controllers/construct/mongodbstatefulset.go @@ -6,6 +6,7 @@ import ( "strconv" "k8s.io/apimachinery/pkg/types" + "k8s.io/utils/ptr" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" @@ -20,6 +21,7 @@ import ( "github.com/mongodb/mongodb-kubernetes/mongodb-community-operator/pkg/readiness/config" "github.com/mongodb/mongodb-kubernetes/mongodb-community-operator/pkg/util/scale" "github.com/mongodb/mongodb-kubernetes/pkg/statefulset" + "github.com/mongodb/mongodb-kubernetes/pkg/util" ) var OfficialMongodbRepoUrls = []string{"docker.io/mongodb", "quay.io/mongodb"} @@ -76,18 +78,6 @@ export NSS_WRAPPER_PASSWD=/tmp/passwd export LD_PRELOAD=libnss_wrapper.so export NSS_WRAPPER_GROUP=/etc/group fi -` - //nolint:gosec //The credentials path is hardcoded in the container. 
- MongodbUserCommandWithAPIKeyExport = `current_uid=$(id -u) -AGENT_API_KEY="$(cat /mongodb-automation/agent-api-key/agentApiKey)" -declare -r current_uid -if ! grep -q "${current_uid}" /etc/passwd ; then -sed -e "s/^mongodb:/builder:/" /etc/passwd > /tmp/passwd -echo "mongodb:x:$(id -u):$(id -g):,,,:/:/bin/bash" >> /tmp/passwd -export NSS_WRAPPER_PASSWD=/tmp/passwd -export LD_PRELOAD=libnss_wrapper.so -export NSS_WRAPPER_GROUP=/etc/group -fi ` ) @@ -130,7 +120,7 @@ type MongoDBStatefulSetOwner interface { // BuildMongoDBReplicaSetStatefulSetModificationFunction builds the parts of the replica set that are common between every resource that implements // MongoDBStatefulSetOwner. // It doesn't configure TLS or additional containers/env vars that the statefulset might need. -func BuildMongoDBReplicaSetStatefulSetModificationFunction(mdb MongoDBStatefulSetOwner, scaler scale.ReplicaSetScaler, mongodbImage, agentImage, versionUpgradeHookImage, readinessProbeImage string, withInitContainers bool) statefulset.Modification { +func BuildMongoDBReplicaSetStatefulSetModificationFunction(mdb MongoDBStatefulSetOwner, scaler scale.ReplicaSetScaler, mongodbImage, agentImage, versionUpgradeHookImage, readinessProbeImage string, withInitContainers bool, initAppDBImage string) statefulset.Modification { labels := map[string]string{ "app": mdb.ServiceName(), } @@ -167,27 +157,30 @@ func BuildMongoDBReplicaSetStatefulSetModificationFunction(mdb MongoDBStatefulSe } mongodVolumeMounts := []corev1.VolumeMount{mongodHealthStatusVolumeMount, keyFileVolumeVolumeMountMongod, tmpVolumeMount} - hooksVolumeMod := podtemplatespec.NOOP() - scriptsVolumeMod := podtemplatespec.NOOP() - - // This is temporary code; - // once we make the operator fully deploy static workloads, we will remove those init containers. - if withInitContainers { - // hooks volume is only required on the mongod pod. - hooksVolume = statefulset.CreateVolumeFromEmptyDir("hooks") - hooksVolumeMount := statefulset.CreateVolumeMount(hooksVolume.Name, "/hooks", statefulset.WithReadOnly(false)) - - // scripts volume is only required on the mongodb-agent pod. 
- scriptsVolume = statefulset.CreateVolumeFromEmptyDir("agent-scripts") - scriptsVolumeMount := statefulset.CreateVolumeMount(scriptsVolume.Name, "/opt/scripts", statefulset.WithReadOnly(false)) + hooksVolume = statefulset.CreateVolumeFromEmptyDir("hooks") + hooksVolumeMount := statefulset.CreateVolumeMount(hooksVolume.Name, "/hooks", statefulset.WithReadOnly(false)) + scriptsVolume = statefulset.CreateVolumeFromEmptyDir("agent-scripts") + scriptsVolumeMount := statefulset.CreateVolumeMount(scriptsVolume.Name, "/opt/scripts", statefulset.WithReadOnly(false)) - upgradeInitContainer = podtemplatespec.WithInitContainer(versionUpgradeHookName, versionUpgradeHookInit([]corev1.VolumeMount{hooksVolumeMount}, versionUpgradeHookImage)) - readinessInitContainer = podtemplatespec.WithInitContainer(ReadinessProbeContainerName, readinessProbeInit([]corev1.VolumeMount{scriptsVolumeMount}, readinessProbeImage)) - scriptsVolumeMod = podtemplatespec.WithVolume(scriptsVolume) - hooksVolumeMod = podtemplatespec.WithVolume(hooksVolume) + scriptsVolumeMod := podtemplatespec.WithVolume(scriptsVolume) + hooksVolumeMod := podtemplatespec.WithVolume(hooksVolume) + withStaticContainerModification := podtemplatespec.NOOP() + shareProcessNs := statefulset.NOOP() + // we need the upgrade hook and readinessProbe either via init containers or via a sidecar and /proc access; + // if we don't use init containers, we need to use static containers + if withInitContainers { mongodVolumeMounts = append(mongodVolumeMounts, hooksVolumeMount) mongodbAgentVolumeMounts = append(mongodbAgentVolumeMounts, scriptsVolumeMount) + upgradeInitContainer = podtemplatespec.WithInitContainer(versionUpgradeHookName, versionUpgradeHookInit([]corev1.VolumeMount{hooksVolumeMount}, versionUpgradeHookImage)) + readinessInitContainer = podtemplatespec.WithInitContainer(ReadinessProbeContainerName, readinessProbeInit([]corev1.VolumeMount{scriptsVolumeMount}, readinessProbeImage)) + } else { + staticMounts := []corev1.VolumeMount{hooksVolumeMount, scriptsVolumeMount, tmpVolumeMount} + withStaticContainerModification = podtemplatespec.WithContainer(util.AgentContainerUtilitiesName, mongodbAgentUtilitiesContainer(staticMounts, initAppDBImage)) + mongodbAgentVolumeMounts = append(mongodbAgentVolumeMounts, staticMounts...) 
+ shareProcessNs = func(sts *appsv1.StatefulSet) { + sts.Spec.Template.Spec.ShareProcessNamespace = ptr.To(true) + } } dataVolumeClaim := statefulset.NOOP() @@ -238,6 +231,7 @@ func BuildMongoDBReplicaSetStatefulSetModificationFunction(mdb MongoDBStatefulSe dataVolumeClaim, logVolumeClaim, singleModeVolumeClaim, + shareProcessNs, statefulset.WithPodSpecTemplate( podtemplatespec.Apply( podSecurityContext, @@ -250,7 +244,8 @@ func BuildMongoDBReplicaSetStatefulSetModificationFunction(mdb MongoDBStatefulSe podtemplatespec.WithVolume(keyFileVolume), podtemplatespec.WithServiceAccount(mongodbDatabaseServiceAccountName), podtemplatespec.WithContainer(AgentName, mongodbAgentContainer(mdb.AutomationConfigSecretName(), mongodbAgentVolumeMounts, agentLogLevel, agentLogFile, agentMaxLogFileDurationHours, agentImage)), - podtemplatespec.WithContainer(MongodbName, mongodbContainer(mongodbImage, mongodVolumeMounts, mdb.GetMongodConfiguration())), + podtemplatespec.WithContainer(MongodbName, mongodbContainer(mongodbImage, mongodVolumeMounts, mdb.GetMongodConfiguration(), !withInitContainers)), + withStaticContainerModification, upgradeInitContainer, readinessInitContainer, ), @@ -263,7 +258,7 @@ func BaseAgentCommand() string { // AutomationAgentCommand withAgentAPIKeyExport detects whether we want to deploy this agent with the agent api key exported // it can be used to register the agent with OM. -func AutomationAgentCommand(withAgentAPIKeyExport bool, logLevel mdbv1.LogLevel, logFile string, maxLogFileDurationHours int) []string { +func AutomationAgentCommand(withStatic bool, withAgentAPIKeyExport bool, logLevel mdbv1.LogLevel, logFile string, maxLogFileDurationHours int) []string { // This is somewhat undocumented at https://www.mongodb.com/docs/ops-manager/current/reference/mongodb-agent-settings/ // Not setting the -logFile option make the mongodb-agent log to stdout. Setting -logFile /dev/stdout will result in // an error by the agent trying to open /dev/stdout-verbose and still trying to do log rotation. @@ -277,11 +272,31 @@ func AutomationAgentCommand(withAgentAPIKeyExport bool, logLevel mdbv1.LogLevel, } if withAgentAPIKeyExport { - return []string{"/bin/bash", "-c", MongodbUserCommandWithAPIKeyExport + BaseAgentCommand() + " -cluster=" + clusterFilePath + automationAgentOptions + agentLogOptions} + return []string{"/bin/bash", "-c", GetMongodbUserCommandWithAPIKeyExport(withStatic) + BaseAgentCommand() + " -cluster=" + clusterFilePath + automationAgentOptions + agentLogOptions} } return []string{"/bin/bash", "-c", MongodbUserCommand + BaseAgentCommand() + " -cluster=" + clusterFilePath + automationAgentOptions + agentLogOptions} } +func GetMongodbUserCommandWithAPIKeyExport(withStatic bool) string { + agentPrepareScript := "" + if withStatic { + agentPrepareScript = "/usr/local/bin/setup-agent-files.sh\n" + } + + //nolint:gosec //The credentials path is hardcoded in the container. + return fmt.Sprintf(`%scurrent_uid=$(id -u) +AGENT_API_KEY="$(cat /mongodb-automation/agent-api-key/agentApiKey)" +declare -r current_uid +if ! 
grep -q "${current_uid}" /etc/passwd ; then +sed -e "s/^mongodb:/builder:/" /etc/passwd > /tmp/passwd +echo "mongodb:x:$(id -u):$(id -g):,,,:/:/bin/bash" >> /tmp/passwd +export NSS_WRAPPER_PASSWD=/tmp/passwd +export LD_PRELOAD=libnss_wrapper.so +export NSS_WRAPPER_GROUP=/etc/group +fi +`, agentPrepareScript) +} + func mongodbAgentContainer(automationConfigSecretName string, volumeMounts []corev1.VolumeMount, logLevel mdbv1.LogLevel, logFile string, maxLogFileDurationHours int, agentImage string) container.Modification { _, containerSecurityContext := podtemplatespec.WithDefaultSecurityContextsModifications() return container.Apply( @@ -291,7 +306,7 @@ func mongodbAgentContainer(automationConfigSecretName string, volumeMounts []cor container.WithReadinessProbe(DefaultReadiness()), container.WithResourceRequirements(resourcerequirements.Defaults()), container.WithVolumeMounts(volumeMounts), - container.WithCommand(AutomationAgentCommand(false, logLevel, logFile, maxLogFileDurationHours)), + container.WithCommand(AutomationAgentCommand(false, false, logLevel, logFile, maxLogFileDurationHours)), containerSecurityContext, container.WithEnvs( corev1.EnvVar{ @@ -319,6 +334,20 @@ func mongodbAgentContainer(automationConfigSecretName string, volumeMounts []cor ) } +func mongodbAgentUtilitiesContainer(volumeMounts []corev1.VolumeMount, initDatabaseImage string) container.Modification { + _, containerSecurityContext := podtemplatespec.WithDefaultSecurityContextsModifications() + return container.Apply( + container.WithName(util.AgentContainerUtilitiesName), + container.WithImage(initDatabaseImage), + container.WithImagePullPolicy(corev1.PullAlways), + container.WithResourceRequirements(resourcerequirements.Defaults()), + container.WithVolumeMounts(volumeMounts), + container.WithCommand([]string{"bash", "-c", "touch /tmp/agent-utilities-holder_marker && tail -F -n0 /tmp/agent-utilities-holder_marker"}), + container.WithArgs([]string{""}), + containerSecurityContext, + ) +} + func versionUpgradeHookInit(volumeMount []corev1.VolumeMount, versionUpgradeHookImage string) container.Modification { _, containerSecurityContext := podtemplatespec.WithDefaultSecurityContextsModifications() return container.Apply( @@ -371,9 +400,64 @@ func readinessProbeInit(volumeMount []corev1.VolumeMount, readinessProbeImage st ) } -func mongodbContainer(mongodbImage string, volumeMounts []corev1.VolumeMount, additionalMongoDBConfig mdbv1.MongodConfiguration) container.Modification { - filePath := additionalMongoDBConfig.GetDBDataDir() + "/" + automationMongodConfFileName - mongoDbCommand := fmt.Sprintf(` +// buildSignalHandling returns the signal handling setup for static architecture +func buildSignalHandling() string { + return fmt.Sprintf(` +# Signal handler for graceful shutdown in shared PID namespace +cleanup() { + # Important! Keep this in sync with DefaultPodTerminationPeriodSeconds constant from constants.go + termination_timeout_seconds=%d + + echo "MongoDB container received SIGTERM, shutting down gracefully..." + + if [ -n "$MONGOD_PID" ] && kill -0 "$MONGOD_PID" 2>/dev/null; then + echo "Sending SIGTERM to mongod process $MONGOD_PID" + kill -15 "$MONGOD_PID" + + echo "Waiting until mongod process is shutdown. Note, that if mongod process fails to shutdown in the time specified by the 'terminationGracePeriodSeconds' property (default ${termination_timeout_seconds} seconds) then the container will be killed by Kubernetes." 
+ + # Use the same robust waiting mechanism as agent-launcher-lib.sh + # We cannot use 'wait' for processes started in background, use spinning loop + while [ -e "/proc/${MONGOD_PID}" ]; do + sleep 0.1 + done + + echo "mongod process has exited" + fi + + echo "MongoDB container shutdown complete" + exit 0 +} + +# Set up signal handler for static architecture +trap cleanup SIGTERM +`, util.DefaultPodTerminationPeriodSeconds) +} + +// buildMongodExecution returns the mongod execution command based on architecture +// in static we run /pause as pid1 and we need to ensure to redirect sigterm to the mongod process +func buildMongodExecution(filePath string, isStatic bool) string { + if isStatic { + return fmt.Sprintf(`mongod -f %s & +MONGOD_PID=$! +echo "Started mongod with PID $MONGOD_PID" + +# Wait for mongod to finish +wait "$MONGOD_PID"`, filePath) + } + return fmt.Sprintf("exec mongod -f %s", filePath) +} + +// buildMongodbCommand constructs the complete MongoDB container command +func buildMongodbCommand(filePath string, isStatic bool) string { + signalHandling := "" + if isStatic { + signalHandling = buildSignalHandling() + } + + mongodExec := buildMongodExecution(filePath, isStatic) + + return fmt.Sprintf(`%s if [ -e "/hooks/version-upgrade" ]; then #run post-start hook to handle version changes (if exists) /hooks/version-upgrade @@ -393,8 +477,13 @@ sleep 15 # start mongod with this configuration echo "Starting mongod..." -exec mongod -f %s; -`, filePath, keyfileFilePath, filePath) +%s +`, signalHandling, filePath, keyfileFilePath, mongodExec) +} + +func mongodbContainer(mongodbImage string, volumeMounts []corev1.VolumeMount, additionalMongoDBConfig mdbv1.MongodConfiguration, isStatic bool) container.Modification { + filePath := additionalMongoDBConfig.GetDBDataDir() + "/" + automationMongodConfFileName + mongoDbCommand := buildMongodbCommand(filePath, isStatic) containerCommand := []string{ "/bin/sh", diff --git a/mongodb-community-operator/controllers/construct/mongodbstatefulset_test.go b/mongodb-community-operator/controllers/construct/mongodbstatefulset_test.go index ed93b1ec8..adb2ad9cc 100644 --- a/mongodb-community-operator/controllers/construct/mongodbstatefulset_test.go +++ b/mongodb-community-operator/controllers/construct/mongodbstatefulset_test.go @@ -7,6 +7,7 @@ import ( corev1 "k8s.io/api/core/v1" + mdbv1 "github.com/mongodb/mongodb-kubernetes/mongodb-community-operator/api/v1" "github.com/mongodb/mongodb-kubernetes/mongodb-community-operator/pkg/readiness/config" ) @@ -98,3 +99,56 @@ func TestCollectEnvVars(t *testing.T) { }) } } + +func TestMongodbContainer_SignalHandling(t *testing.T) { + tests := []struct { + name string + isStatic bool + wantExec bool + }{ + { + name: "Non-static architecture uses exec mongod", + isStatic: false, + wantExec: true, + }, + { + name: "Static architecture uses trap and background mongod", + isStatic: true, + wantExec: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + mongodConfig := mdbv1.NewMongodConfiguration() + mongodConfig.SetOption("storage.dbPath", "/data") + + containerMod := mongodbContainer("test-image", []corev1.VolumeMount{}, mongodConfig, tt.isStatic) + + testContainer := &corev1.Container{} + containerMod(testContainer) + + assert.Len(t, testContainer.Command, 3) + assert.Equal(t, "/bin/sh", testContainer.Command[0]) + assert.Equal(t, "-c", testContainer.Command[1]) + commandScript := testContainer.Command[2] + + if tt.isStatic { + assert.Contains(t, commandScript, "trap cleanup 
SIGTERM", "Static architecture should include signal trap") + assert.Contains(t, commandScript, "cleanup() {", "Static architecture should include cleanup function") + assert.Contains(t, commandScript, "mongod -f /data/automation-mongod.conf &", "Static architecture should run mongod in background") + assert.Contains(t, commandScript, "wait \"$MONGOD_PID\"", "Static architecture should wait for mongod process") + assert.Contains(t, commandScript, "termination_timeout_seconds", "Static architecture should include timeout configuration") + assert.Contains(t, commandScript, "while [ -e \"/proc/${MONGOD_PID}\" ]", "Static architecture should include robust process waiting") + assert.Contains(t, commandScript, "kill -15 \"$MONGOD_PID\"", "Static architecture should send SIGTERM to mongod") + } else { + assert.NotContains(t, commandScript, "trap cleanup SIGTERM", "Non-static architecture should not include signal trap") + assert.NotContains(t, commandScript, "cleanup() {", "Non-static architecture should not include cleanup function") + assert.Contains(t, commandScript, "exec mongod -f /data/automation-mongod.conf", "Non-static architecture should exec mongod") + } + + assert.Contains(t, commandScript, "Waiting for config and keyfile files to be created by the agent", "Should wait for agent files") + assert.Contains(t, commandScript, "Starting mongod...", "Should start mongod") + }) + } +} diff --git a/mongodb-community-operator/controllers/replica_set_controller.go b/mongodb-community-operator/controllers/replica_set_controller.go index 6c0190fb0..ba3e5b168 100644 --- a/mongodb-community-operator/controllers/replica_set_controller.go +++ b/mongodb-community-operator/controllers/replica_set_controller.go @@ -792,7 +792,7 @@ func getMongodConfigSearchModification(search *searchv1.MongoDBSearch) automatio // buildStatefulSetModificationFunction takes a MongoDB resource and converts it into // the corresponding stateful set func buildStatefulSetModificationFunction(mdb mdbv1.MongoDBCommunity, mongodbImage, agentImage, versionUpgradeHookImage, readinessProbeImage string) statefulset.Modification { - commonModification := construct.BuildMongoDBReplicaSetStatefulSetModificationFunction(&mdb, &mdb, mongodbImage, agentImage, versionUpgradeHookImage, readinessProbeImage, true) + commonModification := construct.BuildMongoDBReplicaSetStatefulSetModificationFunction(&mdb, &mdb, mongodbImage, agentImage, versionUpgradeHookImage, readinessProbeImage, true, "") return statefulset.Apply( commonModification, statefulset.WithOwnerReference(mdb.GetOwnerReferences()), diff --git a/mongodb-community-operator/pkg/kube/podtemplatespec/podspec_template.go b/mongodb-community-operator/pkg/kube/podtemplatespec/podspec_template.go index 355730375..1b17f69b0 100644 --- a/mongodb-community-operator/pkg/kube/podtemplatespec/podspec_template.go +++ b/mongodb-community-operator/pkg/kube/podtemplatespec/podspec_template.go @@ -66,6 +66,19 @@ func WithContainerByIndex(index int, funcs ...func(container *corev1.Container)) } } +// WithContainerByIndexIfExists applies the modifications to the container with the provided index +// only if the container already exists. If the index is out of range, no changes are made. 
+func WithContainerByIndexIfExists(index int, funcs ...func(container *corev1.Container)) func(podTemplateSpec *corev1.PodTemplateSpec) { + return func(podTemplateSpec *corev1.PodTemplateSpec) { + if index < len(podTemplateSpec.Spec.Containers) { + c := &podTemplateSpec.Spec.Containers[index] + for _, f := range funcs { + f(c) + } + } + } +} + // WithInitContainer applies the modifications to the init container with the provided name func WithInitContainer(name string, containerfunc func(*corev1.Container)) Modification { return func(podTemplateSpec *corev1.PodTemplateSpec) { diff --git a/pipeline.py b/pipeline.py index ee48ed919..2aac7fe40 100755 --- a/pipeline.py +++ b/pipeline.py @@ -14,6 +14,7 @@ import sys import tarfile import time +import traceback from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor from dataclasses import dataclass from datetime import datetime, timedelta, timezone @@ -110,6 +111,7 @@ class BuildConfiguration: architecture: Optional[List[str]] = None sign: bool = False all_agents: bool = False + agent_to_build: str = "" pipeline: bool = True debug: bool = True @@ -163,6 +165,7 @@ def operator_build_configuration( sign: bool = False, all_agents: bool = False, parallel_factor: int = 0, + agent_to_build: str = "", ) -> BuildConfiguration: bc = BuildConfiguration( image_type=os.environ.get("distro", DEFAULT_IMAGE_TYPE), @@ -177,6 +180,7 @@ def operator_build_configuration( architecture=architecture, sign=sign, parallel_factor=parallel_factor, + agent_to_build=agent_to_build, ) logger.info(f"is_running_in_patch: {is_running_in_patch()}") @@ -306,10 +310,14 @@ def create_and_push_manifest(image: str, tag: str, architectures: list[str]) -> This method calls docker directly on the command line, this is different from the rest of the code which uses Sonar as an interface to docker. We decided to keep this asymmetry for now, as Sonar will be removed soon. 
""" + docker_cmd = shutil.which("docker") + if docker_cmd is None: + raise Exception("Docker executable not found in PATH") + final_manifest = image + ":" + tag args = [ - "docker", + docker_cmd, "manifest", "create", final_manifest, @@ -325,7 +333,7 @@ def create_and_push_manifest(image: str, tag: str, architectures: list[str]) -> if cp.returncode != 0: raise Exception(cp.stderr) - args = ["docker", "manifest", "push", final_manifest] + args = [docker_cmd, "manifest", "push", final_manifest] args_str = " ".join(args) logger.info(f"pushing new manifest: {args_str}") cp = subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) @@ -523,15 +531,12 @@ def build_operator_image(build_configuration: BuildConfiguration): current_span.set_attribute("mck.image_name", image_name) current_span.set_attribute("mck.architecture", architectures) - ecr_registry = os.environ.get("BASE_REPO_URL", "268558157000.dkr.ecr.us-east-1.amazonaws.com/dev") - base_repo = QUAY_REGISTRY_URL if build_configuration.is_release_step_executed() else ecr_registry - build_image_generic( config=build_configuration, image_name=image_name, inventory_file="inventory.yaml", - registry_address=f"{base_repo}/{image_name}", multi_arch_args_list=multi_arch_args_list, + with_image_base=False, is_multi_arch=True, ) @@ -672,7 +677,6 @@ def args_for_daily_image(image_name: str) -> Dict[str, str]: image_config("init-ops-manager", ubi_suffix=""), image_config("mongodb-kubernetes", name_prefix="", ubi_suffix=""), image_config("ops-manager", name_prefix="mongodb-enterprise-"), - image_config("mongodb-agent", name_prefix="", ubi_suffix="-ubi", base_suffix="-ubi"), image_config( image_name="mongodb-kubernetes-operator", name_prefix="", @@ -690,6 +694,13 @@ def args_for_daily_image(image_name: str) -> Dict[str, str]: ubi_suffix="", s3_bucket="enterprise-operator-dockerfiles", ), + image_config( + image_name="mongodb-agent", + name_prefix="", + s3_bucket="enterprise-operator-dockerfiles", + ubi_suffix="-ubi", + base_suffix="-ubi", + ), ] images = {k: v for k, v in image_configs} @@ -1003,7 +1014,7 @@ def build_om_image(build_configuration: BuildConfiguration): image_name="ops-manager", inventory_file="inventories/om.yaml", extra_args=args, - registry_address=f"{QUAY_REGISTRY_URL}/mongodb-enterprise-ops-manager", + registry_address_override=f"{QUAY_REGISTRY_URL}/mongodb-enterprise-ops-manager", ) @@ -1013,80 +1024,92 @@ def build_image_generic( image_name: str, inventory_file: str, extra_args: dict = None, - registry_address: str = None, + with_image_base: bool = True, is_multi_arch: bool = False, multi_arch_args_list: list = None, is_run_in_parallel: bool = False, + registry_address_override: str = "", ): """Build image generic builds context images and is used for triggering release. During releases it signs and verifies the context image. The release process uses the daily images build process. + The with_image_base parameter determines whether the image being built should include a base image prefix. 
+ When set to True, the function prepends "mongodb-kubernetes-" to the image name """ + image_base = "" + if with_image_base: + image_base = "mongodb-kubernetes-" if not multi_arch_args_list: multi_arch_args_list = [extra_args or {}] + version = multi_arch_args_list[0].get("version", "") - version = multi_arch_args_list[0].get("version", "") # the version is the same in multi-arch for each item - registry = f"{QUAY_REGISTRY_URL}/mongodb-kubernetes-{image_name}" if not registry_address else registry_address - - for args in multi_arch_args_list: # in case we are building multiple architectures - args["quay_registry"] = registry - sonar_build_image(image_name, config, args, inventory_file, False) - if is_multi_arch: - # we only push the manifests of the context images here, - # since daily rebuilds will push the manifests for the proper images later - architectures = [v["architecture"] for v in multi_arch_args_list] - create_and_push_manifest(registry_address, f"{version}-context", architectures=architectures) - if not config.is_release_step_executed(): - # Normally daily rebuild would create and push the manifests for the non-context images. - # But since we don't run daily rebuilds on ecr image builds, we can do that step instead here. - # We only need to push manifests for multi-arch images. - create_and_push_manifest(registry_address, version, architectures=architectures) - latest_tag = "latest" - if not is_running_in_patch() and is_running_in_evg_pipeline(): - logger.info(f"Tagging and pushing {registry_address}:{version} as {latest_tag}") - try: - client = docker.from_env() - source_image = client.images.pull(f"{registry_address}:{version}") - source_image.tag(registry_address, latest_tag) - client.images.push(registry_address, tag=latest_tag) - span = trace.get_current_span() - span.set_attribute("mck.image.push_latest", f"{registry_address}:{latest_tag}") - logger.info(f"Successfully tagged and pushed {registry_address}:{latest_tag}") - except docker.errors.DockerException as e: - logger.error(f"Failed to tag/push {latest_tag} image: {e}") - raise - else: - logger.info( - f"Skipping tagging and pushing {registry_address}:{version} as {latest_tag} tag; is_running_in_patch={is_running_in_patch()}, is_running_in_evg_pipeline={is_running_in_evg_pipeline()}" - ) + if config.is_release_step_executed(): + registry = f"{QUAY_REGISTRY_URL}/{image_base}{image_name}" + else: + registry = f"{config.base_repository}/{image_base}{image_name}" - # Sign and verify the context image if on releases if required. 
- if config.sign and config.is_release_step_executed(): - sign_and_verify_context_image(registry, version) + if registry_address_override: + registry = registry_address_override - span = trace.get_current_span() - span.set_attribute("mck.image.image_name", image_name) - span.set_attribute("mck.image.version", version) - span.set_attribute("mck.image.is_release", config.is_release_step_executed()) - span.set_attribute("mck.image.is_multi_arch", is_multi_arch) + try: + for args in multi_arch_args_list: # in case we are building multiple architectures + args["quay_registry"] = registry + sonar_build_image(image_name, config, args, inventory_file, False) + if is_multi_arch: + # we only push the manifests of the context images here, + # since daily rebuilds will push the manifests for the proper images later + architectures = [v["architecture"] for v in multi_arch_args_list] + create_and_push_manifest(registry, f"{version}-context", architectures=architectures) + if not config.is_release_step_executed(): + # Normally daily rebuild would create and push the manifests for the non-context images. + # But since we don't run daily rebuilds on ecr image builds, we can do that step instead here. + # We only need to push manifests for multi-arch images. + create_and_push_manifest(registry, version, architectures=architectures) + latest_tag = "latest" + if not is_running_in_patch() and is_running_in_evg_pipeline(): + logger.info(f"Tagging and pushing {registry}:{version} as {latest_tag}") + try: + client = docker.from_env() + source_image = client.images.pull(f"{registry}:{version}") + source_image.tag(registry, latest_tag) + client.images.push(registry, tag=latest_tag) + span = trace.get_current_span() + span.set_attribute("mck.image.push_latest", f"{registry}:{latest_tag}") + logger.info(f"Successfully tagged and pushed {registry}:{latest_tag}") + except docker.errors.DockerException as e: + logger.error(f"Failed to tag/push {latest_tag} image: {e}") + raise + else: + logger.info( + f"Skipping tagging and pushing {registry}:{version} as {latest_tag} tag; is_running_in_patch={is_running_in_patch()}, is_running_in_evg_pipeline={is_running_in_evg_pipeline()}" + ) - # Release step. Release images via the daily image process. - if config.is_release_step_executed() and version and QUAY_REGISTRY_URL in registry: - logger.info( - f"finished building context images, releasing them now via daily builds process for" - f" image: {image_name} and version: {version}!" - ) - # Sleep for a random time between 0 and 5 seconds to distribute daily builds better, - # as we do a lot of things there that require network connections like: - # - Kondukto uploads, downloads - # - image verification and signings - # - manifest creations - # - docker image pushes - # - etc. - if is_run_in_parallel: - time.sleep(random.uniform(0, 5)) - build_image_daily(image_name, version, version)(config) + # Sign and verify the context image on releases, if required. 
+        if config.sign and config.is_release_step_executed():
+            sign_and_verify_context_image(registry, version)
+
+        span = trace.get_current_span()
+        span.set_attribute("mck.image.image_name", image_name)
+        span.set_attribute("mck.image.version", version)
+        span.set_attribute("mck.image.is_release", config.is_release_step_executed())
+        span.set_attribute("mck.image.is_multi_arch", is_multi_arch)
+
+        if config.is_release_step_executed() and version and QUAY_REGISTRY_URL in registry:
+            logger.info(
+                f"finished building context images, releasing them now via daily builds process for"
+                f" image: {image_name} and version: {version}!"
+            )
+            if is_run_in_parallel:
+                time.sleep(random.uniform(0, 5))
+            build_image_daily(image_name, version, version)(config)
+
+    except Exception as e:
+        logger.error(f"Error during build_image_generic for image {image_name}: {e}")
+        logger.error(f"Full traceback for build_image_generic error:")
+        for line in traceback.format_exception(type(e), e, e.__traceback__):
+            logger.error(line.rstrip())
+        raise


 def sign_and_verify_context_image(registry, version):
@@ -1141,15 +1164,12 @@ def build_community_image(build_configuration: BuildConfiguration, image_type: s
         }
         multi_arch_args_list.append(arch_args)

-    ecr_registry = os.environ.get("BASE_REPO_URL", "268558157000.dkr.ecr.us-east-1.amazonaws.com/dev")
-    base_repo = QUAY_REGISTRY_URL if build_configuration.is_release_step_executed() else ecr_registry
-
     build_image_generic(
         config=build_configuration,
         image_name=image_name,
+        with_image_base=False,
         multi_arch_args_list=multi_arch_args_list,
         inventory_file=inventory_file,
-        registry_address=f"{base_repo}/{image_name}",
         is_multi_arch=True,  # We for pushing manifest anyway, even if arm64 is skipped in patches
     )
@@ -1168,35 +1188,6 @@ def build_upgrade_hook_image(build_configuration: BuildConfiguration):
     build_community_image(build_configuration, "upgrade-hook")


-def build_agent_in_sonar(
-    build_configuration: BuildConfiguration,
-    image_version,
-    init_database_image,
-    mongodb_tools_url_ubi,
-    mongodb_agent_url_ubi: str,
-    agent_version,
-):
-    args = {
-        "version": image_version,
-        "mongodb_tools_url_ubi": mongodb_tools_url_ubi,
-        "mongodb_agent_url_ubi": mongodb_agent_url_ubi,
-        "init_database_image": init_database_image,
-    }
-
-    agent_quay_registry = QUAY_REGISTRY_URL + f"/mongodb-agent-ubi"
-    args["quay_registry"] = agent_quay_registry
-    args["agent_version"] = agent_version
-
-    build_image_generic(
-        config=build_configuration,
-        image_name="mongodb-agent",
-        inventory_file="inventories/agent.yaml",
-        extra_args=args,
-        registry_address=agent_quay_registry,
-        is_run_in_parallel=True,
-    )
-
-
 def build_multi_arch_agent_in_sonar(
     build_configuration: BuildConfiguration,
     image_version,
@@ -1234,9 +1225,6 @@ def build_multi_arch_agent_in_sonar(
         arch_arm["tools_distro"] = "rhel93-aarch64"
         arch_amd["tools_distro"] = "rhel93-x86_64"

-    ecr_registry = os.environ.get("REGISTRY", "268558157000.dkr.ecr.us-east-1.amazonaws.com/dev")
-    ecr_agent_registry = ecr_registry + f"/mongodb-agent-ubi"
-    quay_agent_registry = QUAY_REGISTRY_URL + f"/mongodb-agent-ubi"
     joined_args = [args | arch_amd]

     # Only include arm64 if we shouldn't skip it
@@ -1245,10 +1233,10 @@
     build_image_generic(
         config=build_configuration,
-        image_name="mongodb-agent",
-        inventory_file="inventories/agent_non_matrix.yaml",
+        image_name="mongodb-agent-ubi",
+        inventory_file="inventories/agent.yaml",
         multi_arch_args_list=joined_args,
-        registry_address=quay_agent_registry if is_release else ecr_agent_registry,
+        with_image_base=False,
         is_multi_arch=True,  # We for pushing manifest anyway, even if arm64 is skipped in patches
         is_run_in_parallel=True,
     )
@@ -1260,19 +1248,18 @@ def build_agent_default_case(build_configuration: BuildConfiguration):

     See more information in the function: build_agent_on_agent_bump
     """
-    release = get_release()
+    release_json = get_release()

-    operator_version = get_git_release_tag()
     is_release = build_configuration.is_release_step_executed()

     # We need to release [all agents x latest operator] on operator releases
     if is_release:
-        agent_versions_to_build = gather_all_supported_agent_versions(release)
+        agent_versions_to_build = gather_all_supported_agent_versions(release_json)
     # We only need [latest agents (for each OM major version and for CM) x patch ID] for patches
     else:
-        agent_versions_to_build = gather_latest_agent_versions(release)
+        agent_versions_to_build = gather_latest_agent_versions(release_json, build_configuration.agent_to_build)

-    logger.info(f"Building Agent versions: {agent_versions_to_build} for Operator versions: {operator_version}")
+    logger.info(f"Building Agent versions: {agent_versions_to_build}")

     tasks_queue = Queue()
     max_workers = 1
@@ -1294,9 +1281,7 @@ def build_agent_default_case(build_configuration: BuildConfiguration):
                         agent_version[1],
                     )
                 )
-                _build_agent_operator(
-                    agent_version, build_configuration, executor, operator_version, tasks_queue, is_release
-                )
+                _add_to_agent_queue(agent_version, build_configuration, executor, tasks_queue)

     queue_exception_handling(tasks_queue)

@@ -1310,24 +1295,18 @@ def build_agent_on_agent_bump(build_configuration: BuildConfiguration):
     - operator releases
     - OM/CM bumps via PCT

-    We don't require building a full matrix on e2e test runs and operator releases.
-    "Operator releases" and "e2e test runs" require only the latest operator x agents
-
-    In OM/CM bumps, we release a new agent which we potentially require to release to older operators as well.
-    This function takes care of that.
+    In OM/CM bumps, we release a new agent.
     """
-    release = get_release()
+    release_json = get_release()

     is_release = build_configuration.is_release_step_executed()

     if build_configuration.all_agents:
-        # We need to release [all agents x latest operator] on operator releases to make e2e tests work
-        # This was changed previously in https://github.com/mongodb/mongodb-kubernetes/pull/3960
-        agent_versions_to_build = gather_all_supported_agent_versions(release)
+        agent_versions_to_build = gather_all_supported_agent_versions(release_json)
     else:
         # we only need to release the latest images, we don't need to re-push old images, as we don't clean them up anymore.
-        agent_versions_to_build = gather_latest_agent_versions(release)
+        agent_versions_to_build = gather_latest_agent_versions(release_json, build_configuration.agent_to_build)

-    legacy_agent_versions_to_build = release["supportedImages"]["mongodb-agent"]["versions"]
+    legacy_agent_versions_to_build = release_json["supportedImages"]["mongodb-agent"]["versions"]

     tasks_queue = Queue()
     max_workers = 1
@@ -1364,11 +1343,8 @@ def build_agent_on_agent_bump(build_configuration: BuildConfiguration):
                         agent_version[1],
                     )
                 )
-                for operator_version in get_supported_operator_versions():
-                    logger.info(f"Building Agent versions: {agent_version} for Operator versions: {operator_version}")
-                    _build_agent_operator(
-                        agent_version, build_configuration, executor, operator_version, tasks_queue, is_release
-                    )
+                logger.info(f"Building Agent versions: {agent_version}")
+                _add_to_agent_queue(agent_version, build_configuration, executor, tasks_queue)

     queue_exception_handling(tasks_queue)

@@ -1389,7 +1365,12 @@
             exceptions_found = True
             exception_count += 1
             exception_types.add(type(task.exception()).__name__)
-            logger.fatal(f"The following exception has been found when building: {task.exception()}")
+
+            exception_info = task.exception()
+            logger.fatal(f"=== THREAD EXCEPTION DETAILS ===")
+            logger.fatal(f"Exception Type: {type(exception_info).__name__}")
+            logger.fatal(f"Exception Message: {str(exception_info)}")
+            logger.fatal(f"=== END THREAD EXCEPTION DETAILS ===")

     span.set_attribute("mck.agent.queue.exceptions_count", exception_count)
     span.set_attribute(
@@ -1404,37 +1385,21 @@
     )


-def _build_agent_operator(
+def _add_to_agent_queue(
     agent_version: Tuple[str, str],
     build_configuration: BuildConfiguration,
     executor: ProcessPoolExecutor,
-    operator_version: str,
     tasks_queue: Queue,
-    use_quay: bool = False,
 ):
-    agent_distro = "rhel9_x86_64"
     tools_version = agent_version[1]
-    tools_distro = get_tools_distro(tools_version)["amd"]
-    image_version = f"{agent_version[0]}_{operator_version}"
-    mongodb_tools_url_ubi = (
-        f"https://downloads.mongodb.org/tools/db/mongodb-database-tools-{tools_distro}-{tools_version}.tgz"
-    )
-    mongodb_agent_url_ubi = f"https://mciuploads.s3.amazonaws.com/mms-automation/mongodb-mms-build-agent/builds/automation-agent/prod/mongodb-mms-automation-agent-{agent_version[0]}.{agent_distro}.tar.gz"
-    # We use Quay if not in a patch
-    # We could rely on input params (quay_registry or registry), but it makes templating more complex in the inventory
-    non_quay_registry = os.environ.get("REGISTRY", "268558157000.dkr.ecr.us-east-1.amazonaws.com/dev")
-    base_init_database_repo = QUAY_REGISTRY_URL if use_quay else non_quay_registry
-    init_database_image = f"{base_init_database_repo}/mongodb-kubernetes-init-database:{operator_version}"
+    image_version = f"{agent_version[0]}"

     tasks_queue.put(
         executor.submit(
-            build_agent_in_sonar,
+            build_multi_arch_agent_in_sonar,
             build_configuration,
             image_version,
-            init_database_image,
-            mongodb_tools_url_ubi,
-            mongodb_agent_url_ubi,
-            agent_version[0],
+            tools_version,
         )
     )

@@ -1455,7 +1420,7 @@ def gather_all_supported_agent_versions(release: Dict) -> List[Tuple[str, str]]:
     return sorted(list(set(agent_versions_to_build)))


-def gather_latest_agent_versions(release: Dict) -> List[Tuple[str, str]]:
+def gather_latest_agent_versions(release: Dict, agent_to_build: str = "") -> List[Tuple[str, str]]:
     """
     This function is used when we release a new agent via OM bump.
     That means we will need to release that agent with all supported operators.
@@ -1497,6 +1462,11 @@ def gather_latest_agent_versions(release: Dict) -> List[Tuple[str, str]]:
     # https://jira.mongodb.org/browse/CLOUDP-297377
     agent_versions_to_build.append(("107.0.12.8669-1", "100.10.0"))

+    if agent_to_build != "":
+        for agent_tuple in agent_versions_to_build:
+            if agent_tuple[0] == agent_to_build:
+                return [agent_tuple]
+
     return sorted(list(set(agent_versions_to_build)))

@@ -1585,11 +1555,12 @@ def build_all_images(
     architecture: Optional[List[str]] = None,
     sign: bool = False,
     all_agents: bool = False,
+    agent_to_build: str = "",
     parallel_factor: int = 0,
 ):
     """Builds all the images in the `images` list."""
     build_configuration = operator_build_configuration(
-        builder, parallel, debug, architecture, sign, all_agents, parallel_factor
+        builder, parallel, debug, architecture, sign, all_agents, parallel_factor, agent_to_build
     )
     if sign:
         mongodb_artifactory_login()
@@ -1653,6 +1624,11 @@ def main():
         help="optional parameter to be able to push "
         "all non operator suffixed agents, even if we are not in a release",
     )
+    parser.add_argument(
+        "--build-one-agent",
+        default="",
+        help="optional parameter to push one agent",
+    )
     args = parser.parse_args()

     if args.list_images:
@@ -1678,6 +1654,7 @@ def main():
         architecture=args.arch,
         sign=args.sign,
         all_agents=args.all_agents,
+        agent_to_build=args.build_one_agent,
         parallel_factor=args.parallel_factor,
     )
diff --git a/pipeline_test.py b/pipeline_test.py
index 68b7e3a8e..dab707faa 100644
--- a/pipeline_test.py
+++ b/pipeline_test.py
@@ -257,8 +257,9 @@ def test_all_retries_fail(self, mock_sleep, mock_run):
         self.assertEqual(mock_sleep.call_count, 2)


+@patch("pipeline.shutil.which", return_value="/mock/path/to/docker")
 @patch("subprocess.run")
-def test_create_and_push_manifest_success(mock_run):
+def test_create_and_push_manifest_success(mock_run, mock_which):
     """Test successful creation and pushing of manifest with multiple architectures."""
     # Setup mock to return success for both calls
     mock_run.return_value = subprocess.CompletedProcess(args=[], returncode=0, stdout=b"", stderr=b"")
@@ -276,7 +277,7 @@ def test_create_and_push_manifest_success(mock_run):
     # Verify first call - create manifest
     create_call_args = mock_run.call_args_list[0][0][0]
     assert create_call_args == [
-        "docker",
+        "/mock/path/to/docker",
         "manifest",
         "create",
         "test/image:1.0.0",
@@ -288,11 +289,12 @@ def test_create_and_push_manifest_success(mock_run):

     # Verify second call - push manifest
     push_call_args = mock_run.call_args_list[1][0][0]
-    assert push_call_args == ["docker", "manifest", "push", f"{image}:{tag}"]
+    assert push_call_args == ["/mock/path/to/docker", "manifest", "push", f"{image}:{tag}"]


+@patch("pipeline.shutil.which", return_value="/mock/path/to/docker")
 @patch("subprocess.run")
-def test_create_and_push_manifest_single_arch(mock_run):
+def test_create_and_push_manifest_single_arch(mock_run, mock_which):
     """Test manifest creation with a single architecture."""
     # Setup mock to return success for both calls
     mock_run.return_value = subprocess.CompletedProcess(args=[], returncode=0, stdout=b"", stderr=b"")
@@ -307,11 +309,15 @@ def test_create_and_push_manifest_single_arch(mock_run):

     # Verify first call - create manifest (should only include one architecture)
     create_call_args = mock_run.call_args_list[0][0][0]
-    assert " ".join(create_call_args) == f"docker manifest create {image}:{tag} --amend {image}:{tag}-amd64"
+    assert (
+        " ".join(create_call_args)
+        == "/mock/path/to/docker manifest create test/image:1.0.0 --amend test/image:1.0.0-amd64"
"/mock/path/to/docker manifest create test/image:1.0.0 --amend test/image:1.0.0-amd64" + ) +@patch("pipeline.shutil.which", return_value="/mock/path/to/docker") @patch("subprocess.run") -def test_create_and_push_manifest_create_error(mock_run): +def test_create_and_push_manifest_create_error(mock_run, mock_which): """Test error handling when manifest creation fails.""" # Setup mock to return error for create call mock_run.return_value = subprocess.CompletedProcess( @@ -332,13 +338,14 @@ def test_create_and_push_manifest_create_error(mock_run): assert mock_run.call_count == 1 # Only the create call, not the push call +@patch("pipeline.shutil.which", return_value="/mock/path/to/docker") @patch("subprocess.run") -def test_create_and_push_manifest_push_error(mock_run): +def test_create_and_push_manifest_push_error(mock_run, mock_which): """Test error handling when manifest push fails.""" # Setup mock to return success for create but error for push mock_run.side_effect = [ subprocess.CompletedProcess(args=[], returncode=0, stdout=b"", stderr=b""), # create success - subprocess.CompletedProcess(args=[], returncode=1, stdout=b"", stderr=b"Error pushing manifest"), # push error + subprocess.CompletedProcess(args=[], returncode=1, stdout=b"", stderr=b"Push failed"), # push fails ] # Call function with test parameters @@ -352,5 +359,6 @@ def test_create_and_push_manifest_push_error(mock_run): with pytest.raises(Exception) as exc_info: create_and_push_manifest(image, tag, architectures) - assert "Error pushing manifest" in str(exc_info.value) + # The function raises the stderr directly, so we should check for the exact error message + assert "Push failed" in str(exc_info.value) assert mock_run.call_count == 2 # Both create and push calls diff --git a/pkg/util/constants.go b/pkg/util/constants.go index 4cc19cdc6..c649eda4e 100644 --- a/pkg/util/constants.go +++ b/pkg/util/constants.go @@ -89,6 +89,7 @@ const ( BackupDaemonContainerName = "mongodb-backup-daemon" DatabaseContainerName = "mongodb-enterprise-database" AgentContainerName = "mongodb-agent" + AgentContainerUtilitiesName = "mongodb-agent-operator-utilities" InitOpsManagerContainerName = "mongodb-kubernetes-init-ops-manager" PvcNameData = "data" PvcMountPathData = "/data" diff --git a/scripts/dev/print_operator_env.sh b/scripts/dev/print_operator_env.sh index 1b02179f4..2c7bd9a2f 100755 --- a/scripts/dev/print_operator_env.sh +++ b/scripts/dev/print_operator_env.sh @@ -100,6 +100,10 @@ OPERATOR_NAME=\"${OPERATOR_NAME}\" if [[ "${MDB_MAX_CONCURRENT_RECONCILES:-""}" != "" ]]; then echo "MDB_MAX_CONCURRENT_RECONCILES=${MDB_MAX_CONCURRENT_RECONCILES}" fi + + if [[ "${OPERATOR_NAME:-""}" != "" ]]; then + echo "OPERATOR_NAME=${OPERATOR_NAME}" + fi } print_operator_env