Security context #88
Changes from 10 commits
Commits: 292353f, a149991, 82cfc2a, afb7aff, 03305a2, b8f098e, 93d3fae, 63e1183, 1e686eb, 6ede073, a7fee6f, 04f8519
```diff
@@ -677,6 +677,7 @@ func generateContainer(
 	volumeMounts []v1.VolumeMount,
 	privilegedMode bool,
 	privilegeEscalationMode *bool,
+	readOnlyRootFilesystem *bool,
 	additionalPodCapabilities *v1.Capabilities,
 ) *v1.Container {
 	return &v1.Container{
@@ -703,7 +704,7 @@ func generateContainer(
 		SecurityContext: &v1.SecurityContext{
 			AllowPrivilegeEscalation: privilegeEscalationMode,
 			Privileged:               &privilegedMode,
-			ReadOnlyRootFilesystem:   util.False(),
+			ReadOnlyRootFilesystem:   readOnlyRootFilesystem,
 			Capabilities:             additionalPodCapabilities,
 		},
 	}
@@ -736,7 +737,7 @@ func (c *Cluster) generateSidecarContainers(sidecars []cpov1.Sidecar,
 }
 
 // adds common fields to sidecars
-func patchSidecarContainers(in []v1.Container, volumeMounts []v1.VolumeMount, superUserName string, credentialsSecretName string, logger *logrus.Entry) []v1.Container {
+func patchSidecarContainers(in []v1.Container, volumeMounts []v1.VolumeMount, superUserName string, credentialsSecretName string, logger *logrus.Entry, privilegedMode bool, privilegeEscalationMode *bool, additionalPodCapabilities *v1.Capabilities) []v1.Container {
 	result := []v1.Container{}
 
 	for _, container := range in {
@@ -777,6 +778,7 @@ func patchSidecarContainers(in []v1.Container, volumeMounts []v1.VolumeMount, su
 			},
 		}
 		container.Env = appendEnvVars(env, container.Env...)
 
 		result = append(result, container)
 	}
@@ -873,6 +875,15 @@ func (c *Cluster) generatePodTemplate(
 		podSpec.PriorityClassName = priorityClassName
 	}
 
+	if c.Postgresql.Spec.Monitoring != nil {
+		addEmptyDirVolume(&podSpec, "exporter-tmp", "postgres-exporter", "/tmp")
+	}
+
+	if c.OpConfig.ReadOnlyRootFilesystem != nil && *c.OpConfig.ReadOnlyRootFilesystem {
+		addRunVolume(&podSpec, "postgres-run", "postgres", "/run")
+		addEmptyDirVolume(&podSpec, "postgres-tmp", "postgres", "/tmp")
+	}
+
 	if sharePgSocketWithSidecars != nil && *sharePgSocketWithSidecars {
 		addVarRunVolume(&podSpec)
 	}
@@ -988,6 +999,19 @@ func (c *Cluster) generateSpiloPodEnvVars(
 			Name:  "HUMAN_ROLE",
 			Value: c.OpConfig.PamRoleName,
 		},
+		// NSS WRAPPER
+		{
+			Name:  "LD_PRELOAD",
+			Value: "/usr/lib64/libnss_wrapper.so",
+		},
+		{
+			Name:  "NSS_WRAPPER_PASSWD",
+			Value: "/tmp/nss_wrapper/passwd",
+		},
+		{
+			Name:  "NSS_WRAPPER_GROUP",
+			Value: "/tmp/nss_wrapper/group",
+		},
 	}
 
 	if c.OpConfig.EnableSpiloWalPathCompat {
@@ -1243,6 +1267,8 @@ func getSidecarContainer(sidecar cpov1.Sidecar, index int, resources *v1.Resourc
 		Resources:       *resources,
 		Env:             sidecar.Env,
 		Ports:           sidecar.Ports,
+		SecurityContext: sidecar.SecurityContext,
+		VolumeMounts:    sidecar.VolumeMounts,
 	}
 }
 
@@ -1292,6 +1318,23 @@ func generateSpiloReadinessProbe() *v1.Probe {
 	}
 }
 
+func generatePatroniLivenessProbe() *v1.Probe {
+	return &v1.Probe{
+		FailureThreshold: 6,
+		ProbeHandler: v1.ProbeHandler{
+			HTTPGet: &v1.HTTPGetAction{
+				Path:   "/health",
+				Port:   intstr.IntOrString{IntVal: patroni.ApiPort},
+				Scheme: v1.URISchemeHTTP,
+			},
+		},
+		InitialDelaySeconds: 30,
+		PeriodSeconds:       10,
+		TimeoutSeconds:      5,
+		SuccessThreshold:    1,
+	}
+}
+
 func (c *Cluster) generateStatefulSet(spec *cpov1.PostgresSpec) (*appsv1.StatefulSet, error) {
 
 	var (
@@ -1422,6 +1465,17 @@ func (c *Cluster) generateStatefulSet(spec *cpov1.PostgresSpec) (*appsv1.Statefu
 		}
 		additionalVolumes = append(additionalVolumes, tlsVolumes...)
 	}
+	// if monitoring is enabled, add a empty volume
+	// if c.Postgresql.Spec.Monitoring != nil {
+	// 	additionalVolumes = append(additionalVolumes, cpov1.AdditionalVolume{
+	// 		Name:      "exporter-tmp",
+	// 		MountPath: "/tmp",
+	// 		VolumeSource: v1.VolumeSource{
+	// 			EmptyDir: &v1.EmptyDirVolumeSource{},
+	// 		},
+	// 		TargetContainers: []string{"postgres-exporter"},
+	// 	})
+	// }
 	repo_host_mode := false
 	// Add this envVar so that it is not added to the pgbackrest initcontainer
 	if specHasPgbackrestPVCRepo(spec) {
@@ -1444,13 +1498,18 @@ func (c *Cluster) generateStatefulSet(spec *cpov1.PostgresSpec) (*appsv1.Statefu
 		volumeMounts,
 		c.OpConfig.Resources.SpiloPrivileged,
 		c.OpConfig.Resources.SpiloAllowPrivilegeEscalation,
+		c.OpConfig.Resources.ReadOnlyRootFilesystem,
 		generateCapabilities(c.OpConfig.AdditionalPodCapabilities),
 	)
 
 	// Patroni responds 200 to probe only if it either owns the leader lock or postgres is running and DCS is accessible
 	if c.OpConfig.EnableReadinessProbe {
 		spiloContainer.ReadinessProbe = generateSpiloReadinessProbe()
 	}
+	//
+	if c.OpConfig.EnableLivenessProbe {
+		spiloContainer.LivenessProbe = generatePatroniLivenessProbe()
+	}
 
 	// generate container specs for sidecars specified in the cluster manifest
 	clusterSpecificSidecars := []v1.Container{}
@@ -1506,7 +1565,7 @@ func (c *Cluster) generateStatefulSet(spec *cpov1.PostgresSpec) (*appsv1.Statefu
 			containerName, containerName)
 	}
 
-	sidecarContainers = patchSidecarContainers(sidecarContainers, volumeMounts, c.OpConfig.SuperUsername, c.credentialSecretName(c.OpConfig.SuperUsername), c.logger)
+	sidecarContainers = patchSidecarContainers(sidecarContainers, volumeMounts, c.OpConfig.SuperUsername, c.credentialSecretName(c.OpConfig.SuperUsername), c.logger, c.OpConfig.Resources.SpiloPrivileged, c.OpConfig.Resources.SpiloAllowPrivilegeEscalation, generateCapabilities(c.OpConfig.AdditionalPodCapabilities))
 
 	tolerationSpec := tolerations(&spec.Tolerations, c.OpConfig.PodToleration)
 	topologySpreadConstraintsSpec := topologySpreadConstraints(&spec.TopologySpreadConstraints)
@@ -1515,7 +1574,7 @@ func (c *Cluster) generateStatefulSet(spec *cpov1.PostgresSpec) (*appsv1.Statefu
 	podAnnotations := c.generatePodAnnotations(spec)
 
 	if spec.GetBackup().Pgbackrest != nil {
-		initContainers = append(initContainers, c.generatePgbackrestRestoreContainer(spec, repo_host_mode, volumeMounts, resourceRequirements))
+		initContainers = append(initContainers, c.generatePgbackrestRestoreContainer(spec, repo_host_mode, volumeMounts, resourceRequirements, c.OpConfig.Resources.SpiloPrivileged, c.OpConfig.Resources.SpiloAllowPrivilegeEscalation, generateCapabilities(c.OpConfig.AdditionalPodCapabilities)))
 
 		additionalVolumes = append(additionalVolumes, c.generatePgbackrestConfigVolume(spec.Backup.Pgbackrest, false))
 
@@ -1618,7 +1677,7 @@ func (c *Cluster) generateStatefulSet(spec *cpov1.PostgresSpec) (*appsv1.Statefu
 	return statefulSet, nil
 }
 
-func (c *Cluster) generatePgbackrestRestoreContainer(spec *cpov1.PostgresSpec, repo_host_mode bool, volumeMounts []v1.VolumeMount, resourceRequirements *v1.ResourceRequirements) v1.Container {
+func (c *Cluster) generatePgbackrestRestoreContainer(spec *cpov1.PostgresSpec, repo_host_mode bool, volumeMounts []v1.VolumeMount, resourceRequirements *v1.ResourceRequirements, privilegedMode bool, privilegeEscalationMode *bool, additionalPodCapabilities *v1.Capabilities) v1.Container {
 	isOptional := true
 	pgbackrestRestoreEnvVars := []v1.EnvVar{
 		{
@@ -1698,6 +1757,12 @@ func (c *Cluster) generatePgbackrestRestoreContainer(spec *cpov1.PostgresSpec, r
 		Env:          pgbackrestRestoreEnvVars,
 		VolumeMounts: volumeMounts,
 		Resources:    *resourceRequirements,
+		SecurityContext: &v1.SecurityContext{
+			AllowPrivilegeEscalation: privilegeEscalationMode,
+			Privileged:               &privilegedMode,
+			ReadOnlyRootFilesystem:   util.True(),
+			Capabilities:             additionalPodCapabilities,
+		},
 	}
 }
 
@@ -1756,6 +1821,7 @@ func (c *Cluster) generateRepoHostStatefulSet(spec *cpov1.PostgresSpec) (*appsv1
 		volumeMounts,
 		c.OpConfig.Resources.SpiloPrivileged,
 		c.OpConfig.Resources.SpiloAllowPrivilegeEscalation,
+		c.OpConfig.Resources.ReadOnlyRootFilesystem,
 		generateCapabilities(c.OpConfig.AdditionalPodCapabilities),
 	)
 
@@ -2159,6 +2225,48 @@ func addShmVolume(podSpec *v1.PodSpec) {
 	podSpec.Volumes = volumes
 }
 
+func addEmptyDirVolume(podSpec *v1.PodSpec, volumeName string, containerName string, path string) {
+	vol := v1.Volume{
+		Name: volumeName,
+		VolumeSource: v1.VolumeSource{
+			EmptyDir: &v1.EmptyDirVolumeSource{},
+		},
+	}
+	podSpec.Volumes = append(podSpec.Volumes, vol)
+
+	mount := v1.VolumeMount{
+		Name:      vol.Name,
+		MountPath: path,
+	}
+
+	for i := range podSpec.Containers {
+		if podSpec.Containers[i].Name == containerName {
+			podSpec.Containers[i].VolumeMounts = append(podSpec.Containers[i].VolumeMounts, mount)
+		}
+	}
+}
+
+func addRunVolume(podSpec *v1.PodSpec, volumeName string, containerName string, path string) {
+	vol := v1.Volume{
+		Name: volumeName,
+		VolumeSource: v1.VolumeSource{
+			EmptyDir: &v1.EmptyDirVolumeSource{},
+		},
+	}
+	podSpec.Volumes = append(podSpec.Volumes, vol)
+
+	mount := v1.VolumeMount{
+		Name:      vol.Name,
+		MountPath: path,
+	}
+
+	for i := range podSpec.Containers {
+		if podSpec.Containers[i].Name == containerName {
+			podSpec.Containers[i].VolumeMounts = append(podSpec.Containers[i].VolumeMounts, mount)
+		}
+	}
+}
+
 func addVarRunVolume(podSpec *v1.PodSpec) {
 	volumes := append(podSpec.Volumes, v1.Volume{
 		Name: "postgresql-run",
@@ -2726,6 +2834,7 @@ func (c *Cluster) generateLogicalBackupJob() (*batchv1.CronJob, error) {
 		[]v1.VolumeMount{},
 		c.OpConfig.SpiloPrivileged, // use same value as for normal DB pods
 		c.OpConfig.SpiloAllowPrivilegeEscalation,
+		util.False(),
 		nil,
 	)
 
@@ -3252,9 +3361,13 @@ func (c *Cluster) generatePgbackrestJob(backup *cpov1.Pgbackrest, repo *cpov1.Re
 		[]v1.VolumeMount{},
 		c.OpConfig.SpiloPrivileged, // use same value as for normal DB pods
 		c.OpConfig.SpiloAllowPrivilegeEscalation,
+		c.OpConfig.Resources.ReadOnlyRootFilesystem,
 		nil,
 	)
 
+	// Patch securityContext - readOnlyRootFilesystem
+	pgbackrestContainer.SecurityContext.ReadOnlyRootFilesystem = util.True()
+
 	podAffinityTerm := v1.PodAffinityTerm{
 		LabelSelector: c.roleLabelsSelector(Master),
 		TopologyKey:   "kubernetes.io/hostname",
```
Review comment:
The correct endpoint is /liveness. This checks that Patroni is working but ignores Postgres. Checking /health will break during maintenance mode and upgrades, and when starting Postgres takes too long.
Reply: fixed with a7fee6f.
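
For context, the fix the reviewer asks for amounts to swapping the probe path. Below is a minimal sketch of the probe with the endpoint switched to /liveness, assuming the other parameters stay as in the diff above (the actual change in a7fee6f may differ). Patroni's /liveness endpoint returns 200 as long as the Patroni heartbeat loop itself is running, regardless of Postgres state, so the probe keeps passing during maintenance mode, upgrades, and a slow Postgres startup, where /health would fail.

```go
// Sketch only, not the committed fix: generatePatroniLivenessProbe with the
// endpoint the review suggests. patroni.ApiPort is the operator's internal
// constant for the Patroni REST API port, used exactly as in the diff above.
import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func generatePatroniLivenessProbe() *v1.Probe {
	return &v1.Probe{
		FailureThreshold: 6,
		ProbeHandler: v1.ProbeHandler{
			HTTPGet: &v1.HTTPGetAction{
				Path:   "/liveness", // was "/health" in this revision of the PR
				Port:   intstr.IntOrString{IntVal: patroni.ApiPort},
				Scheme: v1.URISchemeHTTP,
			},
		},
		InitialDelaySeconds: 30,
		PeriodSeconds:       10,
		TimeoutSeconds:      5,
		SuccessThreshold:    1,
	}
}
```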