Skip to content

Commit 5375789

Browse files
Jonathan S. Katz (jkatz)
authored and committed
Allow tablespaces to be added to existing PostgreSQL clusters
This commit introduces the "pgo update cluster --tablespace" flag, which allows users to add additional tablespaces to a PostgreSQL cluster. The Operator takes care of creating the additional PVCs for the primary and all replicas in the PostgreSQL cluster, as well as updating the Deployment template for each instance. This does have the side effect of causing the PostgreSQL Pods to restart, as the new volumes need to be mounted. Issue: [ch7232]
1 parent 7e0c1e5 commit 5375789

File tree

12 files changed

+329
-82
lines changed

12 files changed

+329
-82
lines changed

apiserver/clusterservice/clusterimpl.go

Lines changed: 58 additions & 33 deletions
Original file line numberDiff line numberDiff line change
@@ -557,23 +557,10 @@ func CreateCluster(request *msgs.CreateClusterRequest, ns, pgouser string) msgs.
557557
// validate the storage type for each specified tablespace actually exists.
558558
// if a PVCSize is passed in, also validate that it follows the Kubernetes
559559
// format
560-
if len(request.Tablespaces) > 0 {
561-
for _, tablespace := range request.Tablespaces {
562-
if !apiserver.IsValidStorageName(tablespace.StorageConfig) {
563-
resp.Status.Code = msgs.Error
564-
resp.Status.Msg = fmt.Sprintf("%s storage config for tablespace %s was not found",
565-
tablespace.StorageConfig, tablespace.Name)
566-
return resp
567-
}
568-
569-
if tablespace.PVCSize != "" {
570-
if err := apiserver.ValidateQuantity(tablespace.PVCSize); err != nil {
571-
resp.Status.Code = msgs.Error
572-
resp.Status.Msg = fmt.Sprintf(apiserver.ErrMessagePVCSize, tablespace.PVCSize, err.Error())
573-
return resp
574-
}
575-
}
576-
}
560+
if err := validateTablespaces(request.Tablespaces); err != nil {
561+
resp.Status.Code = msgs.Error
562+
resp.Status.Msg = err.Error()
563+
return resp
577564
}
578565

579566
// validate the TLS parameters for enabling TLS in a PostgreSQL cluster
@@ -987,24 +974,20 @@ func getClusterParams(request *msgs.CreateClusterRequest, name string, userLabel
987974
spec.PrimaryStorage.Size = request.PVCSize
988975
}
989976

990-
// extract the parameters for th TablespacEMounts and put them in the format
977+
// extract the parameters for the TablespaceMounts and put them in the format
991978
// that is required by the pgcluster CRD
992-
if len(request.Tablespaces) > 0 {
993-
tablespaceMountsMap := map[string]crv1.PgStorageSpec{}
979+
spec.TablespaceMounts = map[string]crv1.PgStorageSpec{}
994980

995-
for _, tablespace := range request.Tablespaces {
996-
storageSpec, _ := apiserver.Pgo.GetStorageSpec(tablespace.StorageConfig)
997-
998-
// if a PVCSize is specified, override the value of the Size parameter in
999-
// storage spec
1000-
if tablespace.PVCSize != "" {
1001-
storageSpec.Size = tablespace.PVCSize
1002-
}
981+
for _, tablespace := range request.Tablespaces {
982+
storageSpec, _ := apiserver.Pgo.GetStorageSpec(tablespace.StorageConfig)
1003983

1004-
tablespaceMountsMap[tablespace.Name] = storageSpec
984+
// if a PVCSize is specified, override the value of the Size parameter in
985+
// storage spec
986+
if tablespace.PVCSize != "" {
987+
storageSpec.Size = tablespace.PVCSize
1005988
}
1006989

1007-
spec.TablespaceMounts = tablespaceMountsMap
990+
spec.TablespaceMounts[tablespace.Name] = storageSpec
1008991
}
1009992

1010993
spec.ReplicaStorage, _ = apiserver.Pgo.GetStorageSpec(apiserver.Pgo.ReplicaStorage)
@@ -1423,6 +1406,15 @@ func UpdateCluster(request *msgs.UpdateClusterRequest) msgs.UpdateClusterRespons
14231406
return response
14241407
}
14251408

1409+
// validate the storage type for each specified tablespace actually exists.
1410+
// if a PVCSize is passed in, also validate that it follows the Kubernetes
1411+
// format
1412+
if err := validateTablespaces(request.Tablespaces); err != nil {
1413+
response.Status.Code = msgs.Error
1414+
response.Status.Msg = err.Error()
1415+
return response
1416+
}
1417+
14261418
clusterList := crv1.PgclusterList{}
14271419

14281420
//get the clusters list
@@ -1502,9 +1494,21 @@ func UpdateCluster(request *msgs.UpdateClusterRequest) msgs.UpdateClusterRespons
15021494
cluster.Spec.Shutdown = true
15031495
}
15041496

1505-
err = kubeapi.Updatepgcluster(apiserver.RESTClient,
1506-
&cluster, cluster.Spec.Name, request.Namespace)
1507-
if err != nil {
1497+
// extract the parameters for the TablespaceMounts and put them in the
1498+
// format that is required by the pgcluster CRD
1499+
for _, tablespace := range request.Tablespaces {
1500+
storageSpec, _ := apiserver.Pgo.GetStorageSpec(tablespace.StorageConfig)
1501+
1502+
// if a PVCSize is specified, override the value of the Size parameter in
1503+
// storage spec
1504+
if tablespace.PVCSize != "" {
1505+
storageSpec.Size = tablespace.PVCSize
1506+
}
1507+
1508+
cluster.Spec.TablespaceMounts[tablespace.Name] = storageSpec
1509+
}
1510+
1511+
if err := kubeapi.Updatepgcluster(apiserver.RESTClient, &cluster, cluster.Spec.Name, request.Namespace); err != nil {
15081512
response.Status.Code = msgs.Error
15091513
response.Status.Msg = err.Error()
15101514
return response
@@ -1622,6 +1626,27 @@ func validateClusterTLS(request *msgs.CreateClusterRequest) error {
16221626
return nil
16231627
}
16241628

1629+
// validateTablespaces validates the tablespace parameters. if there is an error
1630+
// it aborts and returns an error
1631+
func validateTablespaces(tablespaces []msgs.ClusterTablespaceDetail) error {
1632+
// iterate through the list of tablespaces and return any errors
1633+
for _, tablespace := range tablespaces {
1634+
if !apiserver.IsValidStorageName(tablespace.StorageConfig) {
1635+
return fmt.Errorf("%s storage config for tablespace %s was not found",
1636+
tablespace.StorageConfig, tablespace.Name)
1637+
}
1638+
1639+
if tablespace.PVCSize != "" {
1640+
if err := apiserver.ValidateQuantity(tablespace.PVCSize); err != nil {
1641+
return fmt.Errorf(apiserver.ErrMessagePVCSize,
1642+
tablespace.PVCSize, err.Error())
1643+
}
1644+
}
1645+
}
1646+
1647+
return nil
1648+
}
1649+
16251650
// determines if any of the required S3 configuration settings (bucket, endpoint
16261651
// and region) are missing from both the incoming request or the pgo.yaml config file
16271652
func isMissingS3Config(request *msgs.CreateClusterRequest) bool {

apiservermsgs/clustermsgs.go

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -273,6 +273,7 @@ type UpdateClusterRequest struct {
273273
Standby UpdateClusterStandbyStatus
274274
Startup bool
275275
Shutdown bool
276+
Tablespaces []ClusterTablespaceDetail
276277
}
277278

278279
// UpdateClusterResponse ...

controller/pgcluster/pgclustercontroller.go

Lines changed: 138 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,7 @@ import (
1919
"context"
2020
"fmt"
2121
"io/ioutil"
22+
"reflect"
2223
"strconv"
2324
"strings"
2425
"sync"
@@ -212,6 +213,15 @@ func (c *Controller) onUpdate(oldObj, newObj interface{}) {
212213
return
213214
}
214215
}
216+
217+
// if we are not in a standby state, check to see if the tablespaces have
218+
// differed, and if so, add the additional volumes to the primary and replicas
219+
if !reflect.DeepEqual(oldcluster.Spec.TablespaceMounts, newcluster.Spec.TablespaceMounts) {
220+
if err := updateTablespaces(c, oldcluster, newcluster); err != nil {
221+
log.Error(err)
222+
return
223+
}
224+
}
215225
}
216226

217227
// onDelete is called when a pgcluster is deleted
@@ -314,3 +324,131 @@ func addIdentifier(clusterCopy *crv1.Pgcluster) {
314324

315325
clusterCopy.ObjectMeta.Labels[config.LABEL_PG_CLUSTER_IDENTIFIER] = string(u[:len(u)-1])
316326
}
327+
328+
// updateTablespaces updates the PostgreSQL instance Deployments to reflect the
329+
// new PostgreSQL tablespaces that should be added
330+
func updateTablespaces(c *Controller, oldCluster *crv1.Pgcluster, newCluster *crv1.Pgcluster) error {
331+
// first, get a list of all of the available deployments so we can properly
332+
// mount the tablespace PVCs after we create them
333+
// NOTE: this will also get the pgBackRest deployments, but we will filter
334+
// these out later
335+
selector := fmt.Sprintf("%s=%s,%s=%s", config.LABEL_VENDOR, config.LABEL_CRUNCHY,
336+
config.LABEL_PG_CLUSTER, newCluster.Name)
337+
338+
deployments, err := kubeapi.GetDeployments(c.PgclusterClientset, selector, newCluster.Namespace)
339+
340+
if err != nil {
341+
return err
342+
}
343+
344+
// now get the instance names, which will make it easier to create all the
345+
// PVCs
346+
instanceNames := []string{}
347+
348+
for _, deployment := range deployments.Items {
349+
labels := deployment.ObjectMeta.GetLabels()
350+
351+
// get the name of the PostgreSQL instance. If the "deployment-name"
352+
// label is not present, then we know it's not a PostgreSQL cluster.
353+
// Otherwise, the "deployment-name" label doubles as the name of the
354+
// instance
355+
if instanceName, ok := labels[config.LABEL_DEPLOYMENT_NAME]; ok {
356+
log.Debugf("instance found [%s]", instanceName)
357+
358+
instanceNames = append(instanceNames, instanceName)
359+
}
360+
}
361+
362+
// iterate through the tablespace mount map that is present in the new
363+
// cluster. Any entry that is not in the old cluster, create PVCs
364+
newTablespaces := map[string]crv1.PgStorageSpec{}
365+
366+
for tablespaceName, storageSpec := range newCluster.Spec.TablespaceMounts {
367+
// if the tablespace does not exist in the old version of the cluster,
368+
// then add it in!
369+
if _, ok := oldCluster.Spec.TablespaceMounts[tablespaceName]; !ok {
370+
log.Debugf("new tablespace found: [%s]", tablespaceName)
371+
372+
newTablespaces[tablespaceName] = storageSpec
373+
}
374+
}
375+
376+
// now we can start creating the new tablespaces! First, create the new
377+
// PVCs. The PVCs are created for each **instance** in the cluster, as every
378+
// instance needs to have a distinct PVC for each tablespace
379+
for tablespaceName, storageSpec := range newTablespaces {
380+
for _, instanceName := range instanceNames {
381+
// get the name of the tablespace PVC for that instance
382+
tablespacePVCName := operator.GetTablespacePVCName(instanceName, tablespaceName)
383+
384+
log.Debugf("creating tablespace PVC [%s] for [%s]", tablespacePVCName, instanceName)
385+
386+
// and now create it! If it errors, we just need to return, which
387+
// potentially leaves things in an inconsistent state, but at this point
388+
// only PVC objects have been created
389+
if err := clusteroperator.CreateTablespacePVC(c.PgclusterClientset, newCluster.Namespace, newCluster.Name,
390+
tablespacePVCName, &storageSpec); err != nil {
391+
return err
392+
}
393+
}
394+
}
395+
396+
// now the fun step: update each deployment with the new volumes
397+
for _, deployment := range deployments.Items {
398+
labels := deployment.ObjectMeta.GetLabels()
399+
400+
// same deal as before: if this is not a PostgreSQL instance, skip it
401+
instanceName, ok := labels[config.LABEL_DEPLOYMENT_NAME]
402+
if !ok {
403+
continue
404+
}
405+
406+
log.Debugf("attach tablespace volumes to [%s]", instanceName)
407+
408+
// iterate through each table space and prepare the Volume and
409+
// VolumeMount clause for each instance
410+
for tablespaceName, _ := range newTablespaces {
411+
// this is the volume to be added for the tablespace
412+
volume := v1.Volume{
413+
Name: operator.GetTablespaceVolumeName(tablespaceName),
414+
VolumeSource: v1.VolumeSource{
415+
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
416+
ClaimName: operator.GetTablespacePVCName(instanceName, tablespaceName),
417+
},
418+
},
419+
}
420+
421+
// add the volume to the list of volumes
422+
deployment.Spec.Template.Spec.Volumes = append(deployment.Spec.Template.Spec.Volumes, volume)
423+
424+
// now add the volume mount point to that of the database container
425+
volumeMount := v1.VolumeMount{
426+
MountPath: fmt.Sprintf("%s%s", config.VOLUME_TABLESPACE_PATH_PREFIX, tablespaceName),
427+
Name: operator.GetTablespaceVolumeName(tablespaceName),
428+
}
429+
430+
// we can do this as we always know that the "database" container is the
431+
// first container in the list
432+
deployment.Spec.Template.Spec.Containers[0].VolumeMounts = append(
433+
deployment.Spec.Template.Spec.Containers[0].VolumeMounts, volumeMount)
434+
}
435+
436+
// find the "PGHA_TABLESPACES" value and update it with the new tablespace
437+
// name list
438+
for i, envVar := range deployment.Spec.Template.Spec.Containers[0].Env {
439+
// yup, it's an old fashioned linear time lookup
440+
if envVar.Name == "PGHA_TABLESPACES" {
441+
deployment.Spec.Template.Spec.Containers[0].Env[i].Value = operator.GetTablespaceNames(
442+
newCluster.Spec.TablespaceMounts)
443+
}
444+
}
445+
446+
// finally, update the Deployment. Potential to put things into an
447+
// inconsistent state if any of these updates fail
448+
if err := kubeapi.UpdateDeployment(c.PgclusterClientset, &deployment); err != nil {
449+
return err
450+
}
451+
}
452+
453+
return nil
454+
}

docs/content/architecture/tablespaces.md

Lines changed: 23 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -73,7 +73,7 @@ a recovery
7373
- Deprovisioining: Tablespaces are deleted when a PostgreSQL instance or cluster
7474
is deleted
7575

76-
## Creating Tablespaces
76+
## Adding Tablespaces to a New Cluster
7777

7878
Tablespaces can be used in a cluster with the [`pgo create cluster`](/pgo-client/reference/pgo_create_cluster/)
7979
command. The command follows this general format:
@@ -107,5 +107,27 @@ CREATE TABLE sensor_data (
107107
TABLESPACE faststorage1;
108108
```
109109

110+
## Adding Tablespaces to Existing Clusters
111+
112+
You can also add a tablespace to an existing PostgreSQL cluster with the
113+
[`pgo update cluster`](/pgo-client/reference/pgo_update_cluster/) command.
114+
Adding a tablespace to a cluster uses a similar syntax to creating a cluster
115+
with tablespaces, for example:
116+
117+
```shell
118+
pgo update cluster hacluster \
119+
--tablespace=name=tablespace3:storageconfig=storageconfigname
120+
```
121+
122+
**NOTE**: This operation can cause downtime. In order to add a tablespace to a
123+
PostgreSQL cluster, persistent volume claims (PVCs) need to be created and
124+
mounted to each PostgreSQL instance in the cluster. The act of mounting a new
125+
PVC to a Kubernetes Deployment causes the Pods in the deployment to restart.
126+
127+
When the operation completes, the tablespace will be set up and accessible to
128+
use within the PostgreSQL cluster.
129+
130+
## More Information
131+
110132
For more information on how tablespaces work in PostgreSQL please refer to the
111133
[PostgreSQL manual](https://www.postgresql.org/docs/current/manage-ag-tablespaces.html).

docs/content/pgo-client/common-tasks.md

Lines changed: 32 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -826,6 +826,38 @@ pgo scaledown hacluster --target=hacluster-abcd
826826
where `hacluster-abcd` is the name of the PostgreSQL replica that you want to
827827
destroy.
828828

829+
## Cluster Maintenance & Resource Management
830+
831+
There are several operations that you can perform to modify a PostgreSQL cluster
832+
over its lifetime.
833+
834+
#### Adding a Tablespace to a Cluster
835+
836+
Based on your workload or volume of data, you may wish to add a
837+
[tablespace](https://www.postgresql.org/docs/current/manage-ag-tablespaces.html) to
838+
your PostgreSQL cluster.
839+
840+
You can add a tablespace to an existing PostgreSQL cluster with the
841+
[`pgo update cluster`](/pgo-client/reference/pgo_update_cluster/) command.
842+
Adding a tablespace to a cluster uses a similar syntax to
843+
[creating a cluster with a tablespace](#create-a-postgresql-cluster-with-a-tablespace), for example:
844+
845+
```shell
846+
pgo update cluster hacluster \
847+
--tablespace=name=tablespace3:storageconfig=storageconfigname
848+
```
849+
850+
**NOTE**: This operation can cause downtime. In order to add a tablespace to a
851+
PostgreSQL cluster, persistent volume claims (PVCs) need to be created and
852+
mounted to each PostgreSQL instance in the cluster. The act of mounting a new
853+
PVC to a Kubernetes Deployment causes the Pods in the deployment to restart.
854+
855+
When the operation completes, the tablespace will be set up and accessible to
856+
use within the PostgreSQL cluster.
857+
858+
For more information on tablespaces, please visit the [tablespace](/architecture/tablespaces/)
859+
section of the documentation.
860+
829861
## Clone a PostgreSQL Cluster
830862

831863
You can create a copy of an existing PostgreSQL cluster in a new PostgreSQL

0 commit comments

Comments
 (0)