
Commit 9a87283

Author: Jeff McCormick (committed)

fix upgrade logic for storage changes

1 parent d446250 · commit 9a87283

File tree

5 files changed: +22 additions, -19 deletions


client/cmd/upgrade.go

Lines changed: 0 additions & 1 deletion
@@ -203,7 +203,6 @@ func createUpgrade(args []string) {
             fmt.Println("cluster " + arg + " uses emptydir storage and can not be upgraded")
             break
         }
-        fmt.Println("after break")
 
         // Create an instance of our TPR
         newInstance, err = getUpgradeParams(arg)

conf/postgres-operator/cluster/1/cluster-upgrade-job-1.json

Lines changed: 3 additions & 3 deletions
@@ -25,9 +25,9 @@
             "claimName": "{{.NEW_PVC_NAME}}"
         }
     }],
-    "securityContext": {
-        "supplementalGroups": [65534]
-    },
+
+    {{.SECURITY_CONTEXT}}
+
     "containers": [{
         "name": "upgrade",
         "image": "crunchydata/crunchy-upgrade:{{.CCP_IMAGE_TAG}}",

docs/config.asciidoc

Lines changed: 1 addition & 0 deletions
@@ -163,6 +163,7 @@ the *.pgo.yaml* configuration file by specifying values within
 the REPLICA_STORAGE, MASTER_STORAGE, and BACKUP_STORAGE settings.
 
 The following STORAGE_TYPE values are possible:
+
 * *dynamic* - currently not implemented, this will allow for dynamic
 provisioning of storage using a StorageClass
 * *existing* - this setting allows you to use a PVC that already exists,

operator/cluster/cluster_strategy_1.go

Lines changed: 6 additions & 1 deletion
@@ -238,9 +238,14 @@ func shutdownCluster(clientset *kubernetes.Clientset, client *rest.RESTClient, c
     //in the delete options to also delete the replica sets
 
     //delete the deployments
+    delOptions := meta_v1.DeleteOptions{}
+    var delProp meta_v1.DeletionPropagation
+    delProp = meta_v1.DeletePropagationForeground
+    delOptions.PropagationPolicy = &delProp
+
     for _, d := range deployments.Items {
         log.Debug("deleting deployment " + d.ObjectMeta.Name)
-        err = clientset.ExtensionsV1beta1().Deployments(namespace).Delete(d.ObjectMeta.Name, &meta_v1.DeleteOptions{})
+        err = clientset.ExtensionsV1beta1().Deployments(namespace).Delete(d.ObjectMeta.Name, &delOptions)
         if err != nil {
             log.Error("error deleting replica Deployment " + err.Error())
         }
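Replacing the empty DeleteOptions with one carrying a Foreground propagation policy is what makes the Deployment delete cascade to its ReplicaSets and Pods, which is what the comment about the delete options needing "to also delete the replica sets" is after. A compact sketch of the same pattern, assuming the client-go/apimachinery vintage this operator was built against (the Delete(name, *DeleteOptions) signature shown in the diff); the function name is illustrative:

```go
package cluster

import (
	meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// deleteDeploymentForeground deletes a Deployment with foreground propagation,
// so its dependent ReplicaSets and Pods are garbage-collected as part of the
// delete instead of being left behind, as an empty DeleteOptions allows.
func deleteDeploymentForeground(clientset *kubernetes.Clientset, name, namespace string) error {
	delProp := meta_v1.DeletePropagationForeground
	delOptions := meta_v1.DeleteOptions{PropagationPolicy: &delProp}
	return clientset.ExtensionsV1beta1().Deployments(namespace).Delete(name, &delOptions)
}
```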

operator/cluster/upgrade_strategy_1.go

Lines changed: 12 additions & 14 deletions
@@ -44,6 +44,7 @@ type JobTemplateFields struct {
     NEW_DATABASE_NAME string
     OLD_VERSION string
     NEW_VERSION string
+    SECURITY_CONTEXT string
 }
 
 const DB_UPGRADE_JOB_PATH = "/operator-conf/cluster-upgrade-job-1.json"
@@ -127,30 +128,20 @@ func (r ClusterStrategy1) MajorUpgrade(clientset *kubernetes.Clientset, tprclien
     }
 
     //create the PVC if necessary
-    if upgrade.Spec.NEW_PVC_NAME != upgrade.Spec.OLD_PVC_NAME {
-        if pvc.Exists(clientset, upgrade.Spec.NEW_PVC_NAME, namespace) {
-            log.Info("pvc " + upgrade.Spec.NEW_PVC_NAME + " already exists, will not create")
-        } else {
-            log.Info("creating pvc " + upgrade.Spec.NEW_PVC_NAME)
-            err = pvc.Create(clientset, upgrade.Spec.NEW_PVC_NAME, upgrade.Spec.StorageSpec.PvcAccessMode, upgrade.Spec.StorageSpec.PvcSize, upgrade.Spec.StorageSpec.StorageType, upgrade.Spec.StorageSpec.StorageClass, namespace)
-            if err != nil {
-                log.Error("error in pvc create " + err.Error())
-                return err
-            }
-            log.Info("created PVC =" + upgrade.Spec.NEW_PVC_NAME + " in namespace " + namespace)
-        }
-    }
+    pvcName, err := pvc.CreatePVC(clientset, cl.Spec.Name+"-upgrade", &cl.Spec.MasterStorage, namespace)
+    log.Debug("created pvc for upgrade as [" + pvcName + "]")
 
     //upgrade the master data
     jobFields := JobTemplateFields{
         Name: upgrade.Spec.Name,
-        NEW_PVC_NAME: upgrade.Spec.NEW_PVC_NAME,
+        NEW_PVC_NAME: pvcName,
         OLD_PVC_NAME: upgrade.Spec.OLD_PVC_NAME,
         CCP_IMAGE_TAG: upgrade.Spec.CCP_IMAGE_TAG,
         OLD_DATABASE_NAME: upgrade.Spec.OLD_DATABASE_NAME,
         NEW_DATABASE_NAME: upgrade.Spec.NEW_DATABASE_NAME,
         OLD_VERSION: upgrade.Spec.OLD_VERSION,
         NEW_VERSION: upgrade.Spec.NEW_VERSION,
+        SECURITY_CONTEXT: util.CreateSecContext(cl.Spec.MasterStorage.FSGROUP, cl.Spec.MasterStorage.SUPPLEMENTAL_GROUPS),
     }
 
     var doc bytes.Buffer
@@ -176,6 +167,13 @@ func (r ClusterStrategy1) MajorUpgrade(clientset *kubernetes.Clientset, tprclien
     }
     log.Info("created Job " + resultJob.Name)
 
+    //patch the upgrade tpr with the new pvc name
+    err = util.Patch(tprclient, "/spec/newpvcname", pvcName, tpr.UPGRADE_RESOURCE, upgrade.Spec.Name, namespace)
+    if err != nil {
+        log.Error(err)
+        return err
+    }
+
     //the remainder of the major upgrade is done via the upgrade watcher
 
     return err
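The body of util.CreateSecContext is not part of this commit, but from its call site and the template change it evidently turns the storage spec's FSGROUP and SUPPLEMENTAL_GROUPS values into the JSON fragment injected at {{.SECURITY_CONTEXT}}. A hypothetical sketch of such a helper; the lowercase name, string parameters, and exact output format are assumptions, not the repository's code:

```go
package util

import "strings"

// createSecContext is a hypothetical stand-in for util.CreateSecContext:
// it builds the securityContext fragment spliced into the Job template at
// {{.SECURITY_CONTEXT}}, or returns "" when the storage spec sets neither
// an fsGroup nor supplemental groups.
func createSecContext(fsGroup string, suppGroups string) string {
	if fsGroup == "" && suppGroups == "" {
		return ""
	}
	parts := []string{}
	if fsGroup != "" {
		parts = append(parts, `"fsGroup": `+fsGroup)
	}
	if suppGroups != "" {
		parts = append(parts, `"supplementalGroups": [`+suppGroups+`]`)
	}
	return `"securityContext": {` + strings.Join(parts, ", ") + `},`
}
```

Called with an empty fsGroup and supplemental groups of "65534", this would reproduce the "securityContext": {"supplementalGroups": [65534]}, block that was previously hardcoded in cluster-upgrade-job-1.json.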
