Skip to content

Commit 5122023

Browse files
committed
MTV-3353 | Remove populator finalizers from PVCs during archive
This PR enhances the plan archive mechanism with a two-tier cleanup strategy and ensures PVC populator finalizers are properly removed.

Archive Cleanup Strategy:
- Tier 1: Delete all temporary resources by plan label (pods, secrets, configmaps, jobs, populator CRs) and remove populator finalizers from all PVCs
- Tier 2: Loop over migration history - apply VM status logic for current migration (preserve successful VMs, delete failed if DeleteVmOnFailMigration), delete ALL resources unconditionally for previous migrations

Labeler Refactoring:
- Add VMLabelsWithExtra() method to centralized Labeler for provider-specific labels while ensuring core labels (plan, migration, vmID) are always present
- Refactor oVirt, OpenStack, and vSphere builders to use centralized Labeler instead of manually creating label maps
- Add plan label to OvirtVolumePopulator and OpenstackVolumePopulator utils

Tests:
- Add unit tests for plan and migration cleanup functions
- Fix vSphere builder tests to properly initialize Labeler in Context

Ref: https://issues.redhat.com/browse/MTV-3353
Resolves: MTV-3353

Signed-off-by: yaacov <yzamir@redhat.com>
1 parent 67e32f2 commit 5122023

File tree

13 files changed

+723
-173
lines changed

13 files changed

+723
-173
lines changed

pkg/controller/plan/adapter/openstack/builder.go

Lines changed: 7 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -1051,11 +1051,9 @@ func (r *Builder) createVolumePopulatorCR(image model.Image, secretName, vmId st
10511051
ObjectMeta: meta.ObjectMeta{
10521052
GenerateName: fmt.Sprintf("%s-", image.Name),
10531053
Namespace: r.Plan.Spec.TargetNamespace,
1054-
Labels: map[string]string{
1055-
"vmID": vmId,
1056-
"migration": getMigrationID(r.Context),
1057-
"imageID": image.ID,
1058-
},
1054+
Labels: r.Labeler.VMLabelsWithExtra(ref.Ref{ID: vmId}, map[string]string{
1055+
"imageID": image.ID,
1056+
}),
10591057
},
10601058
Spec: api.OpenstackVolumePopulatorSpec{
10611059
IdentityURL: r.Source.Provider.Spec.URL,
@@ -1224,11 +1222,9 @@ func (r *Builder) persistentVolumeClaimWithSourceRef(image model.Image,
12241222
GenerateName: fmt.Sprintf("%s-", image.ID),
12251223
Namespace: r.Plan.Spec.TargetNamespace,
12261224
Annotations: annotations,
1227-
Labels: map[string]string{
1228-
"migration": getMigrationID(r.Context),
1229-
"imageID": image.ID,
1230-
"vmID": vmID,
1231-
},
1225+
Labels: r.Labeler.VMLabelsWithExtra(ref.Ref{ID: vmID}, map[string]string{
1226+
"imageID": image.ID,
1227+
}),
12321228
},
12331229
Spec: core.PersistentVolumeClaimSpec{
12341230
AccessModes: accessModes,
@@ -1316,7 +1312,7 @@ func (r *Builder) SetPopulatorDataSourceLabels(vmRef ref.Ref, pvcs []*core.Persi
13161312
images = append(images, image)
13171313
}
13181314
}
1319-
migrationID := string(r.Plan.Status.Migration.ActiveSnapshot().Migration.UID)
1315+
migrationID := r.ActiveMigrationUID()
13201316
for _, image := range images {
13211317
populatorCr, err := r.getVolumePopulatorCR(image.ID)
13221318
if err != nil {

pkg/controller/plan/adapter/openstack/destinationclient.go

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -67,7 +67,7 @@ func (r *DestinationClient) getPopulatorCrList() (populatorCrList v1beta1.Openst
6767
&populatorCrList,
6868
&client.ListOptions{
6969
Namespace: r.Plan.Spec.TargetNamespace,
70-
LabelSelector: labels.SelectorFromSet(map[string]string{"migration": string(r.Plan.Status.Migration.ActiveSnapshot().Migration.UID)}),
70+
LabelSelector: labels.SelectorFromSet(map[string]string{"migration": r.ActiveMigrationUID()}),
7171
})
7272
return
7373
}
@@ -103,7 +103,7 @@ func (r *DestinationClient) findPVCByCR(cr *v1beta1.OpenstackVolumePopulator) (p
103103
&client.ListOptions{
104104
Namespace: r.Plan.Spec.TargetNamespace,
105105
LabelSelector: labels.SelectorFromSet(map[string]string{
106-
"migration": string(r.Plan.Status.Migration.ActiveSnapshot().Migration.UID),
106+
"migration": r.ActiveMigrationUID(),
107107
"imageID": cr.Spec.ImageID,
108108
}),
109109
})

pkg/controller/plan/adapter/ovirt/builder.go

Lines changed: 11 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -666,12 +666,9 @@ func (r *Builder) LunPersistentVolumes(vmRef ref.Ref) (pvs []core.PersistentVolu
666666
planbase.AnnDiskSource: da.Disk.ID,
667667
"lun": "true",
668668
},
669-
Labels: map[string]string{
670-
"volume": fmt.Sprintf("%v-%v", vm.Name, da.ID),
671-
"vmID": vm.ID,
672-
"plan": string(r.Plan.UID),
673-
"migration": string(r.Migration.UID),
674-
},
669+
Labels: r.Labeler.VMLabelsWithExtra(vmRef, map[string]string{
670+
"volume": fmt.Sprintf("%v-%v", vm.Name, da.ID),
671+
}),
675672
},
676673
Spec: core.PersistentVolumeSpec{
677674
PersistentVolumeSource: pvSource,
@@ -710,11 +707,7 @@ func (r *Builder) LunPersistentVolumeClaims(vmRef ref.Ref) (pvcs []core.Persiste
710707
planbase.AnnDiskSource: da.Disk.ID,
711708
"lun": "true",
712709
},
713-
Labels: map[string]string{
714-
"vmID": vm.ID,
715-
"plan": string(r.Plan.UID),
716-
"migration": string(r.Migration.UID),
717-
},
710+
Labels: r.Labeler.VMLabels(vmRef),
718711
},
719712
Spec: core.PersistentVolumeClaimSpec{
720713
AccessModes: []core.PersistentVolumeAccessMode{
@@ -833,7 +826,6 @@ func (r *Builder) getVolumePopulator(diskID string) (populatorCr api.OvirtVolume
833826
}
834827

835828
func (r *Builder) createVolumePopulatorCR(diskAttachment model.XDiskAttachment, secretName, vmId string) (name string, err error) {
836-
migrationId := string(r.Migration.UID)
837829
providerURL, err := url.Parse(r.Source.Provider.Spec.URL)
838830
if err != nil {
839831
err = liberr.Wrap(err)
@@ -847,11 +839,9 @@ func (r *Builder) createVolumePopulatorCR(diskAttachment model.XDiskAttachment,
847839
ObjectMeta: meta.ObjectMeta{
848840
GenerateName: fmt.Sprintf("%s-", diskAttachment.DiskAttachment.ID),
849841
Namespace: r.Plan.Spec.TargetNamespace,
850-
Labels: map[string]string{
851-
"vmID": vmId,
852-
"migration": migrationId,
853-
"diskID": diskAttachment.Disk.ID,
854-
},
842+
Labels: r.Labeler.VMLabelsWithExtra(ref.Ref{ID: vmId}, map[string]string{
843+
"diskID": diskAttachment.Disk.ID,
844+
}),
855845
},
856846
Spec: api.OvirtVolumePopulatorSpec{
857847
EngineURL: engineURL.String(),
@@ -919,11 +909,9 @@ func (r *Builder) persistentVolumeClaimWithSourceRef(diskAttachment model.XDiskA
919909
GenerateName: fmt.Sprintf("%s-", diskAttachment.DiskAttachment.ID),
920910
Namespace: r.Plan.Spec.TargetNamespace,
921911
Annotations: annotations,
922-
Labels: map[string]string{
923-
"migration": string(r.Migration.UID),
924-
"vmID": vmID,
925-
"diskID": diskAttachment.Disk.ID,
926-
},
912+
Labels: r.Labeler.VMLabelsWithExtra(ref.Ref{ID: vmID}, map[string]string{
913+
"diskID": diskAttachment.Disk.ID,
914+
}),
927915
},
928916
Spec: core.PersistentVolumeClaimSpec{
929917
AccessModes: accessModes,
@@ -990,7 +978,7 @@ func (r *Builder) SetPopulatorDataSourceLabels(vmRef ref.Ref, pvcs []*core.Persi
990978
diskIds = append(diskIds, pvc.Spec.DataSource.Name)
991979
}
992980
}
993-
migrationID := string(r.Plan.Status.Migration.ActiveSnapshot().Migration.UID)
981+
migrationID := r.ActiveMigrationUID()
994982
for _, id := range diskIds {
995983
populatorCr, err := r.getVolumePopulator(id)
996984
if err != nil {

pkg/controller/plan/adapter/ovirt/destinationclient.go

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -69,7 +69,7 @@ func (r *DestinationClient) getPopulatorCrList() (populatorCrList v1beta1.OvirtV
6969
&populatorCrList,
7070
&client.ListOptions{
7171
Namespace: r.Plan.Spec.TargetNamespace,
72-
LabelSelector: labels.SelectorFromSet(map[string]string{"migration": string(r.Plan.Status.Migration.ActiveSnapshot().Migration.UID)}),
72+
LabelSelector: labels.SelectorFromSet(map[string]string{"migration": r.ActiveMigrationUID()}),
7373
})
7474
return
7575
}
@@ -104,7 +104,7 @@ func (r *DestinationClient) findPVCByCR(cr *v1beta1.OvirtVolumePopulator) (pvc *
104104
&client.ListOptions{
105105
Namespace: r.Plan.Spec.TargetNamespace,
106106
LabelSelector: labels.SelectorFromSet(map[string]string{
107-
"migration": string(r.Plan.Status.Migration.ActiveSnapshot().Migration.UID),
107+
"migration": r.ActiveMigrationUID(),
108108
"diskID": cr.Spec.DiskID,
109109
}),
110110
})

pkg/controller/plan/adapter/vsphere/builder.go

Lines changed: 3 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1395,12 +1395,10 @@ func (r *Builder) PopulatorVolumes(vmRef ref.Ref, annotations map[string]string,
13951395
}
13961396

13971397
namespace := r.Plan.Spec.TargetNamespace
1398-
labels := map[string]string{
1399-
"migration": string(r.Migration.UID),
1400-
// we need uniqness and a value which is less than 64 chars, hence using vmRef.id + disk.key
1398+
// we need uniqueness and a value which is less than 64 chars, hence using vmRef.id + disk.key
1399+
labels := r.Labeler.VMLabelsWithExtra(vmRef, map[string]string{
14011400
"vmdkKey": fmt.Sprint(disk.Key),
1402-
"vmID": vmRef.ID,
1403-
}
1401+
})
14041402
// Only add the NAA label if it's a valid Kubernetes label value
14051403
if errs := k8svalidation.IsValidLabelValue(naa); len(errs) == 0 {
14061404
labels[TemplateNAALabel] = naa

pkg/controller/plan/adapter/vsphere/builder_test.go

Lines changed: 23 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -594,28 +594,31 @@ func createBuilder(objs ...runtime.Object) *Builder {
594594
WithScheme(scheme).
595595
WithRuntimeObjects(objs...).
596596
Build()
597-
return &Builder{
598-
Context: &plancontext.Context{
599-
Destination: plancontext.Destination{
600-
Client: client,
601-
},
602-
Source: plancontext.Source{
603-
Provider: &v1beta1.Provider{
604-
ObjectMeta: meta.ObjectMeta{Name: "test-vsphere-provider", Namespace: "test"},
605-
Spec: v1beta1.ProviderSpec{
606-
Type: (*v1beta1.ProviderType)(ptr.To("vsphere")),
607-
URL: "test-vsphere-provider",
608-
},
609-
},
610-
Inventory: nil,
611-
Secret: &core.Secret{
612-
ObjectMeta: meta.ObjectMeta{Name: "test-provider-secret", Namespace: "test"},
597+
ctx := &plancontext.Context{
598+
Destination: plancontext.Destination{
599+
Client: client,
600+
},
601+
Source: plancontext.Source{
602+
Provider: &v1beta1.Provider{
603+
ObjectMeta: meta.ObjectMeta{Name: "test-vsphere-provider", Namespace: "test"},
604+
Spec: v1beta1.ProviderSpec{
605+
Type: (*v1beta1.ProviderType)(ptr.To("vsphere")),
606+
URL: "test-vsphere-provider",
613607
},
614608
},
615-
Plan: createPlan(),
616-
Migration: &v1beta1.Migration{ObjectMeta: meta.ObjectMeta{UID: k8stypes.UID("123")}},
617-
Log: builderLog,
618-
Client: client,
609+
Inventory: nil,
610+
Secret: &core.Secret{
611+
ObjectMeta: meta.ObjectMeta{Name: "test-provider-secret", Namespace: "test"},
612+
},
619613
},
614+
Plan: createPlan(),
615+
Migration: &v1beta1.Migration{ObjectMeta: meta.ObjectMeta{UID: k8stypes.UID("123")}},
616+
Log: builderLog,
617+
Client: client,
618+
}
619+
// Initialize the Labeler (normally done in Context.build())
620+
ctx.Labeler = plancontext.Labeler{Context: ctx}
621+
return &Builder{
622+
Context: ctx,
620623
}
621624
}

pkg/controller/plan/context/labeler.go

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -35,3 +35,14 @@ func (r *Labeler) VMLabels(vmRef ref.Ref) map[string]string {
3535
labels[LabelVM] = vmRef.ID
3636
return labels
3737
}
38+
39+
// VMLabelsWithExtra returns the standard VM labels (plan, migration, vmID) plus any additional
40+
// provider-specific labels. This allows providers to add labels like diskID, imageID, vmdkKey, etc.
41+
// while ensuring the core labels (plan, migration, vmID) are always present.
42+
func (r *Labeler) VMLabelsWithExtra(vmRef ref.Ref, extraLabels map[string]string) map[string]string {
43+
labels := r.VMLabels(vmRef)
44+
for k, v := range extraLabels {
45+
labels[k] = v
46+
}
47+
return labels
48+
}

pkg/controller/plan/context/migration.go

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -100,6 +100,17 @@ func (r *Context) build() (err error) {
100100
return
101101
}
102102

103+
// ActiveMigrationUID returns the UID of the active migration.
104+
// During execution the Migration is set on the context; during archive
105+
// it is intentionally empty so we fall back to the plan's history.
106+
func (r *Context) ActiveMigrationUID() string {
107+
if r.Migration != nil && r.Migration.UID != "" {
108+
return string(r.Migration.UID)
109+
}
110+
snapshot := r.Plan.Status.Migration.ActiveSnapshot()
111+
return string(snapshot.Migration.UID)
112+
}
113+
103114
// Set the migration.
104115
// This will update the logger context.
105116
func (r *Context) SetMigration(migration *api.Migration) {

0 commit comments

Comments
 (0)