
Commit 32390fa

Author: Ricardo Katz
Storagepolicy should select ds based on connected cluster
1 parent de5ee99 commit 32390fa

File tree

1 file changed (+29 −32 lines)

pkg/services/govmomi/vcenter/clone.go

Lines changed: 29 additions & 32 deletions
@@ -25,7 +25,6 @@ import (
 	"github.com/vmware/govmomi/object"
 	"github.com/vmware/govmomi/pbm"
 	pbmTypes "github.com/vmware/govmomi/pbm/types"
-	"github.com/vmware/govmomi/view"
 	"github.com/vmware/govmomi/vim25/mo"
 	"github.com/vmware/govmomi/vim25/types"
 	"k8s.io/utils/pointer"
@@ -229,27 +228,31 @@ func Clone(ctx *context.VMContext, bootstrapData []byte, format bootstrapv1.Form
 			return errors.Wrapf(err, "unable to get storageProfileID from name %s for %q", ctx.VSphereVM.Spec.StoragePolicyName, ctx)
 		}
 
-		kind := []string{"Datastore"}
-		m := view.NewManager(ctx.Session.Client.Client)
-
-		v, err := m.CreateContainerView(ctx, ctx.Session.Client.Client.ServiceContent.RootFolder, kind, true)
-		if err != nil {
-			return errors.Wrapf(err, "unable to create container view for Datastore for %q", ctx)
-		}
-
-		var content []types.ObjectContent
-		err = v.Retrieve(ctx, kind, []string{"name"}, &content)
-		_ = v.Destroy(ctx)
-		if err != nil {
-			return errors.Wrapf(err, "unable to retrieve container view for Datastore for %q", ctx)
-		}
-
 		var hubs []pbmTypes.PbmPlacementHub
-		for _, ds := range content {
+
+		// If there's a Datastore configured, it should be the only one for which we check if it matches the requirements of the Storage Policy
+		if datastoreRef != nil {
 			hubs = append(hubs, pbmTypes.PbmPlacementHub{
-				HubType: ds.Obj.Type,
-				HubId:   ds.Obj.Value,
+				HubType: datastoreRef.Type,
+				HubId:   datastoreRef.Value,
 			})
+		} else {
+			// Otherwise we should get just the Datastores connected to our pool
+			cluster, err := pool.Owner(ctx)
+			if err != nil {
+				return errors.Wrapf(err, "failed to get owning cluster of resourcepool %q to calculate datastore based on storage policy", pool)
+			}
+			dsGetter := object.NewComputeResource(ctx.Session.Client.Client, cluster.Reference())
+			datastores, err := dsGetter.Datastores(ctx)
+			if err != nil {
+				return errors.Wrapf(err, "unable to list datastores from owning cluster of requested resourcepool")
+			}
+			for _, ds := range datastores {
+				hubs = append(hubs, pbmTypes.PbmPlacementHub{
+					HubType: ds.Reference().Type,
+					HubId:   ds.Reference().Value,
+				})
+			}
 		}
 
 		var constraints []pbmTypes.BasePbmPlacementRequirement
@@ -263,25 +266,18 @@ func Clone(ctx *context.VMContext, bootstrapData []byte, format bootstrapv1.Form
 			return fmt.Errorf("no compatible datastores found for storage policy: %s", ctx.VSphereVM.Spec.StoragePolicyName)
 		}
 
-		if datastoreRef != nil {
-			ctx.Logger.Info("datastore and storagepolicy defined; searching for datastore in storage policy compatible datastores")
-			found := false
-			for _, ds := range result.CompatibleDatastores() {
-				compatibleRef := types.ManagedObjectReference{Type: ds.HubType, Value: ds.HubId}
-				if compatibleRef.String() == datastoreRef.String() {
-					found = true
-				}
-			}
-			if !found {
-				return fmt.Errorf("couldn't find specified datastore: %s in compatible list of datastores for storage policy", ctx.VSphereVM.Spec.Datastore)
-			}
-		} else {
+		// If datastoreRef is nil here it means that the user didn't specify a Datastore. So we should
+		// select one of the datastores of the owning cluster of the resource pool that matched the
+		// requirements of the storage policy.
+		if datastoreRef == nil {
 			r := rand.New(rand.NewSource(time.Now().UnixNano())) //nolint:gosec
 			ds := result.CompatibleDatastores()[r.Intn(len(result.CompatibleDatastores()))]
 			datastoreRef = &types.ManagedObjectReference{Type: ds.HubType, Value: ds.HubId}
 		}
 	}
 
+	// if datastoreRef is nil here, means that user didn't specified a datastore NOR a
+	// storagepolicy, so we should select the default
 	if datastoreRef == nil {
 		// if no datastore defined through VM spec or storage policy, use default
 		datastore, err := ctx.Session.Finder.DefaultDatastore(ctx)
@@ -294,6 +290,7 @@ func Clone(ctx *context.VMContext, bootstrapData []byte, format bootstrapv1.Form
 	disks := devices.SelectByType((*types.VirtualDisk)(nil))
 	isLinkedClone := snapshotRef != nil
 	spec.Location.Disk = getDiskLocators(disks, *datastoreRef, isLinkedClone)
+	spec.Location.Datastore = datastoreRef
 
 	ctx.Logger.Info("cloning machine", "namespace", ctx.VSphereVM.Namespace, "name", ctx.VSphereVM.Name, "cloneType", ctx.VSphereVM.Status.CloneMode)
 	task, err := tpl.Clone(ctx, folder, ctx.VSphereVM.Name, spec)
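For reference, the selection flow this commit switches to can be read as the standalone sketch below. It is not part of the commit: the function name compatibleClusterDatastores and the parameters vimClient and profileID are illustrative, and the repository performs these steps inline in Clone using its session context. The sketch assumes an existing govmomi vim25 client, the VM's resource pool, and a storage-profile ID already resolved from the policy name.

// Package placement is a minimal sketch of the placement check described above; it is
// NOT code from the commit.
package placement

import (
	"context"

	"github.com/vmware/govmomi/object"
	"github.com/vmware/govmomi/pbm"
	pbmTypes "github.com/vmware/govmomi/pbm/types"
	"github.com/vmware/govmomi/vim25"
)

// compatibleClusterDatastores builds placement hubs only from the datastores attached to
// the resource pool's owning cluster, then asks the Storage Policy Based Management (PBM)
// service which of them satisfy the given storage profile.
func compatibleClusterDatastores(ctx context.Context, vimClient *vim25.Client, pool *object.ResourcePool, profileID string) ([]pbmTypes.PbmPlacementHub, error) {
	// Resolve the compute resource (cluster) that owns the resource pool.
	owner, err := pool.Owner(ctx)
	if err != nil {
		return nil, err
	}
	cluster := object.NewComputeResource(vimClient, owner.Reference())

	// Only datastores connected to that cluster are candidate placement hubs.
	datastores, err := cluster.Datastores(ctx)
	if err != nil {
		return nil, err
	}
	var hubs []pbmTypes.PbmPlacementHub
	for _, ds := range datastores {
		hubs = append(hubs, pbmTypes.PbmPlacementHub{
			HubType: ds.Reference().Type,
			HubId:   ds.Reference().Value,
		})
	}

	// Check the candidate hubs against the policy's capability profile.
	pbmClient, err := pbm.NewClient(ctx, vimClient)
	if err != nil {
		return nil, err
	}
	constraints := []pbmTypes.BasePbmPlacementRequirement{
		&pbmTypes.PbmPlacementCapabilityProfileRequirement{
			ProfileId: pbmTypes.PbmProfileId{UniqueId: profileID},
		},
	}
	result, err := pbmClient.CheckRequirements(ctx, hubs, nil, constraints)
	if err != nil {
		return nil, err
	}
	return result.CompatibleDatastores(), nil
}

The design point of the commit is visible in the first half of the sketch: instead of walking every Datastore under the vCenter root folder through a container view, only the datastores of the cluster that owns the target resource pool are offered to the PBM compatibility check, so the randomly selected compatible datastore is guaranteed to be reachable from where the VM will actually be placed.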

0 commit comments
