Skip to content

Commit 74a5992

Browse files
authored
Add test for ScheduleVM (#423)
1 parent bed7ddf commit 74a5992

10 files changed

+208
-2
lines changed

api/v1alpha1/proxmoxmachine_types.go

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -113,6 +113,16 @@ type ProxmoxMachineSpec struct {
113113
// MetadataSettings defines the metadata settings for this machine's VM.
114114
// +optional
115115
MetadataSettings *MetadataSettings `json:"metadataSettings,omitempty"`
116+
117+
// AllowedNodes specifies all Proxmox nodes which will be considered
118+
// for operations. This implies that VMs can be cloned on different nodes from
119+
// the node which holds the VM template.
120+
//
121+
// This field is optional and should only be set if you want to restrict
122+
// the nodes where the VM can be cloned.
123+
// If not set, the ProxmoxCluster will be used to determine the nodes.
124+
// +optional
125+
AllowedNodes []string `json:"allowedNodes,omitempty"`
116126
}
117127

118128
// Storage is the physical storage on the node.

api/v1alpha1/zz_generated.deepcopy.go

Lines changed: 5 additions & 0 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclusters.yaml

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -72,6 +72,19 @@ spec:
7272
description: ProxmoxMachineSpec defines the desired state of
7373
a ProxmoxMachine.
7474
properties:
75+
allowedNodes:
76+
description: |-
77+
AllowedNodes specifies all Proxmox nodes which will be considered
78+
for operations. This implies that VMs can be cloned on different nodes from
79+
the node which holds the VM template.
80+
81+
82+
This field is optional and should only be set if you want to restrict
83+
the nodes where the VM can be cloned.
84+
If not set, the ProxmoxCluster will be used to determine the nodes.
85+
items:
86+
type: string
87+
type: array
7588
checks:
7689
description: Checks defines possible checks to skip.
7790
properties:

config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclustertemplates.yaml

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -94,6 +94,19 @@ spec:
9494
description: ProxmoxMachineSpec defines the desired
9595
state of a ProxmoxMachine.
9696
properties:
97+
allowedNodes:
98+
description: |-
99+
AllowedNodes specifies all Proxmox nodes which will be considered
100+
for operations. This implies that VMs can be cloned on different nodes from
101+
the node which holds the VM template.
102+
103+
104+
This field is optional and should only be set if you want to restrict
105+
the nodes where the VM can be cloned.
106+
If not set, the ProxmoxCluster will be used to determine the nodes.
107+
items:
108+
type: string
109+
type: array
97110
checks:
98111
description: Checks defines possible checks to
99112
skip.

config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachines.yaml

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -65,6 +65,19 @@ spec:
6565
spec:
6666
description: ProxmoxMachineSpec defines the desired state of a ProxmoxMachine.
6767
properties:
68+
allowedNodes:
69+
description: |-
70+
AllowedNodes specifies all Proxmox nodes which will be considered
71+
for operations. This implies that VMs can be cloned on different nodes from
72+
the node which holds the VM template.
73+
74+
75+
This field is optional and should only be set if you want to restrict
76+
the nodes where the VM can be cloned.
77+
If not set, the ProxmoxCluster will be used to determine the nodes.
78+
items:
79+
type: string
80+
type: array
6881
checks:
6982
description: Checks defines possible checks to skip.
7083
properties:

config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxmachinetemplates.yaml

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -76,6 +76,19 @@ spec:
7676
description: ProxmoxMachineSpec defines the desired state of a
7777
ProxmoxMachine.
7878
properties:
79+
allowedNodes:
80+
description: |-
81+
AllowedNodes specifies all Proxmox nodes which will be considered
82+
for operations. This implies that VMs can be cloned on different nodes from
83+
the node which holds the VM template.
84+
85+
86+
This field is optional and should only be set if you want to restrict
87+
the nodes where the VM can be cloned.
88+
If not set, the ProxmoxCluster will be used to determine the nodes.
89+
items:
90+
type: string
91+
type: array
7992
checks:
8093
description: Checks defines possible checks to skip.
8194
properties:

internal/service/scheduler/vmscheduler.go

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -47,13 +47,19 @@ func (err InsufficientMemoryError) Error() string {
4747
// It requires the machine's ProxmoxCluster to have at least 1 allowed node.
4848
func ScheduleVM(ctx context.Context, machineScope *scope.MachineScope) (string, error) {
4949
client := machineScope.InfraCluster.ProxmoxClient
50+
// Use the default allowed nodes from the ProxmoxCluster.
5051
allowedNodes := machineScope.InfraCluster.ProxmoxCluster.Spec.AllowedNodes
5152
schedulerHints := machineScope.InfraCluster.ProxmoxCluster.Spec.SchedulerHints
5253
locations := machineScope.InfraCluster.ProxmoxCluster.Status.NodeLocations.Workers
5354
if util.IsControlPlaneMachine(machineScope.Machine) {
5455
locations = machineScope.InfraCluster.ProxmoxCluster.Status.NodeLocations.ControlPlane
5556
}
5657

58+
// If ProxmoxMachine defines allowedNodes use them instead
59+
if len(machineScope.ProxmoxMachine.Spec.AllowedNodes) > 0 {
60+
allowedNodes = machineScope.ProxmoxMachine.Spec.AllowedNodes
61+
}
62+
5763
return selectNode(ctx, client, machineScope.ProxmoxMachine, locations, allowedNodes, schedulerHints)
5864
}
5965

internal/service/scheduler/vmscheduler_test.go

Lines changed: 109 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -22,8 +22,19 @@ import (
2222
"testing"
2323

2424
"github.com/stretchr/testify/require"
25+
corev1 "k8s.io/api/core/v1"
26+
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
27+
"k8s.io/apimachinery/pkg/runtime"
28+
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
29+
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
30+
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
31+
"sigs.k8s.io/controller-runtime/pkg/client"
32+
"sigs.k8s.io/controller-runtime/pkg/client/fake"
2533

2634
infrav1 "github.com/ionos-cloud/cluster-api-provider-proxmox/api/v1alpha1"
35+
"github.com/ionos-cloud/cluster-api-provider-proxmox/pkg/kubernetes/ipam"
36+
"github.com/ionos-cloud/cluster-api-provider-proxmox/pkg/proxmox/proxmoxtest"
37+
"github.com/ionos-cloud/cluster-api-provider-proxmox/pkg/scope"
2738
)
2839

2940
type fakeResourceClient map[string]uint64
@@ -95,3 +106,101 @@ func TestSelectNode(t *testing.T) {
95106
require.Equal(t, expectMem, availableMem)
96107
})
97108
}
109+
110+
func TestScheduleVM(t *testing.T) {
111+
ctrlClient := setupClient()
112+
require.NotNil(t, ctrlClient)
113+
114+
ipamHelper := &ipam.Helper{}
115+
116+
proxmoxCluster := infrav1.ProxmoxCluster{
117+
ObjectMeta: metav1.ObjectMeta{
118+
Name: "bar",
119+
},
120+
Spec: infrav1.ProxmoxClusterSpec{
121+
AllowedNodes: []string{"pve1", "pve2", "pve3"},
122+
},
123+
Status: infrav1.ProxmoxClusterStatus{
124+
NodeLocations: &infrav1.NodeLocations{
125+
ControlPlane: []infrav1.NodeLocation{},
126+
Workers: []infrav1.NodeLocation{
127+
{
128+
Node: "pve1",
129+
Machine: corev1.LocalObjectReference{
130+
Name: "foo-machine",
131+
},
132+
},
133+
},
134+
},
135+
},
136+
}
137+
138+
err := ctrlClient.Create(context.Background(), &proxmoxCluster)
139+
require.NoError(t, err)
140+
141+
proxmoxMachine := &infrav1.ProxmoxMachine{
142+
ObjectMeta: metav1.ObjectMeta{
143+
Name: "foo-machine",
144+
Labels: map[string]string{
145+
"cluster.x-k8s.io/cluster-name": "bar",
146+
},
147+
},
148+
Spec: infrav1.ProxmoxMachineSpec{
149+
MemoryMiB: 10,
150+
},
151+
}
152+
153+
fakeProxmoxClient := proxmoxtest.NewMockClient(t)
154+
155+
cluster := &clusterv1.Cluster{
156+
ObjectMeta: metav1.ObjectMeta{
157+
Name: "bar",
158+
Namespace: "default",
159+
},
160+
}
161+
machineScope, err := scope.NewMachineScope(scope.MachineScopeParams{
162+
Client: ctrlClient,
163+
Machine: &clusterv1.Machine{
164+
ObjectMeta: metav1.ObjectMeta{
165+
Name: "foo-machine",
166+
Namespace: "default",
167+
},
168+
},
169+
Cluster: cluster,
170+
InfraCluster: &scope.ClusterScope{
171+
Cluster: cluster,
172+
ProxmoxCluster: &proxmoxCluster,
173+
ProxmoxClient: fakeProxmoxClient,
174+
},
175+
ProxmoxMachine: proxmoxMachine,
176+
IPAMHelper: ipamHelper,
177+
})
178+
require.NoError(t, err)
179+
180+
fakeProxmoxClient.EXPECT().GetReservableMemoryBytes(context.Background(), "pve1", uint64(100)).Return(miBytes(60), nil)
181+
fakeProxmoxClient.EXPECT().GetReservableMemoryBytes(context.Background(), "pve2", uint64(100)).Return(miBytes(20), nil)
182+
fakeProxmoxClient.EXPECT().GetReservableMemoryBytes(context.Background(), "pve3", uint64(100)).Return(miBytes(20), nil)
183+
184+
node, err := ScheduleVM(context.Background(), machineScope)
185+
require.NoError(t, err)
186+
require.Equal(t, "pve2", node)
187+
}
188+
189+
func TestInsufficientMemoryError_Error(t *testing.T) {
190+
err := InsufficientMemoryError{
191+
node: "pve1",
192+
available: 10,
193+
requested: 20,
194+
}
195+
require.Equal(t, "cannot reserve 20B of memory on node pve1: 10B available memory left", err.Error())
196+
}
197+
198+
func setupClient() client.Client {
199+
scheme := runtime.NewScheme()
200+
utilruntime.Must(clientgoscheme.AddToScheme(scheme))
201+
utilruntime.Must(infrav1.AddToScheme(scheme))
202+
utilruntime.Must(infrav1.AddToScheme(scheme))
203+
204+
fakeClient := fake.NewClientBuilder().WithScheme(scheme).Build()
205+
return fakeClient
206+
}

internal/service/vmservice/vm.go

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -368,9 +368,10 @@ func createVM(ctx context.Context, scope *scope.MachineScope) (proxmox.VMCloneRe
368368
scope.InfraCluster.ProxmoxCluster.Status.NodeLocations = new(infrav1alpha1.NodeLocations)
369369
}
370370

371-
// if no target was specified but we have a set of nodes defined in the cluster spec, we want to evenly distribute
371+
// if no target was specified but we have a set of nodes defined in the spec, we want to evenly distribute
372372
// the nodes across the cluster.
373-
if scope.ProxmoxMachine.Spec.Target == nil && len(scope.InfraCluster.ProxmoxCluster.Spec.AllowedNodes) > 0 {
373+
if scope.ProxmoxMachine.Spec.Target == nil &&
374+
(len(scope.InfraCluster.ProxmoxCluster.Spec.AllowedNodes) > 0 || len(scope.ProxmoxMachine.Spec.AllowedNodes) > 0) {
374375
// select next node as a target
375376
var err error
376377
options.Target, err = selectNextNode(ctx, scope)

internal/service/vmservice/vm_test.go

Lines changed: 23 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -241,6 +241,29 @@ func TestEnsureVirtualMachine_CreateVM_SelectNode(t *testing.T) {
241241
requireConditionIsFalse(t, machineScope.ProxmoxMachine, infrav1alpha1.VMProvisionedCondition)
242242
}
243243

244+
func TestEnsureVirtualMachine_CreateVM_SelectNode_MachineAllowedNodes(t *testing.T) {
245+
machineScope, proxmoxClient, _ := setupReconcilerTest(t)
246+
machineScope.InfraCluster.ProxmoxCluster.Spec.AllowedNodes = []string{"node1", "node2", "node3", "node4"}
247+
machineScope.ProxmoxMachine.Spec.AllowedNodes = []string{"node1", "node2"}
248+
249+
selectNextNode = func(context.Context, *scope.MachineScope) (string, error) {
250+
return "node2", nil
251+
}
252+
t.Cleanup(func() { selectNextNode = scheduler.ScheduleVM })
253+
254+
expectedOptions := proxmox.VMCloneRequest{Node: "node1", Name: "test", Target: "node2"}
255+
response := proxmox.VMCloneResponse{NewID: 123, Task: newTask()}
256+
proxmoxClient.EXPECT().CloneVM(context.Background(), 123, expectedOptions).Return(response, nil).Once()
257+
258+
requeue, err := ensureVirtualMachine(context.Background(), machineScope)
259+
require.NoError(t, err)
260+
require.True(t, requeue)
261+
262+
require.Equal(t, "node2", *machineScope.ProxmoxMachine.Status.ProxmoxNode)
263+
require.True(t, machineScope.InfraCluster.ProxmoxCluster.HasMachine(machineScope.Name(), false))
264+
requireConditionIsFalse(t, machineScope.ProxmoxMachine, infrav1alpha1.VMProvisionedCondition)
265+
}
266+
244267
func TestEnsureVirtualMachine_CreateVM_SelectNode_InsufficientMemory(t *testing.T) {
245268
machineScope, _, _ := setupReconcilerTest(t)
246269
machineScope.InfraCluster.ProxmoxCluster.Spec.AllowedNodes = []string{"node1"}

0 commit comments

Comments
 (0)