
Commit 74a67b6

magnusmeng authored and wikkyk committed
schedule vms evenly across nodes when memory allows
1 parent ebec335 commit 74a67b6


2 files changed: +68 -2 lines changed


internal/service/scheduler/vmscheduler.go

Lines changed: 5 additions & 2 deletions
@@ -109,9 +109,12 @@ func selectNode(
 	sort.Sort(byReplicas)
 
 	decision := byMemory[0].Name
-	if requestedMemory < byReplicas[0].AvailableMemory {
+	for _, info := range byReplicas {
 		// distribute round-robin when memory allows it
-		decision = byReplicas[0].Name
+		if requestedMemory < info.AvailableMemory {
+			decision = info.Name
+			break
+		}
 	}
 
 	if logger := logr.FromContextOrDiscard(ctx); logger.V(4).Enabled() {
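
For illustration, here is a minimal standalone sketch of how the patched loop spreads VMs. It is not the actual scheduler code: the nodeInfo struct, the pickNode helper, and the tie-breaking of the replica ordering by free memory are assumptions made for this sketch; only the loop body mirrors the lines changed above, and the real selectNode builds its byMemory/byReplicas orderings elsewhere in vmscheduler.go.

// sketch.go - illustrative only; types and sort keys are assumptions,
// not the actual scheduler implementation.
package main

import (
	"fmt"
	"sort"
)

// nodeInfo is a stand-in for the scheduler's per-node bookkeeping.
type nodeInfo struct {
	Name            string
	AvailableMemory uint64 // bytes
	Replicas        int    // VMs already scheduled onto this node
}

// pickNode mirrors the patched loop: fall back to the node with the most
// free memory, then walk nodes ordered by replica count (fewest first,
// ties broken by free memory) and take the first that fits the request.
func pickNode(nodes []nodeInfo, requestedMemory uint64) string {
	byMemory := append([]nodeInfo(nil), nodes...)
	sort.Slice(byMemory, func(i, j int) bool {
		return byMemory[i].AvailableMemory > byMemory[j].AvailableMemory
	})

	byReplicas := append([]nodeInfo(nil), nodes...)
	sort.Slice(byReplicas, func(i, j int) bool {
		if byReplicas[i].Replicas != byReplicas[j].Replicas {
			return byReplicas[i].Replicas < byReplicas[j].Replicas
		}
		return byReplicas[i].AvailableMemory > byReplicas[j].AvailableMemory
	})

	decision := byMemory[0].Name
	for _, info := range byReplicas {
		// distribute round-robin when memory allows it
		if requestedMemory < info.AvailableMemory {
			decision = info.Name
			break
		}
	}
	return decision
}

func main() {
	const mib = uint64(1024 * 1024)
	nodes := []nodeInfo{
		{Name: "pve1", AvailableMemory: 25 * mib},
		{Name: "pve2", AvailableMemory: 35 * mib},
		{Name: "pve3", AvailableMemory: 15 * mib},
	}
	// Place 8 MiB VMs one after another; the pick follows the replica
	// counts as long as the chosen node still has room.
	for i := 0; i < 8; i++ {
		name := pickNode(nodes, 8*mib)
		for j := range nodes {
			if nodes[j].Name == name {
				nodes[j].AvailableMemory -= 8 * mib
				nodes[j].Replicas++
			}
		}
		fmt.Println(name)
	}
}

Under these assumptions the sketch places eight 8 MiB VMs in the order pve2, pve1, pve3, pve2, pve1, pve2, pve1, pve2 — the same spread the new test below expects.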

internal/service/scheduler/vmscheduler_test.go

Lines changed: 63 additions & 0 deletions
@@ -107,6 +107,69 @@ func TestSelectNode(t *testing.T) {
 	})
 }
 
+func TestSelectNodeEvenlySpread(t *testing.T) {
+	// Verify that VMs are scheduled evenly across nodes when memory allows
+	allowedNodes := []string{"pve1", "pve2", "pve3"}
+	var locations []infrav1.NodeLocation
+	const requestMiB = 8
+	availableMem := map[string]uint64{
+		"pve1": miBytes(25), // enough for 3 VMs
+		"pve2": miBytes(35), // enough for 4 VMs
+		"pve3": miBytes(15), // enough for 1 VM
+	}
+
+	expectedNodes := []string{
+		// initial round-robin: everyone has enough memory
+		"pve2", "pve1", "pve3",
+		// second round-robin: pve3 out of memory
+		"pve2", "pve1", "pve2",
+		// third round-robin: pve1 and pve2 has room for one more VM each
+		"pve1", "pve2",
+	}
+
+	for i, expectedNode := range expectedNodes {
+		t.Run(fmt.Sprintf("round %d", i+1), func(t *testing.T) {
+			proxmoxMachine := &infrav1.ProxmoxMachine{
+				Spec: infrav1.ProxmoxMachineSpec{
+					MemoryMiB: requestMiB,
+				},
+			}
+
+			client := fakeResourceClient(availableMem)
+
+			node, err := selectNode(context.Background(), client, proxmoxMachine, locations, allowedNodes, &infrav1.SchedulerHints{})
+			require.NoError(t, err)
+			require.Equal(t, expectedNode, node)
+
+			require.Greater(t, availableMem[node], miBytes(requestMiB))
+			availableMem[node] -= miBytes(requestMiB)
+
+			locations = append(locations, infrav1.NodeLocation{Node: node})
+		})
+	}
+
+	t.Run("out of memory", func(t *testing.T) {
+		proxmoxMachine := &infrav1.ProxmoxMachine{
+			Spec: infrav1.ProxmoxMachineSpec{
+				MemoryMiB: requestMiB,
+			},
+		}
+
+		client := fakeResourceClient(availableMem)
+
+		node, err := selectNode(context.Background(), client, proxmoxMachine, locations, allowedNodes, &infrav1.SchedulerHints{})
+		require.ErrorAs(t, err, &InsufficientMemoryError{})
+		require.Empty(t, node)
+
+		expectMem := map[string]uint64{
+			"pve1": miBytes(1), // 25 - 8 x 3
+			"pve2": miBytes(3), // 35 - 8 x 4
+			"pve3": miBytes(7), // 15 - 8 x 1
+		}
+		require.Equal(t, expectMem, availableMem)
+	})
+}
+
 func TestScheduleVM(t *testing.T) {
 	ctrlClient := setupClient()
 	require.NotNil(t, ctrlClient)
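
For reference, the leftover figures asserted in the "out of memory" subtest follow from the spread above (3 VMs on pve1, 4 on pve2, 1 on pve3, at 8 MiB each). A tiny standalone check of that arithmetic, assuming miBytes is the test package's MiB-to-bytes helper:

package main

import "fmt"

// miBytes is assumed to mirror the test helper: MiB -> bytes.
func miBytes(n uint64) uint64 { return n * 1024 * 1024 }

func main() {
	// Leftover memory once no further 8 MiB VM fits anywhere:
	fmt.Println(miBytes(25)-3*miBytes(8) == miBytes(1)) // pve1: true
	fmt.Println(miBytes(35)-4*miBytes(8) == miBytes(3)) // pve2: true
	fmt.Println(miBytes(15)-1*miBytes(8) == miBytes(7)) // pve3: true
}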
