diff --git a/api/v1alpha1/proxmoxcluster_types.go b/api/v1alpha1/proxmoxcluster_types.go index 7dbf9b2e..782b65b5 100644 --- a/api/v1alpha1/proxmoxcluster_types.go +++ b/api/v1alpha1/proxmoxcluster_types.go @@ -78,6 +78,15 @@ type SchedulerHints struct { // By default 100% of a node's memory will be used for allocation. // +optional MemoryAdjustment *uint64 `json:"memoryAdjustment,omitempty"` + + // Like MemoryAdjustment, but for CPU resources. + // Defaults to 0 (disabled), as CPU is a compressible resource. + // +optional + CPUAdjustment *uint64 `json:"cpuAdjustment,omitempty"` + + // +optional + // +kubebuilder:default=true + PreferLowerGuestCount bool `json:"preferLowerGuestCount,omitempty"` } // GetMemoryAdjustment returns the memory adjustment percentage to use within the scheduler. @@ -91,6 +100,17 @@ func (sh *SchedulerHints) GetMemoryAdjustment() uint64 { return memoryAdjustment } +// GetCPUAdjustment returns the cpu adjustment percentage to use within the scheduler. +func (sh *SchedulerHints) GetCPUAdjustment() uint64 { + cpuAdjustment := uint64(0) + + if sh != nil { + cpuAdjustment = ptr.Deref(sh.CPUAdjustment, 0) + } + + return cpuAdjustment +} + // ProxmoxClusterStatus defines the observed state of ProxmoxCluster. type ProxmoxClusterStatus struct { // Ready indicates that the cluster is ready. diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index 0471b26a..4d49e2af 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -596,6 +596,11 @@ func (in *SchedulerHints) DeepCopyInto(out *SchedulerHints) { *out = new(uint64) **out = **in } + if in.CPUAdjustment != nil { + in, out := &in.CPUAdjustment, &out.CPUAdjustment + *out = new(uint64) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchedulerHints. diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclusters.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclusters.yaml index 45d09b39..9b2eabc6 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclusters.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_proxmoxclusters.yaml @@ -137,6 +137,11 @@ spec: to a node's resources, to allow for overprovisioning or to ensure a node will always have a safety buffer. properties: + cpuAdjustment: + description: Like MemoryAdjustment, but for CPU resources. Defaults + to 0 (disabled), as CPU is a compressible resource. + format: int64 + type: integer memoryAdjustment: description: MemoryAdjustment allows to adjust a node's memory by a given percentage. For example, setting it to 300 allows @@ -146,6 +151,9 @@ spec: default 100% of a node's memory will be used for allocation. format: int64 type: integer + preferLowerGuestCount: + default: true + type: boolean type: object required: - dnsServers diff --git a/internal/service/scheduler/vmscheduler.go b/internal/service/scheduler/vmscheduler.go index ac6c97a2..114f2bd0 100644 --- a/internal/service/scheduler/vmscheduler.go +++ b/internal/service/scheduler/vmscheduler.go @@ -29,17 +29,16 @@ import ( "sigs.k8s.io/cluster-api/util" ) -// InsufficientMemoryError is used when the scheduler cannot assign a VM to a node because it would -// exceed the node's memory limit. 
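For illustration, a minimal standalone sketch of what the two scheduler hints mean in practice: both are whole-number percentages of node capacity, a nil MemoryAdjustment falls back to 100 (use exactly the node's memory, no overprovisioning), and a nil or zero CPUAdjustment leaves CPU out of the scheduling decision entirely. The types and helper names below are local stand-ins, not the api/v1alpha1 types themselves.

```go
package main

import "fmt"

// schedulerHints stands in for infrav1.SchedulerHints; the real getters live
// in api/v1alpha1. Both hints are whole-number percentages of node capacity.
type schedulerHints struct {
	memoryAdjustment *uint64 // nil => 100: use exactly the node's memory, no overprovisioning
	cpuAdjustment    *uint64 // nil or 0 => CPU is not considered by the scheduler
}

func (sh schedulerHints) memory() uint64 {
	if sh.memoryAdjustment == nil {
		return 100
	}
	return *sh.memoryAdjustment
}

func (sh schedulerHints) cpu() uint64 {
	if sh.cpuAdjustment == nil {
		return 0
	}
	return *sh.cpuAdjustment
}

func main() {
	threeHundred := uint64(300)
	hints := schedulerHints{memoryAdjustment: &threeHundred} // 3x memory overprovisioning, CPU checks off

	nodeMemBytes := uint64(32) << 30 // a 32 GiB host
	nodeCores := uint64(16)

	memBudget := nodeMemBytes * hints.memory() / 100 // 96 GiB may be handed out to guests
	cpuBudget := nodeCores * hints.cpu() / 100       // 0: cores are never a scheduling constraint here

	fmt.Printf("memory budget: %d GiB, cpu budget: %d cores\n", memBudget>>30, cpuBudget)
}
```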
-type InsufficientMemoryError struct { - node string - available uint64 - requested uint64 +// InsufficientResourcesError is used when the scheduler cannot assign a VM to a node because no node +// would be able to provide the requested resources. +type InsufficientResourcesError struct { + requestedMemory uint64 + requestedCores uint64 } -func (err InsufficientMemoryError) Error() string { - return fmt.Sprintf("cannot reserve %dB of memory on node %s: %dB available memory left", - err.requested, err.node, err.available) +func (err InsufficientResourcesError) Error() string { + return fmt.Sprintf("cannot reserve %dB of memory and/or %d vCores in cluster", + err.requestedMemory, err.requestedCores) } // ScheduleVM decides which node to a ProxmoxMachine should be scheduled on. @@ -64,46 +63,66 @@ func selectNode( allowedNodes []string, schedulerHints *infrav1.SchedulerHints, ) (string, error) { - byMemory := make(sortByAvailableMemory, len(allowedNodes)) - for i, nodeName := range allowedNodes { - mem, err := client.GetReservableMemoryBytes(ctx, nodeName, schedulerHints.GetMemoryAdjustment()) + var nodes []nodeInfo + + requestedMemory := uint64(machine.Spec.MemoryMiB) * 1024 * 1024 // convert to bytes + requestedCores := uint64(machine.Spec.NumCores) + + for _, nodeName := range allowedNodes { + mem, cpu, err := client.GetReservableResources( + ctx, + nodeName, + schedulerHints.GetMemoryAdjustment(), + schedulerHints.GetCPUAdjustment(), + ) if err != nil { return "", err } - byMemory[i] = nodeInfo{Name: nodeName, AvailableMemory: mem} - } - sort.Sort(byMemory) + // if MemoryAdjustment is explicitly set to 0 (zero), pretend we have enough mem for the guest + if schedulerHints.GetMemoryAdjustment() == 0 { + mem = requestedMemory + } + // if CPUAdjustment is explicitly set to 0 (zero), pretend we have enough cpu for the guest + if schedulerHints.GetCPUAdjustment() == 0 { + cpu = requestedCores + } - requestedMemory := uint64(machine.Spec.MemoryMiB) * 1024 * 1024 // convert to bytes - if requestedMemory > byMemory[0].AvailableMemory { - // no more space on the node with the highest amount of available memory - return "", InsufficientMemoryError{ - node: byMemory[0].Name, - available: byMemory[0].AvailableMemory, - requested: requestedMemory, + node := nodeInfo{Name: nodeName, AvailableMemory: mem, AvailableCPU: cpu} + if node.AvailableMemory >= requestedMemory && node.AvailableCPU >= requestedCores { + nodes = append(nodes, node) } } + if len(nodes) == 0 { + return "", InsufficientResourcesError{requestedMemory, requestedCores} + } + + // Sort nodes by free memory and then free CPU in descending order + byResources := make(sortByResources, len(nodes)) + copy(byResources, nodes) + sort.Sort(byResources) + + decision := byResources[0].Name + // count the existing vms per node nodeCounter := make(map[string]int) for _, nl := range locations { nodeCounter[nl.Node]++ } - for i, info := range byMemory { + for i, info := range byResources { info.ScheduledVMs = nodeCounter[info.Name] - byMemory[i] = info + byResources[i] = info } - byReplicas := make(sortByReplicas, len(byMemory)) - copy(byReplicas, byMemory) + byReplicas := make(sortByReplicas, len(byResources)) + copy(byReplicas, byResources) sort.Sort(byReplicas) - decision := byMemory[0].Name - if requestedMemory < byReplicas[0].AvailableMemory { - // distribute round-robin when memory allows it + // if memory allocation allows it, pick the node with the least amount of guests + if schedulerHints.PreferLowerGuestCount { decision = byReplicas[0].Name 
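To make the new decision order easier to follow — filter out nodes that cannot fit the request, rank the rest by free memory and then free CPU, and only then let PreferLowerGuestCount re-rank by guest count — here is a self-contained sketch under those assumptions. The node and pick names are illustrative, not the provider's types, and the per-node guest count is simplified to a plain field instead of being derived from NodeLocations.

```go
package main

import (
	"fmt"
	"sort"
)

// node is a stand-in for the scheduler's nodeInfo.
type node struct {
	name string
	mem  uint64 // reservable bytes
	cpu  uint64 // reservable vCores
	vms  int    // guests already scheduled on the node
}

func pick(candidates []node, reqMem, reqCPU uint64, preferLowerGuestCount bool) (string, bool) {
	// 1) keep only nodes that can fit the requested memory and cores
	fit := candidates[:0:0]
	for _, n := range candidates {
		if n.mem >= reqMem && n.cpu >= reqCPU {
			fit = append(fit, n)
		}
	}
	if len(fit) == 0 {
		return "", false // the caller would surface an InsufficientResourcesError here
	}
	// 2) most free memory first, free CPU as the tie-breaker
	sort.Slice(fit, func(i, j int) bool {
		if fit[i].mem != fit[j].mem {
			return fit[i].mem > fit[j].mem
		}
		return fit[i].cpu > fit[j].cpu
	})
	decision := fit[0].name
	// 3) optionally spread guests: the node with the fewest VMs wins instead
	if preferLowerGuestCount {
		sort.Slice(fit, func(i, j int) bool { return fit[i].vms < fit[j].vms })
		decision = fit[0].name
	}
	return decision, true
}

func main() {
	nodes := []node{
		{"pve1", 20 << 30, 16, 3},
		{"pve2", 30 << 30, 16, 5},
		{"pve3", 15 << 30, 16, 1},
	}
	fmt.Println(pick(nodes, 8<<30, 2, false)) // pve2 true (most free memory)
	fmt.Println(pick(nodes, 8<<30, 2, true))  // pve3 true (fewest guests)
}
```

With the hint disabled the node with the most free memory wins; with it enabled the node with the fewest guests does.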
} @@ -111,9 +130,11 @@ func selectNode( // only construct values when message should actually be logged logger.Info("Scheduler decision", "byReplicas", byReplicas.String(), - "byMemory", byMemory.String(), + "byResources", byResources.String(), "requestedMemory", requestedMemory, + "requestedCores", requestedCores, "resultNode", decision, + "schedulerHints", schedulerHints, ) } @@ -121,12 +142,13 @@ func selectNode( } type resourceClient interface { - GetReservableMemoryBytes(context.Context, string, uint64) (uint64, error) + GetReservableResources(context.Context, string, uint64, uint64) (uint64, uint64, error) } type nodeInfo struct { Name string `json:"node"` AvailableMemory uint64 `json:"mem"` + AvailableCPU uint64 `json:"cpu"` ScheduledVMs int `json:"vms"` } @@ -143,16 +165,21 @@ func (a sortByReplicas) String() string { return string(o) } -type sortByAvailableMemory []nodeInfo +type sortByResources []nodeInfo + +func (a sortByResources) Len() int { return len(a) } +func (a sortByResources) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a sortByResources) Less(i, j int) bool { + // Compare by free memory and free CPU in descending order + if a[i].AvailableMemory != a[j].AvailableMemory { + return a[i].AvailableMemory > a[j].AvailableMemory + } -func (a sortByAvailableMemory) Len() int { return len(a) } -func (a sortByAvailableMemory) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a sortByAvailableMemory) Less(i, j int) bool { - // more available memory = lower index - return a[i].AvailableMemory > a[j].AvailableMemory + // If free memory is equal, sort by free CPU in descending order + return a[i].AvailableCPU > a[j].AvailableCPU || (a[i].AvailableCPU == a[j].AvailableCPU && a[i].ScheduledVMs < a[j].ScheduledVMs) } -func (a sortByAvailableMemory) String() string { +func (a sortByResources) String() string { o, _ := json.Marshal(a) return string(o) } diff --git a/internal/service/scheduler/vmscheduler_test.go b/internal/service/scheduler/vmscheduler_test.go index b0e18c44..b49f6a6e 100644 --- a/internal/service/scheduler/vmscheduler_test.go +++ b/internal/service/scheduler/vmscheduler_test.go @@ -25,10 +25,10 @@ import ( "github.com/stretchr/testify/require" ) -type fakeResourceClient map[string]uint64 +type fakeResourceClient map[string]nodeInfo -func (c fakeResourceClient) GetReservableMemoryBytes(_ context.Context, nodeName string, _ uint64) (uint64, error) { - return c[nodeName], nil +func (c fakeResourceClient) GetReservableResources(_ context.Context, nodeName string, _ uint64, _ uint64) (uint64, uint64, error) { + return c[nodeName].AvailableMemory, c[nodeName].AvailableCPU, nil } func miBytes(in uint64) uint64 { @@ -39,10 +39,18 @@ func TestSelectNode(t *testing.T) { allowedNodes := []string{"pve1", "pve2", "pve3"} var locations []infrav1.NodeLocation const requestMiB = 8 - availableMem := map[string]uint64{ - "pve1": miBytes(20), - "pve2": miBytes(30), - "pve3": miBytes(15), + const requestCores = 2 + cpuAdjustment := uint64(100) + + schedulerHints := &infrav1.SchedulerHints{ + // This defaults to true in our CRD + PreferLowerGuestCount: true, + CPUAdjustment: &cpuAdjustment, + } + availableResources := map[string]nodeInfo{ + "pve1": {AvailableMemory: miBytes(20), AvailableCPU: uint64(16)}, + "pve2": {AvailableMemory: miBytes(30), AvailableCPU: uint64(16)}, + "pve3": {AvailableMemory: miBytes(15), AvailableCPU: uint64(16)}, } expectedNodes := []string{ @@ -57,40 +65,47 @@ func TestSelectNode(t *testing.T) { proxmoxMachine := &infrav1.ProxmoxMachine{ Spec: 
infrav1.ProxmoxMachineSpec{ MemoryMiB: requestMiB, + NumCores: requestCores, }, } - client := fakeResourceClient(availableMem) + client := fakeResourceClient(availableResources) - node, err := selectNode(context.Background(), client, proxmoxMachine, locations, allowedNodes, &infrav1.SchedulerHints{}) + node, err := selectNode(context.Background(), client, proxmoxMachine, locations, allowedNodes, schedulerHints) require.NoError(t, err) require.Equal(t, expectedNode, node) - require.Greater(t, availableMem[node], miBytes(requestMiB)) - availableMem[node] -= miBytes(requestMiB) + require.Greater(t, availableResources[node].AvailableMemory, miBytes(requestMiB)) + if entry, ok := availableResources[node]; ok { + entry.AvailableMemory -= miBytes(requestMiB) + entry.AvailableCPU -= requestCores + availableResources[node] = entry + } locations = append(locations, infrav1.NodeLocation{Node: node}) }) } - t.Run("out of memory", func(t *testing.T) { + t.Run("out of resources", func(t *testing.T) { proxmoxMachine := &infrav1.ProxmoxMachine{ Spec: infrav1.ProxmoxMachineSpec{ MemoryMiB: requestMiB, + NumCores: requestCores, }, } - client := fakeResourceClient(availableMem) + client := fakeResourceClient(availableResources) - node, err := selectNode(context.Background(), client, proxmoxMachine, locations, allowedNodes, &infrav1.SchedulerHints{}) - require.ErrorAs(t, err, &InsufficientMemoryError{}) + node, err := selectNode(context.Background(), client, proxmoxMachine, locations, allowedNodes, schedulerHints) + require.ErrorAs(t, err, &InsufficientResourcesError{}) require.Empty(t, node) - expectMem := map[string]uint64{ - "pve1": miBytes(4), // 20 - 8 x 2 - "pve2": miBytes(6), // 30 - 8 x 3 - "pve3": miBytes(7), // 15 - 8 x 1 + expectResources := map[string]nodeInfo{ + "pve1": {AvailableMemory: miBytes(4), AvailableCPU: uint64(12)}, // 20 - 8 x 2 + "pve2": {AvailableMemory: miBytes(6), AvailableCPU: uint64(10)}, // 30 - 8 x 3 + "pve3": {AvailableMemory: miBytes(7), AvailableCPU: uint64(14)}, // 15 - 8 x 1 } - require.Equal(t, expectMem, availableMem) + + require.Equal(t, expectResources, availableResources) }) } diff --git a/internal/service/vmservice/vm.go b/internal/service/vmservice/vm.go index 7dd95908..75340fd3 100644 --- a/internal/service/vmservice/vm.go +++ b/internal/service/vmservice/vm.go @@ -314,7 +314,7 @@ func createVM(ctx context.Context, scope *scope.MachineScope) (proxmox.VMCloneRe var err error options.Target, err = selectNextNode(ctx, scope) if err != nil { - if errors.As(err, &scheduler.InsufficientMemoryError{}) { + if errors.As(err, &scheduler.InsufficientResourcesError{}) { scope.SetFailureMessage(err) scope.SetFailureReason(capierrors.InsufficientResourcesMachineError) } diff --git a/internal/service/vmservice/vm_test.go b/internal/service/vmservice/vm_test.go index 7b88d0f6..f9bc414c 100644 --- a/internal/service/vmservice/vm_test.go +++ b/internal/service/vmservice/vm_test.go @@ -105,7 +105,7 @@ func TestEnsureVirtualMachine_CreateVM_SelectNode_InsufficientMemory(t *testing. 
 	machineScope.InfraCluster.ProxmoxCluster.Spec.AllowedNodes = []string{"node1"}
 	selectNextNode = func(context.Context, *scope.MachineScope) (string, error) {
-		return "", fmt.Errorf("error: %w", scheduler.InsufficientMemoryError{})
+		return "", fmt.Errorf("error: %w", scheduler.InsufficientResourcesError{})
 	}
 	t.Cleanup(func() { selectNextNode = scheduler.ScheduleVM })
diff --git a/pkg/proxmox/client.go b/pkg/proxmox/client.go
index ee5e1aa4..0a6889cf 100644
--- a/pkg/proxmox/client.go
+++ b/pkg/proxmox/client.go
@@ -37,7 +37,7 @@ type Client interface {
 	GetTask(ctx context.Context, upID string) (*proxmox.Task, error)
-	GetReservableMemoryBytes(ctx context.Context, nodeName string, nodeMemoryAdjustment uint64) (uint64, error)
+	GetReservableResources(ctx context.Context, nodeName string, nodeMemoryAdjustment uint64, nodeCPUAdjustment uint64) (uint64, uint64, error)
 	ResizeDisk(ctx context.Context, vm *proxmox.VirtualMachine, disk, size string) error
diff --git a/pkg/proxmox/goproxmox/api_client.go b/pkg/proxmox/goproxmox/api_client.go
index c4f77d10..9dba2d6f 100644
--- a/pkg/proxmox/goproxmox/api_client.go
+++ b/pkg/proxmox/goproxmox/api_client.go
@@ -180,22 +180,28 @@ func (c *APIClient) GetTask(ctx context.Context, upID string) (*proxmox.Task, er
 	return task, nil
 }
-// GetReservableMemoryBytes returns the memory that can be reserved by a new VM, in bytes.
-func (c *APIClient) GetReservableMemoryBytes(ctx context.Context, nodeName string, nodeMemoryAdjustment uint64) (uint64, error) {
+// GetReservableResources returns the memory (in bytes) and the number of vCores that can be reserved by a new VM.
+func (c *APIClient) GetReservableResources(ctx context.Context, nodeName string, nodeMemoryAdjustment uint64, nodeCPUAdjustment uint64) (uint64, uint64, error) {
 	node, err := c.Client.Node(ctx, nodeName)
 	if err != nil {
-		return 0, fmt.Errorf("cannot find node with name %s: %w", nodeName, err)
+		return 0, 0, fmt.Errorf("cannot find node with name %s: %w", nodeName, err)
 	}
 	reservableMemory := uint64(float64(node.Memory.Total) / 100 * float64(nodeMemoryAdjustment))
+	reservableCpus := uint64(float64(node.CPUInfo.CPUs) / 100 * float64(nodeCPUAdjustment))
 	if nodeMemoryAdjustment == 0 {
-		return node.Memory.Total, nil
+		// A memory adjustment of 0 disables memory budgeting entirely:
+		// report the node's total memory without subtracting existing guests.
+ reservableMemory = node.Memory.Total + } + if nodeCPUAdjustment == 0 { + reservableCpus = uint64(node.CPUInfo.CPUs) } vms, err := node.VirtualMachines(ctx) if err != nil { - return 0, fmt.Errorf("cannot list vms for node %s: %w", nodeName, err) + return 0, 0, fmt.Errorf("cannot list vms for node %s: %w", nodeName, err) } for _, vm := range vms { @@ -203,27 +209,45 @@ func (c *APIClient) GetReservableMemoryBytes(ctx context.Context, nodeName strin if vm.Template { continue } - if reservableMemory < vm.MaxMem { - reservableMemory = 0 - } else { - reservableMemory -= vm.MaxMem + if nodeMemoryAdjustment > 0 { + if reservableMemory < vm.MaxMem { + reservableMemory = 0 + } else { + reservableMemory -= vm.MaxMem + } + } + if nodeCPUAdjustment > 0 { + if reservableCpus < uint64(vm.CPUs) { + reservableCpus = 0 + } else { + reservableCpus -= uint64(vm.CPUs) + } } } containers, err := node.Containers(ctx) if err != nil { - return 0, fmt.Errorf("cannot list containers for node %s: %w", nodeName, err) + return 0, 0, fmt.Errorf("cannot list containers for node %s: %w", nodeName, err) } for _, ct := range containers { - if reservableMemory < ct.MaxMem { - reservableMemory = 0 - } else { - reservableMemory -= ct.MaxMem + if nodeMemoryAdjustment > 0 { + if reservableMemory < ct.MaxMem { + reservableMemory = 0 + } else { + reservableMemory -= ct.MaxMem + } + } + if nodeCPUAdjustment > 0 { + if reservableCpus < uint64(ct.CPUs) { + reservableCpus = 0 + } else { + reservableCpus -= uint64(ct.CPUs) + } } } - return reservableMemory, nil + return reservableMemory, reservableCpus, nil } // ResizeDisk resizes a VM disk to the specified size. diff --git a/pkg/proxmox/goproxmox/api_client_test.go b/pkg/proxmox/goproxmox/api_client_test.go index 1babd08a..2f47af13 100644 --- a/pkg/proxmox/goproxmox/api_client_test.go +++ b/pkg/proxmox/goproxmox/api_client_test.go @@ -46,54 +46,80 @@ func newJSONResponder(status int, data any) httpmock.Responder { return httpmock.NewJsonResponderOrPanic(status, map[string]any{"data": data}).Once() } -func TestProxmoxAPIClient_GetReservableMemoryBytes(t *testing.T) { +func TestProxmoxAPIClient_GetReservableResources(t *testing.T) { + nodeMemory := uint64(30) + nodeCPUs := 16 tests := []struct { name string - maxMem uint64 // memory size of already provisioned guest - expect uint64 // expected available memory of the host + guestMaxMemory uint64 // memory size of already provisioned guest + guestCPUs uint64 + expectMemory uint64 // expected available memory of the host + expectCPUs uint64 nodeMemoryAdjustment uint64 // factor like 1.0 to multiply host memory with for overprovisioning + nodeCPUAdjustment uint64 }{ { name: "under zero - no overprovisioning", - maxMem: 29, - expect: 1, + guestMaxMemory: 29, + guestCPUs: 1, + expectMemory: nodeMemory - 29, + expectCPUs: uint64(nodeCPUs - 1), nodeMemoryAdjustment: 100, + nodeCPUAdjustment: 100, }, { name: "exact zero - no overprovisioning", - maxMem: 30, - expect: 0, + guestMaxMemory: 30, + guestCPUs: 1, + expectMemory: 0, + expectCPUs: uint64(nodeCPUs - 1), nodeMemoryAdjustment: 100, + nodeCPUAdjustment: 100, }, { name: "over zero - no overprovisioning", - maxMem: 31, - expect: 0, + guestMaxMemory: 31, + guestCPUs: 1, + expectMemory: 0, + expectCPUs: uint64(nodeCPUs - 1), nodeMemoryAdjustment: 100, + nodeCPUAdjustment: 100, }, { name: "under zero - overprovisioning", - maxMem: 58, - expect: 2, + guestMaxMemory: 58, + guestCPUs: 1, + expectMemory: 2, + expectCPUs: uint64(nodeCPUs*2 - 1), nodeMemoryAdjustment: 200, + nodeCPUAdjustment: 200, 
}, { name: "exact zero - overprovisioning", - maxMem: 30 * 2, - expect: 0, + guestMaxMemory: 30, + guestCPUs: 1, + expectMemory: nodeMemory*2 - 30, + expectCPUs: uint64(nodeCPUs*2 - 1), nodeMemoryAdjustment: 200, + nodeCPUAdjustment: 200, }, { name: "over zero - overprovisioning", - maxMem: 31 * 2, - expect: 0, + guestMaxMemory: 31, + guestCPUs: 1, + expectMemory: nodeMemory*2 - 31, + expectCPUs: uint64(nodeCPUs*2 - 1), nodeMemoryAdjustment: 200, + nodeCPUAdjustment: 200, }, { name: "scheduler disabled", - maxMem: 100, - expect: 30, + guestMaxMemory: 100, + guestCPUs: 1, + expectMemory: nodeMemory, + expectCPUs: uint64(nodeCPUs), nodeMemoryAdjustment: 0, + nodeCPUAdjustment: 0, }, } @@ -101,7 +127,15 @@ func TestProxmoxAPIClient_GetReservableMemoryBytes(t *testing.T) { t.Run(test.name, func(t *testing.T) { client := newTestClient(t) httpmock.RegisterResponder(http.MethodGet, `=~/nodes/test/status`, - newJSONResponder(200, proxmox.Node{Memory: proxmox.Memory{Total: 30}})) + newJSONResponder(200, + proxmox.Node{ + Memory: proxmox.Memory{ + Total: nodeMemory, + }, + CPUInfo: proxmox.CPUInfo{ + CPUs: nodeCPUs, + }, + })) httpmock.RegisterResponder(http.MethodGet, `=~/nodes/test/qemu`, // Somehow, setting proxmox.VirtualMachines{} ALWAYS has `Template: true` when defined this way. @@ -109,14 +143,14 @@ func TestProxmoxAPIClient_GetReservableMemoryBytes(t *testing.T) { newJSONResponder(200, []interface{}{ map[string]interface{}{ "name": "legit-worker", - "maxmem": test.maxMem, + "maxmem": test.guestMaxMemory, "vmid": 1111, "diskwrite": 0, "mem": 0, "uptime": 0, "disk": 0, "cpu": 0, - "cpus": 1, + "cpus": test.guestCPUs, "status": "stopped", "netout": 0, "maxdisk": 0, @@ -124,29 +158,30 @@ func TestProxmoxAPIClient_GetReservableMemoryBytes(t *testing.T) { "diskread": 0, }, map[string]interface{}{ - "name": "template", - "maxmem": 102400, - "vmid": 2222, - "diskwrite": 0, - "mem": 0, - "uptime": 0, - "disk": 0, - "cpu": 0, - "template": 1, - "cpus": 1, - "status": "stopped", - "netout": 0, - "maxdisk": 0, - "netin": 0, - "diskread": 0, + "name": "template", + "guestMaxMemory": 102400, + "vmid": 2222, + "diskwrite": 0, + "mem": 0, + "uptime": 0, + "disk": 0, + "cpu": 0, + "template": 1, + "cpus": 42, + "status": "stopped", + "netout": 0, + "maxdisk": 0, + "netin": 0, + "diskread": 0, }})) httpmock.RegisterResponder(http.MethodGet, `=~/nodes/test/lxc`, newJSONResponder(200, proxmox.Containers{})) - reservable, err := client.GetReservableMemoryBytes(context.Background(), "test", test.nodeMemoryAdjustment) + reservableMem, reservableCPUs, err := client.GetReservableResources(context.Background(), "test", test.nodeMemoryAdjustment, test.nodeCPUAdjustment) require.NoError(t, err) - require.Equal(t, test.expect, reservable) + require.Equal(t, test.expectMemory, reservableMem) + require.Equal(t, test.expectCPUs, reservableCPUs) }) } } diff --git a/pkg/proxmox/proxmoxtest/mock_client.go b/pkg/proxmox/proxmoxtest/mock_client.go index 5baae1a5..9fca7708 100644 --- a/pkg/proxmox/proxmoxtest/mock_client.go +++ b/pkg/proxmox/proxmoxtest/mock_client.go @@ -274,56 +274,64 @@ func (_c *MockClient_FindVMResource_Call) RunAndReturn(run func(context.Context, return _c } -// GetReservableMemoryBytes provides a mock function with given fields: ctx, nodeName, nodeMemoryAdjustment -func (_m *MockClient) GetReservableMemoryBytes(ctx context.Context, nodeName string, nodeMemoryAdjustment uint64) (uint64, error) { - ret := _m.Called(ctx, nodeName, nodeMemoryAdjustment) +// GetReservableResources provides a mock function 
with given fields: ctx, nodeName, nodeMemoryAdjustment, nodeCPUAdjustment +func (_m *MockClient) GetReservableResources(ctx context.Context, nodeName string, nodeMemoryAdjustment uint64, nodeCPUAdjustment uint64) (uint64, uint64, error) { + ret := _m.Called(ctx, nodeName, nodeMemoryAdjustment, nodeCPUAdjustment) var r0 uint64 - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, string, uint64) (uint64, error)); ok { - return rf(ctx, nodeName, nodeMemoryAdjustment) + var r1 uint64 + var r2 error + if rf, ok := ret.Get(0).(func(context.Context, string, uint64, uint64) (uint64, uint64, error)); ok { + return rf(ctx, nodeName, nodeMemoryAdjustment, nodeCPUAdjustment) } - if rf, ok := ret.Get(0).(func(context.Context, string, uint64) uint64); ok { - r0 = rf(ctx, nodeName, nodeMemoryAdjustment) + if rf, ok := ret.Get(0).(func(context.Context, string, uint64, uint64) uint64); ok { + r0 = rf(ctx, nodeName, nodeMemoryAdjustment, nodeCPUAdjustment) } else { r0 = ret.Get(0).(uint64) } - if rf, ok := ret.Get(1).(func(context.Context, string, uint64) error); ok { - r1 = rf(ctx, nodeName, nodeMemoryAdjustment) + if rf, ok := ret.Get(1).(func(context.Context, string, uint64, uint64) uint64); ok { + r1 = rf(ctx, nodeName, nodeMemoryAdjustment, nodeCPUAdjustment) } else { - r1 = ret.Error(1) + r1 = ret.Get(1).(uint64) } - return r0, r1 + if rf, ok := ret.Get(2).(func(context.Context, string, uint64, uint64) error); ok { + r2 = rf(ctx, nodeName, nodeMemoryAdjustment, nodeCPUAdjustment) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 } -// MockClient_GetReservableMemoryBytes_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetReservableMemoryBytes' -type MockClient_GetReservableMemoryBytes_Call struct { +// MockClient_GetReservableResources_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetReservableResources' +type MockClient_GetReservableResources_Call struct { *mock.Call } -// GetReservableMemoryBytes is a helper method to define mock.On call +// GetReservableResources is a helper method to define mock.On call // - ctx context.Context // - nodeName string // - nodeMemoryAdjustment uint64 -func (_e *MockClient_Expecter) GetReservableMemoryBytes(ctx interface{}, nodeName interface{}, nodeMemoryAdjustment interface{}) *MockClient_GetReservableMemoryBytes_Call { - return &MockClient_GetReservableMemoryBytes_Call{Call: _e.mock.On("GetReservableMemoryBytes", ctx, nodeName, nodeMemoryAdjustment)} +// - nodeCPUAdjustment uint64 +func (_e *MockClient_Expecter) GetReservableResources(ctx interface{}, nodeName interface{}, nodeMemoryAdjustment interface{}, nodeCPUAdjustment interface{}) *MockClient_GetReservableResources_Call { + return &MockClient_GetReservableResources_Call{Call: _e.mock.On("GetReservableResources", ctx, nodeName, nodeMemoryAdjustment, nodeCPUAdjustment)} } -func (_c *MockClient_GetReservableMemoryBytes_Call) Run(run func(ctx context.Context, nodeName string, nodeMemoryAdjustment uint64)) *MockClient_GetReservableMemoryBytes_Call { +func (_c *MockClient_GetReservableResources_Call) Run(run func(ctx context.Context, nodeName string, nodeMemoryAdjustment uint64, nodeCPUAdjustment uint64)) *MockClient_GetReservableResources_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(string), args[2].(uint64)) + run(args[0].(context.Context), args[1].(string), args[2].(uint64), args[3].(uint64)) }) return _c } -func (_c *MockClient_GetReservableMemoryBytes_Call) 
Return(_a0 uint64, _a1 error) *MockClient_GetReservableMemoryBytes_Call { - _c.Call.Return(_a0, _a1) +func (_c *MockClient_GetReservableResources_Call) Return(_a0 uint64, _a1 uint64, _a2 error) *MockClient_GetReservableResources_Call { + _c.Call.Return(_a0, _a1, _a2) return _c } -func (_c *MockClient_GetReservableMemoryBytes_Call) RunAndReturn(run func(context.Context, string, uint64) (uint64, error)) *MockClient_GetReservableMemoryBytes_Call { +func (_c *MockClient_GetReservableResources_Call) RunAndReturn(run func(context.Context, string, uint64, uint64) (uint64, uint64, error)) *MockClient_GetReservableResources_Call { _c.Call.Return(run) return _c }
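Finally, a condensed, self-contained sketch of the accounting GetReservableResources performs for each resource: scale the node's capacity by the adjustment percentage, skip templates, subtract every other guest's allocation, and never go below zero; an adjustment of 0 short-circuits to the raw capacity. The guest and reservable helpers are illustrative only, and integer multiply-before-divide is used here to keep the toy numbers exact (the client itself uses float64, as shown above).

```go
package main

import "fmt"

// guest is a stand-in for a Proxmox VM or container entry; only the fields
// the accounting needs are modelled here.
type guest struct {
	maxMemBytes uint64
	cpus        uint64
	template    bool
}

// reservable scales a node's capacity by the adjustment percentage, then
// subtracts every non-template guest, never going below zero. An adjustment
// of 0 disables budgeting and reports the raw capacity instead.
func reservable(total, adjustment uint64, used func(guest) uint64, guests []guest) uint64 {
	if adjustment == 0 {
		return total
	}
	budget := total * adjustment / 100 // multiply first so the example stays exact
	for _, g := range guests {
		if g.template {
			continue // templates never consume resources
		}
		u := used(g)
		if budget <= u {
			return 0
		}
		budget -= u
	}
	return budget
}

func main() {
	guests := []guest{
		{maxMemBytes: 16 << 30, cpus: 8},                // a provisioned 16 GiB / 8 vCore VM
		{maxMemBytes: 4 << 30, cpus: 2, template: true}, // templates are ignored
	}
	mem := reservable(32<<30, 200, func(g guest) uint64 { return g.maxMemBytes }, guests)
	cpu := reservable(16, 100, func(g guest) uint64 { return g.cpus }, guests)
	fmt.Printf("reservable: %d GiB memory, %d vCores\n", mem>>30, cpu)
}
```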