
Commit b4b6bb2

nrt: filter: introduce and use filterInfo
Add a helper struct to group together all the context we need to carry in the internal implementation functions. The goals are to streamline the existing code, to reduce redundant calls, and to make room for more arguments to be passed through, like the topology manager configuration.

Signed-off-by: Francesco Romani <[email protected]>
1 parent 6c47d4e commit b4b6bb2
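The change described in the commit message is the classic "parameter object" move: compute the per-request context once, bundle it into a struct, and pass a single pointer down to the handlers. A minimal standalone sketch of the pattern follows; the type and field names here are simplified stand-ins, not the plugin's real ones (the actual struct is filterInfo, shown in the plugin.go diff below).

package main

import "fmt"

// requestContext plays the role filterInfo plays in the diff below:
// everything the handlers need, computed once by the caller.
type requestContext struct {
    nodeName string
    qos      string
    numaIDs  []int
}

// Before the refactoring, a handler would receive zones and node info and
// recompute the QoS class and the NUMA node list internally. After it, the
// handler only reads the precomputed context.
func alignPod(pod string, ctx *requestContext) bool {
    fmt.Printf("aligning pod %q on %s (qos=%s, numaCells=%d)\n",
        pod, ctx.nodeName, ctx.qos, len(ctx.numaIDs))
    return len(ctx.numaIDs) > 0
}

func main() {
    // the caller builds the context once and hands it to every handler
    ctx := requestContext{nodeName: "worker-0", qos: "Guaranteed", numaIDs: []int{0, 1}}
    alignPod("example-pod", &ctx)
}

Adding a new piece of context later, such as the topology manager configuration, then only touches the struct and the single place that builds it, not every handler signature.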

File tree

3 files changed: +39 −38 lines changed


pkg/noderesourcetopology/filter.go

Lines changed: 29 additions & 30 deletions
@@ -41,24 +41,17 @@ const highestNUMAID = 8
 
 type PolicyHandler func(pod *v1.Pod, zoneMap topologyv1alpha2.ZoneList) *framework.Status
 
-func singleNUMAContainerLevelHandler(lh logr.Logger, pod *v1.Pod, zones topologyv1alpha2.ZoneList, nodeInfo *framework.NodeInfo) *framework.Status {
+func singleNUMAContainerLevelHandler(lh logr.Logger, pod *v1.Pod, info *filterInfo) *framework.Status {
     lh.V(5).Info("container level single NUMA node handler")
 
-    // prepare NUMANodes list from zoneMap
-    nodes := createNUMANodeList(lh, zones)
-    qos := v1qos.GetPodQOS(pod)
-
-    // Node() != nil already verified in Filter(), which is the only public entry point
-    logNumaNodes(lh, "container handler NUMA resources", nodeInfo.Node().Name, nodes)
-
     // the init containers are running SERIALLY and BEFORE the normal containers.
     // https://kubernetes.io/docs/concepts/workloads/pods/init-containers/#understanding-init-containers
     // therefore, we don't need to accumulate their resources together
     for _, initContainer := range pod.Spec.InitContainers {
         clh := lh.WithValues(logging.KeyContainer, initContainer.Name, logging.KeyContainerKind, logging.GetInitContainerKind(&initContainer))
         clh.V(6).Info("desired resources", stringify.ResourceListToLoggable(initContainer.Resources.Requests)...)
 
-        _, match := resourcesAvailableInAnyNUMANodes(clh, nodes, initContainer.Resources.Requests, qos, nodeInfo)
+        _, match := resourcesAvailableInAnyNUMANodes(clh, info, initContainer.Resources.Requests)
         if !match {
             // we can't align init container, so definitely we can't align a pod
             clh.V(2).Info("cannot align init container")
@@ -70,7 +63,7 @@ func singleNUMAContainerLevelHandler(lh logr.Logger, pod *v1.Pod, zones topology
         clh := lh.WithValues(logging.KeyContainer, container.Name, logging.KeyContainerKind, logging.KindContainerApp)
         clh.V(6).Info("container requests", stringify.ResourceListToLoggable(container.Resources.Requests)...)
 
-        numaID, match := resourcesAvailableInAnyNUMANodes(clh, nodes, container.Resources.Requests, qos, nodeInfo)
+        numaID, match := resourcesAvailableInAnyNUMANodes(clh, info, container.Resources.Requests)
         if !match {
             // we can't align container, so definitely we can't align a pod
             clh.V(2).Info("cannot align container")
@@ -79,7 +72,7 @@ func singleNUMAContainerLevelHandler(lh logr.Logger, pod *v1.Pod, zones topology
 
         // subtract the resources requested by the container from the given NUMA.
         // this is necessary, so we won't allocate the same resources for the upcoming containers
-        err := subtractResourcesFromNUMANodeList(clh, nodes, numaID, qos, container.Resources.Requests)
+        err := subtractResourcesFromNUMANodeList(clh, info.numaNodes, numaID, info.qos, container.Resources.Requests)
         if err != nil {
             // this is an internal error which should never happen
             return framework.NewStatus(framework.Error, "inconsistent resource accounting", err.Error())
@@ -91,14 +84,14 @@ func singleNUMAContainerLevelHandler(lh logr.Logger, pod *v1.Pod, zones topology
 
 // resourcesAvailableInAnyNUMANodes checks for sufficient resource and return the NUMAID that would be selected by Kubelet.
 // this function requires NUMANodeList with properly populated NUMANode, NUMAID should be in range 0-63
-func resourcesAvailableInAnyNUMANodes(lh logr.Logger, numaNodes NUMANodeList, resources v1.ResourceList, qos v1.PodQOSClass, nodeInfo *framework.NodeInfo) (int, bool) {
+func resourcesAvailableInAnyNUMANodes(lh logr.Logger, info *filterInfo, resources v1.ResourceList) (int, bool) {
     numaID := highestNUMAID
     bitmask := bm.NewEmptyBitMask()
     // set all bits, each bit is a NUMA node, if resources couldn't be aligned
     // on the NUMA node, bit should be unset
     bitmask.Fill()
 
-    nodeResources := util.ResourceList(nodeInfo.Allocatable)
+    nodeResources := util.ResourceList(info.node.Allocatable)
 
     for resource, quantity := range resources {
         if quantity.IsZero() {
@@ -119,14 +112,14 @@ func resourcesAvailableInAnyNUMANodes(lh logr.Logger, numaNodes NUMANodeList, re
         // obvious, bits which are not in the NUMA id's range would be unset
         hasNUMAAffinity := false
         resourceBitmask := bm.NewEmptyBitMask()
-        for _, numaNode := range numaNodes {
+        for _, numaNode := range info.numaNodes {
             numaQuantity, ok := numaNode.Resources[resource]
             if !ok {
                 continue
             }
 
             hasNUMAAffinity = true
-            if !isResourceSetSuitable(qos, resource, quantity, numaQuantity) {
+            if !isResourceSetSuitable(info.qos, resource, quantity, numaQuantity) {
                 continue
             }
 
@@ -159,18 +152,13 @@ func resourcesAvailableInAnyNUMANodes(lh logr.Logger, numaNodes NUMANodeList, re
     return numaID, ret
 }
 
-func singleNUMAPodLevelHandler(lh logr.Logger, pod *v1.Pod, zones topologyv1alpha2.ZoneList, nodeInfo *framework.NodeInfo) *framework.Status {
+func singleNUMAPodLevelHandler(lh logr.Logger, pod *v1.Pod, info *filterInfo) *framework.Status {
     lh.V(5).Info("pod level single NUMA node handler")
 
     resources := util.GetPodEffectiveRequest(pod)
-
-    nodes := createNUMANodeList(lh, zones)
-
-    // Node() != nil already verified in Filter(), which is the only public entry point
-    logNumaNodes(lh, "pod handler NUMA resources", nodeInfo.Node().Name, nodes)
     lh.V(6).Info("pod desired resources", stringify.ResourceListToLoggable(resources)...)
 
-    numaID, match := resourcesAvailableInAnyNUMANodes(lh, createNUMANodeList(lh, zones), resources, v1qos.GetPodQOS(pod), nodeInfo)
+    numaID, match := resourcesAvailableInAnyNUMANodes(lh, info, resources)
     if !match {
         lh.V(2).Info("cannot align pod", "name", pod.Name)
         return framework.NewStatus(framework.Unschedulable, "cannot align pod")
@@ -184,7 +172,8 @@ func (tm *TopologyMatch) Filter(ctx context.Context, cycleState *framework.Cycle
     if nodeInfo.Node() == nil {
         return framework.NewStatus(framework.Error, "node not found")
     }
-    if v1qos.GetPodQOS(pod) == v1.PodQOSBestEffort && !resourcerequests.IncludeNonNative(pod) {
+    qos := v1qos.GetPodQOS(pod)
+    if qos == v1.PodQOSBestEffort && !resourcerequests.IncludeNonNative(pod) {
         return nil
     }
 
@@ -210,26 +199,36 @@ func (tm *TopologyMatch) Filter(ctx context.Context, cycleState *framework.Cycle
 
     lh.V(4).Info("found nrt data", "object", stringify.NodeResourceTopologyResources(nodeTopology), "conf", conf.String())
 
-    handler := filterHandlerFromTopologyManager(conf)
+    handler, boundary := filterHandlerFromTopologyManager(conf)
     if handler == nil {
         return nil
     }
-    status := handler(lh, pod, nodeTopology.Zones, nodeInfo)
+
+    numaNodes := createNUMANodeList(lh, nodeTopology.Zones)
+    lh.V(4).Info("aligning resources", "boundary", boundary, "numaCells", len(numaNodes))
+    fi := filterInfo{
+        nodeName:        nodeName,
+        node:            nodeInfo,
+        topologyManager: conf,
+        qos:             qos,
+        numaNodes:       numaNodes,
+    }
+    status := handler(lh, pod, &fi)
     if status != nil {
         tm.nrtCache.NodeMaybeOverReserved(nodeName, pod)
     }
     return status
 }
 
-func filterHandlerFromTopologyManager(conf nodeconfig.TopologyManager) filterFn {
+func filterHandlerFromTopologyManager(conf nodeconfig.TopologyManager) (filterFn, string) {
     if conf.Policy != kubeletconfig.SingleNumaNodeTopologyManagerPolicy {
-        return nil
+        return nil, ""
     }
     if conf.Scope == kubeletconfig.PodTopologyManagerScope {
-        return singleNUMAPodLevelHandler
+        return singleNUMAPodLevelHandler, "pod"
     }
     if conf.Scope == kubeletconfig.ContainerTopologyManagerScope {
-        return singleNUMAContainerLevelHandler
+        return singleNUMAContainerLevelHandler, "container"
     }
-    return nil // cannot happen
+    return nil, "" // cannot happen
 }
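A sketch of how the new two-value return of filterHandlerFromTopologyManager could be exercised from a package-level test. This is illustrative and not part of the commit: the Policy and Scope field names come from the diff above, while the test file name, package name, import paths, and the BestEffortTopologyManagerPolicy constant in the third case are assumptions based on the identifiers already used in filter.go.

// filter_info_test.go (hypothetical)
package noderesourcetopology

import (
    "testing"

    kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"

    "sigs.k8s.io/scheduler-plugins/pkg/noderesourcetopology/nodeconfig"
)

func TestFilterHandlerFromTopologyManager(t *testing.T) {
    tests := []struct {
        name         string
        conf         nodeconfig.TopologyManager
        wantBoundary string
        wantHandler  bool
    }{
        {
            name:         "single-numa-node, pod scope",
            conf:         nodeconfig.TopologyManager{Policy: kubeletconfig.SingleNumaNodeTopologyManagerPolicy, Scope: kubeletconfig.PodTopologyManagerScope},
            wantBoundary: "pod",
            wantHandler:  true,
        },
        {
            name:         "single-numa-node, container scope",
            conf:         nodeconfig.TopologyManager{Policy: kubeletconfig.SingleNumaNodeTopologyManagerPolicy, Scope: kubeletconfig.ContainerTopologyManagerScope},
            wantBoundary: "container",
            wantHandler:  true,
        },
        {
            name:         "non single-numa-node policy yields no handler",
            conf:         nodeconfig.TopologyManager{Policy: kubeletconfig.BestEffortTopologyManagerPolicy, Scope: kubeletconfig.PodTopologyManagerScope},
            wantBoundary: "",
            wantHandler:  false,
        },
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            handler, boundary := filterHandlerFromTopologyManager(tt.conf)
            if boundary != tt.wantBoundary {
                t.Errorf("boundary = %q, want %q", boundary, tt.wantBoundary)
            }
            if got := handler != nil; got != tt.wantHandler {
                t.Errorf("handler != nil = %v, want %v", got, tt.wantHandler)
            }
        })
    }
}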

pkg/noderesourcetopology/plugin.go

Lines changed: 10 additions & 1 deletion
@@ -30,6 +30,7 @@ import (
     apiconfig "sigs.k8s.io/scheduler-plugins/apis/config"
     "sigs.k8s.io/scheduler-plugins/apis/config/validation"
     nrtcache "sigs.k8s.io/scheduler-plugins/pkg/noderesourcetopology/cache"
+    "sigs.k8s.io/scheduler-plugins/pkg/noderesourcetopology/nodeconfig"
 
     "github.com/go-logr/logr"
     topologyapi "github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/apis/topology"
@@ -48,7 +49,15 @@ func init() {
     utilruntime.Must(topologyv1alpha2.AddToScheme(scheme))
 }
 
-type filterFn func(lh logr.Logger, pod *v1.Pod, zones topologyv1alpha2.ZoneList, nodeInfo *framework.NodeInfo) *framework.Status
+type filterInfo struct {
+    nodeName        string // shortcut, used very often
+    node            *framework.NodeInfo
+    topologyManager nodeconfig.TopologyManager
+    qos             v1.PodQOSClass
+    numaNodes       NUMANodeList
+}
+
+type filterFn func(logr.Logger, *v1.Pod, *filterInfo) *framework.Status
 type scoringFn func(logr.Logger, *v1.Pod, topologyv1alpha2.ZoneList) (int64, *framework.Status)
 
 // TopologyMatch plugin which run simplified version of TopologyManager's admit handler
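Since filterInfo already carries the topology manager configuration, a future handler matching the new filterFn signature can consume it without any further signature change, which is one of the goals stated in the commit message. A hedged sketch follows; policyAwareHandler is hypothetical and not part of the commit, and only reads fields shown in the diff above.

// Hypothetical handler, for illustration only: it conforms to the new
// filterFn signature and reads everything it needs from filterInfo.
func policyAwareHandler(lh logr.Logger, pod *v1.Pod, info *filterInfo) *framework.Status {
    lh.V(5).Info("topology manager configuration",
        "node", info.nodeName,
        "policy", info.topologyManager.Policy,
        "scope", info.topologyManager.Scope,
        "qos", info.qos,
        "numaCells", len(info.numaNodes))
    // ...alignment logic would go here, as in the existing handlers...
    return nil
}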

pkg/noderesourcetopology/pluginhelpers.go

Lines changed: 0 additions & 7 deletions
@@ -180,10 +180,3 @@ func getForeignPodsDetectMode(lh logr.Logger, cfg *apiconfig.NodeResourceTopolog
     }
     return foreignPodsDetect
 }
-
-func logNumaNodes(lh logr.Logger, desc, nodeName string, nodes NUMANodeList) {
-    for _, numaNode := range nodes {
-        numaItems := []interface{}{"numaCell", numaNode.NUMAID}
-        lh.V(6).Info(desc, stringify.ResourceListToLoggableWithValues(numaItems, numaNode.Resources)...)
-    }
-}

0 commit comments
