
Commit 6204460
nrt: filter: introduce and use filterInfo
add a helper struct to group together all the context we need to carry in the internal implementation functions. The goals are to streamline the existing code, to reduce redundant calls, and to make room for more arguments to be passed through, like the topology manager configuration.

Signed-off-by: Francesco Romani <[email protected]>
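At its core this is a parameter-object refactor. A minimal, self-contained sketch of the pattern, with illustrative names only (the real struct is the filterInfo introduced in pkg/noderesourcetopology/plugin.go below):

package main

import "fmt"

// handlerInfo plays the role filterInfo plays in this commit: one bundle for the
// per-request context, so internal helpers share a stable signature.
type handlerInfo struct {
	nodeName string
	qos      string
	numaIDs  []int
}

// Before the refactor a helper would take nodeName, qos and numaIDs separately;
// after it, every helper receives the bundle and picks the fields it needs.
func describe(info *handlerInfo) string {
	return fmt.Sprintf("node=%s qos=%s numaCells=%d", info.nodeName, info.qos, len(info.numaIDs))
}

func main() {
	info := handlerInfo{nodeName: "worker-0", qos: "Guaranteed", numaIDs: []int{0, 1}}
	fmt.Println(describe(&info))
}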
1 parent a41e4db commit 6204460

File tree

3 files changed: 39 additions & 38 deletions


pkg/noderesourcetopology/filter.go

Lines changed: 29 additions & 30 deletions
@@ -41,16 +41,9 @@ const highestNUMAID = 8
 
 type PolicyHandler func(pod *v1.Pod, zoneMap topologyv1alpha2.ZoneList) *framework.Status
 
-func singleNUMAContainerLevelHandler(lh logr.Logger, pod *v1.Pod, zones topologyv1alpha2.ZoneList, nodeInfo *framework.NodeInfo) *framework.Status {
+func singleNUMAContainerLevelHandler(lh logr.Logger, pod *v1.Pod, info *filterInfo) *framework.Status {
 	lh.V(5).Info("container level single NUMA node handler")
 
-	// prepare NUMANodes list from zoneMap
-	nodes := createNUMANodeList(lh, zones)
-	qos := v1qos.GetPodQOS(pod)
-
-	// Node() != nil already verified in Filter(), which is the only public entry point
-	logNumaNodes(lh, "container handler NUMA resources", nodeInfo.Node().Name, nodes)
-
 	// the init containers are running SERIALLY and BEFORE the normal containers.
 	// https://kubernetes.io/docs/concepts/workloads/pods/init-containers/#understanding-init-containers
 	// therefore, we don't need to accumulate their resources together
@@ -59,7 +52,7 @@ func singleNUMAContainerLevelHandler(lh logr.Logger, pod *v1.Pod, zones topology
 		clh := lh.WithValues(logging.KeyContainer, initContainer.Name, logging.KeyContainerKind, cntKind)
 		clh.V(6).Info("desired resources", stringify.ResourceListToLoggable(initContainer.Resources.Requests)...)
 
-		_, match := resourcesAvailableInAnyNUMANodes(clh, nodes, initContainer.Resources.Requests, qos, nodeInfo)
+		_, match := resourcesAvailableInAnyNUMANodes(clh, info, initContainer.Resources.Requests)
 		if !match {
 			msg := "cannot align " + cntKind + " container"
 			// we can't align init container, so definitely we can't align a pod
@@ -72,7 +65,7 @@ func singleNUMAContainerLevelHandler(lh logr.Logger, pod *v1.Pod, zones topology
 		clh := lh.WithValues(logging.KeyContainer, container.Name, logging.KeyContainerKind, logging.KindContainerApp)
 		clh.V(6).Info("container requests", stringify.ResourceListToLoggable(container.Resources.Requests)...)
 
-		numaID, match := resourcesAvailableInAnyNUMANodes(clh, nodes, container.Resources.Requests, qos, nodeInfo)
+		numaID, match := resourcesAvailableInAnyNUMANodes(clh, info, container.Resources.Requests)
 		if !match {
 			// we can't align container, so definitely we can't align a pod
 			clh.V(2).Info("cannot align container")
@@ -81,7 +74,7 @@ func singleNUMAContainerLevelHandler(lh logr.Logger, pod *v1.Pod, zones topology
 
 		// subtract the resources requested by the container from the given NUMA.
 		// this is necessary, so we won't allocate the same resources for the upcoming containers
-		err := subtractResourcesFromNUMANodeList(clh, nodes, numaID, qos, container.Resources.Requests)
+		err := subtractResourcesFromNUMANodeList(clh, info.numaNodes, numaID, info.qos, container.Resources.Requests)
 		if err != nil {
 			// this is an internal error which should never happen
 			return framework.NewStatus(framework.Error, "inconsistent resource accounting", err.Error())
@@ -93,14 +86,14 @@ func singleNUMAContainerLevelHandler(lh logr.Logger, pod *v1.Pod, zones topology
 
 // resourcesAvailableInAnyNUMANodes checks for sufficient resource and return the NUMAID that would be selected by Kubelet.
 // this function requires NUMANodeList with properly populated NUMANode, NUMAID should be in range 0-63
-func resourcesAvailableInAnyNUMANodes(lh logr.Logger, numaNodes NUMANodeList, resources v1.ResourceList, qos v1.PodQOSClass, nodeInfo *framework.NodeInfo) (int, bool) {
+func resourcesAvailableInAnyNUMANodes(lh logr.Logger, info *filterInfo, resources v1.ResourceList) (int, bool) {
 	numaID := highestNUMAID
 	bitmask := bm.NewEmptyBitMask()
 	// set all bits, each bit is a NUMA node, if resources couldn't be aligned
 	// on the NUMA node, bit should be unset
 	bitmask.Fill()
 
-	nodeResources := util.ResourceList(nodeInfo.Allocatable)
+	nodeResources := util.ResourceList(info.node.Allocatable)
 
 	for resource, quantity := range resources {
 		if quantity.IsZero() {
@@ -121,14 +114,14 @@ func resourcesAvailableInAnyNUMANodes(lh logr.Logger, numaNodes NUMANodeList, re
 		// obvious, bits which are not in the NUMA id's range would be unset
 		hasNUMAAffinity := false
 		resourceBitmask := bm.NewEmptyBitMask()
-		for _, numaNode := range numaNodes {
+		for _, numaNode := range info.numaNodes {
 			numaQuantity, ok := numaNode.Resources[resource]
 			if !ok {
 				continue
 			}
 
 			hasNUMAAffinity = true
-			if !isResourceSetSuitable(qos, resource, quantity, numaQuantity) {
+			if !isResourceSetSuitable(info.qos, resource, quantity, numaQuantity) {
 				continue
 			}
 
@@ -161,18 +154,13 @@ func resourcesAvailableInAnyNUMANodes(lh logr.Logger, numaNodes NUMANodeList, re
 	return numaID, ret
 }
 
-func singleNUMAPodLevelHandler(lh logr.Logger, pod *v1.Pod, zones topologyv1alpha2.ZoneList, nodeInfo *framework.NodeInfo) *framework.Status {
+func singleNUMAPodLevelHandler(lh logr.Logger, pod *v1.Pod, info *filterInfo) *framework.Status {
 	lh.V(5).Info("pod level single NUMA node handler")
 
 	resources := util.GetPodEffectiveRequest(pod)
-
-	nodes := createNUMANodeList(lh, zones)
-
-	// Node() != nil already verified in Filter(), which is the only public entry point
-	logNumaNodes(lh, "pod handler NUMA resources", nodeInfo.Node().Name, nodes)
 	lh.V(6).Info("pod desired resources", stringify.ResourceListToLoggable(resources)...)
 
-	numaID, match := resourcesAvailableInAnyNUMANodes(lh, createNUMANodeList(lh, zones), resources, v1qos.GetPodQOS(pod), nodeInfo)
+	numaID, match := resourcesAvailableInAnyNUMANodes(lh, info, resources)
 	if !match {
 		lh.V(2).Info("cannot align pod", "name", pod.Name)
 		return framework.NewStatus(framework.Unschedulable, "cannot align pod")
@@ -186,7 +174,8 @@ func (tm *TopologyMatch) Filter(ctx context.Context, cycleState *framework.Cycle
 	if nodeInfo.Node() == nil {
 		return framework.NewStatus(framework.Error, "node not found")
 	}
-	if v1qos.GetPodQOS(pod) == v1.PodQOSBestEffort && !resourcerequests.IncludeNonNative(pod) {
+	qos := v1qos.GetPodQOS(pod)
+	if qos == v1.PodQOSBestEffort && !resourcerequests.IncludeNonNative(pod) {
 		return nil
 	}
 
@@ -212,26 +201,36 @@ func (tm *TopologyMatch) Filter(ctx context.Context, cycleState *framework.Cycle
 
 	lh.V(4).Info("found nrt data", "object", stringify.NodeResourceTopologyResources(nodeTopology), "conf", conf.String())
 
-	handler := filterHandlerFromTopologyManager(conf)
+	handler, boundary := filterHandlerFromTopologyManager(conf)
 	if handler == nil {
 		return nil
 	}
-	status := handler(lh, pod, nodeTopology.Zones, nodeInfo)
+
+	numaNodes := createNUMANodeList(lh, nodeTopology.Zones)
+	lh.V(4).Info("aligning resources", "boundary", boundary, "numaCells", len(numaNodes))
+	fi := filterInfo{
+		nodeName:        nodeName,
+		node:            nodeInfo,
+		topologyManager: conf,
+		qos:             qos,
+		numaNodes:       numaNodes,
+	}
+	status := handler(lh, pod, &fi)
 	if status != nil {
 		tm.nrtCache.NodeMaybeOverReserved(nodeName, pod)
 	}
 	return status
 }
 
-func filterHandlerFromTopologyManager(conf nodeconfig.TopologyManager) filterFn {
+func filterHandlerFromTopologyManager(conf nodeconfig.TopologyManager) (filterFn, string) {
 	if conf.Policy != kubeletconfig.SingleNumaNodeTopologyManagerPolicy {
-		return nil
+		return nil, ""
 	}
 	if conf.Scope == kubeletconfig.PodTopologyManagerScope {
-		return singleNUMAPodLevelHandler
+		return singleNUMAPodLevelHandler, "pod"
 	}
 	if conf.Scope == kubeletconfig.ContainerTopologyManagerScope {
-		return singleNUMAContainerLevelHandler
+		return singleNUMAContainerLevelHandler, "container"
 	}
-	return nil // cannot happen
+	return nil, "" // cannot happen
 }
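Taken together, the new two-value return of filterHandlerFromTopologyManager lets Filter() log the alignment boundary before invoking the handler. Below is a standalone sketch of that dispatch shape, using simplified stand-in types rather than the plugin's real signatures:

package main

import "fmt"

type tmConfig struct {
	policy string
	scope  string
}

type handlerFn func(pod string) error

func podHandler(pod string) error       { fmt.Println("pod-scope check:", pod); return nil }
func containerHandler(pod string) error { fmt.Println("container-scope check:", pod); return nil }

// handlerFor mirrors the shape of filterHandlerFromTopologyManager: it returns
// the handler plus a human-readable boundary label, used only for logging.
func handlerFor(conf tmConfig) (handlerFn, string) {
	if conf.policy != "single-numa-node" {
		return nil, ""
	}
	switch conf.scope {
	case "pod":
		return podHandler, "pod"
	case "container":
		return containerHandler, "container"
	}
	return nil, ""
}

func main() {
	handler, boundary := handlerFor(tmConfig{policy: "single-numa-node", scope: "pod"})
	if handler == nil {
		return // no alignment constraint to enforce
	}
	fmt.Println("aligning resources at boundary:", boundary)
	_ = handler("my-pod")
}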

pkg/noderesourcetopology/plugin.go

Lines changed: 10 additions & 1 deletion
@@ -30,6 +30,7 @@ import (
 	apiconfig "sigs.k8s.io/scheduler-plugins/apis/config"
 	"sigs.k8s.io/scheduler-plugins/apis/config/validation"
 	nrtcache "sigs.k8s.io/scheduler-plugins/pkg/noderesourcetopology/cache"
+	"sigs.k8s.io/scheduler-plugins/pkg/noderesourcetopology/nodeconfig"
 
 	"github.com/go-logr/logr"
 	topologyapi "github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/apis/topology"
@@ -48,7 +49,15 @@ func init() {
 	utilruntime.Must(topologyv1alpha2.AddToScheme(scheme))
 }
 
-type filterFn func(lh logr.Logger, pod *v1.Pod, zones topologyv1alpha2.ZoneList, nodeInfo *framework.NodeInfo) *framework.Status
+type filterInfo struct {
+	nodeName        string // shortcut, used very often
+	node            *framework.NodeInfo
+	topologyManager nodeconfig.TopologyManager
+	qos             v1.PodQOSClass
+	numaNodes       NUMANodeList
+}
+
+type filterFn func(logr.Logger, *v1.Pod, *filterInfo) *framework.Status
 type scoringFn func(logr.Logger, *v1.Pod, topologyv1alpha2.ZoneList) (int64, *framework.Status)
 
 // TopologyMatch plugin which run simplified version of TopologyManager's admit handler
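One practical benefit of pairing filterInfo with the filterFn type: further context, like the topology manager configuration already carried in the struct, can be added as new fields without touching any handler signature. A hypothetical sketch of that extension path (the field and handler names below are invented for illustration):

package main

import "fmt"

// info mimics filterInfo; adding a field later does not change handler signatures.
type info struct {
	nodeName string
	// hypothetical later addition, in the spirit of the topologyManager field:
	maxAllowableNUMANodes int
}

type handler func(*info) error

func legacyHandler(i *info) error {
	// existing handlers keep compiling and simply ignore fields they do not need.
	fmt.Println("checking node", i.nodeName)
	return nil
}

func newHandler(i *info) error {
	// a newer handler can read the extra context with no signature change.
	fmt.Println("NUMA cell budget:", i.maxAllowableNUMANodes)
	return nil
}

func main() {
	shared := &info{nodeName: "worker-0", maxAllowableNUMANodes: 8}
	for _, h := range []handler{legacyHandler, newHandler} {
		_ = h(shared)
	}
}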

pkg/noderesourcetopology/pluginhelpers.go

Lines changed: 0 additions & 7 deletions
@@ -180,10 +180,3 @@ func getForeignPodsDetectMode(lh logr.Logger, cfg *apiconfig.NodeResourceTopolog
 	}
 	return foreignPodsDetect
 }
-
-func logNumaNodes(lh logr.Logger, desc, nodeName string, nodes NUMANodeList) {
-	for _, numaNode := range nodes {
-		numaItems := []interface{}{"numaCell", numaNode.NUMAID}
-		lh.V(6).Info(desc, stringify.ResourceListToLoggableWithValues(numaItems, numaNode.Resources)...)
-	}
-}
