Commit 848b590

nrt: filter: introduce and use filterInfo
Add a helper struct to group together all the context we need to carry in the internal implementation functions. The goals are to streamline the existing code, reduce redundant calls, and make room for more arguments to be passed through, like the topology manager configuration.

Signed-off-by: Francesco Romani <[email protected]>
1 parent a41e4db commit 848b590
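
In short, the refactor collapses the handlers' long parameter lists into a single *filterInfo argument that Filter populates once per node. A condensed before/after view of one handler signature, assembled from the diff below:

    // Before: each handler received the raw zone list plus the node object, and
    // re-derived the pod QoS class and the NUMA node list on its own.
    func singleNUMAPodLevelHandler(lh logr.Logger, pod *v1.Pod, zones topologyv1alpha2.ZoneList, nodeInfo *framework.NodeInfo) *framework.Status

    // After: Filter computes the shared context once and hands it down.
    func singleNUMAPodLevelHandler(lh logr.Logger, pod *v1.Pod, info *filterInfo) *framework.Status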

3 files changed, +39 -42 lines
pkg/noderesourcetopology/filter.go

Lines changed: 29 additions & 34 deletions
@@ -41,16 +41,7 @@ const highestNUMAID = 8
 
 type PolicyHandler func(pod *v1.Pod, zoneMap topologyv1alpha2.ZoneList) *framework.Status
 
-func singleNUMAContainerLevelHandler(lh logr.Logger, pod *v1.Pod, zones topologyv1alpha2.ZoneList, nodeInfo *framework.NodeInfo) *framework.Status {
-    lh.V(5).Info("container level single NUMA node handler")
-
-    // prepare NUMANodes list from zoneMap
-    nodes := createNUMANodeList(lh, zones)
-    qos := v1qos.GetPodQOS(pod)
-
-    // Node() != nil already verified in Filter(), which is the only public entry point
-    logNumaNodes(lh, "container handler NUMA resources", nodeInfo.Node().Name, nodes)
-
+func singleNUMAContainerLevelHandler(lh logr.Logger, pod *v1.Pod, info *filterInfo) *framework.Status {
     // the init containers are running SERIALLY and BEFORE the normal containers.
     // https://kubernetes.io/docs/concepts/workloads/pods/init-containers/#understanding-init-containers
     // therefore, we don't need to accumulate their resources together
@@ -59,7 +50,7 @@ func singleNUMAContainerLevelHandler(lh logr.Logger, pod *v1.Pod, zones topology
         clh := lh.WithValues(logging.KeyContainer, initContainer.Name, logging.KeyContainerKind, cntKind)
         clh.V(6).Info("desired resources", stringify.ResourceListToLoggable(initContainer.Resources.Requests)...)
 
-        _, match := resourcesAvailableInAnyNUMANodes(clh, nodes, initContainer.Resources.Requests, qos, nodeInfo)
+        _, match := resourcesAvailableInAnyNUMANodes(clh, info, initContainer.Resources.Requests)
         if !match {
             msg := "cannot align " + cntKind + " container"
             // we can't align init container, so definitely we can't align a pod
@@ -72,7 +63,7 @@ func singleNUMAContainerLevelHandler(lh logr.Logger, pod *v1.Pod, zones topology
         clh := lh.WithValues(logging.KeyContainer, container.Name, logging.KeyContainerKind, logging.KindContainerApp)
         clh.V(6).Info("container requests", stringify.ResourceListToLoggable(container.Resources.Requests)...)
 
-        numaID, match := resourcesAvailableInAnyNUMANodes(clh, nodes, container.Resources.Requests, qos, nodeInfo)
+        numaID, match := resourcesAvailableInAnyNUMANodes(clh, info, container.Resources.Requests)
         if !match {
             // we can't align container, so definitely we can't align a pod
             clh.V(2).Info("cannot align container")
@@ -81,7 +72,7 @@ func singleNUMAContainerLevelHandler(lh logr.Logger, pod *v1.Pod, zones topology
 
         // subtract the resources requested by the container from the given NUMA.
         // this is necessary, so we won't allocate the same resources for the upcoming containers
-        err := subtractResourcesFromNUMANodeList(clh, nodes, numaID, qos, container.Resources.Requests)
+        err := subtractResourcesFromNUMANodeList(clh, info.numaNodes, numaID, info.qos, container.Resources.Requests)
         if err != nil {
             // this is an internal error which should never happen
             return framework.NewStatus(framework.Error, "inconsistent resource accounting", err.Error())
@@ -93,14 +84,14 @@ func singleNUMAContainerLevelHandler(lh logr.Logger, pod *v1.Pod, zones topology
 
 // resourcesAvailableInAnyNUMANodes checks for sufficient resource and return the NUMAID that would be selected by Kubelet.
 // this function requires NUMANodeList with properly populated NUMANode, NUMAID should be in range 0-63
-func resourcesAvailableInAnyNUMANodes(lh logr.Logger, numaNodes NUMANodeList, resources v1.ResourceList, qos v1.PodQOSClass, nodeInfo *framework.NodeInfo) (int, bool) {
+func resourcesAvailableInAnyNUMANodes(lh logr.Logger, info *filterInfo, resources v1.ResourceList) (int, bool) {
     numaID := highestNUMAID
     bitmask := bm.NewEmptyBitMask()
     // set all bits, each bit is a NUMA node, if resources couldn't be aligned
     // on the NUMA node, bit should be unset
     bitmask.Fill()
 
-    nodeResources := util.ResourceList(nodeInfo.Allocatable)
+    nodeResources := util.ResourceList(info.node.Allocatable)
 
     for resource, quantity := range resources {
         if quantity.IsZero() {
@@ -121,14 +112,14 @@ func resourcesAvailableInAnyNUMANodes(lh logr.Logger, numaNodes NUMANodeList, re
         // obvious, bits which are not in the NUMA id's range would be unset
         hasNUMAAffinity := false
         resourceBitmask := bm.NewEmptyBitMask()
-        for _, numaNode := range numaNodes {
+        for _, numaNode := range info.numaNodes {
             numaQuantity, ok := numaNode.Resources[resource]
             if !ok {
                 continue
             }
 
             hasNUMAAffinity = true
-            if !isResourceSetSuitable(qos, resource, quantity, numaQuantity) {
+            if !isResourceSetSuitable(info.qos, resource, quantity, numaQuantity) {
                 continue
             }
 
@@ -161,18 +152,11 @@ func resourcesAvailableInAnyNUMANodes(lh logr.Logger, numaNodes NUMANodeList, re
     return numaID, ret
 }
 
-func singleNUMAPodLevelHandler(lh logr.Logger, pod *v1.Pod, zones topologyv1alpha2.ZoneList, nodeInfo *framework.NodeInfo) *framework.Status {
-    lh.V(5).Info("pod level single NUMA node handler")
-
+func singleNUMAPodLevelHandler(lh logr.Logger, pod *v1.Pod, info *filterInfo) *framework.Status {
     resources := util.GetPodEffectiveRequest(pod)
-
-    nodes := createNUMANodeList(lh, zones)
-
-    // Node() != nil already verified in Filter(), which is the only public entry point
-    logNumaNodes(lh, "pod handler NUMA resources", nodeInfo.Node().Name, nodes)
     lh.V(6).Info("pod desired resources", stringify.ResourceListToLoggable(resources)...)
 
-    numaID, match := resourcesAvailableInAnyNUMANodes(lh, createNUMANodeList(lh, zones), resources, v1qos.GetPodQOS(pod), nodeInfo)
+    numaID, match := resourcesAvailableInAnyNUMANodes(lh, info, resources)
     if !match {
         lh.V(2).Info("cannot align pod", "name", pod.Name)
         return framework.NewStatus(framework.Unschedulable, "cannot align pod")
@@ -186,7 +170,8 @@ func (tm *TopologyMatch) Filter(ctx context.Context, cycleState *framework.Cycle
     if nodeInfo.Node() == nil {
         return framework.NewStatus(framework.Error, "node not found")
     }
-    if v1qos.GetPodQOS(pod) == v1.PodQOSBestEffort && !resourcerequests.IncludeNonNative(pod) {
+    qos := v1qos.GetPodQOS(pod)
+    if qos == v1.PodQOSBestEffort && !resourcerequests.IncludeNonNative(pod) {
         return nil
     }
 
@@ -212,26 +197,36 @@ func (tm *TopologyMatch) Filter(ctx context.Context, cycleState *framework.Cycle
 
     lh.V(4).Info("found nrt data", "object", stringify.NodeResourceTopologyResources(nodeTopology), "conf", conf.String())
 
-    handler := filterHandlerFromTopologyManager(conf)
+    handler, boundary := filterHandlerFromTopologyManager(conf)
     if handler == nil {
         return nil
     }
-    status := handler(lh, pod, nodeTopology.Zones, nodeInfo)
+
+    numaNodes := createNUMANodeList(lh, nodeTopology.Zones)
+    lh.V(4).Info("aligning resources", "boundary", boundary, "numaCells", len(numaNodes))
+    fi := filterInfo{
+        nodeName:        nodeName,
+        node:            nodeInfo,
+        topologyManager: conf,
+        qos:             qos,
+        numaNodes:       numaNodes,
+    }
+    status := handler(lh, pod, &fi)
     if status != nil {
         tm.nrtCache.NodeMaybeOverReserved(nodeName, pod)
     }
     return status
 }
 
-func filterHandlerFromTopologyManager(conf nodeconfig.TopologyManager) filterFn {
+func filterHandlerFromTopologyManager(conf nodeconfig.TopologyManager) (filterFn, string) {
     if conf.Policy != kubeletconfig.SingleNumaNodeTopologyManagerPolicy {
-        return nil
+        return nil, ""
     }
     if conf.Scope == kubeletconfig.PodTopologyManagerScope {
-        return singleNUMAPodLevelHandler
+        return singleNUMAPodLevelHandler, "pod"
     }
     if conf.Scope == kubeletconfig.ContainerTopologyManagerScope {
-        return singleNUMAContainerLevelHandler
+        return singleNUMAContainerLevelHandler, "container"
     }
-    return nil // cannot happen
+    return nil, "" // cannot happen
 }
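
Read together, the added lines give Filter the shape below; this is just the new dispatch flow from the hunks above assembled in one place (createNUMANodeList, nodeTopology, conf, and nrtCache all come from the surrounding package). Note that filterHandlerFromTopologyManager now also returns a short "pod"/"container" boundary label, which Filter logs alongside the NUMA cell count, while the per-NUMA logNumaNodes dump is removed (see pluginhelpers.go below):

    handler, boundary := filterHandlerFromTopologyManager(conf)
    if handler == nil {
        return nil // policy/scope combination not handled by this plugin
    }

    // build the NUMA node list once, log the alignment boundary and cell count
    numaNodes := createNUMANodeList(lh, nodeTopology.Zones)
    lh.V(4).Info("aligning resources", "boundary", boundary, "numaCells", len(numaNodes))
    fi := filterInfo{
        nodeName:        nodeName,
        node:            nodeInfo,
        topologyManager: conf,
        qos:             qos,
        numaNodes:       numaNodes,
    }
    status := handler(lh, pod, &fi)
    if status != nil {
        tm.nrtCache.NodeMaybeOverReserved(nodeName, pod)
    }
    return status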

pkg/noderesourcetopology/plugin.go

Lines changed: 10 additions & 1 deletion
@@ -30,6 +30,7 @@ import (
     apiconfig "sigs.k8s.io/scheduler-plugins/apis/config"
     "sigs.k8s.io/scheduler-plugins/apis/config/validation"
     nrtcache "sigs.k8s.io/scheduler-plugins/pkg/noderesourcetopology/cache"
+    "sigs.k8s.io/scheduler-plugins/pkg/noderesourcetopology/nodeconfig"
 
     "github.com/go-logr/logr"
     topologyapi "github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/apis/topology"
@@ -48,7 +49,15 @@ func init() {
     utilruntime.Must(topologyv1alpha2.AddToScheme(scheme))
 }
 
-type filterFn func(lh logr.Logger, pod *v1.Pod, zones topologyv1alpha2.ZoneList, nodeInfo *framework.NodeInfo) *framework.Status
+type filterInfo struct {
+    nodeName        string // shortcut, used very often
+    node            *framework.NodeInfo
+    topologyManager nodeconfig.TopologyManager
+    qos             v1.PodQOSClass
+    numaNodes       NUMANodeList
+}
+
+type filterFn func(logr.Logger, *v1.Pod, *filterInfo) *framework.Status
 type scoringFn func(logr.Logger, *v1.Pod, topologyv1alpha2.ZoneList) (int64, *framework.Status)
 
 // TopologyMatch plugin which run simplified version of TopologyManager's admit handler

pkg/noderesourcetopology/pluginhelpers.go

Lines changed: 0 additions & 7 deletions
@@ -180,10 +180,3 @@ func getForeignPodsDetectMode(lh logr.Logger, cfg *apiconfig.NodeResourceTopolog
     }
     return foreignPodsDetect
 }
-
-func logNumaNodes(lh logr.Logger, desc, nodeName string, nodes NUMANodeList) {
-    for _, numaNode := range nodes {
-        numaItems := []interface{}{"numaCell", numaNode.NUMAID}
-        lh.V(6).Info(desc, stringify.ResourceListToLoggableWithValues(numaItems, numaNode.Resources)...)
-    }
-}
