@@ -41,16 +41,7 @@ const highestNUMAID = 8
 
 type PolicyHandler func(pod *v1.Pod, zoneMap topologyv1alpha2.ZoneList) *framework.Status
 
-func singleNUMAContainerLevelHandler(lh logr.Logger, pod *v1.Pod, zones topologyv1alpha2.ZoneList, nodeInfo *framework.NodeInfo) *framework.Status {
-	lh.V(5).Info("container level single NUMA node handler")
-
-	// prepare NUMANodes list from zoneMap
-	nodes := createNUMANodeList(lh, zones)
-	qos := v1qos.GetPodQOS(pod)
-
-	// Node() != nil already verified in Filter(), which is the only public entry point
-	logNumaNodes(lh, "container handler NUMA resources", nodeInfo.Node().Name, nodes)
-
+func singleNUMAContainerLevelHandler(lh logr.Logger, pod *v1.Pod, info *filterInfo) *framework.Status {
 	// the init containers are running SERIALLY and BEFORE the normal containers.
 	// https://kubernetes.io/docs/concepts/workloads/pods/init-containers/#understanding-init-containers
 	// therefore, we don't need to accumulate their resources together
@@ -59,7 +50,7 @@ func singleNUMAContainerLevelHandler(lh logr.Logger, pod *v1.Pod, zones topology
 		clh := lh.WithValues(logging.KeyContainer, initContainer.Name, logging.KeyContainerKind, cntKind)
 		clh.V(6).Info("desired resources", stringify.ResourceListToLoggable(initContainer.Resources.Requests)...)
 
-		_, match := resourcesAvailableInAnyNUMANodes(clh, nodes, initContainer.Resources.Requests, qos, nodeInfo)
+		_, match := resourcesAvailableInAnyNUMANodes(clh, info, initContainer.Resources.Requests)
 		if !match {
 			msg := "cannot align " + cntKind + " container"
 			// we can't align init container, so definitely we can't align a pod
@@ -72,7 +63,7 @@ func singleNUMAContainerLevelHandler(lh logr.Logger, pod *v1.Pod, zones topology
 		clh := lh.WithValues(logging.KeyContainer, container.Name, logging.KeyContainerKind, logging.KindContainerApp)
 		clh.V(6).Info("container requests", stringify.ResourceListToLoggable(container.Resources.Requests)...)
 
-		numaID, match := resourcesAvailableInAnyNUMANodes(clh, nodes, container.Resources.Requests, qos, nodeInfo)
+		numaID, match := resourcesAvailableInAnyNUMANodes(clh, info, container.Resources.Requests)
 		if !match {
 			// we can't align container, so definitely we can't align a pod
 			clh.V(2).Info("cannot align container")
@@ -81,7 +72,7 @@ func singleNUMAContainerLevelHandler(lh logr.Logger, pod *v1.Pod, zones topology
 
 		// subtract the resources requested by the container from the given NUMA.
 		// this is necessary, so we won't allocate the same resources for the upcoming containers
-		err := subtractResourcesFromNUMANodeList(clh, nodes, numaID, qos, container.Resources.Requests)
+		err := subtractResourcesFromNUMANodeList(clh, info.numaNodes, numaID, info.qos, container.Resources.Requests)
 		if err != nil {
 			// this is an internal error which should never happen
 			return framework.NewStatus(framework.Error, "inconsistent resource accounting", err.Error())
@@ -93,14 +84,14 @@ func singleNUMAContainerLevelHandler(lh logr.Logger, pod *v1.Pod, zones topology
 
 // resourcesAvailableInAnyNUMANodes checks for sufficient resource and return the NUMAID that would be selected by Kubelet.
 // this function requires NUMANodeList with properly populated NUMANode, NUMAID should be in range 0-63
-func resourcesAvailableInAnyNUMANodes(lh logr.Logger, numaNodes NUMANodeList, resources v1.ResourceList, qos v1.PodQOSClass, nodeInfo *framework.NodeInfo) (int, bool) {
+func resourcesAvailableInAnyNUMANodes(lh logr.Logger, info *filterInfo, resources v1.ResourceList) (int, bool) {
 	numaID := highestNUMAID
 	bitmask := bm.NewEmptyBitMask()
 	// set all bits, each bit is a NUMA node, if resources couldn't be aligned
 	// on the NUMA node, bit should be unset
 	bitmask.Fill()
 
-	nodeResources := util.ResourceList(nodeInfo.Allocatable)
+	nodeResources := util.ResourceList(info.node.Allocatable)
 
 	for resource, quantity := range resources {
 		if quantity.IsZero() {
@@ -121,14 +112,14 @@ func resourcesAvailableInAnyNUMANodes(lh logr.Logger, numaNodes NUMANodeList, re
 		// obvious, bits which are not in the NUMA id's range would be unset
 		hasNUMAAffinity := false
 		resourceBitmask := bm.NewEmptyBitMask()
-		for _, numaNode := range numaNodes {
+		for _, numaNode := range info.numaNodes {
 			numaQuantity, ok := numaNode.Resources[resource]
 			if !ok {
 				continue
 			}
 
 			hasNUMAAffinity = true
-			if !isResourceSetSuitable(qos, resource, quantity, numaQuantity) {
+			if !isResourceSetSuitable(info.qos, resource, quantity, numaQuantity) {
 				continue
 			}
 
@@ -161,18 +152,11 @@ func resourcesAvailableInAnyNUMANodes(lh logr.Logger, numaNodes NUMANodeList, re
 	return numaID, ret
 }
 
-func singleNUMAPodLevelHandler(lh logr.Logger, pod *v1.Pod, zones topologyv1alpha2.ZoneList, nodeInfo *framework.NodeInfo) *framework.Status {
-	lh.V(5).Info("pod level single NUMA node handler")
-
+func singleNUMAPodLevelHandler(lh logr.Logger, pod *v1.Pod, info *filterInfo) *framework.Status {
 	resources := util.GetPodEffectiveRequest(pod)
-
-	nodes := createNUMANodeList(lh, zones)
-
-	// Node() != nil already verified in Filter(), which is the only public entry point
-	logNumaNodes(lh, "pod handler NUMA resources", nodeInfo.Node().Name, nodes)
 	lh.V(6).Info("pod desired resources", stringify.ResourceListToLoggable(resources)...)
 
-	numaID, match := resourcesAvailableInAnyNUMANodes(lh, createNUMANodeList(lh, zones), resources, v1qos.GetPodQOS(pod), nodeInfo)
+	numaID, match := resourcesAvailableInAnyNUMANodes(lh, info, resources)
 	if !match {
 		lh.V(2).Info("cannot align pod", "name", pod.Name)
 		return framework.NewStatus(framework.Unschedulable, "cannot align pod")
@@ -186,7 +170,8 @@ func (tm *TopologyMatch) Filter(ctx context.Context, cycleState *framework.Cycle
 	if nodeInfo.Node() == nil {
 		return framework.NewStatus(framework.Error, "node not found")
 	}
-	if v1qos.GetPodQOS(pod) == v1.PodQOSBestEffort && !resourcerequests.IncludeNonNative(pod) {
+	qos := v1qos.GetPodQOS(pod)
+	if qos == v1.PodQOSBestEffort && !resourcerequests.IncludeNonNative(pod) {
 		return nil
 	}
 
@@ -212,26 +197,36 @@ func (tm *TopologyMatch) Filter(ctx context.Context, cycleState *framework.Cycle
 
 	lh.V(4).Info("found nrt data", "object", stringify.NodeResourceTopologyResources(nodeTopology), "conf", conf.String())
 
-	handler := filterHandlerFromTopologyManager(conf)
+	handler, boundary := filterHandlerFromTopologyManager(conf)
 	if handler == nil {
 		return nil
 	}
-	status := handler(lh, pod, nodeTopology.Zones, nodeInfo)
+
+	numaNodes := createNUMANodeList(lh, nodeTopology.Zones)
+	lh.V(4).Info("aligning resources", "boundary", boundary, "numaCells", len(numaNodes))
+	fi := filterInfo{
+		nodeName:        nodeName,
+		node:            nodeInfo,
+		topologyManager: conf,
+		qos:             qos,
+		numaNodes:       numaNodes,
+	}
+	status := handler(lh, pod, &fi)
 	if status != nil {
 		tm.nrtCache.NodeMaybeOverReserved(nodeName, pod)
 	}
 	return status
 }
 
-func filterHandlerFromTopologyManager(conf nodeconfig.TopologyManager) filterFn {
+func filterHandlerFromTopologyManager(conf nodeconfig.TopologyManager) (filterFn, string) {
 	if conf.Policy != kubeletconfig.SingleNumaNodeTopologyManagerPolicy {
-		return nil
+		return nil, ""
 	}
 	if conf.Scope == kubeletconfig.PodTopologyManagerScope {
-		return singleNUMAPodLevelHandler
+		return singleNUMAPodLevelHandler, "pod"
 	}
 	if conf.Scope == kubeletconfig.ContainerTopologyManagerScope {
-		return singleNUMAContainerLevelHandler
+		return singleNUMAContainerLevelHandler, "container"
 	}
-	return nil // cannot happen
+	return nil, "" // cannot happen
 }
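
Note: the hunks above construct and consume a filterInfo value whose definition sits outside this diff. A plausible shape, inferred only from the struct literal in Filter() and the new handler signatures — the field types and the filterFn name are assumptions, not confirmed by this change:

// Sketch only: inferred from the diff, not copied from the source tree.
// filterInfo bundles the per-node state that previously traveled through
// the filter handlers as separate arguments (zones, nodeInfo, qos).
type filterInfo struct {
	nodeName        string                     // node under evaluation
	node            *framework.NodeInfo        // scheduler snapshot; read as info.node.Allocatable above
	topologyManager nodeconfig.TopologyManager // kubelet policy/scope detected for this node
	qos             v1.PodQOSClass             // computed once in Filter(), reused by the handlers
	numaNodes       NUMANodeList               // per-NUMA-cell resources from createNUMANodeList()
}

// filterFn matches the new handler signatures
// (singleNUMAPodLevelHandler, singleNUMAContainerLevelHandler).
type filterFn func(lh logr.Logger, pod *v1.Pod, info *filterInfo) *framework.Status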
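
Note: the heart of resourcesAvailableInAnyNUMANodes is a per-resource bitmask intersection: start with every NUMA cell eligible (bitmask.Fill()), build a mask of the cells that can satisfy each requested resource, and AND it into the running mask. Below is a minimal self-contained sketch of that technique using a raw uint64 instead of the kubelet bitmask helper imported here as bm; numaFit and its parameters are hypothetical names, and the lowest-surviving-cell tie-break is for illustration only, not necessarily the kubelet's exact selection rule:

package main

import "fmt"

// numaFit: perResourceFits[i] lists the NUMA cells able to satisfy
// resource i; numCells is the cell count (the diff notes NUMA IDs
// stay in the 0-63 range, so a uint64 is wide enough).
func numaFit(perResourceFits [][]int, numCells int) (int, bool) {
	mask := uint64(1)<<numCells - 1 // every cell starts eligible
	for _, cells := range perResourceFits {
		var resourceMask uint64
		for _, c := range cells {
			resourceMask |= 1 << c
		}
		mask &= resourceMask // drop cells that cannot hold this resource
	}
	if mask == 0 {
		return -1, false // no single cell satisfies all resources
	}
	for c := 0; c < numCells; c++ { // pick the lowest surviving cell
		if mask&(1<<c) != 0 {
			return c, true
		}
	}
	return -1, false
}

func main() {
	// resource 0 fits on cells {0, 1}; resource 1 only on cell {1}:
	// the intersection leaves cell 1.
	id, ok := numaFit([][]int{{0, 1}, {1}}, 2)
	fmt.Println(id, ok) // 1 true
}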