@@ -41,24 +41,17 @@ const highestNUMAID = 8

type PolicyHandler func(pod *v1.Pod, zoneMap topologyv1alpha2.ZoneList) *framework.Status

-func singleNUMAContainerLevelHandler(lh logr.Logger, pod *v1.Pod, zones topologyv1alpha2.ZoneList, nodeInfo *framework.NodeInfo) *framework.Status {
+func singleNUMAContainerLevelHandler(lh logr.Logger, pod *v1.Pod, info *filterInfo) *framework.Status {
	lh.V(5).Info("container level single NUMA node handler")

-	// prepare NUMANodes list from zoneMap
-	nodes := createNUMANodeList(lh, zones)
-	qos := v1qos.GetPodQOS(pod)
-
-	// Node() != nil already verified in Filter(), which is the only public entry point
-	logNumaNodes(lh, "container handler NUMA resources", nodeInfo.Node().Name, nodes)
-
	// init containers run SERIALLY and BEFORE the normal containers.
	// https://kubernetes.io/docs/concepts/workloads/pods/init-containers/#understanding-init-containers
	// therefore, we don't need to accumulate their resources together
	for _, initContainer := range pod.Spec.InitContainers {
		clh := lh.WithValues(logging.KeyContainer, initContainer.Name, logging.KeyContainerKind, logging.GetInitContainerKind(&initContainer))
		clh.V(6).Info("desired resources", stringify.ResourceListToLoggable(initContainer.Resources.Requests)...)

-		_, match := resourcesAvailableInAnyNUMANodes(clh, nodes, initContainer.Resources.Requests, qos, nodeInfo)
+		_, match := resourcesAvailableInAnyNUMANodes(clh, info, initContainer.Resources.Requests)
		if !match {
			// we can't align an init container, so we definitely can't align the pod
			clh.V(2).Info("cannot align init container")
@@ -70,7 +63,7 @@ func singleNUMAContainerLevelHandler(lh logr.Logger, pod *v1.Pod, zones topology
		clh := lh.WithValues(logging.KeyContainer, container.Name, logging.KeyContainerKind, logging.KindContainerApp)
		clh.V(6).Info("container requests", stringify.ResourceListToLoggable(container.Resources.Requests)...)

-		numaID, match := resourcesAvailableInAnyNUMANodes(clh, nodes, container.Resources.Requests, qos, nodeInfo)
+		numaID, match := resourcesAvailableInAnyNUMANodes(clh, info, container.Resources.Requests)
		if !match {
			// we can't align the container, so we definitely can't align the pod
			clh.V(2).Info("cannot align container")
@@ -79,7 +72,7 @@ func singleNUMAContainerLevelHandler(lh logr.Logger, pod *v1.Pod, zones topology

		// subtract the resources requested by the container from the given NUMA node.
		// this is necessary so we don't allocate the same resources to the upcoming containers
-		err := subtractResourcesFromNUMANodeList(clh, nodes, numaID, qos, container.Resources.Requests)
+		err := subtractResourcesFromNUMANodeList(clh, info.numaNodes, numaID, info.qos, container.Resources.Requests)
		if err != nil {
			// this is an internal error which should never happen
			return framework.NewStatus(framework.Error, "inconsistent resource accounting", err.Error())
@@ -91,14 +84,14 @@ func singleNUMAContainerLevelHandler(lh logr.Logger, pod *v1.Pod, zones topology

// resourcesAvailableInAnyNUMANodes checks for sufficient resources and returns the NUMA ID that would be selected by the kubelet.
// this function requires a NUMANodeList with properly populated NUMANodes; NUMA IDs must be in the range 0-63
-func resourcesAvailableInAnyNUMANodes(lh logr.Logger, numaNodes NUMANodeList, resources v1.ResourceList, qos v1.PodQOSClass, nodeInfo *framework.NodeInfo) (int, bool) {
+func resourcesAvailableInAnyNUMANodes(lh logr.Logger, info *filterInfo, resources v1.ResourceList) (int, bool) {
	numaID := highestNUMAID
	bitmask := bm.NewEmptyBitMask()
	// set all bits; each bit is a NUMA node. if resources can't be aligned
	// on a NUMA node, its bit gets unset
	bitmask.Fill()

-	nodeResources := util.ResourceList(nodeInfo.Allocatable)
+	nodeResources := util.ResourceList(info.node.Allocatable)

	for resource, quantity := range resources {
		if quantity.IsZero() {
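
Editor's note on the bitmask pattern above (the per-resource loop continues in the next hunk): the mask starts full, and each resource intersects away the NUMA nodes that cannot fit it. A minimal standalone sketch, assuming the kubelet topologymanager bitmask package this file imports as bm; the fit outcomes and NUMA IDs below are invented for illustration:

package main

import (
	"fmt"

	bm "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/bitmask"
)

func main() {
	// start with every bit set; each bit stands for one NUMA node
	aligned := bm.NewEmptyBitMask()
	aligned.Fill()

	// per resource, build a mask of the NUMA nodes that can fit it
	// (hypothetical outcomes: CPU fits on nodes 0 and 1, memory only on 1)
	cpuFits, _ := bm.NewBitMask(0, 1)
	memFits, _ := bm.NewBitMask(1)

	// intersect: a node keeps its bit only if it can fit every resource
	aligned.And(cpuFits)
	aligned.And(memFits)

	// an empty mask means no single NUMA node can host the whole request;
	// otherwise the lowest set bit is the NUMA ID the kubelet would pick
	fmt.Println(aligned.IsEmpty(), aligned.GetBits()) // false [1]
}
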
@@ -119,14 +112,14 @@ func resourcesAvailableInAnyNUMANodes(lh logr.Logger, numaNodes NUMANodeList, re
	// obviously, bits which are not in the NUMA ID range stay unset
	hasNUMAAffinity := false
	resourceBitmask := bm.NewEmptyBitMask()
-	for _, numaNode := range numaNodes {
+	for _, numaNode := range info.numaNodes {
		numaQuantity, ok := numaNode.Resources[resource]
		if !ok {
			continue
		}

		hasNUMAAffinity = true
-		if !isResourceSetSuitable(qos, resource, quantity, numaQuantity) {
+		if !isResourceSetSuitable(info.qos, resource, quantity, numaQuantity) {
			continue
		}

@@ -159,18 +152,13 @@ func resourcesAvailableInAnyNUMANodes(lh logr.Logger, numaNodes NUMANodeList, re
	return numaID, ret
}

-func singleNUMAPodLevelHandler(lh logr.Logger, pod *v1.Pod, zones topologyv1alpha2.ZoneList, nodeInfo *framework.NodeInfo) *framework.Status {
+func singleNUMAPodLevelHandler(lh logr.Logger, pod *v1.Pod, info *filterInfo) *framework.Status {
	lh.V(5).Info("pod level single NUMA node handler")

	resources := util.GetPodEffectiveRequest(pod)
-
-	nodes := createNUMANodeList(lh, zones)
-
-	// Node() != nil already verified in Filter(), which is the only public entry point
-	logNumaNodes(lh, "pod handler NUMA resources", nodeInfo.Node().Name, nodes)
	lh.V(6).Info("pod desired resources", stringify.ResourceListToLoggable(resources)...)

-	numaID, match := resourcesAvailableInAnyNUMANodes(lh, createNUMANodeList(lh, zones), resources, v1qos.GetPodQOS(pod), nodeInfo)
+	numaID, match := resourcesAvailableInAnyNUMANodes(lh, info, resources)
	if !match {
		lh.V(2).Info("cannot align pod", "name", pod.Name)
		return framework.NewStatus(framework.Unschedulable, "cannot align pod")
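
Editor's note: util.GetPodEffectiveRequest is not shown in this diff. Under the standard Kubernetes init-container accounting, the pod-level effective request is assumed to be, per resource, the max of the sum over app containers and the largest single init container request. A sketch under that assumption; effectiveRequest is a hypothetical stand-in, and restartable (sidecar) init containers are ignored for brevity:

package sketch

import (
	v1 "k8s.io/api/core/v1"
)

// effectiveRequest approximates the assumed semantics of
// util.GetPodEffectiveRequest: per resource, the larger of the app
// containers' sum and the biggest single init container request.
func effectiveRequest(pod *v1.Pod) v1.ResourceList {
	out := v1.ResourceList{}
	for _, c := range pod.Spec.Containers {
		for name, q := range c.Resources.Requests {
			total := out[name]
			total.Add(q) // accumulate app container requests
			out[name] = total
		}
	}
	for _, c := range pod.Spec.InitContainers {
		for name, q := range c.Resources.Requests {
			// init containers run one at a time, so only the peak matters
			if cur, ok := out[name]; !ok || q.Cmp(cur) > 0 {
				out[name] = q
			}
		}
	}
	return out
}
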
@@ -184,7 +172,8 @@ func (tm *TopologyMatch) Filter(ctx context.Context, cycleState *framework.Cycle
	if nodeInfo.Node() == nil {
		return framework.NewStatus(framework.Error, "node not found")
	}
-	if v1qos.GetPodQOS(pod) == v1.PodQOSBestEffort && !resourcerequests.IncludeNonNative(pod) {
+	qos := v1qos.GetPodQOS(pod)
+	if qos == v1.PodQOSBestEffort && !resourcerequests.IncludeNonNative(pod) {
		return nil
	}

@@ -210,26 +199,36 @@ func (tm *TopologyMatch) Filter(ctx context.Context, cycleState *framework.Cycle

	lh.V(4).Info("found nrt data", "object", stringify.NodeResourceTopologyResources(nodeTopology), "conf", conf.String())

-	handler := filterHandlerFromTopologyManager(conf)
+	handler, boundary := filterHandlerFromTopologyManager(conf)
	if handler == nil {
		return nil
	}
-	status := handler(lh, pod, nodeTopology.Zones, nodeInfo)
+
+	numaNodes := createNUMANodeList(lh, nodeTopology.Zones)
+	lh.V(4).Info("aligning resources", "boundary", boundary, "numaCells", len(numaNodes))
+	fi := filterInfo{
+		nodeName:        nodeName,
+		node:            nodeInfo,
+		topologyManager: conf,
+		qos:             qos,
+		numaNodes:       numaNodes,
+	}
+	status := handler(lh, pod, &fi)
	if status != nil {
		tm.nrtCache.NodeMaybeOverReserved(nodeName, pod)
	}
	return status
}

-func filterHandlerFromTopologyManager(conf nodeconfig.TopologyManager) filterFn {
+func filterHandlerFromTopologyManager(conf nodeconfig.TopologyManager) (filterFn, string) {
	if conf.Policy != kubeletconfig.SingleNumaNodeTopologyManagerPolicy {
-		return nil
+		return nil, ""
	}
	if conf.Scope == kubeletconfig.PodTopologyManagerScope {
-		return singleNUMAPodLevelHandler
+		return singleNUMAPodLevelHandler, "pod"
	}
	if conf.Scope == kubeletconfig.ContainerTopologyManagerScope {
-		return singleNUMAContainerLevelHandler
+		return singleNUMAContainerLevelHandler, "container"
	}
-	return nil // cannot happen
+	return nil, "" // cannot happen
}
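
Editor's note: the filterInfo type this commit threads through the handlers is defined outside the hunks shown here. From the composite literal in Filter() and the call sites above, its shape is presumably close to the following; field types are inferred from usage, not copied from the commit, and the identifiers (framework, nodeconfig, v1, NUMANodeList) come from the file's existing imports and package:

// filterInfo (inferred sketch): per-node state computed once in Filter()
// and shared by the pod-scope and container-scope handlers.
type filterInfo struct {
	nodeName        string                     // target node name, used for cache bookkeeping
	node            *framework.NodeInfo        // scheduler view of the node (Allocatable, Node())
	topologyManager nodeconfig.TopologyManager // kubelet topology manager policy and scope
	qos             v1.PodQOSClass             // pod QoS, computed once instead of per handler
	numaNodes       NUMANodeList               // per-NUMA available resources built from NRT zones
}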