Commit 78e9e58

nrt: score: introduce and use scoreInfo

Similarly to what we did for filter, add a helper struct to group together all the context we need to carry in the internal implementation functions. The goals are to streamline the existing code, to reduce redundant calls, and to make room for more arguments to be passed through, such as the topology manager configuration.

Signed-off-by: Francesco Romani <[email protected]>

1 parent b4b6bb2 commit 78e9e58
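In short: the per-node context that each scoring function used to rebuild for itself (the NUMA node list via createNUMANodeList, the pod QoS class via v1qos.GetPodQOS) is now computed once in Score and carried down in a single scoreInfo struct. A minimal sketch of the pattern, distilled from the diffs below; scoreBefore and scoreAfter are hypothetical names, not functions from this commit:

    // Before: every scoring function derived its own context from the zone list.
    func scoreBefore(lh logr.Logger, pod *v1.Pod, zones topologyv1alpha2.ZoneList) (int64, *framework.Status) {
        nodes := createNUMANodeList(lh, zones) // recomputed in each function
        qos := v1qos.GetPodQOS(pod)            // recomputed in each function
        // ... scoring logic over nodes and qos ...
        return framework.MaxNodeScore, nil
    }

    // After: Score builds scoreInfo once per node and hands it through.
    func scoreAfter(lh logr.Logger, pod *v1.Pod, info *scoreInfo) (int64, *framework.Status) {
        // info.numaNodes, info.qos and info.topologyManager arrive precomputed;
        // future fields can be added to scoreInfo without touching signatures.
        // ... scoring logic over info.numaNodes and info.qos ...
        return framework.MaxNodeScore, nil
    }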

File tree

3 files changed: +34 −31 lines


pkg/noderesourcetopology/least_numa.go

Lines changed: 7 additions & 15 deletions
@@ -18,12 +18,10 @@ package noderesourcetopology
 
 import (
 	v1 "k8s.io/api/core/v1"
-	v1qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos"
 	"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/bitmask"
 	"k8s.io/kubernetes/pkg/scheduler/framework"
 
 	"github.com/go-logr/logr"
-	topologyv1alpha2 "github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/apis/topology/v1alpha2"
 	"gonum.org/v1/gonum/stat/combin"
 
 	"sigs.k8s.io/scheduler-plugins/pkg/util"
@@ -34,20 +32,17 @@ const (
 	maxDistanceValue = 255
 )
 
-func leastNUMAContainerScopeScore(lh logr.Logger, pod *v1.Pod, zones topologyv1alpha2.ZoneList) (int64, *framework.Status) {
-	nodes := createNUMANodeList(lh, zones)
-	qos := v1qos.GetPodQOS(pod)
-
+func leastNUMAContainerScopeScore(lh logr.Logger, pod *v1.Pod, info *scoreInfo) (int64, *framework.Status) {
 	maxNUMANodesCount := 0
 	allContainersMinAvgDistance := true
 	// the order how TopologyManager asks for hint is important so doing it in the same order
 	// https://github.com/kubernetes/kubernetes/blob/master/pkg/kubelet/cm/topologymanager/scope_container.go#L52
 	for _, container := range append(pod.Spec.InitContainers, pod.Spec.Containers...) {
 		// if a container requests only non NUMA just continue
-		if onlyNonNUMAResources(nodes, container.Resources.Requests) {
+		if onlyNonNUMAResources(info.numaNodes, container.Resources.Requests) {
 			continue
 		}
-		numaNodes, isMinAvgDistance := numaNodesRequired(lh, qos, nodes, container.Resources.Requests)
+		numaNodes, isMinAvgDistance := numaNodesRequired(lh, info.qos, info.numaNodes, container.Resources.Requests)
 		// container's resources can't fit onto node, return MinNodeScore for whole pod
 		if numaNodes == nil {
 			// score plugin should be running after resource filter plugin so we should always find sufficient amount of NUMA nodes
@@ -65,7 +60,7 @@ func leastNUMAContainerScopeScore(lh logr.Logger, pod *v1.Pod, zones topologyv1a
 
 		// subtract the resources requested by the container from the given NUMA.
 		// this is necessary, so we won't allocate the same resources for the upcoming containers
-		subtractFromNUMAs(container.Resources.Requests, nodes, numaNodes.GetBits()...)
+		subtractFromNUMAs(container.Resources.Requests, info.numaNodes, numaNodes.GetBits()...)
 	}
 
 	if maxNUMANodesCount == 0 {
@@ -75,17 +70,14 @@ func leastNUMAContainerScopeScore(lh logr.Logger, pod *v1.Pod, zones topologyv1a
 	return normalizeScore(maxNUMANodesCount, allContainersMinAvgDistance), nil
 }
 
-func leastNUMAPodScopeScore(lh logr.Logger, pod *v1.Pod, zones topologyv1alpha2.ZoneList) (int64, *framework.Status) {
-	nodes := createNUMANodeList(lh, zones)
-	qos := v1qos.GetPodQOS(pod)
-
+func leastNUMAPodScopeScore(lh logr.Logger, pod *v1.Pod, info *scoreInfo) (int64, *framework.Status) {
 	resources := util.GetPodEffectiveRequest(pod)
 	// if a pod requests only non NUMA resources return max score
-	if onlyNonNUMAResources(nodes, resources) {
+	if onlyNonNUMAResources(info.numaNodes, resources) {
 		return framework.MaxNodeScore, nil
 	}
 
-	numaNodes, isMinAvgDistance := numaNodesRequired(lh, qos, nodes, resources)
+	numaNodes, isMinAvgDistance := numaNodesRequired(lh, info.qos, info.numaNodes, resources)
 	// pod's resources can't fit onto node, return MinNodeScore
 	if numaNodes == nil {
 		// score plugin should be running after resource filter plugin so we should always find sufficient amount of NUMA nodes
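One subtlety this refactor preserves (my reading of the diff, not stated in the commit message): leastNUMAContainerScopeScore consumes capacity from info.numaNodes through subtractFromNUMAs, so the NUMANodeList stored in a scoreInfo must be built fresh for every Score invocation; the new Score code in score.go below does exactly that with createNUMANodeList. A short fragment illustrating why sharing one scoreInfo across calls would be wrong:

    // Hypothetical misuse (fragment; lh, pod, qos, zones assumed in scope):
    // reusing one scoreInfo across scoring calls double-counts allocations.
    si := scoreInfo{qos: qos, numaNodes: createNUMANodeList(lh, zones)}
    first, _ := leastNUMAContainerScopeScore(lh, pod, &si)  // subtracts requests from si.numaNodes
    second, _ := leastNUMAContainerScopeScore(lh, pod, &si) // sees already-depleted capacity: wrong
    _, _ = first, second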

pkg/noderesourcetopology/plugin.go

Lines changed: 8 additions & 1 deletion
@@ -58,7 +58,14 @@ type filterInfo struct {
 }
 
 type filterFn func(logr.Logger, *v1.Pod, *filterInfo) *framework.Status
-type scoringFn func(logr.Logger, *v1.Pod, topologyv1alpha2.ZoneList) (int64, *framework.Status)
+
+type scoreInfo struct {
+	topologyManager nodeconfig.TopologyManager
+	qos             v1.PodQOSClass
+	numaNodes       NUMANodeList
+}
+
+type scoringFn func(logr.Logger, *v1.Pod, *scoreInfo) (int64, *framework.Status)
 
 // TopologyMatch plugin which run simplified version of TopologyManager's admit handler
 type TopologyMatch struct {
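Note that none of the scoring functions in this commit read the topologyManager field yet; it is the "room for more arguments" the commit message mentions. A hypothetical future consumer, shown only to illustrate the direction (policyAwareScore is not part of this commit):

    // Hypothetical: a scoringFn that also consults the kubelet topology
    // manager configuration now carried inside scoreInfo.
    func policyAwareScore(lh logr.Logger, pod *v1.Pod, info *scoreInfo) (int64, *framework.Status) {
        if info.topologyManager.Scope == kubeletconfig.PodTopologyManagerScope {
            lh.V(6).Info("scoring with pod-scope topology manager")
        }
        // ... scoring logic over info.qos and info.numaNodes ...
        return framework.MaxNodeScore, nil
    }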

pkg/noderesourcetopology/score.go

Lines changed: 19 additions & 15 deletions
@@ -30,7 +30,6 @@ import (
 	apiconfig "sigs.k8s.io/scheduler-plugins/apis/config"
 
 	"github.com/go-logr/logr"
-	topologyv1alpha2 "github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/apis/topology/v1alpha2"
 	"sigs.k8s.io/scheduler-plugins/pkg/noderesourcetopology/logging"
 	"sigs.k8s.io/scheduler-plugins/pkg/noderesourcetopology/nodeconfig"
 	"sigs.k8s.io/scheduler-plugins/pkg/noderesourcetopology/stringify"
@@ -70,7 +69,8 @@ func (tm *TopologyMatch) Score(ctx context.Context, state *framework.CycleState,
 
 	lh.V(6).Info("scoring node")
 	// if it's a non-guaranteed pod, every node is considered to be a good fit
-	if v1qos.GetPodQOS(pod) != v1.PodQOSGuaranteed {
+	qos := v1qos.GetPodQOS(pod)
+	if qos != v1.PodQOSGuaranteed {
 		return framework.MaxNodeScore, nil
 	}
 
@@ -87,11 +87,18 @@ func (tm *TopologyMatch) Score(ctx context.Context, state *framework.CycleState,
 
 	lh.V(6).Info("found object", "noderesourcetopology", stringify.NodeResourceTopologyResources(nodeTopology))
 
-	handler := tm.scoringHandlerFromTopologyManagerConfig(nodeconfig.TopologyManagerFromNodeResourceTopology(lh, nodeTopology))
+	conf := nodeconfig.TopologyManagerFromNodeResourceTopology(lh, nodeTopology)
+	handler := tm.scoringHandlerFromTopologyManagerConfig(conf)
 	if handler == nil {
 		return 0, nil
 	}
-	return handler(lh, pod, nodeTopology.Zones)
+	numaNodes := createNUMANodeList(lh, nodeTopology.Zones)
+	si := scoreInfo{
+		topologyManager: conf,
+		qos:             qos,
+		numaNodes:       numaNodes,
+	}
+	return handler(lh, pod, &si)
 }
 
 func (tm *TopologyMatch) ScoreExtensions() framework.ScoreExtensions {
@@ -132,27 +139,24 @@ func getScoringStrategyFunction(strategy apiconfig.ScoringStrategyType) (scoreSt
 	}
 }
 
-func podScopeScore(lh logr.Logger, pod *v1.Pod, zones topologyv1alpha2.ZoneList, scorerFn scoreStrategyFn, resourceToWeightMap resourceToWeightMap) (int64, *framework.Status) {
+func podScopeScore(lh logr.Logger, pod *v1.Pod, info *scoreInfo, scorerFn scoreStrategyFn, resourceToWeightMap resourceToWeightMap) (int64, *framework.Status) {
 	// This code is in Admit implementation of pod scope
 	// https://github.com/kubernetes/kubernetes/blob/9ff3b7e744b34c099c1405d9add192adbef0b6b1/pkg/kubelet/cm/topologymanager/scope_pod.go#L52
 	// but it works with HintProviders, takes into account all possible allocations.
 	resources := util.GetPodEffectiveRequest(pod)
-
-	allocatablePerNUMA := createNUMANodeList(lh, zones)
-	finalScore := scoreForEachNUMANode(lh, resources, allocatablePerNUMA, scorerFn, resourceToWeightMap)
+	finalScore := scoreForEachNUMANode(lh, resources, info.numaNodes, scorerFn, resourceToWeightMap)
 	lh.V(2).Info("pod scope scoring final node score", "finalScore", finalScore)
 	return finalScore, nil
 }
 
-func containerScopeScore(lh logr.Logger, pod *v1.Pod, zones topologyv1alpha2.ZoneList, scorerFn scoreStrategyFn, resourceToWeightMap resourceToWeightMap) (int64, *framework.Status) {
+func containerScopeScore(lh logr.Logger, pod *v1.Pod, info *scoreInfo, scorerFn scoreStrategyFn, resourceToWeightMap resourceToWeightMap) (int64, *framework.Status) {
 	// This code is in Admit implementation of container scope
 	// https://github.com/kubernetes/kubernetes/blob/9ff3b7e744b34c099c1405d9add192adbef0b6b1/pkg/kubelet/cm/topologymanager/scope_container.go#L52
 	containers := append(pod.Spec.InitContainers, pod.Spec.Containers...)
 	contScore := make([]float64, len(containers))
-	allocatablePerNUMA := createNUMANodeList(lh, zones)
 
 	for i, container := range containers {
-		contScore[i] = float64(scoreForEachNUMANode(lh, container.Resources.Requests, allocatablePerNUMA, scorerFn, resourceToWeightMap))
+		contScore[i] = float64(scoreForEachNUMANode(lh, container.Resources.Requests, info.numaNodes, scorerFn, resourceToWeightMap))
 		lh.V(6).Info("container scope scoring", "container", container.Name, "score", contScore[i])
 	}
 	finalScore := int64(stat.Mean(contScore, nil))
@@ -174,13 +178,13 @@ func (tm *TopologyMatch) scoringHandlerFromTopologyManagerConfig(conf nodeconfig
 		return nil
 	}
 	if conf.Scope == kubeletconfig.PodTopologyManagerScope {
-		return func(lh logr.Logger, pod *v1.Pod, zones topologyv1alpha2.ZoneList) (int64, *framework.Status) {
-			return podScopeScore(lh, pod, zones, tm.scoreStrategyFunc, tm.resourceToWeightMap)
+		return func(lh logr.Logger, pod *v1.Pod, info *scoreInfo) (int64, *framework.Status) {
+			return podScopeScore(lh, pod, info, tm.scoreStrategyFunc, tm.resourceToWeightMap)
 		}
 	}
 	if conf.Scope == kubeletconfig.ContainerTopologyManagerScope {
-		return func(lh logr.Logger, pod *v1.Pod, zones topologyv1alpha2.ZoneList) (int64, *framework.Status) {
-			return containerScopeScore(lh, pod, zones, tm.scoreStrategyFunc, tm.resourceToWeightMap)
+		return func(lh logr.Logger, pod *v1.Pod, info *scoreInfo) (int64, *framework.Status) {
+			return containerScopeScore(lh, pod, info, tm.scoreStrategyFunc, tm.resourceToWeightMap)
 		}
 	}
 	return nil // cannot happen
