Skip to content

Commit 02c34fc

Browse files
committed
feat: add NodeResourcesFitPlus and ScarceResourceAvoidance plugin
Signed-off-by: LY-today <[email protected]>
1 parent f633dd2 commit 02c34fc

File tree

8 files changed

+909
-0
lines changed

8 files changed

+909
-0
lines changed

apis/config/register.go

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -45,6 +45,8 @@ func addKnownTypes(scheme *runtime.Scheme) error {
4545
&NetworkOverheadArgs{},
4646
&SySchedArgs{},
4747
&PeaksArgs{},
48+
&NodeResourcesFitPlusArgs{},
49+
&ScarceResourceAvoidanceArgs{},
4850
)
4951
return nil
5052
}

apis/config/types.go

Lines changed: 21 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -298,3 +298,24 @@ type PowerModel struct {
298298
// Power = K0 + K1 * e ^(K2 * x) : where x is utilisation
299299
// Idle power of node will be K0 + K1
300300
}
301+
302+
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// ScarceResourceAvoidanceArgs defines the parameters for ScarceResourceAvoidance plugin.
type ScarceResourceAvoidanceArgs struct {
	metav1.TypeMeta
	// Resources is the list of resource names the plugin acts on.
	// NOTE(review): exact semantics depend on the ScarceResourceAvoidance
	// plugin implementation, which is not visible here — confirm there.
	Resources []v1.ResourceName `json:"resources,omitempty"`
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// NodeResourcesFitPlusArgs defines the parameters for NodeResourcesFitPlus plugin.
type NodeResourcesFitPlusArgs struct {
	metav1.TypeMeta
	// Resources maps each resource name to the scoring strategy type and
	// weight used when combining per-resource NodeResourcesFit scores.
	Resources map[v1.ResourceName]ResourcesType `json:"resources"`
}

// ResourcesType holds the per-resource scoring configuration used by
// NodeResourcesFitPlus.
type ResourcesType struct {
	// Type selects the scoring strategy (e.g. MostAllocated or LeastAllocated).
	Type schedconfig.ScoringStrategyType `json:"type"`
	// Weight is the relative weight of this resource's score when averaging.
	Weight int64 `json:"weight"`
}

apis/config/zz_generated.deepcopy.go

Lines changed: 64 additions & 0 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

cmd/scheduler/main.go

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -18,6 +18,8 @@ package main
1818

1919
import (
2020
"os"
21+
noderesourcesfitplus "sigs.k8s.io/scheduler-plugins/pkg/noderesourcefitplus"
22+
"sigs.k8s.io/scheduler-plugins/pkg/scarceresourceavoidance"
2123

2224
"k8s.io/component-base/cli"
2325
_ "k8s.io/component-base/metrics/prometheus/clientgo" // for rest client metric registration
@@ -64,6 +66,8 @@ func main() {
6466
// app.WithPlugin(crossnodepreemption.Name, crossnodepreemption.New),
6567
app.WithPlugin(podstate.Name, podstate.New),
6668
app.WithPlugin(qos.Name, qos.New),
69+
app.WithPlugin(noderesourcesfitplus.Name, noderesourcesfitplus.New),
70+
app.WithPlugin(scarceresourceavoidance.Name, scarceresourceavoidance.New),
6771
)
6872

6973
code := cli.Run(command)
Lines changed: 152 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,152 @@
1+
package noderesourcesfitplus
2+
3+
import (
4+
"context"
5+
"fmt"
6+
7+
v1 "k8s.io/api/core/v1"
8+
"k8s.io/apimachinery/pkg/runtime"
9+
"k8s.io/kubernetes/pkg/api/v1/resource"
10+
k8sConfig "k8s.io/kubernetes/pkg/scheduler/apis/config"
11+
"k8s.io/kubernetes/pkg/scheduler/framework"
12+
plfeature "k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
13+
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources"
14+
"sigs.k8s.io/scheduler-plugins/apis/config"
15+
)
16+
17+
const (
18+
// Name is plugin name
19+
Name = "NodeResourcesFitPlus"
20+
)
21+
22+
var (
23+
_ framework.ScorePlugin = &Plugin{}
24+
)
25+
26+
type Plugin struct {
27+
handle framework.Handle
28+
args *config.NodeResourcesFitPlusArgs
29+
}
30+
31+
func New(_ context.Context, args runtime.Object, handle framework.Handle) (framework.Plugin, error) {
32+
33+
nodeResourcesFitPlusArgs, ok := args.(*config.NodeResourcesFitPlusArgs)
34+
35+
if !ok {
36+
return nil, fmt.Errorf("want args to be of type NodeResourcesFitPlusArgs, got %T", args)
37+
}
38+
39+
return &Plugin{
40+
handle: handle,
41+
args: nodeResourcesFitPlusArgs,
42+
}, nil
43+
}
44+
45+
// Name returns the plugin name registered with the scheduler framework.
func (s *Plugin) Name() string {
	return Name
}
48+
49+
func (s *Plugin) Score(ctx context.Context, state *framework.CycleState, p *v1.Pod, nodeName string) (int64, *framework.Status) {
50+
nodeInfo, err := s.handle.SnapshotSharedLister().NodeInfos().Get(nodeName)
51+
if err != nil {
52+
return 0, framework.NewStatus(framework.Error, fmt.Sprintf("getting node %q from Snapshot: %v", nodeName, err))
53+
}
54+
55+
var nodeScore int64
56+
var weightSum int64
57+
58+
podRequest, _ := fitsRequest(computePodResourceRequest(p).Resource, nodeInfo)
59+
60+
for _, requestSourceName := range podRequest {
61+
v, ok := s.args.Resources[requestSourceName]
62+
if !ok {
63+
continue
64+
}
65+
fit, err := noderesources.NewFit(ctx,
66+
&k8sConfig.NodeResourcesFitArgs{
67+
ScoringStrategy: &k8sConfig.ScoringStrategy{
68+
Type: v.Type, // MostAllocated or LeastAllocated
69+
Resources: []k8sConfig.ResourceSpec{
70+
{Name: string(requestSourceName), Weight: 1},
71+
},
72+
},
73+
}, s.handle, plfeature.Features{})
74+
75+
if err != nil {
76+
return 0, framework.NewStatus(framework.Error, err.Error())
77+
}
78+
resourceScore, status := fit.(framework.ScorePlugin).Score(ctx, state, p, nodeName)
79+
if !status.IsSuccess() {
80+
return 0, framework.NewStatus(framework.Error, err.Error())
81+
}
82+
83+
nodeScore += resourceScore * v.Weight
84+
weightSum += v.Weight
85+
}
86+
87+
if weightSum == 0 {
88+
return framework.MaxNodeScore, framework.NewStatus(framework.Success, "")
89+
}
90+
scores := nodeScore / weightSum
91+
92+
return scores, nil
93+
}
94+
95+
func (p *Plugin) ScoreExtensions() framework.ScoreExtensions {
96+
return nil
97+
}
98+
99+
type preFilterState struct {
100+
framework.Resource
101+
}
102+
103+
func computePodResourceRequest(pod *v1.Pod) *preFilterState {
104+
// pod hasn't scheduled yet so we don't need to worry about InPlacePodVerticalScalingEnabled
105+
reqs := resource.PodRequests(pod, resource.PodResourcesOptions{})
106+
result := &preFilterState{}
107+
result.SetMaxResource(reqs)
108+
return result
109+
}
110+
111+
func fitsRequest(podRequest framework.Resource, nodeInfo *framework.NodeInfo) ([]v1.ResourceName, []v1.ResourceName) {
112+
var podRequestResource []v1.ResourceName
113+
var nodeRequestResource []v1.ResourceName
114+
115+
if podRequest.MilliCPU > 0 {
116+
podRequestResource = append(podRequestResource, v1.ResourceCPU)
117+
}
118+
119+
if nodeInfo.Allocatable.MilliCPU > 0 {
120+
nodeRequestResource = append(nodeRequestResource, v1.ResourceCPU)
121+
}
122+
123+
if podRequest.Memory > 0 {
124+
podRequestResource = append(podRequestResource, v1.ResourceMemory)
125+
}
126+
127+
if nodeInfo.Allocatable.Memory > 0 {
128+
nodeRequestResource = append(nodeRequestResource, v1.ResourceMemory)
129+
}
130+
131+
if podRequest.EphemeralStorage > 0 {
132+
podRequestResource = append(podRequestResource, v1.ResourceEphemeralStorage)
133+
}
134+
135+
if nodeInfo.Allocatable.EphemeralStorage > 0 {
136+
nodeRequestResource = append(nodeRequestResource, v1.ResourceEphemeralStorage)
137+
}
138+
139+
for rName, rQuant := range podRequest.ScalarResources {
140+
if rQuant > 0 {
141+
podRequestResource = append(podRequestResource, rName)
142+
}
143+
}
144+
145+
for rName, rQuant := range nodeInfo.Allocatable.ScalarResources {
146+
if rQuant > 0 {
147+
nodeRequestResource = append(nodeRequestResource, rName)
148+
}
149+
}
150+
151+
return podRequestResource, nodeRequestResource
152+
}

0 commit comments

Comments
 (0)