
Commit e70dedf

Merge pull request #569 from czybjtu/feat_del_options
Delete Useless options in controller
2 parents fc0b16b + 0383bfd

4 files changed (+18 -103 lines)

cmd/controller/app/options.go

Lines changed: 0 additions & 6 deletions
@@ -21,11 +21,8 @@ import (
 )

 type ServerRunOptions struct {
-	KubeConfig string
-	MasterUrl  string
 	MetricsAddr    string
 	ProbeAddr      string
-	InCluster bool
 	ApiServerQPS   int
 	ApiServerBurst int
 	Workers        int
@@ -39,9 +36,6 @@ func NewServerRunOptions() *ServerRunOptions {
 }

 func (s *ServerRunOptions) addAllFlags() {
-	pflag.BoolVar(&s.InCluster, "incluster", s.InCluster, "If controller run incluster.")
-	pflag.StringVar(&s.KubeConfig, "kubeConfig", s.KubeConfig, "Kube Config path if not run in cluster.")
-	pflag.StringVar(&s.MasterUrl, "masterUrl", s.MasterUrl, "Master Url if not run in cluster.")
 	pflag.StringVar(&s.MetricsAddr, "metricsAddr", ":8080", "Metrics server bind listen address.")
 	pflag.StringVar(&s.ProbeAddr, "probeAddr", ":8081", "Probe endpoint bind address.")
 	pflag.IntVar(&s.ApiServerQPS, "qps", 5, "qps of query apiserver.")
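Why these six lines can go: controller-runtime resolves the REST config on its own. The config loader behind ctrl.GetConfigOrDie() (used in server.go below) already honors a --kubeconfig flag, the KUBECONFIG environment variable, in-cluster service-account credentials, and $HOME/.kube/config, in that order, so the hand-rolled incluster/kubeConfig/masterUrl flags duplicated it. A minimal sketch of how options.go reads after this hunk; anything outside the visible context lines (package clause, import, the leader-election field) is an assumption, not quoted from the repo:

	package app

	import "github.com/spf13/pflag"

	type ServerRunOptions struct {
		MetricsAddr          string
		ProbeAddr            string
		ApiServerQPS         int
		ApiServerBurst       int
		Workers              int
		EnableLeaderElection bool // assumed: referenced as s.EnableLeaderElection in the old server.go
	}

	func (s *ServerRunOptions) addAllFlags() {
		pflag.StringVar(&s.MetricsAddr, "metricsAddr", ":8080", "Metrics server bind listen address.")
		pflag.StringVar(&s.ProbeAddr, "probeAddr", ":8081", "Probe endpoint bind address.")
		pflag.IntVar(&s.ApiServerQPS, "qps", 5, "qps of query apiserver.")
		// burst, workers, and leader-election flags follow in the original file (outside the hunk)
	}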

cmd/controller/app/server.go

Lines changed: 10 additions & 95 deletions
@@ -17,31 +17,16 @@ limitations under the License.
 package app

 import (
-	"context"
-	"os"
-
 	"k8s.io/apimachinery/pkg/runtime"
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
-	"k8s.io/apimachinery/pkg/util/uuid"
-	"k8s.io/apiserver/pkg/server"
-	"k8s.io/client-go/informers"
-	"k8s.io/client-go/kubernetes"
 	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
-	"k8s.io/client-go/rest"
-	restclient "k8s.io/client-go/rest"
-	"k8s.io/client-go/tools/clientcmd"
-	"k8s.io/client-go/tools/leaderelection"
-	"k8s.io/client-go/tools/leaderelection/resourcelock"
-	"k8s.io/klog/v2"
 	"k8s.io/klog/v2/klogr"

 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/healthz"

 	schedulingv1a1 "sigs.k8s.io/scheduler-plugins/apis/scheduling/v1alpha1"
 	"sigs.k8s.io/scheduler-plugins/pkg/controllers"
-	schedclientset "sigs.k8s.io/scheduler-plugins/pkg/generated/clientset/versioned"
-	schedformers "sigs.k8s.io/scheduler-plugins/pkg/generated/informers/externalversions"
 )

 var (
@@ -55,38 +40,10 @@ func init() {
 	utilruntime.Must(schedulingv1a1.AddToScheme(scheme))
 }

-func newConfig(kubeconfig, master string, inCluster bool) (*restclient.Config, error) {
-	var (
-		config *rest.Config
-		err    error
-	)
-	if inCluster {
-		config, err = rest.InClusterConfig()
-	} else {
-		config, err = clientcmd.BuildConfigFromFlags(master, kubeconfig)
-	}
-	if err != nil {
-		return nil, err
-	}
-	return config, nil
-}
-
 func Run(s *ServerRunOptions) error {
-	ctx := context.Background()
-	config, err := newConfig(s.KubeConfig, s.MasterUrl, s.InCluster)
-	if err != nil {
-		klog.ErrorS(err, "Failed to parse config")
-		os.Exit(1)
-	}
+	config := ctrl.GetConfigOrDie()
 	config.QPS = float32(s.ApiServerQPS)
 	config.Burst = s.ApiServerBurst
-	stopCh := server.SetupSignalHandler()
-	schedClient := schedclientset.NewForConfigOrDie(config)
-	kubeClient := kubernetes.NewForConfigOrDie(config)
-
-	schedInformerFactory := schedformers.NewSharedInformerFactory(schedClient, 0)
-
-	coreInformerFactory := informers.NewSharedInformerFactory(kubeClient, 0)

 	// Controller Runtime Controllers
 	ctrl.SetLogger(klogr.New())
@@ -105,16 +62,18 @@ func Run(s *ServerRunOptions) error {
 	}

 	if err = (&controllers.PodGroupReconciler{
-		Client: mgr.GetClient(),
-		Scheme: mgr.GetScheme(),
+		Client:  mgr.GetClient(),
+		Scheme:  mgr.GetScheme(),
+		Workers: s.Workers,
 	}).SetupWithManager(mgr); err != nil {
 		setupLog.Error(err, "unable to create controller", "controller", "PodGroup")
 		return err
 	}

 	if err = (&controllers.ElasticQuotaReconciler{
-		Client: mgr.GetClient(),
-		Scheme: mgr.GetScheme(),
+		Client:  mgr.GetClient(),
+		Scheme:  mgr.GetScheme(),
+		Workers: s.Workers,
 	}).SetupWithManager(mgr); err != nil {
 		setupLog.Error(err, "unable to create controller", "controller", "ElasticQuota")
 		return err
@@ -129,53 +88,9 @@ func Run(s *ServerRunOptions) error {
 		return err
 	}

-	run := func(ctx context.Context) {
-		setupLog.Info("starting manager")
-		if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
-			setupLog.Error(err, "unable to start manager")
-			panic(err)
-		}
-
-		select {}
-	}
-	schedInformerFactory.Start(stopCh)
-	coreInformerFactory.Start(stopCh)
-	if !s.EnableLeaderElection {
-		run(ctx)
-	} else {
-		id, err := os.Hostname()
-		if err != nil {
-			return err
-		}
-		// add a uniquifier so that two processes on the same host don't accidentally both become active
-		id = id + "_" + string(uuid.NewUUID())
-
-		rl, err := resourcelock.New("endpoints",
-			"kube-system",
-			"sched-plugins-controller",
-			kubeClient.CoreV1(),
-			kubeClient.CoordinationV1(),
-			resourcelock.ResourceLockConfig{
-				Identity: id,
-			})
-		if err != nil {
-			klog.ErrorS(err, "Resource lock creation failed")
-			os.Exit(1)
-		}
-
-		leaderelection.RunOrDie(context.TODO(), leaderelection.LeaderElectionConfig{
-			Lock: rl,
-			Callbacks: leaderelection.LeaderCallbacks{
-				OnStartedLeading: run,
-				OnStoppedLeading: func() {
-					klog.ErrorS(err, "Leaderelection lost")
-					os.Exit(1)
-				},
-			},
-			Name: "scheduler-plugins controller",
-		})
+	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
+		setupLog.Error(err, "unable to start manager")
+		return err
 	}
-
-	<-stopCh
 	return nil
 }
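Everything deleted here is plumbing the controller-runtime manager already provides: config loading, signal handling, blocking run semantics, and leader election. A hedged sketch of how the manager construction, which sits outside these hunks, can absorb the removed endpoints-lock code; the field values are illustrative rather than quoted from the repo, and recent controller-runtime versions default to a Lease-based lock instead of the old Endpoints lock in kube-system:

	// Hedged sketch, not the repo's actual NewManager call.
	mgr, err := ctrl.NewManager(config, ctrl.Options{
		Scheme:                 scheme,
		MetricsBindAddress:     s.MetricsAddr,
		HealthProbeBindAddress: s.ProbeAddr,
		LeaderElection:         s.EnableLeaderElection,
		LeaderElectionID:       "sched-plugins-controller", // illustrative; any stable, cluster-unique ID
	})
	if err != nil {
		setupLog.Error(err, "unable to start manager")
		return err
	}
	// mgr.Start blocks until the SetupSignalHandler context is cancelled,
	// which is why the old stopCh / select {} machinery could be deleted.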

pkg/controllers/elasticquota_controller.go

Lines changed: 4 additions & 1 deletion
@@ -30,6 +30,7 @@ import (

 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/controller"
 	"sigs.k8s.io/controller-runtime/pkg/handler"
 	"sigs.k8s.io/controller-runtime/pkg/log"
 	"sigs.k8s.io/controller-runtime/pkg/source"
@@ -41,7 +42,8 @@ type ElasticQuotaReconciler struct {
 	recorder record.EventRecorder

 	client.Client
-	Scheme *runtime.Scheme
+	Scheme  *runtime.Scheme
+	Workers int
 }

 // +kubebuilder:rbac:groups=scheduling.x-k8s.io,resources=elasticquota,verbs=get;list;watch;create;update;patch;delete
@@ -173,5 +175,6 @@ func (r *ElasticQuotaReconciler) SetupWithManager(mgr ctrl.Manager) error {
 	return ctrl.NewControllerManagedBy(mgr).
 		Watches(&source.Kind{Type: &v1.Pod{}}, &handler.EnqueueRequestForObject{}).
 		For(&schedv1alpha1.ElasticQuota{}).
+		WithOptions(controller.Options{MaxConcurrentReconciles: r.Workers}).
 		Complete(r)
 }

pkg/controllers/podgroup_controller.go

Lines changed: 4 additions & 1 deletion
@@ -33,6 +33,7 @@ import (

 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/controller"
 	"sigs.k8s.io/controller-runtime/pkg/handler"
 	"sigs.k8s.io/controller-runtime/pkg/log"
 	"sigs.k8s.io/controller-runtime/pkg/source"
@@ -47,7 +48,8 @@ type PodGroupReconciler struct {
 	recorder record.EventRecorder

 	client.Client
-	Scheme *runtime.Scheme
+	Scheme  *runtime.Scheme
+	Workers int
 }

 // +kubebuilder:rbac:groups=scheduling.x-k8s.io,resources=podgroups,verbs=get;list;watch;create;update;patch;delete
@@ -194,6 +196,7 @@ func (r *PodGroupReconciler) SetupWithManager(mgr ctrl.Manager) error {
 		Watches(&source.Kind{Type: &v1.Pod{}},
 			handler.EnqueueRequestsFromMapFunc(r.podToPodGroup)).
 		For(&schedv1alpha1.PodGroup{}).
+		WithOptions(controller.Options{MaxConcurrentReconciles: r.Workers}).
 		Complete(r)
 }
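The PodGroup wiring mirrors the ElasticQuota change above, so both controllers scale with the single Workers option. One caution when raising it: reconciles run concurrently across different PodGroups (never twice at once for the same object), so any mutable state later added to a reconciler must be goroutine-safe. A purely hypothetical illustration; none of the fields or helpers below exist in this repo:

	package controllers

	import (
		"sync"

		"k8s.io/apimachinery/pkg/runtime"
		"sigs.k8s.io/controller-runtime/pkg/client"
	)

	// Hypothetical variant of PodGroupReconciler, shown only to make the
	// concurrency contract concrete: with Workers > 1, Reconcile runs on
	// several goroutines, so shared mutable state needs a lock.
	type trackedReconciler struct {
		client.Client
		Scheme  *runtime.Scheme
		Workers int

		mu   sync.Mutex          // hypothetical: guards seen
		seen map[string]struct{} // hypothetical: keys already reconciled
	}

	func (r *trackedReconciler) markSeen(key string) {
		r.mu.Lock()
		defer r.mu.Unlock()
		if r.seen == nil {
			r.seen = map[string]struct{}{}
		}
		r.seen[key] = struct{}{}
	}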
