@@ -29,7 +29,6 @@ import (
 	"k8s.io/apimachinery/pkg/util/runtime"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/wait"
-	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	coreinformers "k8s.io/client-go/informers/core/v1"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/kubernetes/scheme"
@@ -40,6 +39,7 @@ import (
 	"k8s.io/client-go/util/workqueue"
 	cloudprovider "k8s.io/cloud-provider"
 	servicehelper "k8s.io/cloud-provider/service/helpers"
+	"k8s.io/component-base/featuregate"
 	"k8s.io/component-base/metrics/prometheus/ratelimiter"
 	"k8s.io/klog"
 )
@@ -110,6 +110,9 @@ type Controller struct {
 	nodeListerSynced cache.InformerSynced
 	// services that need to be synced
 	queue workqueue.RateLimitingInterface
+	// feature gates stored in local fields for better testability
+	legacyNodeRoleFeatureEnabled       bool
+	serviceNodeExclusionFeatureEnabled bool
 }
 
 // New returns a new service controller to keep cloud provider service resources
@@ -120,6 +123,7 @@ func New(
 	serviceInformer coreinformers.ServiceInformer,
 	nodeInformer coreinformers.NodeInformer,
 	clusterName string,
+	featureGate featuregate.FeatureGate,
 ) (*Controller, error) {
 	broadcaster := record.NewBroadcaster()
 	broadcaster.StartLogging(klog.Infof)
@@ -133,16 +137,18 @@ func New(
 	}
 
 	s := &Controller{
-		cloud:            cloud,
-		knownHosts:       []*v1.Node{},
-		kubeClient:       kubeClient,
-		clusterName:      clusterName,
-		cache:            &serviceCache{serviceMap: make(map[string]*cachedService)},
-		eventBroadcaster: broadcaster,
-		eventRecorder:    recorder,
-		nodeLister:       nodeInformer.Lister(),
-		nodeListerSynced: nodeInformer.Informer().HasSynced,
-		queue:            workqueue.NewNamedRateLimitingQueue(workqueue.NewItemExponentialFailureRateLimiter(minRetryDelay, maxRetryDelay), "service"),
+		cloud:                              cloud,
+		knownHosts:                         []*v1.Node{},
+		kubeClient:                         kubeClient,
+		clusterName:                        clusterName,
+		cache:                              &serviceCache{serviceMap: make(map[string]*cachedService)},
+		eventBroadcaster:                   broadcaster,
+		eventRecorder:                      recorder,
+		nodeLister:                         nodeInformer.Lister(),
+		nodeListerSynced:                   nodeInformer.Informer().HasSynced,
+		queue:                              workqueue.NewNamedRateLimitingQueue(workqueue.NewItemExponentialFailureRateLimiter(minRetryDelay, maxRetryDelay), "service"),
+		legacyNodeRoleFeatureEnabled:       featureGate.Enabled(legacyNodeRoleBehaviorFeature),
+		serviceNodeExclusionFeatureEnabled: featureGate.Enabled(serviceNodeExclusionFeature),
 	}
 
 	serviceInformer.Informer().AddEventHandlerWithResyncPeriod(
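With the gate injected, `New` reads both feature flags exactly once at construction time, so a test can hand it an isolated gate instead of mutating the process-global `utilfeature.DefaultFeatureGate`. A minimal sketch of such a helper, assuming a same-package test; the `newTestFeatureGate` name, the leading parameters of the `New` call, and the `PreRelease` stages are illustrative, not part of this change:

```go
package service

import (
	"testing"

	"k8s.io/component-base/featuregate"
)

// newTestFeatureGate is a hypothetical helper that builds an isolated gate
// covering the two features this controller snapshots in New.
func newTestFeatureGate(t *testing.T, exclusionEnabled bool) featuregate.FeatureGate {
	gate := featuregate.NewFeatureGate()
	// Stages below are illustrative; the real specs live wherever these
	// features are registered.
	if err := gate.Add(map[featuregate.Feature]featuregate.FeatureSpec{
		legacyNodeRoleBehaviorFeature: {Default: true, PreRelease: featuregate.Beta},
		serviceNodeExclusionFeature:   {Default: false, PreRelease: featuregate.Alpha},
	}); err != nil {
		t.Fatal(err)
	}
	if err := gate.SetFromMap(map[string]bool{
		string(serviceNodeExclusionFeature): exclusionEnabled,
	}); err != nil {
		t.Fatal(err)
	}
	return gate
}
```

Because the values are copied into plain bools, flipping the gate after `New` returns has no effect on an existing controller; that trade-off is what makes the predicate below trivially testable.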
@@ -202,6 +208,7 @@ func New(
 	if err := s.init(); err != nil {
 		return nil, err
 	}
+
 	return s, nil
 }
 
@@ -397,7 +404,7 @@ func (s *Controller) syncLoadBalancerIfNeeded(service *v1.Service, key string) (
 }
 
 func (s *Controller) ensureLoadBalancer(service *v1.Service) (*v1.LoadBalancerStatus, error) {
-	nodes, err := listWithPredicate(s.nodeLister, getNodeConditionPredicate())
+	nodes, err := listWithPredicate(s.nodeLister, s.getNodeConditionPredicate())
 	if err != nil {
 		return nil, err
 	}
@@ -626,22 +633,22 @@ func nodeSlicesEqualForLB(x, y []*v1.Node) bool {
 	return nodeNames(x).Equal(nodeNames(y))
 }
 
-func getNodeConditionPredicate() NodeConditionPredicate {
+func (s *Controller) getNodeConditionPredicate() NodeConditionPredicate {
 	return func(node *v1.Node) bool {
 		// We add the master to the node list, but it's unschedulable. So we use this to filter
 		// the master.
 		if node.Spec.Unschedulable {
 			return false
 		}
 
-		if utilfeature.DefaultFeatureGate.Enabled(legacyNodeRoleBehaviorFeature) {
+		if s.legacyNodeRoleFeatureEnabled {
 			// As of 1.6, we will taint the master, but not necessarily mark it unschedulable.
 			// Recognize nodes labeled as master, and filter them also, as we were doing previously.
 			if _, hasMasterRoleLabel := node.Labels[labelNodeRoleMaster]; hasMasterRoleLabel {
 				return false
 			}
 		}
-		if utilfeature.DefaultFeatureGate.Enabled(serviceNodeExclusionFeature) {
+		if s.serviceNodeExclusionFeatureEnabled {
 			if _, hasExcludeBalancerLabel := node.Labels[labelNodeRoleExcludeBalancer]; hasExcludeBalancerLabel {
 				return false
 			}
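The payoff of the plain bool fields shows up in tests: the predicate can now be exercised per controller instance, with no global gate mutation and no interference between parallel tests. A rough same-package test sketch; the node fixtures are assumptions, and the worker carries a Ready condition because the real predicate also checks `NodeReady` further down:

```go
package service

import (
	"testing"

	v1 "k8s.io/api/core/v1"
)

func TestGetNodeConditionPredicate(t *testing.T) {
	// No feature-gate plumbing needed: set the snapshotted fields directly.
	s := &Controller{
		legacyNodeRoleFeatureEnabled:       true,
		serviceNodeExclusionFeatureEnabled: true,
	}
	pred := s.getNodeConditionPredicate()

	master := &v1.Node{}
	master.Labels = map[string]string{labelNodeRoleMaster: ""}
	if pred(master) {
		t.Error("expected node with master role label to be filtered out")
	}

	worker := &v1.Node{}
	worker.Status.Conditions = []v1.NodeCondition{{Type: v1.NodeReady, Status: v1.ConditionTrue}}
	if !pred(worker) {
		t.Error("expected schedulable, ready worker to be accepted")
	}
}
```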
@@ -692,7 +699,7 @@ func nodeReadyConditionStatus(node *v1.Node) v1.ConditionStatus {
 func (s *Controller) nodeSyncLoop() {
 	s.knownHostsLock.Lock()
 	defer s.knownHostsLock.Unlock()
-	newHosts, err := listWithPredicate(s.nodeLister, getNodeConditionPredicate())
+	newHosts, err := listWithPredicate(s.nodeLister, s.getNodeConditionPredicate())
 	if err != nil {
 		runtime.HandleError(fmt.Errorf("Failed to retrieve current set of nodes from node lister: %v", err))
 		return