@@ -29,7 +29,6 @@ import (
 	"k8s.io/apimachinery/pkg/util/runtime"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/wait"
-	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	coreinformers "k8s.io/client-go/informers/core/v1"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/kubernetes/scheme"
@@ -40,6 +39,7 @@ import (
 	"k8s.io/client-go/util/workqueue"
 	cloudprovider "k8s.io/cloud-provider"
 	servicehelper "k8s.io/cloud-provider/service/helpers"
+	"k8s.io/component-base/featuregate"
 	"k8s.io/component-base/metrics/prometheus/ratelimiter"
 	"k8s.io/klog"
 )
@@ -110,6 +110,9 @@ type Controller struct {
 	nodeListerSynced cache.InformerSynced
 	// services that need to be synced
 	queue workqueue.RateLimitingInterface
+	// feature gates stored in local field for better testability
+	legacyNodeRoleFeatureEnabled       bool
+	serviceNodeExclusionFeatureEnabled bool
 }

 // New returns a new service controller to keep cloud provider service resources
@@ -120,6 +123,7 @@ func New(
 	serviceInformer coreinformers.ServiceInformer,
 	nodeInformer coreinformers.NodeInformer,
 	clusterName string,
+	featureGate featuregate.FeatureGate,
 ) (*Controller, error) {
 	broadcaster := record.NewBroadcaster()
 	broadcaster.StartLogging(klog.Infof)
@@ -133,16 +137,18 @@ func New(
 	}

 	s := &Controller{
-		cloud:            cloud,
-		knownHosts:       []*v1.Node{},
-		kubeClient:       kubeClient,
-		clusterName:      clusterName,
-		cache:            &serviceCache{serviceMap: make(map[string]*cachedService)},
-		eventBroadcaster: broadcaster,
-		eventRecorder:    recorder,
-		nodeLister:       nodeInformer.Lister(),
-		nodeListerSynced: nodeInformer.Informer().HasSynced,
-		queue:            workqueue.NewNamedRateLimitingQueue(workqueue.NewItemExponentialFailureRateLimiter(minRetryDelay, maxRetryDelay), "service"),
+		cloud:                              cloud,
+		knownHosts:                         []*v1.Node{},
+		kubeClient:                         kubeClient,
+		clusterName:                        clusterName,
+		cache:                              &serviceCache{serviceMap: make(map[string]*cachedService)},
+		eventBroadcaster:                   broadcaster,
+		eventRecorder:                      recorder,
+		nodeLister:                         nodeInformer.Lister(),
+		nodeListerSynced:                   nodeInformer.Informer().HasSynced,
+		queue:                              workqueue.NewNamedRateLimitingQueue(workqueue.NewItemExponentialFailureRateLimiter(minRetryDelay, maxRetryDelay), "service"),
+		legacyNodeRoleFeatureEnabled:       featureGate.Enabled(legacyNodeRoleBehaviorFeature),
+		serviceNodeExclusionFeatureEnabled: featureGate.Enabled(serviceNodeExclusionFeature),
 	}

 	serviceInformer.Informer().AddEventHandlerWithResyncPeriod(
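
With this change the gate is injected by the caller rather than read from the global `utilfeature.DefaultFeatureGate`. A minimal sketch of how a caller might wire one up, assuming the usual `component-base` featuregate API; the feature specs below are illustrative assumptions, not the in-tree definitions:

```go
// Hypothetical wiring: register the two gates on a mutable FeatureGate
// and inject it into New. The Default/PreRelease values are assumptions.
gate := featuregate.NewFeatureGate()
if err := gate.Add(map[featuregate.Feature]featuregate.FeatureSpec{
	legacyNodeRoleBehaviorFeature: {Default: true, PreRelease: featuregate.Beta},
	serviceNodeExclusionFeature:   {Default: false, PreRelease: featuregate.Alpha},
}); err != nil {
	klog.Fatalf("failed to register feature gates: %v", err)
}
controller, err := New(cloud, kubeClient, serviceInformer, nodeInformer, clusterName, gate)
```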
@@ -188,6 +194,7 @@ func New(
 	if err := s.init(); err != nil {
 		return nil, err
 	}
+
 	return s, nil
 }

@@ -383,7 +390,7 @@ func (s *Controller) syncLoadBalancerIfNeeded(service *v1.Service, key string) (
 }

 func (s *Controller) ensureLoadBalancer(service *v1.Service) (*v1.LoadBalancerStatus, error) {
-	nodes, err := listWithPredicate(s.nodeLister, getNodeConditionPredicate())
+	nodes, err := listWithPredicate(s.nodeLister, s.getNodeConditionPredicate())
 	if err != nil {
 		return nil, err
 	}
@@ -612,22 +619,22 @@ func nodeSlicesEqualForLB(x, y []*v1.Node) bool {
 	return nodeNames(x).Equal(nodeNames(y))
 }

-func getNodeConditionPredicate() NodeConditionPredicate {
+func (s *Controller) getNodeConditionPredicate() NodeConditionPredicate {
 	return func(node *v1.Node) bool {
 		// We add the master to the node list, but its unschedulable. So we use this to filter
 		// the master.
 		if node.Spec.Unschedulable {
 			return false
 		}

-		if utilfeature.DefaultFeatureGate.Enabled(legacyNodeRoleBehaviorFeature) {
+		if s.legacyNodeRoleFeatureEnabled {
 			// As of 1.6, we will taint the master, but not necessarily mark it unschedulable.
 			// Recognize nodes labeled as master, and filter them also, as we were doing previously.
 			if _, hasMasterRoleLabel := node.Labels[labelNodeRoleMaster]; hasMasterRoleLabel {
 				return false
 			}
 		}
-		if utilfeature.DefaultFeatureGate.Enabled(serviceNodeExclusionFeature) {
+		if s.serviceNodeExclusionFeatureEnabled {
 			if _, hasExcludeBalancerLabel := node.Labels[labelNodeRoleExcludeBalancer]; hasExcludeBalancerLabel {
 				return false
 			}
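
Storing the gate results on the Controller is what the "better testability" comment in the struct refers to: a test can flip the fields directly instead of mutating the process-global gate. A sketch of such a test, assuming the standard `testing` and `metav1` imports; the test name and node setup are hypothetical:

```go
// Hypothetical unit test: no global feature-gate mutation required.
func TestPredicateFiltersMasterNodes(t *testing.T) {
	s := &Controller{legacyNodeRoleFeatureEnabled: true}
	pred := s.getNodeConditionPredicate()

	master := &v1.Node{
		ObjectMeta: metav1.ObjectMeta{
			Labels: map[string]string{labelNodeRoleMaster: ""},
		},
	}
	if pred(master) {
		t.Error("expected node with master role label to be filtered out")
	}
}
```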
@@ -654,7 +661,7 @@ func getNodeConditionPredicate() NodeConditionPredicate {
 func (s *Controller) nodeSyncLoop() {
 	s.knownHostsLock.Lock()
 	defer s.knownHostsLock.Unlock()
-	newHosts, err := listWithPredicate(s.nodeLister, getNodeConditionPredicate())
+	newHosts, err := listWithPredicate(s.nodeLister, s.getNodeConditionPredicate())
 	if err != nil {
 		runtime.HandleError(fmt.Errorf("Failed to retrieve current set of nodes from node lister: %v", err))
 		return