@@ -23,6 +23,7 @@ import (
 	"encoding/json"
 	"fmt"
 	"math"
+	"regexp"
 	"sort"
 	"strconv"
 	"strings"
@@ -32,10 +33,10 @@ import (
 
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/fields"
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 	clientset "k8s.io/client-go/kubernetes"
 	kubeletstatsv1alpha1 "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
-	"k8s.io/kubernetes/test/e2e/system"
 
 	// TODO: Remove the following imports (ref: https://github.com/kubernetes/kubernetes/issues/81245)
 	e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
@@ -370,6 +371,29 @@ const (
 	MasterAndDNSNodes NodesSet = 2
 )
 
+// nodeHasControlPlanePods returns true if the specified node has control plane pods
+// (kube-scheduler and/or kube-controller-manager).
+func nodeHasControlPlanePods(c clientset.Interface, nodeName string) (bool, error) {
+	regKubeScheduler := regexp.MustCompile("kube-scheduler-.*")
+	regKubeControllerManager := regexp.MustCompile("kube-controller-manager-.*")
+
+	podList, err := c.CoreV1().Pods(metav1.NamespaceSystem).List(context.TODO(), metav1.ListOptions{
+		FieldSelector: fields.OneTermEqualSelector("spec.nodeName", nodeName).String(),
+	})
+	if err != nil {
+		return false, err
+	}
+	if len(podList.Items) < 1 {
+		Logf("Can't find any pods in namespace %s to grab metrics from", metav1.NamespaceSystem)
+	}
+	for _, pod := range podList.Items {
+		if regKubeScheduler.MatchString(pod.Name) || regKubeControllerManager.MatchString(pod.Name) {
+			return true, nil
+		}
+	}
+	return false, nil
+}
+
 // NewResourceUsageGatherer returns a new ContainerResourceGatherer.
 func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOptions, pods *v1.PodList) (*ContainerResourceGatherer, error) {
 	g := ContainerResourceGatherer{
@@ -404,11 +428,23 @@ func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOpt
 	}
 	dnsNodes := make(map[string]bool)
 	for _, pod := range pods.Items {
-		if (options.Nodes == MasterNodes) && !system.DeprecatedMightBeMasterNode(pod.Spec.NodeName) {
-			continue
+		if options.Nodes == MasterNodes {
+			isControlPlane, err := nodeHasControlPlanePods(c, pod.Spec.NodeName)
+			if err != nil {
+				return nil, err
+			}
+			if !isControlPlane {
+				continue
+			}
 		}
-		if (options.Nodes == MasterAndDNSNodes) && !system.DeprecatedMightBeMasterNode(pod.Spec.NodeName) && pod.Labels["k8s-app"] != "kube-dns" {
-			continue
+		if options.Nodes == MasterAndDNSNodes {
+			isControlPlane, err := nodeHasControlPlanePods(c, pod.Spec.NodeName)
+			if err != nil {
+				return nil, err
+			}
+			if !isControlPlane && pod.Labels["k8s-app"] != "kube-dns" {
+				continue
+			}
 		}
 		for _, container := range pod.Status.InitContainerStatuses {
 			g.containerIDs = append(g.containerIDs, container.Name)
@@ -427,7 +463,11 @@ func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOpt
 	}
 
 	for _, node := range nodeList.Items {
-		if options.Nodes == AllNodes || system.DeprecatedMightBeMasterNode(node.Name) || dnsNodes[node.Name] {
+		isControlPlane, err := nodeHasControlPlanePods(c, node.Name)
+		if err != nil {
+			return nil, err
+		}
+		if options.Nodes == AllNodes || isControlPlane || dnsNodes[node.Name] {
 			g.workerWg.Add(1)
 			g.workers = append(g.workers, resourceGatherWorker{
 				c: c,