
Commit 6ed1ce9
Author: Yifan Gu
2 parents: 8a63561 + 74af00c

Merge pull request #422 from yifan-gu/update_manifests

templates.go: Update manifest for Kubernetes 1.6

2 files changed: +76 −63 lines

pkg/asset/internal/templates.go (+71 −54)
@@ -41,12 +41,14 @@ metadata:
   name: kubelet
   namespace: kube-system
   labels:
-    k8s-app: kubelet
+    tier: node
+    component: kubelet
 spec:
   template:
     metadata:
       labels:
-        k8s-app: kubelet
+        tier: node
+        component: kubelet
     spec:
       containers:
       - name: kubelet
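Every manifest in this file gets the same relabeling: the single `k8s-app: <name>` label is replaced by a `tier`/`component` pair, mirroring the label convention used by upstream Kubernetes 1.6 control-plane manifests. A minimal sketch, not part of this commit, of how the new pair can be matched with the k8s.io/apimachinery/pkg/labels package already vendored by this repo:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// Build a selector for the kubelet DaemonSet's new labels. Under the
	// old scheme this would have been labels.Set{"k8s-app": "kubelet"}.
	sel := labels.SelectorFromSet(labels.Set{
		"tier":      "node",
		"component": "kubelet",
	})
	fmt.Println(sel) // component=kubelet,tier=node
}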
@@ -133,12 +135,14 @@ metadata:
   name: kube-apiserver
   namespace: kube-system
   labels:
-    k8s-app: kube-apiserver
+    tier: control-plane
+    component: kube-apiserver
 spec:
   template:
     metadata:
       labels:
-        k8s-app: kube-apiserver
+        tier: control-plane
+        component: kube-apiserver
       annotations:
         checkpointer.alpha.coreos.com/checkpoint: "true"
     spec:
@@ -207,12 +211,14 @@ metadata:
   name: kenc
   namespace: kube-system
   labels:
-    k8s-app: kenc
+    tier: control-plane
+    component: kenc
 spec:
   template:
     metadata:
       labels:
-        k8s-app: kenc
+        tier: control-plane
+        component: kenc
       annotations:
         checkpointer.alpha.coreos.com/checkpoint: "true"
     spec:
@@ -251,12 +257,14 @@ metadata:
   name: pod-checkpointer
   namespace: kube-system
   labels:
-    k8s-app: pod-checkpointer
+    tier: control-plane
+    component: pod-checkpointer
 spec:
   template:
     metadata:
       labels:
-        k8s-app: pod-checkpointer
+        tier: control-plane
+        component: pod-checkpointer
       annotations:
         checkpointer.alpha.coreos.com/checkpoint: "true"
     spec:
@@ -305,13 +313,15 @@ metadata:
   name: kube-controller-manager
   namespace: kube-system
   labels:
-    k8s-app: kube-controller-manager
+    tier: control-plane
+    component: kube-controller-manager
 spec:
   replicas: 2
   template:
     metadata:
       labels:
-        k8s-app: kube-controller-manager
+        tier: control-plane
+        component: kube-controller-manager
     spec:
       nodeSelector:
         master: "true"
@@ -359,21 +369,24 @@ spec:
   minAvailable: 1
   selector:
     matchLabels:
-      k8s-app: kube-controller-manager
+      tier: control-plane
+      component: kube-controller-manager
 `)
 	SchedulerTemplate = []byte(`apiVersion: extensions/v1beta1
 kind: Deployment
 metadata:
   name: kube-scheduler
   namespace: kube-system
   labels:
-    k8s-app: kube-scheduler
+    tier: control-plane
+    component: kube-scheduler
 spec:
   replicas: 2
   template:
     metadata:
       labels:
-        k8s-app: kube-scheduler
+        tier: control-plane
+        component: kube-scheduler
     spec:
       nodeSelector:
         master: "true"
@@ -401,20 +414,25 @@ spec:
   minAvailable: 1
   selector:
     matchLabels:
-      k8s-app: kube-scheduler
+      tier: control-plane
+      component: kube-scheduler
 `)
 	ProxyTemplate = []byte(`apiVersion: "extensions/v1beta1"
 kind: DaemonSet
 metadata:
   name: kube-proxy
   namespace: kube-system
   labels:
-    k8s-app: kube-proxy
+    tier: node
+    component: kube-proxy
 spec:
   template:
     metadata:
       labels:
-        k8s-app: kube-proxy
+        tier: node
+        component: kube-proxy
+      annotations:
+        scheduler.alpha.kubernetes.io/critical-pod: ''
     spec:
       hostNetwork: true
       containers:
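Besides the relabeling, kube-proxy gains the `scheduler.alpha.kubernetes.io/critical-pod` annotation, which in the 1.6-era alpha critical-add-on mechanism marks the pod so the rescheduler will guarantee it a slot on the node.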
@@ -475,11 +493,18 @@ spec:
         k8s-app: kube-dns
       annotations:
         scheduler.alpha.kubernetes.io/critical-pod: ''
-        scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'
     spec:
+      tolerations:
+      - key: "CriticalAddonsOnly"
+        operator: "Exists"
+      volumes:
+      - name: kube-dns-config
+        configMap:
+          name: kube-dns
+          optional: true
       containers:
       - name: kubedns
-        image: gcr.io/google_containers/kubedns-amd64:1.9
+        image: gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.1
         resources:
           # TODO: Set memory limits when we've profiled the container for large
           # clusters, then set request = limit to keep this container in
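The tolerations move reflects Kubernetes 1.6 promoting tolerations from the `scheduler.alpha.kubernetes.io/tolerations` annotation to the first-class `spec.tolerations` field of the pod spec.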
@@ -492,8 +517,8 @@ spec:
             memory: 70Mi
         livenessProbe:
           httpGet:
-            path: /healthz-kubedns
-            port: 8080
+            path: /healthcheck/kubedns
+            port: 10054
             scheme: HTTP
           initialDelaySeconds: 60
           timeoutSeconds: 5
@@ -511,10 +536,8 @@ spec:
         args:
         - --domain=cluster.local.
         - --dns-port=10053
-        - --config-map=kube-dns
-        # This should be set to v=2 only after the new image (cut from 1.5) has
-        # been released, otherwise we will flood the logs.
-        - --v=0
+        - --config-dir=/kube-dns-config
+        - --v=2
         env:
         - name: PROMETHEUS_PORT
           value: "10055"
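kube-dns 1.14 reads its configuration from a directory (`--config-dir`) backed by the optional kube-dns ConfigMap volume added above, rather than fetching a ConfigMap by name with `--config-map`; marking the volume `optional: true` lets the pod start even when no such ConfigMap exists.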
@@ -528,22 +551,32 @@ spec:
         - containerPort: 10055
           name: metrics
           protocol: TCP
+        volumeMounts:
+        - name: kube-dns-config
+          mountPath: /kube-dns-config
       - name: dnsmasq
-        image: gcr.io/google_containers/kube-dnsmasq-amd64:1.4
+        image: gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.1
         livenessProbe:
           httpGet:
-            path: /healthz-dnsmasq
-            port: 8080
+            path: /healthcheck/dnsmasq
+            port: 10054
             scheme: HTTP
           initialDelaySeconds: 60
           timeoutSeconds: 5
           successThreshold: 1
           failureThreshold: 5
         args:
+        - -v=2
+        - -logtostderr
+        - -configDir=/etc/k8s/dns/dnsmasq-nanny
+        - -restartDnsmasq=true
+        - --
+        - -k
         - --cache-size=1000
-        - --no-resolv
-        - --server=127.0.0.1#10053
         - --log-facility=-
+        - --server=/cluster.local/127.0.0.1#10053
+        - --server=/in-addr.arpa/127.0.0.1#10053
+        - --server=/ip6.arpa/127.0.0.1#10053
         ports:
         - containerPort: 53
           name: dns
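In the new dnsmasq-nanny image, the flags before `--` configure the nanny itself (where to watch for config, whether to restart dnsmasq on changes); everything after `--` is passed through to dnsmasq, with `-k` keeping it in the foreground. The `--server=/cluster.local/127.0.0.1#10053` form forwards only the listed domains to kubedns on port 10053, replacing the previous blanket `--no-resolv`/`--server=127.0.0.1#10053` setup.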
@@ -555,9 +588,12 @@ spec:
         resources:
           requests:
             cpu: 150m
-            memory: 10Mi
-      - name: dnsmasq-metrics
-        image: gcr.io/google_containers/dnsmasq-metrics-amd64:1.0
+            memory: 20Mi
+        volumeMounts:
+        - name: kube-dns-config
+          mountPath: /etc/k8s/dns/dnsmasq-nanny
+      - name: sidecar
+        image: gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.1
         livenessProbe:
           httpGet:
             path: /metrics
@@ -570,35 +606,16 @@ spec:
         args:
         - --v=2
         - --logtostderr
+        - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.cluster.local,5,A
+        - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.cluster.local,5,A
         ports:
         - containerPort: 10054
           name: metrics
           protocol: TCP
         resources:
           requests:
-            memory: 10Mi
-      - name: healthz
-        image: gcr.io/google_containers/exechealthz-amd64:1.2
-        resources:
-          limits:
-            memory: 50Mi
-          requests:
+            memory: 20Mi
             cpu: 10m
-            # Note that this container shouldn't really need 50Mi of memory. The
-            # limits are set higher than expected pending investigation on #29688.
-            # The extra memory was stolen from the kubedns container to keep the
-            # net memory requested by the pod constant.
-            memory: 50Mi
-        args:
-        - --cmd=nslookup kubernetes.default.svc.cluster.local 127.0.0.1 >/dev/null
-        - --url=/healthz-dnsmasq
-        - --cmd=nslookup kubernetes.default.svc.cluster.local 127.0.0.1:10053 >/dev/null
-        - --url=/healthz-kubedns
-        - --port=8080
-        - --quiet
-        ports:
-        - containerPort: 8080
-          protocol: TCP
       dnsPolicy: Default # Don't use cluster DNS.
 `)
 	DNSSvcTemplate = []byte(`apiVersion: v1
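The separate healthz (exechealthz) and dnsmasq-metrics containers are folded into the single sidecar container, whose `--probe` flags (roughly `--probe=<name>,<server>,<dns-name>,<interval-seconds>,<record-type>`) poll each DNS backend and expose the results on port 10054, serving both the liveness probes above and Prometheus metrics.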

pkg/bootkube/status.go (+5 −9)
@@ -9,7 +9,6 @@ import (
 
 	"github.com/golang/glog"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/apimachinery/pkg/watch"
@@ -21,7 +20,6 @@ import (
 )
 
 const (
-	appKey       = "k8s-app"
 	doesNotExist = "DoesNotExist"
 )
 
@@ -56,13 +54,11 @@ func NewStatusController(pods []string) (*statusController, error) {
 }
 
 func (s *statusController) Run() {
-	// TODO(aaron): statically define the selector so we can skip this
-	ls, err := labels.Parse(appKey)
-	if err != nil {
-		panic(err)
-	}
-
-	options := metav1.ListOptions{LabelSelector: ls.String()}
+	// TODO(yifan): Be more explicit about the labels so that we don't just
+	// rely on the prefix of the pod name when looking for the pods we are interested in.
+	// E.g. for a scheduler pod, we would look for pods that have the labels
+	// `tier=control-plane` and `component=kube-scheduler`.
+	options := metav1.ListOptions{}
 	podStore, podController := cache.NewInformer(
 		&cache.ListWatch{
 			ListFunc: func(lo metav1.ListOptions) (runtime.Object, error) {
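A hedged sketch of what that TODO points at, assuming the `k8s.io/apimachinery/pkg/labels` import removed above were reinstated (hypothetical follow-up, not part of this commit):

// selectorFor builds ListOptions that match a single control-plane
// component via the tier/component labels introduced by this commit,
// e.g. selectorFor("kube-scheduler"), instead of listing every pod
// and filtering by pod-name prefix.
func selectorFor(component string) metav1.ListOptions {
	sel := labels.SelectorFromSet(labels.Set{
		"tier":      "control-plane",
		"component": component,
	})
	return metav1.ListOptions{LabelSelector: sel.String()}
}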
