Skip to content

Commit d1233d3

Browse files
authored
sync en zh yaml in content/zh/examples/admin directory (#19766)
1 parent 3dca728 commit d1233d3

File tree

7 files changed

+187
-173
lines changed

7 files changed

+187
-173
lines changed
Lines changed: 69 additions & 69 deletions
Original file line numberDiff line numberDiff line change
@@ -1,69 +1,69 @@
1-
# This is an example of how to setup cloud-controller-manger as a Daemonset in your cluster.
2-
# It assumes that your masters can run pods and has the role node-role.kubernetes.io/master
3-
# Note that this Daemonset will not work straight out of the box for your cloud, this is
4-
# meant to be a guideline.
5-
6-
---
7-
apiVersion: v1
8-
kind: ServiceAccount
9-
metadata:
10-
name: cloud-controller-manager
11-
namespace: kube-system
12-
---
13-
kind: ClusterRoleBinding
14-
apiVersion: rbac.authorization.k8s.io/v1
15-
metadata:
16-
name: system:cloud-controller-manager
17-
roleRef:
18-
apiGroup: rbac.authorization.k8s.io
19-
kind: ClusterRole
20-
name: cluster-admin
21-
subjects:
22-
- kind: ServiceAccount
23-
name: cloud-controller-manager
24-
namespace: kube-system
25-
---
26-
apiVersion: apps/v1
27-
kind: DaemonSet
28-
metadata:
29-
labels:
30-
k8s-app: cloud-controller-manager
31-
name: cloud-controller-manager
32-
namespace: kube-system
33-
spec:
34-
selector:
35-
matchLabels:
36-
k8s-app: cloud-controller-manager
37-
template:
38-
metadata:
39-
labels:
40-
k8s-app: cloud-controller-manager
41-
spec:
42-
serviceAccountName: cloud-controller-manager
43-
containers:
44-
- name: cloud-controller-manager
45-
# for in-tree providers we use k8s.gcr.io/cloud-controller-manager
46-
# this can be replaced with any other image for out-of-tree providers
47-
image: k8s.gcr.io/cloud-controller-manager:v1.8.0
48-
command:
49-
- /usr/local/bin/cloud-controller-manager
50-
- --cloud-provider=<YOUR_CLOUD_PROVIDER> # Add your own cloud provider here!
51-
- --leader-elect=true
52-
- --use-service-account-credentials
53-
# these flags will vary for every cloud provider
54-
- --allocate-node-cidrs=true
55-
- --configure-cloud-routes=true
56-
- --cluster-cidr=172.17.0.0/16
57-
tolerations:
58-
# this is required so CCM can bootstrap itself
59-
- key: node.cloudprovider.kubernetes.io/uninitialized
60-
value: "true"
61-
effect: NoSchedule
62-
# this is to have the daemonset runnable on master nodes
63-
# the taint may vary depending on your cluster setup
64-
- key: node-role.kubernetes.io/master
65-
effect: NoSchedule
66-
# this is to restrict CCM to only run on master nodes
67-
# the node selector may vary depending on your cluster setup
68-
nodeSelector:
69-
node-role.kubernetes.io/master: ""
# This is an example of how to set up cloud-controller-manager as a DaemonSet in your cluster.
# It assumes that your masters can run pods and have the role node-role.kubernetes.io/master.
# Note that this DaemonSet will not work straight out of the box for your cloud; it is
# meant to be a guideline.

---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: cloud-controller-manager
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: system:cloud-controller-manager
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
  - kind: ServiceAccount
    name: cloud-controller-manager
    namespace: kube-system
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  labels:
    k8s-app: cloud-controller-manager
  name: cloud-controller-manager
  namespace: kube-system
spec:
  selector:
    matchLabels:
      k8s-app: cloud-controller-manager
  template:
    metadata:
      labels:
        k8s-app: cloud-controller-manager
    spec:
      serviceAccountName: cloud-controller-manager
      containers:
        - name: cloud-controller-manager
          # for in-tree providers we use k8s.gcr.io/cloud-controller-manager
          # this can be replaced with any other image for out-of-tree providers
          image: k8s.gcr.io/cloud-controller-manager:v1.8.0
          command:
            - /usr/local/bin/cloud-controller-manager
            - --cloud-provider=[YOUR_CLOUD_PROVIDER] # Add your own cloud provider here!
            - --leader-elect=true
            - --use-service-account-credentials
            # these flags will vary for every cloud provider
            - --allocate-node-cidrs=true
            - --configure-cloud-routes=true
            - --cluster-cidr=172.17.0.0/16
      tolerations:
        # this is required so CCM can bootstrap itself
        - key: node.cloudprovider.kubernetes.io/uninitialized
          value: "true"
          effect: NoSchedule
        # this is to have the daemonset runnable on master nodes
        # the taint may vary depending on your cluster setup
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
      # this is to restrict CCM to only run on master nodes
      # the node selector may vary depending on your cluster setup
      nodeSelector:
        node-role.kubernetes.io/master: ""
Lines changed: 33 additions & 33 deletions
Original file line numberDiff line numberDiff line change
@@ -1,33 +1,33 @@
1-
apiVersion: apps/v1
2-
kind: Deployment
3-
metadata:
4-
name: dns-autoscaler
5-
namespace: kube-system
6-
labels:
7-
k8s-app: dns-autoscaler
8-
spec:
9-
selector:
10-
matchLabels:
11-
k8s-app: dns-autoscaler
12-
template:
13-
metadata:
14-
labels:
15-
k8s-app: dns-autoscaler
16-
spec:
17-
containers:
18-
- name: autoscaler
19-
image: k8s.gcr.io/cluster-proportional-autoscaler-amd64:1.1.1
20-
resources:
21-
requests:
22-
cpu: "20m"
23-
memory: "10Mi"
24-
command:
25-
- /cluster-proportional-autoscaler
26-
- --namespace=kube-system
27-
- --configmap=dns-autoscaler
28-
- --target=<SCALE_TARGET>
29-
# When cluster is using large nodes(with more cores), "coresPerReplica" should dominate.
30-
# If using small nodes, "nodesPerReplica" should dominate.
31-
- --default-params={"linear":{"coresPerReplica":256,"nodesPerReplica":16,"min":1}}
32-
- --logtostderr=true
33-
- --v=2
# Deployment running the cluster-proportional-autoscaler to scale cluster DNS
# replicas in proportion to cluster size.
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: dns-autoscaler
  namespace: kube-system
  labels:
    k8s-app: dns-autoscaler
spec:
  selector:
    matchLabels:
      k8s-app: dns-autoscaler
  template:
    metadata:
      labels:
        k8s-app: dns-autoscaler
    spec:
      containers:
        - name: autoscaler
          image: k8s.gcr.io/cluster-proportional-autoscaler-amd64:1.6.0
          resources:
            requests:
              cpu: 20m
              memory: 10Mi
          command:
            - /cluster-proportional-autoscaler
            - --namespace=kube-system
            - --configmap=dns-autoscaler
            - --target=<SCALE_TARGET>
            # When cluster is using large nodes (with more cores), "coresPerReplica" should dominate.
            # If using small nodes, "nodesPerReplica" should dominate.
            - --default-params={"linear":{"coresPerReplica":256,"nodesPerReplica":16,"min":1}}
            - --logtostderr=true
            - --v=2
Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,14 @@
# A long-running utility Pod with DNS tooling, useful for debugging
# cluster DNS resolution from inside the cluster.
---
apiVersion: v1
kind: Pod
metadata:
  name: dnsutils
  namespace: default
spec:
  containers:
    - name: dnsutils
      image: gcr.io/kubernetes-e2e-test-images/dnsutils:1.3
      command:
        - sleep
        - "3600"
      imagePullPolicy: IfNotPresent
  restartPolicy: Always

content/zh/examples/admin/resource/cpu-constraints-pod-3.yaml

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,10 +1,10 @@
11
apiVersion: v1
22
kind: Pod
33
metadata:
4-
name: constraints-cpu-demo-4
4+
name: constraints-cpu-demo-3
55
spec:
66
containers:
7-
- name: constraints-cpu-demo-4-ctr
7+
- name: constraints-cpu-demo-3-ctr
88
image: nginx
99
resources:
1010
limits:

content/zh/examples/admin/resource/quota-objects-pvc-2.yaml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
1-
kind: PersistentVolumeClaim
21
apiVersion: v1
2+
kind: PersistentVolumeClaim
33
metadata:
44
name: pvc-quota-demo-2
55
spec:

content/zh/examples/admin/resource/quota-objects-pvc.yaml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
1-
kind: PersistentVolumeClaim
21
apiVersion: v1
2+
kind: PersistentVolumeClaim
33
metadata:
44
name: pvc-quota-demo
55
spec:
Lines changed: 67 additions & 67 deletions
Original file line numberDiff line numberDiff line change
@@ -1,67 +1,67 @@
1-
apiVersion: v1
2-
kind: ServiceAccount
3-
metadata:
4-
name: my-scheduler
5-
namespace: kube-system
6-
---
7-
kind: ClusterRoleBinding
8-
apiVersion: rbac.authorization.k8s.io/v1
9-
metadata:
10-
name: my-scheduler-as-kube-scheduler
11-
subjects:
12-
- kind: ServiceAccount
13-
name: my-scheduler
14-
namespace: kube-system
15-
roleRef:
16-
kind: ClusterRole
17-
name: kube-scheduler
18-
apiGroup: rbac.authorization.k8s.io
19-
---
20-
apiVersion: apps/v1
21-
kind: Deployment
22-
metadata:
23-
labels:
24-
component: scheduler
25-
tier: control-plane
26-
name: my-scheduler
27-
namespace: kube-system
28-
spec:
29-
selector:
30-
matchLabels:
31-
component: scheduler
32-
tier: control-plane
33-
replicas: 1
34-
template:
35-
metadata:
36-
labels:
37-
component: scheduler
38-
tier: control-plane
39-
version: second
40-
spec:
41-
serviceAccountName: my-scheduler
42-
containers:
43-
- command:
44-
- /usr/local/bin/kube-scheduler
45-
- --address=0.0.0.0
46-
- --leader-elect=false
47-
- --scheduler-name=my-scheduler
48-
image: gcr.io/my-gcp-project/my-kube-scheduler:1.0
49-
livenessProbe:
50-
httpGet:
51-
path: /healthz
52-
port: 10251
53-
initialDelaySeconds: 15
54-
name: kube-second-scheduler
55-
readinessProbe:
56-
httpGet:
57-
path: /healthz
58-
port: 10251
59-
resources:
60-
requests:
61-
cpu: '0.1'
62-
securityContext:
63-
privileged: false
64-
volumeMounts: []
65-
hostNetwork: false
66-
hostPID: false
67-
volumes: []
# Deploys a second scheduler ("my-scheduler") alongside the default
# kube-scheduler: a ServiceAccount, a binding to the system:kube-scheduler
# ClusterRole, and a single-replica Deployment running the scheduler image.
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: my-scheduler
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: my-scheduler-as-kube-scheduler
subjects:
  - kind: ServiceAccount
    name: my-scheduler
    namespace: kube-system
roleRef:
  kind: ClusterRole
  name: system:kube-scheduler
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    component: scheduler
    tier: control-plane
  name: my-scheduler
  namespace: kube-system
spec:
  selector:
    matchLabels:
      component: scheduler
      tier: control-plane
  replicas: 1
  template:
    metadata:
      labels:
        component: scheduler
        tier: control-plane
        version: second
    spec:
      serviceAccountName: my-scheduler
      containers:
        - command:
            - /usr/local/bin/kube-scheduler
            - --address=0.0.0.0
            - --leader-elect=false
            - --scheduler-name=my-scheduler
          image: gcr.io/my-gcp-project/my-kube-scheduler:1.0
          livenessProbe:
            httpGet:
              path: /healthz
              port: 10251
            initialDelaySeconds: 15
          name: kube-second-scheduler
          readinessProbe:
            httpGet:
              path: /healthz
              port: 10251
          resources:
            requests:
              # quoted so YAML keeps this as a string quantity, not a float
              cpu: '0.1'
          securityContext:
            privileged: false
          volumeMounts: []
      hostNetwork: false
      hostPID: false
      volumes: []

0 commit comments

Comments
 (0)