This repository was archived by the owner on Jul 30, 2021. It is now read-only.

Commit 9f0664d

Merge pull request #321 from ericchiang/revert-tls-bootstrapping
*: revert TLS bootstrapping
2 parents: 5853643 + 0af72f7

File tree

22 files changed: +108 −487 lines


cmd/bootkube/render.go

Lines changed: 9 additions & 10 deletions
@@ -27,16 +27,15 @@ var (
 	}
 
 	renderOpts struct {
-		assetDir           string
-		caCertificatePath  string
-		caPrivateKeyPath   string
-		bootstrapAuthToken string
-		etcdServers        string
-		apiServers         string
-		altNames           string
-		selfHostKubelet    bool
-		cloudProvider      string
-		selfHostedEtcd     bool
+		assetDir          string
+		caCertificatePath string
+		caPrivateKeyPath  string
+		etcdServers       string
+		apiServers        string
+		altNames          string
+		selfHostKubelet   bool
+		cloudProvider     string
+		selfHostedEtcd    bool
 	}
 )
 
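For context, a minimal sketch of a `bootkube render` invocation using only the flags that remain after this revert; the asset directory and API server address mirror the ones used by hack/multi-node/bootkube-up below and are illustrative only:

```
# Sketch: render assets without the removed bootstrap auth token flag.
# Values mirror hack/multi-node/bootkube-up; adjust for your environment.
bootkube render --asset-dir=cluster --api-servers=https://172.17.4.101:443
```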

hack/csrctl.sh

Lines changed: 0 additions & 57 deletions
This file was deleted.

hack/multi-node/bootkube-up

Lines changed: 3 additions & 3 deletions
@@ -22,8 +22,8 @@ fi
 # Render assets
 if [ ! -d "cluster" ]; then
     ../../_output/bin/${local_os}/bootkube render --asset-dir=cluster --api-servers=https://172.17.4.101:443 ${etcd_render_flags}
-    # Add rendered bootstrap-kubeconfig to the node user-data
-    cat user-data.sample > cluster/user-data && sed 's/^/ /' cluster/auth/bootstrap-kubeconfig >> cluster/user-data
+    # Add rendered kubeconfig to the node user-data
+    cat user-data.sample > cluster/user-data && sed 's/^/ /' cluster/auth/kubeconfig >> cluster/user-data
     cp cluster/user-data{,-worker}
     cp cluster/user-data{,-controller}
     sed -i.bak -e '/--node-labels=master=true/d' cluster/user-data-worker
@@ -42,5 +42,5 @@ ssh -q -F ssh_config core@c1 "sudo GLOG_v=${GLOG_v} /home/core/bootkube start --
 
 echo
 echo "Bootstrap complete. Access your kubernetes cluster using:"
-echo "kubectl --kubeconfig=cluster/auth/admin-kubeconfig get nodes"
+echo "kubectl --kubeconfig=cluster/auth/kubeconfig get nodes"
 echo

hack/multi-node/user-data.sample

Lines changed: 1 addition & 3 deletions
@@ -18,8 +18,6 @@ coreos:
         ExecStartPre=/bin/mkdir -p /var/lib/cni
         ExecStart=/usr/lib/coreos/kubelet-wrapper \
           --kubeconfig=/etc/kubernetes/kubeconfig \
-          --experimental-bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubeconfig \
-          --cert-dir=/etc/kubernetes/secrets \
           --require-kubeconfig \
           --cni-conf-dir=/etc/kubernetes/cni/net.d \
           --network-plugin=cni \
@@ -38,7 +36,7 @@ coreos:
         WantedBy=multi-user.target
 
 write_files:
-  - path: "/etc/kubernetes/bootstrap-kubeconfig"
+  - path: "/etc/kubernetes/kubeconfig"
     permissions: "0644"
     owner: core
     content: |

hack/quickstart/init-master.sh

Lines changed: 8 additions & 11 deletions
@@ -46,14 +46,12 @@ function init_master_node() {
         --volume home,kind=host,source=/home/core \
         --mount volume=home,target=/core \
         --trust-keys-from-https --net=host ${BOOTKUBE_REPO}:${BOOTKUBE_VERSION} --exec \
-        /bootkube -- render --asset-dir=/core/assets --api-servers=https://${COREOS_PRIVATE_IPV4}:443,https://${COREOS_PUBLIC_IPV4}:443
+        /bootkube -- render --asset-dir=/core/assets --api-servers=https://${COREOS_PUBLIC_IPV4}:443,https://${COREOS_PRIVATE_IPV4}:443
 
-    # Move the local bootstrap-kubeconfig into expected location
+    # Move the local kubeconfig into expected location
     chown -R core:core /home/core/assets
     mkdir -p /etc/kubernetes
-    cp /home/core/assets/auth/bootstrap-kubeconfig /etc/kubernetes/
-    # Conformance scripts run kubectl on the master node via admin-kubeconfig
-    cp /home/core/assets/auth/admin-kubeconfig /etc/kubernetes/
+    cp /home/core/assets/auth/kubeconfig /etc/kubernetes/
 
     # Start the kubelet
     systemctl enable kubelet; sudo systemctl start kubelet
@@ -73,32 +71,31 @@ function init_master_node() {
     exit 1
 }
 
-# This script can execute on a remote host by copying itself and the kubelet service unit to remote host.
+# This script can execute on a remote host by copying itself + kubelet service unit to remote host.
 # After assets are available on the remote host, the script will execute itself in "local" mode.
 if [ "${REMOTE_HOST}" != "local" ]; then
     # Set up the kubelet.service on remote host
     scp -i ${IDENT} -P ${REMOTE_PORT} ${SSH_OPTS} kubelet.master core@${REMOTE_HOST}:/home/core/kubelet.master
     ssh -i ${IDENT} -p ${REMOTE_PORT} ${SSH_OPTS} core@${REMOTE_HOST} "sudo mv /home/core/kubelet.master /etc/systemd/system/kubelet.service"
 
-    # Copy self to remote host and execute script in "local" mode
+    # Copy self to remote host so script can be executed in "local" mode
     scp -i ${IDENT} -P ${REMOTE_PORT} ${SSH_OPTS} ${BASH_SOURCE[0]} core@${REMOTE_HOST}:/home/core/init-master.sh
     ssh -i ${IDENT} -p ${REMOTE_PORT} ${SSH_OPTS} core@${REMOTE_HOST} "sudo BOOTKUBE_REPO=${BOOTKUBE_REPO} BOOTKUBE_VERSION=${BOOTKUBE_VERSION} /home/core/init-master.sh local"
 
     # Copy assets from remote host to a local directory. These can be used to launch additional nodes & contain TLS assets
     mkdir ${CLUSTER_DIR}
     scp -q -i ${IDENT} -P ${REMOTE_PORT} ${SSH_OPTS} -r core@${REMOTE_HOST}:/home/core/assets/* ${CLUSTER_DIR}
-    sed -i.private "s/server: .*/server: https:\/\/${REMOTE_HOST}:443\//" ${CLUSTER_DIR}/auth/admin-kubeconfig
 
     # Cleanup
-    ssh -i ${IDENT} -p ${REMOTE_PORT} ${SSH_OPTS} core@${REMOTE_HOST} "sudo rm -rf /home/core/assets /home/core/init-master.sh"
+    ssh -i ${IDENT} -p ${REMOTE_PORT} ${SSH_OPTS} core@${REMOTE_HOST} "rm -rf /home/core/assets && rm -rf /home/core/init-master.sh"
 
     echo "Cluster assets copied to ${CLUSTER_DIR}"
     echo
     echo "Bootstrap complete. Access your kubernetes cluster using:"
-    echo "kubectl --kubeconfig=${CLUSTER_DIR}/auth/admin-kubeconfig get nodes"
+    echo "kubectl --kubeconfig=${CLUSTER_DIR}/auth/kubeconfig get nodes"
     echo
     echo "Additional nodes can be added to the cluster using:"
-    echo "./init-worker.sh <node-ip>"
+    echo "./init-worker.sh <node-ip> ${CLUSTER_DIR}/auth/kubeconfig"
     echo
 
 # Execute this script locally on the machine, assumes a kubelet.service file has already been placed on host.
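Taken together with init-worker.sh below, the quickstart flow after this revert looks roughly like the following sketch; node IPs are placeholders and `cluster/` is the asset directory used in the quickstart docs:

```
# Sketch of the post-revert quickstart flow (node IPs are placeholders).
./init-master.sh <master-ip>                              # renders assets and copies them back to the local cluster dir
kubectl --kubeconfig=cluster/auth/kubeconfig get nodes    # inspect cluster state
./init-worker.sh <worker-ip> cluster/auth/kubeconfig      # workers now take the kubeconfig as a second argument
```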

hack/quickstart/init-worker.sh

Lines changed: 26 additions & 10 deletions
@@ -2,8 +2,8 @@
 set -euo pipefail
 
 REMOTE_HOST=$1
+KUBECONFIG=$2
 REMOTE_PORT=${REMOTE_PORT:-22}
-CLUSTER_DIR=${CLUSTER_DIR:-cluster}
 IDENT=${IDENT:-${HOME}/.ssh/id_rsa}
 SSH_OPTS=${SSH_OPTS:-}
 
@@ -13,41 +13,57 @@ function usage() {
     exit 1
 }
 
+function extract_master_endpoint (){
+    grep 'certificate-authority-data' ${KUBECONFIG} | awk '{print $2}' | base64 -d > /home/core/ca.crt
+    grep 'client-certificate-data' ${KUBECONFIG} | awk '{print $2}'| base64 -d > /home/core/client.crt
+    grep 'client-key-data' ${KUBECONFIG} | awk '{print $2}'| base64 -d > /home/core/client.key
+
+    MASTER_PUB="$(awk '/server:/ {print $2}' ${KUBECONFIG} | awk -F/ '{print $3}' | awk -F: '{print $1}')"
+    # TODO (aaron): The -k was added with the gce conformance tests - figure out why it's needed here.
+    # The certs are seemingly signed correctly, but says no SAN for MASTER_PUB
+    MASTER_PRIV=$(curl -k https://${MASTER_PUB}:443/api/v1/namespaces/default/endpoints/kubernetes \
+        --cacert /home/core/ca.crt --cert /home/core/client.crt --key /home/core/client.key \
+        | jq -r '.subsets[0].addresses[0].ip')
+    rm -f /home/core/ca.crt /home/core/client.crt /home/core/client.key
+}
+
 # Initialize a worker node
 function init_worker_node() {
-    # Setup bootstrap-kubeconfig
+    extract_master_endpoint
+
+    # Setup kubeconfig
     mkdir -p /etc/kubernetes
-    mv /home/core/bootstrap-kubeconfig /etc/kubernetes/bootstrap-kubeconfig
+    cp ${KUBECONFIG} /etc/kubernetes/kubeconfig
 
-    # Move kubelet service file
-    mv /home/core/kubelet.worker /etc/systemd/system/kubelet.service
+    sed "s/{{apiserver}}/${MASTER_PRIV}/" /home/core/kubelet.worker > /etc/systemd/system/kubelet.service
+    rm /home/core/kubelet.worker
 
     # Start services
     systemctl daemon-reload
     systemctl stop update-engine; systemctl mask update-engine
     systemctl enable kubelet; sudo systemctl start kubelet
 }
 
-[ "$#" == 1 ] || usage
+[ "$#" == 2 ] || usage
 
 # This script can execute on a remote host by copying itself + kubelet service unit to remote host.
 # After assets are available on the remote host, the script will execute itself in "local" mode.
 if [ "${REMOTE_HOST}" != "local" ]; then
 
-    # Copy kubelet service file and bootstrap-kubeconfig to remote host
+    # Copy kubelet service file and kubeconfig to remote host
     scp -i ${IDENT} -P ${REMOTE_PORT} ${SSH_OPTS} kubelet.worker core@${REMOTE_HOST}:/home/core/kubelet.worker
-    scp -i ${IDENT} -P ${REMOTE_PORT} ${SSH_OPTS} ${CLUSTER_DIR}/auth/bootstrap-kubeconfig core@${REMOTE_HOST}:/home/core/bootstrap-kubeconfig
+    scp -i ${IDENT} -P ${REMOTE_PORT} ${SSH_OPTS} ${KUBECONFIG} core@${REMOTE_HOST}:/home/core/kubeconfig
 
     # Copy self to remote host so script can be executed in "local" mode
     scp -i ${IDENT} -P ${REMOTE_PORT} ${SSH_OPTS} ${BASH_SOURCE[0]} core@${REMOTE_HOST}:/home/core/init-worker.sh
-    ssh -i ${IDENT} -p ${REMOTE_PORT} ${SSH_OPTS} core@${REMOTE_HOST} "sudo /home/core/init-worker.sh local"
+    ssh -i ${IDENT} -p ${REMOTE_PORT} ${SSH_OPTS} core@${REMOTE_HOST} "sudo /home/core/init-worker.sh local /home/core/kubeconfig"
 
     # Cleanup
     ssh -i ${IDENT} -p ${REMOTE_PORT} ${SSH_OPTS} core@${REMOTE_HOST} "rm /home/core/init-worker.sh"
 
     echo
     echo "Node bootstrap complete. It may take a few minutes for the node to become ready. Access your kubernetes cluster using:"
-    echo "kubectl --kubeconfig=${CLUSTER_DIR}/auth/admin-kubeconfig get nodes"
+    echo "kubectl --kubeconfig=${KUBECONFIG} get nodes"
     echo
 
 # Execute this script locally on the machine, assumes a kubelet.service file has already been placed on host.
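The `extract_master_endpoint` helper added above pulls the client credentials out of the kubeconfig and queries the API server for its in-cluster address. For illustration only (not part of this commit), the same lookup can be expressed with kubectl:

```
# Sketch: resolve the master's private IP from the default/kubernetes Endpoints object,
# equivalent in spirit to the curl + jq call in extract_master_endpoint.
kubectl --kubeconfig=cluster/auth/kubeconfig -n default \
  get endpoints kubernetes -o jsonpath='{.subsets[0].addresses[0].ip}'
```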

hack/quickstart/kubelet.master

Lines changed: 1 addition & 3 deletions
@@ -11,10 +11,8 @@ ExecStartPre=/bin/mkdir -p /etc/kubernetes/checkpoint-secrets
 ExecStartPre=/bin/mkdir -p /srv/kubernetes/manifests
 ExecStartPre=/bin/mkdir -p /var/lib/cni
 ExecStart=/usr/lib/coreos/kubelet-wrapper \
+  --api-servers=https://${COREOS_PRIVATE_IPV4}:443 \
   --kubeconfig=/etc/kubernetes/kubeconfig \
-  --experimental-bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubeconfig \
-  --cert-dir=/etc/kubernetes/secrets \
-  --require-kubeconfig \
   --cni-conf-dir=/etc/kubernetes/cni/net.d \
   --network-plugin=cni \
   --lock-file=/var/run/lock/kubelet.lock \

hack/quickstart/kubelet.worker

Lines changed: 1 addition & 3 deletions
@@ -9,10 +9,8 @@ ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests
 ExecStartPre=/bin/mkdir -p /etc/kubernetes/cni/net.d
 ExecStartPre=/bin/mkdir -p /var/lib/cni
 ExecStart=/usr/lib/coreos/kubelet-wrapper \
+  --api-servers=https://{{apiserver}}:443 \
   --kubeconfig=/etc/kubernetes/kubeconfig \
-  --experimental-bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubeconfig \
-  --cert-dir=/etc/kubernetes/secrets \
-  --require-kubeconfig \
   --cni-conf-dir=/etc/kubernetes/cni/net.d \
   --network-plugin=cni \
   --lock-file=/var/run/lock/kubelet.lock \
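The `{{apiserver}}` placeholder above is filled in by init-worker.sh before the unit is installed; a minimal sketch of that substitution, with an illustrative master IP:

```
# Sketch: how init-worker.sh renders the template (10.0.0.5 is an illustrative private IP).
sed "s/{{apiserver}}/10.0.0.5/" /home/core/kubelet.worker > /etc/systemd/system/kubelet.service
# The resulting unit then starts the kubelet with --api-servers=https://10.0.0.5:443
```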

hack/quickstart/quickstart-aws.md

Lines changed: 5 additions & 29 deletions
@@ -60,44 +60,20 @@ $ IDENT=k8s-key.pem ./init-master.sh <PUBLIC_IP>
 After the master bootstrap is complete, you can continue to add worker nodes. Or cluster state can be inspected via kubectl:
 
 ```
-$ kubectl --kubeconfig=cluster/auth/admin-kubeconfig get nodes
+$ kubectl --kubeconfig=cluster/auth/kubeconfig get nodes
 ```
 
 ### Add Workers
 
 Run the `Launch Nodes` step for each additional node you wish to add, then using the public-ip, run:
 
 ```
-IDENT=k8s-key.pem ./init-worker.sh <PUBLIC_IP>
+IDENT=k8s-key.pem ./init-worker.sh <PUBLIC_IP> cluster/auth/kubeconfig
 ```
 
-After a few minutes, time for the required assets and containers to be
-downloaded, the new worker will submit a Certificate Signing Request. This
-request must be approved for the worker to join the cluster. Until Kubernetes
-1.6, there is no [approve/deny] commands built in _kubectl_, therefore we must
-interact directly with the Kubernetes API. In the example below, we demonstrate
-how the provided [csrctl.sh] tool can be used to manage CSRs.
+**NOTE:** It can take a few minutes for each node to download all of the required assets / containers.
+They may not be immediately available, but the state can be inspected with:
 
 ```
-$ ../csrctl.sh cluster/auth/admin-kubeconfig list
-NAME        AGE   REQUESTOR           CONDITION
-csr-9fxjw   16m   kubelet-bootstrap   Pending
-csr-j9r05   22m   kubelet-bootstrap   Approved,Issued
-
-$ ../csrctl.sh cluster/auth/admin-kubeconfig get csr-9fxjw
-$ ../csrctl.sh cluster/auth/admin-kubeconfig approve csr-9fxjw
-
-$ ../csrctl.sh cluster/auth/admin-kubeconfig list
-NAME        AGE   REQUESTOR           CONDITION
-csr-9fxjw   16m   kubelet-bootstrap   Approved,Issued
-csr-j9r05   22m   kubelet-bootstrap   Approved,Issued
+$ kubectl --kubeconfig=cluster/auth/kubeconfig get nodes
 ```
-
-Once approved, the worker node should appear immediately in the node list:
-
-```
-$ kubectl --kubeconfig=cluster/auth/admin-kubeconfig get nodes
-```
-
-[approve/deny]: https://github.com/kubernetes/kubernetes/issues/30163
-[csrctl.sh]: ../csrctl.sh
hack/quickstart/quickstart-gce.md

Lines changed: 5 additions & 29 deletions
@@ -36,7 +36,7 @@ $ IDENT=~/.ssh/google_compute_engine ./init-master.sh <node-ip>
 After the master bootstrap is complete, you can continue to add worker nodes. Or cluster state can be inspected via kubectl:
 
 ```
-$ kubectl --kubeconfig=cluster/auth/admin-kubeconfig get nodes
+$ kubectl --kubeconfig=cluster/auth/kubeconfig get nodes
 ```
 
 ### Add Workers
@@ -53,36 +53,12 @@ $ gcloud compute instances list ${CLUSTER_PREFIX}-core3
 Initialize each worker node by replacing `<node-ip>` with the EXTERNAL_IP from the commands above.
 
 ```
-$ IDENT=~/.ssh/google_compute_engine ./init-worker.sh <node-ip>
+$ IDENT=~/.ssh/google_compute_engine ./init-worker.sh <node-ip> cluster/auth/kubeconfig
 ```
 
-After a few minutes, time for the required assets and containers to be
-downloaded, the new worker will submit a Certificate Signing Request. This
-request must be approved for the worker to join the cluster. Until Kubernetes
-1.6, there is no [approve/deny] commands built in _kubectl_, therefore we must
-interact directly with the Kubernetes API. In the example below, we demonstrate
-how the provided [csrctl.sh] tool can be used to manage CSRs.
+**NOTE:** It can take a few minutes for each node to download all of the required assets / containers.
+They may not be immediately available, but the state can be inspected with:
 
 ```
-$ ../csrctl.sh cluster/auth/admin-kubeconfig list
-NAME        AGE   REQUESTOR           CONDITION
-csr-9fxjw   16m   kubelet-bootstrap   Pending
-csr-j9r05   22m   kubelet-bootstrap   Approved,Issued
-
-$ ../csrctl.sh cluster/auth/admin-kubeconfig get csr-9fxjw
-$ ../csrctl.sh cluster/auth/admin-kubeconfig approve csr-9fxjw
-
-$ ../csrctl.sh cluster/auth/admin-kubeconfig list
-NAME        AGE   REQUESTOR           CONDITION
-csr-9fxjw   16m   kubelet-bootstrap   Approved,Issued
-csr-j9r05   22m   kubelet-bootstrap   Approved,Issued
+$ kubectl --kubeconfig=cluster/auth/kubeconfig get nodes
 ```
-
-Once approved, the worker node should appear immediately in the node list:
-
-```
-$ kubectl --kubeconfig=cluster/auth/admin-kubeconfig get nodes
-```
-
-[approve/deny]: https://github.com/kubernetes/kubernetes/issues/30163
-[csrctl.sh]: ../csrctl.sh

0 commit comments