This repository was archived by the owner on Jul 30, 2021. It is now read-only.
File tree Expand file tree Collapse file tree 4 files changed +7
-7
lines changed
Expand file tree Collapse file tree 4 files changed +7
-7
lines changed Original file line number Diff line number Diff line change @@ -47,11 +47,11 @@ if [ "${REMOTE_HOST}" != "local" ]; then
4747 scp -i ${IDENT} -P ${REMOTE_PORT} ${SSH_OPTS} ${KUBECONFIG} core@${REMOTE_HOST}:/home/core/kubeconfig
4848
4949 # Copy self to remote host so script can be executed in "local" mode
50- scp -i ${IDENT} -P ${REMOTE_PORT} ${SSH_OPTS} ${BASH_SOURCE[0]} core@${REMOTE_HOST}:/home/core/init-worker.sh
51- ssh -i ${IDENT} -p ${REMOTE_PORT} ${SSH_OPTS} core@${REMOTE_HOST} "sudo /home/core/init-worker.sh local /home/core/kubeconfig"
50+ scp -i ${IDENT} -P ${REMOTE_PORT} ${SSH_OPTS} ${BASH_SOURCE[0]} core@${REMOTE_HOST}:/home/core/init-node.sh
51+ ssh -i ${IDENT} -p ${REMOTE_PORT} ${SSH_OPTS} core@${REMOTE_HOST} "sudo /home/core/init-node.sh local /home/core/kubeconfig"
5252
5353 # Cleanup
54- ssh -i ${IDENT} -p ${REMOTE_PORT} ${SSH_OPTS} core@${REMOTE_HOST} "rm /home/core/init-worker.sh"
54+ ssh -i ${IDENT} -p ${REMOTE_PORT} ${SSH_OPTS} core@${REMOTE_HOST} "rm /home/core/init-node.sh"
5555
5656 echo
5757 echo "Node bootstrap complete. It may take a few minutes for the node to become ready. Access your kubernetes cluster using:"
Original file line number Diff line number Diff line change @@ -68,7 +68,7 @@ $ kubectl --kubeconfig=cluster/auth/kubeconfig get nodes
6868 Run the `Launch Nodes` step for each additional node you wish to add, then using the public-ip, run:
6969
7070```
71- IDENT=${CLUSTER_PREFIX}-key.pem ./init-worker.sh <PUBLIC_IP> cluster/auth/kubeconfig
71+ IDENT=${CLUSTER_PREFIX}-key.pem ./init-node.sh <PUBLIC_IP> cluster/auth/kubeconfig
7272```
7373
7474 **NOTE:** It can take a few minutes for each node to download all of the required assets / containers.
Original file line number Diff line number Diff line change @@ -53,7 +53,7 @@ $ gcloud compute instances list ${CLUSTER_PREFIX}-core3
5353 Initialize each worker node by replacing `<node-ip>` with the EXTERNAL_IP from the commands above.
5454
5555```
56- $ IDENT=~/.ssh/google_compute_engine ./init-worker.sh <node-ip> cluster/auth/kubeconfig
56+ $ IDENT=~/.ssh/google_compute_engine ./init-node.sh <node-ip> cluster/auth/kubeconfig
5757```
5858
5959 **NOTE:** It can take a few minutes for each node to download all of the required assets / containers.
Original file line number Diff line number Diff line change @@ -22,7 +22,7 @@ set -euo pipefail
2222# - Use gcloud to launch master node
2323# - Use the quickstart init-master.sh script to run bootkube on that node
2424# - Use gcloud to launch worker node(s)
25- # - Use the quickstart init-worker.sh script to join node to kubernetes cluster
25+ # - Use the quickstart init-node.sh script to join node to kubernetes cluster
2626# - Run conformance tests against the launched cluster
2727#
2828 COREOS_CHANNEL=${COREOS_CHANNEL:-'coreos-stable'}
@@ -83,7 +83,7 @@ function add_workers {
8383 sleep 30 # TODO(aaron) Have seen "Too many authentication failures" in CI jobs. This seems to help, but should dig into why
8484 echo "Getting worker public IP"
8585 local WORKER_IP=$(gcloud compute instances list ${GCE_PREFIX}-w${i} --format=json | jq --raw-output '.[].networkInterfaces[].accessConfigs[].natIP')
86- cd /build/bootkube/hack/quickstart && SSH_OPTS="-o StrictHostKeyChecking=no" ./init-worker.sh ${WORKER_IP} /build/cluster/auth/kubeconfig
86+ cd /build/bootkube/hack/quickstart && SSH_OPTS="-o StrictHostKeyChecking=no" ./init-node.sh ${WORKER_IP} /build/cluster/auth/kubeconfig
8787 done
8888}
8989
You can’t perform that action at this time.
0 commit comments