
Commit eb07431

Author: Patrick Baxter

Merge pull request #515 from pbx0/tfmaster

support bringing up more masters in terraform and quickstart scripts

2 parents 2a62287 + 9ad5679

File tree

8 files changed: +42 -10 lines changed
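
Taken together, the quickstart Terraform config gains an `additional_masters` count and a `master_ips` output, and the init script gains a `TAG_MASTER` toggle so the same script can join masters as well as workers. A rough sketch of the resulting flow, assuming the other required Terraform variables are already set and using an illustrative count of one extra master:

```
# From hack/terraform-quickstart: provision one extra master (count is illustrative).
terraform apply -var 'additional_masters=1'
MASTER_IPS=$(terraform output -json master_ips | jq -r '.value[]')

# Join each new master from the quickstart directory, shipping kubelet.master via TAG_MASTER=true.
cd ../quickstart
for IP in $MASTER_IPS; do
  TAG_MASTER=true ./init-node.sh $IP cluster/auth/kubeconfig
done
```
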
Lines changed: 10 additions & 5 deletions

@@ -6,6 +6,7 @@ KUBECONFIG=$2
 REMOTE_PORT=${REMOTE_PORT:-22}
 IDENT=${IDENT:-${HOME}/.ssh/id_rsa}
 SSH_OPTS=${SSH_OPTS:-}
+TAG_MASTER=${TAG_MASTER:-false}
 
 function usage() {
     echo "USAGE:"
@@ -23,7 +24,7 @@ function init_worker_node() {
     # CA here manually.
     grep 'certificate-authority-data' ${KUBECONFIG} | awk '{print $2}' | base64 -d > /etc/kubernetes/ca.crt
 
-    mv /home/core/kubelet.worker /etc/systemd/system/kubelet.service
+    mv /home/core/kubelet.service /etc/systemd/system/kubelet.service
 
     # Start services
     systemctl daemon-reload
@@ -38,15 +39,19 @@ function init_worker_node() {
 if [ "${REMOTE_HOST}" != "local" ]; then
 
     # Copy kubelet service file and kubeconfig to remote host
-    scp -i ${IDENT} -P ${REMOTE_PORT} ${SSH_OPTS} kubelet.worker core@${REMOTE_HOST}:/home/core/kubelet.worker
+    if [ "$TAG_MASTER" = true ] ; then
+        scp -i ${IDENT} -P ${REMOTE_PORT} ${SSH_OPTS} kubelet.master core@${REMOTE_HOST}:/home/core/kubelet.service
+    else
+        scp -i ${IDENT} -P ${REMOTE_PORT} ${SSH_OPTS} kubelet.worker core@${REMOTE_HOST}:/home/core/kubelet.service
+    fi
     scp -i ${IDENT} -P ${REMOTE_PORT} ${SSH_OPTS} ${KUBECONFIG} core@${REMOTE_HOST}:/home/core/kubeconfig
 
     # Copy self to remote host so script can be executed in "local" mode
-    scp -i ${IDENT} -P ${REMOTE_PORT} ${SSH_OPTS} ${BASH_SOURCE[0]} core@${REMOTE_HOST}:/home/core/init-worker.sh
-    ssh -i ${IDENT} -p ${REMOTE_PORT} ${SSH_OPTS} core@${REMOTE_HOST} "sudo /home/core/init-worker.sh local /home/core/kubeconfig"
+    scp -i ${IDENT} -P ${REMOTE_PORT} ${SSH_OPTS} ${BASH_SOURCE[0]} core@${REMOTE_HOST}:/home/core/init-node.sh
+    ssh -i ${IDENT} -p ${REMOTE_PORT} ${SSH_OPTS} core@${REMOTE_HOST} "sudo /home/core/init-node.sh local /home/core/kubeconfig"
 
     # Cleanup
-    ssh -i ${IDENT} -p ${REMOTE_PORT} ${SSH_OPTS} core@${REMOTE_HOST} "rm /home/core/init-worker.sh"
+    ssh -i ${IDENT} -p ${REMOTE_PORT} ${SSH_OPTS} core@${REMOTE_HOST} "rm /home/core/init-node.sh"
 
     echo
     echo "Node bootstrap complete. It may take a few minutes for the node to become ready. Access your kubernetes cluster using:"

hack/quickstart/quickstart-aws.md

Lines changed: 1 addition & 1 deletion

@@ -68,7 +68,7 @@ $ kubectl --kubeconfig=cluster/auth/kubeconfig get nodes
 Run the `Launch Nodes` step for each additional node you wish to add, then using the public-ip, run:
 
 ```
-IDENT=${CLUSTER_PREFIX}-key.pem ./init-worker.sh <PUBLIC_IP> cluster/auth/kubeconfig
+IDENT=${CLUSTER_PREFIX}-key.pem ./init-node.sh <PUBLIC_IP> cluster/auth/kubeconfig
 ```
 
 **NOTE:** It can take a few minutes for each node to download all of the required assets / containers.

hack/quickstart/quickstart-gce.md

Lines changed: 1 addition & 1 deletion

@@ -53,7 +53,7 @@ $ gcloud compute instances list ${CLUSTER_PREFIX}-core3
 Initialize each worker node by replacing `<node-ip>` with the EXTERNAL_IP from the commands above.
 
 ```
-$ IDENT=~/.ssh/google_compute_engine ./init-worker.sh <node-ip> cluster/auth/kubeconfig
+$ IDENT=~/.ssh/google_compute_engine ./init-node.sh <node-ip> cluster/auth/kubeconfig
 ```
 
 **NOTE:** It can take a few minutes for each node to download all of the required assets / containers.

hack/terraform-quickstart/main.tf

Lines changed: 11 additions & 0 deletions

@@ -25,6 +25,17 @@ resource "aws_instance" "worker_node" {
   }
 }
 
+resource "aws_instance" "master_node" {
+  ami = "${data.aws_ami.coreos_ami.image_id}"
+  instance_type = "m3.medium"
+  key_name = "${var.ssh_key}"
+  count = "${var.additional_masters}"
+
+  tags {
+    Name = "${var.instance_tags}"
+  }
+}
+
 data "aws_ami" "coreos_ami" {
   most_recent = true
 
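The number of `master_node` instances is driven entirely by `count`; a hedged sketch of overriding it on the command line with Terraform's standard `-var` flag (any other required variables are assumed to be set elsewhere):

```
terraform plan -var 'additional_masters=2'
terraform apply -var 'additional_masters=2'
```
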
hack/terraform-quickstart/outputs.tf

Lines changed: 4 additions & 0 deletions

@@ -6,6 +6,10 @@ output "worker_ips" {
   value = ["${aws_instance.worker_node.*.public_ip}"]
 }
 
+output "master_ips" {
+  value = ["${aws_instance.master_node.*.public_ip}"]
+}
+
 output "self_host_etcd" {
   value = "${var.self_host_etcd}"
 }
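
The new output can be read the same way `start-cluster.sh` reads `worker_ips`, for example:

```
terraform output -json master_ips | jq -r '.value[]'
```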

hack/terraform-quickstart/start-cluster.sh

Lines changed: 7 additions & 1 deletion

@@ -3,6 +3,7 @@ set -euo pipefail
 
 export BOOTSTRAP_IP=`terraform output bootstrap_node_ip`
 export WORKER_IPS=`terraform output -json worker_ips | jq -r '.value[]'`
+export MASTER_IPS=`terraform output -json master_ips | jq -r '.value[]'`
 export SELF_HOST_ETCD=`terraform output self_host_etcd`
 export SSH_OPTS="-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"
 
@@ -11,5 +12,10 @@ cd ../quickstart
 
 for IP in $WORKER_IPS
 do
-  ./init-worker.sh $IP cluster/auth/kubeconfig
+  ./init-node.sh $IP cluster/auth/kubeconfig
+done
+
+for IP in $MASTER_IPS
+do
+  TAG_MASTER=true ./init-node.sh $IP cluster/auth/kubeconfig
 done

hack/terraform-quickstart/variables.tf

Lines changed: 6 additions & 0 deletions

@@ -27,6 +27,12 @@ variable "num_workers" {
   default = "1"
 }
 
+variable "additional_masters" {
+  description = "number of additional master nodes not including bootstrap node"
+  type = "string"
+  default = "0"
+}
+
 variable "region" {
   description = "aws region"
   type = "string"
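
Because the default is `"0"`, no extra masters are created unless the variable is overridden. One hedged alternative to the `-var` flag is Terraform's `TF_VAR_` environment convention (the value of 2 is illustrative):

```
export TF_VAR_additional_masters=2
terraform apply
```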

hack/tests/conformance-gce.sh

Lines changed: 2 additions & 2 deletions

@@ -22,7 +22,7 @@ set -euo pipefail
 # - Use gcloud to launch master node
 # - Use the quickstart init-master.sh script to run bootkube on that node
 # - Use gcloud to launch worker node(s)
-# - Use the quickstart init-worker.sh script to join node to kubernetes cluster
+# - Use the quickstart init-node.sh script to join node to kubernetes cluster
 # - Run conformance tests against the launched cluster
 #
 COREOS_CHANNEL=${COREOS_CHANNEL:-'coreos-stable'}
@@ -83,7 +83,7 @@ function add_workers {
     sleep 30 # TODO(aaron) Have seen "Too many authentication failures" in CI jobs. This seems to help, but should dig into why
     echo "Getting worker public IP"
     local WORKER_IP=$(gcloud compute instances list ${GCE_PREFIX}-w${i} --format=json | jq --raw-output '.[].networkInterfaces[].accessConfigs[].natIP')
-    cd /build/bootkube/hack/quickstart && SSH_OPTS="-o StrictHostKeyChecking=no" ./init-worker.sh ${WORKER_IP} /build/cluster/auth/kubeconfig
+    cd /build/bootkube/hack/quickstart && SSH_OPTS="-o StrictHostKeyChecking=no" ./init-node.sh ${WORKER_IP} /build/cluster/auth/kubeconfig
   done
 }
 