Skip to content
This repository was archived by the owner on Aug 2, 2019. It is now read-only.

Commit 190bbaf

Browse files
committed
Refactor install.sh to be used on any OKD cluster
Minishift users should use install-on-minishift.sh now, and anyone attempting to use install.sh will get a warning saying as much.
1 parent 1e325cd commit 190bbaf

File tree

3 files changed

+168
-134
lines changed

3 files changed

+168
-134
lines changed

etc/scripts/install-functions.sh

Lines changed: 140 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,140 @@
1+
#!/usr/bin/env bash

# This is a function library, expected to be source'd

# Versions of the knative components installed by the functions below.
# Each may be overridden from the environment before sourcing this file;
# otherwise the pinned defaults are used.
KNATIVE_SERVING_VERSION=${KNATIVE_SERVING_VERSION:-v0.2.2}
KNATIVE_BUILD_VERSION=${KNATIVE_BUILD_VERSION:-v0.2.0}
KNATIVE_EVENTING_VERSION=${KNATIVE_EVENTING_VERSION:-v0.2.0}

# Absolute directory containing this library, resolved correctly even
# when the file is source'd rather than executed.
INSTALL_SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"
# Loops until duration (car) is exceeded or command (cdr) returns non-zero.
# $1      - timeout in seconds
# $2..$n  - command string, re-evaluated every 5 seconds via eval
# Exits the whole script with status 1 on timeout (original used the
# non-portable `exit -1`, which bash maps to 255 anyway).
function timeout() {
  SECONDS=0; TIMEOUT=$1; shift
  # Quote "$*" so the command string reaches eval as a single word
  # exactly as the caller composed it.
  while eval "$*"; do
    sleep 5
    [[ $SECONDS -gt $TIMEOUT ]] && echo "ERROR: Timed out" >&2 && exit 1
  done
}
19+
20+
# Waits for all pods in the given namespace ($1) to settle: keeps polling
# (via timeout, 300s cap) while any pod reports a status other than
# Running or Completed.
wait_for_all_pods() {
  timeout 300 "oc get pods -n $1 2>&1 | grep -v -E '(Running|Completed|STATUS)'"
}
24+
25+
# Clones the operator-lifecycle-manager repo and installs OLM on the
# current cluster, then registers the knative and maistra catalog sources.
function install_olm {
  local root_dir="$INSTALL_SCRIPT_DIR/../.."
  local repo_dir="$root_dir/.repos"
  local olm_dir="$repo_dir/olm"

  # Start from a fresh checkout of OLM every time.
  mkdir -p "$repo_dir"
  rm -rf "$olm_dir"
  git clone https://github.com/operator-framework/operator-lifecycle-manager "$olm_dir"

  # CRDs go in first; the short sleep gives the API server a moment to
  # register them before the dependent resources are created.
  cat "$olm_dir"/deploy/okd/manifests/latest/*.crd.yaml | oc apply -f -
  sleep 1
  find "$olm_dir/deploy/okd/manifests/latest/" -type f ! -name "*crd.yaml" | sort | xargs cat | oc create -f -
  wait_for_all_pods openshift-operator-lifecycle-manager

  # perms required by the OLM console: $olm_dir/scripts/run_console_local.sh
  oc adm policy add-cluster-role-to-user cluster-admin system:serviceaccount:kube-system:default

  # knative catalog source
  oc apply -f "$root_dir/knative-operators.catalogsource.yaml"
  oc apply -f "$root_dir/maistra-operators.catalogsource.yaml"
}
43+
44+
# Installs the Maistra (istio) operator via an OLM Subscription, applies
# an istio Installation CR, waits (up to 900s) for the ansible installer
# job to complete, then scales down services this setup does not use.
function install_istio {
  # istio
  oc create ns istio-operator
  cat <<EOF | oc apply -f -
apiVersion: operators.coreos.com/v1alpha1
kind: Subscription
metadata:
  name: maistra
  namespace: istio-operator
spec:
  channel: alpha
  name: maistra
  source: maistra-operators
EOF
  wait_for_all_pods istio-operator

  cat <<EOF | oc apply -f -
apiVersion: istio.openshift.com/v1alpha1
kind: Installation
metadata:
  namespace: istio-operator
  name: istio-installation
spec:
  istio:
    authentication: false
    community: true
    version: 0.2.0
  kiali:
    username: admin
    password: admin
    prefix: kiali/
    version: v0.7.1
EOF
  timeout 900 'oc get pods -n istio-system && [[ $(oc get pods -n istio-system | grep openshift-ansible-istio-installer | grep -c Completed) -eq 0 ]]'

  # Scale down unused services deployed by the istio operator. The
  # jaeger pods will fail anyway due to the elasticsearch pod failing
  # due to "max virtual memory areas vm.max_map_count [65530] is too
  # low, increase to at least [262144]" which could be mitigated on
  # minishift with:
  # minishift ssh "echo 'echo vm.max_map_count = 262144 >/etc/sysctl.d/99-elasticsearch.conf' | sudo sh"
  oc scale -n istio-system --replicas=0 deployment/grafana
  oc scale -n istio-system --replicas=0 deployment/jaeger-collector
  oc scale -n istio-system --replicas=0 deployment/jaeger-query
  oc scale -n istio-system --replicas=0 statefulset/elasticsearch
}
90+
91+
# Creates the knative-build namespace and subscribes it to the
# knative-build operator at ${KNATIVE_BUILD_VERSION} via OLM.
function install_knative_build {
  oc create ns knative-build
  cat <<EOF | oc apply -f -
apiVersion: operators.coreos.com/v1alpha1
kind: Subscription
metadata:
  name: knative-build-subscription
  generateName: knative-build-
  namespace: knative-build
spec:
  source: knative-operators
  name: knative-build
  startingCSV: knative-build.${KNATIVE_BUILD_VERSION}
  channel: alpha
EOF
}
107+
108+
# Creates the knative-serving namespace and subscribes it to the
# knative-serving operator at ${KNATIVE_SERVING_VERSION} via OLM.
function install_knative_serving {
  oc create ns knative-serving
  cat <<EOF | oc apply -f -
apiVersion: operators.coreos.com/v1alpha1
kind: Subscription
metadata:
  name: knative-serving-subscription
  generateName: knative-serving-
  namespace: knative-serving
spec:
  source: knative-operators
  name: knative-serving
  startingCSV: knative-serving.${KNATIVE_SERVING_VERSION}
  channel: alpha
EOF
}
124+
125+
# Creates the knative-eventing namespace and subscribes it to the
# knative-eventing operator at ${KNATIVE_EVENTING_VERSION} via OLM.
function install_knative_eventing {
  oc create ns knative-eventing
  cat <<EOF | oc apply -f -
apiVersion: operators.coreos.com/v1alpha1
kind: Subscription
metadata:
  name: knative-eventing-subscription
  generateName: knative-eventing-
  namespace: knative-eventing
spec:
  source: knative-operators
  name: knative-eventing
  startingCSV: knative-eventing.${KNATIVE_EVENTING_VERSION}
  channel: alpha
EOF
}
Lines changed: 8 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,12 +1,16 @@
11
#!/usr/bin/env bash
22

3-
set -x
3+
# WARNING: this totally destroys and recreates your `knative` profile,
4+
# thereby guaranteeing (hopefully) a clean environment upon successful
5+
# completion.
46

57
if minishift status | grep "Minishift: Running" >/dev/null; then
68
echo "Please stop your running minishift to acknowledge this script will destroy it."
79
exit 1
810
fi
911

12+
set -x
13+
1014
# blow away everything in the knative profile
1115
minishift profile delete knative --force
1216

@@ -54,3 +58,6 @@ until oc login -u admin -p admin 2>/dev/null; do sleep 5; done;
5458
oc project myproject
5559
until oc adm policy add-scc-to-user privileged -z default; do sleep 5; done
5660
oc adm policy add-scc-to-user anyuid -z default
61+
62+
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"
63+
"$DIR/install.sh" -q

etc/scripts/install.sh

Lines changed: 20 additions & 133 deletions
Original file line numberDiff line numberDiff line change
@@ -1,143 +1,30 @@
11
#!/usr/bin/env bash

# Installs OLM first, and then istio and knative using OLM operators

if [ "$1" != "-q" ]; then
  echo
  echo " WARNING: This script will blindly attempt to install OLM, istio, and knative"
  echo " on your OKD cluster, so if any are already there, hijinks will ensue."
  echo
  echo " If your cluster is minishift, run $(dirname "$0")/install-on-minishift.sh instead."
  echo
  echo " Pass -q to disable this warning"
  echo
  # -r so a stray backslash in the input is not interpreted (SC2162)
  read -r -p "Enter to continue or Ctrl-C to exit: "
fi

set -x

DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"

source "$DIR/install-functions.sh"

install_olm
install_istio
install_knative_build
install_knative_serving
install_knative_eventing

wait_for_all_pods knative-build
# NOTE(review): original waited only for build and eventing; serving is
# installed above too, so wait for it as well.
wait_for_all_pods knative-serving
wait_for_all_pods knative-eventing

0 commit comments

Comments
 (0)