Skip to content

Commit 1f9a180

Browse files
authored
repair run_console_local.sh script
The script wasn't able to run because its exit-status check depended on the console container running interactively. Added a few extra functions to @camilamacedo86's original additions.
1 parent 259ae4c commit 1f9a180

File tree

1 file changed

+34
-14
lines changed

1 file changed

+34
-14
lines changed

scripts/run_console_local.sh

Lines changed: 34 additions & 14 deletions
Original file line numberDiff line numberDiff line change
# Terminal color escape sequences used for status messages.
# Assignment is separated from `readonly` so a tput failure is not
# silently masked by the builtin's own exit status (SC2155).
RED="$(tput setaf 1)"
GREEN="$(tput setaf 2)"
RESET="$(tput sgr0)"
readonly RED GREEN RESET
7+
8+
# Detect which container engine is available.
# Prefers podman; falls back to docker when podman is not installed.
# Sets the global POD_MANAGER consumed by every later container command.
verify_podman_binary() {
    # `command -v` is the POSIX-recommended existence check
    # (more portable than the `hash` builtin trick).
    if command -v podman >/dev/null 2>&1; then
        POD_MANAGER="podman"
    else
        POD_MANAGER="docker"
    fi
}
716

817
# Add port as 9000:9000 as arg when the SO is MacOS or Win
918
add_host_port_arg (){
@@ -14,38 +23,49 @@ add_host_port_arg (){
1423
}
1524

1625
# Fetch the latest upstream OpenShift console image using the engine
# selected by verify_podman_binary.
pull_ocp_console_image (){
    local image="quay.io/openshift/origin-console:latest"
    "$POD_MANAGER" pull "$image"
}
1928

20-
# Start the OpenShift web console container detached, pointed at the
# cluster of the current kubectl context and authenticated with the
# default kube-system service-account bearer token.
# Reads globals: POD_MANAGER (container engine), args (extra engine flags).
run_ocp_console_image (){
    secretname=$(kubectl get serviceaccount default --namespace=kube-system -o jsonpath='{.secrets[0].name}')
    endpoint=$(kubectl config view -o json | jq '{myctx: .["current-context"], ctxs: .contexts[], clusters: .clusters[]}' | jq 'select(.myctx == .ctxs.name)' | jq 'select(.ctxs.context.cluster == .clusters.name)' | jq '.clusters.cluster.server' -r)

    echo -e "Using $endpoint"
    # shellcheck disable=SC2086 -- $args is intentionally word-split
    # (add_host_port_arg builds it as a flag string, e.g. "-p 9000:9000").
    # $endpoint and the token substitution are quoted so URLs/tokens can
    # never be word-split or glob-expanded (SC2086).
    $POD_MANAGER run -dit $args \
        -e BRIDGE_USER_AUTH="disabled" \
        -e BRIDGE_K8S_MODE="off-cluster" \
        -e BRIDGE_K8S_MODE_OFF_CLUSTER_ENDPOINT="$endpoint" \
        -e BRIDGE_K8S_MODE_OFF_CLUSTER_SKIP_VERIFY_TLS=true \
        -e BRIDGE_K8S_AUTH="bearer-token" \
        -e BRIDGE_K8S_AUTH_BEARER_TOKEN="$(kubectl get secret "$secretname" --namespace=kube-system -o template --template='{{.data.token}}' | base64 --decode)" \
        quay.io/openshift/origin-console:latest &> /dev/null
}
3343

# Poll for the console container (matched by its build-source label).
# While it is running, print the console URL every 10 seconds; if it is
# not found, report the failure and exit non-zero.
verify_ocp_console_image (){
    while true; do
        # Explicit -n makes the "container id is non-empty" test clear (the
        # bare [ "$(...)" ] form relied on implicit non-empty semantics).
        if [ -n "$($POD_MANAGER ps -q -f label=io.openshift.build.source-location=https://github.com/openshift/console)" ];
        then
            echo -e "${GREEN}The OLM is accessible via web console at:${RESET}"
            echo -e "${GREEN}http://localhost:9000/${RESET}"
            echo -e "${GREEN}Press Ctrl-C to quit${RESET}"; sleep 10;
        else
            echo -e "${RED}Unable to run the console locally. The port may already be in use.${RESET}"
            echo -e "${RED}Check if the OLM is not accessible via web console at: http://localhost:9000/${RESET}"
            exit 1
        fi
    done
}
4458

59+
# SIGINT handler: remove the console container, then exit with the
# conventional 128+SIGINT (130) status.
function ctrl_c() {
    container_id="$($POD_MANAGER ps -q -f label=io.openshift.build.source-location=https://github.com/openshift/console)"
    # Quote the id (SC2086) and skip removal when no container matched --
    # `rm -f` with an empty argument would otherwise print an error.
    if [ -n "$container_id" ]; then
        "$POD_MANAGER" rm -f "$container_id"
    fi
    exit 130
}
4564

4665
# --- Script entry sequence ---------------------------------------------
verify_podman_binary      # choose podman or docker into POD_MANAGER
add_host_port_arg         # add host port mapping on MacOS/Windows
pull_ocp_console_image    # fetch the console image
run_ocp_console_image     # start the console container detached
trap ctrl_c INT           # clean up the container on Ctrl-C
verify_ocp_console_image  # poll and report until interrupted

0 commit comments

Comments
 (0)