diff --git a/devsetup/Makefile b/devsetup/Makefile index 208dcf4b..fa654a4d 100644 --- a/devsetup/Makefile +++ b/devsetup/Makefile @@ -1,5 +1,7 @@ CRC_VERSION ?= latest CRC_URL ?= 'https://developers.redhat.com/content-gateway/rest/mirror/pub/openshift-v4/clients/crc/$(CRC_VERSION)/crc-linux-amd64.tar.xz' +# possible values: openshift, microshift, okd +CRC_PRESET ?= openshift KUBEADMIN_PWD ?= 12345678 PULL_SECRET ?= ${PWD}/pull-secret.txt TIMEOUT ?= 300s @@ -132,6 +134,18 @@ BM_NODE_COUNT ?=1 BM_ROOT_PASSWORD_SECRET ?= BMH_NAMESPACE ?=openstack +SNO_WORK_DIR ?= ${HOME}/.sno +SNO_SSH_PUB_KEY ?= ${HOME}/.ssh/id_rsa.pub +SNO_LIBVIRT_STORAGE_POOL ?= default +SNO_INSTANCE_NAME ?= sno +SNO_CLUSTER_NETWORK ?= 10.217.0.0/22 +SNO_HOST_PREFIX ?= 23 +SNO_MACHINE_NETWORK ?= 192.168.130.0/24 +SNO_SERVICE_NETWORK ?= 10.217.4.0/23 +SNO_HOST_IP ?= 192.168.130.11 +SNO_OCP_VERSION ?= latest-4.18 +SNO_OCP_MIRROR_URL ?= https://mirror.openshift.com/pub/openshift-v4/clients/ocp + IPV6_LAB_WORK_DIR ?= ${HOME}/.ipv6lab IPV6_LAB_SSH_PUB_KEY ?= ${HOME}/.ssh/id_rsa.pub IPV6_LAB_LIBVIRT_STORAGE_POOL ?= default @@ -154,7 +168,7 @@ IPV6_LAB_SNO_HOST_PREFIX ?= 64 IPV6_LAB_SNO_MACHINE_NETWORK ?= fd00:abcd:abcd:fc00::/64 IPV6_LAB_SNO_SERVICE_NETWORK ?= fd00:abcd:abcd:fc03::/112 IPV6_LAB_SNO_HOST_IP ?= fd00:abcd:abcd:fc00::11 -IPV6_LAB_SNO_OCP_VERSION ?= latest-4.14 +IPV6_LAB_SNO_OCP_VERSION ?= latest-4.18 IPV6_LAB_SNO_OCP_MIRROR_URL ?= https://mirror.openshift.com/pub/openshift-v4/clients/ocp # default number of instances to deploy via edpm_deploy_instance @@ -236,6 +250,30 @@ crc_attach_default_interface: crc_attach_default_interface_cleanup: make attach_default_interface_cleanup +##@ SNO equivalent of CRC +.PHONY: sno +sno: export WORK_DIR = ${SNO_WORK_DIR} +sno: export SSH_PUB_KEY = ${SNO_SSH_PUB_KEY} +sno: export OCP_VERSION = ${SNO_OCP_VERSION} +sno: export OCP_MIRROR_URL = ${SNO_OCP_MIRROR_URL} +sno: export OCP_ADMIN_PASSWD = ${KUBEADMIN_PWD} +sno: export NETWORK_NAME = ${OCP_NETWORK_NAME}
+sno: export IPV4_ADDRESS = ${SNO_MACHINE_NETWORK} +sno: export IP_VERSION = v4 +sno: export VCPUS = ${CPUS} +sno: export DISK_SIZE = ${DISK} +sno: export LIBVIRT_STORAGE_POOL = ${SNO_LIBVIRT_STORAGE_POOL} +sno: ## Deploys Single-node-Openshift + bash scripts/network-setup.sh --create + bash scripts/ipv6-nat64/sno.sh --create + +.PHONY: sno_cleanup +sno_cleanup: export WORK_DIR = ${SNO_WORK_DIR} +sno_cleanup: export NETWORK_NAME = ${OCP_NETWORK_NAME} +sno_cleanup: ## Destroys Single-node-Openshift + bash scripts/ipv6-nat64/sno.sh --cleanup + bash scripts/network-setup.sh --cleanup + ##@ IPv6 Lab .PHONY: ipv6_lab_network ipv6_lab_network: export WORK_DIR = ${IPV6_LAB_WORK_DIR} diff --git a/devsetup/README.md b/devsetup/README.md index 3049d629..2e59e590 100644 --- a/devsetup/README.md +++ b/devsetup/README.md @@ -1,4 +1,6 @@ -# CRC automation + tool deployment +# OCP automation + tool deployment +### OCP installation + ### CRC CRC installation requires sudo to create a NetworkManager dispatcher file in /etc/NetworkManager/dispatcher.d/99-crc.sh, also the post step to add the CRC cert to the system store to be able to access the image registry from the host system.
-The steps it runs are the following: -```bash -# Pre req -# verifies that the pull secret is located at $(pwd)/pull-secret.txt (get it from https://cloud.redhat.com/openshift/create/local) - -* install crc -mkdir -p ~/bin -curl -L https://developers.redhat.com/content-gateway/rest/mirror/pub/openshift-v4/clients/crc/latest/crc-linux-amd64.tar.xz | tar -U --strip-components=1 -C ~/bin -xJf - --no-anchored crc - -# config CRC -crc config set consent-telemetry no -crc config set kubeadmin-password ${KUBEADMIN_PWD} -crc config set pull-secret-file ${PULL_SECRET_FILE} -crc setup -crc start +### SNO +Single-node-Openshift can also be installed in a configuration which is similar to CRC. It takes longer to install, but the resulting OCP better represents what end-users are running -# show kubeadmin and devel login details -crc console --credentials - -# add crc provided oc client to PATH -eval $(${CRC_BIN} oc-env) - -# login to crc env -oc login -u kubeadmin -p ${KUBEADMIN_PWD} https://api.crc.testing:6443 +* Get the pull secret from `https://cloud.redhat.com/openshift/create/local` and save it in `pull-secret.txt` of the repo dir, or set the `PULL_SECRET` env var to point to a different location. +* `SNO_OCP_VERSION` can be used to change requirements for the SNO install. -# make sure you can push to the internal registry; without this step you'll get x509 errors -echo -n "Adding router-ca to system certs to allow accessing the crc image registry" -oc extract secret/router-ca --keys=tls.crt -n openshift-ingress-operator --confirm -sudo cp -f tls.crt /etc/pki/ca-trust/source/anchors/crc-router-ca.pem -sudo update-ca-trust +```bash +cd /devsetup +CPUS=12 MEMORY=25600 DISK=100 make sno ``` -#### Access OCP from external systems +### Access OCP from external systems -On the local system add the required entries to your local /etc/hosts. The previous used ansible playbook also outputs the information: +On the local system add the required entries to your local /etc/hosts.
The previously used ansible playbook also outputs the information for CRC: ``` cat <> /etc/hosts @@ -58,8 +39,15 @@ cat <> /etc/hosts EOF ``` +and for SNO: +``` +cat <> /etc/hosts +192.168.130.11 api.sno.lab.example.com canary-openshift-ingress-canary.apps.sno.lab.example.com console-openshift-console.apps.sno.lab.example.com default-route-openshift-image-registry.apps.sno.lab.example.com downloads-openshift-console.apps.sno.lab.example.com oauth-openshift.apps.sno.lab.example.com +EOF +``` + **Note** -validate that the IP address matches the installed CRC VM. +validate that the IP address matches the installed OCP VM. To access OCP console @@ -92,7 +80,7 @@ a mechanism to configure them using the ansibleee-operator. After completing the devsetup, attach the crc VM to the default network: ``` -make crc_attach_default_interface +make attach_default_interface ``` This requires running operators required for controlplane and dataplane: diff --git a/devsetup/scripts/crc-setup.sh b/devsetup/scripts/crc-setup.sh index 7a0a3aa5..e1ad1632 100755 --- a/devsetup/scripts/crc-setup.sh +++ b/devsetup/scripts/crc-setup.sh @@ -16,6 +16,7 @@ DISK=${DISK:-31} HTTP_PROXY=${CRC_HTTP_PROXY:-""} HTTPS_PROXY=${CRC_HTTPS_PROXY:-""} CRC_MONITORING_ENABLED=${CRC_MONITORING_ENABLED:-false} +CRC_PRESET=${CRC_PRESET:-openshift} if [ -z "${CRC_URL}" ]; then echo "Please set CRC_URL as ARG1"; exit 1 @@ -42,6 +43,7 @@ if [ -z "${CRC_BIN}" ]; then fi # config CRC +${CRC_BIN} config set preset ${CRC_PRESET} ${CRC_BIN} config set network-mode system ${CRC_BIN} config set consent-telemetry no ${CRC_BIN} config set kubeadmin-password ${KUBEADMIN_PWD} @@ -65,9 +67,9 @@ fi if [ "$CRC_MONITORING_ENABLED" = "true" ]; then ${CRC_BIN} config set enable-cluster-monitoring true fi -${CRC_BIN} setup +${CRC_BIN} setup --log-level debug -${CRC_BIN} start +${CRC_BIN} start --log-level debug ${CRC_BIN} console --credentials # get the kubeadmin login and then login # add crc provided oc client to PATH diff --git
a/devsetup/scripts/ipv6-nat64/sno.sh b/devsetup/scripts/ipv6-nat64/sno.sh index c493682b..39e77812 100755 --- a/devsetup/scripts/ipv6-nat64/sno.sh +++ b/devsetup/scripts/ipv6-nat64/sno.sh @@ -51,23 +51,42 @@ WORK_DIR="${WORK_DIR:-$HOME/.sno-workdir}" # OCP installer PULL_SECRET="${PULL_SECRET:-${HOME}/pull-secret.txt}" SSH_PUB_KEY="${SSH_PUB_KEY:-${HOME}/.ssh/id_rsa.pub}" -OCP_VERSION="${OCP_VERSION:-latest-4.14}" +OCP_VERSION="${OCP_VERSION:-latest-4.18}" OCP_MIRROR_URL="${OCP_MIRROR_URL:-https://mirror.openshift.com/pub/openshift-v4/clients/ocp}" OCP_ADMIN_PASSWD=${OCP_ADMIN_PASSWD:-12345678} BOOTSTRAP_ISO_FILENAME="${BOOTSTRAP_ISO_FILENAME:-rhcos-live-with-ignition.iso}" # Networking -NETWORK_NAME="${NETWORK_NAME:-nat64}" -NAT64_IPV6_DNSMASQ_VAR_DIR=${NAT64_IPV6_DNSMASQ_VAR_DIR:-/var/lib/dnsmasq/${NETWORK_NAME}-v6} -NAT64_IPV6_DNSMASQ_SERVICE_NAME=${NAT64_IPV6_DNSMASQ_SERVICE_NAME:-${NETWORK_NAME}-v6-dnsmasq.service} -NAT64_IPV6_DNSMASQ_CONF_DIR=${NAT64_IPV6_DNSMASQ_CONF_DIR:-/etc/${NETWORK_NAME}-v6-dnsmasq} - -SNO_CLUSTER_NETWORK=${SNO_CLUSTER_NETWORK:-fd00:abcd:0::/48} -SNO_HOST_PREFIX=${SNO_HOST_PREFIX:-64} -SNO_MACHINE_NETWORK=${SNO_MACHINE_NETWORK:-fd00:abcd:abcd:fc00::/64} -SNO_SERVICE_NETWORK=${SNO_SERVICE_NETWORK:-fd00:abcd:abcd:fc03::/112} -SNO_HOST_IP=${SNO_HOST_IP:-fd00:abcd:abcd:fc00::11} -SNO_HOST_MAC="${SNO_HOST_MAC:-$(echo -n 52:54:00; dd bs=1 count=3 if=/dev/random 2>/dev/null | hexdump -v -e '/1 "-%02X"' | tr '-' ':')}" +IP_VERSION=${IP_VERSION:-v6} +if [ "${IP_VERSION}" = "v6" ]; then + NETWORK_NAME="${NETWORK_NAME:-nat64}" + NAT64_IPV6_DNSMASQ_VAR_DIR=${NAT64_IPV6_DNSMASQ_VAR_DIR:-/var/lib/dnsmasq/${NETWORK_NAME}-v6} + NAT64_IPV6_DNSMASQ_SERVICE_NAME=${NAT64_IPV6_DNSMASQ_SERVICE_NAME:-${NETWORK_NAME}-v6-dnsmasq.service} + NAT64_IPV6_DNSMASQ_CONF_DIR=${NAT64_IPV6_DNSMASQ_CONF_DIR:-/etc/${NETWORK_NAME}-v6-dnsmasq} + + SNO_CLUSTER_NETWORK=${SNO_CLUSTER_NETWORK:-fd00:abcd:0::/48} + SNO_HOST_PREFIX=${SNO_HOST_PREFIX:-64} + 
SNO_MACHINE_NETWORK=${SNO_MACHINE_NETWORK:-fd00:abcd:abcd:fc00::/64} + SNO_SERVICE_NETWORK=${SNO_SERVICE_NETWORK:-fd00:abcd:abcd:fc03::/112} + SNO_HOST_IP=${SNO_HOST_IP:-fd00:abcd:abcd:fc00::11} + SNO_HOST_MAC="${SNO_HOST_MAC:-$(echo -n 52:54:00; dd bs=1 count=3 if=/dev/random 2>/dev/null | hexdump -v -e '/1 "-%02X"' | tr '-' ':')}" +elif [ "${IP_VERSION}" = "v4" ]; then + NETWORK_NAME="${NETWORK_NAME:-sno}" + NAT64_IPV6_DNSMASQ_VAR_DIR=${NAT64_IPV6_DNSMASQ_VAR_DIR:-} + NAT64_IPV6_DNSMASQ_SERVICE_NAME=${NAT64_IPV6_DNSMASQ_SERVICE_NAME:-} + NAT64_IPV6_DNSMASQ_CONF_DIR=${NAT64_IPV6_DNSMASQ_CONF_DIR:-} + + # Set to same as CRC + SNO_CLUSTER_NETWORK=${SNO_CLUSTER_NETWORK:-10.217.0.0/22} + SNO_HOST_PREFIX=${SNO_HOST_PREFIX:-23} + SNO_MACHINE_NETWORK=${SNO_MACHINE_NETWORK:-192.168.130.0/24} + SNO_SERVICE_NETWORK=${SNO_SERVICE_NETWORK:-10.217.4.0/23} + SNO_HOST_IP=${SNO_HOST_IP:-192.168.130.11} + SNO_HOST_MAC="${SNO_HOST_MAC:-$(echo -n 52:54:00; dd bs=1 count=3 if=/dev/random 2>/dev/null | hexdump -v -e '/1 "-%02X"' | tr '-' ':')}" +else + echo "ERROR: unknown IP_VERSION=${IP_VERSION}" + exit 1 +fi # VM config SNO_INSTANCE_NAME="${SNO_INSTANCE_NAME:-sno}" @@ -94,7 +113,7 @@ fi mkdir -p "${WORK_DIR}"/ocp mkdir -p "${WORK_DIR}"/bin -sudo chcon -t bin_t ${WORK_DIR}/bin +sudo chcon -h system_u:object_r:bin_t:s0 ${WORK_DIR}/bin function get_oc_client { pushd ${WORK_DIR} @@ -205,6 +224,7 @@ function create_sno_instance { --virt-type ${VIRT_TYPE} \ --import \ --events on_crash=restart + ${VIRSH_CMD} net-update ${NETWORK_NAME} add ip-dhcp-host "" echo "OCP single-node instance ${SNO_INSTANCE_NAME} created" } @@ -256,6 +276,7 @@ function wait_for_install_complete { echo "${WORK_DIR}/bin/openshift-install --dir=${WORK_DIR}/ocp wait-for bootstrap-complete" ./bin/openshift-install --dir=${WORK_DIR}/ocp wait-for bootstrap-complete echo + create_source_env echo "Waiting for OCP cluster installation to complete:" sleep 60 echo "${WORK_DIR}/bin/openshift-install --dir=${WORK_DIR}/ocp 
wait-for install-complete" @@ -268,7 +289,7 @@ function post_config { pushd ${WORK_DIR} KUBEADMIN_PASSWD=$(cat ./ocp/auth/kubeadmin-password) - export KUBECONFIG="${WORK_DIR}/ocp/auth/kubeconfig" + . sno_env # Create htpasswd file htpasswd -c -B -b ${MY_TMP_DIR}/htpasswd admin ${OCP_ADMIN_PASSWD} @@ -351,11 +372,12 @@ function create { get_openshift_installer get_rhcos_live_iso create_install_iso - create_dnsmasq_config + if [ "${IP_VERSION}" = "v6" ]; then + create_dnsmasq_config + fi create_sno_instance wait_for_install_complete post_config - create_source_env print_cluster_info }