Commit 46e4e87

Huy Mai authored and Cloud User committed
Add script to generate multiple fake nodes
1 parent f00b635 commit 46e4e87

14 files changed: +317 -37 lines changed
Lines changed: 9 additions & 0 deletions
@@ -0,0 +1,9 @@
+*.xml
+*.json
+*.log
+ironicclient.sh
+_clouds_yaml/*
+kubectl
+minikube-linux-amd64
+macgen
+uuids

Support/Multitenancy/ironic-env/03-images-and-run-local-services.sh

Lines changed: 64 additions & 7 deletions
@@ -1,14 +1,19 @@
 # Set variables
+N_NODES=${1:-1000}
 REGISTRY_NAME="registry"
 REGISTRY_PORT="5000"
 IMAGE_NAMES=(
-  "quay.io/metal3-io/sushy-tools"
+  "quay.io/metal3-io/ironic-python-agent"
+  # For now, sushy-tools needs to be compiled locally with https://review.opendev.org/c/openstack/sushy-tools/+/875366
+  # "quay.io/metal3-io/sushy-tools"
   "quay.io/metal3-io/ironic-ipa-downloader"
   "quay.io/metal3-io/ironic:latest"
   "quay.io/metal3-io/ironic-client"
-  "quay.io/metal3-io/keepalived"
+  "quay.io/metal3-io/keepalived:v0.2.0"
+  "docker.io/nicolaka/netshoot"
 )

+
 # Attach provisioning and baremetal network interfaces to minikube domain
 virsh attach-interface --domain minikube --model virtio --source provisioning --type network --config
 virsh attach-interface --domain minikube --model virtio --source baremetal --type network --config
@@ -18,6 +23,8 @@ if ! podman ps | grep -q "$REGISTRY_NAME"; then
   podman run -d -p "$REGISTRY_PORT":"$REGISTRY_PORT" --name "$REGISTRY_NAME" docker.io/library/registry:2.7.1
 fi

+podman pod create -n infra-pod || true
+podman pod create -n ironic-pod || true
 # Pull images, tag to local registry, and push to registry
 for NAME in "${IMAGE_NAMES[@]}"; do
   # Pull and tag the image
@@ -27,11 +34,13 @@ for NAME in "${IMAGE_NAMES[@]}"; do
   podman push --tls-verify=false 127.0.0.1:5000/localimages/"${NAME##*/}"
 done

+podman push --tls-verify=false 127.0.0.1:5000/localimages/sushy-tools
+
 # Define variables for repeated values
 IRONIC_IMAGE="127.0.0.1:5000/localimages/ironic:latest"
 SUSHY_TOOLS_IMAGE="127.0.0.1:5000/localimages/sushy-tools"
 LIBVIRT_URI="qemu+ssh://[email protected]/system?&keyfile=/root/ssh/id_rsa_virt_power&no_verify=1&no_tty=1"
-API_URL="http://172.22.0.2:6385"
+API_URL="http://173.22.0.2:6385"
 CALLBACK_URL="http://172.22.0.2:5050/v1/continue"
 ADVERTISE_HOST="192.168.111.1"
 ADVERTISE_PORT="9999"
@@ -46,6 +55,9 @@ for DIR in "${DIRECTORIES[@]}"; do
   chmod -R 755 "$DIR"
 done

+rm -f nodes.json
+echo '[]' > nodes.json
+
 # Run httpd container
 podman run -d --net host --name httpd-infra \
   --pod infra-pod \
@@ -54,6 +66,46 @@ podman run -d --net host --name httpd-infra \
   -e LISTEN_ALL_INTERFACES=false \
   --entrypoint /bin/runhttpd \
   "$IRONIC_IMAGE"
+
+rm -rf macaddrs uuids
+
+function macgen {
+  hexdump -n 6 -ve '1/1 "%.2x "' /dev/random | awk -v a="2,6,a,e" -v r="$RANDOM" 'BEGIN{srand(r);}NR==1{split(a,b,",");r=int(rand()*4+1);printf "%s%s:%s:%s:%s:%s:%s\n",substr($1,0,1),b[r],$2,$3,$4,$5,$6}'
+}
+
+function generate_unique {
+  func=$1
+  store_file=$2
+  newgen=$($func)
+  if [[ ! -f "$store_file" || $(grep "$newgen" "$store_file") == "" ]]; then
+    echo "$newgen" >> "$store_file"
+    echo "$newgen"
+    return
+  fi
+  $func
+}
+
+for i in $(seq 1 "$N_NODES"); do
+  uuid=$(generate_unique uuidgen uuids)
+  macaddr=$(generate_unique macgen macgen)
+  name="fake${i}"
+  jq --arg node_name "${name}" \
+    --arg uuid "${uuid}" \
+    --arg macaddr "${macaddr}" \
+    '{
+      "uuid": $uuid,
+      "name": $node_name,
+      "power_state": "Off",
+      "nics": [
+        {"mac": $macaddr, "ip": "172.0.0.100"}
+      ]
+    }' nodes_template.json > node.json
+
+  jq -s '.[0] + [.[1] ]' nodes.json node.json > tmp.json
+  rm -f nodes.json
+  mv tmp.json nodes.json
+done
+
 # Set configuration options
 cat <<EOF >/opt/metal3-dev-env/ironic/virtualbmc/sushy-tools/conf.py
 import collections
@@ -65,23 +117,28 @@ SUSHY_EMULATOR_IGNORE_BOOT_DEVICE = False
 SUSHY_EMULATOR_VMEDIA_VERIFY_SSL = False
 SUSHY_EMULATOR_AUTH_FILE = "/root/sushy/htpasswd"
 SUSHY_EMULATOR_FAKE_DRIVER = True
-
 FAKE_IPA_API_URL = "${API_URL}"
 FAKE_IPA_INSPECTION_CALLBACK_URL = "${CALLBACK_URL}"
 FAKE_IPA_ADVERTISE_ADDRESS = Host(hostname="${ADVERTISE_HOST}", port="${ADVERTISE_PORT}")
+SUSHY_EMULATOR_FAKE_SYSTEMS = $(cat nodes.json)
 EOF

 # Create an htpasswd file
-cat <<EOF >/opt/metal3-dev-env/ironic/virtualbmc/sushy-tools/htpasswd
-admin:$2b${12}$/dVOBNatORwKpF.ss99KB.vESjfyONOxyH.UgRwNyZi1Xs/W2pGVS
+cat <<'EOF' > /opt/metal3-dev-env/ironic/virtualbmc/sushy-tools/htpasswd
+admin:$2b$12$/dVOBNatORwKpF.ss99KB.vESjfyONOxyH.UgRwNyZi1Xs/W2pGVS
 EOF

 # Generate ssh keys to use for virtual power and add them to authorized_keys
 sudo ssh-keygen -f /root/.ssh/id_rsa_virt_power -P "" -q -y
 sudo cat /root/.ssh/id_rsa_virt_power.pub | sudo tee -a /root/.ssh/authorized_keys

-# Create and start a container for sushy-tools
 podman run -d --net host --name sushy-tools --pod infra-pod \
   -v /opt/metal3-dev-env/ironic/virtualbmc/sushy-tools:/root/sushy \
   -v /root/.ssh:/root/ssh \
   "${SUSHY_TOOLS_IMAGE}"
+
+podman run --entrypoint='["sushy-fake-ipa", "--config", "/root/sushy/conf.py"]' \
+  -d --net host --name fake-ipa --pod infra-pod \
+  -v /opt/metal3-dev-env/ironic/virtualbmc/sushy-tools:/root/sushy \
+  -v /root/.ssh:/root/ssh \
+  "${SUSHY_TOOLS_IMAGE}"

Support/Multitenancy/ironic-env/05-apply-manifests.sh

Lines changed: 19 additions & 26 deletions
@@ -2,48 +2,41 @@ set -e
 # Apply ironic
 kubectl apply -f manifests/ironic.yaml -n baremetal-operator-system
 kubectl -n baremetal-operator-system wait --for=condition=available deployment/baremetal-operator-ironic --timeout=300s
-cat <<'EOF' >ironicclient.sh
+
+openstack_dir="${PWD}/_clouds_yaml"
+ironic_client="ironicclient.sh"
+ironic_port=6385
+
+cat << EOT >"${ironic_client}"
 #!/bin/bash

 DIR="$(dirname "$(readlink -f "$0")")"

-if [ -d "${PWD}/_clouds_yaml" ]; then
-  MOUNTDIR="${PWD}/_clouds_yaml"
+if [ -d $openstack_dir ]; then
+  MOUNTDIR=$openstack_dir
 else
-  echo "cannot find _clouds_yaml"
+  echo 'cannot find '$openstack_dir
   exit 1
 fi

-if [ "$1" == "baremetal" ] ; then
+if [ \$1 == "baremetal" ] ; then
   shift 1
 fi

 # shellcheck disable=SC2086
 sudo podman run --net=host --tls-verify=false \
-  -v "${MOUNTDIR}:/etc/openstack" --rm \
-  -e OS_CLOUD="${OS_CLOUD:-metal3}" "172.22.0.1:5000/localimages/ironic-client" "$@"
-EOF
+  -v "${openstack_dir}:/etc/openstack" --rm \
+  -e OS_CLOUD="${OS_CLOUD:-metal3}" "172.22.0.1:5000/localimages/ironic-client" "\$@"
+EOT

-mkdir _clouds_yaml
+mkdir -p "${openstack_dir}"

-cat <<'EOF' >_clouds_yaml/clouds.yaml
+cat << EOT >"${openstack_dir}/clouds.yaml"
 clouds:
   metal3:
     auth_type: none
-    baremetal_endpoint_override: http://172.22.0.2:6385
+    baremetal_endpoint_override: http://172.22.0.2:${ironic_port}
     baremetal_introspection_endpoint_override: http://172.22.0.2:5050
-EOF
-sudo chmod a+x ironicclient.sh
-sudo ln -sf "$PWD/ironicclient.sh" "/usr/local/bin/baremetal"
-
-# Create ironic node
-
-baremetal node create --driver redfish --driver-info \
-  redfish_address=http://192.168.111.1:8000 --driver-info \
-  redfish_system_id=/redfish/v1/Systems/27946b59-9e44-4fa7-8e91-f3527a1ef094 --driver-info \
-  redfish_username=admin --driver-info redfish_password=password \
-  --name default-node
-
-# baremetal node manage $NODE_UUID
-# get mac : virsh domiflist vmname
-# baremetal port create 00:5c:52:31:3a:9c --node $NODE_UUID
+EOT
+sudo chmod a+x "${ironic_client}"
+sudo ln -sf "$PWD/${ironic_client}" "/usr/local/bin/baremetal"
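
Since the generated wrapper is symlinked to /usr/local/bin/baremetal, the node scripts added elsewhere in this commit can call it directly. A minimal usage sketch, assuming the manifests above were applied and the Ironic API at 172.22.0.2:6385 is reachable:

# Runs the 172.22.0.1:5000/localimages/ironic-client container with _clouds_yaml mounted
# at /etc/openstack and OS_CLOUD defaulting to metal3:
baremetal node list
baremetal node show fake1 -f json   # "fake1" is an example name produced by the node generator
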
Lines changed: 39 additions & 0 deletions
@@ -0,0 +1,39 @@
+#!/bin/bash
+#
+ironic_client="baremetal"
+nodes=($(jq -c -r '.[]' nodes.json))
+n_nodes=${#nodes[@]}
+start_idx=${1:-0}
+
+function create_nodes() {
+  start_idx=$1
+  end_idx=$2
+  if [[ $end_idx -ge $n_nodes ]]; then
+    end_idx=$(( n_nodes - 1 ))
+  fi
+  jq ".[$start_idx:$(( end_idx + 1 ))]" nodes.json > batch.json
+  for idx in $(seq $start_idx $end_idx); do
+    node=${nodes[$idx]}
+    uuid=$(echo ${node} | jq -r '.uuid')
+    node_name=$(echo ${node} | jq -r '.name')
+    ${ironic_client} node create --driver redfish --driver-info \
+      redfish_address=http://192.168.111.1:8000 --driver-info \
+      redfish_system_id=/redfish/v1/Systems/${uuid} --driver-info \
+      redfish_username=admin --driver-info redfish_password=password \
+      --uuid ${uuid} \
+      --name ${node_name} > /dev/null
+    echo "Created node ${node_name} on ironic"
+  done
+}
+
+batch_size=${2:-200}
+inspect_batch_size=${3:-30}
+while true; do
+  end_idx=$((start_idx + batch_size - 1))
+  create_nodes $start_idx $end_idx
+  ./07-inspect-nodes.sh $inspect_batch_size
+  start_idx=$(( end_idx + 1 ))
+  if [[ $start_idx -ge $(( n_nodes - 1 )) ]]; then
+    exit 0
+  fi
+done
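
This new script (presumably the ./06-create-nodes.sh invoked from the environment init script later in this diff) enrolls the fake nodes from nodes.json in Ironic in batches, writes each batch to batch.json, and hands it off to 07-inspect-nodes.sh. An illustrative invocation, with the argument order taken from the script body:

# $1 = index of the first node to enroll (default 0)
# $2 = nodes created per batch (default 200)
# $3 = batch size passed on to 07-inspect-nodes.sh (default 30)
./06-create-nodes.sh 0 200 30
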
Lines changed: 67 additions & 0 deletions
@@ -0,0 +1,67 @@
+#!/bin/bash
+
+ironic_client="baremetal"
+node_names=($(jq -c -r '.[].name' batch.json))
+n_nodes=${#node_names[@]}
+batch_size=${1:-30}
+
+function inspect_batch() {
+  start_idx=$1
+  end_idx=$2
+  if [[ "$end_idx" -ge "$n_nodes" ]]; then
+    end_idx=$(( n_nodes - 1 ))
+  fi
+  current_batch_size=$(( end_idx - start_idx + 1 ))
+  inspected_nodes=""
+  echo "${ironic_client}: Inspecting nodes batch ${node_names[$start_idx]} - ${node_names[$end_idx]}"
+  while true; do
+    for idx in $(seq "$start_idx" "$end_idx"); do
+      node_name=${node_names[$idx]}
+      if [[ "$inspected_nodes" == *$node_name* ]]; then
+        continue
+      fi
+      node_info=$("${ironic_client}" node show "$node_name" -f json)
+      provisioning_state=$(echo "${node_info}" | jq -r '.provision_state')
+      if [[ "$provisioning_state" == "enroll" ]]; then
+        "${ironic_client}" node manage "${node_name}"
+        continue
+      fi
+      if [[ "$provisioning_state" == "verifying" || "$provisioning_state" == "inspect wait" || "$provisioning_state" == "inspecting" ]]; then
+        continue
+      fi
+      inspection_info=$(echo "${node_info}" | jq -r '.inspection_finished_at')
+      if [[ "$provisioning_state" == "manageable" ]]; then
+        if [[ "$inspection_info" == "null" ]]; then
+          "${ironic_client}" node inspect "${node_name}"
+        else
+          inspected_nodes="${inspected_nodes} ${node_name}"
+          echo "${ironic_client}: ${node_name} was inspected at ${inspection_info}"
+          # ${ironic_client} node delete $node_name
+          if [[ $(echo "$inspected_nodes" | wc -w) == "$current_batch_size" ]]; then
+            echo "${ironic_client}: Done batch"
+            return
+          fi
+        fi
+      fi
+      if [[ "$provisioning_state" == "inspect failed" ]]; then
+        echo "${ironic_client}: ${node_name} was failed in inspection"
+        # ${ironic_client} node inspect $node_name
+        inspected_nodes="${inspected_nodes} ${node_name}"
+        if [[ $(echo "$inspected_nodes" | wc -w) == "$current_batch_size" ]]; then
+          echo "${ironic_client}: Done batch"
+          return
+        fi
+      fi
+    done
+  done
+}
+
+start_idx=0
+while true; do
+  end_idx=$((start_idx + batch_size - 1))
+  inspect_batch "$start_idx" "$end_idx"
+  start_idx=$(( end_idx + 1 ))
+  if [[ "$start_idx" -ge "$n_nodes" ]]; then
+    exit 0
+  fi
+done
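
The inspection loop above polls "baremetal node show <name> -f json" and reacts to the Ironic provision states it finds (enroll, verifying, inspecting, inspect wait, manageable, inspect failed), moving enrolled nodes to manageable and triggering inspection until every node in the batch is accounted for. To watch a single node progress through those states while the loop runs (illustrative; fake1 is an example name from the generator):

watch -n 5 'baremetal node show fake1 -f json | jq -r ".provision_state, .inspection_finished_at"'
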
Lines changed: 8 additions & 1 deletion
@@ -1,6 +1,13 @@
 set -e
+trap "trap - SIGTERM && kill -- -'$$'" SIGINT SIGTERM EXIT
+__dir__=$(realpath $(dirname $0))
+
+source "$__dir__/config.sh"
+# This is temporarily required since https://review.opendev.org/c/openstack/sushy-tools/+/875366 has not been merged.
+./build-sushy-tools-image.sh
 sudo ./01-vm-setup.sh
 ./02-configure-minikube.sh
-sudo ./03-images-and-run-local-services.sh
+sudo ./03-images-and-run-local-services.sh "$N_NODES"
 ./04-start-minikube.sh
 ./05-apply-manifests.sh
+./06-create-nodes.sh 0 "$NODE_CREATE_BATCH_SIZE" "$NODE_INSPECT_BATCH_SIZE"
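
The init script now sources config.sh for N_NODES, NODE_CREATE_BATCH_SIZE, and NODE_INSPECT_BATCH_SIZE. config.sh itself is not shown in this excerpt; a hypothetical sketch containing only the three variables consumed above, with values matching the defaults used by the individual scripts:

# config.sh (hypothetical contents, mirroring the scripts' defaults)
export N_NODES=1000
export NODE_CREATE_BATCH_SIZE=200
export NODE_INSPECT_BATCH_SIZE=30
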
Lines changed: 53 additions & 0 deletions
@@ -0,0 +1,53 @@
+#!/bin/bash
+#
+SUSHYTOOLS_IMAGE="127.0.0.1:5000/localimages/sushy-tools"
+
+# if [[ $(sudo podman images | grep "$SUSHYTOOLS_IMAGE") != "" ]]4; then
+#   exit 0
+# fi
+
+SUSHYTOOLS_DIR="$HOME/sushy-tools"
+rm -rf "$SUSHYTOOLS_DIR"
+git clone https://opendev.org/openstack/sushy-tools.git "$SUSHYTOOLS_DIR"
+cd "$SUSHYTOOLS_DIR"
+git fetch https://review.opendev.org/openstack/sushy-tools refs/changes/66/875366/18 && git cherry-pick FETCH_HEAD
+
+pip3 install build
+python3 -m build
+
+cd dist
+WHEEL_FILENAME=$(ls *.whl)
+echo $WHEEL_FILENAME
+
+cd ..
+
+cat <<EOF > "${SUSHYTOOLS_DIR}/Dockerfile"
+# Use the official Centos image as the base image
+FROM ubuntu:22.04
+
+# Install necessary packages
+RUN apt update -y && \
+    apt install -y python3 python3-pip python3-venv && \
+    apt clean all
+
+WORKDIR /opt
+
+# RUN python3 setup.py install
+
+# Copy the application code to the container
+COPY dist/${WHEEL_FILENAME} .
+
+RUN pip3 install ${WHEEL_FILENAME}
+
+ENV FLASK_DEBUG=1
+
+RUN mkdir -p /root/sushy
+
+# Set the default command to run when starting the container
+# CMD ["python3", "app.py"]
+# CMD ["sleep", "infinity"]
+CMD ["sushy-emulator", "-i", "::", "--config", "/root/sushy/conf.py"]
+EOF
+
+sudo podman build -t 127.0.0.1:5000/localimages/sushy-tools .
+rm -rf "$SUSHYTOOLS_DIR"
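
The resulting image is only built and tagged here; pushing it to the local registry is done by the extra podman push added in 03-images-and-run-local-services.sh. A quick check that the build produced the expected tag before that script runs:

# Should list 127.0.0.1:5000/localimages/sushy-tools with a recent CREATED time
sudo podman images --filter reference=127.0.0.1:5000/localimages/sushy-tools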
