|
| 1 | +#!/usr/bin/env bats |
| 2 | + |
| 3 | +# Licensed to the Apache Software Foundation (ASF) under one or more |
| 4 | +# contributor license agreements. See the NOTICE file distributed with |
| 5 | +# this work for additional information regarding copyright ownership. |
| 6 | +# The ASF licenses this file to You under the Apache License, Version 2.0 |
| 7 | +# (the "License"); you may not use this file except in compliance with |
| 8 | +# the License. You may obtain a copy of the License at |
| 9 | +# |
| 10 | +# http://www.apache.org/licenses/LICENSE-2.0 |
| 11 | +# |
| 12 | +# Unless required by applicable law or agreed to in writing, software |
| 13 | +# distributed under the License is distributed on an "AS IS" BASIS, |
| 14 | +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 15 | +# See the License for the specific language governing permissions and |
| 16 | +# limitations under the License. |
| 17 | + |
# Shared helpers for the Solr docker integration tests (setup/teardown
# utilities, wait_for, output fd conventions).
load bats_helper

# Begin/end images for the rolling upgrade. You can test alternative images via
# export SOLR_BEGIN_IMAGE="apache/solr-nightly:9.9.0-slim" and then running
# ./gradlew iTest --tests test_docker_solrcloud.bats
SOLR_BEGIN_IMAGE="${SOLR_BEGIN_IMAGE:-apache/solr-nightly:9.10.0-SNAPSHOT-slim}"
SOLR_END_IMAGE="${SOLR_END_IMAGE:-apache/solr-nightly:10.0.0-SNAPSHOT-slim}"
| 25 | + |
# Per-test setup: verify Docker and both images are usable (skip otherwise),
# then record a start timestamp and an artifact directory for teardown().
setup() {
  common_clean_setup

  # Skip early when the Docker daemon or either image is unavailable.
  if ! command -v docker >/dev/null 2>&1 || ! docker info >/dev/null 2>&1; then
    skip "Docker is not available"
  fi
  docker pull "$SOLR_BEGIN_IMAGE" || skip "Docker image $SOLR_BEGIN_IMAGE is not available"
  docker pull "$SOLR_END_IMAGE" || skip "Docker image $SOLR_END_IMAGE is not available"

  # Timestamp consumed by teardown() to scope 'docker logs --since' output.
  export TEST_STARTED_AT_ISO
  TEST_STARTED_AT_ISO=$(date -Iseconds)

  # Diagnostics are persisted under Gradle's test-output directory.
  export ARTIFACT_DIR="${TEST_OUTPUT_DIR}/docker"
  mkdir -p "$ARTIFACT_DIR"
}
| 45 | + |
# Per-test teardown: on failure, dump Docker diagnostics to fd 3; always
# persist per-container logs/inspect/ps artifacts, then remove the
# containers, volumes, and network created by the test.
teardown() {
  # setup() may skip before assigning these (bats runs teardown even after a
  # skip); default them so artifact paths and --since stay well-formed.
  local started="${TEST_STARTED_AT_ISO:-1970-01-01T00:00:00+00:00}"
  local artifacts="${ARTIFACT_DIR:-${BATS_TEST_TMPDIR:-/tmp}}"

  # A test that neither completed nor skipped has failed.
  local failed=0
  if [[ -z "${BATS_TEST_COMPLETED:-}" && -z "${BATS_TEST_SKIPPED:-}" ]]; then
    failed=1
  fi

  if [[ "$failed" -eq 1 ]]; then
    echo "# Test failed - capturing Docker diagnostics" >&3
    echo "# === docker ps (summary) ===" >&3
    docker ps -a --format 'table {{.Names}}\t{{.Status}}\t{{.Image}}\t{{.Ports}}' >&3 2>&3 || true
  fi

  local container
  for container in solr-node1 solr-node2 solr-node3; do
    # Only act on containers that actually exist (running or stopped).
    if docker ps -a --format '{{.Names}}' 2>/dev/null | grep -q "^${container}$"; then
      if [[ "$failed" -eq 1 ]]; then
        echo "# === Docker logs for $container ===" >&3
        docker logs --timestamps --since "$started" "$container" >&3 2>&3 || echo "# Failed to get logs for $container" >&3
        echo "# === Docker inspect for $container ===" >&3
        # jq may be absent on the host; '|| true' keeps teardown best-effort.
        docker inspect "$container" | jq '.[] | {Name: .Name, State: .State, Ports: .NetworkSettings.Ports}' >&3 2>&3 || true
      fi
      # Persist artifacts for post-mortem inspection regardless of outcome.
      docker logs --timestamps "$container" >"$artifacts/${container}.log" 2>&1 || true
      docker inspect "$container" >"$artifacts/${container}.inspect.json" 2>&1 || true
      docker exec "$container" ps aux >"$artifacts/${container}.ps.txt" 2>&1 || true
    fi
  done

  echo "# Docker artifacts saved to: $artifacts" >&3

  # Best-effort cleanup of everything the test created.
  docker stop solr-node1 solr-node2 solr-node3 2>/dev/null || true
  docker rm solr-node1 solr-node2 solr-node3 2>/dev/null || true
  docker volume rm solr-data1 solr-data2 solr-data3 2>/dev/null || true
  docker network rm solrcloud-test 2>/dev/null || true
}
| 76 | + |
| 77 | +@test "Docker SolrCloud rolling upgrade" { |
| 78 | + # Networking & volumes |
| 79 | + docker network create solrcloud-test |
| 80 | + docker volume create solr-data1 |
| 81 | + docker volume create solr-data2 |
| 82 | + docker volume create solr-data3 |
| 83 | + |
| 84 | + echo "Starting solr-node1 with embedded ZooKeeper" |
| 85 | + docker run --name solr-node1 -d \ |
| 86 | + --network solrcloud-test \ |
| 87 | + --memory=400m \ |
| 88 | + --platform linux/amd64 \ |
| 89 | + -v solr-data1:/var/solr \ |
| 90 | + "$SOLR_BEGIN_IMAGE" solr start -f -c -m 200m --host solr-node1 -p 8983 |
| 91 | + docker exec solr-node1 solr assert --started http://solr-node1:8983 --timeout 10000 |
| 92 | + |
| 93 | + # start next 2 in parallel |
| 94 | + |
| 95 | + echo "Starting solr-node2 connected to first node's ZooKeeper" |
| 96 | + docker run --name solr-node2 -d \ |
| 97 | + --network solrcloud-test \ |
| 98 | + --memory=400m \ |
| 99 | + --platform linux/amd64 \ |
| 100 | + -v solr-data2:/var/solr \ |
| 101 | + "$SOLR_BEGIN_IMAGE" solr start -f -c -m 200m --host solr-node2 -p 8984 -z solr-node1:9983 |
| 102 | + |
| 103 | + echo "Started solr-node3 connected to first node's ZooKeeper" |
| 104 | + docker run --name solr-node3 -d \ |
| 105 | + --network solrcloud-test \ |
| 106 | + --memory=400m \ |
| 107 | + --platform linux/amd64 \ |
| 108 | + -v solr-data3:/var/solr \ |
| 109 | + "$SOLR_BEGIN_IMAGE" solr start -f -c -m 200m --host solr-node3 -p 8985 -z solr-node1:9983 |
| 110 | + |
| 111 | + docker exec solr-node2 solr assert --started http://solr-node2:8984 --timeout 30000 |
| 112 | + docker exec solr-node3 solr assert --started http://solr-node3:8985 --timeout 30000 |
| 113 | + |
| 114 | + echo "Creating a Collection" |
| 115 | + docker exec --user=solr solr-node1 solr create -c test-collection -n techproducts --shards 3 |
| 116 | + |
| 117 | + echo "Checking collection health" |
| 118 | + wait_for 30 1 docker exec solr-node1 solr healthcheck -c test-collection |
| 119 | + |
| 120 | + echo "Add some sample data" |
| 121 | + docker exec --user=solr solr-node1 solr post -c test-collection example/exampledocs/mem.xml |
| 122 | + assert_success |
| 123 | + |
| 124 | + # Begin rolling upgrade - upgrade node 3 first (reverse order: 3, 2, 1) |
| 125 | + echo "Starting rolling upgrade - upgrading node 3" |
| 126 | + docker stop solr-node3 |
| 127 | + docker rm solr-node3 |
| 128 | + docker run --name solr-node3 -d \ |
| 129 | + --network solrcloud-test \ |
| 130 | + --memory=400m \ |
| 131 | + --platform linux/amd64 \ |
| 132 | + -v solr-data3:/var/solr \ |
| 133 | + "$SOLR_END_IMAGE" solr start -f -m 200m --host solr-node3 -p 8985 -z solr-node1:9983 |
| 134 | + docker exec solr-node3 solr assert --started http://solr-node3:8985 --timeout 30000 |
| 135 | + assert_success |
| 136 | + |
| 137 | + # Upgrade node 2 second |
| 138 | + echo "Upgrading node 2" |
| 139 | + docker stop solr-node2 |
| 140 | + docker rm solr-node2 |
| 141 | + docker run --name solr-node2 -d \ |
| 142 | + --network solrcloud-test \ |
| 143 | + --memory=400m \ |
| 144 | + --platform linux/amd64 \ |
| 145 | + -v solr-data2:/var/solr \ |
| 146 | + "$SOLR_END_IMAGE" solr start -f -m 200m --host solr-node2 -p 8984 -z solr-node1:9983 |
| 147 | + docker exec solr-node2 solr assert --started http://solr-node2:8984 --timeout 30000 |
| 148 | + assert_success |
| 149 | + |
| 150 | + echo "Upgrading node 1 (ZK node)" |
| 151 | + docker stop solr-node1 |
| 152 | + docker rm solr-node1 |
| 153 | + docker run --name solr-node1 -d \ |
| 154 | + --network solrcloud-test \ |
| 155 | + --memory=400m \ |
| 156 | + --platform linux/amd64 \ |
| 157 | + -v solr-data1:/var/solr \ |
| 158 | + "$SOLR_END_IMAGE" solr start -f -m 200m --host solr-node1 -p 8983 |
| 159 | + docker exec solr-node1 solr assert --started http://solr-node1:8983 --timeout 30000 |
| 160 | + assert_success |
| 161 | + |
| 162 | + # Final collection health check |
| 163 | + wait_for 30 1 docker exec solr-node1 solr healthcheck -c test-collection |
| 164 | + |
| 165 | + echo "checking cluster has exactly 3 live nodes" |
| 166 | + run docker exec solr-node1 curl -s "http://solr-node1:8983/solr/admin/collections?action=CLUSTERSTATUS" |
| 167 | + assert_success |
| 168 | + |
| 169 | + local live_nodes_count=$(echo "$output" | jq -r '.cluster.live_nodes | length') |
| 170 | + assert_equal "$live_nodes_count" "3" |
| 171 | + |
| 172 | +} |
0 commit comments