9 | 9 | import java.time.OffsetDateTime;
10 | 10 | import java.util.ArrayList;
11 | 11 | import java.util.Collections;
12 |    | -import java.util.HashMap;
13 | 12 | import java.util.List;
14 |    | -import java.util.Map;
15 | 13 | import java.util.concurrent.Callable;
16 | 14 | import java.util.regex.Matcher;
17 | 15 | import java.util.regex.Pattern;
18 | 16 |
19 | 17 | import io.kubernetes.client.openapi.models.V1EnvVar;
20 | 18 | import io.kubernetes.client.openapi.models.V1LocalObjectReference;
21 | 19 | import io.kubernetes.client.openapi.models.V1ObjectMeta;
22 |    | -import io.kubernetes.client.openapi.models.V1Secret;
23 | 20 | import io.kubernetes.client.openapi.models.V1SecretReference;
24 | 21 | import oracle.weblogic.domain.AdminServer;
25 | 22 | import oracle.weblogic.domain.AdminService;
57 | 54 | import static oracle.weblogic.kubernetes.actions.ActionConstants.MODEL_DIR;
58 | 55 | import static oracle.weblogic.kubernetes.actions.ActionConstants.WORK_DIR;
59 | 56 | import static oracle.weblogic.kubernetes.actions.TestActions.createDomainCustomResource;
60 |    | -import static oracle.weblogic.kubernetes.actions.TestActions.createSecret;
   | 57 | +import static oracle.weblogic.kubernetes.actions.TestActions.getPodCreationTimestamp;
61 | 58 | import static oracle.weblogic.kubernetes.actions.TestActions.getServiceNodePort;
62 | 59 | import static oracle.weblogic.kubernetes.assertions.TestAssertions.domainExists;
63 | 60 | import static oracle.weblogic.kubernetes.assertions.TestAssertions.isPodRestarted;
   | 61 | +import static oracle.weblogic.kubernetes.utils.CommonMiiTestUtils.createDomainSecret;
64 | 62 | import static oracle.weblogic.kubernetes.utils.CommonPatchTestUtils.patchServerStartPolicy;
65 | 63 | import static oracle.weblogic.kubernetes.utils.CommonTestUtils.checkClusterReplicaCountMatches;
   | 64 | +import static oracle.weblogic.kubernetes.utils.CommonTestUtils.checkIsPodRestarted;
66 | 65 | import static oracle.weblogic.kubernetes.utils.CommonTestUtils.checkPodDeleted;
67 | 66 | import static oracle.weblogic.kubernetes.utils.CommonTestUtils.checkPodDoesNotExist;
68 | 67 | import static oracle.weblogic.kubernetes.utils.CommonTestUtils.checkPodInitializing;
69 | 68 | import static oracle.weblogic.kubernetes.utils.CommonTestUtils.checkPodReadyAndServiceExists;
   | 69 | +import static oracle.weblogic.kubernetes.utils.CommonTestUtils.checkPodRestarted;
70 | 70 | import static oracle.weblogic.kubernetes.utils.CommonTestUtils.createConfigMapAndVerify;
71 | 71 | import static oracle.weblogic.kubernetes.utils.CommonTestUtils.createOcirRepoSecret;
72 | 72 | import static oracle.weblogic.kubernetes.utils.CommonTestUtils.getExternalServicePodName;
@@ -106,6 +106,8 @@ class ItServerStartPolicy {
106 | 106 |   public static final String START_DOMAIN_SCRIPT = "startDomain.sh";
107 | 107 |   public static final String SCALE_CLUSTER_SCRIPT = "scaleCluster.sh";
108 | 108 |   public static final String STATUS_CLUSTER_SCRIPT = "clusterStatus.sh";
    | 109 | +  public static final String ROLLING_DOMAIN_SCRIPT = "rollDomain.sh";
    | 110 | +  public static final String ROLLING_CLUSTER_SCRIPT = "rollCluster.sh";
109 | 111 |   public static final String managedServerNamePrefix = "managed-server";
110 | 112 |   public static final String CLUSTER_1 = "cluster-1";
111 | 113 |   public static final String CLUSTER_2 = "cluster-2";
@@ -1132,13 +1134,153 @@ public void testRestartingMSWithExplicitServerStartStateWhileVaryingReplicaCount
1132 | 1134 |     logger.info("managed server " + serverName + " restarted successfully.");
1133 | 1135 |   }
1134 | 1136 |
     | 1137 | +  /**
     | 1138 | +   * Rolling restart the configured cluster using the sample script rollCluster.sh.
     | 1139 | +   * Verify that server(s) in the configured cluster are restarted and in RUNNING state.
     | 1140 | +   * Verify that server(s) in the dynamic cluster are not affected.
     | 1141 | +   */
     | 1142 | +  @Order(19)
     | 1143 | +  @Test
     | 1144 | +  @DisplayName("Rolling restart the configured cluster with rollCluster.sh script")
     | 1145 | +  public void testConfigClusterRollingRestart() {
     | 1146 | +    String configServerName = "config-cluster-server1";
     | 1147 | +    String configServerPodName = domainUid + "-" + configServerName;
     | 1148 | +    String dynamicServerPodName = domainUid + "-managed-server1";
     | 1149 | +
     | 1150 | +    // restore the env
     | 1151 | +    restoreEnv();
     | 1152 | +
     | 1153 | +    // get the creation time of the configured and dynamic server pods before the rolling restart
     | 1154 | +    OffsetDateTime configServerPodCreationTime =
     | 1155 | +        assertDoesNotThrow(() -> getPodCreationTimestamp(domainNamespace, "", configServerPodName),
     | 1156 | +            String.format("Failed to get creationTimestamp for pod %s", configServerPodName));
     | 1157 | +    OffsetDateTime dynServerPodCreationTime =
     | 1158 | +        assertDoesNotThrow(() -> getPodCreationTimestamp(domainNamespace, "", dynamicServerPodName),
     | 1159 | +            String.format("Failed to get creationTimestamp for pod %s", dynamicServerPodName));
     | 1160 | +
     | 1161 | +    // use rollCluster.sh to rolling-restart the configured cluster
     | 1162 | +    logger.info("Rolling restart the configured cluster with rollCluster.sh script");
     | 1163 | +    assertDoesNotThrow(() ->
     | 1164 | +        executeLifecycleScript(ROLLING_CLUSTER_SCRIPT, CLUSTER_LIFECYCLE, CLUSTER_2),
     | 1165 | +        String.format("Failed to run %s", ROLLING_CLUSTER_SCRIPT));
     | 1166 | +
     | 1167 | +    // wait till the rolling restart has started by checking that the configured server pod has restarted
     | 1168 | +    logger.info("Waiting for rolling restart to start by checking {0} pod is restarted in namespace {1}",
     | 1169 | +        configServerPodName, domainNamespace);
     | 1170 | +    checkPodRestarted(domainUid, domainNamespace, configServerPodName, configServerPodCreationTime);
     | 1171 | +
     | 1172 | +    // check that managed servers from the other cluster are not affected
     | 1173 | +    logger.info("Check dynamic managed server pods are not affected");
     | 1174 | +    assertDoesNotThrow(() -> assertTrue(checkClusterReplicaCountMatches(CLUSTER_1,
     | 1175 | +        domainUid, domainNamespace, replicaCount)));
     | 1176 | +
     | 1177 | +    boolean isPodRestarted =
     | 1178 | +        assertDoesNotThrow(() -> checkIsPodRestarted(domainNamespace,
     | 1179 | +            dynamicServerPodName, dynServerPodCreationTime).call().booleanValue(),
     | 1180 | +            String.format("pod %s should not have been restarted in namespace %s",
     | 1181 | +                dynamicServerPodName, domainNamespace));
     | 1182 | +
     | 1183 | +    assertFalse(isPodRestarted,
     | 1184 | +        String.format("dynamic server %s should not be rolling-restarted", dynamicServerPodName));
     | 1185 | +  }
     | 1186 | +
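
The negative check in this test rests on comparing a pod's creationTimestamp before and after the roll: a pod that was never restarted keeps its original timestamp. A minimal sketch of that idea, assuming the `getPodCreationTimestamp(namespace, labelSelector, podName)` signature used above; `isPodNotRolled` is a hypothetical local helper for illustration, not part of the test utilities:

```java
// Sketch of the non-restart check, expressed as a local helper inside this test class.
// Assumption: getPodCreationTimestamp(namespace, labelSelector, podName) is the
// TestActions method already imported above; isPodNotRolled itself is hypothetical.
private boolean isPodNotRolled(String podName, OffsetDateTime timestampBeforeRoll) {
  OffsetDateTime timestampAfterRoll = assertDoesNotThrow(
      () -> getPodCreationTimestamp(domainNamespace, "", podName),
      String.format("Failed to get creationTimestamp for pod %s", podName));
  // A pod that was never restarted keeps its original creationTimestamp.
  return !timestampAfterRoll.isAfter(timestampBeforeRoll);
}
```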
     | 1187 | +  /**
     | 1188 | +   * Rolling restart the dynamic cluster using the sample script rollCluster.sh.
     | 1189 | +   * Verify that server(s) in the dynamic cluster are restarted and in RUNNING state.
     | 1190 | +   * Verify that server(s) in the configured cluster are not affected.
     | 1191 | +   */
     | 1192 | +  @Order(20)
     | 1193 | +  @Test
     | 1194 | +  @DisplayName("Rolling restart the dynamic cluster with rollCluster.sh script")
     | 1195 | +  public void testDynamicClusterRollingRestart() {
     | 1196 | +    String dynamicServerName = "managed-server1";
     | 1197 | +    String dynamicServerPodName = domainUid + "-" + dynamicServerName;
     | 1198 | +    String configServerPodName = domainUid + "-config-cluster-server1";
     | 1199 | +
     | 1200 | +    // restore the env
     | 1201 | +    restoreEnv();
     | 1202 | +
     | 1203 | +    // get the creation time of the configured and dynamic server pods before the rolling restart
     | 1204 | +    OffsetDateTime dynServerPodCreationTime =
     | 1205 | +        assertDoesNotThrow(() -> getPodCreationTimestamp(domainNamespace, "", dynamicServerPodName),
     | 1206 | +            String.format("Failed to get creationTimestamp for pod %s", dynamicServerPodName));
     | 1207 | +    OffsetDateTime configServerPodCreationTime =
     | 1208 | +        assertDoesNotThrow(() -> getPodCreationTimestamp(domainNamespace, "", configServerPodName),
     | 1209 | +            String.format("Failed to get creationTimestamp for pod %s", configServerPodName));
     | 1210 | +
     | 1211 | +    // use rollCluster.sh to rolling-restart the dynamic cluster
     | 1212 | +    logger.info("Rolling restart the dynamic cluster with rollCluster.sh script");
     | 1213 | +    assertDoesNotThrow(() ->
     | 1214 | +        executeLifecycleScript(ROLLING_CLUSTER_SCRIPT, CLUSTER_LIFECYCLE, CLUSTER_1),
     | 1215 | +        String.format("Failed to run %s", ROLLING_CLUSTER_SCRIPT));
     | 1216 | +
     | 1217 | +    // wait till the rolling restart has started by checking that the dynamic server pod has restarted
     | 1218 | +    logger.info("Waiting for rolling restart to start by checking {0} pod is restarted in namespace {1}",
     | 1219 | +        dynamicServerPodName, domainNamespace);
     | 1220 | +    checkPodRestarted(domainUid, domainNamespace, dynamicServerPodName, dynServerPodCreationTime);
     | 1221 | +
     | 1222 | +    // check that managed servers from the other cluster are not affected
     | 1223 | +    logger.info("Check configured managed server pods are not affected");
     | 1224 | +    assertDoesNotThrow(() -> assertTrue(checkClusterReplicaCountMatches(CLUSTER_2,
     | 1225 | +        domainUid, domainNamespace, replicaCount)));
     | 1226 | +
     | 1227 | +    boolean isPodRestarted =
     | 1228 | +        assertDoesNotThrow(() -> checkIsPodRestarted(domainNamespace,
     | 1229 | +            configServerPodName, configServerPodCreationTime).call().booleanValue(),
     | 1230 | +            String.format("pod %s should not have been restarted in namespace %s",
     | 1231 | +                configServerPodName, domainNamespace));
     | 1232 | +
     | 1233 | +    assertFalse(isPodRestarted,
     | 1234 | +        String.format("configured server %s should not be rolling-restarted", configServerPodName));
     | 1235 | +  }
     | 1236 | +
     | 1237 | +  /**
     | 1238 | +   * Rolling restart the domain using the sample script rollDomain.sh.
     | 1239 | +   * Verify that server(s) in the domain are restarted and all servers are in RUNNING state.
     | 1240 | +   */
     | 1241 | +  @Order(21)
     | 1242 | +  @Test
     | 1243 | +  @DisplayName("Rolling restart the domain with rollDomain.sh script")
     | 1244 | +  public void testConfigDomainRollingRestart() {
     | 1245 | +    String configServerName = "config-cluster-server1";
     | 1246 | +    String dynamicServerName = "managed-server1";
     | 1247 | +    String configServerPodName = domainUid + "-" + configServerName;
     | 1248 | +    String dynamicServerPodName = domainUid + "-" + dynamicServerName;
     | 1249 | +
     | 1250 | +    // restore the env
     | 1251 | +    restoreEnv();
     | 1252 | +
     | 1253 | +    // get the creation time of the configured and dynamic server pods before the rolling restart
     | 1254 | +    OffsetDateTime configServerPodCreationTime =
     | 1255 | +        assertDoesNotThrow(() -> getPodCreationTimestamp(domainNamespace, "", configServerPodName),
     | 1256 | +            String.format("Failed to get creationTimestamp for pod %s", configServerPodName));
     | 1257 | +    OffsetDateTime dynServerPodCreationTime =
     | 1258 | +        assertDoesNotThrow(() -> getPodCreationTimestamp(domainNamespace, "", dynamicServerPodName),
     | 1259 | +            String.format("Failed to get creationTimestamp for pod %s", dynamicServerPodName));
     | 1260 | +
     | 1261 | +    // use rollDomain.sh to rolling-restart the whole domain
     | 1262 | +    logger.info("Rolling restart the domain with rollDomain.sh script");
     | 1263 | +    assertDoesNotThrow(() ->
     | 1264 | +        executeLifecycleScript(ROLLING_DOMAIN_SCRIPT, DOMAIN, ""),
     | 1265 | +        String.format("Failed to run %s", ROLLING_DOMAIN_SCRIPT));
     | 1266 | +
     | 1267 | +    // wait till the rolling restart has started by checking that the server pods have restarted
     | 1268 | +    logger.info("Waiting for rolling restart to start by checking {0} pod is restarted in namespace {1}",
     | 1269 | +        configServerPodName, domainNamespace);
     | 1270 | +    checkPodRestarted(domainUid, domainNamespace, configServerPodName, configServerPodCreationTime);
     | 1271 | +
     | 1272 | +    logger.info("Waiting for rolling restart to start by checking {0} pod is restarted in namespace {1}",
     | 1273 | +        dynamicServerPodName, domainNamespace);
     | 1274 | +    checkPodRestarted(domainUid, domainNamespace, dynamicServerPodName, dynServerPodCreationTime);
     | 1275 | +  }
     | 1276 | +
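
For context on what these new tests drive: the rollDomain.sh and rollCluster.sh sample scripts are expected to request the operator's rolling restart, which is conventionally done by changing restartVersion on the Domain resource. The sketch below shows the equivalent direct patch issued through plain kubectl via ProcessBuilder; treat the patched field and the version value "2" as assumptions to be confirmed against the script source, not as the scripts' actual implementation.

```java
// Illustrative only: request a rolling restart directly by bumping spec.restartVersion
// on the Domain custom resource (assumed to be what rollDomain.sh automates).
ProcessBuilder pb = new ProcessBuilder(
    "kubectl", "-n", domainNamespace, "patch", "domain", domainUid,
    "--type=json",
    "-p", "[{\"op\":\"add\",\"path\":\"/spec/restartVersion\",\"value\":\"2\"}]");
pb.inheritIO();  // surface kubectl output in the test log
int exitCode = assertDoesNotThrow(() -> pb.start().waitFor(),
    "Failed to run kubectl patch for restartVersion");
assertTrue(exitCode == 0, "kubectl patch for spec.restartVersion failed");
```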
1135 | 1277 |   /**
1136 | 1278 |    * Scale the configured cluster using the sample script scaleCluster.sh script
1137 | 1279 |    * Verify that server(s) in the configured cluster are scaled up and in RUNNING state.
1138 | 1280 |    * Verify that server(s) in the dynamic cluster are not affected.
1139 | 1281 |    * Restore the env using the sample script stopServer.sh.
1140 | 1282 |    */
1141 |      | -  @Order(19)
     | 1283 | +  @Order(22)
1142 | 1284 |   @Test
1143 | 1285 |   @DisplayName("Scale the configured cluster with scaleCluster.sh script")
1144 | 1286 |   public void testConfigClusterScale() {
@@ -1187,7 +1329,7 @@ public void testConfigClusterScale() {
1187 | 1329 |    * Verify that server(s) in the configured cluster are not affected.
1188 | 1330 |    * Restore the env using the sample script stopServer.sh.
1189 | 1331 |    */
1190 |      | -  @Order(20)
     | 1332 | +  @Order(23)
1191 | 1333 |   @Test
1192 | 1334 |   @DisplayName("Scale the dynamic cluster with scaleCluster.sh script")
1193 | 1335 |   public void testDynamicClusterScale() {
@@ -1257,16 +1399,25 @@ private void scalingClusters(String clusterName, String serverPodName, int repli
1257 | 1399 |     logger.info("The cluster {0} scaled successfully.", clusterName);
1258 | 1400 |   }
1259 | 1401 |
1260 |      | -  private static void createDomainSecret(String secretName, String username, String password, String domNamespace) {
1261 |      | -    Map<String, String> secretMap = new HashMap<>();
1262 |      | -    secretMap.put("username", username);
1263 |      | -    secretMap.put("password", password);
1264 |      | -    boolean secretCreated = assertDoesNotThrow(() -> createSecret(new V1Secret()
1265 |      | -        .metadata(new V1ObjectMeta()
1266 |      | -            .name(secretName)
1267 |      | -            .namespace(domNamespace))
1268 |      | -        .stringData(secretMap)), "Create secret failed with ApiException");
1269 |      | -    assertTrue(secretCreated, String.format("create secret failed for %s in namespace %s", secretName, domNamespace));
     | 1402 | +  private void restoreEnv() {
     | 1403 | +    int newReplicaCount = 2;
     | 1404 | +    String configServerName = "config-cluster-server" + newReplicaCount;
     | 1405 | +    String configServerPodName = domainUid + "-" + configServerName;
     | 1406 | +    String dynamicServerName = "managed-server" + newReplicaCount;
     | 1407 | +    String dynamicServerPodName = domainUid + "-" + dynamicServerName;
     | 1408 | +
     | 1409 | +    // restore test env
     | 1410 | +    assertDoesNotThrow(() ->
     | 1411 | +        executeLifecycleScript(STOP_SERVER_SCRIPT, SERVER_LIFECYCLE, configServerName),
     | 1412 | +        String.format("Failed to run %s", STOP_SERVER_SCRIPT));
     | 1413 | +    checkPodDeleted(configServerPodName, domainUid, domainNamespace);
     | 1414 | +    logger.info("managed server " + configServerPodName + " stopped successfully.");
     | 1415 | +
     | 1416 | +    assertDoesNotThrow(() ->
     | 1417 | +        executeLifecycleScript(STOP_SERVER_SCRIPT, SERVER_LIFECYCLE, dynamicServerName),
     | 1418 | +        String.format("Failed to run %s", STOP_SERVER_SCRIPT));
     | 1419 | +    checkPodDeleted(dynamicServerPodName, domainUid, domainNamespace);
     | 1420 | +    logger.info("managed server " + dynamicServerPodName + " stopped successfully.");
1270 | 1421 |   }
1271 | 1422 |
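
The new restoreEnv() helper brings each cluster back to the baseline by stopping the second server in both the configured and dynamic clusters. An optional sanity check could confirm the baseline replica count before the next lifecycle test runs; the sketch below reuses checkClusterReplicaCountMatches with the same argument order as the calls earlier in this class, which should be treated as an assumption.

```java
// Optional post-restore check (sketch): both clusters should be back at the baseline
// replica count. Argument order mirrors the existing calls in this test class.
assertDoesNotThrow(() -> {
  assertTrue(checkClusterReplicaCountMatches(CLUSTER_1, domainUid, domainNamespace, replicaCount),
      "cluster-1 is not back at the baseline replica count");
  assertTrue(checkClusterReplicaCountMatches(CLUSTER_2, domainUid, domainNamespace, replicaCount),
      "cluster-2 is not back at the baseline replica count");
});
```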
1272 | 1423 |   private static void createDomainResource(