|
1 |
| -// Copyright (c) 2020, 2022, Oracle and/or its affiliates. |
| 1 | +// Copyright (c) 2020, 2023, Oracle and/or its affiliates. |
2 | 2 | // Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.
|
3 | 3 |
|
4 | 4 | package oracle.weblogic.kubernetes;
|
|
16 | 16 | import java.util.Map;
|
17 | 17 | import java.util.Properties;
|
18 | 18 | import java.util.concurrent.Callable;
|
| 19 | +import java.util.logging.Level; |
19 | 20 |
|
20 | 21 | import io.kubernetes.client.custom.V1Patch;
|
21 | 22 | import io.kubernetes.client.openapi.models.CoreV1Event;
|
|
47 | 48 | import oracle.weblogic.kubernetes.utils.ExecResult;
|
48 | 49 | import oracle.weblogic.kubernetes.utils.OracleHttpClient;
|
49 | 50 | import org.awaitility.core.ConditionFactory;
|
| 51 | +import org.awaitility.core.ConditionTimeoutException; |
50 | 52 | import org.junit.jupiter.api.BeforeAll;
|
51 | 53 | import org.junit.jupiter.api.DisplayName;
|
52 | 54 | import org.junit.jupiter.api.MethodOrderer;
|
|
75 | 77 | import static oracle.weblogic.kubernetes.actions.TestActions.deleteSecret;
|
76 | 78 | import static oracle.weblogic.kubernetes.actions.TestActions.dockerTag;
|
77 | 79 | import static oracle.weblogic.kubernetes.actions.TestActions.execCommand;
|
| 80 | +import static oracle.weblogic.kubernetes.actions.TestActions.getContainerRestartCount; |
78 | 81 | import static oracle.weblogic.kubernetes.actions.TestActions.getCurrentIntrospectVersion;
|
79 | 82 | import static oracle.weblogic.kubernetes.actions.TestActions.getDomainCustomResource;
|
80 | 83 | import static oracle.weblogic.kubernetes.actions.TestActions.getNextIntrospectVersion;
|
81 | 84 | import static oracle.weblogic.kubernetes.actions.TestActions.getServiceNodePort;
|
82 | 85 | import static oracle.weblogic.kubernetes.actions.TestActions.getServicePort;
|
83 | 86 | import static oracle.weblogic.kubernetes.actions.TestActions.now;
|
84 | 87 | import static oracle.weblogic.kubernetes.actions.TestActions.patchDomainResourceWithNewIntrospectVersion;
|
| 88 | +import static oracle.weblogic.kubernetes.actions.TestActions.patchDomainResourceWithNewRestartVersion; |
85 | 89 | import static oracle.weblogic.kubernetes.actions.TestActions.scaleCluster;
|
86 | 90 | import static oracle.weblogic.kubernetes.actions.impl.Domain.patchDomainCustomResource;
|
87 | 91 | import static oracle.weblogic.kubernetes.actions.impl.Pod.getPod;
|
| 92 | +import static oracle.weblogic.kubernetes.assertions.TestAssertions.podPending; |
88 | 93 | import static oracle.weblogic.kubernetes.assertions.TestAssertions.podStateNotChanged;
|
89 | 94 | import static oracle.weblogic.kubernetes.assertions.TestAssertions.verifyRollingRestartOccurred;
|
90 | 95 | import static oracle.weblogic.kubernetes.utils.CommonMiiTestUtils.verifyPodsNotRolled;
|
|
94 | 99 | import static oracle.weblogic.kubernetes.utils.CommonTestUtils.testUntil;
|
95 | 100 | import static oracle.weblogic.kubernetes.utils.CommonTestUtils.verifyCredentials;
|
96 | 101 | import static oracle.weblogic.kubernetes.utils.CommonTestUtils.verifyServerCommunication;
|
97 |
| -import static oracle.weblogic.kubernetes.utils.CommonTestUtils.withStandardRetryPolicy; |
98 | 102 | import static oracle.weblogic.kubernetes.utils.ConfigMapUtils.createConfigMapForDomainCreation;
|
99 | 103 | import static oracle.weblogic.kubernetes.utils.DeployUtil.deployUsingRest;
|
100 | 104 | import static oracle.weblogic.kubernetes.utils.DomainUtils.createDomainAndVerify;
|
|
108 | 112 | import static oracle.weblogic.kubernetes.utils.K8sEvents.getOpGeneratedEvent;
|
109 | 113 | import static oracle.weblogic.kubernetes.utils.OKDUtils.createRouteForOKD;
|
110 | 114 | import static oracle.weblogic.kubernetes.utils.OperatorUtils.installAndVerifyOperator;
|
111 |
| -import static oracle.weblogic.kubernetes.utils.PatchDomainUtils.patchDomainResource; |
112 | 115 | import static oracle.weblogic.kubernetes.utils.PersistentVolumeUtils.createPV;
|
113 | 116 | import static oracle.weblogic.kubernetes.utils.PersistentVolumeUtils.createPVC;
|
114 | 117 | import static oracle.weblogic.kubernetes.utils.PodUtils.checkPodDoesNotExist;
|
|
129 | 132 | import static org.junit.jupiter.api.Assertions.assertFalse;
|
130 | 133 | import static org.junit.jupiter.api.Assertions.assertNotEquals;
|
131 | 134 | import static org.junit.jupiter.api.Assertions.assertNotNull;
|
| 135 | +import static org.junit.jupiter.api.Assertions.assertThrows; |
132 | 136 | import static org.junit.jupiter.api.Assertions.assertTrue;
|
133 | 137 |
|
134 | 138 | /**
|
@@ -681,12 +685,12 @@ void testCreateNewCluster() {
|
681 | 685 | }
|
682 | 686 |
|
683 | 687 | /**
|
684 |
| - * Modify the domain scope property |
685 |
| - * From: "image: container-registry.oracle.com/middleware/weblogic:ImageTagBeingUsed" to |
686 |
| - * To: "image: container-registry.oracle.com/middleware/weblogic:DateAndTimeStamp" |
687 |
| - * e.g, From ""image: container-registry.oracle.com/middleware/weblogic:12.2.1.4" |
688 |
| - * To: "image:container-registry.oracle.com/middleware/weblogic:2021-07-08-162571383699" |
689 |
| - * Verify all the pods are restarted and back to ready state |
| 688 | + * In this test firstly we patch the running domain with an image that does not exist. |
| 689 | + * Admin server pod will be recreated but fail to get into "Ready" state |
| 690 | + * So even with a new updated restartVersion rolling restart will not be triggered in the domain |
| 691 | + * Admin server pod is in Pending state with container restart count 0 |
| 692 | + * Secondly we patch the domain with a new available image |
| 693 | + * Verify rolling restart is triggered in the domain |
690 | 694 | * Verify the admin server is accessible and cluster members are healthy
|
691 | 695 | */
|
692 | 696 | @Test
|
@@ -721,26 +725,51 @@ void testUpdateImageName() {
|
721 | 725 | String imageName = domain1.getSpec().getImage();
|
722 | 726 | logger.info("Currently the image name used for the domain is: {0}", imageName);
|
723 | 727 |
|
724 |
| - //change image name to imageUpdate |
725 |
| - String imageTag = CommonTestUtils.getDateAndTimeStamp(); |
726 |
| - String imageUpdate = KIND_REPO != null ? KIND_REPO |
727 |
| - + (WEBLOGIC_IMAGE_NAME + ":" + imageTag).substring(TestConstants.BASE_IMAGES_REPO.length() + 1) |
728 |
| - : TEST_IMAGES_REPO + "/" + WEBLOGIC_IMAGE_NAME_DEFAULT + ":" + imageTag; |
729 |
| - getLogger().info(" The image name used for update is: {0}", imageUpdate); |
730 |
| - dockerTag(imageName, imageUpdate); |
731 |
| - dockerLoginAndPushImageToRegistry(imageUpdate); |
732 |
| - |
733 |
| - StringBuffer patchStr = null; |
734 |
| - patchStr = new StringBuffer("[{"); |
735 |
| - patchStr.append("\"op\": \"replace\",") |
736 |
| - .append(" \"path\": \"/spec/image\",") |
737 |
| - .append("\"value\": \"") |
738 |
| - .append(imageUpdate) |
739 |
| - .append("\"}]"); |
740 |
| - logger.info("PatchStr for imageUpdate: {0}", patchStr.toString()); |
741 |
| - |
742 |
| - assertTrue(patchDomainResource(domainUid, introDomainNamespace, patchStr), |
743 |
| - "patchDomainCustomResource(imageUpdate) failed"); |
| 728 | + //create image name used for 1st Update. This image essentially does not exist |
| 729 | + String imageTag1 = CommonTestUtils.getDateAndTimeStamp(); |
| 730 | + String imageUpdate1 = KIND_REPO != null ? KIND_REPO |
| 731 | + + (WEBLOGIC_IMAGE_NAME + ":" + imageTag1).substring(TestConstants.BASE_IMAGES_REPO.length() + 1) |
| 732 | + : TEST_IMAGES_REPO + "/" + WEBLOGIC_IMAGE_NAME_DEFAULT + ":" + imageTag1; |
| 733 | + getLogger().info(" The image name used for the 1st update is: {0}", imageUpdate1); |
| 734 | + |
| 735 | + // 1st time patch the domain resource with an image that does not exist in the registry, update domain |
| 736 | + // restartVersion. After this admin server pod will be recreated but fail |
| 737 | + // to get into "Ready" state because of ImagePullBackOff error. Since admin server is stuck managed server |
| 738 | + // pods will not be recreated. Rolling restart is not triggered in the domain. |
| 739 | + patchDomainWithNewImage(imageUpdate1); |
| 740 | + String newRestartVersion = patchDomainResourceWithNewRestartVersion(domainUid, introDomainNamespace); |
| 741 | + logger.log(Level.INFO, "New restart version is {0}", newRestartVersion); |
| 742 | + logger.info("Verifying rolling restart did NOT occur for domain {0} in namespace {1}", |
| 743 | + domainUid, introDomainNamespace); |
| 744 | + //verify rolling restart is not triggered in the domain |
| 745 | + assertThrows(ConditionTimeoutException.class, () -> { |
| 746 | + verifyRollingRestartOccurred(cl1podsWithTimeStamps, 1, introDomainNamespace); |
| 747 | + }); |
| 748 | + if (cluster2Created) { |
| 749 | + assertThrows(ConditionTimeoutException.class, () -> { |
| 750 | + verifyRollingRestartOccurred(cl2podsWithTimeStamps, 1, introDomainNamespace); |
| 751 | + }); |
| 752 | + } |
| 753 | + |
| 754 | + //verify admin server pod container restartCount is 0 before 2nd time image update |
| 755 | + assertTrue((getPodRestartCount(introDomainNamespace, adminServerPodName) == 0), |
| 756 | +        String.format("Pod %s restart count does not equal 0", adminServerPodName)); |
| 757 | + //verify admin server pod is in pending state before 2nd time image update |
| 758 | + assertDoesNotThrow(() -> podPending(adminServerPodName, domainUid, introDomainNamespace), |
| 759 | + String.format("podPending failed with ApiException for pod %s in namespace %s", |
| 760 | + adminServerPodName, introDomainNamespace)); |
| 761 | + |
| 762 | + //create image name used for 2nd Update and make it available with proper tagging |
| 763 | + String imageTag2 = CommonTestUtils.getDateAndTimeStamp(); |
| 764 | + String imageUpdate2 = KIND_REPO != null ? KIND_REPO |
| 765 | +        + (WEBLOGIC_IMAGE_NAME + ":" + imageTag2).substring(TestConstants.BASE_IMAGES_REPO.length() + 1) |
| 766 | + : TEST_IMAGES_REPO + "/" + WEBLOGIC_IMAGE_NAME_DEFAULT + ":" + imageTag2; |
| 767 | +    getLogger().info(" The image name used for the 2nd update is: {0}", imageUpdate2); |
| 768 | + dockerTag(imageName, imageUpdate2); |
| 769 | + dockerLoginAndPushImageToRegistry(imageUpdate2); |
| 770 | + |
| 771 | + //2nd time update domain resource with available image |
| 772 | + patchDomainWithNewImage(imageUpdate2); |
744 | 773 |
|
745 | 774 | domain1 = assertDoesNotThrow(() -> getDomainCustomResource(domainUid, introDomainNamespace),
|
746 | 775 | String.format("getDomainCustomResource failed with ApiException when tried to get domain %s in namespace %s",
|
@@ -1334,4 +1363,36 @@ private void verifyConnectionBetweenClusterMembers(String serverName, List<Strin
|
1334 | 1363 | + "&password=" + wlsPassword + "\"",managedServerPort);
|
1335 | 1364 | verifyServerCommunication(command, serverName, managedServerNames);
|
1336 | 1365 | }
|
| 1366 | + |
| 1367 | + private void patchDomainWithNewImage(String newImage) { |
| 1368 | + // get the original domain resource before update |
| 1369 | + Domain domain1 = assertDoesNotThrow(() -> getDomainCustomResource(domainUid, introDomainNamespace), |
| 1370 | + String.format("getDomainCustomResource failed with ApiException when tried to get domain %s in namespace %s", |
| 1371 | + domainUid, introDomainNamespace)); |
| 1372 | + assertNotNull(domain1, "Got null domain resource"); |
| 1373 | + assertNotNull(domain1.getSpec(), domain1 + "/spec is null"); |
| 1374 | + |
| 1375 | + logger.info("patch the domain resource with new image"); |
| 1376 | + String patchStr |
| 1377 | + = "[" |
| 1378 | + + "{\"op\": \"replace\", \"path\": \"/spec/image\", " |
| 1379 | + + "\"value\": \"" + newImage + "\"}" |
| 1380 | + + "]"; |
| 1381 | + logger.info("Updating domain configuration using patch string: {0}\n", patchStr); |
| 1382 | + V1Patch patch = new V1Patch(patchStr); |
| 1383 | + assertTrue(patchDomainCustomResource(domainUid, introDomainNamespace, patch, V1Patch.PATCH_FORMAT_JSON_PATCH), |
| 1384 | + "Failed to patch domain"); |
| 1385 | + } |
| 1386 | + |
| 1387 | + private int getPodRestartCount(String nameSpace, String podName) { |
| 1388 | + int restartCount = |
| 1389 | + assertDoesNotThrow(() -> getContainerRestartCount(nameSpace, null, |
| 1390 | + podName, null), |
| 1391 | + String.format("Failed to get the restart count of the container from pod %s in namespace %s", |
| 1392 | + podName, nameSpace)); |
| 1393 | + logger.info("For server pod {0} restart count is: {1}", |
| 1394 | + podName, restartCount); |
| 1395 | + return restartCount; |
| 1396 | + } |
| 1397 | + |
1337 | 1398 | }
|
0 commit comments