Skip to content

Commit 6fd1f2e

Browse files
authored
Add test to check for HTTP access log in logHome (#2294)
* Adding http access log enable test
1 parent 1401d38 commit 6fd1f2e

File tree

2 files changed

+65
-28
lines changed

2 files changed

+65
-28
lines changed

integration-tests/src/test/java/oracle/weblogic/kubernetes/ItMiiUpdateDomainConfig.java

Lines changed: 62 additions & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -73,6 +73,7 @@
7373
import static oracle.weblogic.kubernetes.actions.TestActions.createConfigMap;
7474
import static oracle.weblogic.kubernetes.actions.TestActions.createDomainCustomResource;
7575
import static oracle.weblogic.kubernetes.actions.TestActions.createSecret;
76+
import static oracle.weblogic.kubernetes.actions.TestActions.execCommand;
7677
import static oracle.weblogic.kubernetes.actions.TestActions.getJob;
7778
import static oracle.weblogic.kubernetes.actions.TestActions.getPodLog;
7879
import static oracle.weblogic.kubernetes.actions.TestActions.getServiceNodePort;
@@ -107,6 +108,7 @@
107108
import static oracle.weblogic.kubernetes.utils.FileUtils.copyFileToPod;
108109
import static oracle.weblogic.kubernetes.utils.ThreadSafeLogger.getLogger;
109110
import static org.awaitility.Awaitility.with;
111+
import static org.junit.jupiter.api.Assertions.assertAll;
110112
import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
111113
import static org.junit.jupiter.api.Assertions.assertFalse;
112114
import static org.junit.jupiter.api.Assertions.assertNotEquals;
@@ -261,15 +263,47 @@ public void testMiiServerLogsAreOnPV() {
261263
}
262264

263265
/**
264-
* Create a WebLogic domain with a defined configmap in configuration/model
266+
* Check HTTP server logs are written on logHome.
267+
* The test looks for the string sample-war/index.jsp in HTTP access
268+
* logs
269+
*/
270+
@Test
271+
@Order(2)
272+
@DisplayName("Check the HTTP server logs are written to PersistentVolume")
273+
public void testMiiHttpServerLogsAreOnPV() {
274+
String[] podNames = {managedServerPrefix + "1", managedServerPrefix + "2"};
275+
for (String pod : podNames) {
276+
String curlCmd = "for i in {1..100}; do curl -v "
277+
+ "http://" + pod + ":8001/sample-war/index.jsp; done";
278+
logger.info("Command to send HTTP request and get HTTP response {0} ", curlCmd);
279+
ExecResult execResult = assertDoesNotThrow(() -> execCommand(domainNamespace, pod, null, true,
280+
"/bin/sh", "-c", curlCmd));
281+
if (execResult.exitValue() == 0) {
282+
logger.info("\n HTTP response is \n " + execResult.toString());
283+
assertAll("Check that the HTTP response is 200",
284+
() -> assertTrue(execResult.toString().contains("HTTP/1.1 200 OK"))
285+
);
286+
} else {
287+
fail("Failed to access sample application " + execResult.stderr());
288+
}
289+
}
290+
String[] servers = {"managed-server1", "managed-server2"};
291+
for (String server : servers) {
292+
logger.info("Checking HTTP server logs are written on PV and look for string sample-war/index.jsp in log");
293+
checkLogsOnPV("grep sample-war/index.jsp /shared/logs/" + server + "_access.log", adminServerPodName);
294+
}
295+
}
296+
297+
/**
298+
* Create a WebLogic domain with a defined configmap in configuration/model
265299
* section of the domain resource.
266-
* The configmap has multiple sparse WDT model files that define
300+
* The configmap has multiple sparse WDT model files that define
267301
* a JDBCSystemResource, a JMSSystemResource and a WLDFSystemResource.
268-
* Verify all the SystemResource configurations using the rest API call
302+
* Verify all the SystemResource configurations using the rest API call
269303
* using the public nodeport of the administration server.
270304
*/
271305
@Test
272-
@Order(2)
306+
@Order(3)
273307
@DisplayName("Verify the pre-configured SystemResources in the domain")
274308
public void testMiiCheckSystemResources() {
275309

@@ -309,7 +343,7 @@ public void testMiiCheckSystemResources() {
309343
* Verify SystemResources are deleted from the domain.
310344
*/
311345
@Test
312-
@Order(3)
346+
@Order(4)
313347
@DisplayName("Delete SystemResources from the domain")
314348
public void testMiiDeleteSystemResources() {
315349

@@ -343,7 +377,7 @@ public void testMiiDeleteSystemResources() {
343377

344378
String newRestartVersion = patchDomainResourceWithNewRestartVersion(domainUid, domainNamespace);
345379
logger.log(Level.INFO, "New restart version is {0}", newRestartVersion);
346-
380+
347381
assertTrue(verifyRollingRestartOccurred(pods, 1, domainNamespace),
348382
"Rolling restart failed");
349383

@@ -374,7 +408,7 @@ public void testMiiDeleteSystemResources() {
374408
* Verify JMS Server logs are written on PV.
375409
*/
376410
@Test
377-
@Order(4)
411+
@Order(5)
378412
@DisplayName("Add new JDBC/JMS SystemResources to the domain")
379413
public void testMiiAddSystemResources() {
380414

@@ -409,7 +443,7 @@ public void testMiiAddSystemResources() {
409443

410444
String newRestartVersion = patchDomainResourceWithNewRestartVersion(domainUid, domainNamespace);
411445
logger.log(Level.INFO, "New restart version is {0}", newRestartVersion);
412-
446+
413447
assertTrue(verifyRollingRestartOccurred(pods, 1, domainNamespace),
414448
"Rolling restart failed");
415449

@@ -440,11 +474,11 @@ public void testMiiAddSystemResources() {
440474
* Update the restart version of the domain resource.
441475
* Verify rolling restart of the domain by comparing PodCreationTimestamp
442476
* before and after rolling restart.
443-
* Verify servers from new cluster are not in running state, because
477+
* Verify servers from new cluster are not in running state, because
444478
* the spec-level replica count is set to zero (default).
445479
*/
446480
@Test
447-
@Order(5)
481+
@Order(6)
448482
@DisplayName("Add a dynamic cluster to the domain with default replica count")
449483
public void testMiiAddDynmicClusteriWithNoReplica() {
450484

@@ -504,7 +538,7 @@ public void testMiiAddDynmicClusteriWithNoReplica() {
504538
* Verify servers from new cluster are in running state.
505539
*/
506540
@Test
507-
@Order(6)
541+
@Order(7)
508542
@DisplayName("Add a dynamic cluster to domain with non-zero replica count")
509543
public void testMiiAddDynamicCluster() {
510544

@@ -581,7 +615,7 @@ public void testMiiAddDynamicCluster() {
581615
* Verify servers from new cluster are in running state.
582616
*/
583617
@Test
584-
@Order(7)
618+
@Order(8)
585619
@DisplayName("Add a configured cluster to the domain")
586620
public void testMiiAddConfiguredCluster() {
587621

@@ -649,11 +683,11 @@ public void testMiiAddConfiguredCluster() {
649683
* Start a WebLogic domain with model-in-image.
650684
* Patch the domain CRD with a new credentials secret.
651685
* Update domainRestartVersion to trigger a rolling restart of server pods.
652-
* Make sure all the server pods are re-started in a rolling fashion.
686+
* Make sure all the server pods are re-started in a rolling fashion.
653687
* Check the validity of new credentials by accessing WebLogic RESTful Service
654688
*/
655689
@Test
656-
@Order(8)
690+
@Order(9)
657691
@DisplayName("Change the WebLogic Admin credential of the domain")
658692
public void testMiiUpdateWebLogicCredential() {
659693
final boolean VALID = true;
@@ -709,23 +743,23 @@ public void testMiiUpdateWebLogicCredential() {
709743
}
710744

711745
/**
712-
* Start a WebLogic domain with a dynamic cluster with the following
746+
* Start a WebLogic domain with a dynamic cluster with the following
713747
* attributes MaxDynamicClusterSize(5) and MinDynamicClusterSize(1)
714748
* Set allowReplicasBelowMinDynClusterSize to false.
715749
* Make sure that the cluster can be scaled up to 5 servers and
716-
* scaled down to 1 server.
717-
* Create a configmap with a sparse model file with following attributes for
750+
* scaled down to 1 server.
751+
* Create a configmap with a sparse model file with following attributes for
718752
* Cluster/cluster-1/DynamicServers
719753
* MaxDynamicClusterSize(4) and MinDynamicClusterSize(2)
720754
* Patch the domain resource with the configmap and update restartVersion.
721-
* Make sure a rolling restart is triggered.
755+
* Make sure a rolling restart is triggered.
722756
* Now with the modified value
723757
* Make sure that the cluster can be scaled up to 4 servers.
724758
* Make sure JMS Connections and messages are distributed across 4 servers.
725759
* Make sure that the cluster can be scaled down below 2 servers.
726760
*/
727761
@Test
728-
@Order(9)
762+
@Order(10)
729763
@DisplayName("Test modification to Dynamic cluster size parameters")
730764
public void testMiiUpdateDynamicClusterSize() {
731765

@@ -774,8 +808,8 @@ public void testMiiUpdateDynamicClusterSize() {
774808
pods.put(managedServerPrefix + i, getPodCreationTime(domainNamespace, managedServerPrefix + i));
775809
}
776810

777-
// Update the Dynamic ClusterSize and add distributed destination
778-
// to verify JMS connection and message distribution after the
811+
// Update the Dynamic ClusterSize and add distributed destination
812+
// to verify JMS connection and message distribution after the
779813
// WebLogic cluster is scaled.
780814
String configMapName = "dynamic-cluster-size-cm";
781815
createClusterConfigMap(configMapName, "model.cluster.size.yaml");
@@ -810,7 +844,7 @@ public void testMiiUpdateDynamicClusterSize() {
810844
String.format("Scaling the cluster cluster-1 of domain %s in namespace %s failed", domainUid, domainNamespace));
811845
assertTrue(p1Success,
812846
String.format("replica patching to 3 failed for domain %s in namespace %s", domainUid, domainNamespace));
813-
847+
814848
// Make sure the 3rd Managed server comes up
815849
checkServiceExists(managedServerPrefix + "3", domainNamespace);
816850
checkServiceExists(managedServerPrefix + "4", domainNamespace);
@@ -842,11 +876,11 @@ public void testMiiUpdateDynamicClusterSize() {
842876
condition.getElapsedTimeInMS(),
843877
condition.getRemainingTimeInMS()))
844878
.until(runJmsClient(new String(javapCmd)));
845-
846-
// Since the MinDynamicClusterSize is set to 2 in the configmap
879+
880+
// Since the MinDynamicClusterSize is set to 2 in the configmap
847881
// and allowReplicasBelowMinDynClusterSize is set false, the replica
848-
// count can not go below 2. So during the following scale down operation
849-
// only managed-server3 and managed-server4 pod should be removed.
882+
// count can not go below 2. So during the following scale down operation
883+
// only managed-server3 and managed-server4 pod should be removed.
850884
logger.info("[After Patching] updating the replica count to 1");
851885
boolean p4Success = assertDoesNotThrow(() ->
852886
scaleCluster(domainUid, domainNamespace, "cluster-1", 1),
@@ -907,7 +941,7 @@ private static Callable<Boolean> runJmsClient(String javaCmd) {
907941

908942

909943
private static void createDatabaseSecret(
910-
String secretName, String username, String password,
944+
String secretName, String username, String password,
911945
String dburl, String domNamespace) throws ApiException {
912946
Map<String, String> secretMap = new HashMap();
913947
secretMap.put("username", username);
@@ -937,7 +971,7 @@ private static void createDomainSecret(String secretName, String username, Strin
937971

938972
private static void createDomainResource(
939973
String domainUid, String domNamespace, String adminSecretName,
940-
String repoSecretName, String encryptionSecretName,
974+
String repoSecretName, String encryptionSecretName,
941975
int replicaCount, String configmapName, String dbSecretName) {
942976
List<String> securityList = new ArrayList<>();
943977
securityList.add(dbSecretName);

integration-tests/src/test/resources/wdt-models/model-singleclusterdomain-sampleapp-wls.yaml

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -24,6 +24,9 @@ topology:
2424
"cluster-1-template":
2525
Cluster: "cluster-1"
2626
ListenPort : 8001
27+
WebServer:
28+
WebServerLog:
29+
BufferSizeKb: 1
2730

2831
appDeployments:
2932
Application:

0 commit comments

Comments
 (0)