
Commit 77d5f6b

hzhao-github authored and rjeberhard committed
OWLS-115531 - converted the ItDedicatedMode, ItT3Channel, and ItMiiDynamicUpdatePart3 classes to run in the internal OKE Jenkins
1 parent 3af61da commit 77d5f6b

File tree: 3 files changed (+119, −57 lines)

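The common thread across the three files below is a retag from oke-sequential to oke-gate plus new code paths guarded by the OKE_CLUSTER constant from TestConstants. The constant's definition is not part of this commit; as a rough, assumed illustration only, such a flag is typically derived from an environment variable:

// Illustrative sketch only -- not the actual TestConstants source.
// Assumes the flag is driven by an environment variable named OKE_CLUSTER.
public class TestConstantsSketch {
  public static final boolean OKE_CLUSTER =
      Boolean.parseBoolean(System.getenv().getOrDefault("OKE_CLUSTER", "false"));
}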

integration-tests/src/test/java/oracle/weblogic/kubernetes/ItDedicatedMode.java

Lines changed: 12 additions & 5 deletions

@@ -37,6 +37,7 @@
 import static oracle.weblogic.kubernetes.TestConstants.MANAGED_SERVER_NAME_BASE;
 import static oracle.weblogic.kubernetes.TestConstants.MII_BASIC_IMAGE_NAME;
 import static oracle.weblogic.kubernetes.TestConstants.MII_BASIC_IMAGE_TAG;
+import static oracle.weblogic.kubernetes.TestConstants.OKE_CLUSTER;
 import static oracle.weblogic.kubernetes.TestConstants.OPERATOR_CHART_DIR;
 import static oracle.weblogic.kubernetes.TestConstants.OPERATOR_RELEASE_NAME;
 import static oracle.weblogic.kubernetes.TestConstants.TEST_IMAGES_REPO_SECRET_NAME;
@@ -46,6 +47,7 @@
 import static oracle.weblogic.kubernetes.actions.impl.Domain.scaleClusterWithRestApi;
 import static oracle.weblogic.kubernetes.utils.ClusterUtils.createClusterResourceAndAddReferenceToDomain;
 import static oracle.weblogic.kubernetes.utils.CommonTestUtils.checkPodReadyAndServiceExists;
+import static oracle.weblogic.kubernetes.utils.CommonTestUtils.scaleAndVerifyCluster;
 import static oracle.weblogic.kubernetes.utils.CommonTestUtils.verifyClusterAfterScaling;
 import static oracle.weblogic.kubernetes.utils.DomainUtils.createDomainAndVerify;
 import static oracle.weblogic.kubernetes.utils.OKDUtils.createRouteForOKD;
@@ -66,7 +68,7 @@
  */
 @DisplayName("Test Operator and WebLogic domain with Dedicated set to true")
 @Tag("kind-sequential")
-@Tag("oke-sequential")
+@Tag("oke-gate")
 @Tag("okd-wls-mrg")
 @IntegrationTest
 class ItDedicatedMode {
@@ -192,9 +194,9 @@ void testDedicatedModeSameNamespace() {
       OffsetDateTime originalCreationTimestamp
           = assertDoesNotThrow(() -> getPodCreationTimestamp(domain1Namespace, "", managedServerPodName),
           String.format("getPodCreationTimestamp failed with ApiException for pod %s in namespace %s",
-              managedServerPodName, domain1Namespace));
+          managedServerPodName, domain1Namespace));
       listOfPodCreationTimestamp.add(originalCreationTimestamp);
-     }
+    }
     // Scale up cluster-1 in domain1Namespace and verify it succeeds
     String externalRestHttpshost;
     if (TestConstants.KIND_CLUSTER
@@ -208,8 +210,13 @@ void testDedicatedModeSameNamespace() {
     }

     logger.info("scaling the cluster from {0} servers to {1} servers", replicaCount, replicaCount + 1);
-    scaleClusterWithRestApi(domainUid, clusterName, replicaCount + 1,
-        externalRestHttpshost, externalRestHttpsPort, opNamespace, opServiceAccount);
+    if (OKE_CLUSTER) {
+      scaleAndVerifyCluster(clusterResName, domainUid, domain1Namespace, managedServerPodPrefix,
+          replicaCount, replicaCount + 1, null, null);
+    } else {
+      scaleClusterWithRestApi(domainUid, clusterName, replicaCount + 1,
+          externalRestHttpshost, externalRestHttpsPort, opNamespace, opServiceAccount);
+    }

     verifyClusterAfterScaling(domainUid, domain1Namespace, managedServerPodPrefix,
         replicaCount, replicaCount + 1, null, null, listOfPodCreationTimestamp);
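In the internal OKE environment the test no longer goes through the operator's external REST endpoint to scale; it calls scaleAndVerifyCluster against the Cluster resource instead. The fragment below simply isolates that branch as a hypothetical helper for readability; the method name is invented, while the calls, arguments, and fields (clusterResName, domainUid, replicaCount, and so on) are the ones used in the diff above:

// Hypothetical helper wrapping the scaling branch introduced above.
private void scaleClusterOneServerUp() {
  if (OKE_CLUSTER) {
    // Internal OKE run: scale through the Cluster resource and verify the pods directly.
    scaleAndVerifyCluster(clusterResName, domainUid, domain1Namespace, managedServerPodPrefix,
        replicaCount, replicaCount + 1, null, null);
  } else {
    // Other environments: keep exercising the operator's external REST scaling API.
    scaleClusterWithRestApi(domainUid, clusterName, replicaCount + 1,
        externalRestHttpshost, externalRestHttpsPort, opNamespace, opServiceAccount);
  }
}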

integration-tests/src/test/java/oracle/weblogic/kubernetes/ItMiiDynamicUpdatePart3.java

Lines changed: 28 additions & 17 deletions

@@ -25,6 +25,7 @@
 import org.junit.jupiter.api.Test;

 import static oracle.weblogic.kubernetes.TestConstants.MII_DYNAMIC_UPDATE_EXPECTED_ERROR_MSG;
+import static oracle.weblogic.kubernetes.TestConstants.OKE_CLUSTER;
 import static oracle.weblogic.kubernetes.TestConstants.OPERATOR_RELEASE_NAME;
 import static oracle.weblogic.kubernetes.TestConstants.WEBLOGIC_SLIM;
 import static oracle.weblogic.kubernetes.TestConstants.WEBLOGIC_VERSION;
@@ -45,6 +46,7 @@
 import static oracle.weblogic.kubernetes.utils.CommonMiiTestUtils.verifyPodIntrospectVersionUpdated;
 import static oracle.weblogic.kubernetes.utils.CommonMiiTestUtils.verifyPodsNotRolled;
 import static oracle.weblogic.kubernetes.utils.CommonTestUtils.checkSystemResourceConfig;
+import static oracle.weblogic.kubernetes.utils.CommonTestUtils.checkSystemResourceConfigViaAdminPod;
 import static oracle.weblogic.kubernetes.utils.CommonTestUtils.testUntil;
 import static oracle.weblogic.kubernetes.utils.CommonTestUtils.withStandardRetryPolicy;
 import static oracle.weblogic.kubernetes.utils.JobUtils.getIntrospectJobName;
@@ -69,7 +71,7 @@
 @DisplayName("Test dynamic updates to a model in image domain, part3")
 @IntegrationTest
 @Tag("olcne-mrg")
-@Tag("oke-sequential")
+@Tag("oke-gate")
 @Tag("kind-parallel")
 @Tag("toolkits-srg")
 @Tag("okd-wls-mrg")
@@ -292,8 +294,7 @@ void testMiiChangeDataSourceParameterWithCommitUpdateAndRoll() {

     // This test uses the WebLogic domain created in BeforeAll method
     // BeforeEach method ensures that the server pods are running
-    LinkedHashMap<String, OffsetDateTime> pods =
-        helper.addDataSourceAndVerify(false);
+    LinkedHashMap<String, OffsetDateTime> pods = helper.addDataSourceAndVerify(false);

     // Replace contents of an existing configMap with cm config and application target as
     // there are issues with removing them, WDT-535
@@ -319,12 +320,18 @@ void testMiiChangeDataSourceParameterWithCommitUpdateAndRoll() {
     verifyPodIntrospectVersionUpdated(pods.keySet(), introspectVersion, helper.domainNamespace);

     // check datasource configuration using REST api
-    int adminServiceNodePort
-        = getServiceNodePort(helper.domainNamespace, getExternalServicePodName(helper.adminServerPodName), "default");
-    assertNotEquals(-1, adminServiceNodePort, "admin server default node port is not valid");
-    assertTrue(checkSystemResourceConfig(helper.adminSvcExtHost, adminServiceNodePort,
-        "JDBCSystemResources/TestDataSource2/JDBCResource/JDBCDataSourceParams",
-        "jdbc\\/TestDataSource2-2"), "JDBCSystemResource JNDIName not found");
+    if (OKE_CLUSTER) {
+      assertTrue(checkSystemResourceConfigViaAdminPod(helper.adminServerPodName, helper.domainNamespace,
+          "JDBCSystemResources/TestDataSource2/JDBCResource/JDBCDataSourceParams",
+          "jdbc\\/TestDataSource2-2"), "JDBCSystemResource JNDIName not found");
+    } else {
+      int adminServiceNodePort
+          = getServiceNodePort(helper.domainNamespace, getExternalServicePodName(helper.adminServerPodName), "default");
+      assertNotEquals(-1, adminServiceNodePort, "admin server default node port is not valid");
+      assertTrue(checkSystemResourceConfig(helper.adminSvcExtHost, adminServiceNodePort,
+          "JDBCSystemResources/TestDataSource2/JDBCResource/JDBCDataSourceParams",
+          "jdbc\\/TestDataSource2-2"), "JDBCSystemResource JNDIName not found");
+    }
     logger.info("JDBCSystemResource configuration found");

     // check that the domain status condition contains the correct type and expected reason
@@ -333,8 +340,7 @@ void testMiiChangeDataSourceParameterWithCommitUpdateAndRoll() {

     // write sparse yaml to delete datasource to file, delete ds to keep the config clean
     Path pathToDeleteDSYaml = Paths.get(WORK_DIR + "/deleteds.yaml");
-    String yamlToDeleteDS = "resources:\n"
-        + " JDBCSystemResource:\n";
+    String yamlToDeleteDS = "resources:\n" + " JDBCSystemResource:\n";

     assertDoesNotThrow(() -> Files.write(pathToDeleteDSYaml, yamlToDeleteDS.getBytes()));

@@ -363,17 +369,22 @@ void testMiiChangeDataSourceParameterWithCommitUpdateAndRoll() {
     verifyPodIntrospectVersionUpdated(pods.keySet(), introspectVersion, helper.domainNamespace);

     // check datasource configuration is deleted using REST api
-    adminServiceNodePort
-        = getServiceNodePort(helper.domainNamespace, getExternalServicePodName(helper.adminServerPodName), "default");
-    assertNotEquals(-1, adminServiceNodePort, "admin server default node port is not valid");
-    assertFalse(checkSystemResourceConfig(helper.adminSvcExtHost, adminServiceNodePort, "JDBCSystemResources",
-        "TestDataSource2"), "Found JDBCSystemResource datasource, should be deleted");
+    if (OKE_CLUSTER) {
+      assertFalse(checkSystemResourceConfigViaAdminPod(helper.adminServerPodName, helper.domainNamespace,
+          "JDBCSystemResources",
+          "TestDataSource2"), "Found JDBCSystemResource datasource, should be deleted");
+    } else {
+      int adminServiceNodePort
+          = getServiceNodePort(helper.domainNamespace, getExternalServicePodName(helper.adminServerPodName), "default");
+      assertNotEquals(-1, adminServiceNodePort, "admin server default node port is not valid");
+      assertFalse(checkSystemResourceConfig(helper.adminSvcExtHost, adminServiceNodePort, "JDBCSystemResources",
+          "TestDataSource2"), "Found JDBCSystemResource datasource, should be deleted");
+    }
     logger.info("JDBCSystemResource Datasource is deleted");

     // check that the domain status condition contains the correct type and expected status
     logger.info("verifying the domain status condition contains the correct type and expected status");
     helper.verifyDomainStatusConditionNoErrorMsg("Completed", "True");
-
   }

   private void verifyIntrospectorFailsWithExpectedErrorMsg(String expectedErrorMsg) {
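The datasource checks switch from the NodePort-based checkSystemResourceConfig to checkSystemResourceConfigViaAdminPod when OKE_CLUSTER is set, presumably because the admin server's NodePort is not reachable from the test client in the internal OKE environment. The helper's implementation is not shown in this diff; the sketch below only illustrates the assumed approach of exec-ing curl against the WebLogic RESTful Management API from inside the admin server pod, with placeholder pod name, namespace, port, and credentials:

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.stream.Collectors;

// Sketch only: assumes the in-pod check runs curl inside the admin pod against the
// WebLogic RESTful Management API and does a substring match on the expected value.
public class AdminPodResourceCheckSketch {

  static boolean checkViaAdminPod(String adminPod, String namespace,
                                  String resourcePath, String expectedValue)
      throws IOException, InterruptedException {
    // kubectl exec -n <ns> <pod> -- curl -s --user <user>:<pass> http://localhost:7001/management/...
    String url = "http://localhost:7001/management/weblogic/latest/domainConfig/" + resourcePath;
    ProcessBuilder pb = new ProcessBuilder(
        "kubectl", "exec", "-n", namespace, adminPod, "--",
        "curl", "-s", "--user", "weblogic:welcome1", url);  // placeholder credentials
    pb.redirectErrorStream(true);
    Process process = pb.start();
    String output;
    try (BufferedReader reader =
             new BufferedReader(new InputStreamReader(process.getInputStream()))) {
      output = reader.lines().collect(Collectors.joining("\n"));
    }
    process.waitFor();
    // A substring match is enough for the assertions in the test (e.g. a JNDI name).
    return output.contains(expectedValue);
  }

  public static void main(String[] args) throws Exception {
    boolean found = checkViaAdminPod("domain1-admin-server", "mii-dynamic-update-ns",
        "JDBCSystemResources/TestDataSource2/JDBCResource/JDBCDataSourceParams",
        "jdbc/TestDataSource2-2");
    System.out.println("JNDI name found: " + found);
  }
}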

integration-tests/src/test/java/oracle/weblogic/kubernetes/ItT3Channel.java

Lines changed: 79 additions & 35 deletions

@@ -5,6 +5,7 @@

 import java.io.File;
 import java.io.FileOutputStream;
+import java.io.IOException;
 import java.net.http.HttpResponse;
 import java.nio.file.Path;
 import java.nio.file.Paths;
@@ -32,6 +33,8 @@
 import oracle.weblogic.kubernetes.annotations.Namespaces;
 import oracle.weblogic.kubernetes.logging.LoggingFacade;
 import oracle.weblogic.kubernetes.utils.BuildApplication;
+import oracle.weblogic.kubernetes.utils.ExecCommand;
+import oracle.weblogic.kubernetes.utils.ExecResult;
 import oracle.weblogic.kubernetes.utils.OracleHttpClient;
 import org.junit.jupiter.api.BeforeAll;
 import org.junit.jupiter.api.DisplayName;
@@ -46,7 +49,9 @@
 import static oracle.weblogic.kubernetes.TestConstants.DOMAIN_API_VERSION;
 import static oracle.weblogic.kubernetes.TestConstants.IMAGE_PULL_POLICY;
 import static oracle.weblogic.kubernetes.TestConstants.K8S_NODEPORT_HOST;
+import static oracle.weblogic.kubernetes.TestConstants.KUBERNETES_CLI;
 import static oracle.weblogic.kubernetes.TestConstants.MANAGED_SERVER_NAME_BASE;
+import static oracle.weblogic.kubernetes.TestConstants.OKE_CLUSTER;
 import static oracle.weblogic.kubernetes.TestConstants.TRAEFIK_INGRESS_HTTP_HOSTPORT;
 import static oracle.weblogic.kubernetes.TestConstants.WEBLOGIC_IMAGE_TO_USE_IN_SPEC;
 import static oracle.weblogic.kubernetes.actions.ActionConstants.APP_DIR;
@@ -81,7 +86,7 @@
 @DisplayName("Test T3 channel deployment")
 @IntegrationTest
 @Tag("olcne-mrg")
-@Tag("oke-sequential")
+@Tag("oke-gate")
 @Tag("kind-sequential")
 class ItT3Channel {
   // namespace constants
@@ -271,11 +276,21 @@ void testAdminServerT3Channel() {

     // deploy application and verify all servers functions normally
     //deploy clusterview application
-    logger.info("Deploying clusterview app {0} to cluster {1}",
-        clusterViewAppPath, clusterName);
-    deployUsingWlst(adminServerPodName, Integer.toString(t3ChannelPort),
-        ADMIN_USERNAME_DEFAULT, ADMIN_PASSWORD_DEFAULT, adminServerName + "," + clusterName, clusterViewAppPath,
-        domainNamespace);
+    if (OKE_CLUSTER) {
+      int adminPort = 7001;
+      assertDoesNotThrow(() -> deployUsingWlst(adminServerPodName,
+          String.valueOf(adminPort),
+          ADMIN_USERNAME_DEFAULT, ADMIN_PASSWORD_DEFAULT,
+          clusterName + "," + adminServerName,
+          clusterViewAppPath,
+          domainNamespace), "Deploying the application");
+    } else {
+      logger.info("Deploying clusterview app {0} to cluster {1}",
+          clusterViewAppPath, clusterName);
+      deployUsingWlst(adminServerPodName, Integer.toString(t3ChannelPort),
+          ADMIN_USERNAME_DEFAULT, ADMIN_PASSWORD_DEFAULT, adminServerName + "," + clusterName,
+          clusterViewAppPath, domainNamespace);
+    }

     List<String> managedServerNames = new ArrayList<String>();
     for (int i = 1; i <= replicaCount; i++) {
@@ -316,41 +331,70 @@ void testAdminServerT3Channel() {
   }

   private static void verifyMemberHealth(String adminServerPodName, List<String> managedServerNames,
-      String user, String code) {
-
+                                         String user, String code) {
     logger.info("Checking the health of servers in cluster");
     testUntil(() -> {
-      logger.info("Getting node port for default channel");
-      int serviceNodePort = assertDoesNotThrow(()
-          -> getServiceNodePort(domainNamespace, getExternalServicePodName(adminServerPodName), "default"),
-          "Getting admin server node port failed");
-      String host = K8S_NODEPORT_HOST;
-      String hostAndPort = host + ":" + serviceNodePort;
-      Map<String, String> headers = null;
-      if (TestConstants.KIND_CLUSTER
-          && !TestConstants.WLSIMG_BUILDER.equals(TestConstants.WLSIMG_BUILDER_DEFAULT)) {
-        hostAndPort = "localhost:" + TRAEFIK_INGRESS_HTTP_HOSTPORT;
-        headers = new HashMap<>();
-        headers.put("host", hostHeader);
-      }
-      String url = "http://" + hostAndPort
-          + "/clusterview/ClusterViewServlet?user=" + user + "&password=" + code;
-      HttpResponse<String> response;
-      response = OracleHttpClient.get(url, headers, true);
-
-      boolean health = true;
-      for (String managedServer : managedServerNames) {
-        health = health && response.body().contains(managedServer + ":HEALTH_OK");
-        if (health) {
-          logger.info(managedServer + " is healthy");
-        } else {
-          logger.info(managedServer + " health is not OK or server not found");
+      if (OKE_CLUSTER) {
+        // In internal OKE env, verifyMemberHealth in admin server pod
+        int adminPort = 7001;
+        final String command = KUBERNETES_CLI + " exec -n "
+            + domainNamespace + " " + adminServerPodName + " -- curl http://"
+            + adminServerPodName + ":"
+            + adminPort + "/clusterview/ClusterViewServlet"
+            + "\"?user=" + user
+            + "&password=" + code + "\"";
+
+        ExecResult result = null;
+        try {
+          result = ExecCommand.exec(command, true);
+        } catch (IOException | InterruptedException ex) {
+          logger.severe(ex.getMessage());
+        }
+
+        String response = result.stdout().trim();
+        logger.info(response);
+        boolean health = true;
+        for (String managedServer : managedServerNames) {
+          health = health && response.contains(managedServer + ":HEALTH_OK");
+          if (health) {
+            logger.info(managedServer + " is healthy");
+          } else {
+            logger.info(managedServer + " health is not OK or server not found");
+          }
+        }
+        return health;
+      } else {
+        logger.info("Getting node port for default channel");
+        int serviceNodePort = assertDoesNotThrow(()
+            -> getServiceNodePort(domainNamespace, getExternalServicePodName(adminServerPodName), "default"),
+            "Getting admin server node port failed");
+        String host = K8S_NODEPORT_HOST;
+        String hostAndPort = host + ":" + serviceNodePort;
+        Map<String, String> headers = null;
+        if (TestConstants.KIND_CLUSTER
+            && !TestConstants.WLSIMG_BUILDER.equals(TestConstants.WLSIMG_BUILDER_DEFAULT)) {
+          hostAndPort = "localhost:" + TRAEFIK_INGRESS_HTTP_HOSTPORT;
+          headers = new HashMap<>();
+          headers.put("host", hostHeader);
        }
+        String url = "http://" + hostAndPort
+            + "/clusterview/ClusterViewServlet?user=" + user + "&password=" + code;
+        HttpResponse<String> response;
+        response = OracleHttpClient.get(url, headers, true);
+
+        boolean health = true;
+        for (String managedServer : managedServerNames) {
+          health = health && response.body().contains(managedServer + ":HEALTH_OK");
+          if (health) {
+            logger.info(managedServer + " is healthy");
+          } else {
+            logger.info(managedServer + " health is not OK or server not found");
+          }
+        }
+        return health;
       }
-      return health;
     },
         logger,
         "Verifying the health of all cluster members");
   }
-
 }
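One detail of the new OKE branch in verifyMemberHealth: ExecCommand.exec is wrapped in a try/catch that only logs, so a failed exec would leave result null before result.stdout() is called. A null-safe variant of that step is sketched below; it reuses the ExecCommand and ExecResult utilities this diff already imports, while the wrapper method name is hypothetical:

// Sketch: null-safe wrapper around the in-pod exec step used by the OKE branch above.
private static String execInPodOrEmpty(String command) {
  ExecResult result = null;
  try {
    result = ExecCommand.exec(command, true);
  } catch (IOException | InterruptedException ex) {
    logger.severe(ex.getMessage());
  }
  // Returning an empty string lets the health loop report "not healthy yet"
  // and rely on the surrounding testUntil retry instead of failing on a null result.
  return (result == null || result.stdout() == null) ? "" : result.stdout().trim();
}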
