Skip to content

Commit

Permalink
ST: tests for CR statuses (#1844)
Browse files Browse the repository at this point in the history
* WIP: status tests for all components

* FIX: some fixes for status tests

* FIX: add correct messages to asserts

* FIX: change connects2i pod deletion

* fixup! FIX: change connects2i pod deletion

* FIX: fix test_only option in PR job

* fixup! FIX: fix test_only option in PR job

* fixup! fixup! FIX: fix test_only option in PR job

* fixup! fixup! fixup! FIX: fix test_only option in PR job

* fixup! fixup! fixup! fixup! FIX: fix test_only option in PR job
  • Loading branch information
Frawless authored and scholzj committed Jul 29, 2019
1 parent 349995f commit fc94de0
Show file tree
Hide file tree
Showing 9 changed files with 284 additions and 46 deletions.
34 changes: 25 additions & 9 deletions Jenkinsfile-pr
Expand Up @@ -17,16 +17,12 @@ pipeline {
ansiColor('xterm')
}
environment {
DOCKER_ORG="strimzi"
DOCKER_REGISTRY="localhost:5000"
DOCKER_TAG="pr"
OPENSHIFT_URL="https://github.com/openshift/origin/releases/download/v3.11.0/openshift-origin-client-tools-v3.11.0-0cbc58b-linux-64bit.tar.gz"
// Workaround for skip flaky tests
MVN_ARGS="-Dfailsafe.rerunFailingTestsCount=5"

ARTIFACTS_DIR = 'systemtest/target/logs'
JOB_NAME_SUB = "${String.format("%.15s", JOB_NAME).toLowerCase()}"
TEST_ONLY="false"
}
stages {
stage('Clean WS') {
Expand All @@ -42,6 +38,7 @@ pipeline {
stage('Parse parameters from comment') {
steps {
script {
echo "Comment body: ${env.ghprbCommentBody}"
env.TEST_CASE = params.TEST_CASE
env.TEST_PROFILE = params.TEST_PROFILE
if (env.ghprbCommentBody.contains('testcase=')) {
Expand All @@ -52,10 +49,21 @@ pipeline {
env.TEST_PROFILE = env.ghprbCommentBody.split('profile=')[1].split(/\s/)[0]
}
echo "TEST_PROFILE: ${env.TEST_PROFILE}"
if (env.ghprbCommentBody.contains('testonly')) {
env.TEST_ONLY = "true"
if (env.ghprbCommentBody.contains('test_only')) {
env.TEST_ONLY = true
env.DOCKER_REGISTRY = "docker.io"
env.DOCKER_ORG="strimzi"
env.DOCKER_TAG = "latest"
} else {
env.TEST_ONLY = false
env.DOCKER_ORG="strimzi"
env.DOCKER_REGISTRY="localhost:5000"
env.DOCKER_TAG="pr"
}
echo "TEST_ONLY: ${env.TEST_ONLY}"
echo "DOCKER_REGISTRY: ${env.DOCKER_REGISTRY}"
echo "DOCKER_ORG: ${env.DOCKER_ORG}"
echo "DOCKER_TAG: ${env.DOCKER_TAG}"
}
}
}
Expand All @@ -69,12 +77,20 @@ pipeline {
}
}
}
stage('Build project') {
steps {
script {
sh "mvn clean install -DskipTests"
}
}
}
stage('Build images') {
when {
environment name: 'TEST_ONLY', value: 'false'
}
steps {
script {
if (env.TEST_ONLY.equals("false")) {
lib.buildStrimzi()
}
lib.buildStrimziImages()
}
}
}
Expand Down
3 changes: 1 addition & 2 deletions jenkins.groovy
Expand Up @@ -63,8 +63,7 @@ def clearImages() {
}


def buildStrimzi() {
sh "mvn clean install -DskipTests"
def buildStrimziImages() {
sh "make docker_build"
sh "make docker_tag"
}
Expand Down
15 changes: 15 additions & 0 deletions systemtest/src/main/java/io/strimzi/systemtest/AbstractST.java
Expand Up @@ -15,18 +15,24 @@
import io.fabric8.kubernetes.client.CustomResourceList;
import io.fabric8.kubernetes.client.dsl.Resource;
import io.strimzi.api.kafka.Crds;
import io.strimzi.api.kafka.KafkaBridgeList;
import io.strimzi.api.kafka.KafkaConnectList;
import io.strimzi.api.kafka.KafkaConnectS2IList;
import io.strimzi.api.kafka.KafkaList;
import io.strimzi.api.kafka.KafkaMirrorMakerList;
import io.strimzi.api.kafka.KafkaTopicList;
import io.strimzi.api.kafka.KafkaUserList;
import io.strimzi.api.kafka.model.DoneableKafka;
import io.strimzi.api.kafka.model.DoneableKafkaBridge;
import io.strimzi.api.kafka.model.DoneableKafkaConnect;
import io.strimzi.api.kafka.model.DoneableKafkaConnectS2I;
import io.strimzi.api.kafka.model.DoneableKafkaMirrorMaker;
import io.strimzi.api.kafka.model.DoneableKafkaTopic;
import io.strimzi.api.kafka.model.DoneableKafkaUser;
import io.strimzi.api.kafka.model.Kafka;
import io.strimzi.api.kafka.model.KafkaBridge;
import io.strimzi.api.kafka.model.KafkaConnect;
import io.strimzi.api.kafka.model.KafkaConnectS2I;
import io.strimzi.api.kafka.model.KafkaMirrorMaker;
import io.strimzi.api.kafka.model.KafkaResources;
import io.strimzi.api.kafka.model.KafkaTopic;
Expand Down Expand Up @@ -208,6 +214,14 @@ void replaceMirrorMakerResource(String resourceName, Consumer<KafkaMirrorMaker>
replaceCrdResource(KafkaMirrorMaker.class, KafkaMirrorMakerList.class, DoneableKafkaMirrorMaker.class, resourceName, editor);
}

void replaceBridgeResource(String resourceName, Consumer<KafkaBridge> editor) {
replaceCrdResource(KafkaBridge.class, KafkaBridgeList.class, DoneableKafkaBridge.class, resourceName, editor);
}

void replaceConnectS2IResource(String resourceName, Consumer<KafkaConnectS2I> editor) {
replaceCrdResource(KafkaConnectS2I.class, KafkaConnectS2IList.class, DoneableKafkaConnectS2I.class, resourceName, editor);
}

String getBrokerApiVersions(String podName) {
AtomicReference<String> versions = new AtomicReference<>();
waitFor("kafka-broker-api-versions.sh success", Constants.GET_BROKER_API_INTERVAL, Constants.GET_BROKER_API_TIMEOUT, () -> {
Expand Down Expand Up @@ -941,6 +955,7 @@ void teardownEnvironmentMethod(ExtensionContext context) throws Exception {
if (Environment.SKIP_TEARDOWN == null) {
if (context.getExecutionException().isPresent()) {
LOGGER.info("Test execution contains exception, going to recreate test environment");
context.getExecutionException().get().printStackTrace();
recreateTestEnv(clusterOperatorNamespace, bindingsNamespaces);
LOGGER.info("Env recreated.");
}
Expand Down
2 changes: 2 additions & 0 deletions systemtest/src/main/java/io/strimzi/systemtest/Constants.java
Expand Up @@ -33,6 +33,8 @@ public interface Constants {
long TIMEOUT_FOR_GET_SECRETS = Duration.ofMinutes(1).toMillis();
long TIMEOUT_TEARDOWN = Duration.ofSeconds(10).toMillis();
long GLOBAL_TIMEOUT = Duration.ofMinutes(5).toMillis();
long GLOBAL_STATUS_TIMEOUT = Duration.ofMinutes(3).toMillis();
long CONNECT_STATUS_TIMEOUT = Duration.ofMinutes(5).toMillis();
long GLOBAL_POLL_INTERVAL = Duration.ofSeconds(1).toMillis();

long CO_OPERATION_TIMEOUT = Duration.ofMinutes(1).toMillis();
Expand Down
12 changes: 7 additions & 5 deletions systemtest/src/main/java/io/strimzi/systemtest/Resources.java
Expand Up @@ -52,6 +52,7 @@
import io.strimzi.api.kafka.model.KafkaConnectBuilder;
import io.strimzi.api.kafka.model.KafkaConnectS2I;
import io.strimzi.api.kafka.model.KafkaConnectS2IBuilder;
import io.strimzi.api.kafka.model.KafkaConnectS2IResources;
import io.strimzi.api.kafka.model.KafkaMirrorMaker;
import io.strimzi.api.kafka.model.KafkaMirrorMakerBuilder;
import io.strimzi.api.kafka.model.KafkaResources;
Expand Down Expand Up @@ -578,13 +579,14 @@ private void waitForDeletion(KafkaConnect kafkaConnect) {
private void waitForDeletion(KafkaConnectS2I kafkaConnectS2I) {
LOGGER.info("Waiting when all the pods are terminated for Kafka Connect S2I {}", kafkaConnectS2I.getMetadata().getName());

client().listPods().stream()
.filter(p -> p.getMetadata().getName().contains("build"))
.forEach(p -> client().deletePod(p));
client().deleteDeploymentConfig(KafkaConnectS2IResources.buildConfigName(kafkaConnectS2I.getMetadata().getName()));

client().listPods().stream()
.filter(p -> p.getMetadata().getName().startsWith(kafkaConnectS2I.getMetadata().getName() + "-connect-"))
.forEach(p -> waitForPodDeletion(p.getMetadata().getName()));
.filter(p -> p.getMetadata().getName().contains("-connect-"))
.forEach(p -> {
LOGGER.debug("Deleting: {}", p.getMetadata().getName());
client().deletePod(p);
});
}

private void waitForDeletion(KafkaMirrorMaker kafkaMirrorMaker) {
Expand Down
Expand Up @@ -398,7 +398,7 @@ public static void waitForDeploymentConfigReady(String name, int expectedPods) {
LabelSelector deploymentConfigSelector = new LabelSelectorBuilder().addToMatchLabels(kubeClient().getDeploymentConfigSelectors(name)).build();
waitForPodsReady(deploymentConfigSelector, expectedPods, true);
String clusterOperatorPodName = kubeClient().listPods("name", "strimzi-cluster-operator").get(0).getMetadata().getName();
String log = "BuildConfigOperator:191 - BuildConfig " + name + " in namespace connect-s2i-cluster-test has been created";
String log = "BuildConfigOperator:191 - BuildConfig " + name + " in namespace " + kubeClient().getNamespace() + " has been created";

TestUtils.waitFor("build config creation " + name, Constants.POLL_INTERVAL_FOR_RESOURCE_READINESS, Constants.TIMEOUT_FOR_RESOURCE_READINESS,
() -> kubeClient().logs(clusterOperatorPodName).contains(log));
Expand Down

0 comments on commit fc94de0

Please sign in to comment.