diff --git a/.checkstyle/checkstyle.xml b/.checkstyle/checkstyle.xml index a3715ec24c..2e5fe3af90 100644 --- a/.checkstyle/checkstyle.xml +++ b/.checkstyle/checkstyle.xml @@ -44,7 +44,7 @@ - + diff --git a/CHANGELOG.md b/CHANGELOG.md index 03bdde7591..673ec5d692 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,7 @@ * Support pausing reconciliation of KafkaTopic CR with annotation `strimzi.io/pause-reconciliation` * Update cruise control to 2.5.55 * Support for broker load information added to the rebalance optimization proposal. Information on the load difference, before and after a rebalance is stored in a ConfigMap +* Add support for selectively changing the verbosity of logging for individual CRs, using markers. ### Changes, deprecations and removals diff --git a/api-conversion/src/main/java/io/strimzi/kafka/api/conversion/converter/Conversion.java b/api-conversion/src/main/java/io/strimzi/kafka/api/conversion/converter/Conversion.java index 713c36d129..0c9c034d5b 100644 --- a/api-conversion/src/main/java/io/strimzi/kafka/api/conversion/converter/Conversion.java +++ b/api-conversion/src/main/java/io/strimzi/kafka/api/conversion/converter/Conversion.java @@ -32,7 +32,7 @@ * @param The converted type */ public interface Conversion { - Logger log = LogManager.getLogger(Conversion.class); + Logger LOGGER = LogManager.getLogger(Conversion.class); Conversion NOOP = new Conversion<>() { @Override diff --git a/certificate-manager/src/main/java/io/strimzi/certs/OpenSslCertManager.java b/certificate-manager/src/main/java/io/strimzi/certs/OpenSslCertManager.java index b9044a1dbf..4a232a1b68 100644 --- a/certificate-manager/src/main/java/io/strimzi/certs/OpenSslCertManager.java +++ b/certificate-manager/src/main/java/io/strimzi/certs/OpenSslCertManager.java @@ -57,7 +57,7 @@ public class OpenSslCertManager implements CertManager { .appendOffsetId().toFormatter(); public static final int MAXIMUM_CN_LENGTH = 64; - private static final Logger log = LogManager.getLogger(OpenSslCertManager.class); + private static final Logger LOGGER = LogManager.getLogger(OpenSslCertManager.class); public static final ZoneId UTC = ZoneId.of("UTC"); public OpenSslCertManager() {} @@ -79,12 +79,12 @@ static void delete(Path fileOrDir) throws IOException { try { Files.delete(path); } catch (IOException e) { - log.debug("File could not be deleted: {}", fileOrDir); + LOGGER.debug("File could not be deleted: {}", fileOrDir); } }); } else { if (!Files.deleteIfExists(fileOrDir)) { - log.debug("File not deleted, because it did not exist: {}", fileOrDir); + LOGGER.debug("File not deleted, because it did not exist: {}", fileOrDir); } } } @@ -534,8 +534,8 @@ public OpensslArgs optArg(String opt, File file) throws IOException { return optArg(opt, file, false); } public OpensslArgs optArg(String opt, File file, boolean mayLog) throws IOException { - if (mayLog && log.isTraceEnabled()) { - log.trace("Contents of {} for option {} is:\n{}", file, opt, Files.readString(file.toPath())); + if (mayLog && LOGGER.isTraceEnabled()) { + LOGGER.trace("Contents of {} for option {} is:\n{}", file, opt, Files.readString(file.toPath())); } opt(opt); pb.command().add(file.getAbsolutePath()); @@ -614,7 +614,7 @@ public void exec(boolean failOnNonZero) throws IOException { pb.redirectErrorStream(true) .redirectOutput(out.toFile()); - log.debug("Running command {}", pb.command()); + LOGGER.debug("Running command {}", pb.command()); Process proc = pb.start(); @@ -626,18 +626,18 @@ public void exec(boolean failOnNonZero) throws IOException { if 
(failOnNonZero && result != 0) { String output = Files.readString(out, Charset.defaultCharset()); - if (!log.isDebugEnabled()) { + if (!LOGGER.isDebugEnabled()) { // Include the command if we've not logged it already - log.error("Got result {} from command {} with output\n{}", result, pb.command(), output); + LOGGER.error("Got result {} from command {} with output\n{}", result, pb.command(), output); } else { - log.error("Got result {} with output\n{}", result, output); + LOGGER.error("Got result {} with output\n{}", result, output); } throw new RuntimeException("openssl status code " + result); } else { - if (log.isTraceEnabled()) { - log.trace("Got output\n{}", Files.readString(out, Charset.defaultCharset())); + if (LOGGER.isTraceEnabled()) { + LOGGER.trace("Got output\n{}", Files.readString(out, Charset.defaultCharset())); } - log.debug("Got result {}", result); + LOGGER.debug("Got result {}", result); } } catch (InterruptedException ignored) { diff --git a/cluster-operator/pom.xml b/cluster-operator/pom.xml index 8c466f24b6..c89e57a542 100644 --- a/cluster-operator/pom.xml +++ b/cluster-operator/pom.xml @@ -122,11 +122,11 @@ org.apache.logging.log4j - log4j-api + log4j-core org.apache.logging.log4j - log4j-core + log4j-api org.apache.logging.log4j diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/ClusterOperator.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/ClusterOperator.java index a93debd710..89f04d878a 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/ClusterOperator.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/ClusterOperator.java @@ -23,8 +23,6 @@ import io.vertx.core.Handler; import io.vertx.core.Promise; import io.vertx.core.http.HttpServer; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import java.util.ArrayList; import java.util.List; @@ -34,6 +32,8 @@ import static java.util.Arrays.asList; import io.micrometer.prometheus.PrometheusMeterRegistry; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; /** * An "operator" for managing assemblies of various types in a particular namespace. 
@@ -42,7 +42,7 @@ */ public class ClusterOperator extends AbstractVerticle { - private static final Logger log = LogManager.getLogger(ClusterOperator.class.getName()); + private static final Logger LOGGER = LogManager.getLogger(ClusterOperator.class.getName()); public static final String STRIMZI_CLUSTER_OPERATOR_DOMAIN = "cluster.operator.strimzi.io"; private static final String NAME_SUFFIX = "-cluster-operator"; @@ -78,7 +78,7 @@ public ClusterOperator(String namespace, KafkaBridgeAssemblyOperator kafkaBridgeAssemblyOperator, KafkaRebalanceAssemblyOperator kafkaRebalanceAssemblyOperator, MetricsProvider metricsProvider) { - log.info("Creating ClusterOperator for namespace {}", namespace); + LOGGER.info("Creating ClusterOperator for namespace {}", namespace); this.namespace = namespace; this.config = config; this.client = client; @@ -95,7 +95,7 @@ public ClusterOperator(String namespace, @Override public void start(Promise start) { - log.info("Starting ClusterOperator for namespace {}", namespace); + LOGGER.info("Starting ClusterOperator for namespace {}", namespace); // Configure the executor here, but it is used only in other places getVertx().createSharedWorkerExecutor("kubernetes-ops-pool", config.getOperationsThreadPoolSize(), TimeUnit.SECONDS.toNanos(120)); @@ -109,7 +109,7 @@ public void start(Promise start) { } for (AbstractOperator operator : operators) { watchFutures.add(operator.createWatch(namespace, operator.recreateWatch(namespace)).compose(w -> { - log.info("Opened watch for {} operator", operator.kind()); + LOGGER.info("Opened watch for {} operator", operator.kind()); watchByKind.put(operator.kind(), w); return Future.succeededFuture(); })); @@ -120,9 +120,9 @@ public void start(Promise start) { CompositeFuture.join(watchFutures) .compose(f -> { - log.info("Setting up periodic reconciliation for namespace {}", namespace); + LOGGER.info("Setting up periodic reconciliation for namespace {}", namespace); this.reconcileTimer = vertx.setPeriodic(this.config.getReconciliationIntervalMs(), res2 -> { - log.info("Triggering periodic reconciliation for namespace {}...", namespace); + LOGGER.info("Triggering periodic reconciliation for namespace {}...", namespace); reconcileAll("timer"); }); return startHealthServer().map((Void) null); @@ -133,7 +133,7 @@ public void start(Promise start) { @Override public void stop(Promise stop) { - log.info("Stopping ClusterOperator for namespace {}", namespace); + LOGGER.info("Stopping ClusterOperator for namespace {}", namespace); vertx.cancelTimer(reconcileTimer); for (Watch watch : watchByKind.values()) { if (watch != null) { @@ -182,9 +182,9 @@ private Future startHealthServer() { }) .listen(HEALTH_SERVER_PORT, ar -> { if (ar.succeeded()) { - log.info("ClusterOperator is now ready (health server listening on {})", HEALTH_SERVER_PORT); + LOGGER.info("ClusterOperator is now ready (health server listening on {})", HEALTH_SERVER_PORT); } else { - log.error("Unable to bind health server on {}", HEALTH_SERVER_PORT, ar.cause()); + LOGGER.error("Unable to bind health server on {}", HEALTH_SERVER_PORT, ar.cause()); } result.handle(ar); }); diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/ClusterOperatorConfig.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/ClusterOperatorConfig.java index 3894130c6d..81c9986f8c 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/ClusterOperatorConfig.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/ClusterOperatorConfig.java @@ -33,7 +33,8 
@@ * Cluster Operator configuration */ public class ClusterOperatorConfig { - private static final Logger log = LogManager.getLogger(ClusterOperatorConfig.class.getName()); + private static final Logger LOGGER = LogManager.getLogger(ClusterOperatorConfig.class.getName()); + public static final String STRIMZI_NAMESPACE = "STRIMZI_NAMESPACE"; public static final String STRIMZI_FULL_RECONCILIATION_INTERVAL_MS = "STRIMZI_FULL_RECONCILIATION_INTERVAL_MS"; @@ -159,7 +160,7 @@ public static ClusterOperatorConfig fromMap(Map map) { */ private static void warningsForRemovedEndVars(Map map) { if (map.containsKey(STRIMZI_DEFAULT_TLS_SIDECAR_KAFKA_IMAGE)) { - log.warn("Kafka TLS sidecar container has been removed and the environment variable {} is not used anymore. " + + LOGGER.warn("Kafka TLS sidecar container has been removed and the environment variable {} is not used anymore. " + "You can remove it from the Strimzi Cluster Operator deployment.", STRIMZI_DEFAULT_TLS_SIDECAR_KAFKA_IMAGE); } } diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/Main.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/Main.java index d1a6108597..d5b8ba829e 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/Main.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/Main.java @@ -20,14 +20,13 @@ import io.strimzi.operator.cluster.operator.assembly.KafkaRebalanceAssemblyOperator; import io.strimzi.operator.cluster.operator.resource.ResourceOperatorSupplier; import io.strimzi.operator.common.PasswordGenerator; +import io.strimzi.operator.common.Reconciliation; import io.strimzi.operator.common.Util; import io.strimzi.operator.common.operator.resource.ClusterRoleOperator; import io.vertx.core.CompositeFuture; import io.vertx.core.Future; import io.vertx.core.Promise; import io.vertx.core.Vertx; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import java.io.BufferedReader; import java.io.IOException; @@ -43,10 +42,12 @@ import io.vertx.core.VertxOptions; import io.vertx.micrometer.MicrometerMetricsOptions; import io.vertx.micrometer.VertxPrometheusOptions; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; @SuppressFBWarnings("DM_EXIT") public class Main { - private static final Logger log = LogManager.getLogger(Main.class.getName()); + private static final Logger LOGGER = LogManager.getLogger(Main.class.getName()); static { try { @@ -57,9 +58,9 @@ public class Main { } public static void main(String[] args) { - log.info("ClusterOperator {} is starting", Main.class.getPackage().getImplementationVersion()); + LOGGER.info("ClusterOperator {} is starting", Main.class.getPackage().getImplementationVersion()); ClusterOperatorConfig config = ClusterOperatorConfig.fromMap(System.getenv()); - log.info("Cluster Operator configuration is {}", config); + LOGGER.info("Cluster Operator configuration is {}", config); String dnsCacheTtl = System.getenv("STRIMZI_DNS_CACHE_TTL") == null ? 
"30" : System.getenv("STRIMZI_DNS_CACHE_TTL"); Security.setProperty("networkaddress.cache.ttl", dnsCacheTtl); @@ -78,21 +79,21 @@ public static void main(String[] args) { if (crs.succeeded()) { PlatformFeaturesAvailability.create(vertx, client).onComplete(pfa -> { if (pfa.succeeded()) { - log.info("Environment facts gathered: {}", pfa.result()); + LOGGER.info("Environment facts gathered: {}", pfa.result()); run(vertx, client, pfa.result(), config).onComplete(ar -> { if (ar.failed()) { - log.error("Unable to start operator for 1 or more namespace", ar.cause()); + LOGGER.error("Unable to start operator for 1 or more namespace", ar.cause()); System.exit(1); } }); } else { - log.error("Failed to gather environment facts", pfa.cause()); + LOGGER.error("Failed to gather environment facts", pfa.cause()); System.exit(1); } }); } else { - log.error("Failed to create Cluster Roles", crs.cause()); + LOGGER.error("Failed to create Cluster Roles", crs.cause()); System.exit(1); } }); @@ -119,7 +120,7 @@ static CompositeFuture run(Vertx vertx, KubernetesClient client, PlatformFeature if (pfa.supportsS2I()) { kafkaConnectS2IClusterOperations = new KafkaConnectS2IAssemblyOperator(vertx, pfa, resourceOperatorSupplier, config); } else { - log.info("The KafkaConnectS2I custom resource definition can only be used in environment which supports OpenShift build, image and apps APIs. These APIs do not seem to be supported in this environment."); + LOGGER.info("The KafkaConnectS2I custom resource definition can only be used in environment which supports OpenShift build, image and apps APIs. These APIs do not seem to be supported in this environment."); } KafkaMirrorMaker2AssemblyOperator kafkaMirrorMaker2AssemblyOperator = @@ -152,9 +153,9 @@ static CompositeFuture run(Vertx vertx, KubernetesClient client, PlatformFeature vertx.deployVerticle(operator, res -> { if (res.succeeded()) { - log.info("Cluster Operator verticle started in namespace {} with label selector {}", namespace, config.getCustomResourceSelector()); + LOGGER.info("Cluster Operator verticle started in namespace {} with label selector {}", namespace, config.getCustomResourceSelector()); } else { - log.error("Cluster Operator verticle in namespace {} failed to start", namespace, res.cause()); + LOGGER.error("Cluster Operator verticle in namespace {} failed to start", namespace, res.cause()); System.exit(1); } prom.handle(res); @@ -176,17 +177,17 @@ static CompositeFuture run(Vertx vertx, KubernetesClient client, PlatformFeature clusterRoles.put("strimzi-kafka-client", "033-ClusterRole-strimzi-kafka-client.yaml"); for (Map.Entry clusterRole : clusterRoles.entrySet()) { - log.info("Creating cluster role {}", clusterRole.getKey()); + LOGGER.info("Creating cluster role {}", clusterRole.getKey()); try (BufferedReader br = new BufferedReader( new InputStreamReader(Main.class.getResourceAsStream("/cluster-roles/" + clusterRole.getValue()), StandardCharsets.UTF_8))) { String yaml = br.lines().collect(Collectors.joining(System.lineSeparator())); ClusterRole role = ClusterRoleOperator.convertYamlToClusterRole(yaml); - Future fut = cro.reconcile(role.getMetadata().getName(), role); + Future fut = cro.reconcile(new Reconciliation("start-cluster-operator", "Deployment", config.getOperatorNamespace(), "cluster-operator"), role.getMetadata().getName(), role); futures.add(fut); } catch (IOException e) { - log.error("Failed to create Cluster Roles.", e); + LOGGER.error("Failed to create Cluster Roles.", e); throw new RuntimeException(e); } diff --git 
a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/AbstractConfiguration.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/AbstractConfiguration.java index 91c250aaa5..6315dc0069 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/AbstractConfiguration.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/AbstractConfiguration.java @@ -5,9 +5,9 @@ package io.strimzi.operator.cluster.model; +import io.strimzi.operator.common.Reconciliation; +import io.strimzi.operator.common.ReconciliationLogger; import io.strimzi.operator.common.model.OrderedProperties; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import java.util.Collections; import java.util.List; @@ -20,7 +20,7 @@ * Abstract class for processing and generating configuration passed by the user. */ public abstract class AbstractConfiguration { - private static final Logger log = LogManager.getLogger(AbstractConfiguration.class.getName()); + private static final ReconciliationLogger LOGGER = ReconciliationLogger.create(AbstractConfiguration.class.getName()); private final OrderedProperties options = new OrderedProperties(); @@ -28,98 +28,105 @@ public abstract class AbstractConfiguration { * Constructor used to instantiate this class from String configuration. Should be used to create configuration * from the Assembly. * + * @param reconciliation The reconciliation * @param configuration Configuration in String format. Should contain zero or more lines with with key=value * pairs. * @param forbiddenPrefixes List with configuration key prefixes which are not allowed. All keys which start with one of * these prefixes will be ignored. */ - public AbstractConfiguration(String configuration, List forbiddenPrefixes) { + public AbstractConfiguration(Reconciliation reconciliation, String configuration, List forbiddenPrefixes) { options.addStringPairs(configuration); - filterForbidden(forbiddenPrefixes); + filterForbidden(reconciliation, forbiddenPrefixes); } /** * Constructor used to instantiate this class from String configuration. Should be used to create configuration * from the Assembly. * + * @param reconciliation The reconciliation * @param configuration Configuration in String format. Should contain zero or more lines with with key=value * pairs. * @param forbiddenPrefixes List with configuration key prefixes which are not allowed. All keys which start with one of * these prefixes will be ignored. * @param defaults Properties object with default options */ - public AbstractConfiguration(String configuration, List forbiddenPrefixes, Map defaults) { + public AbstractConfiguration(Reconciliation reconciliation, String configuration, List forbiddenPrefixes, Map defaults) { options.addMapPairs(defaults); options.addStringPairs(configuration); - filterForbidden(forbiddenPrefixes); + filterForbidden(reconciliation, forbiddenPrefixes); } /** * Constructor used to instantiate this class from JsonObject. Should be used to create configuration from * ConfigMap / CRD. * + * @param reconciliation The reconciliation * @param jsonOptions Json object with configuration options as key ad value pairs. * @param forbiddenPrefixes List with configuration key prefixes which are not allowed. All keys which start with one of * these prefixes will be ignored. 
*/ - public AbstractConfiguration(Iterable> jsonOptions, List forbiddenPrefixes) { + public AbstractConfiguration(Reconciliation reconciliation, Iterable> jsonOptions, List forbiddenPrefixes) { options.addIterablePairs(jsonOptions); - filterForbidden(forbiddenPrefixes); + filterForbidden(reconciliation, forbiddenPrefixes); } /** * Constructor used to instantiate this class from JsonObject. Should be used to create configuration from * ConfigMap / CRD. * + * @param reconciliation The reconciliation * @param jsonOptions Json object with configuration options as key ad value pairs. * @param forbiddenPrefixes List with configuration key prefixes which are not allowed. All keys which start with one of * these prefixes will be ignored. * @param forbiddenPrefixExceptions Exceptions excluded from forbidden prefix options checking */ - public AbstractConfiguration(Iterable> jsonOptions, List forbiddenPrefixes, List forbiddenPrefixExceptions) { + public AbstractConfiguration(Reconciliation reconciliation, Iterable> jsonOptions, List forbiddenPrefixes, List forbiddenPrefixExceptions) { options.addIterablePairs(jsonOptions); - filterForbidden(forbiddenPrefixes, forbiddenPrefixExceptions); + filterForbidden(reconciliation, forbiddenPrefixes, forbiddenPrefixExceptions); } /** * Constructor used to instantiate this class from JsonObject. Should be used to create configuration from * ConfigMap / CRD. * + * @param reconciliation The reconciliation * @param jsonOptions Json object with configuration options as key ad value pairs. * @param forbiddenPrefixes List with configuration key prefixes which are not allowed. All keys which start with one of * these prefixes will be ignored. * @param defaults Properties object with default options */ - public AbstractConfiguration(Iterable> jsonOptions, List forbiddenPrefixes, Map defaults) { + public AbstractConfiguration(Reconciliation reconciliation, Iterable> jsonOptions, List forbiddenPrefixes, Map defaults) { options.addMapPairs(defaults); options.addIterablePairs(jsonOptions); - filterForbidden(forbiddenPrefixes); + filterForbidden(reconciliation, forbiddenPrefixes); } /** * Constructor used to instantiate this class from JsonObject. Should be used to create configuration from * ConfigMap / CRD. * + * @param reconciliation The reconciliation * @param jsonOptions Json object with configuration options as key ad value pairs. * @param forbiddenPrefixes List with configuration key prefixes which are not allowed. All keys which start with one of * these prefixes will be ignored. * @param forbiddenPrefixExceptions Exceptions excluded from forbidden prefix options checking * @param defaults Properties object with default options */ - public AbstractConfiguration(Iterable> jsonOptions, List forbiddenPrefixes, List forbiddenPrefixExceptions, Map defaults) { + public AbstractConfiguration(Reconciliation reconciliation, Iterable> jsonOptions, List forbiddenPrefixes, List forbiddenPrefixExceptions, Map defaults) { options.addMapPairs(defaults); options.addIterablePairs(jsonOptions); - filterForbidden(forbiddenPrefixes, forbiddenPrefixExceptions); + filterForbidden(reconciliation, forbiddenPrefixes, forbiddenPrefixExceptions); } /** * Filters forbidden values from the configuration. * + * @param reconciliation The reconciliation * @param forbiddenPrefixes List with configuration key prefixes which are not allowed. All keys which start with one of * these prefixes will be ignored. 
* @param forbiddenPrefixExceptions Exceptions excluded from forbidden prefix options checking */ - private void filterForbidden(List forbiddenPrefixes, List forbiddenPrefixExceptions) { + private void filterForbidden(Reconciliation reconciliation, List forbiddenPrefixes, List forbiddenPrefixExceptions) { options.filter(k -> forbiddenPrefixes.stream().anyMatch(s -> { boolean forbidden = k.toLowerCase(Locale.ENGLISH).startsWith(s); if (forbidden) { @@ -127,16 +134,16 @@ private void filterForbidden(List forbiddenPrefixes, List forbid forbidden = false; } if (forbidden) { - log.warn("Configuration option \"{}\" is forbidden and will be ignored", k); + LOGGER.warnCr(reconciliation, "Configuration option \"{}\" is forbidden and will be ignored", k); } else { - log.trace("Configuration option \"{}\" is allowed and will be passed to the assembly", k); + LOGGER.traceCr(reconciliation, "Configuration option \"{}\" is allowed and will be passed to the assembly", k); } return forbidden; })); } - private void filterForbidden(List forbiddenPrefixes) { - this.filterForbidden(forbiddenPrefixes, Collections.emptyList()); + private void filterForbidden(Reconciliation reconciliation, List forbiddenPrefixes) { + this.filterForbidden(reconciliation, forbiddenPrefixes, Collections.emptyList()); } public String getConfigOption(String configOption) { diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/AbstractModel.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/AbstractModel.java index e4f86134e4..6cee20c84f 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/AbstractModel.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/AbstractModel.java @@ -83,11 +83,11 @@ import io.strimzi.api.kafka.model.template.PodManagementPolicy; import io.strimzi.operator.common.MetricsAndLogging; import io.strimzi.operator.common.Annotations; +import io.strimzi.operator.common.Reconciliation; +import io.strimzi.operator.common.ReconciliationLogger; import io.strimzi.operator.common.Util; import io.strimzi.operator.common.model.Labels; import io.strimzi.operator.common.model.OrderedProperties; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import java.io.IOException; import java.io.InputStream; @@ -109,7 +109,7 @@ public abstract class AbstractModel { public static final String STRIMZI_CLUSTER_OPERATOR_NAME = "strimzi-cluster-operator"; - protected static final Logger log = LogManager.getLogger(AbstractModel.class.getName()); + protected static final ReconciliationLogger LOGGER = ReconciliationLogger.create(AbstractModel.class.getName()); protected static final String LOG4J2_MONITOR_INTERVAL = "30"; protected static final String DEFAULT_JVM_XMS = "128M"; @@ -183,6 +183,7 @@ public abstract class AbstractModel { } } + protected final Reconciliation reconciliation; protected final String cluster; protected final String namespace; @@ -291,10 +292,12 @@ public abstract class AbstractModel { /** * Constructor * + * @param reconciliation The reconciliation * @param resource Kubernetes resource with metadata containing the namespace and cluster name * @param applicationName Name of the application that the extending class is deploying */ - protected AbstractModel(HasMetadata resource, String applicationName) { + protected AbstractModel(Reconciliation reconciliation, HasMetadata resource, String applicationName) { + this.reconciliation = reconciliation; this.cluster = resource.getMetadata().getName(); 
this.namespace = resource.getMetadata().getNamespace(); this.labels = Labels.generateDefaultLabels(resource, applicationName, STRIMZI_CLUSTER_OPERATOR_NAME); @@ -402,33 +405,34 @@ public OrderedProperties getDefaultLogConfig() { if (logConfigFileName == null || logConfigFileName.isEmpty()) { return new OrderedProperties(); } - return getOrderedProperties(getDefaultLogConfigFileName()); + return getOrderedProperties(reconciliation, getDefaultLogConfigFileName()); } /** * Read a config file and returns the properties in a deterministic order. * + * @param reconciliation The reconciliation * @param configFileName The filename. * @return The OrderedProperties of the inputted file. */ - public static OrderedProperties getOrderedProperties(String configFileName) { + public static OrderedProperties getOrderedProperties(Reconciliation reconciliation, String configFileName) { if (configFileName == null || configFileName.isEmpty()) { throw new IllegalArgumentException("configFileName must be non-empty string"); } OrderedProperties properties = new OrderedProperties(); InputStream is = AbstractModel.class.getResourceAsStream("/" + configFileName); if (is == null) { - log.warn("Cannot find resource '{}'", configFileName); + LOGGER.warnCr(reconciliation, "Cannot find resource '{}'", configFileName); } else { try { properties.addStringPairs(is); } catch (IOException e) { - log.warn("Unable to read default log config from '{}'", configFileName); + LOGGER.warnCr(reconciliation, "Unable to read default log config from '{}'", configFileName); } finally { try { is.close(); } catch (IOException e) { - log.error("Failed to close stream. Reason: " + e.getMessage()); + LOGGER.errorCr(reconciliation, "Failed to close stream. Reason: " + e.getMessage()); } } } @@ -437,6 +441,7 @@ public static OrderedProperties getOrderedProperties(String configFileName) { /** * Transforms map to log4j properties file format. + * * @param properties map of log4j properties. * @return log4j properties as a String. */ @@ -484,7 +489,7 @@ public String parseLogging(Logging logging, ConfigMap externalCm) { if (newRootLogger != null && !rootAppenderName.isEmpty() && !newRootLogger.contains(",")) { // this should never happen as appender name is added in default configuration - log.debug("Newly set rootLogger does not contain appender. Setting appender to {}.", rootAppenderName); + LOGGER.debugCr(reconciliation, "Newly set rootLogger does not contain appender. 
Setting appender to {}.", rootAppenderName); String level = newSettings.asMap().get("log4j.rootLogger"); newSettings.addPair("log4j.rootLogger", level + ", " + rootAppenderName); } @@ -510,7 +515,7 @@ public String parseLogging(Logging logging, ConfigMap externalCm) { throw new InvalidResourceException("Property logging.valueFrom has to be specified when using external logging."); } } else { - log.debug("logging is not set, using default loggers"); + LOGGER.debugCr(reconciliation, "logging is not set, using default loggers"); return createLog4jProperties(getDefaultLogConfig()); } } @@ -523,10 +528,10 @@ private String getRootAppenderNamesFromDefaultLoggingConfig(OrderedProperties ne if (tmp.length == 2) { appenderName = tmp[1].trim(); } else { - log.warn("Logging configuration for root logger does not contain appender."); + LOGGER.warnCr(reconciliation, "Logging configuration for root logger does not contain appender."); } } else { - log.warn("Logger log4j.rootLogger not set."); + LOGGER.warnCr(reconciliation, "Logger log4j.rootLogger not set."); } return appenderName; } @@ -574,13 +579,13 @@ protected String parseMetrics(ConfigMap externalCm) { if (getMetricsConfigInCm() != null) { if (getMetricsConfigInCm() instanceof JmxPrometheusExporterMetrics) { if (externalCm == null) { - log.warn("ConfigMap {} does not exist. Metrics disabled.", + LOGGER.warnCr(reconciliation, "ConfigMap {} does not exist. Metrics disabled.", ((JmxPrometheusExporterMetrics) getMetricsConfigInCm()).getValueFrom().getConfigMapKeyRef().getName()); throw new InvalidResourceException("ConfigMap " + ((JmxPrometheusExporterMetrics) getMetricsConfigInCm()).getValueFrom().getConfigMapKeyRef().getName() + " does not exist."); } else { String data = externalCm.getData().get(((JmxPrometheusExporterMetrics) getMetricsConfigInCm()).getValueFrom().getConfigMapKeyRef().getKey()); if (data == null) { - log.warn("ConfigMap {} does not contain specified key {}. Metrics disabled.", ((JmxPrometheusExporterMetrics) getMetricsConfigInCm()).getValueFrom().getConfigMapKeyRef().getName(), + LOGGER.warnCr(reconciliation, "ConfigMap {} does not contain specified key {}. 
Metrics disabled.", ((JmxPrometheusExporterMetrics) getMetricsConfigInCm()).getValueFrom().getConfigMapKeyRef().getName(), ((JmxPrometheusExporterMetrics) getMetricsConfigInCm()).getValueFrom().getConfigMapKeyRef().getKey()); throw new InvalidResourceException("ConfigMap " + ((JmxPrometheusExporterMetrics) getMetricsConfigInCm()).getValueFrom().getConfigMapKeyRef().getName() + " does not contain specified key " + ((JmxPrometheusExporterMetrics) getMetricsConfigInCm()).getValueFrom().getConfigMapKeyRef().getKey() + "."); @@ -599,7 +604,7 @@ protected String parseMetrics(ConfigMap externalCm) { } } } else { - log.warn("Unknown type of metrics {}.", getMetricsConfigInCm().getClass()); + LOGGER.warnCr(reconciliation, "Unknown type of metrics {}.", getMetricsConfigInCm().getClass()); throw new InvalidResourceException("Unknown type of metrics " + getMetricsConfigInCm().getClass() + "."); } } @@ -692,6 +697,7 @@ protected static void validatePersistentStorage(Storage storage) { /** * Checks if the supplied PersistentClaimStorage has a valid size + * * @param storage * * @throws InvalidResourceException if the persistent storage size is not valid @@ -822,13 +828,13 @@ protected ContainerPort createContainerPort(String name, int port, String protoc .withProtocol(protocol) .withContainerPort(port) .build(); - log.trace("Created container port {}", containerPort); + LOGGER.traceCr(reconciliation, "Created container port {}", containerPort); return containerPort; } protected ServicePort createServicePort(String name, int port, int targetPort, String protocol) { ServicePort servicePort = createServicePort(name, port, targetPort, null, protocol); - log.trace("Created service port {}", servicePort); + LOGGER.traceCr(reconciliation, "Created service port {}", servicePort); return servicePort; } @@ -842,7 +848,7 @@ protected ServicePort createServicePort(String name, int port, int targetPort, I builder.withNodePort(nodePort); } ServicePort servicePort = builder.build(); - log.trace("Created service port {}", servicePort); + LOGGER.traceCr(reconciliation, "Created service port {}", servicePort); return servicePort; } @@ -954,7 +960,7 @@ protected Service createService(String name, String type, List port service.getSpec().setIpFamilies(ipFamilies.stream().map(IpFamily::toValue).collect(Collectors.toList())); } - log.trace("Created service {}", service); + LOGGER.traceCr(reconciliation, "Created service {}", service); return service; } @@ -991,7 +997,7 @@ protected Service createHeadlessService(List ports) { service.getSpec().setIpFamilies(templateHeadlessServiceIpFamilies.stream().map(IpFamily::toValue).collect(Collectors.toList())); } - log.trace("Created headless service {}", service); + LOGGER.traceCr(reconciliation, "Created headless service {}", service); return service; } @@ -1516,7 +1522,7 @@ protected void addContainerEnvsToExistingEnvs(List existingEnvs, List volumeList, String volumeNamePrefi /** * Creates the VolumeMounts used for authentication of Kafka client based components - * * @param authentication Authentication object from CRD * @param volumeMountList List where the volumes will be added * @param tlsVolumeMount Path where the TLS certs should be mounted @@ -164,7 +158,6 @@ public static void configureClientAuthenticationVolumeMounts(KafkaClientAuthenti /** * Creates the VolumeMounts used for authentication of Kafka client based components - * * @param authentication Authentication object from CRD * @param volumeMountList List where the volume mounts will be added * @param tlsVolumeMount 
Path where the TLS certs should be mounted diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/ClusterCa.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/ClusterCa.java index de2d81d318..20db8a6799 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/ClusterCa.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/ClusterCa.java @@ -12,6 +12,7 @@ import io.strimzi.certs.Subject; import io.strimzi.operator.cluster.ClusterOperator; import io.strimzi.operator.common.PasswordGenerator; +import io.strimzi.operator.common.Reconciliation; import java.io.IOException; import java.util.HashMap; @@ -34,11 +35,11 @@ public class ClusterCa extends Ca { private final Pattern ipv4Address = Pattern.compile("[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}"); - public ClusterCa(CertManager certManager, PasswordGenerator passwordGenerator, String clusterName, Secret caCertSecret, Secret caKeySecret) { - this(certManager, passwordGenerator, clusterName, caCertSecret, caKeySecret, 365, 30, true, null); + public ClusterCa(Reconciliation reconciliation, CertManager certManager, PasswordGenerator passwordGenerator, String clusterName, Secret caCertSecret, Secret caKeySecret) { + this(reconciliation, certManager, passwordGenerator, clusterName, caCertSecret, caKeySecret, 365, 30, true, null); } - public ClusterCa(CertManager certManager, + public ClusterCa(Reconciliation reconciliation, CertManager certManager, PasswordGenerator passwordGenerator, String clusterName, Secret clusterCaCert, @@ -47,12 +48,12 @@ public ClusterCa(CertManager certManager, int renewalDays, boolean generateCa, CertificateExpirationPolicy policy) { - super(certManager, passwordGenerator, "cluster-ca", + super(reconciliation, certManager, passwordGenerator, + "cluster-ca", AbstractModel.clusterCaCertSecretName(clusterName), forceRenewal(clusterCaCert, clusterCaKey, "cluster-ca.key"), AbstractModel.clusterCaKeySecretName(clusterName), - adapt060ClusterCaSecret(clusterCaKey), - validityDays, renewalDays, generateCa, policy); + adapt060ClusterCaSecret(clusterCaKey), validityDays, renewalDays, generateCa, policy); this.clusterName = clusterName; } @@ -143,8 +144,9 @@ public Map generateZkCerts(Kafka kafka, boolean isMaintenanc return subject; }; - log.debug("{}: Reconciling zookeeper certificates", this); + LOGGER.debugCr(reconciliation, "{}: Reconciling zookeeper certificates", this); return maybeCopyOrGenerateCerts( + reconciliation, kafka.getSpec().getZookeeper().getReplicas(), subjectFn, zkNodesSecret, @@ -153,7 +155,7 @@ public Map generateZkCerts(Kafka kafka, boolean isMaintenanc } public Map generateBrokerCerts(Kafka kafka, Set externalBootstrapAddresses, - Map> externalAddresses, boolean isMaintenanceTimeWindowsSatisfied) throws IOException { + Map> externalAddresses, boolean isMaintenanceTimeWindowsSatisfied) throws IOException { String cluster = kafka.getMetadata().getName(); String namespace = kafka.getMetadata().getNamespace(); @@ -202,8 +204,9 @@ public Map generateBrokerCerts(Kafka kafka, Set exte return subject; }; - log.debug("{}: Reconciling kafka broker certificates", this); + LOGGER.debugCr(reconciliation, "{}: Reconciling kafka broker certificates", this); return maybeCopyOrGenerateCerts( + reconciliation, kafka.getSpec().getKafka().getReplicas(), subjectFn, brokersSecret, diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/CruiseControl.java 
b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/CruiseControl.java index ee128ab551..b537bcaf9a 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/CruiseControl.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/CruiseControl.java @@ -44,6 +44,7 @@ import io.strimzi.operator.cluster.model.cruisecontrol.Capacity; import io.strimzi.operator.cluster.operator.resource.cruisecontrol.CruiseControlConfigurationParameters; import io.strimzi.operator.common.Annotations; +import io.strimzi.operator.common.Reconciliation; import io.strimzi.operator.common.model.Labels; import io.strimzi.operator.common.model.OrderedProperties; @@ -134,10 +135,11 @@ public class CruiseControl extends AbstractModel { /** * Constructor * + * @param reconciliation The reconciliation * @param resource Kubernetes resource with metadata containing the namespace and cluster name */ - protected CruiseControl(HasMetadata resource) { - super(resource, APPLICATION_NAME); + protected CruiseControl(Reconciliation reconciliation, HasMetadata resource) { + super(reconciliation, resource, APPLICATION_NAME); this.name = CruiseControlResources.deploymentName(cluster); this.serviceName = CruiseControlResources.serviceName(cluster); this.ancillaryConfigMapName = metricAndLogConfigsName(cluster); @@ -171,13 +173,13 @@ protected static String defaultBootstrapServers(String cluster) { } @SuppressWarnings("deprecation") - public static CruiseControl fromCrd(Kafka kafkaAssembly, KafkaVersion.Lookup versions) { + public static CruiseControl fromCrd(Reconciliation reconciliation, Kafka kafkaAssembly, KafkaVersion.Lookup versions) { CruiseControl cruiseControl = null; CruiseControlSpec spec = kafkaAssembly.getSpec().getCruiseControl(); KafkaClusterSpec kafkaClusterSpec = kafkaAssembly.getSpec().getKafka(); if (spec != null) { - cruiseControl = new CruiseControl(kafkaAssembly); + cruiseControl = new CruiseControl(reconciliation, kafkaAssembly); cruiseControl.isDeployed = true; cruiseControl.setReplicas(DEFAULT_REPLICAS); @@ -201,9 +203,9 @@ public static CruiseControl fromCrd(Kafka kafkaAssembly, KafkaVersion.Lookup ver cruiseControl.tlsSidecarImage = tlsSideCarImage; cruiseControl.setTlsSidecar(tlsSidecar); - cruiseControl = updateConfiguration(spec, cruiseControl); + cruiseControl = cruiseControl.updateConfiguration(spec); - KafkaConfiguration configuration = new KafkaConfiguration(kafkaClusterSpec.getConfig().entrySet()); + KafkaConfiguration configuration = new KafkaConfiguration(reconciliation, kafkaClusterSpec.getConfig().entrySet()); if (configuration.getConfigOption(MIN_INSYNC_REPLICAS) != null) { cruiseControl.minInsyncReplicas = configuration.getConfigOption(MIN_INSYNC_REPLICAS); } @@ -239,8 +241,8 @@ public static CruiseControl fromCrd(Kafka kafkaAssembly, KafkaVersion.Lookup ver return cruiseControl; } - public static CruiseControl updateConfiguration(CruiseControlSpec spec, CruiseControl cruiseControl) { - CruiseControlConfiguration userConfiguration = new CruiseControlConfiguration(spec.getConfig().entrySet()); + public CruiseControl updateConfiguration(CruiseControlSpec spec) { + CruiseControlConfiguration userConfiguration = new CruiseControlConfiguration(reconciliation, spec.getConfig().entrySet()); for (Map.Entry defaultEntry : CruiseControlConfiguration.getCruiseControlDefaultPropertiesMap().entrySet()) { if (userConfiguration.getConfigOption(defaultEntry.getKey()) == null) { userConfiguration.setConfigOption(defaultEntry.getKey(), defaultEntry.getValue()); 
@@ -248,8 +250,8 @@ public static CruiseControl updateConfiguration(CruiseControlSpec spec, CruiseCo } // Ensure that the configured anomaly.detection.goals are a sub-set of the default goals checkGoals(userConfiguration); - cruiseControl.setConfiguration(userConfiguration); - return cruiseControl; + this.setConfiguration(userConfiguration); + return this; } /** @@ -259,7 +261,7 @@ public static CruiseControl updateConfiguration(CruiseControlSpec spec, CruiseCo * @param configuration The configuration instance to be checked. * @throws UnsupportedOperationException If the configuration contains self.healing.goals configurations. */ - public static void checkGoals(CruiseControlConfiguration configuration) { + public void checkGoals(CruiseControlConfiguration configuration) { // If self healing goals are defined then these take precedence. // Right now, self.healing.goals must either be null or an empty list if (configuration.getConfigOption(CruiseControlConfigurationParameters.CRUISE_CONTROL_SELF_HEALING_CONFIG_KEY.toString()) != null) { @@ -285,7 +287,7 @@ public static void checkGoals(CruiseControlConfiguration configuration) { // If the anomaly detection goals contain goals which are not in the default goals then the CC startup // checks will fail, so we make the anomaly goals match the default goals configuration.setConfigOption(CruiseControlConfigurationParameters.CRUISE_CONTROL_ANOMALY_DETECTION_CONFIG_KEY.toString(), defaultGoalsString); - log.warn("Anomaly goals contained goals which are not in the configured default goals. Anomaly goals have " + + LOGGER.warnCr(reconciliation, "Anomaly goals contained goals which are not in the configured default goals. Anomaly goals have " + "been changed to match the specified default goals."); } } @@ -547,7 +549,7 @@ public Secret generateSecret(ClusterCa clusterCa, boolean isMaintenanceTimeWindo return null; } Secret secret = clusterCa.cruiseControlSecret(); - return ModelUtils.buildSecret(clusterCa, secret, namespace, CruiseControl.secretName(cluster), name, "cruise-control", labels, createOwnerReference(), isMaintenanceTimeWindowsSatisfied); + return ModelUtils.buildSecret(reconciliation, clusterCa, secret, namespace, CruiseControl.secretName(cluster), name, "cruise-control", labels, createOwnerReference(), isMaintenanceTimeWindowsSatisfied); } /** @@ -615,7 +617,7 @@ public NetworkPolicy generateNetworkPolicy(String operatorNamespace, Labels oper .endSpec() .build(); - log.trace("Created network policy {}", networkPolicy); + LOGGER.traceCr(reconciliation, "Created network policy {}", networkPolicy); return networkPolicy; } diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/CruiseControlConfiguration.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/CruiseControlConfiguration.java index 3bb6b50d82..d54a26284e 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/CruiseControlConfiguration.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/CruiseControlConfiguration.java @@ -8,6 +8,7 @@ import io.strimzi.api.kafka.model.CruiseControlSpec; import io.strimzi.operator.cluster.operator.resource.cruisecontrol.CruiseControlGoals; import io.strimzi.operator.cluster.operator.resource.cruisecontrol.CruiseControlConfigurationParameters; +import io.strimzi.operator.common.Reconciliation; import java.util.Arrays; import java.util.Collections; @@ -106,14 +107,15 @@ public class CruiseControlConfiguration extends AbstractConfiguration { * Constructor used to 
instantiate this class from JsonObject. Should be used to create configuration from * ConfigMap / CRD. * + * @param reconciliation The reconciliation * @param jsonOptions Json object with configuration options as key ad value pairs. */ - public CruiseControlConfiguration(Iterable> jsonOptions) { - super(jsonOptions, FORBIDDEN_PREFIXES, FORBIDDEN_PREFIX_EXCEPTIONS); + public CruiseControlConfiguration(Reconciliation reconciliation, Iterable> jsonOptions) { + super(reconciliation, jsonOptions, FORBIDDEN_PREFIXES, FORBIDDEN_PREFIX_EXCEPTIONS); } - private CruiseControlConfiguration(String configuration, List forbiddenPrefixes) { - super(configuration, forbiddenPrefixes); + private CruiseControlConfiguration(Reconciliation reconciliation, String configuration, List forbiddenPrefixes) { + super(reconciliation, configuration, forbiddenPrefixes); } public static Map getCruiseControlDefaultPropertiesMap() { diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/EntityOperator.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/EntityOperator.java index 36f82ebca5..90b241fca1 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/EntityOperator.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/EntityOperator.java @@ -33,6 +33,7 @@ import io.strimzi.api.kafka.model.template.EntityOperatorTemplate; import io.strimzi.operator.cluster.ClusterOperatorConfig; import io.strimzi.operator.cluster.Main; +import io.strimzi.operator.common.Reconciliation; import java.io.BufferedReader; import java.io.IOException; @@ -79,8 +80,8 @@ public class EntityOperator extends AbstractModel { /** */ - protected EntityOperator(HasMetadata resource) { - super(resource, APPLICATION_NAME); + protected EntityOperator(Reconciliation reconciliation, HasMetadata resource) { + super(reconciliation, resource, APPLICATION_NAME); this.name = entityOperatorName(cluster); this.replicas = EntityOperatorSpec.DEFAULT_REPLICAS; this.zookeeperConnect = defaultZookeeperConnect(cluster); @@ -137,21 +138,22 @@ public boolean isDeployed() { /** * Create a Entity Operator from given desired resource * + * @param reconciliation The reconciliation * @param kafkaAssembly desired resource with cluster configuration containing the Entity Operator one * @param versions The versions. 
* @return Entity Operator instance, null if not configured in the ConfigMap */ - public static EntityOperator fromCrd(Kafka kafkaAssembly, KafkaVersion.Lookup versions) { + public static EntityOperator fromCrd(Reconciliation reconciliation, Kafka kafkaAssembly, KafkaVersion.Lookup versions) { EntityOperator result = null; EntityOperatorSpec entityOperatorSpec = kafkaAssembly.getSpec().getEntityOperator(); if (entityOperatorSpec != null) { - result = new EntityOperator(kafkaAssembly); + result = new EntityOperator(reconciliation, kafkaAssembly); result.setOwnerReference(kafkaAssembly); - EntityTopicOperator topicOperator = EntityTopicOperator.fromCrd(kafkaAssembly); - EntityUserOperator userOperator = EntityUserOperator.fromCrd(kafkaAssembly); + EntityTopicOperator topicOperator = EntityTopicOperator.fromCrd(reconciliation, kafkaAssembly); + EntityUserOperator userOperator = EntityUserOperator.fromCrd(reconciliation, kafkaAssembly); TlsSidecar tlsSidecar = entityOperatorSpec.getTlsSidecar(); if (entityOperatorSpec.getTemplate() != null) { @@ -217,7 +219,7 @@ protected String getDefaultLogConfigFileName() { public Deployment generateDeployment(boolean isOpenShift, Map annotations, ImagePullPolicy imagePullPolicy, List imagePullSecrets) { if (!isDeployed()) { - log.warn("Topic and/or User Operators not declared: Entity Operator will not be deployed"); + LOGGER.warnCr(reconciliation, "Topic and/or User Operators not declared: Entity Operator will not be deployed"); return null; } @@ -321,7 +323,7 @@ public Secret generateSecret(ClusterCa clusterCa, boolean isMaintenanceTimeWindo return null; } Secret secret = clusterCa.entityOperatorSecret(); - return ModelUtils.buildSecret(clusterCa, secret, namespace, EntityOperator.secretName(cluster), name, + return ModelUtils.buildSecret(reconciliation, clusterCa, secret, namespace, EntityOperator.secretName(cluster), name, "entity-operator", labels, createOwnerReference(), isMaintenanceTimeWindowsSatisfied); } @@ -386,7 +388,7 @@ public Role generateRole(String ownerNamespace, String namespace) { ClusterRole cr = yamlReader.readValue(yaml, ClusterRole.class); rules = cr.getRules(); } catch (IOException e) { - log.error("Failed to read entity-operator ClusterRole.", e); + LOGGER.errorCr(reconciliation, "Failed to read entity-operator ClusterRole.", e); throw new RuntimeException(e); } diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/EntityTopicOperator.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/EntityTopicOperator.java index 7f0c98dd0c..a3f6cda195 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/EntityTopicOperator.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/EntityTopicOperator.java @@ -23,6 +23,7 @@ import io.strimzi.api.kafka.model.Probe; import io.strimzi.api.kafka.model.ProbeBuilder; import io.strimzi.operator.cluster.ClusterOperatorConfig; +import io.strimzi.operator.common.Reconciliation; import io.strimzi.operator.common.model.OrderedProperties; import java.util.ArrayList; @@ -76,10 +77,11 @@ public class EntityTopicOperator extends AbstractModel { protected SecurityContext templateContainerSecurityContext; /** + * @param reconciliation The reconciliation * @param resource Kubernetes resource with metadata containing the namespace and cluster name */ - protected EntityTopicOperator(HasMetadata resource) { - super(resource, APPLICATION_NAME); + protected EntityTopicOperator(Reconciliation reconciliation, HasMetadata resource) { + 
super(reconciliation, resource, APPLICATION_NAME); this.name = topicOperatorName(cluster); this.readinessPath = "/"; this.readinessProbeOptions = DEFAULT_HEALTHCHECK_OPTIONS; @@ -200,10 +202,11 @@ public String getAncillaryConfigMapKeyLogConfig() { /** * Create an Entity Topic Operator from given desired resource * + * @param reconciliation The reconciliation * @param kafkaAssembly desired resource with cluster configuration containing the Entity Topic Operator one * @return Entity Topic Operator instance, null if not configured in the ConfigMap */ - public static EntityTopicOperator fromCrd(Kafka kafkaAssembly) { + public static EntityTopicOperator fromCrd(Reconciliation reconciliation, Kafka kafkaAssembly) { EntityTopicOperator result = null; EntityOperatorSpec entityOperatorSpec = kafkaAssembly.getSpec().getEntityOperator(); if (entityOperatorSpec != null) { @@ -212,7 +215,7 @@ public static EntityTopicOperator fromCrd(Kafka kafkaAssembly) { if (topicOperatorSpec != null) { String namespace = kafkaAssembly.getMetadata().getNamespace(); - result = new EntityTopicOperator(kafkaAssembly); + result = new EntityTopicOperator(reconciliation, kafkaAssembly); result.setOwnerReference(kafkaAssembly); String image = topicOperatorSpec.getImage(); diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/EntityUserOperator.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/EntityUserOperator.java index 79f08ff31c..aaa5c99f21 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/EntityUserOperator.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/EntityUserOperator.java @@ -24,6 +24,7 @@ import io.strimzi.api.kafka.model.Probe; import io.strimzi.api.kafka.model.ProbeBuilder; import io.strimzi.operator.cluster.ClusterOperatorConfig; +import io.strimzi.operator.common.Reconciliation; import io.strimzi.operator.common.model.OrderedProperties; import java.util.ArrayList; @@ -82,10 +83,11 @@ public class EntityUserOperator extends AbstractModel { protected SecurityContext templateContainerSecurityContext; /** + * @param reconciliation The reconciliation * @param resource Kubernetes resource with metadata containing the namespace and cluster name */ - protected EntityUserOperator(HasMetadata resource) { - super(resource, APPLICATION_NAME); + protected EntityUserOperator(Reconciliation reconciliation, HasMetadata resource) { + super(reconciliation, resource, APPLICATION_NAME); this.name = userOperatorName(cluster); this.readinessPath = "/"; this.livenessProbeOptions = DEFAULT_HEALTHCHECK_OPTIONS; @@ -210,10 +212,11 @@ public String getAncillaryConfigMapKeyLogConfig() { /** * Create an Entity User Operator from given desired resource * + * @param reconciliation The reconciliation * @param kafkaAssembly desired resource with cluster configuration containing the Entity User Operator one * @return Entity User Operator instance, null if not configured in the ConfigMap */ - public static EntityUserOperator fromCrd(Kafka kafkaAssembly) { + public static EntityUserOperator fromCrd(Reconciliation reconciliation, Kafka kafkaAssembly) { EntityUserOperator result = null; EntityOperatorSpec entityOperatorSpec = kafkaAssembly.getSpec().getEntityOperator(); if (entityOperatorSpec != null) { @@ -222,7 +225,7 @@ public static EntityUserOperator fromCrd(Kafka kafkaAssembly) { if (userOperatorSpec != null) { String namespace = kafkaAssembly.getMetadata().getNamespace(); - result = new EntityUserOperator(kafkaAssembly); + result = new 
EntityUserOperator(reconciliation, kafkaAssembly); result.setOwnerReference(kafkaAssembly); String image = userOperatorSpec.getImage(); diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/JmxTrans.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/JmxTrans.java index faec248918..c2c92b56b9 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/JmxTrans.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/JmxTrans.java @@ -34,6 +34,7 @@ import io.strimzi.operator.cluster.model.components.JmxTransQueries; import io.strimzi.operator.cluster.model.components.JmxTransServer; import io.strimzi.operator.cluster.model.components.JmxTransServers; +import io.strimzi.operator.common.Reconciliation; import java.util.ArrayList; import java.util.Arrays; @@ -79,10 +80,11 @@ public class JmxTrans extends AbstractModel { /** * Constructor * + * @param reconciliation The reconciliation * @param resource Kubernetes resource with metadata containing the namespace and cluster name */ - protected JmxTrans(HasMetadata resource) { - super(resource, APPLICATION_NAME); + protected JmxTrans(Reconciliation reconciliation, HasMetadata resource) { + super(reconciliation, resource, APPLICATION_NAME); this.name = JmxTransResources.deploymentName(cluster); this.clusterName = cluster; this.replicas = 1; @@ -99,7 +101,7 @@ protected JmxTrans(HasMetadata resource) { this.isMetricsEnabled = true; } - public static JmxTrans fromCrd(Kafka kafkaAssembly, KafkaVersion.Lookup versions) { + public static JmxTrans fromCrd(Reconciliation reconciliation, Kafka kafkaAssembly, KafkaVersion.Lookup versions) { JmxTrans result = null; JmxTransSpec spec = kafkaAssembly.getSpec().getJmxTrans(); if (spec != null) { @@ -107,10 +109,10 @@ public static JmxTrans fromCrd(Kafka kafkaAssembly, KafkaVersion.Lookup versions String error = String.format("Can't start up JmxTrans '%s' in '%s' as Kafka spec.kafka.jmxOptions is not specified", JmxTransResources.deploymentName(kafkaAssembly.getMetadata().getName()), kafkaAssembly.getMetadata().getNamespace()); - log.warn(error); + LOGGER.warnCr(reconciliation, error); throw new InvalidResourceException(error); } - result = new JmxTrans(kafkaAssembly); + result = new JmxTrans(reconciliation, kafkaAssembly); result.isDeployed = true; if (kafkaAssembly.getSpec().getKafka().getJmxOptions().getAuthentication() instanceof KafkaJmxAuthenticationPassword) { @@ -202,7 +204,7 @@ private String generateJMXConfig(JmxTransSpec spec, int numOfBrokers) throws Jso try { return mapper.writeValueAsString(servers); } catch (JsonProcessingException e) { - log.error("Could not create JmxTrans config json because: " + e.getMessage()); + LOGGER.errorCr(reconciliation, "Could not create JmxTrans config json because: " + e.getMessage()); throw e; } } diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaBridgeCluster.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaBridgeCluster.java index af728ac28d..55890842f9 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaBridgeCluster.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaBridgeCluster.java @@ -32,6 +32,7 @@ import io.strimzi.api.kafka.model.template.KafkaBridgeTemplate; import io.strimzi.api.kafka.model.tracing.Tracing; import io.strimzi.operator.cluster.ClusterOperatorConfig; +import io.strimzi.operator.common.Reconciliation; import io.strimzi.operator.common.Util; 
import io.strimzi.operator.common.model.Labels; import io.strimzi.operator.common.model.OrderedProperties; @@ -120,10 +121,11 @@ public class KafkaBridgeCluster extends AbstractModel { /** * Constructor * + * @param reconciliation The reconciliation * @param resource Kubernetes resource with metadata containing the namespace and cluster name */ - protected KafkaBridgeCluster(HasMetadata resource) { - super(resource, APPLICATION_NAME); + protected KafkaBridgeCluster(Reconciliation reconciliation, HasMetadata resource) { + super(reconciliation, resource, APPLICATION_NAME); this.name = KafkaBridgeResources.deploymentName(cluster); this.serviceName = KafkaBridgeResources.serviceName(cluster); this.ancillaryConfigMapName = KafkaBridgeResources.metricsAndLogConfigMapName(cluster); @@ -139,9 +141,9 @@ protected KafkaBridgeCluster(HasMetadata resource) { this.logAndMetricsConfigMountPath = "/opt/strimzi/custom-config/"; } - public static KafkaBridgeCluster fromCrd(KafkaBridge kafkaBridge, KafkaVersion.Lookup versions) { + public static KafkaBridgeCluster fromCrd(Reconciliation reconciliation, KafkaBridge kafkaBridge, KafkaVersion.Lookup versions) { - KafkaBridgeCluster kafkaBridgeCluster = new KafkaBridgeCluster(kafkaBridge); + KafkaBridgeCluster kafkaBridgeCluster = new KafkaBridgeCluster(reconciliation, kafkaBridge); KafkaBridgeSpec spec = kafkaBridge.getSpec(); kafkaBridgeCluster.tracing = spec.getTracing(); @@ -172,7 +174,10 @@ public static KafkaBridgeCluster fromCrd(KafkaBridge kafkaBridge, KafkaVersion.L kafkaBridgeCluster.setTls(spec.getTls() != null ? spec.getTls() : null); - AuthenticationUtils.validateClientAuthentication(spec.getAuthentication(), spec.getTls() != null); + String warnMsg = AuthenticationUtils.validateClientAuthentication(spec.getAuthentication(), spec.getTls() != null); + if (!warnMsg.isEmpty()) { + LOGGER.warnCr(reconciliation, warnMsg); + } kafkaBridgeCluster.setAuthentication(spec.getAuthentication()); if (spec.getTemplate() != null) { @@ -207,7 +212,7 @@ public static KafkaBridgeCluster fromCrd(KafkaBridge kafkaBridge, KafkaVersion.L kafkaBridgeCluster.setHttpEnabled(true); kafkaBridgeCluster.setKafkaBridgeHttpConfig(spec.getHttp()); } else { - log.warn("No protocol specified."); + LOGGER.warnCr(reconciliation, "No protocol specified."); throw new InvalidResourceException("No protocol for communication with Bridge specified. Use HTTP."); } kafkaBridgeCluster.setOwnerReference(kafkaBridge); @@ -355,8 +360,8 @@ protected List getEnvVars() { } varList.add(buildEnvVar(ENV_VAR_KAFKA_BRIDGE_BOOTSTRAP_SERVERS, bootstrapServers)); - varList.add(buildEnvVar(ENV_VAR_KAFKA_BRIDGE_CONSUMER_CONFIG, kafkaBridgeConsumer == null ? "" : new KafkaBridgeConsumerConfiguration(kafkaBridgeConsumer.getConfig().entrySet()).getConfiguration())); - varList.add(buildEnvVar(ENV_VAR_KAFKA_BRIDGE_PRODUCER_CONFIG, kafkaBridgeProducer == null ? "" : new KafkaBridgeProducerConfiguration(kafkaBridgeProducer.getConfig().entrySet()).getConfiguration())); + varList.add(buildEnvVar(ENV_VAR_KAFKA_BRIDGE_CONSUMER_CONFIG, kafkaBridgeConsumer == null ? "" : new KafkaBridgeConsumerConfiguration(reconciliation, kafkaBridgeConsumer.getConfig().entrySet()).getConfiguration())); + varList.add(buildEnvVar(ENV_VAR_KAFKA_BRIDGE_PRODUCER_CONFIG, kafkaBridgeProducer == null ? 
"" : new KafkaBridgeProducerConfiguration(reconciliation, kafkaBridgeProducer.getConfig().entrySet()).getConfiguration())); varList.add(buildEnvVar(ENV_VAR_KAFKA_BRIDGE_ID, cluster)); varList.add(buildEnvVar(ENV_VAR_KAFKA_BRIDGE_HTTP_ENABLED, String.valueOf(httpEnabled))); diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaBridgeConsumerConfiguration.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaBridgeConsumerConfiguration.java index 89f8f5423d..8127407123 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaBridgeConsumerConfiguration.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaBridgeConsumerConfiguration.java @@ -6,6 +6,7 @@ package io.strimzi.operator.cluster.model; import io.strimzi.api.kafka.model.KafkaBridgeConsumerSpec; +import io.strimzi.operator.common.Reconciliation; import java.util.HashMap; import java.util.List; @@ -30,9 +31,10 @@ public class KafkaBridgeConsumerConfiguration extends AbstractConfiguration { * Constructor used to instantiate this class from JsonObject. Should be used to create configuration from * ConfigMap / CRD. * + * @param reconciliation The reconciliation * @param jsonOptions Json object with configuration options as key ad value pairs. */ - public KafkaBridgeConsumerConfiguration(Iterable> jsonOptions) { - super(jsonOptions, FORBIDDEN_PREFIXES, FORBIDDEN_PREFIX_EXCEPTIONS, DEFAULTS); + public KafkaBridgeConsumerConfiguration(Reconciliation reconciliation, Iterable> jsonOptions) { + super(reconciliation, jsonOptions, FORBIDDEN_PREFIXES, FORBIDDEN_PREFIX_EXCEPTIONS, DEFAULTS); } } diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaBridgeProducerConfiguration.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaBridgeProducerConfiguration.java index 58cd8486df..e146c4debb 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaBridgeProducerConfiguration.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaBridgeProducerConfiguration.java @@ -6,6 +6,7 @@ package io.strimzi.operator.cluster.model; import io.strimzi.api.kafka.model.KafkaBridgeProducerSpec; +import io.strimzi.operator.common.Reconciliation; import java.util.HashMap; import java.util.List; @@ -30,9 +31,10 @@ public class KafkaBridgeProducerConfiguration extends AbstractConfiguration { * Constructor used to instantiate this class from JsonObject. Should be used to create configuration from * ConfigMap / CRD. * + * @param reconciliation The reconciliation * @param jsonOptions Json object with configuration options as key ad value pairs. 
*/ - public KafkaBridgeProducerConfiguration(Iterable> jsonOptions) { - super(jsonOptions, FORBIDDEN_PREFIXES, FORBIDDEN_PREFIX_EXCEPTIONS, DEFAULTS); + public KafkaBridgeProducerConfiguration(Reconciliation reconciliation, Iterable> jsonOptions) { + super(reconciliation, jsonOptions, FORBIDDEN_PREFIXES, FORBIDDEN_PREFIX_EXCEPTIONS, DEFAULTS); } } diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaCluster.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaCluster.java index 293911e1e2..5ac6e31ad4 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaCluster.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaCluster.java @@ -78,6 +78,7 @@ import io.strimzi.operator.cluster.operator.resource.cruisecontrol.CruiseControlConfigurationParameters; import io.strimzi.operator.common.Annotations; import io.strimzi.operator.common.PasswordGenerator; +import io.strimzi.operator.common.Reconciliation; import io.strimzi.operator.common.Util; import io.strimzi.operator.common.model.Labels; import io.strimzi.operator.common.operator.resource.StatusUtils; @@ -237,10 +238,11 @@ public class KafkaCluster extends AbstractModel { /** * Constructor * + * @param reconciliation The reconciliation * @param resource Kubernetes resource with metadata containing the namespace and cluster name */ - private KafkaCluster(HasMetadata resource) { - super(resource, APPLICATION_NAME); + private KafkaCluster(Reconciliation reconciliation, HasMetadata resource) { + super(reconciliation, resource, APPLICATION_NAME); this.name = kafkaClusterName(cluster); this.serviceName = serviceName(cluster); this.headlessServiceName = headlessServiceName(cluster); @@ -358,13 +360,13 @@ public static String clientsCaCertSecretName(String cluster) { return KafkaResources.clientsCaCertificateSecretName(cluster); } - public static KafkaCluster fromCrd(Kafka kafkaAssembly, KafkaVersion.Lookup versions) { - return fromCrd(kafkaAssembly, versions, null, 0); + public static KafkaCluster fromCrd(Reconciliation reconciliation, Kafka kafkaAssembly, KafkaVersion.Lookup versions) { + return fromCrd(reconciliation, kafkaAssembly, versions, null, 0); } @SuppressWarnings({"checkstyle:MethodLength", "checkstyle:JavaNCSS", "deprecation"}) - public static KafkaCluster fromCrd(Kafka kafkaAssembly, KafkaVersion.Lookup versions, Storage oldStorage, int oldReplicas) { - KafkaCluster result = new KafkaCluster(kafkaAssembly); + public static KafkaCluster fromCrd(Reconciliation reconciliation, Kafka kafkaAssembly, KafkaVersion.Lookup versions, Storage oldStorage, int oldReplicas) { + KafkaCluster result = new KafkaCluster(reconciliation, kafkaAssembly); result.setOwnerReference(kafkaAssembly); @@ -413,9 +415,9 @@ public static KafkaCluster fromCrd(Kafka kafkaAssembly, KafkaVersion.Lookup vers // Handle Kafka broker configuration KafkaVersion desiredVersion = versions.version(kafkaClusterSpec.getVersion()); - KafkaConfiguration configuration = new KafkaConfiguration(kafkaClusterSpec.getConfig().entrySet()); + KafkaConfiguration configuration = new KafkaConfiguration(reconciliation, kafkaClusterSpec.getConfig().entrySet()); configureCruiseControlMetrics(kafkaAssembly, result, configuration); - validateConfiguration(kafkaAssembly, desiredVersion, configuration); + validateConfiguration(reconciliation, kafkaAssembly, desiredVersion, configuration); result.setConfiguration(configuration); // Parse different types of metrics configurations @@ -425,15 
+427,15 @@ public static KafkaCluster fromCrd(Kafka kafkaAssembly, KafkaVersion.Lookup vers Storage newStorage = kafkaClusterSpec.getStorage(); AbstractModel.validatePersistentStorage(newStorage); - StorageDiff diff = new StorageDiff(oldStorage, newStorage, oldReplicas, kafkaClusterSpec.getReplicas()); + StorageDiff diff = new StorageDiff(reconciliation, oldStorage, newStorage, oldReplicas, kafkaClusterSpec.getReplicas()); if (!diff.isEmpty()) { - log.warn("Only the following changes to Kafka storage are allowed: " + + LOGGER.warnCr(reconciliation, "Only the following changes to Kafka storage are allowed: " + "changing the deleteClaim flag, " + "adding volumes to Jbod storage or removing volumes from Jbod storage, " + "changing overrides to nodes which do not exist yet" + "and increasing size of persistent claim volumes (depending on the volume type and used storage class)."); - log.warn("The desired Kafka storage configuration in the custom resource {}/{} contains changes which are not allowed. As a " + + LOGGER.warnCr(reconciliation, "The desired Kafka storage configuration in the custom resource {}/{} contains changes which are not allowed. As a " + "result, all storage changes will be ignored. Use DEBUG level logging for more information " + "about the detected changes.", kafkaAssembly.getMetadata().getNamespace(), kafkaAssembly.getMetadata().getName()); @@ -457,11 +459,11 @@ public static KafkaCluster fromCrd(Kafka kafkaAssembly, KafkaVersion.Lookup vers // Configure listeners if (kafkaClusterSpec.getListeners() == null || kafkaClusterSpec.getListeners().getGenericKafkaListeners() == null) { - log.error("The required field .spec.kafka.listeners is missing"); + LOGGER.errorCr(reconciliation, "The required field .spec.kafka.listeners is missing"); throw new InvalidResourceException("The required field .spec.kafka.listeners is missing"); } List listeners = kafkaClusterSpec.getListeners().getGenericKafkaListeners(); - ListenersValidator.validate(kafkaClusterSpec.getReplicas(), listeners); + ListenersValidator.validate(reconciliation, kafkaClusterSpec.getReplicas(), listeners); result.setListeners(listeners); // Set authorization @@ -471,7 +473,7 @@ public static KafkaCluster fromCrd(Kafka kafkaAssembly, KafkaVersion.Lookup vers } else { KafkaAuthorizationKeycloak authorizationKeycloak = (KafkaAuthorizationKeycloak) kafkaClusterSpec.getAuthorization(); if (authorizationKeycloak.getClientId() == null || authorizationKeycloak.getTokenEndpointUri() == null) { - log.error("Keycloak Authorization: Token Endpoint URI and clientId are both required"); + LOGGER.errorCr(reconciliation, "Keycloak Authorization: Token Endpoint URI and clientId are both required"); throw new InvalidResourceException("Keycloak Authorization: Token Endpoint URI and clientId are both required"); } } @@ -626,16 +628,17 @@ private static void configureCruiseControlMetrics(Kafka kafkaAssembly, KafkaClus /** * Validates the Kafka broker configuration against the configuration options of the desired Kafka version. 
* + * @param reconciliation The reconciliation * @param kafkaAssembly Kafka custom resource * @param desiredVersion Desired Kafka version * @param configuration Kafka broker configuration */ - private static void validateConfiguration(Kafka kafkaAssembly, KafkaVersion desiredVersion, KafkaConfiguration configuration) { + private static void validateConfiguration(Reconciliation reconciliation, Kafka kafkaAssembly, KafkaVersion desiredVersion, KafkaConfiguration configuration) { List errorsInConfig = configuration.validate(desiredVersion); if (!errorsInConfig.isEmpty()) { for (String error : errorsInConfig) { - log.warn("Kafka {}/{} has invalid spec.kafka.config: {}", + LOGGER.warnCr(reconciliation, "Kafka {}/{} has invalid spec.kafka.config: {}", kafkaAssembly.getMetadata().getNamespace(), kafkaAssembly.getMetadata().getName(), error); @@ -670,19 +673,18 @@ protected static void validateIntConfigProperty(String propertyName, KafkaCluste * @param externalBootstrapDnsName The set of DNS names for bootstrap service (should be appended to every broker certificate) * @param externalDnsNames The list of DNS names for broker pods (should be appended only to specific certificates for given broker) * @param isMaintenanceTimeWindowsSatisfied Indicates whether we are in the maintenance window or not. - * This is used for certificate renewals */ public void generateCertificates(Kafka kafka, ClusterCa clusterCa, Set externalBootstrapDnsName, - Map> externalDnsNames, boolean isMaintenanceTimeWindowsSatisfied) { - log.debug("Generating certificates"); + Map> externalDnsNames, boolean isMaintenanceTimeWindowsSatisfied) { + LOGGER.debugCr(reconciliation, "Generating certificates"); try { brokerCerts = clusterCa.generateBrokerCerts(kafka, externalBootstrapDnsName, externalDnsNames, isMaintenanceTimeWindowsSatisfied); } catch (IOException e) { - log.warn("Error while generating certificates", e); + LOGGER.warnCr(reconciliation, "Error while generating certificates", e); } - log.debug("End generating certificates"); + LOGGER.debugCr(reconciliation, "End generating certificates"); } /** @@ -845,7 +847,8 @@ public List generateExternalServices(int pod) { String serviceName = ListenersUtils.backwardsCompatibleBrokerServiceName(cluster, pod, listener); List ports = Collections.singletonList( - createServicePort(ListenersUtils.backwardsCompatiblePortName(listener), + createServicePort( + ListenersUtils.backwardsCompatiblePortName(listener), listener.getPort(), listener.getPort(), ListenersUtils.brokerNodePort(listener, pod), @@ -1801,7 +1804,7 @@ public NetworkPolicy generateNetworkPolicy(String operatorNamespace, Labels oper .endSpec() .build(); - log.trace("Created network policy {}", networkPolicy); + LOGGER.traceCr(reconciliation, "Created network policy {}", networkPolicy); return networkPolicy; } diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaConfiguration.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaConfiguration.java index ba231e189e..cfc430526a 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaConfiguration.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaConfiguration.java @@ -10,6 +10,7 @@ import io.strimzi.kafka.config.model.ConfigModel; import io.strimzi.kafka.config.model.ConfigModels; import io.strimzi.kafka.config.model.Scope; +import io.strimzi.operator.common.Reconciliation; import java.io.IOException; import java.io.InputStream; @@ -43,35 +44,40 @@ public class 
KafkaConfiguration extends AbstractConfiguration { * Constructor used to instantiate this class from JsonObject. Should be used to create configuration from * ConfigMap / CRD. * + * @param reconciliation The reconciliation * @param jsonOptions Json object with configuration options as key ad value pairs. */ - public KafkaConfiguration(Iterable> jsonOptions) { - super(jsonOptions, FORBIDDEN_PREFIXES, FORBIDDEN_PREFIX_EXCEPTIONS); + public KafkaConfiguration(Reconciliation reconciliation, Iterable> jsonOptions) { + super(reconciliation, jsonOptions, FORBIDDEN_PREFIXES, FORBIDDEN_PREFIX_EXCEPTIONS); } - private KafkaConfiguration(String configuration, List forbiddenPrefixes) { - super(configuration, forbiddenPrefixes); + private KafkaConfiguration(Reconciliation reconciliation, String configuration, List forbiddenPrefixes) { + super(reconciliation, configuration, forbiddenPrefixes); } /** * Returns a KafkaConfiguration created without forbidden option filtering. + * + * @param reconciliation The reconciliation * @param string A string representation of the Properties * @return The KafkaConfiguration */ - public static KafkaConfiguration unvalidated(String string) { - return new KafkaConfiguration(string, emptyList()); + public static KafkaConfiguration unvalidated(Reconciliation reconciliation, String string) { + return new KafkaConfiguration(reconciliation, string, emptyList()); } /** * Returns a KafkaConfiguration created without forbidden option filtering. + * + * @param reconciliation The reconciliation * @param map A map representation of the Properties * @return The KafkaConfiguration */ - public static KafkaConfiguration unvalidated(Map map) { + public static KafkaConfiguration unvalidated(Reconciliation reconciliation, Map map) { StringBuilder string = new StringBuilder(); map.entrySet().forEach(entry -> string.append(entry.getKey() + "=" + entry.getValue() + "\n")); - return new KafkaConfiguration(string.toString(), emptyList()); + return new KafkaConfiguration(reconciliation, string.toString(), emptyList()); } /** diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaConnectBuild.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaConnectBuild.java index 26df397573..fb6115f0a4 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaConnectBuild.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaConnectBuild.java @@ -33,6 +33,7 @@ import io.strimzi.api.kafka.model.template.KafkaConnectTemplate; import io.strimzi.operator.cluster.ClusterOperatorConfig; import io.strimzi.operator.common.Annotations; +import io.strimzi.operator.common.Reconciliation; import java.util.ArrayList; import java.util.Arrays; @@ -57,10 +58,11 @@ public class KafkaConnectBuild extends AbstractModel { /** * Constructor * + * @param reconciliation The reconciliation * @param resource Kubernetes resource with metadata containing the namespace and cluster name */ - protected KafkaConnectBuild(HasMetadata resource) { - super(resource, APPLICATION_NAME); + protected KafkaConnectBuild(Reconciliation reconciliation, HasMetadata resource) { + super(reconciliation, resource, APPLICATION_NAME); this.name = KafkaConnectResources.buildPodName(cluster); this.image = System.getenv().getOrDefault(ClusterOperatorConfig.STRIMZI_DEFAULT_KANIKO_EXECUTOR_IMAGE, DEFAULT_KANIKO_EXECUTOR_IMAGE); } @@ -68,12 +70,13 @@ protected KafkaConnectBuild(HasMetadata resource) { /** * Created the KafkaConnectBuild instance from the 
Kafka Connect Custom Resource * + * @param reconciliation The reconciliation * @param kafkaConnect Kafka Connect CR with the build configuration * @param versions Kafka versions configuration * @return Instance of KafkaConnectBuild class */ - public static KafkaConnectBuild fromCrd(KafkaConnect kafkaConnect, KafkaVersion.Lookup versions) { - KafkaConnectBuild build = new KafkaConnectBuild(kafkaConnect); + public static KafkaConnectBuild fromCrd(Reconciliation reconciliation, KafkaConnect kafkaConnect, KafkaVersion.Lookup versions) { + KafkaConnectBuild build = new KafkaConnectBuild(reconciliation, kafkaConnect); KafkaConnectSpec spec = kafkaConnect.getSpec(); if (spec == null) { diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaConnectCluster.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaConnectCluster.java index d3e7648955..4c9e7d091e 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaConnectCluster.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaConnectCluster.java @@ -60,6 +60,7 @@ import io.strimzi.api.kafka.model.tracing.Tracing; import io.strimzi.operator.cluster.ClusterOperatorConfig; import io.strimzi.operator.common.PasswordGenerator; +import io.strimzi.operator.common.Reconciliation; import io.strimzi.operator.common.Util; import io.strimzi.operator.common.model.Labels; @@ -143,20 +144,22 @@ public class KafkaConnectCluster extends AbstractModel { /** * Constructor * + * @param reconciliation The reconciliation * @param resource Kubernetes resource with metadata containing the namespace and cluster name */ - protected KafkaConnectCluster(HasMetadata resource) { - this(resource, APPLICATION_NAME); + protected KafkaConnectCluster(Reconciliation reconciliation, HasMetadata resource) { + this(reconciliation, resource, APPLICATION_NAME); } /** * Constructor * + * @param reconciliation The reconciliation * @param resource Kubernetes resource with metadata containing the namespace and cluster name * @param applicationName configurable allow other classes to extend this class */ - protected KafkaConnectCluster(HasMetadata resource, String applicationName) { - super(resource, applicationName); + protected KafkaConnectCluster(Reconciliation reconciliation, HasMetadata resource, String applicationName) { + super(reconciliation, resource, applicationName); this.name = KafkaConnectResources.deploymentName(cluster); this.serviceName = KafkaConnectResources.serviceName(cluster); this.ancillaryConfigMapName = KafkaConnectResources.metricsAndLogConfigMapName(cluster); @@ -172,10 +175,10 @@ protected KafkaConnectCluster(HasMetadata resource, String applicationName) { this.logAndMetricsConfigMountPath = "/opt/kafka/custom-config/"; } - public static KafkaConnectCluster fromCrd(KafkaConnect kafkaConnect, KafkaVersion.Lookup versions) { + public static KafkaConnectCluster fromCrd(Reconciliation reconciliation, KafkaConnect kafkaConnect, KafkaVersion.Lookup versions) { - KafkaConnectCluster cluster = fromSpec(kafkaConnect.getSpec(), versions, - new KafkaConnectCluster(kafkaConnect)); + KafkaConnectCluster cluster = fromSpec(reconciliation, kafkaConnect.getSpec(), versions, + new KafkaConnectCluster(reconciliation, kafkaConnect)); cluster.setOwnerReference(kafkaConnect); @@ -188,7 +191,8 @@ public static KafkaConnectCluster fromCrd(KafkaConnect kafkaConnect, KafkaVersio * thus permitting reuse of the setter-calling code for subclasses. 
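// Illustrative sketch, not part of this patch: in the Bridge, Connect and MirrorMaker models the
// callers now capture the warning text returned by AuthenticationUtils.validateClientAuthentication
// and emit it with the reconciliation marker. Caller-side fragment (spec, reconciliation and
// LOGGER come from the enclosing model class):
String warnMsg = AuthenticationUtils.validateClientAuthentication(spec.getAuthentication(), spec.getTls() != null);
if (!warnMsg.isEmpty()) {
    // Only log when validation actually produced a warning for this CR
    LOGGER.warnCr(reconciliation, warnMsg);
}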
*/ @SuppressWarnings("deprecation") - protected static C fromSpec(KafkaConnectSpec spec, + protected static C fromSpec(Reconciliation reconciliation, + KafkaConnectSpec spec, KafkaVersion.Lookup versions, C kafkaConnect) { kafkaConnect.setReplicas(spec.getReplicas() != null && spec.getReplicas() >= 0 ? spec.getReplicas() : DEFAULT_REPLICAS); @@ -196,7 +200,7 @@ protected static C fromSpec(KafkaConnectSpec spe AbstractConfiguration config = kafkaConnect.getConfiguration(); if (config == null) { - config = new KafkaConnectConfiguration(spec.getConfig().entrySet()); + config = new KafkaConnectConfiguration(reconciliation, spec.getConfig().entrySet()); kafkaConnect.setConfiguration(config); } if (kafkaConnect.tracing != null) { @@ -246,7 +250,10 @@ protected static C fromSpec(KafkaConnectSpec spe kafkaConnect.setBootstrapServers(spec.getBootstrapServers()); kafkaConnect.setTls(spec.getTls()); - AuthenticationUtils.validateClientAuthentication(spec.getAuthentication(), spec.getTls() != null); + String warnMsg = AuthenticationUtils.validateClientAuthentication(spec.getAuthentication(), spec.getTls() != null); + if (!warnMsg.isEmpty()) { + LOGGER.warnCr(reconciliation, warnMsg); + } kafkaConnect.setAuthentication(spec.getAuthentication()); if (spec.getTemplate() != null) { @@ -371,7 +378,7 @@ private List getExternalConfigurationVolumes(boolean isOpenShift) { if (name != null) { if (volume.getConfigMap() != null && volume.getSecret() != null) { - log.warn("Volume {} with external Kafka Connect configuration has to contain exactly one volume source reference to either ConfigMap or Secret", name); + LOGGER.warnCr(reconciliation, "Volume {} with external Kafka Connect configuration has to contain exactly one volume source reference to either ConfigMap or Secret", name); } else { if (volume.getConfigMap() != null) { ConfigMapVolumeSource source = volume.getConfigMap(); @@ -443,7 +450,7 @@ private List getExternalConfigurationVolumeMounts() { if (name != null) { if (volume.getConfigMap() != null && volume.getSecret() != null) { - log.warn("Volume {} with external Kafka Connect configuration has to contain exactly one volume source reference to either ConfigMap or Secret", name); + LOGGER.warnCr(reconciliation, "Volume {} with external Kafka Connect configuration has to contain exactly one volume source reference to either ConfigMap or Secret", name); } else if (volume.getConfigMap() != null || volume.getSecret() != null) { VolumeMount volumeMount = new VolumeMountBuilder() .withName(VolumeUtils.getValidVolumeName(EXTERNAL_CONFIGURATION_VOLUME_NAME_PREFIX + name)) @@ -628,7 +635,7 @@ private List getExternalConfigurationEnvVars() { if (valueFrom != null) { if (valueFrom.getConfigMapKeyRef() != null && valueFrom.getSecretKeyRef() != null) { - log.warn("Environment variable {} with external Kafka Connect configuration has to contain exactly one reference to either ConfigMap or Secret", name); + LOGGER.warnCr(reconciliation, "Environment variable {} with external Kafka Connect configuration has to contain exactly one reference to either ConfigMap or Secret", name); } else { if (valueFrom.getConfigMapKeyRef() != null) { EnvVarSource envVarSource = new EnvVarSourceBuilder() @@ -646,7 +653,7 @@ private List getExternalConfigurationEnvVars() { } } } else { - log.warn("Name of an environment variable with external Kafka Connect configuration cannot start with `KAFKA_` or `STRIMZI`."); + LOGGER.warnCr(reconciliation, "Name of an environment variable with external Kafka Connect configuration cannot start with 
`KAFKA_` or `STRIMZI`."); } } @@ -668,6 +675,7 @@ protected String getDefaultLogConfigFileName() { /** * Set the bootstrap servers to connect to + * * @param bootstrapServers bootstrap servers comma separated list */ protected void setBootstrapServers(String bootstrapServers) { @@ -834,7 +842,7 @@ public NetworkPolicy generateNetworkPolicy(boolean connectorOperatorEnabled, .endSpec() .build(); - log.trace("Created network policy {}", networkPolicy); + LOGGER.traceCr(reconciliation, "Created network policy {}", networkPolicy); return networkPolicy; } else { return null; diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaConnectConfiguration.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaConnectConfiguration.java index 6b820d97d2..eb43d43885 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaConnectConfiguration.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaConnectConfiguration.java @@ -6,6 +6,7 @@ package io.strimzi.operator.cluster.model; import io.strimzi.api.kafka.model.KafkaConnectSpec; +import io.strimzi.operator.common.Reconciliation; import java.util.HashMap; import java.util.List; @@ -36,9 +37,10 @@ public class KafkaConnectConfiguration extends AbstractConfiguration { * Constructor used to instantiate this class from JsonObject. Should be used to create configuration from * ConfigMap / CRD. * + * @param reconciliation The reconciliation * @param jsonOptions Json object with configuration options as key ad value pairs. */ - public KafkaConnectConfiguration(Iterable> jsonOptions) { - super(jsonOptions, FORBIDDEN_PREFIXES, FORBIDDEN_PREFIX_EXCEPTIONS, DEFAULTS); + public KafkaConnectConfiguration(Reconciliation reconciliation, Iterable> jsonOptions) { + super(reconciliation, jsonOptions, FORBIDDEN_PREFIXES, FORBIDDEN_PREFIX_EXCEPTIONS, DEFAULTS); } } diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaConnectS2ICluster.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaConnectS2ICluster.java index f7c5c53869..4e3ed8da80 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaConnectS2ICluster.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaConnectS2ICluster.java @@ -31,6 +31,7 @@ import io.strimzi.api.kafka.model.KafkaConnectS2I; import io.strimzi.api.kafka.model.KafkaConnectS2IResources; import io.strimzi.api.kafka.model.KafkaConnectS2ISpec; +import io.strimzi.operator.common.Reconciliation; import io.strimzi.operator.common.Util; import java.util.List; @@ -50,15 +51,15 @@ public class KafkaConnectS2ICluster extends KafkaConnectCluster { * * @param resource Kubernetes resource with metadata containing the namespace and cluster name */ - private KafkaConnectS2ICluster(HasMetadata resource) { - super(resource, APPLICATION_NAME); + private KafkaConnectS2ICluster(Reconciliation reconciliation, HasMetadata resource) { + super(reconciliation, resource, APPLICATION_NAME); } // Deprecation is suppressed because of KafkaConnectS2I @SuppressWarnings("deprecation") - public static KafkaConnectS2ICluster fromCrd(KafkaConnectS2I kafkaConnectS2I, KafkaVersion.Lookup versions) { + public static KafkaConnectS2ICluster fromCrd(Reconciliation reconciliation, KafkaConnectS2I kafkaConnectS2I, KafkaVersion.Lookup versions) { KafkaConnectS2ISpec spec = kafkaConnectS2I.getSpec(); - KafkaConnectS2ICluster cluster = fromSpec(spec, versions, new 
KafkaConnectS2ICluster(kafkaConnectS2I)); + KafkaConnectS2ICluster cluster = fromSpec(reconciliation, spec, versions, new KafkaConnectS2ICluster(reconciliation, kafkaConnectS2I)); if (spec.getBuild() != null) { throw new InvalidResourceException(".spec.build can be used only with KafkaConnect and is not supported with KafkaConnectS2I."); diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaExporter.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaExporter.java index dd271e821b..4d8b961756 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaExporter.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaExporter.java @@ -27,6 +27,7 @@ import io.strimzi.api.kafka.model.ProbeBuilder; import io.strimzi.api.kafka.model.template.KafkaExporterTemplate; import io.strimzi.operator.cluster.ClusterOperatorConfig; +import io.strimzi.operator.common.Reconciliation; import java.util.ArrayList; import java.util.Collections; @@ -68,10 +69,11 @@ public class KafkaExporter extends AbstractModel { /** * Constructor * + * @param reconciliation The reconciliation * @param resource Kubernetes resource with metadata containing the namespace and cluster name */ - protected KafkaExporter(HasMetadata resource) { - super(resource, APPLICATION_NAME); + protected KafkaExporter(Reconciliation reconciliation, HasMetadata resource) { + super(reconciliation, resource, APPLICATION_NAME); this.name = KafkaExporterResources.deploymentName(cluster); this.replicas = 1; this.readinessPath = "/metrics"; @@ -87,8 +89,8 @@ protected KafkaExporter(HasMetadata resource) { } - public static KafkaExporter fromCrd(Kafka kafkaAssembly, KafkaVersion.Lookup versions) { - KafkaExporter kafkaExporter = new KafkaExporter(kafkaAssembly); + public static KafkaExporter fromCrd(Reconciliation reconciliation, Kafka kafkaAssembly, KafkaVersion.Lookup versions) { + KafkaExporter kafkaExporter = new KafkaExporter(reconciliation, kafkaAssembly); KafkaExporterSpec spec = kafkaAssembly.getSpec().getKafkaExporter(); if (spec != null) { @@ -318,7 +320,7 @@ public Secret generateSecret(ClusterCa clusterCa, boolean isMaintenanceTimeWindo return null; } Secret secret = clusterCa.kafkaExporterSecret(); - return ModelUtils.buildSecret(clusterCa, secret, namespace, KafkaExporter.secretName(cluster), name, + return ModelUtils.buildSecret(reconciliation, clusterCa, secret, namespace, KafkaExporter.secretName(cluster), name, "kafka-exporter", labels, createOwnerReference(), isMaintenanceTimeWindowsSatisfied); } } diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaMirrorMaker2Cluster.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaMirrorMaker2Cluster.java index e7efe36063..2072432ef9 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaMirrorMaker2Cluster.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaMirrorMaker2Cluster.java @@ -25,6 +25,7 @@ import io.strimzi.api.kafka.model.authentication.KafkaClientAuthenticationPlain; import io.strimzi.api.kafka.model.authentication.KafkaClientAuthenticationScramSha512; import io.strimzi.api.kafka.model.authentication.KafkaClientAuthenticationTls; +import io.strimzi.operator.common.Reconciliation; import java.util.List; import java.util.Map.Entry; @@ -56,10 +57,11 @@ public class KafkaMirrorMaker2Cluster extends KafkaConnectCluster { /** * Constructor * + * @param reconciliation The 
reconciliation * @param resource Kubernetes resource with metadata containing the namespace and cluster name */ - private KafkaMirrorMaker2Cluster(HasMetadata resource) { - super(resource, APPLICATION_NAME); + private KafkaMirrorMaker2Cluster(Reconciliation reconciliation, HasMetadata resource) { + super(reconciliation, resource, APPLICATION_NAME); this.name = KafkaMirrorMaker2Resources.deploymentName(cluster); this.serviceName = KafkaMirrorMaker2Resources.serviceName(cluster); this.ancillaryConfigMapName = KafkaMirrorMaker2Resources.metricsAndLogConfigMapName(cluster); @@ -68,13 +70,15 @@ private KafkaMirrorMaker2Cluster(HasMetadata resource) { /** * Creates instance of KafkaMirrorMaker2Cluster from CRD definition. * + * @param reconciliation The reconciliation * @param kafkaMirrorMaker2 The Custom Resource based on which the cluster model should be created. * @param versions The image versions for MirrorMaker 2.0 clusters. * @return The MirrorMaker 2.0 cluster model. */ - public static KafkaMirrorMaker2Cluster fromCrd(KafkaMirrorMaker2 kafkaMirrorMaker2, + public static KafkaMirrorMaker2Cluster fromCrd(Reconciliation reconciliation, + KafkaMirrorMaker2 kafkaMirrorMaker2, KafkaVersion.Lookup versions) { - KafkaMirrorMaker2Cluster cluster = new KafkaMirrorMaker2Cluster(kafkaMirrorMaker2); + KafkaMirrorMaker2Cluster cluster = new KafkaMirrorMaker2Cluster(reconciliation, kafkaMirrorMaker2); KafkaMirrorMaker2Spec spec = kafkaMirrorMaker2.getSpec(); cluster.setOwnerReference(kafkaMirrorMaker2); @@ -95,8 +99,8 @@ public static KafkaMirrorMaker2Cluster fromCrd(KafkaMirrorMaker2 kafkaMirrorMake .findFirst() .orElseThrow(() -> new InvalidResourceException("connectCluster with alias " + connectClusterAlias + " cannot be found in the list of clusters at spec.clusters")); } - cluster.setConfiguration(new KafkaMirrorMaker2Configuration(connectCluster.getConfig().entrySet())); - return fromSpec(buildKafkaConnectSpec(spec, connectCluster), versions, cluster); + cluster.setConfiguration(new KafkaMirrorMaker2Configuration(reconciliation, connectCluster.getConfig().entrySet())); + return fromSpec(reconciliation, buildKafkaConnectSpec(spec, connectCluster), versions, cluster); } @SuppressWarnings("deprecation") @@ -211,7 +215,7 @@ private String buildClusterVolumeMountPath(final String baseVolumeMount, final @SuppressWarnings({"checkstyle:CyclomaticComplexity", "checkstyle:NPathComplexity"}) @Override protected List getEnvVars() { - List varList = super.getEnvVars(); + List varList = super.getEnvVars(); final StringBuilder clusterAliases = new StringBuilder(); final StringBuilder clustersTrustedCerts = new StringBuilder(); diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaMirrorMaker2Configuration.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaMirrorMaker2Configuration.java index b71be67563..f5c1f8aeec 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaMirrorMaker2Configuration.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaMirrorMaker2Configuration.java @@ -6,6 +6,7 @@ package io.strimzi.operator.cluster.model; import io.strimzi.api.kafka.model.KafkaMirrorMaker2ClusterSpec; +import io.strimzi.operator.common.Reconciliation; import java.util.HashMap; import java.util.List; @@ -39,10 +40,11 @@ public class KafkaMirrorMaker2Configuration extends AbstractConfiguration { * Constructor used to instantiate this class from JsonObject. 
Should be used to * create configuration from ConfigMap / CRD. * + * @param reconciliation The reconciliation * @param jsonOptions Json object with configuration options as key ad value * pairs. */ - public KafkaMirrorMaker2Configuration(Iterable> jsonOptions) { - super(jsonOptions, FORBIDDEN_PREFIXES, FORBIDDEN_PREFIX_EXCEPTIONS, DEFAULTS); + public KafkaMirrorMaker2Configuration(Reconciliation reconciliation, Iterable> jsonOptions) { + super(reconciliation, jsonOptions, FORBIDDEN_PREFIXES, FORBIDDEN_PREFIX_EXCEPTIONS, DEFAULTS); } } diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaMirrorMakerCluster.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaMirrorMakerCluster.java index bee26ab4d4..57d0b0da45 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaMirrorMakerCluster.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaMirrorMakerCluster.java @@ -27,12 +27,14 @@ import io.strimzi.api.kafka.model.ProbeBuilder; import io.strimzi.api.kafka.model.template.KafkaMirrorMakerTemplate; import io.strimzi.api.kafka.model.tracing.Tracing; +import io.strimzi.operator.common.Reconciliation; import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.Map; +@SuppressWarnings({"checkstyle:CyclomaticComplexity", "checkstyle:NPathComplexity"}) public class KafkaMirrorMakerCluster extends AbstractModel { protected static final String APPLICATION_NAME = "kafka-mirror-maker"; @@ -103,10 +105,11 @@ public class KafkaMirrorMakerCluster extends AbstractModel { /** * Constructor * + * @param reconciliation The reconciliation * @param resource Kubernetes resource with metadata containing the namespace and cluster name */ - protected KafkaMirrorMakerCluster(HasMetadata resource) { - super(resource, APPLICATION_NAME); + protected KafkaMirrorMakerCluster(Reconciliation reconciliation, HasMetadata resource) { + super(reconciliation, resource, APPLICATION_NAME); this.name = KafkaMirrorMakerResources.deploymentName(cluster); this.serviceName = KafkaMirrorMakerResources.serviceName(cluster); this.ancillaryConfigMapName = KafkaMirrorMakerResources.metricsAndLogConfigMapName(cluster); @@ -122,8 +125,8 @@ protected KafkaMirrorMakerCluster(HasMetadata resource) { } @SuppressWarnings("deprecation") - public static KafkaMirrorMakerCluster fromCrd(KafkaMirrorMaker kafkaMirrorMaker, KafkaVersion.Lookup versions) { - KafkaMirrorMakerCluster kafkaMirrorMakerCluster = new KafkaMirrorMakerCluster(kafkaMirrorMaker); + public static KafkaMirrorMakerCluster fromCrd(Reconciliation reconciliation, KafkaMirrorMaker kafkaMirrorMaker, KafkaVersion.Lookup versions) { + KafkaMirrorMakerCluster kafkaMirrorMakerCluster = new KafkaMirrorMakerCluster(reconciliation, kafkaMirrorMaker); KafkaMirrorMakerSpec spec = kafkaMirrorMaker.getSpec(); if (spec != null) { @@ -144,14 +147,20 @@ public static KafkaMirrorMakerCluster fromCrd(KafkaMirrorMaker kafkaMirrorMaker, if (include == null && whitelist == null) { throw new InvalidResourceException("One of the fields include or whitelist needs to be specified."); } else if (whitelist != null && include != null) { - log.warn("Both include and whitelist fields are present. Whitelist is deprecated and will be ignored."); + LOGGER.warnCr(reconciliation, "Both include and whitelist fields are present. Whitelist is deprecated and will be ignored."); } kafkaMirrorMakerCluster.setInclude(include != null ? 
include : whitelist); - AuthenticationUtils.validateClientAuthentication(spec.getProducer().getAuthentication(), spec.getProducer().getTls() != null); + String warnMsg = AuthenticationUtils.validateClientAuthentication(spec.getProducer().getAuthentication(), spec.getProducer().getTls() != null); + if (!warnMsg.isEmpty()) { + LOGGER.warnCr(reconciliation, warnMsg); + } kafkaMirrorMakerCluster.setProducer(spec.getProducer()); - AuthenticationUtils.validateClientAuthentication(spec.getConsumer().getAuthentication(), spec.getConsumer().getTls() != null); + warnMsg = AuthenticationUtils.validateClientAuthentication(spec.getConsumer().getAuthentication(), spec.getConsumer().getTls() != null); + if (!warnMsg.isEmpty()) { + LOGGER.warnCr(reconciliation, warnMsg); + } kafkaMirrorMakerCluster.setConsumer(spec.getConsumer()); kafkaMirrorMakerCluster.setImage(versions.kafkaMirrorMakerImage(spec.getImage(), spec.getVersion())); @@ -307,8 +316,8 @@ protected List getContainers(ImagePullPolicy imagePullPolicy) { return containers; } - private KafkaMirrorMakerConsumerConfiguration getConsumerConfiguration() { - KafkaMirrorMakerConsumerConfiguration config = new KafkaMirrorMakerConsumerConfiguration(consumer.getConfig().entrySet()); + private KafkaMirrorMakerConsumerConfiguration getConsumerConfiguration() { + KafkaMirrorMakerConsumerConfiguration config = new KafkaMirrorMakerConsumerConfiguration(reconciliation, consumer.getConfig().entrySet()); if (tracing != null) { config.setConfigOption("interceptor.classes", "io.opentracing.contrib.kafka.TracingConsumerInterceptor"); @@ -318,7 +327,7 @@ private KafkaMirrorMakerConsumerConfiguration getConsumerConfiguration() { } private KafkaMirrorMakerProducerConfiguration getProducerConfiguration() { - KafkaMirrorMakerProducerConfiguration config = new KafkaMirrorMakerProducerConfiguration(producer.getConfig().entrySet()); + KafkaMirrorMakerProducerConfiguration config = new KafkaMirrorMakerProducerConfiguration(reconciliation, producer.getConfig().entrySet()); if (tracing != null) { config.setConfigOption("interceptor.classes", "io.opentracing.contrib.kafka.TracingProducerInterceptor"); diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaMirrorMakerConsumerConfiguration.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaMirrorMakerConsumerConfiguration.java index ff6e331ef7..8d54acb614 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaMirrorMakerConsumerConfiguration.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaMirrorMakerConsumerConfiguration.java @@ -6,6 +6,7 @@ package io.strimzi.operator.cluster.model; import io.strimzi.api.kafka.model.KafkaMirrorMakerConsumerSpec; +import io.strimzi.operator.common.Reconciliation; import java.util.HashMap; import java.util.List; @@ -29,9 +30,10 @@ public class KafkaMirrorMakerConsumerConfiguration extends AbstractConfiguration * Constructor used to instantiate this class from JsonObject. Should be used to create configuration from * ConfigMap / CRD. * + * @param reconciliation The reconciliation * @param jsonOptions Json object with configuration options as key ad value pairs. 
*/ - public KafkaMirrorMakerConsumerConfiguration(Iterable> jsonOptions) { - super(jsonOptions, FORBIDDEN_PREFIXES, FORBIDDEN_PREFIX_EXCEPTIONS, DEFAULTS); + public KafkaMirrorMakerConsumerConfiguration(Reconciliation reconciliation, Iterable> jsonOptions) { + super(reconciliation, jsonOptions, FORBIDDEN_PREFIXES, FORBIDDEN_PREFIX_EXCEPTIONS, DEFAULTS); } } diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaMirrorMakerProducerConfiguration.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaMirrorMakerProducerConfiguration.java index 0a087841f5..5d6877af90 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaMirrorMakerProducerConfiguration.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaMirrorMakerProducerConfiguration.java @@ -6,6 +6,7 @@ package io.strimzi.operator.cluster.model; import io.strimzi.api.kafka.model.KafkaMirrorMakerProducerSpec; +import io.strimzi.operator.common.Reconciliation; import java.util.HashMap; import java.util.List; @@ -29,9 +30,10 @@ public class KafkaMirrorMakerProducerConfiguration extends AbstractConfiguration * Constructor used to instantiate this class from JsonObject. Should be used to create configuration from * ConfigMap / CRD. * + * @param reconciliation The reconciliation * @param jsonOptions Json object with configuration options as key ad value pairs. */ - public KafkaMirrorMakerProducerConfiguration(Iterable> jsonOptions) { - super(jsonOptions, FORBIDDEN_PREFIXES, FORBIDDEN_PREFIX_EXCEPTIONS, DEFAULTS); + public KafkaMirrorMakerProducerConfiguration(Reconciliation reconciliation, Iterable> jsonOptions) { + super(reconciliation, jsonOptions, FORBIDDEN_PREFIXES, FORBIDDEN_PREFIX_EXCEPTIONS, DEFAULTS); } } diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/ListenersValidator.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/ListenersValidator.java index 53d513738a..2d504e2509 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/ListenersValidator.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/ListenersValidator.java @@ -11,8 +11,8 @@ import io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListenerConfigurationBroker; import io.strimzi.api.kafka.model.listener.arraylistener.KafkaListenerType; import io.strimzi.kafka.oauth.jsonpath.JsonPathFilterQuery; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; +import io.strimzi.operator.common.Reconciliation; +import io.strimzi.operator.common.ReconciliationLogger; import java.util.HashSet; import java.util.List; @@ -26,7 +26,7 @@ * Util methods for validating Kafka listeners */ public class ListenersValidator { - protected static final Logger LOG = LogManager.getLogger(ListenersValidator.class.getName()); + protected static final ReconciliationLogger LOGGER = ReconciliationLogger.create(ListenersValidator.class.getName()); private final static Pattern LISTENER_NAME_PATTERN = Pattern.compile(GenericKafkaListener.LISTENER_NAME_REGEX); public final static List FORBIDDEN_PORTS = List.of(9404, 9999); public final static int LOWEST_ALLOWED_PORT_NUMBER = 9092; @@ -34,14 +34,15 @@ public class ListenersValidator { /** * Validated the listener configuration. If the configuration is not valid, InvalidResourceException will be thrown. 
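// Illustrative sketch, not part of this patch: the validate(...) entry point of ListenersValidator
// as changed in the hunk here, with the generic types (stripped in this rendering of the diff)
// restored. The error is logged with the per-CR marker just before the reconciliation is failed.
public static void validate(Reconciliation reconciliation, int replicas, List<GenericKafkaListener> listeners) throws InvalidResourceException {
    Set<String> errors = validateAndGetErrorMessages(replicas, listeners);

    if (!errors.isEmpty()) {
        // Log against the CR that owns these listeners, then abort the reconciliation
        LOGGER.errorCr(reconciliation, "Listener configuration is not valid: {}", errors);
        throw new InvalidResourceException("Listener configuration is not valid: " + errors);
    }
}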
* + * @param reconciliation The reconciliation * @param replicas Number of replicas (required for Ingress validation) * @param listeners Listeners which should be validated */ - public static void validate(int replicas, List listeners) throws InvalidResourceException { + public static void validate(Reconciliation reconciliation, int replicas, List listeners) throws InvalidResourceException { Set errors = validateAndGetErrorMessages(replicas, listeners); if (!errors.isEmpty()) { - LOG.error("Listener configuration is not valid: {}", errors); + LOGGER.errorCr(reconciliation, "Listener configuration is not valid: {}", errors); throw new InvalidResourceException("Listener configuration is not valid: " + errors); } } diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/ModelUtils.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/ModelUtils.java index b3b6ac2229..96add9bd82 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/ModelUtils.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/ModelUtils.java @@ -35,10 +35,10 @@ import io.strimzi.api.kafka.model.template.PodTemplate; import io.strimzi.certs.CertAndKey; import io.strimzi.operator.cluster.KafkaUpgradeException; +import io.strimzi.operator.common.Reconciliation; +import io.strimzi.operator.common.ReconciliationLogger; import io.strimzi.operator.common.Util; import io.strimzi.operator.common.model.Labels; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import java.io.IOException; import java.nio.charset.StandardCharsets; @@ -60,7 +60,7 @@ public class ModelUtils { private ModelUtils() {} - protected static final Logger log = LogManager.getLogger(ModelUtils.class.getName()); + protected static final ReconciliationLogger LOGGER = ReconciliationLogger.create(ModelUtils.class.getName()); public static final String TLS_SIDECAR_LOG_LEVEL = "TLS_SIDECAR_LOG_LEVEL"; /** @@ -128,8 +128,8 @@ static EnvVar tlsSidecarLogEnvVar(TlsSidecar tlsSidecar) { tlsSidecar.getLogLevel() : TlsSidecarLogLevel.NOTICE).toValue()); } - public static Secret buildSecret(ClusterCa clusterCa, Secret secret, String namespace, String secretName, - String commonName, String keyCertName, Labels labels, OwnerReference ownerReference, boolean isMaintenanceTimeWindowsSatisfied) { + public static Secret buildSecret(Reconciliation reconciliation, ClusterCa clusterCa, Secret secret, String namespace, String secretName, + String commonName, String keyCertName, Labels labels, OwnerReference ownerReference, boolean isMaintenanceTimeWindowsSatisfied) { Map data = new HashMap<>(4); CertAndKey certAndKey = null; boolean shouldBeRegenerated = false; @@ -146,15 +146,15 @@ public static Secret buildSecret(ClusterCa clusterCa, Secret secret, String name } if (shouldBeRegenerated) { - log.debug("Certificate for pod {} need to be regenerated because: {}", keyCertName, String.join(", ", reasons)); + LOGGER.debugCr(reconciliation, "Certificate for pod {} need to be regenerated because: {}", keyCertName, String.join(", ", reasons)); try { certAndKey = clusterCa.generateSignedCert(commonName, Ca.IO_STRIMZI); } catch (IOException e) { - log.warn("Error while generating certificates", e); + LOGGER.warnCr(reconciliation, "Error while generating certificates", e); } - log.debug("End generating certificates"); + LOGGER.debugCr(reconciliation, "End generating certificates"); } else { if (secret.getData().get(keyCertName + ".p12") != null && 
!secret.getData().get(keyCertName + ".p12").isEmpty() && @@ -174,7 +174,7 @@ public static Secret buildSecret(ClusterCa clusterCa, Secret secret, String name decodeFromSecret(secret, keyCertName + ".key"), decodeFromSecret(secret, keyCertName + ".crt")); } catch (IOException e) { - log.error("Error generating the keystore for {}", keyCertName, e); + LOGGER.errorCr(reconciliation, "Error generating the keystore for {}", keyCertName, e); } } } diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/StorageDiff.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/StorageDiff.java index 92aa929f66..ab87fa9f41 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/StorageDiff.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/StorageDiff.java @@ -11,9 +11,9 @@ import io.strimzi.api.kafka.model.storage.PersistentClaimStorageOverride; import io.strimzi.api.kafka.model.storage.SingleVolumeStorage; import io.strimzi.api.kafka.model.storage.Storage; +import io.strimzi.operator.common.Reconciliation; +import io.strimzi.operator.common.ReconciliationLogger; import io.strimzi.operator.common.operator.resource.AbstractJsonDiff; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import java.util.Collections; import java.util.HashSet; @@ -29,7 +29,7 @@ * Class for diffing storage configuration */ public class StorageDiff extends AbstractJsonDiff { - private static final Logger log = LogManager.getLogger(StorageDiff.class.getName()); + private static final ReconciliationLogger LOGGER = ReconciliationLogger.create(StorageDiff.class.getName()); private static final Pattern IGNORABLE_PATHS = Pattern.compile( "^(/deleteClaim|/)$"); @@ -43,13 +43,14 @@ public class StorageDiff extends AbstractJsonDiff { * Diffs the storage for allowed or not allowed changes. Examples of allowed changes is increasing volume size or * adding overrides for nodes before scale-up / removing them after scale-down. * + * @param reconciliation The reconciliation * @param current Current Storage configuration * @param desired Desired Storage configuration * @param currentReplicas Current number of replicas (will differ from desired number of replicas when scaling up or down) * @param desiredReplicas Desired number of replicas (will differ from current number of replicas when scaling up or down) */ - public StorageDiff(Storage current, Storage desired, int currentReplicas, int desiredReplicas) { - this(current, desired, currentReplicas, desiredReplicas, ""); + public StorageDiff(Reconciliation reconciliation, Storage current, Storage desired, int currentReplicas, int desiredReplicas) { + this(reconciliation, current, desired, currentReplicas, desiredReplicas, ""); } /** @@ -57,13 +58,14 @@ public StorageDiff(Storage current, Storage desired, int currentReplicas, int de * adding overrides for nodes before scale-up / removing them after scale-down. This constructor is used internally * only. 
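// Illustrative sketch, not part of this patch: StorageDiff is likewise constructed with the
// Reconciliation so its debug output about ignored storage changes can be enabled per CR.
// Caller-side fragment mirroring the KafkaCluster.fromCrd hunk earlier in this patch
// (the storage and replica variables and the warning text are placeholders):
StorageDiff diff = new StorageDiff(reconciliation, oldStorage, newStorage, oldReplicas, desiredReplicas);

if (!diff.isEmpty()) {
    // Disallowed storage changes are ignored rather than applied; the warning names the CR
    LOGGER.warnCr(reconciliation, "The desired storage configuration contains changes which are not allowed and will be ignored.");
}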
* + * @param reconciliation The reconciliation * @param current Current Storage configuration * @param desired Desired Storage configuration * @param currentReplicas Current number of replicas (will differ from desired number of replicas when scaling up or down) * @param desiredReplicas Desired number of replicas (will differ from current number of replicas when scaling up or down) * @param volumeDesc Description of the volume which is being used */ - private StorageDiff(Storage current, Storage desired, int currentReplicas, int desiredReplicas, String volumeDesc) { + private StorageDiff(Reconciliation reconciliation, Storage current, Storage desired, int currentReplicas, int desiredReplicas, String volumeDesc) { boolean changesType = false; boolean shrinkSize = false; boolean isEmpty = true; @@ -87,7 +89,7 @@ private StorageDiff(Storage current, Storage desired, int currentReplicas, int d volumesAddedOrRemoved |= isNull(currentVolume) != isNull(desiredVolume); - StorageDiff diff = new StorageDiff(currentVolume, desiredVolume, currentReplicas, desiredReplicas, "(volume ID: " + volumeId + ") "); + StorageDiff diff = new StorageDiff(reconciliation, currentVolume, desiredVolume, currentReplicas, desiredReplicas, "(volume ID: " + volumeId + ") "); changesType |= diff.changesType(); shrinkSize |= diff.shrinkSize(); @@ -104,7 +106,7 @@ private StorageDiff(Storage current, Storage desired, int currentReplicas, int d String pathValue = d.get("path").asText(); if (IGNORABLE_PATHS.matcher(pathValue).matches()) { - log.debug("Ignoring Storage {}diff {}", volumeDesc, d); + LOGGER.debugCr(reconciliation, "Ignoring Storage {}diff {}", volumeDesc, d); continue; } @@ -133,10 +135,10 @@ private StorageDiff(Storage current, Storage desired, int currentReplicas, int d } } - if (log.isDebugEnabled()) { - log.debug("Storage {}differs: {}", volumeDesc, d); - log.debug("Current Storage {}path {} has value {}", volumeDesc, pathValue, lookupPath(source, pathValue)); - log.debug("Desired Storage {}path {} has value {}", volumeDesc, pathValue, lookupPath(target, pathValue)); + if (LOGGER.isDebugEnabled()) { + LOGGER.debugCr(reconciliation, "Storage {}differs: {}", volumeDesc, d); + LOGGER.debugCr(reconciliation, "Current Storage {}path {} has value {}", volumeDesc, pathValue, lookupPath(source, pathValue)); + LOGGER.debugCr(reconciliation, "Desired Storage {}path {} has value {}", volumeDesc, pathValue, lookupPath(target, pathValue)); } num++; diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/VolumeUtils.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/VolumeUtils.java index e13cf8abd1..d8eab13dee 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/VolumeUtils.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/VolumeUtils.java @@ -6,8 +6,6 @@ import io.fabric8.kubernetes.api.model.ConfigMapVolumeSource; import io.fabric8.kubernetes.api.model.ConfigMapVolumeSourceBuilder; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import java.math.BigInteger; import java.nio.charset.StandardCharsets; @@ -43,7 +41,6 @@ * Shared methods for working with Volume */ public class VolumeUtils { - protected static final Logger log = LogManager.getLogger(VolumeUtils.class.getName()); private static Pattern volumeNamePattern = Pattern.compile("^([a-z0-9]{1}[a-z0-9-]{0,61}[a-z0-9]{1})$"); /** @@ -78,8 +75,6 @@ public static Volume createConfigMapVolume(String name, String configMapName, Ma 
.withConfigMap(configMapVolumeSource) .build(); - log.trace("Created configMap Volume named '{}' with source configMap '{}'", validName, configMapName); - return volume; } @@ -102,8 +97,6 @@ public static Volume createConfigMapVolume(String name, String configMapName) { .withConfigMap(configMapVolumeSource) .build(); - log.trace("Created configMap Volume named '{}' with source configMap '{}'", validName, configMapName); - return volume; } @@ -145,7 +138,6 @@ public static Volume createSecretVolume(String name, String secretName, Map> jsonOptions) { - super(jsonOptions, FORBIDDEN_PREFIXES, FORBIDDEN_PREFIX_EXCEPTIONS, DEFAULTS); + public ZookeeperConfiguration(Reconciliation reconciliation, Iterable> jsonOptions) { + super(reconciliation, jsonOptions, FORBIDDEN_PREFIXES, FORBIDDEN_PREFIX_EXCEPTIONS, DEFAULTS); } } diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/AbstractConnectOperator.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/AbstractConnectOperator.java index db2abc6d9e..7b065e7f15 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/AbstractConnectOperator.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/AbstractConnectOperator.java @@ -52,6 +52,7 @@ import io.strimzi.operator.common.AbstractOperator; import io.strimzi.operator.common.Annotations; import io.strimzi.operator.common.BackOff; +import io.strimzi.operator.common.ReconciliationLogger; import io.strimzi.operator.common.Reconciliation; import io.strimzi.operator.common.Util; import io.strimzi.operator.common.model.Labels; @@ -72,8 +73,6 @@ import io.vertx.core.Promise; import io.vertx.core.Vertx; import io.vertx.core.json.JsonObject; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import java.util.ArrayList; import java.util.Collections; @@ -100,7 +99,7 @@ public abstract class AbstractConnectOperator, R extends Resource, P extends AbstractKafkaConnectSpec, S extends KafkaConnectStatus> extends AbstractOperator> { - private static final Logger log = LogManager.getLogger(AbstractConnectOperator.class.getName()); + private static final ReconciliationLogger LOGGER = ReconciliationLogger.create(AbstractConnectOperator.class.getName()); private final CrdOperator connectorOperator; private final Function connectClientProvider; @@ -233,8 +232,10 @@ public void eventReceived(Action action, KafkaConnector kafkaConnector) { KafkaConnectS2I connectS2i = cf.resultAt(1); KafkaConnectApi apiClient = connectOperator.connectClientProvider.apply(connectOperator.vertx); if (connect == null && connectS2i == null) { - log.info("{} {} in namespace {} was {}, but Connect cluster {} does not exist", connectorKind, connectorName, connectorNamespace, action, connectName); - updateStatus(noConnectCluster(connectNamespace, connectName), kafkaConnector, connectOperator.connectorOperator); + Reconciliation r = new Reconciliation("connector-watch", connectOperator.kind(), + kafkaConnector.getMetadata().getNamespace(), connectName); + updateStatus(r, noConnectCluster(connectNamespace, connectName), kafkaConnector, connectOperator.connectorOperator); + LOGGER.infoCr(r, "{} {} in namespace {} was {}, but Connect cluster {} does not exist", connectorKind, connectorName, connectorNamespace, action, connectName); return Future.succeededFuture(); } else if (connect != null && isOlderOrAlone(connect.getMetadata().getCreationTimestamp(), connectS2i)) { // grab the lock and 
call reconcileConnectors() @@ -243,14 +244,14 @@ public void eventReceived(Action action, KafkaConnector kafkaConnector) { kafkaConnector.getMetadata().getNamespace(), connectName); if (!Util.matchesSelector(selector, connect)) { - log.debug("{}: {} {} in namespace {} was {}, but Connect cluster {} does not match label selector {} and will be ignored", reconciliation, connectorKind, connectorName, connectorNamespace, action, connectName, selectorLabels); + LOGGER.debugCr(reconciliation, "{} {} in namespace {} was {}, but Connect cluster {} does not match label selector {} and will be ignored", connectorKind, connectorName, connectorNamespace, action, connectName, selectorLabels); return Future.succeededFuture(); } else if (connect.getSpec() != null && connect.getSpec().getReplicas() == 0) { - log.info("{}: {} {} in namespace {} was {}, but Connect cluster {} has 0 replicas", reconciliation, connectorKind, connectorName, connectorNamespace, action, connectName); - updateStatus(zeroReplicas(connectNamespace, connectName), kafkaConnector, connectOperator.connectorOperator); + LOGGER.infoCr(reconciliation, "{} {} in namespace {} was {}, but Connect cluster {} has 0 replicas", connectorKind, connectorName, connectorNamespace, action, connectName); + updateStatus(reconciliation, zeroReplicas(connectNamespace, connectName), kafkaConnector, connectOperator.connectorOperator); return Future.succeededFuture(); } else { - log.info("{}: {} {} in namespace {} was {}", reconciliation, connectorKind, connectorName, connectorNamespace, action); + LOGGER.infoCr(reconciliation, "{} {} in namespace {} was {}", connectorKind, connectorName, connectorNamespace, action); return connectOperator.withLock(reconciliation, LOCK_TIMEOUT_MS, () -> connectOperator.reconcileConnectorAndHandleResult(reconciliation, @@ -258,7 +259,7 @@ public void eventReceived(Action action, KafkaConnector kafkaConnector) { isUseResources(connect), kafkaConnector.getMetadata().getName(), action == Action.DELETED ? 
null : kafkaConnector) .compose(reconcileResult -> { - log.info("{}: reconciled", reconciliation); + LOGGER.infoCr(reconciliation, "reconciled"); return Future.succeededFuture(reconcileResult); })); } @@ -269,14 +270,14 @@ public void eventReceived(Action action, KafkaConnector kafkaConnector) { kafkaConnector.getMetadata().getNamespace(), connectName); if (!Util.matchesSelector(selector, connectS2i)) { - log.debug("{}: {} {} in namespace {} was {}, but Connect cluster {} does not match label selector {} and will be ignored", reconciliation, connectorKind, connectorName, connectorNamespace, action, connectName, selectorLabels); + LOGGER.debugCr(reconciliation, "{} {} in namespace {} was {}, but Connect cluster {} does not match label selector {} and will be ignored", connectorKind, connectorName, connectorNamespace, action, connectName, selectorLabels); return Future.succeededFuture(); } else if (connectS2i.getSpec() != null && connectS2i.getSpec().getReplicas() == 0) { - log.info("{}: {} {} in namespace {} was {}, but Connect cluster {} has 0 replicas", reconciliation, connectorKind, connectorName, connectorNamespace, action, connectName); - updateStatus(zeroReplicas(connectNamespace, connectName), kafkaConnector, connectOperator.connectorOperator); + LOGGER.infoCr(reconciliation, "{} {} in namespace {} was {}, but Connect cluster {} has 0 replicas", connectorKind, connectorName, connectorNamespace, action, connectName); + updateStatus(reconciliation, zeroReplicas(connectNamespace, connectName), kafkaConnector, connectOperator.connectorOperator); return Future.succeededFuture(); } else { - log.info("{}: {} {} in namespace {} was {}", reconciliation, connectorKind, connectorName, connectorNamespace, action); + LOGGER.infoCr(reconciliation, "{} {} in namespace {} was {}", connectorKind, connectorName, connectorNamespace, action); return connectS2IOperator.withLock(reconciliation, LOCK_TIMEOUT_MS, () -> connectS2IOperator.reconcileConnectorAndHandleResult(reconciliation, @@ -284,14 +285,16 @@ public void eventReceived(Action action, KafkaConnector kafkaConnector) { isUseResources(connectS2i), kafkaConnector.getMetadata().getName(), action == Action.DELETED ? 
null : kafkaConnector) .compose(reconcileResult -> { - log.info("{}: reconciled", reconciliation); + LOGGER.infoCr(reconciliation, "reconciled"); return Future.succeededFuture(reconcileResult); })); } } }); } else { - updateStatus(new InvalidResourceException("Resource lacks label '" + updateStatus(new Reconciliation("connector-watch", connectOperator.kind(), + kafkaConnector.getMetadata().getNamespace(), null), + new InvalidResourceException("Resource lacks label '" + Labels.STRIMZI_CLUSTER_LABEL + "': No connect cluster in which to create this connector."), kafkaConnector, connectOperator.connectorOperator); @@ -299,10 +302,10 @@ public void eventReceived(Action action, KafkaConnector kafkaConnector) { break; case ERROR: - log.error("Failed {} {} in namespace {} ", connectorKind, connectorName, connectorNamespace); + LOGGER.errorCr(new Reconciliation("connector-watch", connectorKind, connectorNamespace, connectName), "Failed {} {} in namespace {} ", connectorKind, connectorName, connectorNamespace); break; default: - log.error("Unknown action: {} {} in namespace {}", connectorKind, connectorName, connectorNamespace); + LOGGER.errorCr(new Reconciliation("connector-watch", connectorKind, connectorNamespace, connectName), "Unknown action: {} {} in namespace {}", connectorKind, connectorName, connectorNamespace); } } @@ -376,14 +379,14 @@ protected Future reconcileConnectors(Reconciliation reconciliation, T conn return CompositeFuture.join( apiClient.list(host, port), connectorOperator.listAsync(namespace, Optional.of(new LabelSelectorBuilder().addToMatchLabels(Labels.STRIMZI_CLUSTER_LABEL, connectName).build())), - apiClient.listConnectorPlugins(host, port), - apiClient.updateConnectLoggers(host, port, desiredLogging, defaultLogging) + apiClient.listConnectorPlugins(reconciliation, host, port), + apiClient.updateConnectLoggers(reconciliation, host, port, desiredLogging, defaultLogging) ).compose(cf -> { List runningConnectorNames = cf.resultAt(0); List desiredConnectors = cf.resultAt(1); List connectorPlugins = cf.resultAt(2); - log.debug("{}: Setting list of connector plugins in Kafka Connect status", reconciliation); + LOGGER.debugCr(reconciliation, "Setting list of connector plugins in Kafka Connect status"); connectStatus.setConnectorPlugins(connectorPlugins); if (connectorsResourceCounter != null) { @@ -392,12 +395,12 @@ protected Future reconcileConnectors(Reconciliation reconciliation, T conn Set deleteConnectorNames = new HashSet<>(runningConnectorNames); deleteConnectorNames.removeAll(desiredConnectors.stream().map(c -> c.getMetadata().getName()).collect(Collectors.toSet())); - log.debug("{}: {} cluster: delete connectors: {}", reconciliation, kind(), deleteConnectorNames); + LOGGER.debugCr(reconciliation, "{} cluster: delete connectors: {}", kind(), deleteConnectorNames); Stream> deletionFutures = deleteConnectorNames.stream().map(connectorName -> reconcileConnectorAndHandleResult(reconciliation, host, apiClient, true, connectorName, null) ); - log.debug("{}: {} cluster: required connectors: {}", reconciliation, kind(), desiredConnectors); + LOGGER.debugCr(reconciliation, "{} cluster: required connectors: {}", kind(), desiredConnectors); Stream> createUpdateFutures = desiredConnectors.stream() .map(connector -> reconcileConnectorAndHandleResult(reconciliation, host, apiClient, true, connector.getMetadata().getName(), connector)); @@ -405,7 +408,7 @@ protected Future reconcileConnectors(Reconciliation reconciliation, T conn }).recover(error -> { if (error instanceof
ConnectTimeoutException) { Promise connectorStatuses = Promise.promise(); - log.warn("{}: Failed to connect to the REST API => trying to update the connector status", reconciliation); + LOGGER.warnCr(reconciliation, "Failed to connect to the REST API => trying to update the connector status"); connectorOperator.listAsync(namespace, Optional.of(new LabelSelectorBuilder().addToMatchLabels(Labels.STRIMZI_CLUSTER_LABEL, connectName).build())) .compose(connectors -> CompositeFuture.join( @@ -463,13 +466,13 @@ private Future reconcileConnector(Reconciliation reconciliation, String ho boolean useResources, String connectorName, KafkaConnector connector) { if (connector == null) { if (useResources) { - log.info("{}: deleting connector: {}", reconciliation, connectorName); - return apiClient.delete(host, port, connectorName); + LOGGER.infoCr(reconciliation, "deleting connector: {}", connectorName); + return apiClient.delete(reconciliation, host, port, connectorName); } else { return Future.succeededFuture(); } } else { - log.info("{}: creating/updating connector: {}", reconciliation, connectorName); + LOGGER.infoCr(reconciliation, "creating/updating connector: {}", connectorName); if (connector.getSpec() == null) { return maybeUpdateConnectorStatus(reconciliation, connector, null, new InvalidResourceException("spec property is required")); @@ -509,32 +512,32 @@ private Future reconcileConnector(Reconciliation reconciliation, String ho */ protected Future maybeCreateOrUpdateConnector(Reconciliation reconciliation, String host, KafkaConnectApi apiClient, String connectorName, KafkaConnectorSpec connectorSpec, CustomResource resource) { - return apiClient.getConnectorConfig(new BackOff(200L, 2, 6), host, port, connectorName).compose( + return apiClient.getConnectorConfig(reconciliation, new BackOff(200L, 2, 6), host, port, connectorName).compose( config -> { if (!needsReconfiguring(reconciliation, connectorName, connectorSpec, config)) { - log.debug("{}: Connector {} exists and has desired config, {}=={}", reconciliation, connectorName, connectorSpec.getConfig(), config); - return apiClient.status(host, port, connectorName) + LOGGER.debugCr(reconciliation, "Connector {} exists and has desired config, {}=={}", connectorName, connectorSpec.getConfig(), config); + return apiClient.status(reconciliation, host, port, connectorName) .compose(status -> pauseResume(reconciliation, host, apiClient, connectorName, connectorSpec, status)) .compose(ignored -> maybeRestartConnector(reconciliation, host, apiClient, connectorName, resource, new ArrayList<>())) .compose(conditions -> maybeRestartConnectorTask(reconciliation, host, apiClient, connectorName, resource, conditions)) .compose(conditions -> - apiClient.statusWithBackOff(new BackOff(200L, 2, 10), host, port, connectorName) + apiClient.statusWithBackOff(reconciliation, new BackOff(200L, 2, 10), host, port, connectorName) .compose(createConnectorStatusAndConditions(conditions))) - .compose(status -> updateConnectorTopics(host, apiClient, connectorName, status)); + .compose(status -> updateConnectorTopics(reconciliation, host, apiClient, connectorName, status)); } else { - log.debug("{}: Connector {} exists but does not have desired config, {}!={}", reconciliation, connectorName, connectorSpec.getConfig(), config); + LOGGER.debugCr(reconciliation, "Connector {} exists but does not have desired config, {}!={}", connectorName, connectorSpec.getConfig(), config); return createOrUpdateConnector(reconciliation, host, apiClient, connectorName, connectorSpec) 
.compose(createConnectorStatusAndConditions()) - .compose(status -> updateConnectorTopics(host, apiClient, connectorName, status)); + .compose(status -> updateConnectorTopics(reconciliation, host, apiClient, connectorName, status)); } }, error -> { if (error instanceof ConnectRestException && ((ConnectRestException) error).getStatusCode() == 404) { - log.debug("{}: Connector {} does not exist", reconciliation, connectorName); + LOGGER.debugCr(reconciliation, "Connector {} does not exist", connectorName); return createOrUpdateConnector(reconciliation, host, apiClient, connectorName, connectorSpec) .compose(createConnectorStatusAndConditions()) - .compose(status -> updateConnectorTopics(host, apiClient, connectorName, status)); + .compose(status -> updateConnectorTopics(reconciliation, host, apiClient, connectorName, status)); } else { return Future.failedFuture(error); } @@ -555,20 +558,20 @@ private boolean needsReconfiguring(Reconciliation reconciliation, String connect for (Map.Entry entry : connectorSpec.getConfig().entrySet()) { desired.put(entry.getKey(), entry.getValue() != null ? entry.getValue().toString() : null); } - if (log.isDebugEnabled()) { - log.debug("{}: Desired: {}", reconciliation, new TreeMap<>(desired)); - log.debug("{}: Actual: {}", reconciliation, new TreeMap<>(actual)); + if (LOGGER.isDebugEnabled()) { + LOGGER.debugCr(reconciliation, "Desired: {}", new TreeMap<>(desired)); + LOGGER.debugCr(reconciliation, "Actual: {}", new TreeMap<>(actual)); } return !desired.equals(actual); } protected Future> createOrUpdateConnector(Reconciliation reconciliation, String host, KafkaConnectApi apiClient, String connectorName, KafkaConnectorSpec connectorSpec) { - return apiClient.createOrUpdatePutRequest(host, port, connectorName, asJson(connectorSpec)) - .compose(ignored -> apiClient.statusWithBackOff(new BackOff(200L, 2, 10), host, port, + return apiClient.createOrUpdatePutRequest(reconciliation, host, port, connectorName, asJson(reconciliation, connectorSpec)) + .compose(ignored -> apiClient.statusWithBackOff(reconciliation, new BackOff(200L, 2, 10), host, port, connectorName)) .compose(status -> pauseResume(reconciliation, host, apiClient, connectorName, connectorSpec, status)) - .compose(ignored -> apiClient.status(host, port, connectorName)); + .compose(ignored -> apiClient.status(reconciliation, host, port, connectorName)); } private Future pauseResume(Reconciliation reconciliation, String host, KafkaConnectApi apiClient, String connectorName, KafkaConnectorSpec connectorSpec, Map status) { @@ -579,10 +582,10 @@ private Future pauseResume(Reconciliation reconciliation, String host, Kaf String state = (String) path; boolean shouldPause = Boolean.TRUE.equals(connectorSpec.getPause()); if ("RUNNING".equals(state) && shouldPause) { - log.debug("{}: Pausing connector {}", reconciliation, connectorName); + LOGGER.debugCr(reconciliation, "Pausing connector {}", connectorName); return apiClient.pause(host, port, connectorName); } else if ("PAUSED".equals(state) && !shouldPause) { - log.debug("{}: Resuming connector {}", reconciliation, connectorName); + LOGGER.debugCr(reconciliation, "Resuming connector {}", connectorName); return apiClient.resume(host, port, connectorName); } else { return Future.succeededFuture(); @@ -592,14 +595,14 @@ private Future pauseResume(Reconciliation reconciliation, String host, Kaf private Future> maybeRestartConnector(Reconciliation reconciliation, String host, KafkaConnectApi apiClient, String connectorName, CustomResource resource, List conditions) 
{ if (hasRestartAnnotation(resource, connectorName)) { - log.debug("{}: Restarting connector {}", reconciliation, connectorName); + LOGGER.debugCr(reconciliation, "Restarting connector {}", connectorName); return apiClient.restart(host, port, connectorName) .compose(ignored -> removeRestartAnnotation(reconciliation, resource) .compose(v -> Future.succeededFuture(conditions)), throwable -> { // Ignore restart failures - add a warning and try again on the next reconcile String message = "Failed to restart connector " + connectorName + ". " + throwable.getMessage(); - log.warn("{}: {}", reconciliation, message); + LOGGER.warnCr(reconciliation, message); conditions.add(StatusUtils.buildWarningCondition("RestartConnector", message)); return Future.succeededFuture(conditions); }); @@ -611,14 +614,14 @@ private Future> maybeRestartConnector(Reconciliation reconciliat private Future> maybeRestartConnectorTask(Reconciliation reconciliation, String host, KafkaConnectApi apiClient, String connectorName, CustomResource resource, List conditions) { int taskID = getRestartTaskAnnotationTaskID(resource, connectorName); if (taskID >= 0) { - log.debug("{}: Restarting connector task {}:{}", reconciliation, connectorName, taskID); + LOGGER.debugCr(reconciliation, "Restarting connector task {}:{}", connectorName, taskID); return apiClient.restartTask(host, port, connectorName, taskID) .compose(ignored -> removeRestartTaskAnnotation(reconciliation, resource) .compose(v -> Future.succeededFuture(conditions)), throwable -> { // Ignore restart failures - add a warning and try again on the next reconcile String message = "Failed to restart connector task " + connectorName + ":" + taskID + ". " + throwable.getMessage(); - log.warn("{}: {}", reconciliation, message); + LOGGER.warnCr(reconciliation, message); conditions.add(StatusUtils.buildWarningCondition("RestartConnectorTask", message)); return Future.succeededFuture(conditions); }); @@ -627,8 +630,8 @@ private Future> maybeRestartConnectorTask(Reconciliation reconci } } - private Future updateConnectorTopics(String host, KafkaConnectApi apiClient, String connectorName, ConnectorStatusAndConditions status) { - return apiClient.getConnectorTopics(host, port, connectorName) + private Future updateConnectorTopics(Reconciliation reconciliation, String host, KafkaConnectApi apiClient, String connectorName, ConnectorStatusAndConditions status) { + return apiClient.getConnectorTopics(reconciliation, host, port, connectorName) .compose(updateConnectorStatusAndConditions(status)); } @@ -674,24 +677,24 @@ protected Future removeRestartTaskAnnotation(Reconciliation reconciliation * Patches the KafkaConnector CR to remove the supplied annotation. 
*/ private Future removeAnnotation(Reconciliation reconciliation, KafkaConnector resource, String annotationKey) { - log.debug("{}: Removing annotation {}", reconciliation, annotationKey); + LOGGER.debugCr(reconciliation, "Removing annotation {}", annotationKey); KafkaConnector patchedKafkaConnector = new KafkaConnectorBuilder(resource) .editMetadata() .removeFromAnnotations(annotationKey) .endMetadata() .build(); - return connectorOperator.patchAsync(patchedKafkaConnector) + return connectorOperator.patchAsync(reconciliation, patchedKafkaConnector) .compose(ignored -> Future.succeededFuture()); } - public static void updateStatus(Throwable error, KafkaConnector kafkaConnector2, CrdOperator connectorOperations) { + public static void updateStatus(Reconciliation reconciliation, Throwable error, KafkaConnector kafkaConnector2, CrdOperator connectorOperations) { KafkaConnectorStatus status = new KafkaConnectorStatus(); StatusUtils.setStatusConditionAndObservedGeneration(kafkaConnector2, status, error); StatusDiff diff = new StatusDiff(kafkaConnector2.getStatus(), status); if (!diff.isEmpty()) { KafkaConnector copy = new KafkaConnectorBuilder(kafkaConnector2).build(); copy.setStatus(status); - connectorOperations.updateStatusAsync(copy); + connectorOperations.updateStatusAsync(reconciliation, copy); } } @@ -727,11 +730,11 @@ Function, Future> updateConnectorStat return topics -> Future.succeededFuture(new ConnectorStatusAndConditions(status.statusResult, topics, status.conditions)); } - public Set validate(KafkaConnector resource) { + public Set validate(Reconciliation reconciliation, KafkaConnector resource) { if (resource != null) { Set warningConditions = new LinkedHashSet<>(0); - ResourceVisitor.visit(resource, new ValidationVisitor(resource, log, warningConditions)); + ResourceVisitor.visit(reconciliation, resource, new ValidationVisitor(resource, LOGGER, warningConditions)); return warningConditions; } @@ -742,7 +745,7 @@ public Set validate(KafkaConnector resource) { Future maybeUpdateConnectorStatus(Reconciliation reconciliation, KafkaConnector connector, ConnectorStatusAndConditions connectorStatus, Throwable error) { KafkaConnectorStatus status = new KafkaConnectorStatus(); if (error != null) { - log.warn("{}: Error reconciling connector {}", reconciliation, connector.getMetadata().getName(), error); + LOGGER.warnCr(reconciliation, "Error reconciling connector {}", connector.getMetadata().getName(), error); } Map statusResult = null; @@ -755,7 +758,7 @@ Future maybeUpdateConnectorStatus(Reconciliation reconciliation, KafkaConn connectorStatus.conditions.forEach(condition -> conditions.add(condition)); } - Set unknownAndDeprecatedConditions = validate(connector); + Set unknownAndDeprecatedConditions = validate(reconciliation, connector); unknownAndDeprecatedConditions.forEach(condition -> conditions.add(condition)); if (!Annotations.isReconciliationPausedWithAnnotation(connector)) { @@ -797,7 +800,7 @@ protected int getActualTaskCount(KafkaConnector connector, Map s } } - protected JsonObject asJson(KafkaConnectorSpec spec) { + protected JsonObject asJson(Reconciliation reconciliation, KafkaConnectorSpec spec) { JsonObject connectorConfigJson = new JsonObject(); if (spec.getConfig() != null) { for (Map.Entry cf : spec.getConfig().entrySet()) { @@ -805,7 +808,7 @@ protected JsonObject asJson(KafkaConnectorSpec spec) { if ("connector.class".equals(name) || "tasks.max".equals(name)) { // TODO include resource namespace and name in this message - log.warn("Configuration parameter {} in 
KafkaConnector.spec.config will be ignored and the value from KafkaConnector.spec will be used instead", + LOGGER.warnCr(reconciliation, "Configuration parameter {} in KafkaConnector.spec.config will be ignored and the value from KafkaConnector.spec will be used instead", name); } connectorConfigJson.put(name, cf.getValue()); @@ -845,8 +848,8 @@ protected JsonObject asJson(KafkaConnectorSpec spec) { if ((!(fetchedResource instanceof KafkaConnector)) && (!(fetchedResource instanceof KafkaMirrorMaker2)) && StatusUtils.isResourceV1alpha1(fetchedResource)) { - log.warn("{}: {} {} needs to be upgraded from version {} to 'v1beta1' to use the status field", - reconciliation, fetchedResource.getKind(), fetchedResource.getMetadata().getName(), fetchedResource.getApiVersion()); + LOGGER.warnCr(reconciliation, "{} {} needs to be upgraded from version {} to 'v1beta1' to use the status field", + fetchedResource.getKind(), fetchedResource.getMetadata().getName(), fetchedResource.getApiVersion()); updateStatusPromise.complete(); } else { S currentStatus = fetchedResource.getStatus(); @@ -856,26 +859,26 @@ protected JsonObject asJson(KafkaConnectorSpec spec) { if (!ksDiff.isEmpty()) { T resourceWithNewStatus = copyWithStatus.apply(fetchedResource, desiredStatus); - resourceOperator.updateStatusAsync(resourceWithNewStatus).onComplete(updateRes -> { + resourceOperator.updateStatusAsync(reconciliation, resourceWithNewStatus).onComplete(updateRes -> { if (updateRes.succeeded()) { - log.debug("{}: Completed status update", reconciliation); + LOGGER.debugCr(reconciliation, "Completed status update"); updateStatusPromise.complete(); } else { - log.error("{}: Failed to update status", reconciliation, updateRes.cause()); + LOGGER.errorCr(reconciliation, "Failed to update status", updateRes.cause()); updateStatusPromise.fail(updateRes.cause()); } }); } else { - log.debug("{}: Status did not change", reconciliation); + LOGGER.debugCr(reconciliation, "Status did not change"); updateStatusPromise.complete(); } } } else { - log.error("{}: Current {} resource not found", reconciliation, resource.getKind()); + LOGGER.errorCr(reconciliation, "Current {} resource not found", resource.getKind()); updateStatusPromise.fail("Current " + resource.getKind() + " resource not found"); } } else { - log.error("{}: Failed to get the current {} resource and its status", reconciliation, resource.getKind(), getRes.cause()); + LOGGER.errorCr(reconciliation, "Failed to get the current {} resource and its status", resource.getKind(), getRes.cause()); updateStatusPromise.fail(getRes.cause()); } }); @@ -883,17 +886,17 @@ protected JsonObject asJson(KafkaConnectorSpec spec) { return updateStatusPromise.future(); } - Future> kafkaConnectJmxSecret(String namespace, String name, KafkaConnectCluster connectCluster) { + Future> kafkaConnectJmxSecret(Reconciliation reconciliation, String namespace, String name, KafkaConnectCluster connectCluster) { if (connectCluster.isJmxAuthenticated()) { Future secretFuture = secretOperations.getAsync(namespace, KafkaConnectCluster.jmxSecretName(name)); return secretFuture.compose(res -> { if (res == null) { - return secretOperations.reconcile(namespace, KafkaConnectCluster.jmxSecretName(name), connectCluster.generateJmxSecret()); + return secretOperations.reconcile(reconciliation, namespace, KafkaConnectCluster.jmxSecretName(name), connectCluster.generateJmxSecret()); } return Future.succeededFuture(ReconcileResult.noop(res)); }); } - return secretOperations.reconcile(namespace, 
KafkaConnectCluster.jmxSecretName(name), null); + return secretOperations.reconcile(reconciliation, namespace, KafkaConnectCluster.jmxSecretName(name), null); } } diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperator.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperator.java index c219930ae3..f5941415ee 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperator.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperator.java @@ -85,6 +85,7 @@ import io.strimzi.operator.common.Annotations; import io.strimzi.operator.common.BackOff; import io.strimzi.operator.common.InvalidConfigurationException; +import io.strimzi.operator.common.ReconciliationLogger; import io.strimzi.operator.common.MetricsAndLogging; import io.strimzi.operator.common.PasswordGenerator; import io.strimzi.operator.common.Reconciliation; @@ -112,8 +113,6 @@ import org.apache.kafka.clients.admin.Admin; import org.apache.kafka.common.KafkaException; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import org.quartz.CronExpression; import java.nio.charset.StandardCharsets; @@ -157,7 +156,7 @@ */ @SuppressWarnings({"checkstyle:ClassDataAbstractionCoupling", "checkstyle:ClassFanOutComplexity", "checkstyle:JavaNCSS"}) public class KafkaAssemblyOperator extends AbstractAssemblyOperator, KafkaSpec, KafkaStatus> { - private static final Logger log = LogManager.getLogger(KafkaAssemblyOperator.class.getName()); + private static final ReconciliationLogger LOGGER = ReconciliationLogger.create(KafkaAssemblyOperator.class.getName()); private final long operationTimeoutMs; private final String operatorNamespace; @@ -463,7 +462,7 @@ Future updateStatus(KafkaStatus desiredStatus) { if (kafka != null) { if ((Constants.RESOURCE_GROUP_NAME + "/" + Constants.V1ALPHA1).equals(kafka.getApiVersion())) { - log.warn("{}: The resource needs to be upgraded from version {} to 'v1beta1' to use the status field", reconciliation, kafka.getApiVersion()); + LOGGER.warnCr(reconciliation, "The resource needs to be upgraded from version {} to 'v1beta1' to use the status field", kafka.getApiVersion()); updateStatusPromise.complete(); } else { KafkaStatus currentStatus = kafka.getStatus(); @@ -473,26 +472,26 @@ Future updateStatus(KafkaStatus desiredStatus) { if (!ksDiff.isEmpty()) { Kafka resourceWithNewStatus = new KafkaBuilder(kafka).withStatus(desiredStatus).build(); - crdOperator.updateStatusAsync(resourceWithNewStatus).onComplete(updateRes -> { + crdOperator.updateStatusAsync(reconciliation, resourceWithNewStatus).onComplete(updateRes -> { if (updateRes.succeeded()) { - log.debug("{}: Completed status update", reconciliation); + LOGGER.debugCr(reconciliation, "Completed status update"); updateStatusPromise.complete(); } else { - log.error("{}: Failed to update status", reconciliation, updateRes.cause()); + LOGGER.errorCr(reconciliation, "Failed to update status", updateRes.cause()); updateStatusPromise.fail(updateRes.cause()); } }); } else { - log.debug("{}: Status did not change", reconciliation); + LOGGER.debugCr(reconciliation, "Status did not change"); updateStatusPromise.complete(); } } } else { - log.error("{}: Current Kafka resource not found", reconciliation); + LOGGER.errorCr(reconciliation, "Current Kafka resource not found"); updateStatusPromise.fail("Current Kafka resource not found"); } } else { - 
log.error("{}: Failed to get the current Kafka resource and its status", reconciliation, getRes.cause()); + LOGGER.errorCr(reconciliation, "Failed to get the current Kafka resource and its status", getRes.cause()); updateStatusPromise.fail(getRes.cause()); } }); @@ -513,7 +512,7 @@ Future initialStatus() { Kafka kafka = getRes.result(); if (kafka != null && kafka.getStatus() == null) { - log.debug("{}: Setting the initial status for a new resource", reconciliation); + LOGGER.debugCr(reconciliation, "Setting the initial status for a new resource"); Condition deployingCondition = new ConditionBuilder() .withLastTransitionTime(StatusUtils.iso8601(dateSupplier())) @@ -529,11 +528,11 @@ Future initialStatus() { updateStatus(initialStatus).map(this).onComplete(initialStatusPromise); } else { - log.debug("{}: Status is already set. No need to set initial status", reconciliation); + LOGGER.debugCr(reconciliation, "Status is already set. No need to set initial status"); initialStatusPromise.complete(this); } } else { - log.error("{}: Failed to get the current Kafka resource and its status", reconciliation, getRes.cause()); + LOGGER.errorCr(reconciliation, "Failed to get the current Kafka resource and its status", getRes.cause()); initialStatusPromise.fail(getRes.cause()); } }); @@ -628,11 +627,11 @@ Future reconcileCas(Supplier dateSupplier) { clusterCaCertAnnotations = kafkaAssembly.getSpec().getKafka().getTemplate().getClusterCaCert().getMetadata().getAnnotations(); } - this.clusterCa = new ClusterCa(certManager, passwordGenerator, name, clusterCaCertSecret, clusterCaKeySecret, + this.clusterCa = new ClusterCa(reconciliation, certManager, passwordGenerator, name, clusterCaCertSecret, + clusterCaKeySecret, ModelUtils.getCertificateValidity(clusterCaConfig), ModelUtils.getRenewalDays(clusterCaConfig), - clusterCaConfig == null || clusterCaConfig.isGenerateCertificateAuthority(), - clusterCaConfig != null ? clusterCaConfig.getCertificateExpirationPolicy() : null); + clusterCaConfig == null || clusterCaConfig.isGenerateCertificateAuthority(), clusterCaConfig != null ? clusterCaConfig.getCertificateExpirationPolicy() : null); clusterCa.createRenewOrReplace( reconciliation.namespace(), reconciliation.name(), caLabels.toMap(), clusterCaCertLabels, clusterCaCertAnnotations, @@ -646,13 +645,13 @@ Future reconcileCas(Supplier dateSupplier) { // When we are not supposed to generate the CA but it does not exist, we should just throw an error checkCustomCaSecret(clientsCaConfig, clientsCaCertSecret, clientsCaKeySecret, "Clients CA"); - this.clientsCa = new ClientsCa(certManager, passwordGenerator, - clientsCaCertName, clientsCaCertSecret, - clientsCaKeyName, clientsCaKeySecret, + this.clientsCa = new ClientsCa(reconciliation, certManager, + passwordGenerator, clientsCaCertName, + clientsCaCertSecret, clientsCaKeyName, + clientsCaKeySecret, ModelUtils.getCertificateValidity(clientsCaConfig), ModelUtils.getRenewalDays(clientsCaConfig), - clientsCaConfig == null || clientsCaConfig.isGenerateCertificateAuthority(), - clientsCaConfig != null ? clientsCaConfig.getCertificateExpirationPolicy() : null); + clientsCaConfig == null || clientsCaConfig.isGenerateCertificateAuthority(), clientsCaConfig != null ? clientsCaConfig.getCertificateExpirationPolicy() : null); clientsCa.createRenewOrReplace(reconciliation.namespace(), reconciliation.name(), caLabels.toMap(), emptyMap(), emptyMap(), clientsCaConfig != null && !clientsCaConfig.isGenerateSecretOwnerReference() ? 
null : ownerRef, @@ -661,14 +660,14 @@ Future reconcileCas(Supplier dateSupplier) { List secretReconciliations = new ArrayList<>(2); if (clusterCaConfig == null || clusterCaConfig.isGenerateCertificateAuthority()) { - Future clusterSecretReconciliation = secretOperations.reconcile(reconciliation.namespace(), clusterCaCertName, this.clusterCa.caCertSecret()) - .compose(ignored -> secretOperations.reconcile(reconciliation.namespace(), clusterCaKeyName, this.clusterCa.caKeySecret())); + Future clusterSecretReconciliation = secretOperations.reconcile(reconciliation, reconciliation.namespace(), clusterCaCertName, this.clusterCa.caCertSecret()) + .compose(ignored -> secretOperations.reconcile(reconciliation, reconciliation.namespace(), clusterCaKeyName, this.clusterCa.caKeySecret())); secretReconciliations.add(clusterSecretReconciliation); } if (clientsCaConfig == null || clientsCaConfig.isGenerateCertificateAuthority()) { - Future clientsSecretReconciliation = secretOperations.reconcile(reconciliation.namespace(), clientsCaCertName, this.clientsCa.caCertSecret()) - .compose(ignored -> secretOperations.reconcile(reconciliation.namespace(), clientsCaKeyName, this.clientsCa.caKeySecret())); + Future clientsSecretReconciliation = secretOperations.reconcile(reconciliation, reconciliation.namespace(), clientsCaCertName, this.clientsCa.caCertSecret()) + .compose(ignored -> secretOperations.reconcile(reconciliation, reconciliation.namespace(), clientsCaKeyName, this.clientsCa.caKeySecret())); secretReconciliations.add(clientsSecretReconciliation); } @@ -721,12 +720,12 @@ Future rollingUpdateForNewCaKey() { if (!reason.isEmpty()) { Future zkRollFuture; Function> rollPodAndLogReason = pod -> { - log.debug("{}: Rolling Pod {} to {}", reconciliation, pod.getMetadata().getName(), reason); + LOGGER.debugCr(reconciliation, "Rolling Pod {} to {}", pod.getMetadata().getName(), reason); return reason; }; if (this.clusterCa.keyReplaced()) { zkRollFuture = zkSetOperations.getAsync(namespace, ZookeeperCluster.zookeeperClusterName(name)) - .compose(sts -> zkSetOperations.maybeRollingUpdate(sts, rollPodAndLogReason, + .compose(sts -> zkSetOperations.maybeRollingUpdate(reconciliation, sts, rollPodAndLogReason, clusterCa.caCertSecret(), oldCoSecret)); } else { @@ -734,7 +733,7 @@ Future rollingUpdateForNewCaKey() { } return zkRollFuture .compose(i -> kafkaSetOperations.getAsync(namespace, KafkaCluster.kafkaClusterName(name))) - .compose(sts -> new KafkaRoller(vertx, reconciliation, podOperations, 1_000, operationTimeoutMs, + .compose(sts -> new KafkaRoller(reconciliation, vertx, podOperations, 1_000, operationTimeoutMs, () -> new BackOff(250, 2, 10), sts, clusterCa.caCertSecret(), oldCoSecret, adminClientProvider, kafkaCluster.getBrokersConfiguration(), kafkaLogging, kafkaCluster.getKafkaVersion(), true) .rollingRestart(rollPodAndLogReason)) @@ -758,8 +757,8 @@ Future rollDeploymentIfExists(String deploymentName, String reasons) { return deploymentOperations.getAsync(namespace, deploymentName) .compose(dep -> { if (dep != null) { - log.debug("{}: Rolling Deployment {} to {}", reconciliation, deploymentName, reasons); - return deploymentOperations.rollingUpdate(namespace, deploymentName, operationTimeoutMs); + LOGGER.debugCr(reconciliation, "Rolling Deployment {} to {}", deploymentName, reasons); + return deploymentOperations.rollingUpdate(reconciliation, namespace, deploymentName, operationTimeoutMs); } else { return Future.succeededFuture(); } @@ -787,7 +786,7 @@ Future kafkaManualPodRollingUpdate(StatefulSet sts) { 
if (!podsToRoll.isEmpty()) { return maybeRollKafka(sts, pod -> { if (pod != null && podsToRoll.contains(pod.getMetadata().getName())) { - log.debug("{}: Rolling Kafka pod {} due to manual rolling update annotation on a pod", reconciliation, pod.getMetadata().getName()); + LOGGER.debugCr(reconciliation, "Rolling Kafka pod {} due to manual rolling update annotation on a pod", pod.getMetadata().getName()); return singletonList("manual rolling update annotation on a pod"); } else { return new ArrayList<>(); @@ -818,8 +817,8 @@ Future kafkaManualRollingUpdate() { if (pod == null) { throw new ConcurrentDeletionException("Unexpectedly pod no longer exists during roll of StatefulSet."); } - log.debug("{}: Rolling Kafka pod {} due to manual rolling update annotation", - reconciliation, pod.getMetadata().getName()); + LOGGER.debugCr(reconciliation, "Rolling Kafka pod {} due to manual rolling update annotation", + pod.getMetadata().getName()); return singletonList("manual rolling update"); }); } else { @@ -855,9 +854,9 @@ Future zkManualPodRollingUpdate(StatefulSet sts) { } if (!podsToRoll.isEmpty()) { - return zkSetOperations.maybeRollingUpdate(sts, pod -> { + return zkSetOperations.maybeRollingUpdate(reconciliation, sts, pod -> { if (pod != null && podsToRoll.contains(pod.getMetadata().getName())) { - log.debug("{}: Rolling ZooKeeper pod {} due to manual rolling update annotation on a pod", reconciliation, pod.getMetadata().getName()); + LOGGER.debugCr(reconciliation, "Rolling ZooKeeper pod {} due to manual rolling update annotation on a pod", pod.getMetadata().getName()); return singletonList("manual rolling update annotation on a pod"); } else { return null; @@ -884,9 +883,9 @@ Future zkManualRollingUpdate() { if (sts != null) { if (Annotations.booleanAnnotation(sts, Annotations.ANNO_STRIMZI_IO_MANUAL_ROLLING_UPDATE, false)) { // User trigger rolling update of the whole StatefulSet - return zkSetOperations.maybeRollingUpdate(sts, pod -> { - log.debug("{}: Rolling Zookeeper pod {} due to manual rolling update", - reconciliation, pod.getMetadata().getName()); + return zkSetOperations.maybeRollingUpdate(reconciliation, sts, pod -> { + LOGGER.debugCr(reconciliation, "Rolling Zookeeper pod {} due to manual rolling update", + pod.getMetadata().getName()); return singletonList("manual rolling update"); }); } else { @@ -905,7 +904,7 @@ Future zkManualRollingUpdate() { Future zkVersionChange() { if (versionChange.isNoop()) { - log.debug("Kafka.spec.kafka.version is unchanged therefore no change to Zookeeper is required"); + LOGGER.debugCr(reconciliation, "Kafka.spec.kafka.version is unchanged therefore no change to Zookeeper is required"); } else { String versionChangeType; @@ -916,7 +915,7 @@ Future zkVersionChange() { } if (versionChange.requiresZookeeperChange()) { - log.info("Kafka {} from {} to {} requires Zookeeper {} from {} to {}", + LOGGER.infoCr(reconciliation, "Kafka {} from {} to {} requires Zookeeper {} from {} to {}", versionChangeType, versionChange.from().version(), versionChange.to().version(), @@ -924,7 +923,7 @@ Future zkVersionChange() { versionChange.from().zookeeperVersion(), versionChange.to().zookeeperVersion()); } else { - log.info("Kafka {} from {} to {} requires no change in Zookeeper version", + LOGGER.infoCr(reconciliation, "Kafka {} from {} to {} requires no change in Zookeeper version", versionChangeType, versionChange.from().version(), versionChange.to().version()); @@ -933,7 +932,7 @@ Future zkVersionChange() { // Get the zookeeper image currently set in the Kafka CR or, 
if that is not set, the image from the target Kafka version String newZkImage = versions.kafkaImage(kafkaAssembly.getSpec().getZookeeper().getImage(), versionChange.to().version()); - log.debug("Setting new Zookeeper image: " + newZkImage); + LOGGER.debugCr(reconciliation, "Setting new Zookeeper image: " + newZkImage); this.zkCluster.setImage(newZkImage); } @@ -952,7 +951,7 @@ public Future waitForQuiescence(StatefulSet sts) { boolean notUpToDate = !isPodUpToDate(sts, pod); List reason = emptyList(); if (notUpToDate) { - log.debug("Rolling pod {} prior to upgrade", pod.getMetadata().getName()); + LOGGER.debugCr(reconciliation, "Rolling pod {} prior to upgrade", pod.getMetadata().getName()); reason = singletonList("upgrade quiescence"); } return reason; @@ -973,7 +972,7 @@ public Future waitForQuiescence(StatefulSet sts) { */ Future prepareVersionChange() { if (versionChange.isNoop()) { - log.debug("{}: No Kafka version change", reconciliation); + LOGGER.debugCr(reconciliation, "No Kafka version change"); if (kafkaCluster.getInterBrokerProtocolVersion() == null) { // When IBPV is not set, we set it to current Kafka version @@ -981,7 +980,7 @@ Future prepareVersionChange() { if (highestInterBrokerProtocolVersion != null && !kafkaCluster.getKafkaVersion().protocolVersion().equals(highestInterBrokerProtocolVersion)) { - log.info("{}: Upgrading Kafka inter.broker.protocol.version from {} to {}", reconciliation, highestInterBrokerProtocolVersion, kafkaCluster.getKafkaVersion().protocolVersion()); + LOGGER.infoCr(reconciliation, "Upgrading Kafka inter.broker.protocol.version from {} to {}", highestInterBrokerProtocolVersion, kafkaCluster.getKafkaVersion().protocolVersion()); if (kafkaCluster.getLogMessageFormatVersion() == null && highestLogMessageFormatVersion != null) { @@ -998,20 +997,20 @@ Future prepareVersionChange() { if (highestLogMessageFormatVersion != null && !kafkaCluster.getKafkaVersion().messageVersion().equals(highestLogMessageFormatVersion)) { - log.info("{}: Upgrading Kafka log.message.format.version from {} to {}", reconciliation, highestLogMessageFormatVersion, kafkaCluster.getKafkaVersion().messageVersion()); + LOGGER.infoCr(reconciliation, "Upgrading Kafka log.message.format.version from {} to {}", highestLogMessageFormatVersion, kafkaCluster.getKafkaVersion().messageVersion()); } } return Future.succeededFuture(this); } else { if (versionChange.isUpgrade()) { - log.info("Kafka is upgrading from {} to {}", versionChange.from().version(), versionChange.to().version()); + LOGGER.infoCr(reconciliation, "Kafka is upgrading from {} to {}", versionChange.from().version(), versionChange.to().version()); // We make sure that the highest log.message.format.version or inter.broker.protocol.version version // used by any of the brokers is not higher than the broker version we upgrade from.
if ((highestLogMessageFormatVersion != null && compareDottedVersions(versionChange.from().messageVersion(), highestLogMessageFormatVersion) < 0) || (highestInterBrokerProtocolVersion != null && compareDottedVersions(versionChange.from().protocolVersion(), highestInterBrokerProtocolVersion) < 0)) { - log.warn("log.message.format.version ({}) and inter.broker.protocol.version ({}) used by the brokers have to be lower or equal to the Kafka broker version we upgrade from ({})", highestLogMessageFormatVersion, highestInterBrokerProtocolVersion, versionChange.from().version()); + LOGGER.warnCr(reconciliation, "log.message.format.version ({}) and inter.broker.protocol.version ({}) used by the brokers have to be lower or equal to the Kafka broker version we upgrade from ({})", highestLogMessageFormatVersion, highestInterBrokerProtocolVersion, versionChange.from().version()); throw new KafkaUpgradeException("log.message.format.version (" + highestLogMessageFormatVersion + ") and inter.broker.protocol.version (" + highestInterBrokerProtocolVersion + ") used by the brokers have to be lower or equal to the Kafka broker version we upgrade from (" + versionChange.from().version() + ")"); } @@ -1035,7 +1034,7 @@ Future prepareVersionChange() { } } else { // Has to be a downgrade - log.info("Kafka is downgrading from {} to {}", versionChange.from().version(), versionChange.to().version()); + LOGGER.infoCr(reconciliation, "Kafka is downgrading from {} to {}", versionChange.from().version(), versionChange.to().version()); // The currently used log.message.format.version and inter.broker.protocol.version cannot be higher // than the version we are downgrading to. If it is we fail the reconciliation. If they are not set, @@ -1045,7 +1044,7 @@ Future prepareVersionChange() { || compareDottedVersions(versionChange.to().messageVersion(), highestLogMessageFormatVersion) < 0 || highestInterBrokerProtocolVersion == null || compareDottedVersions(versionChange.to().protocolVersion(), highestInterBrokerProtocolVersion) < 0) { - log.warn("log.message.format.version ({}) and inter.broker.protocol.version ({}) used by the brokers have to be set and be lower or equal to the Kafka broker version we downgrade to ({})", highestLogMessageFormatVersion, highestInterBrokerProtocolVersion, versionChange.to().version()); + LOGGER.warnCr(reconciliation, "log.message.format.version ({}) and inter.broker.protocol.version ({}) used by the brokers have to be set and be lower or equal to the Kafka broker version we downgrade to ({})", highestLogMessageFormatVersion, highestInterBrokerProtocolVersion, versionChange.to().version()); throw new KafkaUpgradeException("log.message.format.version (" + highestLogMessageFormatVersion + ") and inter.broker.protocol.version (" + highestInterBrokerProtocolVersion + ") used by the brokers have to be set and be lower or equal to the Kafka broker version we downgrade to (" + versionChange.to().version() + ")"); } @@ -1069,7 +1068,7 @@ Future prepareVersionChange() { // validation. But we still double check it as safety. 
if (compareDottedVersions(versionChange.to().messageVersion(), desiredLogMessageFormat) < 0 || compareDottedVersions(versionChange.to().protocolVersion(), desiredInterBrokerProtocol) < 0) { - log.warn("log.message.format.version ({}) and inter.broker.protocol.version ({}) used in the Kafka CR have to be set and be lower or equal to the Kafka broker version we downgrade to ({})", highestLogMessageFormatVersion, highestInterBrokerProtocolVersion, versionChange.to().version()); + LOGGER.warnCr(reconciliation, "log.message.format.version ({}) and inter.broker.protocol.version ({}) used in the Kafka CR have to be set and be lower or equal to the Kafka broker version we downgrade to ({})", highestLogMessageFormatVersion, highestInterBrokerProtocolVersion, versionChange.to().version()); throw new KafkaUpgradeException("log.message.format.version and inter.broker.protocol.version used in the Kafka CR have to be set and be lower or equal to the Kafka broker version we downgrade to"); } } @@ -1121,7 +1120,7 @@ Future maybeRollKafka(StatefulSet sts, Function> podNeed */ Future maybeRollKafka(StatefulSet sts, Function> podNeedsRestart, boolean allowReconfiguration) { return adminClientSecrets() - .compose(compositeFuture -> new KafkaRoller(vertx, reconciliation, podOperations, 1_000, operationTimeoutMs, + .compose(compositeFuture -> new KafkaRoller(reconciliation, vertx, podOperations, 1_000, operationTimeoutMs, () -> new BackOff(250, 2, 10), sts, compositeFuture.resultAt(0), compositeFuture.resultAt(1), adminClientProvider, kafkaCluster.getBrokersConfiguration(), kafkaLogging, kafkaCluster.getKafkaVersion(), allowReconfiguration) .rollingRestart(podNeedsRestart)); @@ -1136,7 +1135,7 @@ Future getZookeeperDescription() { this.zkCurrentReplicas = sts.getSpec().getReplicas(); } - this.zkCluster = ZookeeperCluster.fromCrd(kafkaAssembly, versions, oldStorage, zkCurrentReplicas != null ? zkCurrentReplicas : 0); + this.zkCluster = ZookeeperCluster.fromCrd(reconciliation, kafkaAssembly, versions, oldStorage, zkCurrentReplicas != null ? zkCurrentReplicas : 0); // We are upgrading from previous Strimzi version which has a sidecars. The older sidecar // configurations allowed only older versions of TLS to be used by default. 
But the Zookeeper @@ -1152,7 +1151,7 @@ Future getZookeeperDescription() { zkCluster.getConfiguration().setConfigOption("ssl.enabledProtocols", "TLSv1.2,TLSv1.1,TLSv1"); } - return Util.metricsAndLogging(configMapOperations, kafkaAssembly.getMetadata().getNamespace(), + return Util.metricsAndLogging(reconciliation, configMapOperations, kafkaAssembly.getMetadata().getNamespace(), zkCluster.getLogging(), zkCluster.getMetricsConfigInCm()); }) @@ -1181,21 +1180,21 @@ Future withVoid(Future r) { } Future zookeeperServiceAccount() { - return withVoid(serviceAccountOperations.reconcile(namespace, + return withVoid(serviceAccountOperations.reconcile(reconciliation, namespace, ZookeeperCluster.containerServiceAccountName(zkCluster.getCluster()), zkCluster.generateServiceAccount())); } Future zkService() { - return withVoid(serviceOperations.reconcile(namespace, zkCluster.getServiceName(), zkCluster.generateService())); + return withVoid(serviceOperations.reconcile(reconciliation, namespace, zkCluster.getServiceName(), zkCluster.generateService())); } Future zkHeadlessService() { - return withVoid(serviceOperations.reconcile(namespace, zkCluster.getHeadlessServiceName(), zkCluster.generateHeadlessService())); + return withVoid(serviceOperations.reconcile(reconciliation, namespace, zkCluster.getHeadlessServiceName(), zkCluster.generateHeadlessService())); } Future zkAncillaryCm() { - return withVoid(configMapOperations.reconcile(namespace, zkCluster.getAncillaryConfigMapName(), zkMetricsAndLogsConfigMap)); + return withVoid(configMapOperations.reconcile(reconciliation, namespace, zkCluster.getAncillaryConfigMapName(), zkMetricsAndLogsConfigMap)); } /** @@ -1207,7 +1206,7 @@ Future zkAncillaryCm() { */ Future updateCertificateSecretWithDiff(String secretName, Secret secret) { return secretOperations.getAsync(namespace, secretName) - .compose(oldSecret -> secretOperations.reconcile(namespace, secretName, secret) + .compose(oldSecret -> secretOperations.reconcile(reconciliation, namespace, secretName, secret) .map(res -> { if (res instanceof ReconcileResult.Patched) { // The secret is patched and some changes to the existing certificates actually occured @@ -1228,24 +1227,24 @@ Future zkNodesSecret() { } Future zkNetPolicy() { - return withVoid(networkPolicyOperator.reconcile(namespace, ZookeeperCluster.policyName(name), zkCluster.generateNetworkPolicy(operatorNamespace, operatorNamespaceLabels))); + return withVoid(networkPolicyOperator.reconcile(reconciliation, namespace, ZookeeperCluster.policyName(name), zkCluster.generateNetworkPolicy(operatorNamespace, operatorNamespaceLabels))); } Future zkPodDisruptionBudget() { - return withVoid(podDisruptionBudgetOperator.reconcile(namespace, zkCluster.getName(), zkCluster.generatePodDisruptionBudget())); + return withVoid(podDisruptionBudgetOperator.reconcile(reconciliation, namespace, zkCluster.getName(), zkCluster.generatePodDisruptionBudget())); } Future zkStatefulSet() { StatefulSet zkSts = zkCluster.generateStatefulSet(pfa.isOpenshift(), imagePullPolicy, imagePullSecrets); Annotations.annotations(zkSts.getSpec().getTemplate()).put(Ca.ANNO_STRIMZI_IO_CLUSTER_CA_CERT_GENERATION, String.valueOf(getCaCertGeneration(this.clusterCa))); Annotations.annotations(zkSts.getSpec().getTemplate()).put(Annotations.ANNO_STRIMZI_LOGGING_HASH, zkLoggingHash); - return withZkDiff(zkSetOperations.reconcile(namespace, zkCluster.getName(), zkSts)); + return withZkDiff(zkSetOperations.reconcile(reconciliation, namespace, zkCluster.getName(), zkSts)); } Future zkRollingUpdate() 
{ // Scale-down and Scale-up might have change the STS. we should get a fresh one. return zkSetOperations.getAsync(namespace, ZookeeperCluster.zookeeperClusterName(name)) - .compose(sts -> zkSetOperations.maybeRollingUpdate(sts, + .compose(sts -> zkSetOperations.maybeRollingUpdate(reconciliation, sts, pod -> getReasonsToRestartPod(zkDiffs.resource(), pod, existingZookeeperCertsChanged, this.clusterCa))) .map(this); } @@ -1303,7 +1302,7 @@ Future zkScaler(int connectToReplicas) { DnsNameGenerator.podDnsNameWithoutClusterDomain(namespace, KafkaResources.zookeeperHeadlessServiceName(name), zkCluster.getPodName(i)); - ZookeeperScaler zkScaler = zkScalerProvider.createZookeeperScaler(vertx, zkConnectionString(connectToReplicas, zkNodeAddress), zkNodeAddress, clusterCaCertSecret, coKeySecret, operationTimeoutMs); + ZookeeperScaler zkScaler = zkScalerProvider.createZookeeperScaler(reconciliation, vertx, zkConnectionString(connectToReplicas, zkNodeAddress), zkNodeAddress, clusterCaCertSecret, coKeySecret, operationTimeoutMs); return Future.succeededFuture(zkScaler); }); @@ -1314,7 +1313,7 @@ Future zkScalingUp() { if (zkCurrentReplicas != null && zkCurrentReplicas < desired) { - log.info("{}: Scaling Zookeeper up from {} to {} replicas", reconciliation, zkCurrentReplicas, desired); + LOGGER.infoCr(reconciliation, "Scaling Zookeeper up from {} to {} replicas", zkCurrentReplicas, desired); return zkScaler(zkCurrentReplicas) .compose(zkScaler -> { @@ -1327,7 +1326,7 @@ Future zkScalingUp() { if (res.succeeded()) { scalingPromise.complete(res.result()); } else { - log.warn("{}: Failed to scale Zookeeper", reconciliation, res.cause()); + LOGGER.warnCr(reconciliation, "Failed to scale Zookeeper", res.cause()); scalingPromise.fail(res.cause()); } }); @@ -1342,8 +1341,8 @@ Future zkScalingUp() { Future zkScalingUpByOne(ZookeeperScaler zkScaler, int current, int desired) { if (current < desired) { - return zkSetOperations.scaleUp(namespace, zkCluster.getName(), current + 1) - .compose(ignore -> podOperations.readiness(namespace, zkCluster.getPodName(current), 1_000, operationTimeoutMs)) + return zkSetOperations.scaleUp(reconciliation, namespace, zkCluster.getName(), current + 1) + .compose(ignore -> podOperations.readiness(reconciliation, namespace, zkCluster.getPodName(current), 1_000, operationTimeoutMs)) .compose(ignore -> zkScaler.scale(current + 1)) .compose(ignore -> zkScalingUpByOne(zkScaler, current + 1, desired)); } else { @@ -1357,7 +1356,7 @@ Future zkScalingDown() { if (zkCurrentReplicas != null && zkCurrentReplicas > desired) { // With scaling - log.info("{}: Scaling Zookeeper down from {} to {} replicas", reconciliation, zkCurrentReplicas, desired); + LOGGER.infoCr(reconciliation, "Scaling Zookeeper down from {} to {} replicas", zkCurrentReplicas, desired); // No need to check for pod readiness since we run right after the readiness check return zkScaler(desired) @@ -1371,7 +1370,7 @@ Future zkScalingDown() { if (res.succeeded()) { scalingPromise.complete(res.result()); } else { - log.warn("{}: Failed to scale Zookeeper", reconciliation, res.cause()); + LOGGER.warnCr(reconciliation, "Failed to scale Zookeeper", res.cause()); scalingPromise.fail(res.cause()); } }); @@ -1388,7 +1387,7 @@ Future zkScalingDownByOne(ZookeeperScaler zkScaler, int cur if (current > desired) { return podsReady(zkCluster, current - 1) .compose(ignore -> zkScaler.scale(current - 1)) - .compose(ignore -> zkSetOperations.scaleDown(namespace, zkCluster.getName(), current - 1)) + .compose(ignore -> 
zkSetOperations.scaleDown(reconciliation, namespace, zkCluster.getName(), current - 1)) .compose(ignore -> zkScalingDownByOne(zkScaler, current - 1, desired)); } else { return Future.succeededFuture(this); @@ -1398,7 +1397,7 @@ Future zkScalingDownByOne(ZookeeperScaler zkScaler, int cur Future zkScalingCheck() { // No scaling, but we should check the configuration // This can cover any previous failures in the Zookeeper reconfiguration - log.debug("{}: Verifying that Zookeeper is configured to run with {} replicas", reconciliation, zkCurrentReplicas); + LOGGER.debugCr(reconciliation, "Verifying that Zookeeper is configured to run with {} replicas", zkCurrentReplicas); // No need to check for pod readiness since we run right after the readiness check return zkScaler(zkCluster.getReplicas()) @@ -1411,7 +1410,7 @@ Future zkScalingCheck() { if (res.succeeded()) { scalingPromise.complete(this); } else { - log.warn("{}: Failed to verify Zookeeper configuration", res.cause()); + LOGGER.warnCr(reconciliation, "Failed to verify Zookeeper configuration", res.cause()); scalingPromise.fail(res.cause()); } }); @@ -1421,11 +1420,11 @@ Future zkScalingCheck() { } Future zkServiceEndpointReadiness() { - return withVoid(serviceOperations.endpointReadiness(namespace, zkCluster.getServiceName(), 1_000, operationTimeoutMs)); + return withVoid(serviceOperations.endpointReadiness(reconciliation, namespace, zkCluster.getServiceName(), 1_000, operationTimeoutMs)); } Future zkHeadlessServiceEndpointReadiness() { - return withVoid(serviceOperations.endpointReadiness(namespace, zkCluster.getHeadlessServiceName(), 1_000, operationTimeoutMs)); + return withVoid(serviceOperations.endpointReadiness(reconciliation, namespace, zkCluster.getHeadlessServiceName(), 1_000, operationTimeoutMs)); } Future zkGenerateCertificates(Supplier dateSupplier) { @@ -1456,7 +1455,7 @@ Future zkGenerateCertificates(Supplier dateSupplier) this.kafkaStsAlreadyExists = true; } - this.kafkaCluster = KafkaCluster.fromCrd(kafkaAssembly, versions, oldStorage, kafkaCurrentReplicas); + this.kafkaCluster = KafkaCluster.fromCrd(reconciliation, kafkaAssembly, versions, oldStorage, kafkaCurrentReplicas); this.kafkaBootstrapDnsName.addAll(ListenersUtils.alternativeNames(kafkaCluster.getListeners())); //return Future.succeededFuture(this); @@ -1522,7 +1521,7 @@ Future zkGenerateCertificates(Supplier dateSupplier) // Either Pods or StatefulSet already exist. But none of them contains the version // annotation. This suggests they are not created by the current versions of Strimzi. // Without the annotation, we cannot detect the Kafka version and decide on upgrade. - log.warn("Kafka Pods or StatefulSet exist, but do not contain the {} annotation to detect their version. Kafka upgrade cannot be detected.", ANNO_STRIMZI_IO_KAFKA_VERSION); + LOGGER.warnCr(reconciliation, "Kafka Pods or StatefulSet exist, but do not contain the {} annotation to detect their version. Kafka upgrade cannot be detected.", ANNO_STRIMZI_IO_KAFKA_VERSION); throw new KafkaUpgradeException("Kafka Pods or StatefulSet exist, but do not contain the " + ANNO_STRIMZI_IO_KAFKA_VERSION + " annotation to detect their version. 
Kafka upgrade cannot be detected."); } } else if (lowestKafkaVersion.equals(highestKafkaVersion)) { @@ -1548,7 +1547,7 @@ Future withKafkaDiff(Future> r } Future kafkaInitServiceAccount() { - return withVoid(serviceAccountOperations.reconcile(namespace, + return withVoid(serviceAccountOperations.reconcile(reconciliation, namespace, kafkaCluster.getServiceAccountName(), kafkaCluster.generateServiceAccount())); } @@ -1556,8 +1555,8 @@ Future kafkaInitServiceAccount() { Future kafkaInitClusterRoleBinding() { ClusterRoleBinding desired = kafkaCluster.generateClusterRoleBinding(namespace); - return withVoid(withIgnoreRbacError( - clusterRoleBindingOperations.reconcile( + return withVoid(withIgnoreRbacError(reconciliation, + clusterRoleBindingOperations.reconcile(reconciliation, KafkaResources.initContainerClusterRoleBindingName(name, namespace), desired), desired @@ -1565,7 +1564,7 @@ Future kafkaInitClusterRoleBinding() { } Future kafkaScaleDown() { - return withVoid(kafkaSetOperations.scaleDown(namespace, kafkaCluster.getName(), kafkaCluster.getReplicas())); + return withVoid(kafkaSetOperations.scaleDown(reconciliation, namespace, kafkaCluster.getName(), kafkaCluster.getReplicas())); } /** @@ -1589,20 +1588,20 @@ Future kafkaServices() { List serviceFutures = new ArrayList<>(services.size()); List existingServiceNames = existingServices.stream().map(svc -> svc.getMetadata().getName()).collect(Collectors.toList()); - log.debug("{}: Reconciling existing Services {} against the desired services", reconciliation, existingServiceNames); + LOGGER.debugCr(reconciliation, "Reconciling existing Services {} against the desired services", existingServiceNames); // Update desired services for (Service service : services) { String serviceName = service.getMetadata().getName(); existingServiceNames.remove(serviceName); - serviceFutures.add(serviceOperations.reconcile(namespace, serviceName, service)); + serviceFutures.add(serviceOperations.reconcile(reconciliation, namespace, serviceName, service)); } - log.debug("{}: Services {} should be deleted", reconciliation, existingServiceNames); + LOGGER.debugCr(reconciliation, "Services {} should be deleted", existingServiceNames); // Delete services which match our selector but are not desired anymore for (String serviceName : existingServiceNames) { - serviceFutures.add(serviceOperations.reconcile(namespace, serviceName, null)); + serviceFutures.add(serviceOperations.reconcile(reconciliation, namespace, serviceName, null)); } return CompositeFuture.join(serviceFutures); @@ -1631,20 +1630,20 @@ Future kafkaRoutes() { List routeFutures = new ArrayList<>(routes.size()); List existingRouteNames = existingRoutes.stream().map(route -> route.getMetadata().getName()).collect(Collectors.toList()); - log.debug("{}: Reconciling existing Routes {} against the desired routes", reconciliation, existingRouteNames); + LOGGER.debugCr(reconciliation, "Reconciling existing Routes {} against the desired routes", existingRouteNames); // Update desired routes for (Route route : routes) { String routeName = route.getMetadata().getName(); existingRouteNames.remove(routeName); - routeFutures.add(routeOperations.reconcile(namespace, routeName, route)); + routeFutures.add(routeOperations.reconcile(reconciliation, namespace, routeName, route)); } - log.debug("{}: Routes {} should be deleted", reconciliation, existingRouteNames); + LOGGER.debugCr(reconciliation, "Routes {} should be deleted", existingRouteNames); // Delete routes which match our selector but are not desired anymore for 
(String routeName : existingRouteNames) { - routeFutures.add(routeOperations.reconcile(namespace, routeName, null)); + routeFutures.add(routeOperations.reconcile(reconciliation, namespace, routeName, null)); } return CompositeFuture.join(routeFutures); @@ -1652,7 +1651,7 @@ Future kafkaRoutes() { return withVoid(fut); } else { - log.warn("{}: The OpenShift route API is not available in this Kubernetes cluster. Exposing Kafka cluster {} using routes is not possible.", reconciliation, name); + LOGGER.warnCr(reconciliation, "The OpenShift route API is not available in this Kubernetes cluster. Exposing Kafka cluster {} using routes is not possible.", name); return withVoid(Future.failedFuture("The OpenShift route API is not available in this Kubernetes cluster. Exposing Kafka cluster " + name + " using routes is not possible.")); } } @@ -1682,20 +1681,20 @@ Future kafkaIngresses() { List ingressFutures = new ArrayList<>(ingresses.size()); List existingIngressNames = existingIngresses.stream().map(ingress -> ingress.getMetadata().getName()).collect(Collectors.toList()); - log.debug("{}: Reconciling existing Ingresses {} against the desired ingresses", reconciliation, existingIngressNames); + LOGGER.debugCr(reconciliation, "Reconciling existing Ingresses {} against the desired ingresses", existingIngressNames); // Update desired ingresses for (Ingress ingress : ingresses) { String ingressName = ingress.getMetadata().getName(); existingIngressNames.remove(ingressName); - ingressFutures.add(ingressOperations.reconcile(namespace, ingressName, ingress)); + ingressFutures.add(ingressOperations.reconcile(reconciliation, namespace, ingressName, ingress)); } - log.debug("{}: Ingresses {} should be deleted", reconciliation, existingIngressNames); + LOGGER.debugCr(reconciliation, "Ingresses {} should be deleted", existingIngressNames); // Delete ingresses which match our selector but are not desired anymore for (String ingressName : existingIngressNames) { - ingressFutures.add(ingressOperations.reconcile(namespace, ingressName, null)); + ingressFutures.add(ingressOperations.reconcile(reconciliation, namespace, ingressName, null)); } return CompositeFuture.join(ingressFutures); @@ -1726,20 +1725,20 @@ Future kafkaIngressesV1Beta1() { List ingressFutures = new ArrayList<>(ingresses.size()); List existingIngressNames = existingIngresses.stream().map(ingress -> ingress.getMetadata().getName()).collect(Collectors.toList()); - log.debug("{}: Reconciling existing v1beta1 Ingresses {} against the desired ingresses", reconciliation, existingIngressNames); + LOGGER.debugCr(reconciliation, "Reconciling existing v1beta1 Ingresses {} against the desired ingresses", existingIngressNames); // Update desired ingresses for (io.fabric8.kubernetes.api.model.networking.v1beta1.Ingress ingress : ingresses) { String ingressName = ingress.getMetadata().getName(); existingIngressNames.remove(ingressName); - ingressFutures.add(ingressV1Beta1Operations.reconcile(namespace, ingressName, ingress)); + ingressFutures.add(ingressV1Beta1Operations.reconcile(reconciliation, namespace, ingressName, ingress)); } - log.debug("{}: V1beta1 ingresses {} should be deleted", reconciliation, existingIngressNames); + LOGGER.debugCr(reconciliation, "V1beta1 ingresses {} should be deleted", existingIngressNames); // Delete ingresses which match our selector but are not desired anymore for (String ingressName : existingIngressNames) { - ingressFutures.add(ingressV1Beta1Operations.reconcile(namespace, ingressName, null)); + 
ingressFutures.add(ingressV1Beta1Operations.reconcile(reconciliation, namespace, ingressName, null)); } return CompositeFuture.join(ingressFutures); @@ -1756,22 +1755,22 @@ Future kafkaIngressesV1Beta1() { Future kafkaGetClusterId() { return adminClientSecrets() .compose(compositeFuture -> { - log.debug("{}: Attempt to get clusterId", reconciliation); + LOGGER.debugCr(reconciliation, "Attempt to get clusterId"); Promise resultPromise = Promise.promise(); vertx.createSharedWorkerExecutor("kubernetes-ops-pool").executeBlocking( future -> { Admin kafkaAdmin = null; try { String bootstrapHostname = KafkaResources.bootstrapServiceName(this.name) + "." + this.namespace + ".svc:" + KafkaCluster.REPLICATION_PORT; - log.debug("{}: Creating AdminClient for clusterId using {}", reconciliation, bootstrapHostname); + LOGGER.debugCr(reconciliation, "Creating AdminClient for clusterId using {}", bootstrapHostname); kafkaAdmin = adminClientProvider.createAdminClient(bootstrapHostname, compositeFuture.resultAt(0), compositeFuture.resultAt(1), "cluster-operator"); kafkaStatus.setClusterId(kafkaAdmin.describeCluster().clusterId().get()); } catch (KafkaException e) { - log.warn("{}: Kafka exception getting clusterId {}", reconciliation, e.getMessage()); + LOGGER.warnCr(reconciliation, "Kafka exception getting clusterId {}", e.getMessage()); } catch (InterruptedException e) { - log.warn("{}: Interrupted exception getting clusterId {}", reconciliation, e.getMessage()); + LOGGER.warnCr(reconciliation, "Interrupted exception getting clusterId {}", e.getMessage()); } catch (ExecutionException e) { - log.warn("{}: Execution exception getting clusterId {}", reconciliation, e.getMessage()); + LOGGER.warnCr(reconciliation, "Execution exception getting clusterId {}", e.getMessage()); } finally { if (kafkaAdmin != null) { kafkaAdmin.close(); @@ -1838,7 +1837,7 @@ Future kafkaLoadBalancerServicesReady() { for (GenericKafkaListener listener : loadBalancerListeners) { String bootstrapServiceName = ListenersUtils.backwardsCompatibleBootstrapServiceName(name, listener); - Future perListenerFut = serviceOperations.hasIngressAddress(namespace, bootstrapServiceName, 1_000, operationTimeoutMs) + Future perListenerFut = serviceOperations.hasIngressAddress(reconciliation, namespace, bootstrapServiceName, 1_000, operationTimeoutMs) .compose(res -> serviceOperations.getAsync(namespace, bootstrapServiceName)) .compose(svc -> { String bootstrapAddress; @@ -1849,7 +1848,7 @@ Future kafkaLoadBalancerServicesReady() { bootstrapAddress = svc.getStatus().getLoadBalancer().getIngress().get(0).getIp(); } - log.debug("{}: Found address {} for Service {}", reconciliation, bootstrapAddress, bootstrapServiceName); + LOGGER.debugCr(reconciliation, "Found address {} for Service {}", bootstrapAddress, bootstrapServiceName); kafkaBootstrapDnsName.add(bootstrapAddress); @@ -1869,7 +1868,7 @@ Future kafkaLoadBalancerServicesReady() { for (int pod = 0; pod < kafkaCluster.getReplicas(); pod++) { perPodFutures.add( - serviceOperations.hasIngressAddress(namespace, ListenersUtils.backwardsCompatibleBrokerServiceName(name, pod, listener), 1_000, operationTimeoutMs) + serviceOperations.hasIngressAddress(reconciliation, namespace, ListenersUtils.backwardsCompatibleBrokerServiceName(name, pod, listener), 1_000, operationTimeoutMs) ); } @@ -1890,7 +1889,7 @@ Future kafkaLoadBalancerServicesReady() { brokerAddress = svc.getStatus().getLoadBalancer().getIngress().get(0).getIp(); } - log.debug("{}: Found address {} for Service {}", reconciliation, brokerAddress, 
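Editor's note: kafkaGetClusterId above opens an Admin client against the internal replication bootstrap address on a worker executor and copies describeCluster().clusterId() into the status, warning on Kafka, interrupted and execution exceptions. The core Admin interaction, without the TLS material and the Vert.x executeBlocking wrapper, is roughly:

import java.util.Properties;
import java.util.concurrent.ExecutionException;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;

// Minimal sketch of reading the Kafka cluster ID with the Admin client.
// The real operator also wires in the cluster CA trust store and the
// cluster-operator client certificate, which are omitted here.
final class ClusterIdSketch {

    static String fetchClusterId(String bootstrapServers) throws ExecutionException, InterruptedException {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);

        try (Admin admin = Admin.create(props)) {
            // describeCluster().clusterId() returns a KafkaFuture<String>; get() blocks,
            // which is why the operator runs this on a shared worker executor.
            return admin.describeCluster().clusterId().get();
        }
    }
}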
svc.getMetadata().getName()); + LOGGER.debugCr(reconciliation, "Found address {} for Service {}", brokerAddress, svc.getMetadata().getName()); kafkaBrokerDnsNames.computeIfAbsent(podNumber, k -> new HashSet<>(2)).add(brokerAddress); @@ -1934,11 +1933,11 @@ Future kafkaNodePortServicesReady() { for (GenericKafkaListener listener : loadBalancerListeners) { String bootstrapServiceName = ListenersUtils.backwardsCompatibleBootstrapServiceName(name, listener); - Future perListenerFut = serviceOperations.hasNodePort(namespace, bootstrapServiceName, 1_000, operationTimeoutMs) + Future perListenerFut = serviceOperations.hasNodePort(reconciliation, namespace, bootstrapServiceName, 1_000, operationTimeoutMs) .compose(res -> serviceOperations.getAsync(namespace, bootstrapServiceName)) .compose(svc -> { Integer externalBootstrapNodePort = svc.getSpec().getPorts().get(0).getNodePort(); - log.debug("{}: Found node port {} for Service {}", reconciliation, externalBootstrapNodePort, bootstrapServiceName); + LOGGER.debugCr(reconciliation, "Found node port {} for Service {}", externalBootstrapNodePort, bootstrapServiceName); kafkaBootstrapNodePorts.put(ListenersUtils.identifier(listener), externalBootstrapNodePort); return Future.succeededFuture(); @@ -1948,7 +1947,7 @@ Future kafkaNodePortServicesReady() { for (int pod = 0; pod < kafkaCluster.getReplicas(); pod++) { perPodFutures.add( - serviceOperations.hasNodePort(namespace, ListenersUtils.backwardsCompatibleBrokerServiceName(name, pod, listener), 1_000, operationTimeoutMs) + serviceOperations.hasNodePort(reconciliation, namespace, ListenersUtils.backwardsCompatibleBrokerServiceName(name, pod, listener), 1_000, operationTimeoutMs) ); } @@ -1962,7 +1961,7 @@ Future kafkaNodePortServicesReady() { Future perBrokerFut = serviceOperations.getAsync(namespace, ListenersUtils.backwardsCompatibleBrokerServiceName(name, pod, listener)) .compose(svc -> { Integer externalBrokerNodePort = svc.getSpec().getPorts().get(0).getNodePort(); - log.debug("{}: Found node port {} for Service {}", reconciliation, externalBrokerNodePort, svc.getMetadata().getName()); + LOGGER.debugCr(reconciliation, "Found node port {} for Service {}", externalBrokerNodePort, svc.getMetadata().getName()); kafkaAdvertisedPorts.add(kafkaCluster.getAdvertisedPort(listener, podNumber, externalBrokerNodePort)); @@ -2096,11 +2095,11 @@ Future kafkaRoutesReady() { for (GenericKafkaListener listener : routeListeners) { String bootstrapRouteName = ListenersUtils.backwardsCompatibleBootstrapRouteOrIngressName(name, listener); - Future perListenerFut = routeOperations.hasAddress(namespace, bootstrapRouteName, 1_000, operationTimeoutMs) + Future perListenerFut = routeOperations.hasAddress(reconciliation, namespace, bootstrapRouteName, 1_000, operationTimeoutMs) .compose(res -> routeOperations.getAsync(namespace, bootstrapRouteName)) .compose(route -> { String bootstrapAddress = route.getStatus().getIngress().get(0).getHost(); - log.debug("{}: Found address {} for Route {}", reconciliation, bootstrapAddress, bootstrapRouteName); + LOGGER.debugCr(reconciliation, "Found address {} for Route {}", bootstrapAddress, bootstrapRouteName); kafkaBootstrapDnsName.add(bootstrapAddress); @@ -2120,7 +2119,7 @@ Future kafkaRoutesReady() { for (int pod = 0; pod < kafkaCluster.getReplicas(); pod++) { perPodFutures.add( - routeOperations.hasAddress(namespace, ListenersUtils.backwardsCompatibleBrokerServiceName(name, pod, listener), 1_000, operationTimeoutMs) + routeOperations.hasAddress(reconciliation, namespace, 
ListenersUtils.backwardsCompatibleBrokerServiceName(name, pod, listener), 1_000, operationTimeoutMs) ); } @@ -2134,7 +2133,7 @@ Future kafkaRoutesReady() { Future perBrokerFut = routeOperations.getAsync(namespace, ListenersUtils.backwardsCompatibleBrokerServiceName(name, pod, listener)) .compose(route -> { String brokerAddress = route.getStatus().getIngress().get(0).getHost(); - log.debug("{}: Found address {} for Route {}", reconciliation, brokerAddress, route.getMetadata().getName()); + LOGGER.debugCr(reconciliation, "Found address {} for Route {}", brokerAddress, route.getMetadata().getName()); kafkaBrokerDnsNames.computeIfAbsent(podNumber, k -> new HashSet<>(2)).add(brokerAddress); @@ -2182,10 +2181,10 @@ Future kafkaIngressesReady() { for (GenericKafkaListener listener : ingressListeners) { String bootstrapIngressName = ListenersUtils.backwardsCompatibleBootstrapRouteOrIngressName(name, listener); - Future perListenerFut = ingressOperations.hasIngressAddress(namespace, bootstrapIngressName, 1_000, operationTimeoutMs) + Future perListenerFut = ingressOperations.hasIngressAddress(reconciliation, namespace, bootstrapIngressName, 1_000, operationTimeoutMs) .compose(res -> { String bootstrapAddress = listener.getConfiguration().getBootstrap().getHost(); - log.debug("{}: Using address {} for Ingress {}", reconciliation, bootstrapAddress, bootstrapIngressName); + LOGGER.debugCr(reconciliation, "Using address {} for Ingress {}", bootstrapAddress, bootstrapIngressName); kafkaBootstrapDnsName.add(bootstrapAddress); @@ -2203,7 +2202,7 @@ Future kafkaIngressesReady() { for (int pod = 0; pod < kafkaCluster.getReplicas(); pod++) { perPodFutures.add( - ingressOperations.hasIngressAddress(namespace, ListenersUtils.backwardsCompatibleBrokerServiceName(name, pod, listener), 1_000, operationTimeoutMs) + ingressOperations.hasIngressAddress(reconciliation, namespace, ListenersUtils.backwardsCompatibleBrokerServiceName(name, pod, listener), 1_000, operationTimeoutMs) ); } @@ -2217,7 +2216,7 @@ Future kafkaIngressesReady() { .map(GenericKafkaListenerConfigurationBroker::getHost) .findAny() .orElse(null); - log.debug("{}: Using address {} for Ingress {}", reconciliation, brokerAddress, ListenersUtils.backwardsCompatibleBrokerServiceName(name, pod, listener)); + LOGGER.debugCr(reconciliation, "Using address {} for Ingress {}", brokerAddress, ListenersUtils.backwardsCompatibleBrokerServiceName(name, pod, listener)); kafkaBrokerDnsNames.computeIfAbsent(pod, k -> new HashSet<>(2)).add(brokerAddress); @@ -2260,10 +2259,10 @@ Future kafkaIngressesV1Beta1Ready() { for (GenericKafkaListener listener : ingressListeners) { String bootstrapIngressName = ListenersUtils.backwardsCompatibleBootstrapRouteOrIngressName(name, listener); - Future perListenerFut = ingressV1Beta1Operations.hasIngressAddress(namespace, bootstrapIngressName, 1_000, operationTimeoutMs) + Future perListenerFut = ingressV1Beta1Operations.hasIngressAddress(reconciliation, namespace, bootstrapIngressName, 1_000, operationTimeoutMs) .compose(res -> { String bootstrapAddress = listener.getConfiguration().getBootstrap().getHost(); - log.debug("{}: Using address {} for v1beta1 Ingress {}", reconciliation, bootstrapAddress, bootstrapIngressName); + LOGGER.debugCr(reconciliation, "Using address {} for v1beta1 Ingress {}", bootstrapAddress, bootstrapIngressName); kafkaBootstrapDnsName.add(bootstrapAddress); @@ -2281,7 +2280,7 @@ Future kafkaIngressesV1Beta1Ready() { for (int pod = 0; pod < kafkaCluster.getReplicas(); pod++) { perPodFutures.add( - 
ingressV1Beta1Operations.hasIngressAddress(namespace, ListenersUtils.backwardsCompatibleBrokerServiceName(name, pod, listener), 1_000, operationTimeoutMs) + ingressV1Beta1Operations.hasIngressAddress(reconciliation, namespace, ListenersUtils.backwardsCompatibleBrokerServiceName(name, pod, listener), 1_000, operationTimeoutMs) ); } @@ -2295,7 +2294,7 @@ Future kafkaIngressesV1Beta1Ready() { .map(GenericKafkaListenerConfigurationBroker::getHost) .findAny() .orElse(null); - log.debug("{}: Using address {} for v1beta1 Ingress {}", reconciliation, brokerAddress, ListenersUtils.backwardsCompatibleBrokerServiceName(name, pod, listener)); + LOGGER.debugCr(reconciliation, "Using address {} for v1beta1 Ingress {}", brokerAddress, ListenersUtils.backwardsCompatibleBrokerServiceName(name, pod, listener)); kafkaBrokerDnsNames.computeIfAbsent(pod, k -> new HashSet<>(2)).add(brokerAddress); @@ -2343,7 +2342,7 @@ Future customListenerCertificates() { .map(listener -> listener.getConfiguration().getBrokerCertChainAndKey().getSecretName()) .distinct() .collect(Collectors.toList()); - log.debug("Validating secret {} with custom TLS listener certificates", secretNames); + LOGGER.debugCr(reconciliation, "Validating secret {} with custom TLS listener certificates", secretNames); List secretFutures = new ArrayList<>(secretNames.size()); Map customSecrets = new HashMap<>(secretNames.size()); @@ -2353,7 +2352,7 @@ Future customListenerCertificates() { .compose(secret -> { if (secret != null) { customSecrets.put(secretName, secret); - log.debug("Found secrets {} with custom TLS listener certificate", secretName); + LOGGER.debugCr(reconciliation, "Found secrets {} with custom TLS listener certificate", secretName); } return Future.succeededFuture(); @@ -2393,7 +2392,7 @@ Future customListenerCertificates() { if (errors.isEmpty()) { return Future.succeededFuture(); } else { - log.error("{}: Failed to process Secrets with custom certificates: {}", reconciliation, errors); + LOGGER.errorCr(reconciliation, "Failed to process Secrets with custom certificates: {}", errors); return Future.failedFuture(new InvalidResourceException("Failed to process Secrets with custom certificates: " + errors)); } }); @@ -2412,10 +2411,10 @@ String getCertificateThumbprint(Secret certSecret, CertAndKeySecretSource custom } Future getKafkaAncillaryCm() { - return Util.metricsAndLogging(configMapOperations, namespace, kafkaCluster.getLogging(), kafkaCluster.getMetricsConfigInCm()) + return Util.metricsAndLogging(reconciliation, configMapOperations, namespace, kafkaCluster.getLogging(), kafkaCluster.getMetricsConfigInCm()) .compose(metricsAndLoggingCm -> { ConfigMap brokerCm = kafkaCluster.generateAncillaryConfigMap(metricsAndLoggingCm, kafkaAdvertisedHostnames, kafkaAdvertisedPorts, featureGates.controlPlaneListenerEnabled()); - KafkaConfiguration kc = KafkaConfiguration.unvalidated(kafkaCluster.getBrokersConfiguration()); // has to be after generateAncillaryConfigMap() which generates the configuration + KafkaConfiguration kc = KafkaConfiguration.unvalidated(reconciliation, kafkaCluster.getBrokersConfiguration()); // has to be after generateAncillaryConfigMap() which generates the configuration // if BROKER_ADVERTISED_HOSTNAMES_FILENAME or BROKER_ADVERTISED_PORTS_FILENAME changes, compute a hash and put it into annotation String brokerConfiguration = brokerCm.getData().getOrDefault(KafkaCluster.BROKER_ADVERTISED_HOSTNAMES_FILENAME, ""); @@ -2434,7 +2433,7 @@ Future getKafkaAncillaryCm() { } Future kafkaAncillaryCm() { - return 
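Editor's note: getCertificateThumbprint above exists so that a change to a custom TLS listener certificate shows up as a changed annotation value and can trigger a roll. One generic way to derive such a thumbprint from the Base64 value stored in a Secret, using only JDK classes, is sketched below; Strimzi's exact digest and encoding may differ.

import java.io.ByteArrayInputStream;
import java.security.GeneralSecurityException;
import java.security.MessageDigest;
import java.security.cert.CertificateFactory;
import java.security.cert.X509Certificate;
import java.util.Base64;

// Generic certificate-thumbprint helper: decode the certificate stored in a
// Secret (Base64 in the Secret's data map) and hash its DER encoding.
final class CertThumbprintSketch {

    static String thumbprint(String base64Cert) throws GeneralSecurityException {
        byte[] decoded = Base64.getDecoder().decode(base64Cert);
        X509Certificate cert = (X509Certificate) CertificateFactory.getInstance("X.509")
                .generateCertificate(new ByteArrayInputStream(decoded));

        // SHA-256 over the DER encoding gives a stable fingerprint that can be
        // compared between the StatefulSet template annotation and the pod.
        byte[] digest = MessageDigest.getInstance("SHA-256").digest(cert.getEncoded());
        return Base64.getEncoder().encodeToString(digest);
    }
}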
withVoid(getKafkaAncillaryCm().compose(cm -> configMapOperations.reconcile(namespace, kafkaCluster.getAncillaryConfigMapName(), cm))); + return withVoid(getKafkaAncillaryCm().compose(cm -> configMapOperations.reconcile(reconciliation, namespace, kafkaCluster.getAncillaryConfigMapName(), cm))); } Future kafkaBrokersSecret() { @@ -2450,22 +2449,22 @@ Future kafkaJmxSecret() { Future secretFuture = secretOperations.getAsync(namespace, KafkaCluster.jmxSecretName(name)); return secretFuture.compose(res -> { if (res == null) { - return withVoid(secretOperations.reconcile(namespace, KafkaCluster.jmxSecretName(name), + return withVoid(secretOperations.reconcile(reconciliation, namespace, KafkaCluster.jmxSecretName(name), kafkaCluster.generateJmxSecret())); } return withVoid(Future.succeededFuture(this)); }); } - return withVoid(secretOperations.reconcile(namespace, KafkaCluster.jmxSecretName(name), null)); + return withVoid(secretOperations.reconcile(reconciliation, namespace, KafkaCluster.jmxSecretName(name), null)); } Future kafkaNetPolicy() { - return withVoid(networkPolicyOperator.reconcile(namespace, KafkaCluster.networkPolicyName(name), kafkaCluster.generateNetworkPolicy(operatorNamespace, operatorNamespaceLabels))); + return withVoid(networkPolicyOperator.reconcile(reconciliation, namespace, KafkaCluster.networkPolicyName(name), kafkaCluster.generateNetworkPolicy(operatorNamespace, operatorNamespaceLabels))); } Future kafkaPodDisruptionBudget() { - return withVoid(podDisruptionBudgetOperator.reconcile(namespace, kafkaCluster.getName(), kafkaCluster.generatePodDisruptionBudget())); + return withVoid(podDisruptionBudgetOperator.reconcile(reconciliation, namespace, kafkaCluster.getName(), kafkaCluster.generatePodDisruptionBudget())); } int getPodIndexFromPvcName(String pvcName) { @@ -2493,13 +2492,13 @@ Future maybeResizeReconcilePvcs(List reconcilePvc(desiredPvc).onComplete(resultPromise); } else if (currentPvc.getStatus().getConditions().stream().filter(cond -> "Resizing".equals(cond.getType()) && "true".equals(cond.getStatus().toLowerCase(Locale.ENGLISH))).findFirst().orElse(null) != null) { // The PVC is Bound but it is already resizing => Nothing to do, we should let it resize - log.debug("{}: The PVC {} is resizing, nothing to do", reconciliation, desiredPvc.getMetadata().getName()); + LOGGER.debugCr(reconciliation, "The PVC {} is resizing, nothing to do", desiredPvc.getMetadata().getName()); resultPromise.complete(); } else if (currentPvc.getStatus().getConditions().stream().filter(cond -> "FileSystemResizePending".equals(cond.getType()) && "true".equals(cond.getStatus().toLowerCase(Locale.ENGLISH))).findFirst().orElse(null) != null) { // The PVC is Bound and resized but waiting for FS resizing => We need to restart the pod which is using it String podName = cluster.getPodName(getPodIndexFromPvcName(desiredPvc.getMetadata().getName())); fsResizingRestartRequest.add(podName); - log.info("{}: The PVC {} is waiting for file system resizing and the pod {} needs to be restarted.", reconciliation, desiredPvc.getMetadata().getName(), podName); + LOGGER.infoCr(reconciliation, "The PVC {} is waiting for file system resizing and the pod {} needs to be restarted.", desiredPvc.getMetadata().getName(), podName); resultPromise.complete(); } else { // The PVC is Bound and resizing is not in progress => We should check if the SC supports resizing and check if size changed @@ -2528,7 +2527,7 @@ Future maybeResizeReconcilePvcs(List Future reconcilePvc(PersistentVolumeClaim desired) { Promise 
resultPromise = Promise.promise(); - pvcOperations.reconcile(namespace, desired.getMetadata().getName(), desired).onComplete(pvcRes -> { + pvcOperations.reconcile(reconciliation, namespace, desired.getMetadata().getName(), desired).onComplete(pvcRes -> { if (pvcRes.succeeded()) { resultPromise.complete(); } else { @@ -2550,16 +2549,16 @@ Future resizePvc(PersistentVolumeClaim current, PersistentVolumeClaim desi StorageClass sc = scRes.result(); if (sc == null) { - log.warn("{}: Storage Class {} not found. PVC {} cannot be resized. Reconciliation will proceed without reconciling this PVC.", reconciliation, storageClassName, desired.getMetadata().getName()); + LOGGER.warnCr(reconciliation, "Storage Class {} not found. PVC {} cannot be resized. Reconciliation will proceed without reconciling this PVC.", storageClassName, desired.getMetadata().getName()); resultPromise.complete(); } else if (sc.getAllowVolumeExpansion() == null || !sc.getAllowVolumeExpansion()) { // Resizing not suported in SC => do nothing - log.warn("{}: Storage Class {} does not support resizing of volumes. PVC {} cannot be resized. Reconciliation will proceed without reconciling this PVC.", reconciliation, storageClassName, desired.getMetadata().getName()); + LOGGER.warnCr(reconciliation, "Storage Class {} does not support resizing of volumes. PVC {} cannot be resized. Reconciliation will proceed without reconciling this PVC.", storageClassName, desired.getMetadata().getName()); resultPromise.complete(); } else { // Resizing supported by SC => We can reconcile the PVC to have it resized - log.info("{}: Resizing PVC {} from {} to {}.", reconciliation, desired.getMetadata().getName(), current.getStatus().getCapacity().get("storage").getAmount(), desired.getSpec().getResources().getRequests().get("storage").getAmount()); - pvcOperations.reconcile(namespace, desired.getMetadata().getName(), desired).onComplete(pvcRes -> { + LOGGER.infoCr(reconciliation, "Resizing PVC {} from {} to {}.", desired.getMetadata().getName(), current.getStatus().getCapacity().get("storage").getAmount(), desired.getSpec().getResources().getRequests().get("storage").getAmount()); + pvcOperations.reconcile(reconciliation, namespace, desired.getMetadata().getName(), desired).onComplete(pvcRes -> { if (pvcRes.succeeded()) { resultPromise.complete(); } else { @@ -2568,12 +2567,12 @@ Future resizePvc(PersistentVolumeClaim current, PersistentVolumeClaim desi }); } } else { - log.error("{}: Storage Class {} not found. PVC {} cannot be resized.", reconciliation, storageClassName, desired.getMetadata().getName(), scRes.cause()); + LOGGER.errorCr(reconciliation, "Storage Class {} not found. PVC {} cannot be resized.", storageClassName, desired.getMetadata().getName(), scRes.cause()); resultPromise.fail(scRes.cause()); } }); } else { - log.warn("{}: PVC {} does not use any Storage Class and cannot be resized. Reconciliation will proceed without reconciling this PVC.", reconciliation, desired.getMetadata().getName()); + LOGGER.warnCr(reconciliation, "PVC {} does not use any Storage Class and cannot be resized. 
Reconciliation will proceed without reconciling this PVC.", desired.getMetadata().getName()); resultPromise.complete(); } @@ -2647,7 +2646,7 @@ Future kafkaRollToAddOrRemoveVolumes() { for (Pod pod : pods) { if (!needsRestartBecauseAddedOrRemovedJbodVolumes(pod, jbodStorage, kafkaCurrentReplicas, kafkaCluster.getReplicas()).isEmpty()) { // At least one broker needs rolling update => we can trigger it without checking the other brokers - log.debug("{}: Kafka brokers needs rolling update to add or remove JBOD volumes", reconciliation); + LOGGER.debugCr(reconciliation, "Kafka brokers needs rolling update to add or remove JBOD volumes"); return kafkaSetOperations.getAsync(namespace, KafkaCluster.kafkaClusterName(name)) .compose(sts -> { @@ -2662,7 +2661,7 @@ Future kafkaRollToAddOrRemoveVolumes() { } } - log.debug("{}: No rolling update of Kafka brokers due to added or removed JBOD volumes is needed", reconciliation); + LOGGER.debugCr(reconciliation, "No rolling update of Kafka brokers due to added or removed JBOD volumes is needed"); return withVoid(Future.succeededFuture()); }); } else { @@ -2690,7 +2689,7 @@ private List needsRestartBecauseAddedOrRemovedJbodVolumes(Pod pod, JbodS if (jsonStorage != null) { Storage currentStorage = ModelUtils.decodeStorageFromJson(jsonStorage); - if (new StorageDiff(currentStorage, desiredStorage, currentReplicas, desiredReplicas).isVolumesAddedOrRemoved()) { + if (new StorageDiff(reconciliation, currentStorage, desiredStorage, currentReplicas, desiredReplicas).isVolumesAddedOrRemoved()) { return singletonList("JBOD volumes were added or removed"); } } @@ -2725,7 +2724,7 @@ StatefulSet getKafkaStatefulSet() { } Future kafkaStatefulSet() { - return withKafkaDiff(kafkaSetOperations.reconcile(namespace, kafkaCluster.getName(), getKafkaStatefulSet())); + return withKafkaDiff(kafkaSetOperations.reconcile(reconciliation, namespace, kafkaCluster.getName(), getKafkaStatefulSet())); } Future kafkaRollingUpdate() { @@ -2734,7 +2733,7 @@ Future kafkaRollingUpdate() { } Future kafkaScaleUp() { - return withVoid(kafkaSetOperations.scaleUp(namespace, kafkaCluster.getName(), kafkaCluster.getReplicas())); + return withVoid(kafkaSetOperations.scaleUp(reconciliation, namespace, kafkaCluster.getName(), kafkaCluster.getReplicas())); } Future zkPodsReady() { @@ -2759,19 +2758,19 @@ Future podsReady(AbstractModel model, int replicas) { List podFutures = new ArrayList<>(replicas); for (int i = 0; i < replicas; i++) { - log.debug("{}: Checking readiness of pod {}.", reconciliation, model.getPodName(i)); - podFutures.add(podOperations.readiness(namespace, model.getPodName(i), 1_000, operationTimeoutMs)); + LOGGER.debugCr(reconciliation, "Checking readiness of pod {}.", model.getPodName(i)); + podFutures.add(podOperations.readiness(reconciliation, namespace, model.getPodName(i), 1_000, operationTimeoutMs)); } return withVoid(CompositeFuture.join(podFutures)); } Future kafkaServiceEndpointReady() { - return withVoid(serviceOperations.endpointReadiness(namespace, kafkaCluster.getServiceName(), 1_000, operationTimeoutMs)); + return withVoid(serviceOperations.endpointReadiness(reconciliation, namespace, kafkaCluster.getServiceName(), 1_000, operationTimeoutMs)); } Future kafkaHeadlessServiceEndpointReady() { - return withVoid(serviceOperations.endpointReadiness(namespace, kafkaCluster.getHeadlessServiceName(), 1_000, operationTimeoutMs)); + return withVoid(serviceOperations.endpointReadiness(reconciliation, namespace, kafkaCluster.getHeadlessServiceName(), 1_000, operationTimeoutMs)); } 
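Editor's note: the resizePvc path above only patches a PVC to a larger size when its StorageClass exists and allows volume expansion; in every other case it logs a warning and continues the reconciliation. Condensed into a single decision helper over the fabric8 model classes (helper name and return type are illustrative):

import io.fabric8.kubernetes.api.model.PersistentVolumeClaim;
import io.fabric8.kubernetes.api.model.storage.StorageClass;

// Condensed version of the resize decision: a PVC is only worth patching to a
// larger size when it references a StorageClass that supports volume expansion.
// The size comparison itself (current capacity vs. requested storage) is omitted.
final class PvcResizeSketch {

    enum Decision { RESIZE, SKIP }

    static Decision decide(PersistentVolumeClaim current, StorageClass sc) {
        if (current.getSpec().getStorageClassName() == null) {
            // No storage class => nothing can expand the volume.
            return Decision.SKIP;
        }
        if (sc == null || sc.getAllowVolumeExpansion() == null || !sc.getAllowVolumeExpansion()) {
            // Storage class missing or does not allow expansion => warn and continue.
            return Decision.SKIP;
        }
        return Decision.RESIZE;
    }
}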
/** @@ -2846,7 +2845,7 @@ Future kafkaManualPodCleaning() { */ Future maybeCleanPodAndPvc(StatefulSetOperator stsOperator, StatefulSet sts, List desiredPvcs, Future> existingPvcsFuture) { if (sts != null) { - log.debug("{}: Considering manual cleaning of Pods for StatefulSet {}", reconciliation, sts.getMetadata().getName()); + LOGGER.debugCr(reconciliation, "Considering manual cleaning of Pods for StatefulSet {}", sts.getMetadata().getName()); String stsName = sts.getMetadata().getName(); @@ -2856,7 +2855,7 @@ Future maybeCleanPodAndPvc(StatefulSetOperator stsOperator, StatefulSet st if (pod != null) { if (Annotations.booleanAnnotation(pod, AbstractScalableResourceOperator.ANNO_STRIMZI_IO_DELETE_POD_AND_PVC, false)) { - log.debug("{}: Pod and PVCs for {} should be deleted based on annotation", reconciliation, podName); + LOGGER.debugCr(reconciliation, "Pod and PVCs for {} should be deleted based on annotation", podName); return existingPvcsFuture .compose(existingPvcs -> { @@ -2914,10 +2913,10 @@ Future cleanPodAndPvc(StatefulSetOperator stsOperator, StatefulSet sts, St // We start by deleting the StatefulSet so that it doesn't interfere with the pod deletion process // The deletion has to be non-cascading so that the other pods are not affected - Future fut = stsOperator.deleteAsync(namespace, sts.getMetadata().getName(), false) + Future fut = stsOperator.deleteAsync(reconciliation, namespace, sts.getMetadata().getName(), false) .compose(ignored -> { // After the StatefulSet is deleted, we can delete the pod which was marked for deletion - return podOperations.reconcile(namespace, podName, null); + return podOperations.reconcile(reconciliation, namespace, podName, null); }) .compose(ignored -> { // With the pod deleting, we can delete all the PVCs belonging to this pod @@ -2925,19 +2924,19 @@ Future cleanPodAndPvc(StatefulSetOperator stsOperator, StatefulSet sts, St for (PersistentVolumeClaim pvc : deletePvcs) { String pvcName = pvc.getMetadata().getName(); - log.debug("{}: Deleting PVC {} for Pod {} based on {} annotation", reconciliation, pvcName, podName, AbstractScalableResourceOperator.ANNO_STRIMZI_IO_DELETE_POD_AND_PVC); - deleteResults.add(pvcOperations.reconcile(namespace, pvcName, null)); + LOGGER.debugCr(reconciliation, "Deleting PVC {} for Pod {} based on {} annotation", pvcName, podName, AbstractScalableResourceOperator.ANNO_STRIMZI_IO_DELETE_POD_AND_PVC); + deleteResults.add(pvcOperations.reconcile(reconciliation, namespace, pvcName, null)); } return CompositeFuture.join(deleteResults); }) .compose(ignored -> { // The pod deletion just triggers it asynchronously // We have to wait for the pod to be actually deleted - log.debug("{}: Checking if Pod {} has been deleted", reconciliation, podName); + LOGGER.debugCr(reconciliation, "Checking if Pod {} has been deleted", podName); - Future waitForDeletion = podOperations.waitFor(namespace, podName, "deleted", pollingIntervalMs, timeoutMs, (ignore1, ignore2) -> { + Future waitForDeletion = podOperations.waitFor(reconciliation, namespace, podName, "deleted", pollingIntervalMs, timeoutMs, (ignore1, ignore2) -> { Pod deletion = podOperations.get(namespace, podName); - log.trace("Checking if Pod {} in namespace {} has been deleted or recreated", podName, namespace); + LOGGER.traceCr(reconciliation, "Checking if Pod {} in namespace {} has been deleted or recreated", podName, namespace); return deletion == null; }); @@ -2952,11 +2951,11 @@ Future cleanPodAndPvc(StatefulSetOperator stsOperator, StatefulSet sts, St String pvcName = 
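Editor's note: cleanPodAndPvc above relies on waitFor with a predicate that re-reads the Pod (and later the PVCs) until they are gone, because deletion in Kubernetes is asynchronous. A simplified poll-until-condition helper built on a plain Vert.x periodic timer, standing in for Strimzi's waitFor, could look like this:

import java.util.function.Supplier;
import io.vertx.core.Future;
import io.vertx.core.Promise;
import io.vertx.core.Vertx;

// Poll a condition (e.g. "the Pod can no longer be read") until it holds or a
// timeout expires. This plays a role similar to the operator's waitFor helper,
// but is deliberately simplified.
final class WaitForSketch {

    static Future<Void> waitUntil(Vertx vertx, Supplier<Boolean> condition, long pollMs, long timeoutMs) {
        Promise<Void> promise = Promise.promise();
        long deadline = System.currentTimeMillis() + timeoutMs;

        vertx.setPeriodic(pollMs, timerId -> {
            if (condition.get()) {
                vertx.cancelTimer(timerId);
                promise.complete();
            } else if (System.currentTimeMillis() > deadline) {
                vertx.cancelTimer(timerId);
                promise.fail("Timed out waiting for condition");
            }
        });

        return promise.future();
    }
}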
pvc.getMetadata().getName(); String uid = pvc.getMetadata().getUid(); - log.debug("{}: Checking if PVC {} for Pod {} has been deleted", reconciliation, pvcName, podName); + LOGGER.debugCr(reconciliation, "Checking if PVC {} for Pod {} has been deleted", pvcName, podName); - Future waitForDeletion = pvcOperations.waitFor(namespace, pvcName, "deleted", pollingIntervalMs, timeoutMs, (ignore1, ignore2) -> { + Future waitForDeletion = pvcOperations.waitFor(reconciliation, namespace, pvcName, "deleted", pollingIntervalMs, timeoutMs, (ignore1, ignore2) -> { PersistentVolumeClaim deletion = pvcOperations.get(namespace, pvcName); - log.trace("Checking if {} {} in namespace {} has been deleted", pvc.getKind(), pvcName, namespace); + LOGGER.traceCr(reconciliation, "Checking if {} {} in namespace {} has been deleted", pvc.getKind(), pvcName, namespace); return deletion == null || (deletion.getMetadata() != null && !uid.equals(deletion.getMetadata().getUid())); }); @@ -2971,8 +2970,8 @@ Future cleanPodAndPvc(StatefulSetOperator stsOperator, StatefulSet sts, St List createResults = new ArrayList<>(createPvcs.size()); for (PersistentVolumeClaim pvc : createPvcs) { - log.debug("{}: Reconciling PVC {} for Pod {} after it was deleted and maybe recreated by the pod", reconciliation, pvc.getMetadata().getName(), podName); - createResults.add(pvcOperations.reconcile(namespace, pvc.getMetadata().getName(), pvc)); + LOGGER.debugCr(reconciliation, "Reconciling PVC {} for Pod {} after it was deleted and maybe recreated by the pod", pvc.getMetadata().getName(), podName); + createResults.add(pvcOperations.reconcile(reconciliation, namespace, pvc.getMetadata().getName(), pvc)); } return CompositeFuture.join(createResults); @@ -2989,9 +2988,9 @@ Future cleanPodAndPvc(StatefulSetOperator stsOperator, StatefulSet sts, St sts.getMetadata().setUid(null); sts.setStatus(null); - return stsOperator.reconcile(namespace, sts.getMetadata().getName(), sts); + return stsOperator.reconcile(reconciliation, namespace, sts.getMetadata().getName(), sts); }) - .compose(ignored -> podOperations.readiness(namespace, podName, pollingIntervalMs, timeoutMs)); + .compose(ignored -> podOperations.readiness(reconciliation, namespace, podName, pollingIntervalMs, timeoutMs)); return fut; } @@ -3066,11 +3065,11 @@ Future persistentClaimDeletion(List maybeDeletePvcs maybeDeletePvcs.removeAll(desiredPvcs); for (String pvcName : maybeDeletePvcs) { - log.debug("{}: Considering PVC {} for deletion", reconciliation, pvcName); + LOGGER.debugCr(reconciliation, "Considering PVC {} for deletion", pvcName); if (Annotations.booleanAnnotation(pvcOperations.get(namespace, pvcName), AbstractModel.ANNO_STRIMZI_IO_DELETE_CLAIM, false)) { - log.debug("{}: Deleting PVC {}", reconciliation, pvcName); - futures.add(pvcOperations.reconcile(namespace, pvcName, null)); + LOGGER.debugCr(reconciliation, "Deleting PVC {}", pvcName); + futures.add(pvcOperations.reconcile(reconciliation, namespace, pvcName, null)); } } @@ -3078,7 +3077,7 @@ Future persistentClaimDeletion(List maybeDeletePvcs } final Future getEntityOperatorDescription() { - this.entityOperator = EntityOperator.fromCrd(kafkaAssembly, versions); + this.entityOperator = EntityOperator.fromCrd(reconciliation, kafkaAssembly, versions); if (entityOperator != null) { EntityTopicOperator topicOperator = entityOperator.getTopicOperator(); @@ -3086,9 +3085,9 @@ final Future getEntityOperatorDescription() { return CompositeFuture.join( topicOperator == null ? 
Future.succeededFuture(null) : - Util.metricsAndLogging(configMapOperations, kafkaAssembly.getMetadata().getNamespace(), topicOperator.getLogging(), null), + Util.metricsAndLogging(reconciliation, configMapOperations, kafkaAssembly.getMetadata().getNamespace(), topicOperator.getLogging(), null), userOperator == null ? Future.succeededFuture(null) : - Util.metricsAndLogging(configMapOperations, kafkaAssembly.getMetadata().getNamespace(), userOperator.getLogging(), null)) + Util.metricsAndLogging(reconciliation, configMapOperations, kafkaAssembly.getMetadata().getNamespace(), userOperator.getLogging(), null)) .compose(res -> { MetricsAndLogging toMetricsAndLogging = res.resultAt(0); MetricsAndLogging uoMetricsAndLogging = res.resultAt(1); @@ -3118,7 +3117,7 @@ Future entityOperatorRole() { role = null; } - return withVoid(roleOperations.reconcile( + return withVoid(roleOperations.reconcile(reconciliation, namespace, EntityOperator.getRoleName(name), role)); @@ -3138,7 +3137,7 @@ Future entityTopicOperatorRole() { final Future> topicWatchedNamespaceFuture; if (!namespace.equals(topicWatchedNamespace)) { - topicWatchedNamespaceFuture = roleOperations.reconcile( + topicWatchedNamespaceFuture = roleOperations.reconcile(reconciliation, topicWatchedNamespace, EntityOperator.getRoleName(name), entityOperator.generateRole(namespace, topicWatchedNamespace)); @@ -3163,7 +3162,7 @@ Future entityUserOperatorRole() { final Future> userWatchedNamespaceFuture; if (!namespace.equals(userWatchedNamespace)) { - userWatchedNamespaceFuture = roleOperations.reconcile( + userWatchedNamespaceFuture = roleOperations.reconcile(reconciliation, userWatchedNamespace, EntityOperator.getRoleName(name), entityOperator.generateRole(namespace, userWatchedNamespace)); @@ -3175,7 +3174,7 @@ Future entityUserOperatorRole() { } Future entityOperatorServiceAccount() { - return withVoid(serviceAccountOperations.reconcile(namespace, + return withVoid(serviceAccountOperations.reconcile(reconciliation, namespace, EntityOperator.entityOperatorServiceAccountName(name), isEntityOperatorDeployed() ? entityOperator.generateServiceAccount() : null)); } @@ -3191,8 +3190,8 @@ Future entityOperatorTopicOpRoleBindingForRole() { // or if the topic operator needs to watch a different namespace if (!isEntityOperatorDeployed() || entityOperator.getTopicOperator() == null) { - log.debug("entityOperatorTopicOpRoleBindingForRole not required"); - return withVoid(roleBindingOperations.reconcile( + LOGGER.debugCr(reconciliation, "entityOperatorTopicOpRoleBindingForRole not required"); + return withVoid(roleBindingOperations.reconcile(reconciliation, namespace, EntityTopicOperator.roleBindingForRoleName(name), null)); @@ -3211,7 +3210,7 @@ Future entityOperatorTopicOpRoleBindingForRole() { final Future> watchedNamespaceFuture; if (!namespace.equals(watchedNamespace)) { - watchedNamespaceFuture = roleBindingOperations.reconcile( + watchedNamespaceFuture = roleBindingOperations.reconcile(reconciliation, watchedNamespace, EntityTopicOperator.roleBindingForRoleName(name), entityOperator.getTopicOperator().generateRoleBindingForRole(namespace, watchedNamespace)); @@ -3220,7 +3219,7 @@ Future entityOperatorTopicOpRoleBindingForRole() { } // Create role binding for the the UI runs in (it needs to access the CA etc.) 
- Future> ownNamespaceFuture = roleBindingOperations.reconcile( + Future> ownNamespaceFuture = roleBindingOperations.reconcile(reconciliation, namespace, EntityTopicOperator.roleBindingForRoleName(name), entityOperator.getTopicOperator().generateRoleBindingForRole(namespace, namespace)); @@ -3233,8 +3232,8 @@ Future entityOperatorUserOpRoleBindingForRole() { // or if the user operator needs to watch a different namespace if (!isEntityOperatorDeployed() || entityOperator.getUserOperator() == null) { - log.debug("entityOperatorUserOpRoleBindingForRole not required"); - return withVoid(roleBindingOperations.reconcile( + LOGGER.debugCr(reconciliation, "entityOperatorUserOpRoleBindingForRole not required"); + return withVoid(roleBindingOperations.reconcile(reconciliation, namespace, EntityUserOperator.roleBindingForRoleName(name), null)); @@ -3252,7 +3251,7 @@ Future entityOperatorUserOpRoleBindingForRole() { } if (!namespace.equals(watchedNamespace)) { - watchedNamespaceFuture = roleBindingOperations.reconcile( + watchedNamespaceFuture = roleBindingOperations.reconcile(reconciliation, watchedNamespace, EntityUserOperator.roleBindingForRoleName(name), entityOperator.getUserOperator().generateRoleBindingForRole(namespace, watchedNamespace)); @@ -3261,7 +3260,7 @@ Future entityOperatorUserOpRoleBindingForRole() { } // Create role binding for the the UI runs in (it needs to access the CA etc.) - ownNamespaceFuture = roleBindingOperations.reconcile( + ownNamespaceFuture = roleBindingOperations.reconcile(reconciliation, namespace, EntityUserOperator.roleBindingForRoleName(name), entityOperator.getUserOperator().generateRoleBindingForRole(namespace, namespace)); @@ -3271,14 +3270,14 @@ Future entityOperatorUserOpRoleBindingForRole() { } Future entityOperatorTopicOpAncillaryCm() { - return withVoid(configMapOperations.reconcile(namespace, + return withVoid(configMapOperations.reconcile(reconciliation, namespace, isEntityOperatorDeployed() && entityOperator.getTopicOperator() != null ? entityOperator.getTopicOperator().getAncillaryConfigMapName() : EntityTopicOperator.metricAndLogConfigsName(name), topicOperatorMetricsAndLogsConfigMap)); } Future entityOperatorUserOpAncillaryCm() { - return withVoid(configMapOperations.reconcile(namespace, + return withVoid(configMapOperations.reconcile(reconciliation, namespace, isEntityOperatorDeployed() && entityOperator.getUserOperator() != null ? 
entityOperator.getUserOperator().getAncillaryConfigMapName() : EntityUserOperator.metricAndLogConfigsName(name), userOperatorMetricsAndLogsConfigMap)); @@ -3296,7 +3295,7 @@ Future entityOperatorDeployment() { Ca.ANNO_STRIMZI_IO_CLUSTER_CA_CERT_GENERATION, String.valueOf(clusterCaCertGeneration)); Annotations.annotations(eoDeployment.getSpec().getTemplate()).put( Ca.ANNO_STRIMZI_IO_CLIENTS_CA_CERT_GENERATION, String.valueOf(clientsCaCertGeneration)); - return deploymentOperations.reconcile(namespace, EntityOperator.entityOperatorName(name), eoDeployment); + return deploymentOperations.reconcile(reconciliation, namespace, EntityOperator.entityOperatorName(name), eoDeployment); }).compose(recon -> { if (recon instanceof ReconcileResult.Noop) { // Lets check if we need to roll the deployment manually @@ -3309,21 +3308,21 @@ Future entityOperatorDeployment() { return Future.succeededFuture(this); }); } else { - return withVoid(deploymentOperations.reconcile(namespace, EntityOperator.entityOperatorName(name), null)); + return withVoid(deploymentOperations.reconcile(reconciliation, namespace, EntityOperator.entityOperatorName(name), null)); } } Future entityOperatorRollingUpdate() { - return withVoid(deploymentOperations.rollingUpdate(namespace, EntityOperator.entityOperatorName(name), operationTimeoutMs)); + return withVoid(deploymentOperations.rollingUpdate(reconciliation, namespace, EntityOperator.entityOperatorName(name), operationTimeoutMs)); } Future entityOperatorReady() { if (this.entityOperator != null && isEntityOperatorDeployed()) { Future future = deploymentOperations.getAsync(namespace, this.entityOperator.getName()); return future.compose(dep -> { - return withVoid(deploymentOperations.waitForObserved(namespace, this.entityOperator.getName(), 1_000, operationTimeoutMs)); + return withVoid(deploymentOperations.waitForObserved(reconciliation, namespace, this.entityOperator.getName(), 1_000, operationTimeoutMs)); }).compose(dep -> { - return withVoid(deploymentOperations.readiness(namespace, this.entityOperator.getName(), 1_000, operationTimeoutMs)); + return withVoid(deploymentOperations.readiness(reconciliation, namespace, this.entityOperator.getName(), 1_000, operationTimeoutMs)); }).map(i -> this); } return withVoid(Future.succeededFuture()); @@ -3340,7 +3339,7 @@ Future entityOperatorSecret(Supplier dateSupplier) { private boolean isPodUpToDate(StatefulSet sts, Pod pod) { final int stsGeneration = StatefulSetOperator.getStsGeneration(sts); final int podGeneration = StatefulSetOperator.getPodGeneration(pod); - log.debug("Rolling update of {}/{}: pod {} has {}={}; sts has {}={}", + LOGGER.debugCr(reconciliation, "Rolling update of {}/{}: pod {} has {}={}; sts has {}={}", sts.getMetadata().getNamespace(), sts.getMetadata().getName(), pod.getMetadata().getName(), StatefulSetOperator.ANNO_STRIMZI_IO_GENERATION, podGeneration, StatefulSetOperator.ANNO_STRIMZI_IO_GENERATION, stsGeneration); @@ -3348,9 +3347,9 @@ private boolean isPodUpToDate(StatefulSet sts, Pod pod) { } private final Future getCruiseControlDescription() { - CruiseControl cruiseControl = CruiseControl.fromCrd(kafkaAssembly, versions); + CruiseControl cruiseControl = CruiseControl.fromCrd(reconciliation, kafkaAssembly, versions); if (cruiseControl != null) { - Util.metricsAndLogging(configMapOperations, kafkaAssembly.getMetadata().getNamespace(), + Util.metricsAndLogging(reconciliation, configMapOperations, kafkaAssembly.getMetadata().getNamespace(), cruiseControl.getLogging(), cruiseControl.getMetricsConfigInCm()) 
.compose(metricsAndLogging -> { ConfigMap logAndMetricsConfigMap = cruiseControl.generateMetricsAndLogConfigMap(metricsAndLogging); @@ -3368,13 +3367,13 @@ private final Future getCruiseControlDescription() { } Future cruiseControlServiceAccount() { - return withVoid(serviceAccountOperations.reconcile(namespace, + return withVoid(serviceAccountOperations.reconcile(reconciliation, namespace, CruiseControl.cruiseControlServiceAccountName(name), ccDeployment != null ? cruiseControl.generateServiceAccount() : null)); } Future cruiseControlAncillaryCm() { - return withVoid(configMapOperations.reconcile(namespace, + return withVoid(configMapOperations.reconcile(reconciliation, namespace, ccDeployment != null && cruiseControl != null ? cruiseControl.getAncillaryConfigMapName() : CruiseControl.metricAndLogConfigsName(name), cruiseControlMetricsAndLogsConfigMap)); @@ -3392,7 +3391,7 @@ Future cruiseControlDeployment() { if (this.cruiseControl != null && ccDeployment != null) { Future future = deploymentOperations.getAsync(namespace, this.cruiseControl.getName()); return future.compose(dep -> { - return deploymentOperations.reconcile(namespace, this.cruiseControl.getName(), ccDeployment); + return deploymentOperations.reconcile(reconciliation, namespace, this.cruiseControl.getName(), ccDeployment); }).compose(recon -> { if (recon instanceof ReconcileResult.Noop) { // Lets check if we need to roll the deployment manually @@ -3405,32 +3404,32 @@ Future cruiseControlDeployment() { return Future.succeededFuture(this); }); } else { - return withVoid(deploymentOperations.reconcile(namespace, CruiseControl.cruiseControlName(name), null)); + return withVoid(deploymentOperations.reconcile(reconciliation, namespace, CruiseControl.cruiseControlName(name), null)); } } Future cruiseControlRollingUpdate() { - return withVoid(deploymentOperations.rollingUpdate(namespace, CruiseControl.cruiseControlName(name), operationTimeoutMs)); + return withVoid(deploymentOperations.rollingUpdate(reconciliation, namespace, CruiseControl.cruiseControlName(name), operationTimeoutMs)); } Future cruiseControlService() { - return withVoid(serviceOperations.reconcile(namespace, CruiseControl.cruiseControlServiceName(name), cruiseControl != null ? cruiseControl.generateService() : null)); + return withVoid(serviceOperations.reconcile(reconciliation, namespace, CruiseControl.cruiseControlServiceName(name), cruiseControl != null ? cruiseControl.generateService() : null)); } Future cruiseControlReady() { if (this.cruiseControl != null && ccDeployment != null) { Future future = deploymentOperations.getAsync(namespace, this.cruiseControl.getName()); return future.compose(dep -> { - return withVoid(deploymentOperations.waitForObserved(namespace, this.cruiseControl.getName(), 1_000, operationTimeoutMs)); + return withVoid(deploymentOperations.waitForObserved(reconciliation, namespace, this.cruiseControl.getName(), 1_000, operationTimeoutMs)); }).compose(dep -> { - return withVoid(deploymentOperations.readiness(namespace, this.cruiseControl.getName(), 1_000, operationTimeoutMs)); + return withVoid(deploymentOperations.readiness(reconciliation, namespace, this.cruiseControl.getName(), 1_000, operationTimeoutMs)); }).map(i -> this); } return withVoid(Future.succeededFuture()); } Future cruiseControlNetPolicy() { - return withVoid(networkPolicyOperator.reconcile(namespace, CruiseControl.policyName(name), + return withVoid(networkPolicyOperator.reconcile(reconciliation, namespace, CruiseControl.policyName(name), cruiseControl != null ? 
cruiseControl.generateNetworkPolicy(operatorNamespace, operatorNamespaceLabels) : null)); } @@ -3445,7 +3444,7 @@ private boolean isPodCaCertUpToDate(Pod pod, Ca ca) { private boolean isCustomCertUpToDate(StatefulSet sts, Pod pod, String annotation) { final String stsThumbprint = Annotations.stringAnnotation(sts.getSpec().getTemplate(), annotation, ""); final String podThumbprint = Annotations.stringAnnotation(pod, annotation, ""); - log.debug("Rolling update of {}/{}: pod {} has {}={}; sts has {}={}", + LOGGER.debugCr(reconciliation, "Rolling update of {}/{}: pod {} has {}={}; sts has {}={}", sts.getMetadata().getNamespace(), sts.getMetadata().getName(), pod.getMetadata().getName(), annotation, podThumbprint, annotation, stsThumbprint); @@ -3496,8 +3495,8 @@ private List getReasonsToRestartPod(StatefulSet sts, Pod pod, reasons.add("server certificates changed"); } if (!reasons.isEmpty()) { - log.debug("{}: Rolling pod {} due to {}", - reconciliation, pod.getMetadata().getName(), reasons); + LOGGER.debugCr(reconciliation, "Rolling pod {} due to {}", + pod.getMetadata().getName(), reasons); } return reasons; } @@ -3522,7 +3521,7 @@ private boolean isMaintenanceTimeWindowsSatisfied(Supplier dateSupplier) { } return isSatisfiedBy; } catch (ParseException e) { - log.warn("The provided maintenance time windows list contains {} which is not a valid cron expression", currentCron); + LOGGER.warnCr(reconciliation, "The provided maintenance time windows list contains {} which is not a valid cron expression", currentCron); return false; } } @@ -3562,11 +3561,11 @@ Future clusterOperatorSecret(Supplier dateSupplier) { .withController(false) .build(); - Secret secret = ModelUtils.buildSecret(clusterCa, clusterCa.clusterOperatorSecret(), namespace, + Secret secret = ModelUtils.buildSecret(reconciliation, clusterCa, clusterCa.clusterOperatorSecret(), namespace, ClusterOperator.secretName(name), "cluster-operator", "cluster-operator", labels, ownerRef, isMaintenanceTimeWindowsSatisfied(dateSupplier)); - return withVoid(secretOperations.reconcile(namespace, ClusterOperator.secretName(name), + return withVoid(secretOperations.reconcile(reconciliation, namespace, ClusterOperator.secretName(name), secret)); } @@ -3603,7 +3602,7 @@ void addListenerStatus(ListenerStatus ls) { Future kafkaCustomCertificatesToStatus() { for (GenericKafkaListener listener : kafkaCluster.getListeners()) { if (listener.isTls()) { - log.debug("Adding certificate to status for listener: {}", listener.getName()); + LOGGER.debugCr(reconciliation, "Adding certificate to status for listener: {}", listener.getName()); addCertificateToListener(listener.getName(), customListenerCertificates.get(listener.getName())); } } @@ -3637,13 +3636,13 @@ String getInternalServiceHostname(String serviceName, boolean useServiceDnsDomai } private final Future getKafkaExporterDescription() { - this.kafkaExporter = KafkaExporter.fromCrd(kafkaAssembly, versions); + this.kafkaExporter = KafkaExporter.fromCrd(reconciliation, kafkaAssembly, versions); this.exporterDeployment = kafkaExporter.generateDeployment(pfa.isOpenshift(), imagePullPolicy, imagePullSecrets); return Future.succeededFuture(this); } Future kafkaExporterServiceAccount() { - return withVoid(serviceAccountOperations.reconcile(namespace, + return withVoid(serviceAccountOperations.reconcile(reconciliation, namespace, KafkaExporter.containerServiceAccountName(name), exporterDeployment != null ? 
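Editor's note: isMaintenanceTimeWindowsSatisfied above walks the configured cron expressions and treats an unparsable entry as "not satisfied", which is where the ParseException warning comes from. Assuming Quartz-style cron expressions (which the ParseException handling and isSatisfiedBy naming suggest), the per-window check can be sketched as:

import java.text.ParseException;
import java.util.Date;
import java.util.List;
import java.util.TimeZone;
import org.quartz.CronExpression;

// Check whether "now" falls into any of the configured maintenance windows.
// An invalid cron expression is logged by the operator and counts as "not satisfied".
final class MaintenanceWindowSketch {

    static boolean anyWindowSatisfied(List<String> cronWindows, Date now) {
        for (String window : cronWindows) {
            try {
                CronExpression cron = new CronExpression(window);
                cron.setTimeZone(TimeZone.getTimeZone("GMT"));
                if (cron.isSatisfiedBy(now)) {
                    return true;
                }
            } catch (ParseException e) {
                // Mirrors the warning above: the list "contains {} which is not a valid cron expression".
                return false;
            }
        }
        return false;
    }
}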
kafkaExporter.generateServiceAccount() : null)); } @@ -3664,7 +3663,7 @@ Future kafkaExporterDeployment() { int caCertGeneration = getCaCertGeneration(this.clusterCa); Annotations.annotations(exporterDeployment.getSpec().getTemplate()).put( Ca.ANNO_STRIMZI_IO_CLUSTER_CA_CERT_GENERATION, String.valueOf(caCertGeneration)); - return deploymentOperations.reconcile(namespace, KafkaExporter.kafkaExporterName(name), exporterDeployment); + return deploymentOperations.reconcile(reconciliation, namespace, KafkaExporter.kafkaExporterName(name), exporterDeployment); }) .compose(recon -> { if (recon instanceof ReconcileResult.Noop) { @@ -3678,21 +3677,21 @@ Future kafkaExporterDeployment() { return Future.succeededFuture(this); }); } else { - return withVoid(deploymentOperations.reconcile(namespace, KafkaExporter.kafkaExporterName(name), null)); + return withVoid(deploymentOperations.reconcile(reconciliation, namespace, KafkaExporter.kafkaExporterName(name), null)); } } Future kafkaExporterRollingUpdate() { - return withVoid(deploymentOperations.rollingUpdate(namespace, KafkaExporter.kafkaExporterName(name), operationTimeoutMs)); + return withVoid(deploymentOperations.rollingUpdate(reconciliation, namespace, KafkaExporter.kafkaExporterName(name), operationTimeoutMs)); } Future kafkaExporterReady() { if (this.kafkaExporter != null && exporterDeployment != null) { Future future = deploymentOperations.getAsync(namespace, this.kafkaExporter.getName()); return future.compose(dep -> { - return withVoid(deploymentOperations.waitForObserved(namespace, this.kafkaExporter.getName(), 1_000, operationTimeoutMs)); + return withVoid(deploymentOperations.waitForObserved(reconciliation, namespace, this.kafkaExporter.getName(), 1_000, operationTimeoutMs)); }).compose(dep -> { - return withVoid(deploymentOperations.readiness(namespace, this.kafkaExporter.getName(), 1_000, operationTimeoutMs)); + return withVoid(deploymentOperations.readiness(reconciliation, namespace, this.kafkaExporter.getName(), 1_000, operationTimeoutMs)); }).map(i -> this); } return withVoid(Future.succeededFuture()); @@ -3701,7 +3700,7 @@ Future kafkaExporterReady() { Future getJmxTransDescription() { try { int numOfBrokers = kafkaCluster.getReplicas(); - this.jmxTrans = JmxTrans.fromCrd(kafkaAssembly, versions); + this.jmxTrans = JmxTrans.fromCrd(reconciliation, kafkaAssembly, versions); if (this.jmxTrans != null) { this.jmxTransConfigMap = jmxTrans.generateJmxTransConfigMap(kafkaAssembly.getSpec().getJmxTrans(), numOfBrokers); this.jmxTransDeployment = jmxTrans.generateDeployment(imagePullPolicy, imagePullSecrets); @@ -3714,14 +3713,14 @@ Future getJmxTransDescription() { } Future jmxTransConfigMap() { - return withVoid(configMapOperations.reconcile(namespace, + return withVoid(configMapOperations.reconcile(reconciliation, namespace, JmxTrans.jmxTransConfigName(name), jmxTransConfigMap)); } Future jmxTransServiceAccount() { - return withVoid(serviceAccountOperations.reconcile(namespace, + return withVoid(serviceAccountOperations.reconcile(reconciliation, namespace, JmxTrans.containerServiceAccountName(name), jmxTrans != null ? 
jmxTrans.generateServiceAccount() : null)); } @@ -3737,12 +3736,12 @@ Future jmxTransDeployment() { Ca.ANNO_STRIMZI_IO_CLUSTER_CA_CERT_GENERATION, String.valueOf(caCertGeneration)); Annotations.annotations(jmxTransDeployment.getSpec().getTemplate()).put( JmxTrans.CONFIG_MAP_ANNOTATION_KEY, resourceVersion); - return withVoid(deploymentOperations.reconcile(namespace, JmxTrans.jmxTransName(name), + return withVoid(deploymentOperations.reconcile(reconciliation, namespace, JmxTrans.jmxTransName(name), jmxTransDeployment)); }); }); } else { - return withVoid(deploymentOperations.reconcile(namespace, JmxTrans.jmxTransName(name), null)); + return withVoid(deploymentOperations.reconcile(reconciliation, namespace, JmxTrans.jmxTransName(name), null)); } } @@ -3750,9 +3749,9 @@ Future jmxTransDeploymentReady() { if (this.jmxTrans != null && jmxTransDeployment != null) { Future future = deploymentOperations.getAsync(namespace, this.jmxTrans.getName()); return future.compose(dep -> { - return withVoid(deploymentOperations.waitForObserved(namespace, this.jmxTrans.getName(), 1_000, operationTimeoutMs)); + return withVoid(deploymentOperations.waitForObserved(reconciliation, namespace, this.jmxTrans.getName(), 1_000, operationTimeoutMs)); }).compose(dep -> { - return withVoid(deploymentOperations.readiness(namespace, this.jmxTrans.getName(), 1_000, operationTimeoutMs)); + return withVoid(deploymentOperations.readiness(reconciliation, namespace, this.jmxTrans.getName(), 1_000, operationTimeoutMs)); }).map(i -> this); } return withVoid(Future.succeededFuture()); @@ -3777,7 +3776,7 @@ protected KafkaStatus createStatus() { */ @Override protected Future delete(Reconciliation reconciliation) { - return withIgnoreRbacError(clusterRoleBindingOperations.reconcile(KafkaResources.initContainerClusterRoleBindingName(reconciliation.name(), reconciliation.namespace()), null), null) + return withIgnoreRbacError(reconciliation, clusterRoleBindingOperations.reconcile(reconciliation, KafkaResources.initContainerClusterRoleBindingName(reconciliation.name(), reconciliation.namespace()), null), null) .map(Boolean.FALSE); // Return FALSE since other resources are still deleted by garbage collection } } diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KafkaBridgeAssemblyOperator.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KafkaBridgeAssemblyOperator.java index 12abde1245..8bce72a5de 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KafkaBridgeAssemblyOperator.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KafkaBridgeAssemblyOperator.java @@ -20,6 +20,7 @@ import io.strimzi.operator.cluster.model.KafkaBridgeCluster; import io.strimzi.operator.cluster.model.KafkaVersion; import io.strimzi.operator.cluster.operator.resource.ResourceOperatorSupplier; +import io.strimzi.operator.common.ReconciliationLogger; import io.strimzi.operator.common.PasswordGenerator; import io.strimzi.operator.common.Reconciliation; import io.strimzi.operator.common.ReconciliationException; @@ -31,8 +32,6 @@ import io.vertx.core.Future; import io.vertx.core.Promise; import io.vertx.core.Vertx; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import java.util.Collections; @@ -43,7 +42,7 @@ * */ public class KafkaBridgeAssemblyOperator extends AbstractAssemblyOperator, KafkaBridgeSpec, KafkaBridgeStatus> { - private static final Logger log = 
LogManager.getLogger(KafkaBridgeAssemblyOperator.class.getName()); + private static final ReconciliationLogger LOGGER = ReconciliationLogger.create(KafkaBridgeAssemblyOperator.class.getName()); private final DeploymentOperator deploymentOperations; private final KafkaVersion.Lookup versions; @@ -73,8 +72,9 @@ protected Future createOrUpdate(Reconciliation reconciliation KafkaBridgeCluster bridge; try { - bridge = KafkaBridgeCluster.fromCrd(assemblyResource, versions); + bridge = KafkaBridgeCluster.fromCrd(reconciliation, assemblyResource, versions); } catch (Exception e) { + LOGGER.warnCr(reconciliation, e); StatusUtils.setStatusConditionAndObservedGeneration(assemblyResource, kafkaBridgeStatus, Future.failedFuture(e)); return Future.failedFuture(new ReconciliationException(kafkaBridgeStatus, e)); } @@ -82,17 +82,17 @@ protected Future createOrUpdate(Reconciliation reconciliation Promise createOrUpdatePromise = Promise.promise(); boolean bridgeHasZeroReplicas = bridge.getReplicas() == 0; - log.debug("{}: Updating Kafka Bridge cluster", reconciliation); - kafkaBridgeServiceAccount(namespace, bridge) - .compose(i -> deploymentOperations.scaleDown(namespace, bridge.getName(), bridge.getReplicas())) - .compose(scale -> serviceOperations.reconcile(namespace, bridge.getServiceName(), bridge.generateService())) - .compose(i -> Util.metricsAndLogging(configMapOperations, namespace, bridge.getLogging(), null)) - .compose(metricsAndLogging -> configMapOperations.reconcile(namespace, bridge.getAncillaryConfigMapName(), bridge.generateMetricsAndLogConfigMap(metricsAndLogging))) - .compose(i -> podDisruptionBudgetOperator.reconcile(namespace, bridge.getName(), bridge.generatePodDisruptionBudget())) - .compose(i -> deploymentOperations.reconcile(namespace, bridge.getName(), bridge.generateDeployment(Collections.emptyMap(), pfa.isOpenshift(), imagePullPolicy, imagePullSecrets))) - .compose(i -> deploymentOperations.scaleUp(namespace, bridge.getName(), bridge.getReplicas())) - .compose(i -> deploymentOperations.waitForObserved(namespace, bridge.getName(), 1_000, operationTimeoutMs)) - .compose(i -> bridgeHasZeroReplicas ? Future.succeededFuture() : deploymentOperations.readiness(namespace, bridge.getName(), 1_000, operationTimeoutMs)) + LOGGER.debugCr(reconciliation, "Updating Kafka Bridge cluster"); + kafkaBridgeServiceAccount(reconciliation, namespace, bridge) + .compose(i -> deploymentOperations.scaleDown(reconciliation, namespace, bridge.getName(), bridge.getReplicas())) + .compose(scale -> serviceOperations.reconcile(reconciliation, namespace, bridge.getServiceName(), bridge.generateService())) + .compose(i -> Util.metricsAndLogging(reconciliation, configMapOperations, namespace, bridge.getLogging(), null)) + .compose(metricsAndLogging -> configMapOperations.reconcile(reconciliation, namespace, bridge.getAncillaryConfigMapName(), bridge.generateMetricsAndLogConfigMap(metricsAndLogging))) + .compose(i -> podDisruptionBudgetOperator.reconcile(reconciliation, namespace, bridge.getName(), bridge.generatePodDisruptionBudget())) + .compose(i -> deploymentOperations.reconcile(reconciliation, namespace, bridge.getName(), bridge.generateDeployment(Collections.emptyMap(), pfa.isOpenshift(), imagePullPolicy, imagePullSecrets))) + .compose(i -> deploymentOperations.scaleUp(reconciliation, namespace, bridge.getName(), bridge.getReplicas())) + .compose(i -> deploymentOperations.waitForObserved(reconciliation, namespace, bridge.getName(), 1_000, operationTimeoutMs)) + .compose(i -> bridgeHasZeroReplicas ? 
Future.succeededFuture() : deploymentOperations.readiness(reconciliation, namespace, bridge.getName(), 1_000, operationTimeoutMs)) .onComplete(reconciliationResult -> { StatusUtils.setStatusConditionAndObservedGeneration(assemblyResource, kafkaBridgeStatus, reconciliationResult.mapEmpty()); if (!bridgeHasZeroReplicas) { @@ -121,8 +121,8 @@ protected KafkaBridgeStatus createStatus() { return new KafkaBridgeStatus(); } - Future> kafkaBridgeServiceAccount(String namespace, KafkaBridgeCluster bridge) { - return serviceAccountOperations.reconcile(namespace, + Future> kafkaBridgeServiceAccount(Reconciliation reconciliation, String namespace, KafkaBridgeCluster bridge) { + return serviceAccountOperations.reconcile(reconciliation, namespace, KafkaBridgeResources.serviceAccountName(bridge.getCluster()), bridge.generateServiceAccount()); } diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectApi.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectApi.java index 0733e1e9bf..9155679c5e 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectApi.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectApi.java @@ -6,6 +6,7 @@ import io.strimzi.api.kafka.model.connect.ConnectorPlugin; import io.strimzi.operator.common.BackOff; +import io.strimzi.operator.common.Reconciliation; import io.strimzi.operator.common.model.OrderedProperties; import io.vertx.core.Future; import io.vertx.core.http.HttpClientResponse; @@ -20,6 +21,7 @@ public interface KafkaConnectApi { /** * Make a {@code PUT} request to {@code /connectors/${connectorName}/config}. + * @param reconciliation The reconciliation * @param host The host to make the request to. * @param port The port to make the request to. * @param connectorName The name of the connector to create or update. @@ -27,51 +29,56 @@ public interface KafkaConnectApi { * @return A Future which completes with the result of the request. If the request was successful, * this returns information about the connector, including its name, config and tasks. */ - Future> createOrUpdatePutRequest(String host, int port, String connectorName, JsonObject configJson); + Future> createOrUpdatePutRequest(Reconciliation reconciliation, String host, int port, String connectorName, JsonObject configJson); /** * Make a {@code GET} request to {@code /connectors/${connectorName}/config}. + * @param reconciliation The reconciliation * @param host The host to make the request to. * @param port The port to make the request to. * @param connectorName The name of the connector to get the config of. * @return A Future which completes with the result of the request. If the request was successful, * this returns the connector's config. */ - Future> getConnectorConfig(String host, int port, String connectorName); + Future> getConnectorConfig(Reconciliation reconciliation, String host, int port, String connectorName); - Future> getConnectorConfig(BackOff backOff, String host, int port, String connectorName); + Future> getConnectorConfig(Reconciliation reconciliation, BackOff backOff, String host, int port, String connectorName); /** * Make a {@code GET} request to {@code /connectors/${connectorName}}. + * @param reconciliation The reconciliation * @param host The host to make the request to. * @param port The port to make the request to. * @param connectorName The name of the connector to create or update. 
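The central change in this patch is replacing each class's plain Log4j2 Logger with a ReconciliationLogger whose debugCr/infoCr/warnCr variants take the Reconciliation as their first argument. The sketch below shows one plausible shape for that idea, assuming the logger wraps Log4j2 and attaches a Marker derived from the reconciliation so that logging configuration can raise verbosity for a single CR; apart from the create/debugCr/infoCr/warnCr names visible in this diff, everything here is illustrative and not the actual Strimzi implementation.

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.MarkerManager;

// Hypothetical sketch only; not the ReconciliationLogger class introduced by this PR.
public class ReconciliationLoggerSketch {
    private final Logger logger;

    private ReconciliationLoggerSketch(Logger logger) {
        this.logger = logger;
    }

    public static ReconciliationLoggerSketch create(String name) {
        return new ReconciliationLoggerSketch(LogManager.getLogger(name));
    }

    // Prefixes the message with the reconciliation and attaches a Marker derived from it,
    // so a Log4j2 MarkerFilter can selectively enable DEBUG for one custom resource.
    public void debugCr(Object reconciliation, String msg, Object... args) {
        logger.debug(MarkerManager.getMarker(String.valueOf(reconciliation)), reconciliation + ": " + msg, args);
    }

    public void infoCr(Object reconciliation, String msg, Object... args) {
        logger.info(MarkerManager.getMarker(String.valueOf(reconciliation)), reconciliation + ": " + msg, args);
    }

    public void warnCr(Object reconciliation, String msg, Object... args) {
        logger.warn(MarkerManager.getMarker(String.valueOf(reconciliation)), reconciliation + ": " + msg, args);
    }
}

With this shape, a call such as LOGGER.debugCr(reconciliation, "Updating Kafka Bridge cluster") carries the CR context in the message and a marker that a Log4j2 filter can match on.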
* @return A Future which completes with the result of the request. If the request was successful, * this returns information about the connector, including its name, config and tasks. */ - Future> getConnector(String host, int port, String connectorName); + Future> getConnector(Reconciliation reconciliation, String host, int port, String connectorName); /** * Make a {@code DELETE} request to {@code /connectors/${connectorName}}. + * @param reconciliation The reconciliation * @param host The host to make the request to. * @param port The port to make the request to. * @param connectorName The name of the connector to delete. * @return A Future which completes with the result of the request. */ - Future delete(String host, int port, String connectorName); + Future delete(Reconciliation reconciliation, String host, int port, String connectorName); /** * Make a {@code GET} request to {@code /connectors/${connectorName}/status}. + * @param reconciliation The reconciliation * @param host The host to make the request to. * @param port The port to make the request to. * @param connectorName The name of the connector to get the status of. * @return A Future which completes with the result of the request. If the request was successful, * this returns the connector's status. */ - Future> status(String host, int port, String connectorName); + Future> status(Reconciliation reconciliation, String host, int port, String connectorName); /** * Make a {@code GET} request to {@code /connectors/${connectorName}/status}. + * @param reconciliation The reconciliation * @param host The host to make the request to. * @param port The port to make the request to. * @param connectorName The name of the connector to get the status of. @@ -79,10 +86,11 @@ public interface KafkaConnectApi { * @return A Future which completes with the result of the request. If the request was successful, * this returns the connector's status. */ - Future> status(String host, int port, String connectorName, Set okStatusCodes); + Future> status(Reconciliation reconciliation, String host, int port, String connectorName, Set okStatusCodes); /** * Make a {@code GET} request to {@code /connectors/${connectorName}/status}, retrying according to {@code backoff}. + * @param reconciliation The reconciliation * @param backOff The backoff parameters. * @param host The host to make the request to. * @param port The port to make the request to. @@ -90,7 +98,7 @@ public interface KafkaConnectApi { * @return A Future which completes with the result of the request. If the request was successful, * this returns the connector's status. */ - Future> statusWithBackOff(BackOff backOff, String host, int port, String connectorName); + Future> statusWithBackOff(Reconciliation reconciliation, BackOff backOff, String host, int port, String connectorName); /** * Make a {@code PUT} request to {@code /connectors/${connectorName}/pause}. @@ -121,15 +129,17 @@ public interface KafkaConnectApi { /** * Make a {@code GET} request to {@code /connector-plugins}. + * @param reconciliation The reconciliation * @param host The host to make the request to. * @param port The port to make the request to. * @return A Future which completes with the result of the request. If the request was successful, * this returns the list of connector plugins. */ - Future> listConnectorPlugins(String host, int port); + Future> listConnectorPlugins(Reconciliation reconciliation, String host, int port); /** * Make a {@code GET} request to {@code /admin/loggers/$logger}. 
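updateConnectLoggers reconciles the desired logging configuration against what the Connect REST API currently reports and then updates each logger whose level differs. The single update it boils down to is a PUT against /admin/loggers/<logger> with a {"level": "..."} body, as the implementation further down in this diff shows; the snippet below illustrates that one call with the JDK HTTP client and Jackson instead of the operator's Vert.x client, and the host, port and logger values are examples only.

import com.fasterxml.jackson.databind.ObjectMapper;

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;
import java.util.Map;

public class ConnectLoggerLevelSketch {
    // PUT /admin/loggers/<logger> with body {"level": "<level>"}; true when Connect answers 200.
    static boolean setLoggerLevel(String host, int port, String logger, String level) throws Exception {
        String body = new ObjectMapper().writeValueAsString(Map.of("level", level));
        HttpRequest request = HttpRequest.newBuilder(URI.create("http://" + host + ":" + port + "/admin/loggers/" + logger))
                .header("Content-Type", "application/json")
                .PUT(HttpRequest.BodyPublishers.ofString(body))
                .build();
        HttpResponse<String> response = HttpClient.newHttpClient().send(request, HttpResponse.BodyHandlers.ofString());
        return response.statusCode() == 200;
    }

    public static void main(String[] args) throws Exception {
        // Example values; assumes a Connect REST endpoint is reachable on localhost:8083.
        System.out.println(setLoggerLevel("localhost", 8083, "org.apache.kafka.connect", "DEBUG"));
    }
}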
+ * @param reconciliation The reconciliation * @param host The host to make the request to. * @param port The port to make the request to. * @param desiredLogging Desired logging. @@ -137,16 +147,17 @@ public interface KafkaConnectApi { * @return A Future which completes with the result of the request. If the request was successful, * this returns the list of connector loggers. */ - Future updateConnectLoggers(String host, int port, String desiredLogging, OrderedProperties defaultLogging); + Future updateConnectLoggers(Reconciliation reconciliation, String host, int port, String desiredLogging, OrderedProperties defaultLogging); /** * Make a {@code GET} request to {@code /admin/loggers}. + * @param reconciliation The reconciliation * @param host The host to make the request to. * @param port The port to make the request to. * @return A Future which completes with the result of the request. If the request was successful, * this returns the list of connect loggers. */ - Future>> listConnectLoggers(String host, int port); + Future>> listConnectLoggers(Reconciliation reconciliation, String host, int port); /** * Make a {@code POST} request to {@code /connectors/${connectorName}/restart}. @@ -169,13 +180,14 @@ public interface KafkaConnectApi { /** * Make a {@code GET} request to {@code /connectors/${connectorName}/topics}. + * @param reconciliation The reconciliation * @param host The host to make the request to. * @param port The port to make the request to. * @param connectorName The name of the connector to get the status of. * @return A Future which completes with the result of the request. If the request was successful, * this returns the connector's topics. */ - Future> getConnectorTopics(String host, int port, String connectorName); + Future> getConnectorTopics(Reconciliation reconciliation, String host, int port, String connectorName); } class ConnectRestException extends RuntimeException { diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectApiImpl.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectApiImpl.java index 72e0ec9002..ddaa02360f 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectApiImpl.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectApiImpl.java @@ -10,6 +10,8 @@ import io.strimzi.api.kafka.model.connect.ConnectorPlugin; import io.strimzi.operator.cluster.operator.resource.HttpClientUtils; import io.strimzi.operator.common.BackOff; +import io.strimzi.operator.common.Reconciliation; +import io.strimzi.operator.common.ReconciliationLogger; import io.strimzi.operator.common.Util; import io.strimzi.operator.common.model.OrderedProperties; import io.vertx.core.Future; @@ -21,8 +23,6 @@ import io.vertx.core.http.HttpMethod; import io.vertx.core.json.JsonArray; import io.vertx.core.json.JsonObject; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import java.io.IOException; import java.util.ArrayList; @@ -39,7 +39,7 @@ @SuppressWarnings({"deprecation"}) class KafkaConnectApiImpl implements KafkaConnectApi { - private static final Logger log = LogManager.getLogger(KafkaConnectApiImpl.class); + private static final ReconciliationLogger LOGGER = ReconciliationLogger.create(KafkaConnectApiImpl.class); public static final TypeReference> TREE_TYPE = new TypeReference>() { }; public static final TypeReference> MAP_OF_STRINGS = new TypeReference>() { @@ 
-58,11 +58,12 @@ public KafkaConnectApiImpl(Vertx vertx) { @Override @SuppressWarnings("unchecked") public Future> createOrUpdatePutRequest( + Reconciliation reconciliation, String host, int port, String connectorName, JsonObject configJson) { Buffer data = configJson.toBuffer(); String path = "/connectors/" + connectorName + "/config"; - log.debug("Making PUT request to {} with body {}", path, configJson); + LOGGER.debugCr(reconciliation, "Making PUT request to {} with body {}", path, configJson); return HttpClientUtils.withHttpClient(vertx, new HttpClientOptions().setLogActivity(true), (httpClient, result) -> httpClient.request(HttpMethod.PUT, port, host, path, request -> { if (request.succeeded()) { @@ -77,7 +78,7 @@ public Future> createOrUpdatePutRequest( response.result().bodyHandler(buffer -> { try { Map t = mapper.readValue(buffer.getBytes(), Map.class); - log.debug("Got {} response to PUT request to {}: {}", response.result().statusCode(), path, t); + LOGGER.debugCr(reconciliation, "Got {} response to PUT request to {}: {}", response.result().statusCode(), path, t); result.complete(t); } catch (IOException e) { result.fail(new ConnectRestException(response.result(), "Could not deserialize response: " + e)); @@ -85,7 +86,7 @@ public Future> createOrUpdatePutRequest( }); } else { // TODO Handle 409 (Conflict) indicating a rebalance in progress - log.debug("Got {} response to PUT request to {}", response.result().statusCode(), path); + LOGGER.debugCr(reconciliation, "Got {} response to PUT request to {}", response.result().statusCode(), path); response.result().bodyHandler(buffer -> { JsonObject x = buffer.toJsonObject(); result.fail(new ConnectRestException(response.result(), x.getString("message"))); @@ -103,15 +104,16 @@ public Future> createOrUpdatePutRequest( @Override public Future> getConnector( + Reconciliation reconciliation, String host, int port, String connectorName) { - return doGet(host, port, String.format("/connectors/%s", connectorName), + return doGet(reconciliation, host, port, String.format("/connectors/%s", connectorName), new HashSet<>(asList(200, 201)), TREE_TYPE); } - private Future doGet(String host, int port, String path, Set okStatusCodes, TypeReference type) { - log.debug("Making GET request to {}", path); + private Future doGet(Reconciliation reconciliation, String host, int port, String path, Set okStatusCodes, TypeReference type) { + LOGGER.debugCr(reconciliation, "Making GET request to {}", path); return HttpClientUtils.withHttpClient(vertx, new HttpClientOptions().setLogActivity(true), (httpClient, result) -> httpClient.request(HttpMethod.GET, port, host, path, request -> { if (request.succeeded()) { @@ -123,7 +125,7 @@ private Future doGet(String host, int port, String path, Set okS response.result().bodyHandler(buffer -> { try { T t = mapper.readValue(buffer.getBytes(), type); - log.debug("Got {} response to GET request to {}: {}", response.result().statusCode(), path, t); + LOGGER.debugCr(reconciliation, "Got {} response to GET request to {}: {}", response.result().statusCode(), path, t); result.complete(t); } catch (IOException e) { result.fail(new ConnectRestException(response.result(), "Could not deserialize response: " + e)); @@ -131,7 +133,7 @@ private Future doGet(String host, int port, String path, Set okS }); } else { // TODO Handle 409 (Conflict) indicating a rebalance in progress - log.debug("Got {} response to GET request to {}", response.result().statusCode(), path); + LOGGER.debugCr(reconciliation, "Got {} response to GET request to 
{}", response.result().statusCode(), path); response.result().bodyHandler(buffer -> { JsonObject x = buffer.toJsonObject(); result.fail(new ConnectRestException(response.result(), x.getString("message"))); @@ -149,21 +151,22 @@ private Future doGet(String host, int port, String path, Set okS @Override public Future> getConnectorConfig( + Reconciliation reconciliation, String host, int port, String connectorName) { - return doGet(host, port, String.format("/connectors/%s/config", connectorName), + return doGet(reconciliation, host, port, String.format("/connectors/%s/config", connectorName), new HashSet<>(asList(200, 201)), MAP_OF_STRINGS); } @Override - public Future> getConnectorConfig(BackOff backOff, String host, int port, String connectorName) { - return withBackoff(backOff, connectorName, Collections.singleton(409), - () -> getConnectorConfig(host, port, connectorName), "config"); + public Future> getConnectorConfig(Reconciliation reconciliation, BackOff backOff, String host, int port, String connectorName) { + return withBackoff(reconciliation, backOff, connectorName, Collections.singleton(409), + () -> getConnectorConfig(reconciliation, host, port, connectorName), "config"); } @Override - public Future delete(String host, int port, String connectorName) { + public Future delete(Reconciliation reconciliation, String host, int port, String connectorName) { String path = "/connectors/" + connectorName; return HttpClientUtils.withHttpClient(vertx, new HttpClientOptions().setLogActivity(true), (httpClient, result) -> httpClient.request(HttpMethod.DELETE, port, host, path, request -> { @@ -174,9 +177,9 @@ public Future delete(String host, int port, String connectorName) { request.result().send(response -> { if (response.succeeded()) { if (response.result().statusCode() == 204) { - log.debug("Connector was deleted. Waiting for status deletion!"); - withBackoff(new BackOff(200L, 2, 10), connectorName, Collections.singleton(200), - () -> status(host, port, connectorName, Collections.singleton(404)), "status") + LOGGER.debugCr(reconciliation, "Connector was deleted. 
Waiting for status deletion!"); + withBackoff(reconciliation, new BackOff(200L, 2, 10), connectorName, Collections.singleton(200), + () -> status(reconciliation, host, port, connectorName, Collections.singleton(404)), "status") .onComplete(res -> { if (res.succeeded()) { result.complete(); @@ -202,12 +205,13 @@ public Future delete(String host, int port, String connectorName) { } @Override - public Future> statusWithBackOff(BackOff backOff, String host, int port, String connectorName) { - return withBackoff(backOff, connectorName, Collections.singleton(404), - () -> status(host, port, connectorName), "status"); + public Future> statusWithBackOff(Reconciliation reconciliation, BackOff backOff, String host, int port, String connectorName) { + return withBackoff(reconciliation, backOff, connectorName, Collections.singleton(404), + () -> status(reconciliation, host, port, connectorName), "status"); } - private Future withBackoff(BackOff backOff, String connectorName, + private Future withBackoff(Reconciliation reconciliation, + BackOff backOff, String connectorName, Set retriableStatusCodes, Supplier> supplier, String attribute) { @@ -224,10 +228,10 @@ public void handle(Long tid) { if (cause instanceof ConnectRestException && retriableStatusCodes.contains(((ConnectRestException) cause).getStatusCode())) { if (backOff.done()) { - log.debug("Connector {} {} returned HTTP {} and we run out of back off time", connectorName, attribute, ((ConnectRestException) cause).getStatusCode()); + LOGGER.debugCr(reconciliation, "Connector {} {} returned HTTP {} and we run out of back off time", connectorName, attribute, ((ConnectRestException) cause).getStatusCode()); result.fail(cause); } else { - log.debug("Connector {} {} returned HTTP {} - backing off", connectorName, attribute, ((ConnectRestException) cause).getStatusCode()); + LOGGER.debugCr(reconciliation, "Connector {} {} returned HTTP {} - backing off", connectorName, attribute, ((ConnectRestException) cause).getStatusCode()); rescheduleOrComplete(tid); } } else { @@ -239,12 +243,12 @@ public void handle(Long tid) { void rescheduleOrComplete(Long tid) { if (backOff.done()) { - log.warn("Giving up waiting for status of connector {} after {} attempts taking {}ms", + LOGGER.warnCr(reconciliation, "Giving up waiting for status of connector {} after {} attempts taking {}ms", connectorName, backOff.maxAttempts(), backOff.totalDelayMs()); } else { // Schedule ourselves to run again long delay = backOff.delayMs(); - log.debug("Status for connector {} not found; " + + LOGGER.debugCr(reconciliation, "Status for connector {} not found; " + "backing off for {}ms (cumulative {}ms)", connectorName, delay, backOff.cumulativeDelayMs()); if (delay < 1) { @@ -261,14 +265,14 @@ void rescheduleOrComplete(Long tid) { } @Override - public Future> status(String host, int port, String connectorName) { - return status(host, port, connectorName, Collections.singleton(200)); + public Future> status(Reconciliation reconciliation, String host, int port, String connectorName) { + return status(reconciliation, host, port, connectorName, Collections.singleton(200)); } @Override - public Future> status(String host, int port, String connectorName, Set okStatusCodes) { + public Future> status(Reconciliation reconciliation, String host, int port, String connectorName, Set okStatusCodes) { String path = "/connectors/" + connectorName + "/status"; - return doGet(host, port, path, okStatusCodes, TREE_TYPE); + return doGet(reconciliation, host, port, path, okStatusCodes, TREE_TYPE); } 
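withBackoff retries a Connect REST call while it keeps failing with one of the retriable status codes and gives up with a warning once the BackOff is exhausted. The synchronous sketch below mirrors that retry shape with invented names and a plain exponential delay standing in for the operator's BackOff and Vert.x timer machinery.

import java.util.Set;
import java.util.function.Supplier;

public class BackoffRetrySketch {
    // Thrown by the sketched REST call when the server answers with a non-OK status code.
    static class RestException extends RuntimeException {
        final int statusCode;

        RestException(int statusCode) {
            super("HTTP " + statusCode);
            this.statusCode = statusCode;
        }
    }

    // Retry while the call fails with a retriable status code, doubling the delay each attempt.
    static <T> T withBackoff(Supplier<T> call, Set<Integer> retriableStatusCodes,
                             long initialDelayMs, int maxAttempts) throws InterruptedException {
        long delay = initialDelayMs;
        for (int attempt = 1; ; attempt++) {
            try {
                return call.get();
            } catch (RestException e) {
                if (!retriableStatusCodes.contains(e.statusCode) || attempt >= maxAttempts) {
                    throw e;  // not retriable, or we ran out of backoff attempts
                }
                Thread.sleep(delay);
                delay *= 2;
            }
        }
    }

    public static void main(String[] args) throws InterruptedException {
        // Succeeds on the third attempt; the first two simulate a 404 while the connector status is not yet available.
        int[] calls = {0};
        String status = withBackoff(() -> {
            if (++calls[0] < 3) {
                throw new RestException(404);
            }
            return "RUNNING";
        }, Set.of(404), 200L, 10);
        System.out.println(status + " after " + calls[0] + " attempts");
    }
}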
@Override @@ -345,7 +349,7 @@ public Future> list(String host, int port) { } @Override - public Future> listConnectorPlugins(String host, int port) { + public Future> listConnectorPlugins(Reconciliation reconciliation, String host, int port) { String path = "/connector-plugins"; return HttpClientUtils.withHttpClient(vertx, new HttpClientOptions().setLogActivity(true), (httpClient, result) -> httpClient.request(HttpMethod.GET, port, host, path, request -> { @@ -359,7 +363,7 @@ public Future> listConnectorPlugins(String host, int port) try { result.complete(asList(mapper.readValue(buffer.getBytes(), ConnectorPlugin[].class))); } catch (IOException e) { - log.warn("Failed to parse list of connector plugins", e); + LOGGER.warnCr(reconciliation, "Failed to parse list of connector plugins", e); result.fail(new ConnectRestException(response.result(), "Failed to parse list of connector plugins", e)); } }); @@ -376,11 +380,11 @@ public Future> listConnectorPlugins(String host, int port) })); } - private Future updateConnectorLogger(String host, int port, String logger, String level) { + private Future updateConnectorLogger(Reconciliation reconciliation, String host, int port, String logger, String level) { String path = "/admin/loggers/" + logger; JsonObject levelJO = new JsonObject(); levelJO.put("level", level); - log.debug("Making PUT request to {} with body {}", path, levelJO); + LOGGER.debugCr(reconciliation, "Making PUT request to {} with body {}", path, levelJO); return HttpClientUtils.withHttpClient(vertx, new HttpClientOptions().setLogActivity(true), (httpClient, result) -> { Buffer buffer = levelJO.toBuffer(); httpClient @@ -394,11 +398,11 @@ private Future updateConnectorLogger(String host, int port, String logger, if (response.succeeded()) { if (response.result().statusCode() == 200) { response.result().bodyHandler(body -> { - log.debug("Logger {} updated to level {}", logger, level); + LOGGER.debugCr(reconciliation, "Logger {} updated to level {}", logger, level); result.complete(); }); } else { - log.debug("Logger {} did not update to level {} (http code {})", logger, level, response.result().statusCode()); + LOGGER.debugCr(reconciliation, "Logger {} did not update to level {} (http code {})", logger, level, response.result().statusCode()); result.fail(new ConnectRestException(response.result(), "Unexpected status code")); } } else { @@ -413,7 +417,7 @@ private Future updateConnectorLogger(String host, int port, String logger, } @Override - public Future>> listConnectLoggers(String host, int port) { + public Future>> listConnectLoggers(Reconciliation reconciliation, String host, int port) { String path = "/admin/loggers/"; return HttpClientUtils.withHttpClient(vertx, new HttpClientOptions().setLogActivity(true), (httpClient, result) -> httpClient.request(HttpMethod.GET, port, host, path, request -> { @@ -428,7 +432,7 @@ public Future>> listConnectLoggers(String host, Map> fetchedLoggers = mapper.readValue(buffer.getBytes(), MAP_OF_MAP_OF_STRINGS); result.complete(fetchedLoggers); } catch (IOException e) { - log.warn("Failed to get list of connector loggers", e); + LOGGER.warnCr(reconciliation, "Failed to get list of connector loggers", e); result.fail(new ConnectRestException(response.result(), "Failed to get connector loggers", e)); } }); @@ -445,7 +449,7 @@ public Future>> listConnectLoggers(String host, })); } - private Future updateLoggers(String host, int port, String desiredLogging, Map> fetchedLoggers, OrderedProperties defaultLogging) { + private Future 
updateLoggers(Reconciliation reconciliation, String host, int port, String desiredLogging, Map> fetchedLoggers, OrderedProperties defaultLogging) { desiredLogging = Util.expandVars(desiredLogging); Map updateLoggers = new LinkedHashMap<>(); defaultLogging.asMap().entrySet().forEach(entry -> { @@ -477,7 +481,7 @@ private Future updateLoggers(String host, int port, String desiredLogging, LinkedHashMap updateSortedLoggers = sortLoggers(updateLoggers); Future result = Future.succeededFuture(); for (Map.Entry logger : updateSortedLoggers.entrySet()) { - result = result.compose(previous -> updateConnectorLogger(host, port, logger.getKey(), getLoggerLevelFromAppenderCouple(logger.getValue()))); + result = result.compose(previous -> updateConnectorLogger(reconciliation, host, port, logger.getKey(), getLoggerLevelFromAppenderCouple(logger.getValue()))); } return result; } @@ -497,9 +501,9 @@ private String getLoggerLevelFromAppenderCouple(String couple) { } @Override - public Future updateConnectLoggers(String host, int port, String desiredLogging, OrderedProperties defaultLogging) { - return listConnectLoggers(host, port) - .compose(fetchedLoggers -> updateLoggers(host, port, desiredLogging, fetchedLoggers, defaultLogging)); + public Future updateConnectLoggers(Reconciliation reconciliation, String host, int port, String desiredLogging, OrderedProperties defaultLogging) { + return listConnectLoggers(reconciliation, host, port) + .compose(fetchedLoggers -> updateLoggers(reconciliation, host, port, desiredLogging, fetchedLoggers, defaultLogging)); } /** @@ -568,7 +572,7 @@ private Future restartConnectorOrTask(String host, int port, String path) } @Override - public Future> getConnectorTopics(String host, int port, String connectorName) { + public Future> getConnectorTopics(Reconciliation reconciliation, String host, int port, String connectorName) { String path = String.format("/connectors/%s/topics", connectorName); return HttpClientUtils.withHttpClient(vertx, new HttpClientOptions().setLogActivity(true), (httpClient, result) -> httpClient.request(HttpMethod.GET, port, host, path, request -> { @@ -587,7 +591,7 @@ public Future> getConnectorTopics(String host, int port, String con Map>> t = mapper.readValue(buffer.getBytes(), MAP_OF_MAP_OF_LIST_OF_STRING); result.complete(t.get(connectorName).get("topics")); } catch (IOException e) { - log.warn("Failed to parse list of connector topics", e); + LOGGER.warnCr(reconciliation, "Failed to parse list of connector topics", e); result.fail(new ConnectRestException(response.result(), "Failed to parse list of connector topics", e)); } }); diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectAssemblyOperator.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectAssemblyOperator.java index cbcd96f0e8..2b3ae30453 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectAssemblyOperator.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectAssemblyOperator.java @@ -31,6 +31,7 @@ import io.strimzi.operator.cluster.model.KafkaVersion; import io.strimzi.operator.cluster.operator.resource.ResourceOperatorSupplier; import io.strimzi.operator.common.Annotations; +import io.strimzi.operator.common.ReconciliationLogger; import io.strimzi.operator.common.Reconciliation; import io.strimzi.operator.common.ReconciliationException; import io.strimzi.operator.common.Util; @@ -45,8 +46,6 @@ import 
io.vertx.core.Future; import io.vertx.core.Promise; import io.vertx.core.Vertx; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import java.util.HashMap; import java.util.Map; @@ -62,7 +61,7 @@ // Deprecation is suppressed because of KafkaConnectS2I @SuppressWarnings("deprecation") public class KafkaConnectAssemblyOperator extends AbstractConnectOperator, KafkaConnectSpec, KafkaConnectStatus> { - private static final Logger log = LogManager.getLogger(KafkaConnectAssemblyOperator.class.getName()); + private static final ReconciliationLogger LOGGER = ReconciliationLogger.create(KafkaConnectAssemblyOperator.class.getName()); private final DeploymentOperator deploymentOperations; private final NetworkPolicyOperator networkPolicyOperator; private final PodOperator podOperator; @@ -113,9 +112,10 @@ protected Future createOrUpdate(Reconciliation reconciliatio KafkaConnectBuild build; KafkaConnectStatus kafkaConnectStatus = new KafkaConnectStatus(); try { - connect = KafkaConnectCluster.fromCrd(kafkaConnect, versions); - build = KafkaConnectBuild.fromCrd(kafkaConnect, versions); + connect = KafkaConnectCluster.fromCrd(reconciliation, kafkaConnect, versions); + build = KafkaConnectBuild.fromCrd(reconciliation, kafkaConnect, versions); } catch (Exception e) { + LOGGER.warnCr(reconciliation, e); StatusUtils.setStatusConditionAndObservedGeneration(kafkaConnect, kafkaConnectStatus, Future.failedFuture(e)); return Future.failedFuture(new ReconciliationException(kafkaConnectStatus, e)); } @@ -125,7 +125,7 @@ protected Future createOrUpdate(Reconciliation reconciliatio Map annotations = new HashMap<>(2); - log.debug("{}: Updating Kafka Connect cluster", reconciliation); + LOGGER.debugCr(reconciliation, "Updating Kafka Connect cluster"); Future connectS2ICheck; if (connectS2IOperations != null) { @@ -148,9 +148,9 @@ protected Future createOrUpdate(Reconciliation reconciliatio return Future.succeededFuture(); } }) - .compose(i -> connectServiceAccount(namespace, connect)) - .compose(i -> connectInitClusterRoleBinding(namespace, kafkaConnect.getMetadata().getName(), connect)) - .compose(i -> networkPolicyOperator.reconcile(namespace, connect.getName(), connect.generateNetworkPolicy(isUseResources(kafkaConnect), operatorNamespace, operatorNamespaceLabels))) + .compose(i -> connectServiceAccount(reconciliation, namespace, connect)) + .compose(i -> connectInitClusterRoleBinding(reconciliation, namespace, kafkaConnect.getMetadata().getName(), connect)) + .compose(i -> networkPolicyOperator.reconcile(reconciliation, namespace, connect.getName(), connect.generateNetworkPolicy(isUseResources(kafkaConnect), operatorNamespace, operatorNamespaceLabels))) .compose(i -> deploymentOperations.getAsync(namespace, connect.getName())) .compose(deployment -> { if (deployment != null) { @@ -162,19 +162,19 @@ protected Future createOrUpdate(Reconciliation reconciliatio return Future.succeededFuture(); }) - .compose(i -> connectBuild(namespace, build, buildState)) - .compose(i -> deploymentOperations.scaleDown(namespace, connect.getName(), connect.getReplicas())) - .compose(scale -> serviceOperations.reconcile(namespace, connect.getServiceName(), connect.generateService())) - .compose(i -> Util.metricsAndLogging(configMapOperations, namespace, connect.getLogging(), connect.getMetricsConfigInCm())) + .compose(i -> connectBuild(reconciliation, namespace, build, buildState)) + .compose(i -> deploymentOperations.scaleDown(reconciliation, namespace, connect.getName(), connect.getReplicas())) + 
.compose(scale -> serviceOperations.reconcile(reconciliation, namespace, connect.getServiceName(), connect.generateService())) + .compose(i -> Util.metricsAndLogging(reconciliation, configMapOperations, namespace, connect.getLogging(), connect.getMetricsConfigInCm())) .compose(metricsAndLoggingCm -> { ConfigMap logAndMetricsConfigMap = connect.generateMetricsAndLogConfigMap(metricsAndLoggingCm); annotations.put(Annotations.ANNO_STRIMZI_LOGGING_DYNAMICALLY_UNCHANGEABLE_HASH, Util.stringHash(Util.getLoggingDynamicallyUnmodifiableEntries(logAndMetricsConfigMap.getData().get(AbstractModel.ANCILLARY_CM_KEY_LOG_CONFIG)))); desiredLogging.set(logAndMetricsConfigMap.getData().get(AbstractModel.ANCILLARY_CM_KEY_LOG_CONFIG)); - return configMapOperations.reconcile(namespace, connect.getAncillaryConfigMapName(), logAndMetricsConfigMap); + return configMapOperations.reconcile(reconciliation, namespace, connect.getAncillaryConfigMapName(), logAndMetricsConfigMap); }) - .compose(i -> kafkaConnectJmxSecret(namespace, kafkaConnect.getMetadata().getName(), connect)) - .compose(i -> podDisruptionBudgetOperator.reconcile(namespace, connect.getName(), connect.generatePodDisruptionBudget())) + .compose(i -> kafkaConnectJmxSecret(reconciliation, namespace, kafkaConnect.getMetadata().getName(), connect)) + .compose(i -> podDisruptionBudgetOperator.reconcile(reconciliation, namespace, connect.getName(), connect.generatePodDisruptionBudget())) .compose(i -> { if (buildState.desiredBuildRevision != null) { annotations.put(Annotations.STRIMZI_IO_CONNECT_BUILD_REVISION, buildState.desiredBuildRevision); @@ -186,11 +186,11 @@ protected Future createOrUpdate(Reconciliation reconciliatio dep.getSpec().getTemplate().getSpec().getContainers().get(0).setImage(buildState.desiredImage); } - return deploymentOperations.reconcile(namespace, connect.getName(), dep); + return deploymentOperations.reconcile(reconciliation, namespace, connect.getName(), dep); }) - .compose(i -> deploymentOperations.scaleUp(namespace, connect.getName(), connect.getReplicas())) - .compose(i -> deploymentOperations.waitForObserved(namespace, connect.getName(), 1_000, operationTimeoutMs)) - .compose(i -> connectHasZeroReplicas ? Future.succeededFuture() : deploymentOperations.readiness(namespace, connect.getName(), 1_000, operationTimeoutMs)) + .compose(i -> deploymentOperations.scaleUp(reconciliation, namespace, connect.getName(), connect.getReplicas())) + .compose(i -> deploymentOperations.waitForObserved(reconciliation, namespace, connect.getName(), 1_000, operationTimeoutMs)) + .compose(i -> connectHasZeroReplicas ? 
Future.succeededFuture() : deploymentOperations.readiness(reconciliation, namespace, connect.getName(), 1_000, operationTimeoutMs)) .compose(i -> reconcileConnectors(reconciliation, kafkaConnect, kafkaConnectStatus, connectHasZeroReplicas, desiredLogging.get(), connect.getDefaultLogConfig())) .onComplete(reconciliationResult -> { StatusUtils.setStatusConditionAndObservedGeneration(kafkaConnect, kafkaConnectStatus, reconciliationResult); @@ -217,8 +217,8 @@ protected KafkaConnectStatus createStatus() { return new KafkaConnectStatus(); } - private Future> connectServiceAccount(String namespace, KafkaConnectCluster connect) { - return serviceAccountOperations.reconcile(namespace, + private Future> connectServiceAccount(Reconciliation reconciliation, String namespace, KafkaConnectCluster connect) { + return serviceAccountOperations.reconcile(reconciliation, namespace, KafkaConnectResources.serviceAccountName(connect.getCluster()), connect.generateServiceAccount()); } @@ -228,16 +228,17 @@ private Future> connectServiceAccount(String nam * The init-container needs to be able to read the labels from the node it is running on to be able to determine * the `client.rack` option. * + * @param reconciliation The reconciliation * @param namespace Namespace of the service account to which the ClusterRole should be bound * @param name Name of the ClusterRoleBinding * @param connectCluster Name of the Connect cluster * @return Future for tracking the asynchronous result of the ClusterRoleBinding reconciliation */ - Future> connectInitClusterRoleBinding(String namespace, String name, KafkaConnectCluster connectCluster) { + Future> connectInitClusterRoleBinding(Reconciliation reconciliation, String namespace, String name, KafkaConnectCluster connectCluster) { ClusterRoleBinding desired = connectCluster.generateClusterRoleBinding(); - return withIgnoreRbacError( - clusterRoleBindingOperations.reconcile( + return withIgnoreRbacError(reconciliation, + clusterRoleBindingOperations.reconcile(reconciliation, KafkaConnectResources.initContainerClusterRoleBindingName(name, namespace), desired), desired @@ -253,18 +254,19 @@ Future> connectInitClusterRoleBinding(String @Override protected Future delete(Reconciliation reconciliation) { return super.delete(reconciliation) - .compose(i -> withIgnoreRbacError(clusterRoleBindingOperations.reconcile(KafkaConnectResources.initContainerClusterRoleBindingName(reconciliation.name(), reconciliation.namespace()), null), null)) + .compose(i -> withIgnoreRbacError(reconciliation, clusterRoleBindingOperations.reconcile(reconciliation, KafkaConnectResources.initContainerClusterRoleBindingName(reconciliation.name(), reconciliation.namespace()), null), null)) .map(Boolean.FALSE); // Return FALSE since other resources are still deleted by garbage collection } /** * Builds a new container image with connectors on Kubernetes using Kaniko or on OpenShift using BuildConfig * + * @param reconciliation The reconciliation * @param namespace Namespace of the Connect cluster * @param connectBuild KafkaConnectBuild object * @return Future for tracking the asynchronous result of the Kubernetes image build */ - Future connectBuild(String namespace, KafkaConnectBuild connectBuild, BuildState buildState) { + Future connectBuild(Reconciliation reconciliation, String namespace, KafkaConnectBuild connectBuild, BuildState buildState) { if (connectBuild.getBuild() != null) { // Build exists => let's build KafkaConnectDockerfile dockerfile = connectBuild.generateDockerfile(); @@ -274,24 +276,24 @@ 
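The createOrUpdate flow above is a chain of Vert.x Future compositions in which every step now receives the same Reconciliation, so each Kubernetes call and log line can be attributed to the CR being reconciled. The heavily simplified sketch below shows only that overall shape; the step names and the use of a plain String for the reconciliation are illustrative.

import io.vertx.core.Future;

public class ReconcileChainSketch {
    // Illustrative stand-in for one reconciliation step (service account, network policy, deployment, ...).
    interface Step {
        Future<Void> apply(String reconciliation);
    }

    // Each step gets the same reconciliation identifier, mirroring how the operator now threads
    // the Reconciliation through serviceAccountOperations, networkPolicyOperator and deploymentOperations.
    static Future<Void> reconcile(String reconciliation, Step serviceAccount, Step networkPolicy, Step deployment) {
        return serviceAccount.apply(reconciliation)
                .compose(i -> networkPolicy.apply(reconciliation))
                .compose(i -> deployment.apply(reconciliation));
    }

    public static void main(String[] args) {
        Step noop = r -> {
            System.out.println(r + ": step executed");
            return Future.succeededFuture();
        };
        reconcile("Reconciliation(KafkaConnect my-namespace/my-connect)", noop, noop, noop)
                .onComplete(ar -> System.out.println("reconciliation done: " + ar.succeeded()));
    }
}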
Future connectBuild(String namespace, KafkaConnectBuild connectBuild, Buil if (newBuildRevision.equals(buildState.currentBuildRevision) && !buildState.forceRebuild) { // The revision is the same and rebuild was not forced => nothing to do - log.info("Build configuration did not changed. Nothing new to build. Container image {} will be used.", buildState.currentImage); + LOGGER.infoCr(reconciliation, "Build configuration has not changed. Nothing new to build. Container image {} will be used.", buildState.currentImage); buildState.desiredImage = buildState.currentImage; buildState.desiredBuildRevision = newBuildRevision; return Future.succeededFuture(); } else if (pfa.supportsS2I()) { // Revisions differ and we have S2I support => we are on OpenShift and should do a build - return openShiftBuild(namespace, connectBuild, buildState, dockerfile, newBuildRevision); + return openShiftBuild(reconciliation, namespace, connectBuild, buildState, dockerfile, newBuildRevision); } else { // Revisions differ and no S2I support => we are on Kubernetes and should do a build - return kubernetesBuild(namespace, connectBuild, buildState, dockerFileConfigMap, newBuildRevision); + return kubernetesBuild(reconciliation, namespace, connectBuild, buildState, dockerFileConfigMap, newBuildRevision); } } else { // Build is not configured => we should delete resources buildState.desiredBuildRevision = null; - return configMapOperations.reconcile(namespace, KafkaConnectResources.dockerFileConfigMapName(connectBuild.getCluster()), null) - .compose(ignore -> podOperator.reconcile(namespace, KafkaConnectResources.buildPodName(connectBuild.getCluster()), null)) - .compose(ignore -> serviceAccountOperations.reconcile(namespace, KafkaConnectResources.buildServiceAccountName(connectBuild.getCluster()), null)) - .compose(ignore -> pfa.supportsS2I() ? buildConfigOperator.reconcile(namespace, KafkaConnectResources.buildConfigName(connectBuild.getCluster()), null) : Future.succeededFuture()) + return configMapOperations.reconcile(reconciliation, namespace, KafkaConnectResources.dockerFileConfigMapName(connectBuild.getCluster()), null) + .compose(ignore -> podOperator.reconcile(reconciliation, namespace, KafkaConnectResources.buildPodName(connectBuild.getCluster()), null)) + .compose(ignore -> serviceAccountOperations.reconcile(reconciliation, namespace, KafkaConnectResources.buildServiceAccountName(connectBuild.getCluster()), null)) + .compose(ignore -> pfa.supportsS2I() ? buildConfigOperator.reconcile(reconciliation, namespace, KafkaConnectResources.buildConfigName(connectBuild.getCluster()), null) : Future.succeededFuture()) + .mapEmpty(); } } @@ -300,6 +302,7 @@ Future connectBuild(String namespace, KafkaConnectBuild connectBuild, Buil * Executes the Kafka Connect Build on Kubernetes. Run only if needed because of changes to the Dockerfile or when * triggered by annotation.
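connectBuild chooses between reusing the current image, building on OpenShift, building on Kubernetes, or tearing the build resources down. The decision implied by the code above can be summarised in a small sketch; the enum and method names are illustrative, not the operator's API.

public class BuildDecisionSketch {
    enum Action { REUSE_CURRENT_IMAGE, OPENSHIFT_BUILD, KUBERNETES_BUILD, DELETE_BUILD_RESOURCES }

    static Action decide(boolean buildConfigured, String currentRevision, String newRevision,
                         boolean forceRebuild, boolean supportsS2I) {
        if (!buildConfigured) {
            // No build section => the Dockerfile ConfigMap, builder Pod and BuildConfig are removed.
            return Action.DELETE_BUILD_RESOURCES;
        }
        if (newRevision.equals(currentRevision) && !forceRebuild) {
            // Same Dockerfile revision and no forced rebuild => keep the current container image.
            return Action.REUSE_CURRENT_IMAGE;
        }
        // Revision changed or rebuild forced: OpenShift (S2I support) builds via BuildConfig, Kubernetes via a builder Pod.
        return supportsS2I ? Action.OPENSHIFT_BUILD : Action.KUBERNETES_BUILD;
    }

    public static void main(String[] args) {
        System.out.println(decide(true, "rev-1", "rev-2", false, false)); // KUBERNETES_BUILD
        System.out.println(decide(true, "rev-1", "rev-1", false, true));  // REUSE_CURRENT_IMAGE
    }
}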
* + * @param reconciliation The reconciliation * @param namespace Namespace where the Kafka Connect is deployed * @param connectBuild The KafkaConnectBuild model with the build definitions * @param buildState State object of the Kafka Connect build used to pass information around @@ -308,7 +311,7 @@ Future connectBuild(String namespace, KafkaConnectBuild connectBuild, Buil * * @return Future which completes when the build is finished (or fails if it fails) */ - private Future kubernetesBuild(String namespace, KafkaConnectBuild connectBuild, BuildState buildState, ConfigMap dockerFileConfigMap, String newBuildRevision) { + private Future kubernetesBuild(Reconciliation reconciliation, String namespace, KafkaConnectBuild connectBuild, BuildState buildState, ConfigMap dockerFileConfigMap, String newBuildRevision) { return podOperator.getAsync(namespace, KafkaConnectResources.buildPodName(connectBuild.getCluster())) .compose(pod -> { if (pod != null) { @@ -318,19 +321,19 @@ private Future kubernetesBuild(String namespace, KafkaConnectBuild connect && !buildState.forceRebuild) { // Builder pod exists, is not failed, and is building the same Dockerfile and we are not // asked to force re-build by the annotation => we re-use the existing build - log.info("Previous build exists with the same Dockerfile and will be reused."); - return kubernetesBuildWaitForFinish(namespace, connectBuild, buildState, newBuildRevision); + LOGGER.infoCr(reconciliation, "Previous build exists with the same Dockerfile and will be reused."); + return kubernetesBuildWaitForFinish(reconciliation, namespace, connectBuild, buildState, newBuildRevision); } else { // Pod exists, but it either failed or is for different Dockerfile => start new build - log.info("Previous build exists, but uses different Dockerfile or failed. New build will be started."); - return podOperator.reconcile(namespace, KafkaConnectResources.buildPodName(connectBuild.getCluster()), null) - .compose(ignore -> kubernetesBuildStart(namespace, connectBuild, dockerFileConfigMap, newBuildRevision)) - .compose(ignore -> kubernetesBuildWaitForFinish(namespace, connectBuild, buildState, newBuildRevision)); + LOGGER.infoCr(reconciliation, "Previous build exists, but uses different Dockerfile or failed. New build will be started."); + return podOperator.reconcile(reconciliation, namespace, KafkaConnectResources.buildPodName(connectBuild.getCluster()), null) + .compose(ignore -> kubernetesBuildStart(reconciliation, namespace, connectBuild, dockerFileConfigMap, newBuildRevision)) + .compose(ignore -> kubernetesBuildWaitForFinish(reconciliation, namespace, connectBuild, buildState, newBuildRevision)); } } else { // Pod does not exist => Start new build - return kubernetesBuildStart(namespace, connectBuild, dockerFileConfigMap, newBuildRevision) - .compose(ignore -> kubernetesBuildWaitForFinish(namespace, connectBuild, buildState, newBuildRevision)); + return kubernetesBuildStart(reconciliation, namespace, connectBuild, dockerFileConfigMap, newBuildRevision) + .compose(ignore -> kubernetesBuildWaitForFinish(reconciliation, namespace, connectBuild, buildState, newBuildRevision)); } }); } @@ -339,6 +342,7 @@ private Future kubernetesBuild(String namespace, KafkaConnectBuild connect * Starts the Kafka Connect Build on Kubernetes by creating the ConfigMap with the Dockerfile and starting the * builder Pod. 
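kubernetesBuildStart reconciles a ConfigMap holding the generated Dockerfile before creating the build ServiceAccount and the builder Pod. A minimal fabric8 illustration of just that ConfigMap is below; the names, the data key and the Dockerfile contents are examples, while the operator derives the real ones from KafkaConnectResources and the KafkaConnectDockerfile model.

import io.fabric8.kubernetes.api.model.ConfigMap;
import io.fabric8.kubernetes.api.model.ConfigMapBuilder;

import java.util.Map;

public class DockerfileConfigMapSketch {
    // Wraps a generated Dockerfile into a ConfigMap so the builder Pod can consume it.
    static ConfigMap dockerfileConfigMap(String namespace, String name, String dockerfileContents) {
        return new ConfigMapBuilder()
                .withNewMetadata()
                    .withNamespace(namespace)
                    .withName(name)
                .endMetadata()
                .withData(Map.of("Dockerfile", dockerfileContents))
                .build();
    }

    public static void main(String[] args) {
        String dockerfile = "FROM my-base-image:latest\n# ... add connector plugins ...\nUSER 1001";
        ConfigMap cm = dockerfileConfigMap("my-namespace", "my-connect-dockerfile", dockerfile);
        System.out.println(cm.getMetadata().getName() + " holds " + cm.getData().size() + " file(s)");
    }
}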
* + * @param reconciliation Reconciliation object * @param namespace Namespace where the Kafka Connect is deployed * @param connectBuild The KafkaConnectBuild model with the build definitions * @param dockerFileConfigMap ConfigMap with the generated Dockerfile @@ -346,10 +350,10 @@ private Future kubernetesBuild(String namespace, KafkaConnectBuild connect * * @return Future which completes when the build is finished (or fails if it fails) */ - private Future kubernetesBuildStart(String namespace, KafkaConnectBuild connectBuild, ConfigMap dockerFileConfigMap, String newBuildRevision) { - return configMapOperations.reconcile(namespace, KafkaConnectResources.dockerFileConfigMapName(connectBuild.getCluster()), dockerFileConfigMap) - .compose(ignore -> serviceAccountOperations.reconcile(namespace, KafkaConnectResources.buildServiceAccountName(connectBuild.getCluster()), connectBuild.generateServiceAccount())) - .compose(ignore -> podOperator.reconcile(namespace, KafkaConnectResources.buildPodName(connectBuild.getCluster()), connectBuild.generateBuilderPod(pfa.isOpenshift(), imagePullPolicy, imagePullSecrets, newBuildRevision))) + private Future kubernetesBuildStart(Reconciliation reconciliation, String namespace, KafkaConnectBuild connectBuild, ConfigMap dockerFileConfigMap, String newBuildRevision) { + return configMapOperations.reconcile(reconciliation, namespace, KafkaConnectResources.dockerFileConfigMapName(connectBuild.getCluster()), dockerFileConfigMap) + .compose(ignore -> serviceAccountOperations.reconcile(reconciliation, namespace, KafkaConnectResources.buildServiceAccountName(connectBuild.getCluster()), connectBuild.generateServiceAccount())) + .compose(ignore -> podOperator.reconcile(reconciliation, namespace, KafkaConnectResources.buildPodName(connectBuild.getCluster()), connectBuild.generateBuilderPod(pfa.isOpenshift(), imagePullPolicy, imagePullSecrets, newBuildRevision))) .mapEmpty(); } @@ -369,6 +373,7 @@ private boolean kubernetesBuildPodFinished(String namespace, String podName) { /** * Waits for the Kafka Connect build to finish and collects the results from it * + * @param reconciliation The reconciliation * @param namespace Namespace where the Kafka Connect is deployed * @param connectBuild The KafkaConnectBuild model with the build definitions * @param buildState State object of the Kafka Connect build used to pass information around @@ -376,24 +381,24 @@ private boolean kubernetesBuildPodFinished(String namespace, String podName) { * * @return Future which completes when the build is finished (or fails if it fails) */ - private Future kubernetesBuildWaitForFinish(String namespace, KafkaConnectBuild connectBuild, BuildState buildState, String newBuildRevision) { - return podOperator.waitFor(namespace, KafkaConnectResources.buildPodName(connectBuild.getCluster()), "complete", 1_000, connectBuildTimeoutMs, (ignore1, ignore2) -> kubernetesBuildPodFinished(namespace, KafkaConnectResources.buildPodName(connectBuild.getCluster()))) + private Future kubernetesBuildWaitForFinish(Reconciliation reconciliation, String namespace, KafkaConnectBuild connectBuild, BuildState buildState, String newBuildRevision) { + return podOperator.waitFor(reconciliation, namespace, KafkaConnectResources.buildPodName(connectBuild.getCluster()), "complete", 1_000, connectBuildTimeoutMs, (ignore1, ignore2) -> kubernetesBuildPodFinished(namespace, KafkaConnectResources.buildPodName(connectBuild.getCluster()))) .compose(ignore -> podOperator.getAsync(namespace, 
KafkaConnectResources.buildPodName(connectBuild.getCluster()))) .compose(pod -> { if (KafkaConnectBuildUtils.buildPodSucceeded(pod)) { ContainerStateTerminated state = pod.getStatus().getContainerStatuses().get(0).getState().getTerminated(); buildState.desiredImage = state.getMessage().trim(); buildState.desiredBuildRevision = newBuildRevision; - log.info("Build completed successfully. New image is {}.", buildState.desiredImage); + LOGGER.infoCr(reconciliation, "Build completed successfully. New image is {}.", buildState.desiredImage); return Future.succeededFuture(); } else { ContainerStateTerminated state = pod.getStatus().getContainerStatuses().get(0).getState().getTerminated(); - log.warn("Build failed with code {}: {}", state.getExitCode(), state.getMessage()); + LOGGER.warnCr(reconciliation, "Build failed with code {}: {}", state.getExitCode(), state.getMessage()); return Future.failedFuture("The Kafka Connect build failed"); } }) - .compose(i -> podOperator.reconcile(namespace, KafkaConnectResources.buildPodName(connectBuild.getCluster()), null)) - .compose(ignore -> pfa.supportsS2I() ? buildConfigOperator.reconcile(namespace, KafkaConnectResources.buildConfigName(connectBuild.getCluster()), null) : Future.succeededFuture()) + .compose(i -> podOperator.reconcile(reconciliation, namespace, KafkaConnectResources.buildPodName(connectBuild.getCluster()), null)) + .compose(ignore -> pfa.supportsS2I() ? buildConfigOperator.reconcile(reconciliation, namespace, KafkaConnectResources.buildConfigName(connectBuild.getCluster()), null) : Future.succeededFuture()) .mapEmpty(); } @@ -401,6 +406,7 @@ private Future kubernetesBuildWaitForFinish(String namespace, KafkaConnect * Executes the Kafka Connect Build on OpenShift. Run only if needed because of changes to the Dockerfile or when * triggered by annotation. 
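When the builder Pod terminates, the result is read from the first container's termination state: a zero exit code means the termination message carries the new image reference, anything else fails the build. The condensed sketch below shows that check without the surrounding waitFor/Future plumbing; the class name and the constructed Pod are illustrative.

import io.fabric8.kubernetes.api.model.ContainerStateTerminated;
import io.fabric8.kubernetes.api.model.Pod;
import io.fabric8.kubernetes.api.model.PodBuilder;

public class BuildPodResultSketch {
    // Reads the build outcome from the terminated builder container, mirroring the logic above.
    static String newImageOrThrow(Pod builderPod) {
        ContainerStateTerminated state = builderPod.getStatus()
                .getContainerStatuses().get(0).getState().getTerminated();
        if (state != null && state.getExitCode() != null && state.getExitCode() == 0) {
            // On success the termination message carries the freshly built image reference.
            return state.getMessage().trim();
        }
        throw new RuntimeException("The Kafka Connect build failed"
                + (state != null ? " with code " + state.getExitCode() + ": " + state.getMessage() : ""));
    }

    public static void main(String[] args) {
        Pod finished = new PodBuilder()
                .withNewStatus()
                    .addNewContainerStatus()
                        .withNewState()
                            .withNewTerminated()
                                .withExitCode(0)
                                .withMessage("registry.example.com/my-namespace/my-connect-build@sha256:0123abcd ")
                            .endTerminated()
                        .endState()
                    .endContainerStatus()
                .endStatus()
                .build();
        System.out.println(newImageOrThrow(finished));
    }
}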
* + * @param reconciliation The reconciliation * @param namespace Namespace where the Kafka Connect is deployed * @param connectBuild The KafkaConnectBuild model with the build definitions * @param buildState State object of the Kafka Connect build used to pass information around @@ -409,7 +415,7 @@ private Future kubernetesBuildWaitForFinish(String namespace, KafkaConnect * * @return Future which completes when the build is finished (or fails if it fails) */ - private Future openShiftBuild(String namespace, KafkaConnectBuild connectBuild, BuildState buildState, KafkaConnectDockerfile dockerfile, String newBuildRevision) { + private Future openShiftBuild(Reconciliation reconciliation, String namespace, KafkaConnectBuild connectBuild, BuildState buildState, KafkaConnectDockerfile dockerfile, String newBuildRevision) { return buildConfigOperator.getAsync(namespace, KafkaConnectResources.buildConfigName(connectBuild.getCluster())) .compose(buildConfig -> { if (buildConfig != null @@ -429,17 +435,17 @@ private Future openShiftBuild(String namespace, KafkaConnectBuild connectB && !buildState.forceRebuild) { // Build exists, is not failed, and is building the same Dockerfile and we are not // asked to force re-build by the annotation => we re-use the existing build - log.info("Previous build exists with the same Dockerfile and will be reused."); + LOGGER.infoCr(reconciliation, "Previous build exists with the same Dockerfile and will be reused."); buildState.currentBuildName = build.getMetadata().getName(); - return openShiftBuildWaitForFinish(namespace, connectBuild, buildState, newBuildRevision); + return openShiftBuildWaitForFinish(reconciliation, namespace, connectBuild, buildState, newBuildRevision); } else { // Build exists, but it either failed or is for different Dockerfile => start new build - return openShiftBuildStart(namespace, connectBuild, buildState, dockerfile, newBuildRevision) - .compose(ignore -> openShiftBuildWaitForFinish(namespace, connectBuild, buildState, newBuildRevision)); + return openShiftBuildStart(reconciliation, namespace, connectBuild, buildState, dockerfile, newBuildRevision) + .compose(ignore -> openShiftBuildWaitForFinish(reconciliation, namespace, connectBuild, buildState, newBuildRevision)); } } else { - return openShiftBuildStart(namespace, connectBuild, buildState, dockerfile, newBuildRevision) - .compose(ignore -> openShiftBuildWaitForFinish(namespace, connectBuild, buildState, newBuildRevision)); + return openShiftBuildStart(reconciliation, namespace, connectBuild, buildState, dockerfile, newBuildRevision) + .compose(ignore -> openShiftBuildWaitForFinish(reconciliation, namespace, connectBuild, buildState, newBuildRevision)); } }); } @@ -455,9 +461,9 @@ private Future openShiftBuild(String namespace, KafkaConnectBuild connectB * * @return Future which completes when the build is finished (or fails if it fails) */ - private Future openShiftBuildStart(String namespace, KafkaConnectBuild connectBuild, BuildState buildState, KafkaConnectDockerfile dockerfile, String newBuildRevision) { - return configMapOperations.reconcile(namespace, KafkaConnectResources.dockerFileConfigMapName(connectBuild.getCluster()), null) - .compose(ignore -> buildConfigOperator.reconcile(namespace, KafkaConnectResources.buildConfigName(connectBuild.getCluster()), connectBuild.generateBuildConfig(dockerfile))) + private Future openShiftBuildStart(Reconciliation reconciliation, String namespace, KafkaConnectBuild connectBuild, BuildState buildState, KafkaConnectDockerfile dockerfile, 
String newBuildRevision) { + return configMapOperations.reconcile(reconciliation, namespace, KafkaConnectResources.dockerFileConfigMapName(connectBuild.getCluster()), null) + .compose(ignore -> buildConfigOperator.reconcile(reconciliation, namespace, KafkaConnectResources.buildConfigName(connectBuild.getCluster()), connectBuild.generateBuildConfig(dockerfile))) .compose(ignore -> buildConfigOperator.startBuild(namespace, KafkaConnectResources.buildConfigName(connectBuild.getCluster()), connectBuild.generateBuildRequest(newBuildRevision))) .compose(build -> { buildState.currentBuildName = build.getMetadata().getName(); @@ -481,6 +487,7 @@ private boolean openShiftBuildFinished(String namespace, String buildName) { /** * Waits for the Kafka Connect build to finish and collects the results from it * + * @param reconciliation The reconciliation * @param namespace Namespace where the Kafka Connect is deployed * @param connectBuild The KafkaConnectBuild model with the build definitions * @param buildState State object of the Kafka Connect build used to pass information around @@ -488,8 +495,8 @@ private boolean openShiftBuildFinished(String namespace, String buildName) { * * @return Future which completes when the build is finished (or fails if it fails) */ - private Future openShiftBuildWaitForFinish(String namespace, KafkaConnectBuild connectBuild, BuildState buildState, String newBuildRevision) { - return buildOperator.waitFor(namespace, buildState.currentBuildName, "complete", 1_000, connectBuildTimeoutMs, (ignore1, ignore2) -> openShiftBuildFinished(namespace, buildState.currentBuildName)) + private Future openShiftBuildWaitForFinish(Reconciliation reconciliation, String namespace, KafkaConnectBuild connectBuild, BuildState buildState, String newBuildRevision) { + return buildOperator.waitFor(reconciliation, namespace, buildState.currentBuildName, "complete", 1_000, connectBuildTimeoutMs, (ignore1, ignore2) -> openShiftBuildFinished(namespace, buildState.currentBuildName)) .compose(ignore -> buildOperator.getAsync(namespace, buildState.currentBuildName)) .compose(build -> { if (KafkaConnectBuildUtils.buildSucceeded(build)) { @@ -505,24 +512,24 @@ private Future openShiftBuildWaitForFinish(String namespace, KafkaConnectB buildState.desiredImage = image.replace(tag, digest); buildState.desiredBuildRevision = newBuildRevision; - log.info("Build {} completed successfully. New image is {}.", buildState.currentBuildName, buildState.desiredImage); + LOGGER.infoCr(reconciliation, "Build {} completed successfully. New image is {}.", buildState.currentBuildName, buildState.desiredImage); return Future.succeededFuture(); } else { - log.warn("Build {} completed successfully. But the new container image was not found.", buildState.currentBuildName); + LOGGER.warnCr(reconciliation, "Build {} completed successfully. But the new container image was not found.", buildState.currentBuildName); return Future.failedFuture("The Kafka Connect build completed, but the new container image was not found."); } } else { // Build failed. 
If the Status exists, we try to provide more detailed information if (build.getStatus() != null) { - log.info("Build {} failed with code {}: {}", buildState.currentBuildName, build.getStatus().getPhase(), build.getStatus().getLogSnippet()); + LOGGER.infoCr(reconciliation, "Build {} failed with code {}: {}", buildState.currentBuildName, build.getStatus().getPhase(), build.getStatus().getLogSnippet()); } else { - log.warn("Build {} failed for unknown reason", buildState.currentBuildName); + LOGGER.warnCr(reconciliation, "Build {} failed for unknown reason", buildState.currentBuildName); } return Future.failedFuture("The Kafka Connect build failed."); } }) - .compose(ignore -> podOperator.reconcile(namespace, KafkaConnectResources.buildPodName(connectBuild.getCluster()), null)) + .compose(ignore -> podOperator.reconcile(reconciliation, namespace, KafkaConnectResources.buildPodName(connectBuild.getCluster()), null)) .mapEmpty(); } diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectS2IAssemblyOperator.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectS2IAssemblyOperator.java index 330b454f64..cf544be795 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectS2IAssemblyOperator.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectS2IAssemblyOperator.java @@ -26,6 +26,7 @@ import io.strimzi.operator.cluster.model.StatusDiff; import io.strimzi.operator.cluster.operator.resource.ResourceOperatorSupplier; import io.strimzi.operator.common.Annotations; +import io.strimzi.operator.common.ReconciliationLogger; import io.strimzi.operator.common.Reconciliation; import io.strimzi.operator.common.ReconciliationException; import io.strimzi.operator.common.Util; @@ -39,8 +40,6 @@ import io.vertx.core.Future; import io.vertx.core.Promise; import io.vertx.core.Vertx; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import java.util.HashMap; import java.util.Map; @@ -58,8 +57,8 @@ // Deprecation is suppressed because of KafkaConnectS2I @SuppressWarnings("deprecation") public class KafkaConnectS2IAssemblyOperator extends AbstractConnectOperator, KafkaConnectS2ISpec, KafkaConnectS2IStatus> { - private static final Logger log = LogManager.getLogger(KafkaConnectS2IAssemblyOperator.class.getName()); - + private static final ReconciliationLogger LOGGER = ReconciliationLogger.create(KafkaConnectS2IAssemblyOperator.class.getName()); + private final DeploymentConfigOperator deploymentConfigOperations; private final ImageStreamOperator imagesStreamOperations; private final BuildConfigOperator buildConfigOperations; @@ -101,8 +100,9 @@ public Future createOrUpdate(Reconciliation reconciliatio KafkaConnectS2IStatus kafkaConnectS2Istatus = new KafkaConnectS2IStatus(); try { - connect = KafkaConnectS2ICluster.fromCrd(kafkaConnectS2I, versions); + connect = KafkaConnectS2ICluster.fromCrd(reconciliation, kafkaConnectS2I, versions); } catch (Exception e) { + LOGGER.warnCr(reconciliation, e); StatusUtils.setStatusConditionAndObservedGeneration(kafkaConnectS2I, kafkaConnectS2Istatus, Future.failedFuture(e)); return Future.failedFuture(new ReconciliationException(kafkaConnectS2Istatus, e)); } @@ -117,7 +117,7 @@ public Future createOrUpdate(Reconciliation reconciliatio boolean connectHasZeroReplicas = connect.getReplicas() == 0; - log.debug("{}: Updating Kafka Connect S2I cluster", reconciliation); + 
LOGGER.debugCr(reconciliation, "Updating Kafka Connect S2I cluster"); connectOperations.getAsync(kafkaConnectS2I.getMetadata().getNamespace(), kafkaConnectS2I.getMetadata().getName()) .compose(otherConnect -> { @@ -138,27 +138,27 @@ public Future createOrUpdate(Reconciliation reconciliatio "Kafka Connect S2I deployment cannot be enabled."); } }) - .compose(i -> connectServiceAccount(namespace, connect)) - .compose(i -> networkPolicyOperator.reconcile(namespace, connect.getName(), connect.generateNetworkPolicy(isUseResources(kafkaConnectS2I), operatorNamespace, operatorNamespaceLabels))) - .compose(i -> deploymentConfigOperations.scaleDown(namespace, connect.getName(), connect.getReplicas())) - .compose(scale -> serviceOperations.reconcile(namespace, connect.getServiceName(), connect.generateService())) - .compose(i -> Util.metricsAndLogging(configMapOperations, namespace, connect.getLogging(), connect.getMetricsConfigInCm())) + .compose(i -> connectServiceAccount(reconciliation, namespace, connect)) + .compose(i -> networkPolicyOperator.reconcile(reconciliation, namespace, connect.getName(), connect.generateNetworkPolicy(isUseResources(kafkaConnectS2I), operatorNamespace, operatorNamespaceLabels))) + .compose(i -> deploymentConfigOperations.scaleDown(reconciliation, namespace, connect.getName(), connect.getReplicas())) + .compose(scale -> serviceOperations.reconcile(reconciliation, namespace, connect.getServiceName(), connect.generateService())) + .compose(i -> Util.metricsAndLogging(reconciliation, configMapOperations, namespace, connect.getLogging(), connect.getMetricsConfigInCm())) .compose(metricsAndLoggingCm -> { ConfigMap logAndMetricsConfigMap = connect.generateMetricsAndLogConfigMap(metricsAndLoggingCm); annotations.put(Annotations.ANNO_STRIMZI_LOGGING_DYNAMICALLY_UNCHANGEABLE_HASH, Util.stringHash(Util.getLoggingDynamicallyUnmodifiableEntries(logAndMetricsConfigMap.getData().get(AbstractModel.ANCILLARY_CM_KEY_LOG_CONFIG)))); desiredLogging.set(logAndMetricsConfigMap.getData().get(AbstractModel.ANCILLARY_CM_KEY_LOG_CONFIG)); - return configMapOperations.reconcile(namespace, connect.getAncillaryConfigMapName(), logAndMetricsConfigMap); + return configMapOperations.reconcile(reconciliation, namespace, connect.getAncillaryConfigMapName(), logAndMetricsConfigMap); }) - .compose(i -> kafkaConnectJmxSecret(namespace, connect.getName(), connect)) - .compose(i -> deploymentConfigOperations.reconcile(namespace, connect.getName(), connect.generateDeploymentConfig(annotations, pfa.isOpenshift(), imagePullPolicy, imagePullSecrets))) - .compose(i -> imagesStreamOperations.reconcile(namespace, KafkaConnectS2IResources.sourceImageStreamName(connect.getCluster()), connect.generateSourceImageStream())) - .compose(i -> imagesStreamOperations.reconcile(namespace, KafkaConnectS2IResources.targetImageStreamName(connect.getCluster()), connect.generateTargetImageStream())) - .compose(i -> podDisruptionBudgetOperator.reconcile(namespace, connect.getName(), connect.generatePodDisruptionBudget())) - .compose(i -> buildConfigOperations.reconcile(namespace, KafkaConnectS2IResources.buildConfigName(connect.getCluster()), connect.generateBuildConfig())) - .compose(i -> deploymentConfigOperations.scaleUp(namespace, connect.getName(), connect.getReplicas())) - .compose(i -> deploymentConfigOperations.waitForObserved(namespace, connect.getName(), 1_000, operationTimeoutMs)) - .compose(i -> connectHasZeroReplicas ? 
Future.succeededFuture() : deploymentConfigOperations.readiness(namespace, connect.getName(), 1_000, operationTimeoutMs)) + .compose(i -> kafkaConnectJmxSecret(reconciliation, namespace, connect.getName(), connect)) + .compose(i -> deploymentConfigOperations.reconcile(reconciliation, namespace, connect.getName(), connect.generateDeploymentConfig(annotations, pfa.isOpenshift(), imagePullPolicy, imagePullSecrets))) + .compose(i -> imagesStreamOperations.reconcile(reconciliation, namespace, KafkaConnectS2IResources.sourceImageStreamName(connect.getCluster()), connect.generateSourceImageStream())) + .compose(i -> imagesStreamOperations.reconcile(reconciliation, namespace, KafkaConnectS2IResources.targetImageStreamName(connect.getCluster()), connect.generateTargetImageStream())) + .compose(i -> podDisruptionBudgetOperator.reconcile(reconciliation, namespace, connect.getName(), connect.generatePodDisruptionBudget())) + .compose(i -> buildConfigOperations.reconcile(reconciliation, namespace, KafkaConnectS2IResources.buildConfigName(connect.getCluster()), connect.generateBuildConfig())) + .compose(i -> deploymentConfigOperations.scaleUp(reconciliation, namespace, connect.getName(), connect.getReplicas())) + .compose(i -> deploymentConfigOperations.waitForObserved(reconciliation, namespace, connect.getName(), 1_000, operationTimeoutMs)) + .compose(i -> connectHasZeroReplicas ? Future.succeededFuture() : deploymentConfigOperations.readiness(reconciliation, namespace, connect.getName(), 1_000, operationTimeoutMs)) .compose(i -> reconcileConnectors(reconciliation, kafkaConnectS2I, kafkaConnectS2Istatus, connectHasZeroReplicas, desiredLogging.get(), connect.getDefaultLogConfig())) .onComplete(reconciliationResult -> { StatusUtils.setStatusConditionAndObservedGeneration(kafkaConnectS2I, kafkaConnectS2Istatus, reconciliationResult); @@ -189,13 +189,13 @@ protected KafkaConnectS2IStatus createStatus() { * Updates the Status field of the Kafka ConnectS2I CR. It diffs the desired status against the current status and calls * the update only when there is any difference in non-timestamp fields. 
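In the reworked updateStatus below, the Reconciliation parameter moves to the first position, matching the other helpers changed in this diff, so callers swap the first two arguments. A hypothetical call site (for illustration only) changes like this:

    // before
    updateStatus(kafkaConnectS2Iassembly, reconciliation, desiredStatus);
    // after
    updateStatus(reconciliation, kafkaConnectS2Iassembly, desiredStatus);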
* - * @param kafkaConnectS2Iassembly The CR of Kafka ConnectS2I * @param reconciliation Reconciliation information + * @param kafkaConnectS2Iassembly The CR of Kafka ConnectS2I * @param desiredStatus The KafkaConnectS2Istatus which should be set * * @return */ - Future updateStatus(KafkaConnectS2I kafkaConnectS2Iassembly, Reconciliation reconciliation, KafkaConnectS2IStatus desiredStatus) { + Future updateStatus(Reconciliation reconciliation, KafkaConnectS2I kafkaConnectS2Iassembly, KafkaConnectS2IStatus desiredStatus) { Promise updateStatusPromise = Promise.promise(); resourceOperator.getAsync(kafkaConnectS2Iassembly.getMetadata().getNamespace(), kafkaConnectS2Iassembly.getMetadata().getName()).onComplete(getRes -> { @@ -204,7 +204,7 @@ Future updateStatus(KafkaConnectS2I kafkaConnectS2Iassembly, Reconciliatio if (connect != null) { if (StatusUtils.isResourceV1alpha1(connect)) { - log.warn("{}: The resource needs to be upgraded from version {} to 'v1beta1' to use the status field", reconciliation, connect.getApiVersion()); + LOGGER.warnCr(reconciliation, "The resource needs to be upgraded from version {} to 'v1beta1' to use the status field", connect.getApiVersion()); updateStatusPromise.complete(); } else { KafkaConnectS2IStatus currentStatus = connect.getStatus(); @@ -214,26 +214,26 @@ Future updateStatus(KafkaConnectS2I kafkaConnectS2Iassembly, Reconciliatio if (!ksDiff.isEmpty()) { KafkaConnectS2I resourceWithNewStatus = new KafkaConnectS2IBuilder(connect).withStatus(desiredStatus).build(); - ((CrdOperator) resourceOperator).updateStatusAsync(resourceWithNewStatus).onComplete(updateRes -> { + ((CrdOperator) resourceOperator).updateStatusAsync(reconciliation, resourceWithNewStatus).onComplete(updateRes -> { if (updateRes.succeeded()) { - log.debug("{}: Completed status update", reconciliation); + LOGGER.debugCr(reconciliation, "Completed status update"); updateStatusPromise.complete(); } else { - log.error("{}: Failed to update status", reconciliation, updateRes.cause()); + LOGGER.errorCr(reconciliation, "Failed to update status", updateRes.cause()); updateStatusPromise.fail(updateRes.cause()); } }); } else { - log.debug("{}: Status did not change", reconciliation); + LOGGER.debugCr(reconciliation, "Status did not change"); updateStatusPromise.complete(); } } } else { - log.error("{}: Current Kafka ConnectS2I resource not found", reconciliation); + LOGGER.errorCr(reconciliation, "Current Kafka ConnectS2I resource not found"); updateStatusPromise.fail("Current Kafka ConnectS2I resource not found"); } } else { - log.error("{}: Failed to get the current Kafka ConnectS2I resource and its status", reconciliation, getRes.cause()); + LOGGER.errorCr(reconciliation, "Failed to get the current Kafka ConnectS2I resource and its status", getRes.cause()); updateStatusPromise.fail(getRes.cause()); } }); @@ -241,8 +241,8 @@ Future updateStatus(KafkaConnectS2I kafkaConnectS2Iassembly, Reconciliatio return updateStatusPromise.future(); } - Future> connectServiceAccount(String namespace, KafkaConnectCluster connect) { - return serviceAccountOperations.reconcile(namespace, + Future> connectServiceAccount(Reconciliation reconciliation, String namespace, KafkaConnectCluster connect) { + return serviceAccountOperations.reconcile(reconciliation, namespace, KafkaConnectS2IResources.serviceAccountName(connect.getCluster()), connect.generateServiceAccount()); } diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KafkaMirrorMaker2AssemblyOperator.java 
b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KafkaMirrorMaker2AssemblyOperator.java index ee958a64c8..8a159ac25b 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KafkaMirrorMaker2AssemblyOperator.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KafkaMirrorMaker2AssemblyOperator.java @@ -20,6 +20,7 @@ import io.strimzi.api.kafka.model.status.Condition; import io.strimzi.operator.cluster.model.AbstractModel; import io.strimzi.operator.common.Annotations; +import io.strimzi.operator.common.ReconciliationLogger; import io.strimzi.operator.common.ReconciliationException; import io.strimzi.operator.common.Util; import io.strimzi.operator.common.operator.resource.NetworkPolicyOperator; @@ -27,8 +28,6 @@ import org.apache.kafka.clients.admin.AdminClientConfig; import org.apache.kafka.common.config.SaslConfigs; import org.apache.kafka.common.config.SslConfigs; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import io.fabric8.kubernetes.api.model.ConfigMap; import io.fabric8.kubernetes.api.model.ServiceAccount; @@ -80,7 +79,7 @@ * */ public class KafkaMirrorMaker2AssemblyOperator extends AbstractConnectOperator, KafkaMirrorMaker2Spec, KafkaMirrorMaker2Status> { - private static final Logger log = LogManager.getLogger(KafkaMirrorMaker2AssemblyOperator.class.getName()); + private static final ReconciliationLogger LOGGER = ReconciliationLogger.create(KafkaMirrorMaker2AssemblyOperator.class.getName()); private final DeploymentOperator deploymentOperations; private final NetworkPolicyOperator networkPolicyOperator; private final KafkaVersion.Lookup versions; @@ -132,8 +131,9 @@ protected Future createOrUpdate(Reconciliation reconcil KafkaMirrorMaker2Cluster mirrorMaker2Cluster; KafkaMirrorMaker2Status kafkaMirrorMaker2Status = new KafkaMirrorMaker2Status(); try { - mirrorMaker2Cluster = KafkaMirrorMaker2Cluster.fromCrd(kafkaMirrorMaker2, versions); + mirrorMaker2Cluster = KafkaMirrorMaker2Cluster.fromCrd(reconciliation, kafkaMirrorMaker2, versions); } catch (Exception e) { + LOGGER.warnCr(reconciliation, e); StatusUtils.setStatusConditionAndObservedGeneration(kafkaMirrorMaker2, kafkaMirrorMaker2Status, Future.failedFuture(e)); return Future.failedFuture(new ReconciliationException(kafkaMirrorMaker2Status, e)); } @@ -146,25 +146,25 @@ protected Future createOrUpdate(Reconciliation reconcil boolean mirrorMaker2HasZeroReplicas = mirrorMaker2Cluster.getReplicas() == 0; - log.debug("{}: Updating Kafka MirrorMaker 2.0 cluster", reconciliation); - mirrorMaker2ServiceAccount(namespace, mirrorMaker2Cluster) - .compose(i -> networkPolicyOperator.reconcile(namespace, mirrorMaker2Cluster.getName(), mirrorMaker2Cluster.generateNetworkPolicy(true, operatorNamespace, operatorNamespaceLabels))) - .compose(i -> deploymentOperations.scaleDown(namespace, mirrorMaker2Cluster.getName(), mirrorMaker2Cluster.getReplicas())) - .compose(scale -> serviceOperations.reconcile(namespace, mirrorMaker2Cluster.getServiceName(), mirrorMaker2Cluster.generateService())) - .compose(i -> Util.metricsAndLogging(configMapOperations, namespace, mirrorMaker2Cluster.getLogging(), mirrorMaker2Cluster.getMetricsConfigInCm())) + LOGGER.debugCr(reconciliation, "Updating Kafka MirrorMaker 2.0 cluster"); + mirrorMaker2ServiceAccount(reconciliation, namespace, mirrorMaker2Cluster) + .compose(i -> networkPolicyOperator.reconcile(reconciliation, namespace, mirrorMaker2Cluster.getName(), 
mirrorMaker2Cluster.generateNetworkPolicy(true, operatorNamespace, operatorNamespaceLabels))) + .compose(i -> deploymentOperations.scaleDown(reconciliation, namespace, mirrorMaker2Cluster.getName(), mirrorMaker2Cluster.getReplicas())) + .compose(scale -> serviceOperations.reconcile(reconciliation, namespace, mirrorMaker2Cluster.getServiceName(), mirrorMaker2Cluster.generateService())) + .compose(i -> Util.metricsAndLogging(reconciliation, configMapOperations, namespace, mirrorMaker2Cluster.getLogging(), mirrorMaker2Cluster.getMetricsConfigInCm())) .compose(metricsAndLoggingCm -> { ConfigMap logAndMetricsConfigMap = mirrorMaker2Cluster.generateMetricsAndLogConfigMap(metricsAndLoggingCm); annotations.put(Annotations.ANNO_STRIMZI_LOGGING_DYNAMICALLY_UNCHANGEABLE_HASH, Util.stringHash(Util.getLoggingDynamicallyUnmodifiableEntries(logAndMetricsConfigMap.getData().get(AbstractModel.ANCILLARY_CM_KEY_LOG_CONFIG)))); desiredLogging.set(logAndMetricsConfigMap.getData().get(AbstractModel.ANCILLARY_CM_KEY_LOG_CONFIG)); - return configMapOperations.reconcile(namespace, mirrorMaker2Cluster.getAncillaryConfigMapName(), logAndMetricsConfigMap); + return configMapOperations.reconcile(reconciliation, namespace, mirrorMaker2Cluster.getAncillaryConfigMapName(), logAndMetricsConfigMap); }) - .compose(i -> kafkaConnectJmxSecret(namespace, mirrorMaker2Cluster.getName(), mirrorMaker2Cluster)) - .compose(i -> podDisruptionBudgetOperator.reconcile(namespace, mirrorMaker2Cluster.getName(), mirrorMaker2Cluster.generatePodDisruptionBudget())) - .compose(i -> deploymentOperations.reconcile(namespace, mirrorMaker2Cluster.getName(), mirrorMaker2Cluster.generateDeployment(annotations, pfa.isOpenshift(), imagePullPolicy, imagePullSecrets))) - .compose(i -> deploymentOperations.scaleUp(namespace, mirrorMaker2Cluster.getName(), mirrorMaker2Cluster.getReplicas())) - .compose(i -> deploymentOperations.waitForObserved(namespace, mirrorMaker2Cluster.getName(), 1_000, operationTimeoutMs)) - .compose(i -> mirrorMaker2HasZeroReplicas ? Future.succeededFuture() : deploymentOperations.readiness(namespace, mirrorMaker2Cluster.getName(), 1_000, operationTimeoutMs)) + .compose(i -> kafkaConnectJmxSecret(reconciliation, namespace, mirrorMaker2Cluster.getName(), mirrorMaker2Cluster)) + .compose(i -> podDisruptionBudgetOperator.reconcile(reconciliation, namespace, mirrorMaker2Cluster.getName(), mirrorMaker2Cluster.generatePodDisruptionBudget())) + .compose(i -> deploymentOperations.reconcile(reconciliation, namespace, mirrorMaker2Cluster.getName(), mirrorMaker2Cluster.generateDeployment(annotations, pfa.isOpenshift(), imagePullPolicy, imagePullSecrets))) + .compose(i -> deploymentOperations.scaleUp(reconciliation, namespace, mirrorMaker2Cluster.getName(), mirrorMaker2Cluster.getReplicas())) + .compose(i -> deploymentOperations.waitForObserved(reconciliation, namespace, mirrorMaker2Cluster.getName(), 1_000, operationTimeoutMs)) + .compose(i -> mirrorMaker2HasZeroReplicas ? Future.succeededFuture() : deploymentOperations.readiness(reconciliation, namespace, mirrorMaker2Cluster.getName(), 1_000, operationTimeoutMs)) .compose(i -> mirrorMaker2HasZeroReplicas ? 
Future.succeededFuture() : reconcileConnectors(reconciliation, kafkaMirrorMaker2, mirrorMaker2Cluster, kafkaMirrorMaker2Status, desiredLogging.get())) .map((Void) null) .onComplete(reconciliationResult -> { @@ -194,8 +194,8 @@ protected KafkaMirrorMaker2Status createStatus() { return new KafkaMirrorMaker2Status(); } - private Future> mirrorMaker2ServiceAccount(String namespace, KafkaMirrorMaker2Cluster mirrorMaker2Cluster) { - return serviceAccountOperations.reconcile(namespace, + private Future> mirrorMaker2ServiceAccount(Reconciliation reconciliation, String namespace, KafkaMirrorMaker2Cluster mirrorMaker2Cluster) { + return serviceAccountOperations.reconcile(reconciliation, namespace, KafkaMirrorMaker2Resources.serviceAccountName(mirrorMaker2Cluster.getCluster()), mirrorMaker2Cluster.generateServiceAccount()); } @@ -223,9 +223,9 @@ protected Future reconcileConnectors(Reconciliation reconciliation, KafkaM .map(mirror -> mirror.getSourceCluster() + "->" + mirror.getTargetCluster() + connectorEntry.getKey()) .collect(Collectors.toSet())); } - log.debug("{}: delete MirrorMaker 2.0 connectors: {}", reconciliation, deleteMirrorMaker2ConnectorNames); + LOGGER.debugCr(reconciliation, "delete MirrorMaker 2.0 connectors: {}", deleteMirrorMaker2ConnectorNames); Stream> deletionFutures = deleteMirrorMaker2ConnectorNames.stream() - .map(connectorName -> apiClient.delete(host, KafkaConnectCluster.REST_API_PORT, connectorName)); + .map(connectorName -> apiClient.delete(reconciliation, host, KafkaConnectCluster.REST_API_PORT, connectorName)); Stream> createUpdateFutures = mirrors.stream() .map(mirror -> reconcileMirrorMaker2Connectors(reconciliation, host, apiClient, kafkaMirrorMaker2, mirror, mirrorMaker2Cluster, mirrorMaker2Status, desiredLogging)); return CompositeFuture.join(Stream.concat(deletionFutures, createUpdateFutures).collect(Collectors.toList())).map((Void) null); @@ -269,16 +269,16 @@ private Future reconcileMirrorMaker2Connectors(Reconciliation reconciliati .withTasksMax(mm2ConnectorSpec.getTasksMax()) .build(); - prepareMirrorMaker2ConnectorConfig(mirror, clusterMap.get(sourceClusterAlias), clusterMap.get(targetClusterAlias), connectorSpec, mirrorMaker2Cluster); - log.debug("{}: creating/updating connector {} config: {}", reconciliation, connectorName, asJson(connectorSpec).toString()); + prepareMirrorMaker2ConnectorConfig(reconciliation, mirror, clusterMap.get(sourceClusterAlias), clusterMap.get(targetClusterAlias), connectorSpec, mirrorMaker2Cluster); + LOGGER.debugCr(reconciliation, "creating/updating connector {} config: {}", connectorName, asJson(reconciliation, connectorSpec).toString()); return reconcileMirrorMaker2Connector(reconciliation, mirrorMaker2, apiClient, host, connectorName, connectorSpec, mirrorMaker2Status); }) .collect(Collectors.toList())) - .map((Void) null).compose(i -> apiClient.updateConnectLoggers(host, KafkaConnectCluster.REST_API_PORT, desiredLogging, mirrorMaker2Cluster.getDefaultLogConfig())); + .map((Void) null).compose(i -> apiClient.updateConnectLoggers(reconciliation, host, KafkaConnectCluster.REST_API_PORT, desiredLogging, mirrorMaker2Cluster.getDefaultLogConfig())); } @SuppressWarnings("deprecation") - private static void prepareMirrorMaker2ConnectorConfig(KafkaMirrorMaker2MirrorSpec mirror, KafkaMirrorMaker2ClusterSpec sourceCluster, KafkaMirrorMaker2ClusterSpec targetCluster, KafkaConnectorSpec connectorSpec, KafkaMirrorMaker2Cluster mirrorMaker2Cluster) { + private static void prepareMirrorMaker2ConnectorConfig(Reconciliation reconciliation, 
KafkaMirrorMaker2MirrorSpec mirror, KafkaMirrorMaker2ClusterSpec sourceCluster, KafkaMirrorMaker2ClusterSpec targetCluster, KafkaConnectorSpec connectorSpec, KafkaMirrorMaker2Cluster mirrorMaker2Cluster) { Map config = connectorSpec.getConfig(); addClusterToMirrorMaker2ConnectorConfig(config, targetCluster, TARGET_CLUSTER_PREFIX); addClusterToMirrorMaker2ConnectorConfig(config, sourceCluster, SOURCE_CLUSTER_PREFIX); @@ -290,7 +290,7 @@ private static void prepareMirrorMaker2ConnectorConfig(KafkaMirrorMaker2MirrorSp String topicsExcludePattern = mirror.getTopicsExcludePattern(); String topicsBlacklistPattern = mirror.getTopicsBlacklistPattern(); if (topicsExcludePattern != null && topicsBlacklistPattern != null) { - log.warn("Both topicsExcludePattern and topicsBlacklistPattern mirror properties are present, ignoring topicsBlacklistPattern as it is deprecated"); + LOGGER.warnCr(reconciliation, "Both topicsExcludePattern and topicsBlacklistPattern mirror properties are present, ignoring topicsBlacklistPattern as it is deprecated"); } String topicsExclude = topicsExcludePattern != null ? topicsExcludePattern : topicsBlacklistPattern; if (topicsExclude != null) { @@ -304,7 +304,7 @@ private static void prepareMirrorMaker2ConnectorConfig(KafkaMirrorMaker2MirrorSp String groupsExcludePattern = mirror.getGroupsExcludePattern(); String groupsBlacklistPattern = mirror.getGroupsBlacklistPattern(); if (groupsExcludePattern != null && groupsBlacklistPattern != null) { - log.warn("Both groupsExcludePattern and groupsBlacklistPattern mirror properties are present, ignoring groupsBlacklistPattern as it is deprecated"); + LOGGER.warnCr(reconciliation, "Both groupsExcludePattern and groupsBlacklistPattern mirror properties are present, ignoring groupsBlacklistPattern as it is deprecated"); } String groupsExclude = groupsExcludePattern != null ? groupsExcludePattern : groupsBlacklistPattern; if (groupsExclude != null) { @@ -429,7 +429,7 @@ private Future reconcileMirrorMaker2Connector(Reconciliation reconciliatio private Future maybeUpdateMirrorMaker2Status(Reconciliation reconciliation, KafkaMirrorMaker2 mirrorMaker2, Throwable error) { KafkaMirrorMaker2Status status = new KafkaMirrorMaker2Status(); if (error != null) { - log.warn("{}: Error reconciling MirrorMaker 2.0 {}", reconciliation, mirrorMaker2.getMetadata().getName(), error); + LOGGER.warnCr(reconciliation, "Error reconciling MirrorMaker 2.0 {}", mirrorMaker2.getMetadata().getName(), error); } StatusUtils.setStatusConditionAndObservedGeneration(mirrorMaker2, status, error != null ? 
Future.failedFuture(error) : Future.succeededFuture()); return maybeUpdateStatusCommon(resourceOperator, mirrorMaker2, reconciliation, status, @@ -507,13 +507,13 @@ protected Future removeRestartTaskAnnotation(Reconciliation reconciliation * Patches the KafkaMirrorMaker2 CR to remove the supplied annotation */ protected Future removeAnnotation(Reconciliation reconciliation, KafkaMirrorMaker2 resource, String annotationKey) { - log.debug("{}: Removing annotation {}", reconciliation, annotationKey); + LOGGER.debugCr(reconciliation, "Removing annotation {}", annotationKey); KafkaMirrorMaker2 patchedKafkaMirrorMaker2 = new KafkaMirrorMaker2Builder(resource) .editMetadata() .removeFromAnnotations(annotationKey) .endMetadata() .build(); - return resourceOperator.patchAsync(patchedKafkaMirrorMaker2) + return resourceOperator.patchAsync(reconciliation, patchedKafkaMirrorMaker2) .compose(ignored -> Future.succeededFuture()); } diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KafkaMirrorMakerAssemblyOperator.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KafkaMirrorMakerAssemblyOperator.java index 53b9ee6c20..5d1e78eada 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KafkaMirrorMakerAssemblyOperator.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KafkaMirrorMakerAssemblyOperator.java @@ -20,6 +20,7 @@ import io.strimzi.operator.cluster.model.KafkaVersion; import io.strimzi.operator.cluster.operator.resource.ResourceOperatorSupplier; import io.strimzi.operator.common.Annotations; +import io.strimzi.operator.common.ReconciliationLogger; import io.strimzi.operator.common.PasswordGenerator; import io.strimzi.operator.common.Reconciliation; import io.strimzi.operator.common.ReconciliationException; @@ -30,8 +31,6 @@ import io.vertx.core.Future; import io.vertx.core.Promise; import io.vertx.core.Vertx; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import java.util.HashMap; import java.util.Map; @@ -44,7 +43,7 @@ */ public class KafkaMirrorMakerAssemblyOperator extends AbstractAssemblyOperator, KafkaMirrorMakerSpec, KafkaMirrorMakerStatus> { - private static final Logger log = LogManager.getLogger(KafkaMirrorMakerAssemblyOperator.class.getName()); + private static final ReconciliationLogger LOGGER = ReconciliationLogger.create(KafkaMirrorMakerAssemblyOperator.class.getName()); private final DeploymentOperator deploymentOperations; private final KafkaVersion.Lookup versions; @@ -73,8 +72,9 @@ protected Future createOrUpdate(Reconciliation reconcili KafkaMirrorMakerStatus kafkaMirrorMakerStatus = new KafkaMirrorMakerStatus(); try { - mirror = KafkaMirrorMakerCluster.fromCrd(assemblyResource, versions); + mirror = KafkaMirrorMakerCluster.fromCrd(reconciliation, assemblyResource, versions); } catch (Exception e) { + LOGGER.warnCr(reconciliation, e); StatusUtils.setStatusConditionAndObservedGeneration(assemblyResource, kafkaMirrorMakerStatus, Future.failedFuture(e)); return Future.failedFuture(new ReconciliationException(kafkaMirrorMakerStatus, e)); } @@ -85,20 +85,20 @@ protected Future createOrUpdate(Reconciliation reconcili boolean mirrorHasZeroReplicas = mirror.getReplicas() == 0; - log.debug("{}: Updating Kafka Mirror Maker cluster", reconciliation); - mirrorMakerServiceAccount(namespace, mirror) - .compose(i -> deploymentOperations.scaleDown(namespace, mirror.getName(), mirror.getReplicas())) - .compose(i -> 
Util.metricsAndLogging(configMapOperations, namespace, mirror.getLogging(), mirror.getMetricsConfigInCm())) + LOGGER.debugCr(reconciliation, "Updating Kafka Mirror Maker cluster"); + mirrorMakerServiceAccount(reconciliation, namespace, mirror) + .compose(i -> deploymentOperations.scaleDown(reconciliation, namespace, mirror.getName(), mirror.getReplicas())) + .compose(i -> Util.metricsAndLogging(reconciliation, configMapOperations, namespace, mirror.getLogging(), mirror.getMetricsConfigInCm())) .compose(metricsAndLoggingCm -> { ConfigMap logAndMetricsConfigMap = mirror.generateMetricsAndLogConfigMap(metricsAndLoggingCm); annotations.put(Annotations.STRIMZI_LOGGING_ANNOTATION, logAndMetricsConfigMap.getData().get(mirror.ANCILLARY_CM_KEY_LOG_CONFIG)); - return configMapOperations.reconcile(namespace, mirror.getAncillaryConfigMapName(), logAndMetricsConfigMap); + return configMapOperations.reconcile(reconciliation, namespace, mirror.getAncillaryConfigMapName(), logAndMetricsConfigMap); }) - .compose(i -> podDisruptionBudgetOperator.reconcile(namespace, mirror.getName(), mirror.generatePodDisruptionBudget())) - .compose(i -> deploymentOperations.reconcile(namespace, mirror.getName(), mirror.generateDeployment(annotations, pfa.isOpenshift(), imagePullPolicy, imagePullSecrets))) - .compose(i -> deploymentOperations.scaleUp(namespace, mirror.getName(), mirror.getReplicas())) - .compose(i -> deploymentOperations.waitForObserved(namespace, mirror.getName(), 1_000, operationTimeoutMs)) - .compose(i -> mirrorHasZeroReplicas ? Future.succeededFuture() : deploymentOperations.readiness(namespace, mirror.getName(), 1_000, operationTimeoutMs)) + .compose(i -> podDisruptionBudgetOperator.reconcile(reconciliation, namespace, mirror.getName(), mirror.generatePodDisruptionBudget())) + .compose(i -> deploymentOperations.reconcile(reconciliation, namespace, mirror.getName(), mirror.generateDeployment(annotations, pfa.isOpenshift(), imagePullPolicy, imagePullSecrets))) + .compose(i -> deploymentOperations.scaleUp(reconciliation, namespace, mirror.getName(), mirror.getReplicas())) + .compose(i -> deploymentOperations.waitForObserved(reconciliation, namespace, mirror.getName(), 1_000, operationTimeoutMs)) + .compose(i -> mirrorHasZeroReplicas ? 
Future.succeededFuture() : deploymentOperations.readiness(reconciliation, namespace, mirror.getName(), 1_000, operationTimeoutMs)) .onComplete(reconciliationResult -> { StatusUtils.setStatusConditionAndObservedGeneration(assemblyResource, kafkaMirrorMakerStatus, reconciliationResult); @@ -121,8 +121,8 @@ protected KafkaMirrorMakerStatus createStatus() { return new KafkaMirrorMakerStatus(); } - Future> mirrorMakerServiceAccount(String namespace, KafkaMirrorMakerCluster mirror) { - return serviceAccountOperations.reconcile(namespace, + Future> mirrorMakerServiceAccount(Reconciliation reconciliation, String namespace, KafkaMirrorMakerCluster mirror) { + return serviceAccountOperations.reconcile(reconciliation, namespace, KafkaMirrorMakerResources.serviceAccountName(mirror.getCluster()), mirror.generateServiceAccount()); } diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KafkaRebalanceAssemblyOperator.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KafkaRebalanceAssemblyOperator.java index f55e22046a..7e96de2ef2 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KafkaRebalanceAssemblyOperator.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KafkaRebalanceAssemblyOperator.java @@ -39,6 +39,7 @@ import io.strimzi.operator.cluster.operator.resource.cruisecontrol.RebalanceOptions; import io.strimzi.operator.common.AbstractOperator; import io.strimzi.operator.common.Annotations; +import io.strimzi.operator.common.ReconciliationLogger; import io.strimzi.operator.common.Reconciliation; import io.strimzi.operator.common.Util; import io.strimzi.operator.common.model.Labels; @@ -51,8 +52,6 @@ import io.vertx.core.Vertx; import io.vertx.core.json.JsonArray; import io.vertx.core.json.JsonObject; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import java.util.ArrayList; import java.util.Arrays; @@ -138,7 +137,7 @@ public class KafkaRebalanceAssemblyOperator extends AbstractOperator>> { - private static final Logger log = LogManager.getLogger(KafkaRebalanceAssemblyOperator.class.getName()); + private static final ReconciliationLogger LOGGER = ReconciliationLogger.create(KafkaRebalanceAssemblyOperator.class.getName()); private static final long REBALANCE_POLLING_TIMER_MS = 5_000; private static final int MAX_API_RETRIES = 5; @@ -200,7 +199,7 @@ public void eventReceived(Action action, KafkaRebalance kafkaRebalance) { Reconciliation reconciliation = new Reconciliation("kafkarebalance-watch", kafkaRebalance.getKind(), kafkaRebalance.getMetadata().getNamespace(), kafkaRebalance.getMetadata().getName()); - log.debug("{}: EventReceived {} on {} with status [{}] and {}={}", reconciliation, action, + LOGGER.debugCr(reconciliation, "EventReceived {} on {} with status [{}] and {}={}", action, kafkaRebalance.getMetadata().getName(), kafkaRebalance.getStatus() != null ? rebalanceStateConditionType(kafkaRebalance.getStatus()) : null, ANNO_STRIMZI_IO_REBALANCE, rawRebalanceAnnotation(kafkaRebalance)); @@ -271,7 +270,8 @@ private String rebalanceStateConditionType(KafkaRebalanceStatus status) { return rebalanceStateCondition != null ? 
rebalanceStateCondition.getType() : null; } - private Future updateStatus(KafkaRebalance kafkaRebalance, + private Future updateStatus(Reconciliation reconciliation, + KafkaRebalance kafkaRebalance, KafkaRebalanceStatus desiredStatus, Throwable e) { // Leave the current status when the desired state is null @@ -299,7 +299,7 @@ private Future updateStatus(KafkaRebalance kafkaRebalance, StatusDiff diff = new StatusDiff(kafkaRebalance.getStatus(), desiredStatus); if (!diff.isEmpty()) { return kafkaRebalanceOperator - .updateStatusAsync(new KafkaRebalanceBuilder(kafkaRebalance).withStatus(desiredStatus).build()); + .updateStatusAsync(reconciliation, new KafkaRebalanceBuilder(kafkaRebalance).withStatus(desiredStatus).build()); } } return Future.succeededFuture(kafkaRebalance); @@ -342,17 +342,17 @@ private Future reconcile(Reconciliation reconciliation, String host, CruiseControlApi apiClient, KafkaRebalance kafkaRebalance, KafkaRebalanceState currentState, KafkaRebalanceAnnotation rebalanceAnnotation) { - log.info("{}: Rebalance action from state [{}]", reconciliation, currentState); + LOGGER.infoCr(reconciliation, "Rebalance action from state [{}]", currentState); if (Annotations.isReconciliationPausedWithAnnotation(kafkaRebalance)) { // we need to do this check again because it was triggered by a watcher KafkaRebalanceStatus status = new KafkaRebalanceStatus(); - Set unknownAndDeprecatedConditions = validate(kafkaRebalance); + Set unknownAndDeprecatedConditions = validate(reconciliation, kafkaRebalance); unknownAndDeprecatedConditions.add(StatusUtils.getPausedCondition()); status.setConditions(new ArrayList<>(unknownAndDeprecatedConditions)); - return updateStatus(kafkaRebalance, status, null).compose(i -> Future.succeededFuture()); + return updateStatus(reconciliation, kafkaRebalance, status, null).compose(i -> Future.succeededFuture()); } RebalanceOptions.RebalanceOptionsBuilder rebalanceOptionsBuilder = convertRebalanceSpecToRebalanceOptions(kafkaRebalance.getSpec()); @@ -365,17 +365,16 @@ private Future reconcile(Reconciliation reconciliation, String host, return kafkaRebalanceOperator.getAsync(reconciliation.namespace(), reconciliation.name()) .compose(currentKafkaRebalance -> { if (currentKafkaRebalance != null) { - return configMapOperator.reconcile(kafkaRebalance.getMetadata().getNamespace(), + return configMapOperator.reconcile(reconciliation, kafkaRebalance.getMetadata().getNamespace(), kafkaRebalance.getMetadata().getName(), desiredStatusAndMap.getLoadMap()) - .compose(i -> updateStatus(currentKafkaRebalance, desiredStatusAndMap.getStatus(), null)) + .compose(i -> updateStatus(reconciliation, currentKafkaRebalance, desiredStatusAndMap.getStatus(), null)) .compose(updatedKafkaRebalance -> { - log.info("{}: State updated to [{}] with annotation {}={} ", - reconciliation, + LOGGER.infoCr(reconciliation, "State updated to [{}] with annotation {}={} ", rebalanceStateConditionType(updatedKafkaRebalance.getStatus()), ANNO_STRIMZI_IO_REBALANCE, rawRebalanceAnnotation(updatedKafkaRebalance)); if (hasRebalanceAnnotation(updatedKafkaRebalance)) { - log.debug("{}: Removing annotation {}={}", reconciliation, + LOGGER.debugCr(reconciliation, "Removing annotation {}={}", ANNO_STRIMZI_IO_REBALANCE, rawRebalanceAnnotation(updatedKafkaRebalance)); // Updated KafkaRebalance has rebalance annotation removed as @@ -386,9 +385,9 @@ private Future reconcile(Reconciliation reconciliation, String host, .endMetadata() .build(); - return kafkaRebalanceOperator.patchAsync(patchedKafkaRebalance); + return 
kafkaRebalanceOperator.patchAsync(reconciliation, patchedKafkaRebalance); } else { - log.debug("{}: No annotation {}", reconciliation, ANNO_STRIMZI_IO_REBALANCE); + LOGGER.debugCr(reconciliation, "No annotation {}", ANNO_STRIMZI_IO_REBALANCE); return Future.succeededFuture(); } }) @@ -397,13 +396,13 @@ private Future reconcile(Reconciliation reconciliation, String host, return Future.succeededFuture(); } }, exception -> { - log.error("{}: Status updated to [NotReady] due to error: {}", reconciliation, exception.getMessage()); - return updateStatus(kafkaRebalance, new KafkaRebalanceStatus(), exception) + LOGGER.errorCr(reconciliation, "Status updated to [NotReady] due to error: {}", exception.getMessage()); + return updateStatus(reconciliation, kafkaRebalance, new KafkaRebalanceStatus(), exception) .mapEmpty(); }); }, exception -> { - log.error("{}: Status updated to [NotReady] due to error: {}", reconciliation, exception.getMessage()); - return updateStatus(kafkaRebalance, new KafkaRebalanceStatus(), exception) + LOGGER.errorCr(reconciliation, "Status updated to [NotReady] due to error: {}", exception.getMessage()); + return updateStatus(reconciliation, kafkaRebalance, new KafkaRebalanceStatus(), exception) .mapEmpty(); }); } @@ -548,7 +547,7 @@ protected static JsonObject parseLoadStats(JsonArray brokerLoadBeforeArray, Json brokerStats.put(intLoadParameter.getKafkaRebalanceStatusKey(), intStats); } else { - log.warn("{} information was missing from the broker before/after load information", + LOGGER.warnOp("{} information was missing from the broker before/after load information", intLoadParameter.getKafkaRebalanceStatusKey()); } @@ -570,7 +569,7 @@ protected static JsonObject parseLoadStats(JsonArray brokerLoadBeforeArray, Json brokerStats.put(doubleLoadParameter.getKafkaRebalanceStatusKey(), doubleStats); } else { - log.warn("{} information was missing from the broker before/after load information", + LOGGER.warnOp("{} information was missing from the broker before/after load information", doubleLoadParameter.getKafkaRebalanceStatusKey()); } @@ -730,7 +729,7 @@ private Future> onPendingProposal( RebalanceOptions.RebalanceOptionsBuilder rebalanceOptionsBuilder) { Promise> p = Promise.promise(); if (rebalanceAnnotation == KafkaRebalanceAnnotation.none) { - log.debug("{}: Starting Cruise Control rebalance proposal request timer", reconciliation); + LOGGER.debugCr(reconciliation, "Starting Cruise Control rebalance proposal request timer"); vertx.setPeriodic(REBALANCE_POLLING_TIMER_MS, t -> kafkaRebalanceOperator.getAsync(kafkaRebalance.getMetadata().getNamespace(), kafkaRebalance.getMetadata().getName()) .onSuccess(currentKafkaRebalance -> { @@ -739,10 +738,10 @@ private Future> onPendingProposal( // Check resource is in the right state as previous execution might have set the status and completed the future // Safety check as timer might be called again (from a delayed timer firing) if (state(currentKafkaRebalance) == KafkaRebalanceState.PendingProposal) { - if (rebalanceAnnotation(currentKafkaRebalance) == KafkaRebalanceAnnotation.stop) { - log.debug("{}: Stopping current Cruise Control proposal request timer", reconciliation); + if (rebalanceAnnotation(reconciliation, currentKafkaRebalance) == KafkaRebalanceAnnotation.stop) { + LOGGER.debugCr(reconciliation, "Stopping current Cruise Control proposal request timer"); vertx.cancelTimer(t); - p.complete(buildRebalanceStatus(null, KafkaRebalanceState.Stopped, validate(currentKafkaRebalance))); + p.complete(buildRebalanceStatus(null, 
KafkaRebalanceState.Stopped, validate(reconciliation, currentKafkaRebalance))); } else { requestRebalance(reconciliation, host, apiClient, kafkaRebalance, true, rebalanceOptionsBuilder, @@ -753,15 +752,15 @@ private Future> onPendingProposal( if (rebalanceMapAndStatus.getStatus().getOptimizationResult() != null && !rebalanceMapAndStatus.getStatus().getOptimizationResult().isEmpty()) { vertx.cancelTimer(t); - log.debug("{}: Optimization proposal ready", reconciliation); + LOGGER.debugCr(reconciliation, "Optimization proposal ready"); p.complete(rebalanceMapAndStatus); } else { // The rebalance proposal is still not ready yet, keep the timer for polling - log.debug("{}: Waiting for optimization proposal to be ready", reconciliation); + LOGGER.debugCr(reconciliation, "Waiting for optimization proposal to be ready"); } }) .onFailure(e -> { - log.error("{}: Cruise Control getting rebalance proposal failed", reconciliation, e.getCause()); + LOGGER.errorCr(reconciliation, "Cruise Control getting rebalance proposal failed", e.getCause()); vertx.cancelTimer(t); p.fail(e.getCause()); }); @@ -770,13 +769,13 @@ private Future> onPendingProposal( p.complete(new MapAndStatus<>(null, currentKafkaRebalance.getStatus())); } } else { - log.debug("{}: Rebalance resource was deleted, stopping the request time", reconciliation); + LOGGER.debugCr(reconciliation, "Rebalance resource was deleted, stopping the request time"); vertx.cancelTimer(t); p.complete(); } }) .onFailure(e -> { - log.error("{}: Cruise Control getting rebalance resource failed", reconciliation, e.getCause()); + LOGGER.errorCr(reconciliation, "Cruise Control getting rebalance resource failed", e.getCause()); vertx.cancelTimer(t); p.fail(e.getCause()); }) @@ -811,16 +810,16 @@ private Future> onProposalReady(Re RebalanceOptions.RebalanceOptionsBuilder rebalanceOptionsBuilder) { switch (rebalanceAnnotation) { case none: - log.debug("{}: No {} annotation set", reconciliation, ANNO_STRIMZI_IO_REBALANCE); + LOGGER.debugCr(reconciliation, "No {} annotation set", ANNO_STRIMZI_IO_REBALANCE); return configMapOperator.getAsync(kafkaRebalance.getMetadata().getNamespace(), kafkaRebalance.getMetadata().getName()).compose(loadmap -> Future.succeededFuture(new MapAndStatus<>(loadmap, kafkaRebalance.getStatus()))); case approve: - log.debug("{}: Annotation {}={}", reconciliation, ANNO_STRIMZI_IO_REBALANCE, KafkaRebalanceAnnotation.approve); + LOGGER.debugCr(reconciliation, "Annotation {}={}", ANNO_STRIMZI_IO_REBALANCE, KafkaRebalanceAnnotation.approve); return requestRebalance(reconciliation, host, apiClient, kafkaRebalance, false, rebalanceOptionsBuilder); case refresh: - log.debug("{}: Annotation {}={}", reconciliation, ANNO_STRIMZI_IO_REBALANCE, KafkaRebalanceAnnotation.refresh); + LOGGER.debugCr(reconciliation, "Annotation {}={}", ANNO_STRIMZI_IO_REBALANCE, KafkaRebalanceAnnotation.refresh); return requestRebalance(reconciliation, host, apiClient, kafkaRebalance, true, rebalanceOptionsBuilder); default: - log.warn("{}: Ignore annotation {}={}", reconciliation, ANNO_STRIMZI_IO_REBALANCE, rebalanceAnnotation); + LOGGER.warnCr(reconciliation, "Ignore annotation {}={}", ANNO_STRIMZI_IO_REBALANCE, rebalanceAnnotation); return Future.succeededFuture(new MapAndStatus<>(null, kafkaRebalance.getStatus())); } } @@ -848,7 +847,7 @@ private Future> onRebalancing(Reco KafkaRebalanceAnnotation rebalanceAnnotation) { Promise> p = Promise.promise(); if (rebalanceAnnotation == KafkaRebalanceAnnotation.none) { - log.info("{}: Starting Cruise Control rebalance user task 
status timer", reconciliation); + LOGGER.infoCr(reconciliation, "Starting Cruise Control rebalance user task status timer"); String sessionId = kafkaRebalance.getStatus().getSessionId(); AtomicInteger ccApiErrorCount = new AtomicInteger(); vertx.setPeriodic(REBALANCE_POLLING_TIMER_MS, t -> { @@ -864,17 +863,17 @@ private Future> onRebalancing(Reco // Check resource is in the right state as previous execution might have set the status and completed the future // Safety check as timer might be called again (from a delayed timer firing) if (state(currentKafkaRebalance) == KafkaRebalanceState.Rebalancing) { - if (rebalanceAnnotation(currentKafkaRebalance) == KafkaRebalanceAnnotation.stop) { - log.debug("{}: Stopping current Cruise Control rebalance user task", reconciliation); + if (rebalanceAnnotation(reconciliation, currentKafkaRebalance) == KafkaRebalanceAnnotation.stop) { + LOGGER.debugCr(reconciliation, "Stopping current Cruise Control rebalance user task"); vertx.cancelTimer(t); apiClient.stopExecution(host, CruiseControl.REST_API_PORT) - .onSuccess(r -> p.complete(buildRebalanceStatus(null, KafkaRebalanceState.Stopped, validate(kafkaRebalance)))) + .onSuccess(r -> p.complete(buildRebalanceStatus(null, KafkaRebalanceState.Stopped, validate(reconciliation, kafkaRebalance)))) .onFailure(e -> { - log.error("{}: Cruise Control stopping execution failed", reconciliation, e.getCause()); + LOGGER.errorCr(reconciliation, "Cruise Control stopping execution failed", e.getCause()); p.fail(e.getCause()); }); } else { - log.info("{}: Getting Cruise Control rebalance user task status", reconciliation); + LOGGER.infoCr(reconciliation, "Getting Cruise Control rebalance user task status"); apiClient.getUserTaskStatus(host, CruiseControl.REST_API_PORT, sessionId) .onSuccess(cruiseControlResponse -> { JsonObject taskStatusJson = cruiseControlResponse.getJson(); @@ -882,7 +881,7 @@ private Future> onRebalancing(Reco switch (taskStatus) { case COMPLETED: vertx.cancelTimer(t); - log.info("{}: Rebalance ({}) is now complete", reconciliation, sessionId); + LOGGER.infoCr(reconciliation, "Rebalance ({}) is now complete", sessionId); p.complete(buildRebalanceStatus( kafkaRebalance, null, KafkaRebalanceState.Ready, taskStatusJson)); break; @@ -891,9 +890,9 @@ private Future> onRebalancing(Reco // We may need to propose an upstream PR for this. // TODO: Once we can get the error details we need to add an error field to the Rebalance Status to hold // details of any issues while rebalancing. - log.error("{}: Rebalance ({}) optimization proposal has failed to complete", reconciliation, sessionId); + LOGGER.errorCr(reconciliation, "Rebalance ({}) optimization proposal has failed to complete", sessionId); vertx.cancelTimer(t); - p.complete(buildRebalanceStatus(sessionId, KafkaRebalanceState.NotReady, validate(kafkaRebalance))); + p.complete(buildRebalanceStatus(sessionId, KafkaRebalanceState.NotReady, validate(reconciliation, kafkaRebalance))); break; case IN_EXECUTION: // Rebalance is still in progress // We need to check that the status has been updated with the ongoing optimisation proposal @@ -902,7 +901,7 @@ private Future> onRebalancing(Reco // the proposal is complete but the optimisation proposal summary will be missing. 
if (currentKafkaRebalance.getStatus().getOptimizationResult() == null || currentKafkaRebalance.getStatus().getOptimizationResult().isEmpty()) { - log.info("{}: Rebalance ({}) optimization proposal is now ready and has been added to the status", reconciliation, sessionId); + LOGGER.infoCr(reconciliation, "Rebalance ({}) optimization proposal is now ready and has been added to the status", sessionId); // Cancel the timer so that the status is returned and updated. vertx.cancelTimer(t); p.complete(buildRebalanceStatus( @@ -917,18 +916,18 @@ private Future> onRebalancing(Reco // If a rebalance(dryrun=false) was called and the proposal is still being prepared then the task // will be in an ACTIVE state. When the proposal is ready it will shift to IN_EXECUTION and we will // check that the optimisation proposal is added to the status on the next reconcile. - log.info("{}: Rebalance ({}) optimization proposal is still being prepared", reconciliation, sessionId); + LOGGER.infoCr(reconciliation, "Rebalance ({}) optimization proposal is still being prepared", sessionId); ccApiErrorCount.set(0); break; default: - log.error("{}: Unexpected state {}", reconciliation, taskStatus); + LOGGER.errorCr(reconciliation, "Unexpected state {}", taskStatus); vertx.cancelTimer(t); p.fail("Unexpected state " + taskStatus); break; } }) .onFailure(e -> { - log.error("{}: Cruise Control getting rebalance task status failed", reconciliation, e.getCause()); + LOGGER.errorCr(reconciliation, "Cruise Control getting rebalance task status failed", e.getCause()); // To make sure this error is not just a temporary problem with the network we retry several times. // If the number of errors pass the MAX_API_ERRORS limit then the period method will fail the promise. ccApiErrorCount.getAndIncrement(); @@ -938,13 +937,13 @@ private Future> onRebalancing(Reco p.complete(new MapAndStatus<>(null, currentKafkaRebalance.getStatus())); } } else { - log.debug("{}: Rebalance resource was deleted, stopping the request time", reconciliation); + LOGGER.debugCr(reconciliation, "Rebalance resource was deleted, stopping the request time"); vertx.cancelTimer(t); p.complete(); } }) .onFailure(e -> { - log.error("{}: Cruise Control getting rebalance resource failed", reconciliation, e.getCause()); + LOGGER.errorCr(reconciliation, "Cruise Control getting rebalance resource failed", e.getCause()); vertx.cancelTimer(t); p.fail(e.getCause()); }); @@ -976,8 +975,8 @@ private Future> onStop(Reconciliat if (rebalanceAnnotation == KafkaRebalanceAnnotation.refresh) { return requestRebalance(reconciliation, host, apiClient, kafkaRebalance, true, rebalanceOptionsBuilder); } else { - log.warn("{}: Ignore annotation {}={}", reconciliation, ANNO_STRIMZI_IO_REBALANCE, rebalanceAnnotation); - return Future.succeededFuture(buildRebalanceStatus(null, KafkaRebalanceState.Stopped, validate(kafkaRebalance))); + LOGGER.warnCr(reconciliation, "Ignore annotation {}={}", ANNO_STRIMZI_IO_REBALANCE, rebalanceAnnotation); + return Future.succeededFuture(buildRebalanceStatus(null, KafkaRebalanceState.Stopped, validate(reconciliation, kafkaRebalance))); } } @@ -1002,8 +1001,8 @@ private Future> onReady(Reconcilia if (rebalanceAnnotation == KafkaRebalanceAnnotation.refresh) { return requestRebalance(reconciliation, host, apiClient, kafkaRebalance, true, rebalanceOptionsBuilder); } else { - log.warn("{}: Ignore annotation {}={}", reconciliation, ANNO_STRIMZI_IO_REBALANCE, rebalanceAnnotation); - return Future.succeededFuture(new MapAndStatus<>(null, 
buildRebalanceStatusFromPreviousStatus(kafkaRebalance.getStatus(), validate(kafkaRebalance)))); + LOGGER.warnCr(reconciliation, "Ignore annotation {}={}", ANNO_STRIMZI_IO_REBALANCE, rebalanceAnnotation); + return Future.succeededFuture(new MapAndStatus<>(null, buildRebalanceStatusFromPreviousStatus(kafkaRebalance.getStatus(), validate(reconciliation, kafkaRebalance)))); } } @@ -1012,15 +1011,15 @@ private Future> onReady(Reconcilia */ /* test */ Future reconcileRebalance(Reconciliation reconciliation, KafkaRebalance kafkaRebalance) { if (kafkaRebalance == null) { - log.info("{}: Rebalance resource deleted", reconciliation); + LOGGER.infoCr(reconciliation, "Rebalance resource deleted"); return Future.succeededFuture(); } String clusterName = kafkaRebalance.getMetadata().getLabels() == null ? null : kafkaRebalance.getMetadata().getLabels().get(Labels.STRIMZI_CLUSTER_LABEL); String clusterNamespace = kafkaRebalance.getMetadata().getNamespace(); if (clusterName == null) { - log.warn("{}: Resource lacks label '{}': No cluster related to a possible rebalance.", reconciliation, Labels.STRIMZI_CLUSTER_LABEL); - return updateStatus(kafkaRebalance, new KafkaRebalanceStatus(), + LOGGER.warnCr(reconciliation, "Resource lacks label '{}': No cluster related to a possible rebalance.", Labels.STRIMZI_CLUSTER_LABEL); + return updateStatus(reconciliation, kafkaRebalance, new KafkaRebalanceStatus(), new InvalidResourceException("Resource lacks label '" + Labels.STRIMZI_CLUSTER_LABEL + "': No cluster related to a possible rebalance.")).mapEmpty(); @@ -1030,18 +1029,18 @@ private Future> onReady(Reconcilia return kafkaOperator.getAsync(clusterNamespace, clusterName) .compose(kafka -> { if (kafka == null) { - log.warn("{}: Kafka resource '{}' identified by label '{}' does not exist in namespace {}.", - reconciliation, clusterName, Labels.STRIMZI_CLUSTER_LABEL, clusterNamespace); - return updateStatus(kafkaRebalance, new KafkaRebalanceStatus(), + LOGGER.warnCr(reconciliation, "Kafka resource '{}' identified by label '{}' does not exist in namespace {}.", + clusterName, Labels.STRIMZI_CLUSTER_LABEL, clusterNamespace); + return updateStatus(reconciliation, kafkaRebalance, new KafkaRebalanceStatus(), new NoSuchResourceException("Kafka resource '" + clusterName + "' identified by label '" + Labels.STRIMZI_CLUSTER_LABEL + "' does not exist in namespace " + clusterNamespace + ".")).mapEmpty(); } else if (!Util.matchesSelector(kafkaSelector, kafka)) { - log.debug("{}: {} {} in namespace {} belongs to a Kafka cluster {} which does not match label selector {} and will be ignored", reconciliation, kind(), kafkaRebalance.getMetadata().getName(), clusterNamespace, clusterName, kafkaSelector.get().getMatchLabels()); + LOGGER.debugCr(reconciliation, "{} {} in namespace {} belongs to a Kafka cluster {} which does not match label selector {} and will be ignored", kind(), kafkaRebalance.getMetadata().getName(), clusterNamespace, clusterName, kafkaSelector.get().getMatchLabels()); return Future.succeededFuture(); } else if (kafka.getSpec().getCruiseControl() == null) { - log.warn("{}: Kafka resource lacks 'cruiseControl' declaration : No deployed Cruise Control for doing a rebalance.", reconciliation); - return updateStatus(kafkaRebalance, new KafkaRebalanceStatus(), + LOGGER.warnCr(reconciliation, "Kafka resource lacks 'cruiseControl' declaration : No deployed Cruise Control for doing a rebalance."); + return updateStatus(reconciliation, kafkaRebalance, new KafkaRebalanceStatus(), new InvalidResourceException("Kafka resource 
lacks 'cruiseControl' declaration " + ": No deployed Cruise Control for doing a rebalance.")).mapEmpty(); } @@ -1064,12 +1063,12 @@ private Future> onReady(Reconcilia currentState = KafkaRebalanceState.valueOf(rebalanceStateType); } // Check annotation - KafkaRebalanceAnnotation rebalanceAnnotation = rebalanceAnnotation(currentKafkaRebalance); + KafkaRebalanceAnnotation rebalanceAnnotation = rebalanceAnnotation(reconciliation, currentKafkaRebalance); return reconcile(reconciliation, cruiseControlHost(clusterName, clusterNamespace), apiClient, currentKafkaRebalance, currentState, rebalanceAnnotation).mapEmpty(); }, exception -> Future.failedFuture(exception).mapEmpty()); - }, exception -> updateStatus(kafkaRebalance, new KafkaRebalanceStatus(), exception).mapEmpty()); + }, exception -> updateStatus(reconciliation, kafkaRebalance, new KafkaRebalanceStatus(), exception).mapEmpty()); } @@ -1083,7 +1082,7 @@ private Future> requestRebalance(R private Future> requestRebalance(Reconciliation reconciliation, String host, CruiseControlApi apiClient, KafkaRebalance kafkaRebalance, boolean dryrun, RebalanceOptions.RebalanceOptionsBuilder rebalanceOptionsBuilder, String userTaskID) { - log.info("{}: Requesting Cruise Control rebalance [dryrun={}]", reconciliation, dryrun); + LOGGER.infoCr(reconciliation, "Requesting Cruise Control rebalance [dryrun={}]", dryrun); rebalanceOptionsBuilder.withVerboseResponse(); if (!dryrun) { rebalanceOptionsBuilder.withFullRun(); @@ -1094,22 +1093,22 @@ private Future> requestRebalance(R if (response.isNotEnoughDataForProposal()) { // If there is not enough data for a rebalance, it's an error at the Cruise Control level // Need to re-request the proposal at a later time so move to the PendingProposal State. - return buildRebalanceStatus(null, KafkaRebalanceState.PendingProposal, validate(kafkaRebalance)); + return buildRebalanceStatus(null, KafkaRebalanceState.PendingProposal, validate(reconciliation, kafkaRebalance)); } else if (response.isProposalStillCalaculating()) { // If rebalance proposal is still being processed, we need to re-request the proposal at a later time // with the corresponding session-id so we move to the PendingProposal State. - return buildRebalanceStatus(response.getUserTaskId(), KafkaRebalanceState.PendingProposal, validate(kafkaRebalance)); + return buildRebalanceStatus(response.getUserTaskId(), KafkaRebalanceState.PendingProposal, validate(reconciliation, kafkaRebalance)); } } else { if (response.isNotEnoughDataForProposal()) { // We do not include a session id with this status as we do not want to retrieve the state of // this failed tasks (COMPLETED_WITH_ERROR) - return buildRebalanceStatus(null, KafkaRebalanceState.PendingProposal, validate(kafkaRebalance)); + return buildRebalanceStatus(null, KafkaRebalanceState.PendingProposal, validate(reconciliation, kafkaRebalance)); } else if (response.isProposalStillCalaculating()) { // If dryrun=false and the proposal is not ready we are going to be in a rebalancing state as // soon as it is ready, so set the state to rebalancing. // In the onRebalancing method the optimization proposal will be added when it is ready. 
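// Editor's illustrative sketch (not part of the patch): the rebalanceAnnotation(...) change further
// below threads the Reconciliation into the helper so that the warning about an invalid
// strimzi.io/rebalance value carries the resource context. Standalone, that defensive parse has
// roughly the following shape; the enum constants are assumptions made for this example only.
final class RebalanceAnnotationParseSketch {
    enum Value { none, approve, stop, refresh, unknown }

    static Value parse(String rawValue) {
        if (rawValue == null) {
            return Value.none;       // annotation not set on the resource
        }
        try {
            return Value.valueOf(rawValue);
        } catch (IllegalArgumentException e) {
            // an unrecognised value is reported (warnCr in the operator) and treated as unknown
            return Value.unknown;
        }
    }
}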
- return buildRebalanceStatus(response.getUserTaskId(), KafkaRebalanceState.Rebalancing, validate(kafkaRebalance)); + return buildRebalanceStatus(response.getUserTaskId(), KafkaRebalanceState.Rebalancing, validate(reconciliation, kafkaRebalance)); } } @@ -1130,10 +1129,11 @@ private Future> requestRebalance(R * If the annotation is not set it returns {@code RebalanceAnnotation.none} while if it's a not valid value, it * returns {@code RebalanceAnnotation.unknown}. * + * @param reconciliation The reconciliation * @param kafkaRebalance KafkaRebalance resource instance from which getting the value of the strimzio.io/rebalance annotation * @return the {@code RebalanceAnnotation} enum value for the raw String value of the strimzio.io/rebalance annotation */ - private KafkaRebalanceAnnotation rebalanceAnnotation(KafkaRebalance kafkaRebalance) { + private KafkaRebalanceAnnotation rebalanceAnnotation(Reconciliation reconciliation, KafkaRebalance kafkaRebalance) { String rebalanceAnnotationValue = rawRebalanceAnnotation(kafkaRebalance); KafkaRebalanceAnnotation rebalanceAnnotation; try { @@ -1141,7 +1141,7 @@ private KafkaRebalanceAnnotation rebalanceAnnotation(KafkaRebalance kafkaRebalan KafkaRebalanceAnnotation.none : KafkaRebalanceAnnotation.valueOf(rebalanceAnnotationValue); } catch (IllegalArgumentException e) { rebalanceAnnotation = KafkaRebalanceAnnotation.unknown; - log.warn("Wrong annotation value {}={} on {}/{}", + LOGGER.warnCr(reconciliation, "Wrong annotation value {}={} on {}/{}", ANNO_STRIMZI_IO_REBALANCE, rebalanceAnnotationValue, kafkaRebalance.getMetadata().getNamespace(), kafkaRebalance.getMetadata().getName()); } diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/DefaultZookeeperScalerProvider.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/DefaultZookeeperScalerProvider.java index 853fae881c..0fc15c0c70 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/DefaultZookeeperScalerProvider.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/DefaultZookeeperScalerProvider.java @@ -5,6 +5,7 @@ package io.strimzi.operator.cluster.operator.resource; import io.fabric8.kubernetes.api.model.Secret; +import io.strimzi.operator.common.Reconciliation; import io.vertx.core.Vertx; import java.util.function.Function; @@ -18,6 +19,7 @@ public class DefaultZookeeperScalerProvider implements ZookeeperScalerProvider { /** * Creates an instance of ZookeeperScaler * + * @param reconciliation The reconciliation * @param vertx Vertx instance * @param zookeeperConnectionString Connection string to connect to the right Zookeeper * @param zkNodeAddress Function for generating the Zookeeper node addresses @@ -27,7 +29,7 @@ public class DefaultZookeeperScalerProvider implements ZookeeperScalerProvider { * * @return ZookeeperScaler instance */ - public ZookeeperScaler createZookeeperScaler(Vertx vertx, String zookeeperConnectionString, Function zkNodeAddress, Secret clusterCaCertSecret, Secret coKeySecret, long operationTimeoutMs) { - return new ZookeeperScaler(vertx, zooAdminProvider, zookeeperConnectionString, zkNodeAddress, clusterCaCertSecret, coKeySecret, operationTimeoutMs); + public ZookeeperScaler createZookeeperScaler(Reconciliation reconciliation, Vertx vertx, String zookeeperConnectionString, Function zkNodeAddress, Secret clusterCaCertSecret, Secret coKeySecret, long operationTimeoutMs) { + return new ZookeeperScaler(reconciliation, vertx, 
zooAdminProvider, zookeeperConnectionString, zkNodeAddress, clusterCaCertSecret, coKeySecret, operationTimeoutMs); } } diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/KafkaAvailability.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/KafkaAvailability.java index fe56c10e31..4b77f326b9 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/KafkaAvailability.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/KafkaAvailability.java @@ -4,6 +4,7 @@ */ package io.strimzi.operator.cluster.operator.resource; +import io.strimzi.operator.common.ReconciliationLogger; import io.strimzi.operator.common.Reconciliation; import io.vertx.core.Future; import io.vertx.core.Promise; @@ -16,8 +17,6 @@ import org.apache.kafka.common.TopicPartitionInfo; import org.apache.kafka.common.config.ConfigResource; import org.apache.kafka.common.config.TopicConfig; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import java.util.Collection; import java.util.HashSet; @@ -34,7 +33,7 @@ */ class KafkaAvailability { - private static final Logger log = LogManager.getLogger(KafkaAvailability.class.getName()); + private static final ReconciliationLogger LOGGER = ReconciliationLogger.create(KafkaAvailability.class.getName()); private final Admin ac; @@ -42,15 +41,15 @@ class KafkaAvailability { private final Future> descriptions; - KafkaAvailability(Admin ac, Reconciliation reconciliation) { + KafkaAvailability(Reconciliation reconciliation, Admin ac) { this.ac = ac; this.reconciliation = reconciliation; // 1. Get all topic names Future> topicNames = topicNames(); // 2. Get topic descriptions descriptions = topicNames.compose(names -> { - log.debug("{}: Got {} topic names", reconciliation, names.size()); - log.trace("{}: Topic names {}", reconciliation, names); + LOGGER.debugCr(reconciliation, "Got {} topic names", names.size()); + LOGGER.traceCr(reconciliation, "Topic names {}", names); return describeTopics(names); }); } @@ -60,17 +59,17 @@ class KafkaAvailability { * producers with acks=all publishing to topics with a {@code min.in.sync.replicas}. 
*/ Future canRoll(int podId) { - log.debug("{}: Determining whether broker {} can be rolled", reconciliation, podId); + LOGGER.debugCr(reconciliation, "Determining whether broker {} can be rolled", podId); return canRollBroker(descriptions, podId); } private Future canRollBroker(Future> descriptions, int podId) { Future> topicsOnGivenBroker = descriptions .compose(topicDescriptions -> { - log.debug("{}: Got {} topic descriptions", reconciliation, topicDescriptions.size()); + LOGGER.debugCr(reconciliation, "Got {} topic descriptions", topicDescriptions.size()); return Future.succeededFuture(groupTopicsByBroker(topicDescriptions, podId)); }).recover(error -> { - log.warn("{}: failed to get topic descriptions", reconciliation, error); + LOGGER.warnCr(reconciliation, "failed to get topic descriptions", error); return Future.failedFuture(error); }); @@ -84,11 +83,11 @@ private Future canRollBroker(Future> descr boolean canRoll = tds.stream().noneMatch( td -> wouldAffectAvailability(podId, topicNameToConfig, td)); if (!canRoll) { - log.debug("{}: Restart pod {} would remove it from ISR, stalling producers with acks=all", reconciliation, podId); + LOGGER.debugCr(reconciliation, "Restart pod {} would remove it from ISR, stalling producers with acks=all", podId); } return canRoll; }).recover(error -> { - log.warn("{}: Error determining whether it is safe to restart pod {}", reconciliation, podId, error); + LOGGER.warnCr(reconciliation, "Error determining whether it is safe to restart pod {}", podId, error); return Future.failedFuture(error); }); } @@ -99,36 +98,36 @@ private boolean wouldAffectAvailability(int broker, Map nameToCo int minIsr; if (minIsrConfig != null && minIsrConfig.value() != null) { minIsr = parseInt(minIsrConfig.value()); - log.debug("{}: {} has {}={}.", reconciliation, td.name(), TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, minIsr); + LOGGER.debugCr(reconciliation, "{} has {}={}.", td.name(), TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, minIsr); } else { minIsr = -1; - log.debug("{}: {} lacks {}.", reconciliation, td.name(), TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG); + LOGGER.debugCr(reconciliation, "{} lacks {}.", td.name(), TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG); } for (TopicPartitionInfo pi : td.partitions()) { List isr = pi.isr(); if (minIsr >= 0) { if (pi.replicas().size() <= minIsr) { - log.debug("{}: {}/{} will be underreplicated (|ISR|={} and {}={}) if broker {} is restarted, but there are only {} replicas.", - reconciliation, td.name(), pi.partition(), isr.size(), TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, minIsr, broker, + LOGGER.debugCr(reconciliation, "{}/{} will be underreplicated (|ISR|={} and {}={}) if broker {} is restarted, but there are only {} replicas.", + td.name(), pi.partition(), isr.size(), TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, minIsr, broker, pi.replicas().size()); } else if (isr.size() < minIsr && contains(pi.replicas(), broker)) { logIsrReplicas(td, pi, isr); - log.info("{}: {}/{} is already underreplicated (|ISR|={}, {}={}); broker {} has a replica, " + + LOGGER.infoCr(reconciliation, "{}/{} is already underreplicated (|ISR|={}, {}={}); broker {} has a replica, " + "so should not be restarted right now (it might be first to catch up).", - reconciliation, td.name(), pi.partition(), isr.size(), TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, minIsr, broker); + td.name(), pi.partition(), isr.size(), TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, minIsr, broker); return true; } else if (isr.size() == minIsr && contains(isr, broker)) { if (minIsr < pi.replicas().size()) { 
logIsrReplicas(td, pi, isr); - log.info("{}: {}/{} will be underreplicated (|ISR|={} and {}={}) if broker {} is restarted.", - reconciliation, td.name(), pi.partition(), isr.size(), TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, minIsr, broker); + LOGGER.infoCr(reconciliation, "{}/{} will be underreplicated (|ISR|={} and {}={}) if broker {} is restarted.", + td.name(), pi.partition(), isr.size(), TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, minIsr, broker); return true; } else { - log.debug("{}: {}/{} will be underreplicated (|ISR|={} and {}={}) if broker {} is restarted, but there are only {} replicas.", - reconciliation, td.name(), pi.partition(), isr.size(), TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, minIsr, broker, + LOGGER.debugCr(reconciliation, "{}/{} will be underreplicated (|ISR|={} and {}={}) if broker {} is restarted, but there are only {} replicas.", + td.name(), pi.partition(), isr.size(), TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, minIsr, broker, pi.replicas().size()); } } @@ -138,9 +137,7 @@ && contains(isr, broker)) { } private void logIsrReplicas(TopicDescription td, TopicPartitionInfo pi, List isr) { - if (log.isDebugEnabled()) { - log.debug("{}: {}/{} has ISR={}, replicas={}", reconciliation, td.name(), pi.partition(), nodeList(isr), nodeList(pi.replicas())); - } + LOGGER.debugCr(reconciliation, "{}/{} has ISR={}, replicas={}", td.name(), pi.partition(), nodeList(isr), nodeList(pi.replicas())); } String nodeList(List nodes) { @@ -152,7 +149,7 @@ private boolean contains(List isr, int broker) { } private Future> topicConfigs(Collection topicNames) { - log.debug("{}: Getting topic configs for {} topics", reconciliation, topicNames.size()); + LOGGER.debugCr(reconciliation, "Getting topic configs for {} topics", topicNames.size()); List configs = topicNames.stream() .map((String topicName) -> new ConfigResource(ConfigResource.Type.TOPIC, topicName)) .collect(Collectors.toList()); @@ -161,7 +158,7 @@ private Future> topicConfigs(Collection topicNames) if (error != null) { promise.fail(error); } else { - log.debug("{}: Got topic configs for {} topics", reconciliation, topicNames.size()); + LOGGER.debugCr(reconciliation, "Got topic configs for {} topics", topicNames.size()); promise.complete(topicNameToConfig.entrySet().stream() .collect(Collectors.toMap( entry -> entry.getKey().name(), @@ -174,7 +171,7 @@ private Future> topicConfigs(Collection topicNames) private Set groupTopicsByBroker(Collection tds, int podId) { Set topicPartitionInfos = new HashSet<>(); for (TopicDescription td : tds) { - log.trace("{}: {}", reconciliation, td); + LOGGER.traceCr(reconciliation, td); for (TopicPartitionInfo pd : td.partitions()) { for (Node broker : pd.replicas()) { if (podId == broker.id()) { @@ -193,7 +190,7 @@ protected Future> describeTopics(Set names) if (error != null) { descPromise.fail(error); } else { - log.debug("{}: Got topic descriptions for {} topics", reconciliation, tds.size()); + LOGGER.debugCr(reconciliation, "Got topic descriptions for {} topics", tds.size()); descPromise.complete(tds.values()); } }); @@ -207,7 +204,7 @@ protected Future> topicNames() { if (error != null) { namesPromise.fail(error); } else { - log.debug("{}: Got {} topic names", reconciliation, names.size()); + LOGGER.debugCr(reconciliation, "Got {} topic names", names.size()); namesPromise.complete(names); } }); diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/KafkaBrokerConfigurationDiff.java 
b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/KafkaBrokerConfigurationDiff.java index b9946352c1..e36cb43d28 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/KafkaBrokerConfigurationDiff.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/KafkaBrokerConfigurationDiff.java @@ -20,13 +20,13 @@ import io.strimzi.kafka.config.model.Scope; import io.strimzi.operator.cluster.model.KafkaConfiguration; import io.strimzi.operator.cluster.model.KafkaVersion; +import io.strimzi.operator.common.Reconciliation; +import io.strimzi.operator.common.ReconciliationLogger; import io.strimzi.operator.common.model.OrderedProperties; import io.strimzi.operator.common.operator.resource.AbstractJsonDiff; import org.apache.kafka.clients.admin.AlterConfigOp; import org.apache.kafka.clients.admin.Config; import org.apache.kafka.clients.admin.ConfigEntry; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import static io.fabric8.kubernetes.client.internal.PatchUtils.patchMapper; @@ -41,7 +41,9 @@ */ public class KafkaBrokerConfigurationDiff extends AbstractJsonDiff { - private static final Logger log = LogManager.getLogger(KafkaBrokerConfigurationDiff.class); + private static final ReconciliationLogger LOGGER = ReconciliationLogger.create(KafkaBrokerConfigurationDiff.class); + + private final Reconciliation reconciliation; private final Collection diff; private int brokerId; private Map configModel; @@ -67,7 +69,8 @@ public class KafkaBrokerConfigurationDiff extends AbstractJsonDiff { + "|zookeeper\\.clientCnxnSocket" + "|broker\\.rack)$"); - public KafkaBrokerConfigurationDiff(Config brokerConfigs, String desired, KafkaVersion kafkaVersion, int brokerId) { + public KafkaBrokerConfigurationDiff(Reconciliation reconciliation, Config brokerConfigs, String desired, KafkaVersion kafkaVersion, int brokerId) { + this.reconciliation = reconciliation; this.configModel = KafkaConfiguration.readConfigModel(kafkaVersion); this.brokerId = brokerId; this.diff = diff(brokerId, desired, brokerConfigs, configModel); @@ -98,7 +101,7 @@ public boolean canBeUpdatedDynamically() { for (AlterConfigOp entry : diff) { if (isEntryReadOnly(entry.configEntry())) { result = false; - log.debug("Configuration can't be updated dynamically due to: {}", entry); + LOGGER.debugCr(reconciliation, "Configuration can't be updated dynamically due to: {}", entry); break; } } @@ -140,7 +143,7 @@ private static boolean isIgnorableProperty(String key) { * @param configModel default configuration for {@code kafkaVersion} of broker * @return Collection of AlterConfigOp containing all entries which were changed from current in desired configuration */ - private static Collection diff(int brokerId, String desired, + private Collection diff(int brokerId, String desired, Config brokerConfigs, Map configModel) { if (brokerConfigs == null || desired == null) { @@ -191,50 +194,50 @@ private static Collection diff(int brokerId, String desired, if ("remove".equals(op)) { // there is a lot of properties set by default - not having them in desired causes very noisy log output - log.trace("Kafka Broker {} Config Differs : {}", brokerId, d); - log.trace("Current Kafka Broker Config path {} has value {}", pathValueWithoutSlash, lookupPath(source, pathValue)); - log.trace("Desired Kafka Broker Config path {} has value {}", pathValueWithoutSlash, lookupPath(target, pathValue)); + LOGGER.traceCr(reconciliation, "Kafka Broker {} 
Config Differs : {}", brokerId, d); + LOGGER.traceCr(reconciliation, "Current Kafka Broker Config path {} has value {}", pathValueWithoutSlash, lookupPath(source, pathValue)); + LOGGER.traceCr(reconciliation, "Desired Kafka Broker Config path {} has value {}", pathValueWithoutSlash, lookupPath(target, pathValue)); } else { - log.debug("Kafka Broker {} Config Differs : {}", brokerId, d); - log.debug("Current Kafka Broker Config path {} has value {}", pathValueWithoutSlash, lookupPath(source, pathValue)); - log.debug("Desired Kafka Broker Config path {} has value {}", pathValueWithoutSlash, lookupPath(target, pathValue)); + LOGGER.debugCr(reconciliation, "Kafka Broker {} Config Differs : {}", brokerId, d); + LOGGER.debugCr(reconciliation, "Current Kafka Broker Config path {} has value {}", pathValueWithoutSlash, lookupPath(source, pathValue)); + LOGGER.debugCr(reconciliation, "Desired Kafka Broker Config path {} has value {}", pathValueWithoutSlash, lookupPath(target, pathValue)); } } return updatedCE; } - private static void updateOrAdd(String propertyName, Map configModel, Map desiredMap, Collection updatedCE) { + private void updateOrAdd(String propertyName, Map configModel, Map desiredMap, Collection updatedCE) { if (!isIgnorableProperty(propertyName)) { if (isCustomEntry(propertyName, configModel)) { - log.trace("custom property {} has been updated/added {}", propertyName, desiredMap.get(propertyName)); + LOGGER.traceCr(reconciliation, "custom property {} has been updated/added {}", propertyName, desiredMap.get(propertyName)); } else { - log.trace("property {} has been updated/added {}", propertyName, desiredMap.get(propertyName)); + LOGGER.traceCr(reconciliation, "property {} has been updated/added {}", propertyName, desiredMap.get(propertyName)); updatedCE.add(new AlterConfigOp(new ConfigEntry(propertyName, desiredMap.get(propertyName)), AlterConfigOp.OpType.SET)); } } else { - log.trace("{} is ignorable, not considering"); + LOGGER.traceCr(reconciliation, "{} is ignorable, not considering"); } } - private static void removeProperty(Map configModel, Collection updatedCE, String pathValueWithoutSlash, ConfigEntry entry) { + private void removeProperty(Map configModel, Collection updatedCE, String pathValueWithoutSlash, ConfigEntry entry) { if (isCustomEntry(entry.name(), configModel)) { // we are deleting custom option - log.trace("removing custom property {}", entry.name()); + LOGGER.traceCr(reconciliation, "removing custom property {}", entry.name()); } else if (entry.isDefault()) { // entry is in current, is not in desired, is default -> it uses default value, skip. // Some default properties do not have set ConfigEntry.ConfigSource.DEFAULT_CONFIG and thus // we are removing property. That might cause redundant RU. 
To fix this we would have to add defaultValue // to the configModel - log.trace("{} not set in desired, using default value", entry.name()); + LOGGER.traceCr(reconciliation, "{} not set in desired, using default value", entry.name()); } else { // entry is in current, is not in desired, is not default -> it was using non-default value and was removed // if the entry was custom, it should be deleted if (!isIgnorableProperty(pathValueWithoutSlash)) { updatedCE.add(new AlterConfigOp(new ConfigEntry(pathValueWithoutSlash, null), AlterConfigOp.OpType.DELETE)); - log.trace("{} not set in desired, unsetting back to default {}", entry.name(), "deleted entry"); + LOGGER.traceCr(reconciliation, "{} not set in desired, unsetting back to default {}", entry.name(), "deleted entry"); } else { - log.trace("{} is ignorable, not considering as removed"); + LOGGER.traceCr(reconciliation, "{} is ignorable, not considering as removed"); } } } diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/KafkaBrokerLoggingConfigurationDiff.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/KafkaBrokerLoggingConfigurationDiff.java index 575acbe158..9640329293 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/KafkaBrokerLoggingConfigurationDiff.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/KafkaBrokerLoggingConfigurationDiff.java @@ -5,13 +5,13 @@ package io.strimzi.operator.cluster.operator.resource; +import io.strimzi.operator.common.Reconciliation; +import io.strimzi.operator.common.ReconciliationLogger; import io.strimzi.operator.common.Util; import io.strimzi.operator.common.operator.resource.AbstractJsonDiff; import org.apache.kafka.clients.admin.AlterConfigOp; import org.apache.kafka.clients.admin.Config; import org.apache.kafka.clients.admin.ConfigEntry; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import java.io.BufferedReader; import java.io.StringReader; @@ -24,10 +24,12 @@ public class KafkaBrokerLoggingConfigurationDiff extends AbstractJsonDiff { - private static final Logger log = LogManager.getLogger(KafkaBrokerLoggingConfigurationDiff.class); + private static final ReconciliationLogger LOGGER = ReconciliationLogger.create(KafkaBrokerLoggingConfigurationDiff.class); private final Collection diff; + private final Reconciliation reconciliation; - public KafkaBrokerLoggingConfigurationDiff(Config brokerConfigs, String desired, int brokerId) { + public KafkaBrokerLoggingConfigurationDiff(Reconciliation reconciliation, Config brokerConfigs, String desired, int brokerId) { + this.reconciliation = reconciliation; this.diff = diff(brokerId, desired, brokerConfigs); } @@ -53,7 +55,7 @@ public int getDiffSize() { * @param brokerConfigs current configuration * @return Collection of AlterConfigOp containing all entries which were changed from current in desired configuration */ - private static Collection diff(int brokerId, String desired, + private Collection diff(int brokerId, String desired, Config brokerConfigs) { if (brokerConfigs == null || desired == null) { return Collections.emptyList(); @@ -66,20 +68,20 @@ private static Collection diff(int brokerId, String desired, desiredMap.put("root", LoggingLevel.WARN.name()); } - LoggingLevelResolver levelResolver = new LoggingLevelResolver(desiredMap); + LoggingLevelResolver levelResolver = new LoggingLevelResolver(reconciliation, desiredMap); for (ConfigEntry entry: 
brokerConfigs.entries()) { LoggingLevel desiredLevel; try { desiredLevel = levelResolver.resolveLevel(entry.name()); } catch (IllegalArgumentException e) { - log.warn("Skipping {} - it is configured with an unsupported value (\"{}\")", entry.name(), e.getMessage()); + LOGGER.warnCr(reconciliation, "Skipping {} - it is configured with an unsupported value (\"{}\")", entry.name(), e.getMessage()); continue; } if (!desiredLevel.name().equals(entry.value())) { updatedCE.add(new AlterConfigOp(new ConfigEntry(entry.name(), desiredLevel.name()), AlterConfigOp.OpType.SET)); - log.trace("{} has an outdated value. Setting to {}", entry.name(), desiredLevel.name()); + LOGGER.traceCr(reconciliation, "{} has an outdated value. Setting to {}", entry.name(), desiredLevel.name()); } } @@ -87,16 +89,15 @@ private static Collection diff(int brokerId, String desired, String name = ent.getKey(); ConfigEntry configEntry = brokerConfigs.get(name); if (configEntry == null) { - String level = LoggingLevel.nameOrDefault(LoggingLevel.ofLog4jConfig(ent.getValue()), LoggingLevel.WARN); + String level = LoggingLevel.nameOrDefault(LoggingLevel.ofLog4jConfig(reconciliation, ent.getValue()), LoggingLevel.WARN); updatedCE.add(new AlterConfigOp(new ConfigEntry(name, level), AlterConfigOp.OpType.SET)); - log.trace("{} not set. Setting to {}", name, level); + LOGGER.traceCr(reconciliation, "{} not set. Setting to {}", name, level); } } return updatedCE; } - - protected static Map readLog4jConfig(String config) { + protected Map readLog4jConfig(String config) { Map parsed = new LinkedHashMap<>(); Map env = new HashMap<>(); BufferedReader firstPassReader = new BufferedReader(new StringReader(config)); @@ -121,12 +122,12 @@ protected static Map readLog4jConfig(String config) { } else { env.put(line.trim(), ""); } - log.debug("Treating the line as ENV var declaration: {}", line); + LOGGER.debugCr(reconciliation, "Treating the line as ENV var declaration: {}", line); continue; } } } catch (Exception e) { - log.error("Failed to parse logging configuration: " + config, e); + LOGGER.errorCr(reconciliation, "Failed to parse logging configuration: " + config, e); return Collections.emptyMap(); } @@ -145,7 +146,7 @@ protected static Map readLog4jConfig(String config) { int startIdx = "log4j.logger.".length(); int endIdx = line.indexOf("=", startIdx); if (endIdx == -1) { - log.debug("Skipping log4j.logger.* declaration without level: {}", line); + LOGGER.debugCr(reconciliation, "Skipping log4j.logger.* declaration without level: {}", line); continue; } String name = line.substring(startIdx, endIdx).trim(); @@ -159,11 +160,11 @@ protected static Map readLog4jConfig(String config) { parsed.put("root", Util.expandVar(line.substring(startIdx).split(",")[0].trim(), env)); } else { - log.debug("Skipping log4j line: {}", line); + LOGGER.debugCr(reconciliation, "Skipping log4j line: {}", line); } } } catch (Exception e) { - log.error("Failed to parse logging configuration: " + config, e); + LOGGER.errorCr(reconciliation, "Failed to parse logging configuration: " + config, e); return Collections.emptyMap(); } return parsed; @@ -187,8 +188,10 @@ public boolean isEmpty() { static class LoggingLevelResolver { private final Map config; + private final Reconciliation reconciliation; - LoggingLevelResolver(Map loggingConfig) { + LoggingLevelResolver(Reconciliation reconciliation, Map loggingConfig) { + this.reconciliation = reconciliation; this.config = loggingConfig; } @@ -221,7 +224,7 @@ static class LoggingLevelResolver { LoggingLevel 
resolveLevel(String name) { String level = config.get(name); if (level != null) { - LoggingLevel result = LoggingLevel.ofLog4jConfig(level); + LoggingLevel result = LoggingLevel.ofLog4jConfig(reconciliation, level); return result != null ? result : LoggingLevel.WARN; } @@ -234,7 +237,7 @@ LoggingLevel resolveLevel(String name) { level = config.get(name.substring(0, endIdx)); } if (level != null) { - LoggingLevel result = LoggingLevel.ofLog4jConfig(level); + LoggingLevel result = LoggingLevel.ofLog4jConfig(reconciliation, level); return result != null ? result : LoggingLevel.WARN; } endIdx -= 1; @@ -254,7 +257,7 @@ enum LoggingLevel { TRACE, ALL; - static LoggingLevel ofLog4jConfig(String value) { + static LoggingLevel ofLog4jConfig(Reconciliation reconciliation, String value) { if (value != null && !"".equals(value)) { String v = value.split(",")[0].trim(); if ("ALL".equals(v)) { @@ -265,7 +268,7 @@ static LoggingLevel ofLog4jConfig(String value) { try { return valueOf(v); } catch (RuntimeException e) { - log.warn("Invalid logging level: {}. Using WARN as a failover.", v); + LOGGER.warnCr(reconciliation, "Invalid logging level: {}. Using WARN as a failover.", v); } } } diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/KafkaRoller.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/KafkaRoller.java index a07c57386b..b64dddfb6d 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/KafkaRoller.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/KafkaRoller.java @@ -41,6 +41,7 @@ import io.strimzi.operator.common.AdminClientProvider; import io.strimzi.operator.common.BackOff; import io.strimzi.operator.common.DefaultAdminClientProvider; +import io.strimzi.operator.common.ReconciliationLogger; import io.strimzi.operator.common.Reconciliation; import io.strimzi.operator.common.Util; import io.strimzi.operator.common.model.Labels; @@ -60,8 +61,6 @@ import org.apache.kafka.common.config.ConfigException; import org.apache.kafka.common.config.ConfigResource; import org.apache.kafka.common.errors.SslAuthenticationException; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import static java.util.Collections.singletonList; @@ -106,7 +105,7 @@ @SuppressWarnings({"checkstyle:ClassFanOutComplexity", "checkstyle:ParameterNumber"}) public class KafkaRoller { - private static final Logger log = LogManager.getLogger(KafkaRoller.class); + private static final ReconciliationLogger LOGGER = ReconciliationLogger.create(KafkaRoller.class); private final PodOperator podOperations; private final long pollingIntervalMs; @@ -126,19 +125,19 @@ public class KafkaRoller { private final boolean allowReconfiguration; private Admin allClient; - public KafkaRoller(Vertx vertx, Reconciliation reconciliation, PodOperator podOperations, - long pollingIntervalMs, long operationTimeoutMs, Supplier backOffSupplier, - StatefulSet sts, Secret clusterCaCertSecret, Secret coKeySecret, - String kafkaConfig, String kafkaLogging, KafkaVersion kafkaVersion, boolean allowReconfiguration) { - this(vertx, reconciliation, podOperations, pollingIntervalMs, operationTimeoutMs, backOffSupplier, + public KafkaRoller(Reconciliation reconciliation, Vertx vertx, PodOperator podOperations, + long pollingIntervalMs, long operationTimeoutMs, Supplier backOffSupplier, + StatefulSet sts, Secret clusterCaCertSecret, Secret coKeySecret, + String kafkaConfig, String 
kafkaLogging, KafkaVersion kafkaVersion, boolean allowReconfiguration) { + this(reconciliation, vertx, podOperations, pollingIntervalMs, operationTimeoutMs, backOffSupplier, sts, clusterCaCertSecret, coKeySecret, new DefaultAdminClientProvider(), kafkaConfig, kafkaLogging, kafkaVersion, allowReconfiguration); } - public KafkaRoller(Vertx vertx, Reconciliation reconciliation, PodOperator podOperations, - long pollingIntervalMs, long operationTimeoutMs, Supplier backOffSupplier, - StatefulSet sts, Secret clusterCaCertSecret, Secret coKeySecret, - AdminClientProvider adminClientProvider, - String kafkaConfig, String kafkaLogging, KafkaVersion kafkaVersion, boolean allowReconfiguration) { + public KafkaRoller(Reconciliation reconciliation, Vertx vertx, PodOperator podOperations, + long pollingIntervalMs, long operationTimeoutMs, Supplier backOffSupplier, + StatefulSet sts, Secret clusterCaCertSecret, Secret coKeySecret, + AdminClientProvider adminClientProvider, + String kafkaConfig, String kafkaLogging, KafkaVersion kafkaVersion, boolean allowReconfiguration) { this.namespace = sts.getMetadata().getNamespace(); this.cluster = Labels.cluster(sts); this.numPods = sts.getSpec().getReplicas(); @@ -205,7 +204,7 @@ public Future rollingRestart(Function> podNeedsRestart) // only for it not to become ready and thus drive the cluster to a worse state. podIds.add(podOperations.isReady(namespace, podName(podId)) ? podIds.size() : 0, podId); } - log.debug("{}: Initial order for rolling restart {}", reconciliation, podIds); + LOGGER.debugCr(reconciliation, "Initial order for rolling restart {}", podIds); List futures = new ArrayList<>(numPods); for (Integer podId: podIds) { futures.add(schedule(podId, 0, TimeUnit.MILLISECONDS)); @@ -217,7 +216,7 @@ public Future rollingRestart(Function> podNeedsRestart) allClient.close(Duration.ofSeconds(30)); } } catch (RuntimeException e) { - log.debug("{}: Exception closing admin client", reconciliation, e); + LOGGER.debugCr(reconciliation, "Exception closing admin client", e); } vertx.runOnContext(ignored -> result.handle(ar.map((Void) null))); }); @@ -273,7 +272,7 @@ private Future schedule(int podId, long delay, TimeUnit unit) { RestartContext ctx = podToContext.computeIfAbsent(podId, k -> new RestartContext(backoffSupplier)); singleExecutor.schedule(() -> { - log.debug("{}: Considering restart of pod {} after delay of {} {}", reconciliation, podId, delay, unit); + LOGGER.debugCr(reconciliation, "Considering restart of pod {} after delay of {} {}", podId, delay, unit); try { restartIfNecessary(podId, ctx); ctx.promise.complete(); @@ -281,8 +280,8 @@ private Future schedule(int podId, long delay, TimeUnit unit) { // Let the executor deal with interruption. Thread.currentThread().interrupt(); } catch (FatalProblem e) { - log.info("{}: Could not restart pod {}, giving up after {} attempts. Total delay between attempts {}ms", - reconciliation, podId, ctx.backOff.maxAttempts(), ctx.backOff.totalDelayMs(), e); + LOGGER.infoCr(reconciliation, "Could not restart pod {}, giving up after {} attempts. Total delay between attempts {}ms", + podId, ctx.backOff.maxAttempts(), ctx.backOff.totalDelayMs(), e); ctx.promise.fail(e); singleExecutor.shutdownNow(); podToContext.forEachValue(Integer.MAX_VALUE, f -> { @@ -290,15 +289,15 @@ private Future schedule(int podId, long delay, TimeUnit unit) { }); } catch (Exception e) { if (ctx.backOff.done()) { - log.info("{}: Could not roll pod {}, giving up after {} attempts. 
Total delay between attempts {}ms", - reconciliation, podId, ctx.backOff.maxAttempts(), ctx.backOff.totalDelayMs(), e); + LOGGER.infoCr(reconciliation, "Could not roll pod {}, giving up after {} attempts. Total delay between attempts {}ms", + podId, ctx.backOff.maxAttempts(), ctx.backOff.totalDelayMs(), e); ctx.promise.fail(e instanceof TimeoutException ? new io.strimzi.operator.common.operator.resource.TimeoutException() : e); } else { long delay1 = ctx.backOff.delayMs(); - log.info("{}: Could not roll pod {} due to {}, retrying after at least {}ms", - reconciliation, podId, e, delay1); + LOGGER.infoCr(reconciliation, "Could not roll pod {} due to {}, retrying after at least {}ms", + podId, e, delay1); schedule(podId, delay1, TimeUnit.MILLISECONDS); } } @@ -355,19 +354,19 @@ private void restartIfNecessary(int podId, RestartContext restartContext) RestartPlan restartPlan = restartPlan(podId, pod, restartContext); if (restartPlan.forceRestart || restartPlan.needsRestart || restartPlan.needsReconfig) { if (!restartPlan.forceRestart && deferController(podId, restartContext)) { - log.debug("{}: Pod {} is controller and there are other pods to roll", reconciliation, podId); + LOGGER.debugCr(reconciliation, "Pod {} is controller and there are other pods to roll", podId); throw new ForceableProblem("Pod " + podName(podId) + " is currently the controller and there are other pods still to roll"); } else { if (restartPlan.forceRestart || canRoll(podId, 60_000, TimeUnit.MILLISECONDS, false)) { // Check for rollability before trying a dynamic update so that if the dynamic update fails we can go to a full restart if (restartPlan.forceRestart || !maybeDynamicUpdateBrokerConfig(podId, restartPlan)) { - log.debug("{}: Pod {} can be rolled now", reconciliation, podId); + LOGGER.debugCr(reconciliation, "Pod {} can be rolled now", podId); restartAndAwaitReadiness(pod, operationTimeoutMs, TimeUnit.MILLISECONDS); } else { awaitReadiness(pod, operationTimeoutMs, TimeUnit.MILLISECONDS); } } else { - log.debug("{}: Pod {} cannot be rolled right now", reconciliation, podId); + LOGGER.debugCr(reconciliation, "Pod {} cannot be rolled right now", podId); throw new UnforceableProblem("Pod " + podName(podId) + " is currently not rollable"); } } @@ -375,18 +374,18 @@ private void restartIfNecessary(int podId, RestartContext restartContext) // By testing even pods which don't need needsRestart for readiness we prevent successive reconciliations // from taking out a pod each time (due, e.g. to a configuration error). // We rely on Kube to try restarting such pods. 
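// Editor's illustrative sketch (not part of the patch): the schedule(...) hunks above retry a pod
// that could not be rolled, waiting for the next back-off delay, and give up once the back-off is
// exhausted. The same shape, with placeholder names and a fixed delay table instead of Strimzi's
// BackOff, looks roughly like this.
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

final class RollRetrySketch {
    private final ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
    private final long[] delaysMs = {0L, 250L, 500L, 1000L};

    CompletableFuture<Void> schedule(int podId, Runnable restartIfNecessary) {
        CompletableFuture<Void> result = new CompletableFuture<>();
        scheduleAttempt(podId, 0, restartIfNecessary, result);
        return result;
    }

    private void scheduleAttempt(int podId, int attempt, Runnable restartIfNecessary, CompletableFuture<Void> result) {
        executor.schedule(() -> {
            try {
                restartIfNecessary.run();
                result.complete(null);
            } catch (RuntimeException e) {
                if (attempt + 1 < delaysMs.length) {
                    // pod could not be rolled yet; retry after the next back-off delay
                    scheduleAttempt(podId, attempt + 1, restartIfNecessary, result);
                } else {
                    // back-off exhausted; give up and propagate the failure
                    result.completeExceptionally(new RuntimeException("Could not roll pod " + podId, e));
                }
            }
        }, delaysMs[attempt], TimeUnit.MILLISECONDS);
    }
}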
- log.debug("{}: Pod {} does not need to be restarted", reconciliation, podId); - log.debug("{}: Waiting for non-restarted pod {} to become ready", reconciliation, podId); + LOGGER.debugCr(reconciliation, "Pod {} does not need to be restarted", podId); + LOGGER.debugCr(reconciliation, "Waiting for non-restarted pod {} to become ready", podId); await(isReady(namespace, KafkaCluster.kafkaPodName(cluster, podId)), operationTimeoutMs, TimeUnit.MILLISECONDS, e -> new FatalProblem("Error while waiting for non-restarted pod " + podName(podId) + " to become ready", e)); - log.debug("{}: Pod {} is now ready", reconciliation, podId); + LOGGER.debugCr(reconciliation, "Pod {} is now ready", podId); } } catch (ForceableProblem e) { if (isPodStuck(pod) || restartContext.backOff.done() || e.forceNow) { if (canRoll(podId, 60_000, TimeUnit.MILLISECONDS, true)) { - log.warn("{}: Pod {} will be force-rolled, due to error: {}", reconciliation, podName(podId), e.getCause() != null ? e.getCause().getMessage() : e.getMessage()); + LOGGER.warnCr(reconciliation, "Pod {} will be force-rolled, due to error: {}", podName(podId), e.getCause() != null ? e.getCause().getMessage() : e.getMessage()); restartAndAwaitReadiness(pod, operationTimeoutMs, TimeUnit.MILLISECONDS); } else { - log.warn("{}: Pod {} can't be safely force-rolled; original error: ", reconciliation, podName(podId), e.getCause() != null ? e.getCause().getMessage() : e.getMessage()); + LOGGER.warnCr(reconciliation, "Pod {} can't be safely force-rolled; original error: ", podName(podId), e.getCause() != null ? e.getCause().getMessage() : e.getMessage()); throw e; } } else { @@ -436,7 +435,7 @@ private boolean maybeDynamicUpdateBrokerConfig(int podId, RestartPlan restartPla dynamicUpdateBrokerConfig(podId, allClient, restartPlan.diff, restartPlan.logDiff); updatedDynamically = true; } catch (ForceableProblem e) { - log.debug("{}: Pod {} could not be updated dynamically ({}), will restart", reconciliation, podId, e); + LOGGER.debugCr(reconciliation, "Pod {} could not be updated dynamically ({}), will restart", podId, e); updatedDynamically = false; } } else { @@ -487,25 +486,25 @@ private RestartPlan restartPlan(int podId, Pod pod, RestartContext restartContex } if (!needsRestart && allowReconfiguration) { - log.trace("{}: Broker {}: description {}", reconciliation, podId, brokerConfig); - diff = new KafkaBrokerConfigurationDiff(brokerConfig, kafkaConfig, kafkaVersion, podId); + LOGGER.traceCr(reconciliation, "Broker {}: description {}", podId, brokerConfig); + diff = new KafkaBrokerConfigurationDiff(reconciliation, brokerConfig, kafkaConfig, kafkaVersion, podId); loggingDiff = logging(podId); if (diff.getDiffSize() > 0) { if (diff.canBeUpdatedDynamically()) { - log.debug("{}: Pod {} needs to be reconfigured.", reconciliation, podId); + LOGGER.debugCr(reconciliation, "Pod {} needs to be reconfigured.", podId); needsReconfig = true; } else { - log.debug("{}: Pod {} needs to be restarted, because reconfiguration cannot be done dynamically", reconciliation, podId); + LOGGER.debugCr(reconciliation, "Pod {} needs to be restarted, because reconfiguration cannot be done dynamically", podId); needsRestart = true; } } if (loggingDiff.getDiffSize() > 0) { - log.debug("{}: Pod {} logging needs to be reconfigured.", reconciliation, podId); + LOGGER.debugCr(reconciliation, "Pod {} logging needs to be reconfigured.", podId); needsReconfig = true; } } else if (needsRestart) { - log.info("{}: Pod {} needs to be restarted. 
Reason: {}", reconciliation, podId, reasonToRestartPod); + LOGGER.infoCr(reconciliation, "Pod {} needs to be restarted. Reason: {}", podId, reasonToRestartPod); } return new RestartPlan(needsRestart, needsReconfig, podStuck, diff, loggingDiff); } @@ -517,7 +516,7 @@ private RestartPlan restartPlan(int podId, Pod pod, RestartContext restartContex */ protected Config brokerConfig(int brokerId) throws ForceableProblem, InterruptedException { ConfigResource resource = new ConfigResource(ConfigResource.Type.BROKER, String.valueOf(brokerId)); - return await(Util.kafkaFutureToVertxFuture(vertx, allClient.describeConfigs(singletonList(resource)).values().get(resource)), + return await(Util.kafkaFutureToVertxFuture(reconciliation, vertx, allClient.describeConfigs(singletonList(resource)).values().get(resource)), 30, TimeUnit.SECONDS, error -> new ForceableProblem("Error getting broker config", error) ); @@ -530,7 +529,7 @@ protected Config brokerConfig(int brokerId) throws ForceableProblem, Interrupted */ protected Config brokerLogging(int brokerId) throws ForceableProblem, InterruptedException { ConfigResource resource = Util.getBrokersLogging(brokerId); - return await(Util.kafkaFutureToVertxFuture(vertx, allClient.describeConfigs(singletonList(resource)).values().get(resource)), + return await(Util.kafkaFutureToVertxFuture(reconciliation, vertx, allClient.describeConfigs(singletonList(resource)).values().get(resource)), 30, TimeUnit.SECONDS, error -> new ForceableProblem("Error getting broker logging", error) ); @@ -542,31 +541,31 @@ protected void dynamicUpdateBrokerConfig(int podId, Admin ac, KafkaBrokerConfigu updatedConfig.put(Util.getBrokersConfig(podId), configurationDiff.getConfigDiff()); updatedConfig.put(Util.getBrokersLogging(podId), logDiff.getLoggingDiff()); - log.debug("{}: Altering broker configuration {}", reconciliation, podId); - log.trace("{}: Altering broker configuration {} with {}", reconciliation, podId, updatedConfig); + LOGGER.debugCr(reconciliation, "Altering broker configuration {}", podId); + LOGGER.traceCr(reconciliation, "Altering broker configuration {} with {}", podId, updatedConfig); AlterConfigsResult alterConfigResult = ac.incrementalAlterConfigs(updatedConfig); KafkaFuture brokerConfigFuture = alterConfigResult.values().get(Util.getBrokersConfig(podId)); KafkaFuture brokerLoggingConfigFuture = alterConfigResult.values().get(Util.getBrokersLogging(podId)); - await(Util.kafkaFutureToVertxFuture(vertx, brokerConfigFuture), 30, TimeUnit.SECONDS, + await(Util.kafkaFutureToVertxFuture(reconciliation, vertx, brokerConfigFuture), 30, TimeUnit.SECONDS, error -> { - log.error("Error doing dynamic config update", error); + LOGGER.errorCr(reconciliation, "Error doing dynamic config update", error); return new ForceableProblem("Error doing dynamic update", error); }); - await(Util.kafkaFutureToVertxFuture(vertx, brokerLoggingConfigFuture), 30, TimeUnit.SECONDS, + await(Util.kafkaFutureToVertxFuture(reconciliation, vertx, brokerLoggingConfigFuture), 30, TimeUnit.SECONDS, error -> { - log.error("Error performing dynamic logging update for pod {}", podId, error); + LOGGER.errorCr(reconciliation, "Error performing dynamic logging update for pod {}", podId, error); return new ForceableProblem("Error performing dynamic logging update for pod " + podId, error); }); - log.info("{}: Dynamic reconfiguration for broker {} was successful.", reconciliation, podId); + LOGGER.infoCr(reconciliation, "Dynamic reconfiguration for broker {} was successful.", podId); } private 
KafkaBrokerLoggingConfigurationDiff logging(int podId) throws ForceableProblem, InterruptedException { Config brokerLogging = brokerLogging(podId); - log.trace("{}: Broker {}: logging description {}", reconciliation, podId, brokerLogging); - return new KafkaBrokerLoggingConfigurationDiff(brokerLogging, kafkaLogging, podId); + LOGGER.traceCr(reconciliation, "Broker {}: logging description {}", podId, brokerLogging); + return new KafkaBrokerLoggingConfigurationDiff(reconciliation, brokerLogging, kafkaLogging, podId); } /** Exceptions which we're prepared to ignore (thus forcing a restart) in some circumstances. */ @@ -636,16 +635,16 @@ private boolean canRoll(int podId, long timeout, TimeUnit unit, boolean ignoreSs private void restartAndAwaitReadiness(Pod pod, long timeout, TimeUnit unit) throws InterruptedException, UnforceableProblem, FatalProblem { String podName = pod.getMetadata().getName(); - log.debug("{}: Rolling pod {}", reconciliation, podName); + LOGGER.debugCr(reconciliation, "Rolling pod {}", podName); await(restart(pod), timeout, unit, e -> new UnforceableProblem("Error while trying to restart pod " + podName + " to become ready", e)); awaitReadiness(pod, timeout, unit); } private void awaitReadiness(Pod pod, long timeout, TimeUnit unit) throws FatalProblem, InterruptedException { String podName = pod.getMetadata().getName(); - log.debug("{}: Waiting for restarted pod {} to become ready", reconciliation, podName); + LOGGER.debugCr(reconciliation, "Waiting for restarted pod {} to become ready", podName); await(isReady(pod), timeout, unit, e -> new FatalProblem("Error while waiting for restarted pod " + podName + " to become ready", e)); - log.debug("{}: Pod {} is now ready", reconciliation, podName); + LOGGER.debugCr(reconciliation, "Pod {} is now ready", podName); } /** @@ -688,7 +687,7 @@ private static T await(Future future, long timeout, * @return a Future which completes when the Pod has been recreated */ protected Future restart(Pod pod) { - return podOperations.restart("Rolling update of " + namespace + "/" + KafkaCluster.kafkaClusterName(cluster), pod, operationTimeoutMs); + return podOperations.restart(reconciliation, pod, operationTimeoutMs); } /** @@ -698,7 +697,7 @@ protected Admin adminClient(List bootstrapPods, boolean ceShouldBeFatal List podNames = bootstrapPods.stream().map(podId -> podName(podId)).collect(Collectors.toList()); try { String bootstrapHostnames = podNames.stream().map(podName -> KafkaCluster.podDnsName(this.namespace, this.cluster, podName) + ":" + KafkaCluster.REPLICATION_PORT).collect(Collectors.joining(",")); - log.debug("{}: Creating AdminClient for {}", reconciliation, bootstrapHostnames); + LOGGER.debugCr(reconciliation, "Creating AdminClient for {}", bootstrapHostnames); return adminClientProvider.createAdminClient(bootstrapHostnames, this.clusterCaCertSecret, this.coKeySecret, "cluster-operator"); } catch (KafkaException e) { if (ceShouldBeFatal && (e instanceof ConfigException @@ -713,7 +712,7 @@ protected Admin adminClient(List bootstrapPods, boolean ceShouldBeFatal } protected KafkaAvailability availability(Admin ac) { - return new KafkaAvailability(ac, reconciliation); + return new KafkaAvailability(reconciliation, ac); } String podName(int podId) { @@ -751,7 +750,7 @@ int controller(int podId, long timeout, TimeUnit unit, RestartContext restartCon maybeTcpProbe(podId, e, restartContext); } int id = controllerNode == null || Node.noNode().equals(controllerNode) ? 
-1 : controllerNode.id(); - log.debug("{}: Controller is {}", reconciliation, id); + LOGGER.debugCr(reconciliation, "Controller is {}", id); return id; } } @@ -764,7 +763,7 @@ int controller(int podId, long timeout, TimeUnit unit, RestartContext restartCon private void maybeTcpProbe(int podId, Exception executionException, RestartContext restartContext) throws Exception { if (restartContext.connectionError() + numPods * 120_000L >= System.currentTimeMillis()) { try { - log.debug("{}: Probing TCP port due to previous problems connecting to pod {}", reconciliation, podId); + LOGGER.debugCr(reconciliation, "Probing TCP port due to previous problems connecting to pod {}", podId); // do a tcp connect and close (with a short connect timeout) tcpProbe(podName(podId), KafkaCluster.REPLICATION_PORT); } catch (IOException connectionException) { @@ -802,9 +801,9 @@ protected Future isReady(Pod pod) { } protected Future isReady(String namespace, String podName) { - return podOperations.readiness(namespace, podName, pollingIntervalMs, operationTimeoutMs) + return podOperations.readiness(reconciliation, namespace, podName, pollingIntervalMs, operationTimeoutMs) .recover(error -> { - log.warn("{}: Error waiting for pod {}/{} to become ready: {}", reconciliation, namespace, podName, error); + LOGGER.warnCr(reconciliation, "Error waiting for pod {}/{} to become ready: {}", namespace, podName, error); return Future.failedFuture(error); }); } diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/KafkaSetOperator.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/KafkaSetOperator.java index 95a4ba29d7..cde7191565 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/KafkaSetOperator.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/KafkaSetOperator.java @@ -12,17 +12,17 @@ import io.fabric8.kubernetes.api.model.apps.StatefulSet; import io.fabric8.kubernetes.client.KubernetesClient; import io.strimzi.operator.common.AdminClientProvider; +import io.strimzi.operator.common.Reconciliation; +import io.strimzi.operator.common.ReconciliationLogger; import io.vertx.core.Future; import io.vertx.core.Vertx; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; /** * Specialization of {@link StatefulSetOperator} for StatefulSets of Kafka brokers */ public class KafkaSetOperator extends StatefulSetOperator { - private static final Logger log = LogManager.getLogger(KafkaSetOperator.class); + private static final ReconciliationLogger LOGGER = ReconciliationLogger.create(KafkaSetOperator.class); private final AdminClientProvider adminClientProvider; @@ -41,37 +41,37 @@ public KafkaSetOperator(Vertx vertx, KubernetesClient client, long operationTime } @Override - protected boolean shouldIncrementGeneration(StatefulSetDiff diff) { - return !diff.isEmpty() && needsRollingUpdate(diff); + protected boolean shouldIncrementGeneration(Reconciliation reconciliation, StatefulSetDiff diff) { + return !diff.isEmpty() && needsRollingUpdate(reconciliation, diff); } - public static boolean needsRollingUpdate(StatefulSetDiff diff) { + public static boolean needsRollingUpdate(Reconciliation reconciliation, StatefulSetDiff diff) { if (diff.changesLabels()) { - log.debug("Changed labels => needs rolling update"); + LOGGER.debugCr(reconciliation, "Changed labels => needs rolling update"); return true; } if (diff.changesSpecTemplate()) { - log.debug("Changed template spec 
=> needs rolling update"); + LOGGER.debugCr(reconciliation, "Changed template spec => needs rolling update"); return true; } if (diff.changesVolumeClaimTemplates()) { - log.debug("Changed volume claim template => needs rolling update"); + LOGGER.debugCr(reconciliation, "Changed volume claim template => needs rolling update"); return true; } if (diff.changesVolumeSize()) { - log.debug("Changed size of the volume claim template => no need for rolling update"); + LOGGER.debugCr(reconciliation, "Changed size of the volume claim template => no need for rolling update"); return false; } return false; } @Override - public Future maybeRollingUpdate(StatefulSet sts, Function> podNeedsRestart) { - return maybeRollingUpdate(sts, podNeedsRestart, null, null); + public Future maybeRollingUpdate(Reconciliation reconciliation, StatefulSet sts, Function> podNeedsRestart) { + return maybeRollingUpdate(reconciliation, sts, podNeedsRestart, null, null); } @Override - public Future maybeRollingUpdate(StatefulSet sts, Function> podNeedsRestart, + public Future maybeRollingUpdate(Reconciliation reconciliation, StatefulSet sts, Function> podNeedsRestart, Secret clusterCaCertSecret, Secret coKeySecret) { throw new UnsupportedOperationException(); } diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/StatefulSetDiff.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/StatefulSetDiff.java index defd214e03..a898244ece 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/StatefulSetDiff.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/StatefulSetDiff.java @@ -10,9 +10,9 @@ import io.fabric8.zjsonpatch.JsonDiff; import io.strimzi.operator.cluster.model.StorageUtils; import io.strimzi.operator.common.Annotations; +import io.strimzi.operator.common.Reconciliation; +import io.strimzi.operator.common.ReconciliationLogger; import io.strimzi.operator.common.operator.resource.AbstractJsonDiff; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import java.util.regex.Matcher; import java.util.regex.Pattern; @@ -21,7 +21,7 @@ public class StatefulSetDiff extends AbstractJsonDiff { - private static final Logger log = LogManager.getLogger(StatefulSetDiff.class.getName()); + private static final ReconciliationLogger LOGGER = ReconciliationLogger.create(StatefulSetDiff.class.getName()); private static final String SHORTENED_STRIMZI_DOMAIN = Annotations.STRIMZI_DOMAIN.substring(0, Annotations.STRIMZI_DOMAIN.length() - 1); @@ -69,7 +69,7 @@ private static boolean equalsOrPrefix(String path, String pathValue) { private final boolean changesLabels; private final boolean changesSpecReplicas; - public StatefulSetDiff(StatefulSet current, StatefulSet desired) { + public StatefulSetDiff(Reconciliation reconciliation, StatefulSet current, StatefulSet desired) { JsonNode source = patchMapper().valueToTree(current); JsonNode target = patchMapper().valueToTree(desired); JsonNode diff = JsonDiff.asJson(source, target); @@ -83,7 +83,7 @@ public StatefulSetDiff(StatefulSet current, StatefulSet desired) { String pathValue = d.get("path").asText(); if (IGNORABLE_PATHS.matcher(pathValue).matches()) { ObjectMeta md = current.getMetadata(); - log.debug("StatefulSet {}/{} ignoring diff {}", md.getNamespace(), md.getName(), d); + LOGGER.debugCr(reconciliation, "StatefulSet {}/{} ignoring diff {}", md.getNamespace(), md.getName(), d); continue; } Matcher resourceMatchers = 
RESOURCE_PATH.matcher(pathValue); @@ -92,16 +92,17 @@ public StatefulSetDiff(StatefulSet current, StatefulSet desired) { boolean same = compareMemoryAndCpuResources(source, target, pathValue, resourceMatchers); if (same) { ObjectMeta md = current.getMetadata(); - log.debug("StatefulSet {}/{} ignoring diff {}", md.getNamespace(), md.getName(), d); + LOGGER.debugCr(reconciliation, "StatefulSet {}/{} ignoring diff {}", md.getNamespace(), md.getName(), d); continue; } } } - if (log.isDebugEnabled()) { + + if (LOGGER.isDebugEnabled()) { ObjectMeta md = current.getMetadata(); - log.debug("StatefulSet {}/{} differs: {}", md.getNamespace(), md.getName(), d); - log.debug("Current StatefulSet path {} has value {}", pathValue, lookupPath(source, pathValue)); - log.debug("Desired StatefulSet path {} has value {}", pathValue, lookupPath(target, pathValue)); + LOGGER.debugCr(reconciliation, "StatefulSet {}/{} differs: {}", md.getNamespace(), md.getName(), d); + LOGGER.debugCr(reconciliation, "Current StatefulSet path {} has value {}", pathValue, lookupPath(source, pathValue)); + LOGGER.debugCr(reconciliation, "Desired StatefulSet path {} has value {}", pathValue, lookupPath(target, pathValue)); } num++; diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/StatefulSetOperator.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/StatefulSetOperator.java index 7d1e5c2062..3eac3c88b4 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/StatefulSetOperator.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/StatefulSetOperator.java @@ -18,6 +18,8 @@ import io.strimzi.operator.cluster.ClusterOperator; import io.strimzi.operator.cluster.model.KafkaCluster; import io.strimzi.operator.common.Annotations; +import io.strimzi.operator.common.Reconciliation; +import io.strimzi.operator.common.ReconciliationLogger; import io.strimzi.operator.common.Util; import io.strimzi.operator.common.model.Labels; import io.strimzi.operator.common.operator.resource.AbstractScalableResourceOperator; @@ -29,8 +31,6 @@ import io.vertx.core.Future; import io.vertx.core.Promise; import io.vertx.core.Vertx; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import java.util.ArrayList; import java.util.List; @@ -38,14 +38,14 @@ import java.util.function.Function; /** - * Operations for {@code StatefulSets}s, which supports {@link #maybeRollingUpdate(StatefulSet, Function)} + * Operations for {@code StatefulSets}s, which supports {@link #maybeRollingUpdate(Reconciliation, StatefulSet, Function)} * in addition to the usual operations. 
*/ public abstract class StatefulSetOperator extends AbstractScalableResourceOperator> { private static final int NO_GENERATION = -1; private static final int INIT_GENERATION = 0; - private static final Logger log = LogManager.getLogger(StatefulSetOperator.class.getName()); + protected static final ReconciliationLogger LOGGER = ReconciliationLogger.create(StatefulSetOperator.class.getName()); protected final PodOperator podOperations; private final PvcOperator pvcOperations; protected final long operationTimeoutMs; @@ -88,13 +88,14 @@ protected MixedOperation maybeRollingUpdate(StatefulSet sts, Function> podNeedsRestart) { + public Future maybeRollingUpdate(Reconciliation reconciliation, StatefulSet sts, Function> podNeedsRestart) { return getSecrets(sts).compose(compositeFuture -> { - return maybeRollingUpdate(sts, podNeedsRestart, compositeFuture.resultAt(0), compositeFuture.resultAt(1)); + return maybeRollingUpdate(reconciliation, sts, podNeedsRestart, compositeFuture.resultAt(0), compositeFuture.resultAt(1)); }); } @@ -120,12 +121,12 @@ protected CompositeFuture getSecrets(StatefulSet sts) { return CompositeFuture.join(clusterCaCertSecretFuture, coKeySecretFuture); } - public abstract Future maybeRollingUpdate(StatefulSet sts, Function> podNeedsRestart, Secret clusterCaSecret, Secret coKeySecret); + public abstract Future maybeRollingUpdate(Reconciliation reconciliation, StatefulSet sts, Function> podNeedsRestart, Secret clusterCaSecret, Secret coKeySecret); - public Future deletePvc(StatefulSet sts, String pvcName) { + public Future deletePvc(Reconciliation reconciliation, StatefulSet sts, String pvcName) { String namespace = sts.getMetadata().getNamespace(); Promise promise = Promise.promise(); - Future> r = pvcOperations.reconcile(namespace, pvcName, null); + Future> r = pvcOperations.reconcile(reconciliation, namespace, pvcName, null); r.onComplete(h -> { if (h.succeeded()) { promise.complete(); @@ -140,12 +141,13 @@ public Future deletePvc(StatefulSet sts, String pvcName) { * Asynchronously apply the given {@code podNeedsRestart}, if it returns true then restart the pod * given by {@code podName} by deleting it and letting it be recreated by K8s; * in any case return a Future which completes when the given (possibly recreated) pod is ready. + * @param reconciliation Reconciliation object * @param sts The StatefulSet. * @param podName The name of the Pod to possibly restart. * @param podNeedsRestart The function for deciding whether to restart the pod. * @return a Future which completes when the given (possibly recreated) pod is ready. 
*/ - Future maybeRestartPod(StatefulSet sts, String podName, Function> podNeedsRestart) { + Future maybeRestartPod(Reconciliation reconciliation, StatefulSet sts, String podName, Function> podNeedsRestart) { long pollingIntervalMs = 1_000; long timeoutMs = operationTimeoutMs; String namespace = sts.getMetadata().getNamespace(); @@ -154,15 +156,15 @@ Future maybeRestartPod(StatefulSet sts, String podName, Function fut; List reasons = podNeedsRestart.apply(pod); if (reasons != null && !reasons.isEmpty()) { - log.debug("Rolling update of {}/{}: pod {} due to {}", namespace, name, podName, reasons); - fut = restartPod(sts, pod); + LOGGER.debugCr(reconciliation, "Rolling update of {}/{}: pod {} due to {}", namespace, name, podName, reasons); + fut = restartPod(reconciliation, pod); } else { - log.debug("Rolling update of {}/{}: pod {} no need to roll", namespace, name, podName); + LOGGER.debugCr(reconciliation, "Rolling update of {}/{}: pod {} no need to roll", namespace, name, podName); fut = Future.succeededFuture(); } return fut.compose(ignored -> { - log.debug("Rolling update of {}/{}: wait for pod {} readiness", namespace, name, podName); - return podOperations.readiness(namespace, podName, pollingIntervalMs, timeoutMs); + LOGGER.debugCr(reconciliation, "Rolling update of {}/{}: wait for pod {} readiness", namespace, name, podName); + return podOperations.readiness(reconciliation, namespace, podName, pollingIntervalMs, timeoutMs); }); }); } @@ -170,13 +172,12 @@ Future maybeRestartPod(StatefulSet sts, String podName, Function restartPod(StatefulSet sts, Pod pod) { - return podOperations.restart("Rolling update of " + sts.getMetadata().getNamespace() + "/" + sts.getMetadata().getName(), - pod, operationTimeoutMs); + private Future restartPod(Reconciliation reconciliation, Pod pod) { + return podOperations.restart(reconciliation, pod, operationTimeoutMs); } @Override @@ -214,7 +215,7 @@ protected void incrementGeneration(StatefulSet current, StatefulSet desired) { setGeneration(desired, nextGeneration); } - protected abstract boolean shouldIncrementGeneration(StatefulSetDiff diff); + protected abstract boolean shouldIncrementGeneration(Reconciliation reconciliation, StatefulSetDiff diff); /** * Gets the {@code strimzi.io/generation} of the given StatefulSet. @@ -241,19 +242,19 @@ public static int getPodGeneration(Pod resource) { } @Override - protected Future> internalCreate(String namespace, String name, StatefulSet desired) { + protected Future> internalCreate(Reconciliation reconciliation, String namespace, String name, StatefulSet desired) { // Create the STS... Promise> result = Promise.promise(); setGeneration(desired, INIT_GENERATION); - Future> crt = super.internalCreate(namespace, name, desired); + Future> crt = super.internalCreate(reconciliation, namespace, name, desired); if (crt.failed()) { return crt; } // ... then wait for the STS to be ready... - crt.compose(res -> readiness(namespace, desired.getMetadata().getName(), 1_000, operationTimeoutMs).map(res)) + crt.compose(res -> readiness(reconciliation, namespace, desired.getMetadata().getName(), 1_000, operationTimeoutMs).map(res)) // ... 
then wait for all the pods to be ready - .compose(res -> podReadiness(namespace, desired, 1_000, operationTimeoutMs).map(res)) + .compose(res -> podReadiness(reconciliation, namespace, desired, 1_000, operationTimeoutMs).map(res)) .onComplete(result); return result.future(); @@ -262,12 +263,12 @@ protected Future> internalCreate(String namespace, /** * Returns a future that completes when all the pods [0..replicas-1] in the given statefulSet are ready. */ - protected Future podReadiness(String namespace, StatefulSet desired, long pollInterval, long operationTimeoutMs) { + protected Future podReadiness(Reconciliation reconciliation, String namespace, StatefulSet desired, long pollInterval, long operationTimeoutMs) { final int replicas = desired.getSpec().getReplicas(); List waitPodResult = new ArrayList<>(replicas); for (int i = 0; i < replicas; i++) { String podName = getPodName(desired, i); - waitPodResult.add(podOperations.readiness(namespace, podName, pollInterval, operationTimeoutMs)); + waitPodResult.add(podOperations.readiness(reconciliation, namespace, podName, pollInterval, operationTimeoutMs)); } return CompositeFuture.join(waitPodResult); } @@ -278,10 +279,10 @@ protected Future podReadiness(String namespace, StatefulSet desired, long pol * {@inheritDoc} */ @Override - protected Future> internalPatch(String namespace, String name, StatefulSet current, StatefulSet desired) { - StatefulSetDiff diff = new StatefulSetDiff(current, desired); + protected Future> internalPatch(Reconciliation reconciliation, String namespace, String name, StatefulSet current, StatefulSet desired) { + StatefulSetDiff diff = new StatefulSetDiff(reconciliation, current, desired); - if (shouldIncrementGeneration(diff)) { + if (shouldIncrementGeneration(reconciliation, diff)) { incrementGeneration(current, desired); } else { setGeneration(desired, getStsGeneration(current)); @@ -289,17 +290,14 @@ protected Future> internalPatch(String namespace, S // Don't scale via patch desired.getSpec().setReplicas(current.getSpec().getReplicas()); - if (log.isTraceEnabled()) { - log.trace("Patching {} {}/{} to match desired state {}", resourceKind, namespace, name, desired); - } else { - log.debug("Patching {} {}/{}", resourceKind, namespace, name); - } + LOGGER.traceCr(reconciliation, "Patching {} {}/{} to match desired state {}", resourceKind, namespace, name, desired); + LOGGER.debugCr(reconciliation, "Patching {} {}/{}", resourceKind, namespace, name); if (diff.changesVolumeClaimTemplates() || diff.changesVolumeSize()) { // When volume claim templates change, we need to delete the STS and re-create it - return internalReplace(namespace, name, current, desired, false); + return internalReplace(reconciliation, namespace, name, current, desired, false); } else { - return super.internalPatch(namespace, name, current, desired, false); + return super.internalPatch(reconciliation, namespace, name, current, desired, false); } } @@ -317,7 +315,7 @@ protected Future> internalPatch(String namespace, S * * @return Future with result of the reconciliation */ - protected Future> internalReplace(String namespace, String name, StatefulSet current, StatefulSet desired, boolean cascading) { + protected Future> internalReplace(Reconciliation reconciliation, String namespace, String name, StatefulSet current, StatefulSet desired, boolean cascading) { try { Promise> promise = Promise.promise(); @@ -326,16 +324,16 @@ protected Future> internalReplace(String namespace, 
operation().inNamespace(namespace).withName(name).withPropagationPolicy(cascading ? DeletionPropagation.FOREGROUND : DeletionPropagation.ORPHAN).withGracePeriod(-1L).delete(); - Future deletedFut = waitFor(namespace, name, "deleted", pollingIntervalMs, timeoutMs, (ignore1, ignore2) -> { + Future deletedFut = waitFor(reconciliation, namespace, name, "deleted", pollingIntervalMs, timeoutMs, (ignore1, ignore2) -> { StatefulSet sts = get(namespace, name); - log.trace("Checking if {} {} in namespace {} has been deleted", resourceKind, name, namespace); + LOGGER.traceCr(reconciliation, "Checking if {} {} in namespace {} has been deleted", resourceKind, name, namespace); return sts == null; }); deletedFut.onComplete(res -> { if (res.succeeded()) { StatefulSet result = operation().inNamespace(namespace).withName(name).create(desired); - log.debug("{} {} in namespace {} has been replaced", resourceKind, name, namespace); + LOGGER.debugCr(reconciliation, "{} {} in namespace {} has been replaced", resourceKind, name, namespace); promise.complete(wasChanged(current, result) ? ReconcileResult.patched(result) : ReconcileResult.noop(result)); } else { promise.fail(res.cause()); @@ -344,7 +342,7 @@ protected Future> internalReplace(String namespace, return promise.future(); } catch (Exception e) { - log.debug("Caught exception while replacing {} {} in namespace {}", resourceKind, name, namespace, e); + LOGGER.debugCr(reconciliation, "Caught exception while replacing {} {} in namespace {}", resourceKind, name, namespace, e); return Future.failedFuture(e); } } @@ -352,13 +350,14 @@ protected Future> internalReplace(String namespace, /** * Asynchronously deletes the resource with the given {@code name} in the given {@code namespace}. * + * @param reconciliation The reconciliation * @param namespace Namespace of the resource which should be deleted * @param name Name of the resource which should be deleted * @param cascading Defines whether the deletion should be cascading or not * * @return A Future with True if the deletion succeeded and False when it failed. */ - public Future deleteAsync(String namespace, String name, boolean cascading) { + public Future deleteAsync(Reconciliation reconciliation, String namespace, String name, boolean cascading) { Promise result = Promise.promise(); vertx.createSharedWorkerExecutor("kubernetes-ops-pool").executeBlocking( future -> { @@ -366,14 +365,14 @@ public Future deleteAsync(String namespace, String name, boolean cascading Boolean deleted = operation().inNamespace(namespace).withName(name).withPropagationPolicy(cascading ? 
DeletionPropagation.FOREGROUND : DeletionPropagation.ORPHAN).withGracePeriod(-1L).delete(); if (deleted) { - log.debug("{} {} in namespace {} has been deleted", resourceKind, name, namespace); + LOGGER.debugCr(reconciliation, "{} {} in namespace {} has been deleted", resourceKind, name, namespace); future.complete(); } else { - log.debug("{} {} in namespace {} has been not been deleted", resourceKind, name, namespace); + LOGGER.debugCr(reconciliation, "{} {} in namespace {} has been not been deleted", resourceKind, name, namespace); future.fail(resourceKind + " " + name + " in namespace " + namespace + " has been not been deleted"); } } catch (Exception e) { - log.debug("Caught exception while deleting {} {} in namespace {}", resourceKind, name, namespace, e); + LOGGER.debugCr(reconciliation, "Caught exception while deleting {} {} in namespace {}", resourceKind, name, namespace, e); future.fail(e); } }, true, result diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/ZookeeperLeaderFinder.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/ZookeeperLeaderFinder.java index dddd12ef2d..4e57754dc8 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/ZookeeperLeaderFinder.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/ZookeeperLeaderFinder.java @@ -11,6 +11,8 @@ import io.strimzi.operator.cluster.model.Ca; import io.strimzi.operator.cluster.model.ZookeeperCluster; import io.strimzi.operator.common.BackOff; +import io.strimzi.operator.common.Reconciliation; +import io.strimzi.operator.common.ReconciliationLogger; import io.strimzi.operator.common.Util; import io.strimzi.operator.common.model.Labels; import io.strimzi.operator.common.operator.resource.SecretOperator; @@ -23,8 +25,6 @@ import io.vertx.core.net.NetSocket; import io.vertx.core.net.PemKeyCertOptions; import io.vertx.core.net.PemTrustOptions; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import java.io.ByteArrayInputStream; import java.security.cert.CertificateException; @@ -43,7 +43,7 @@ */ public class ZookeeperLeaderFinder { - private static final Logger log = LogManager.getLogger(ZookeeperLeaderFinder.class); + private static final ReconciliationLogger LOGGER = ReconciliationLogger.create(ZookeeperLeaderFinder.class); private static final Pattern LEADER_MODE_PATTERN = Pattern.compile("^Mode: leader$", Pattern.MULTILINE); @@ -59,13 +59,13 @@ public ZookeeperLeaderFinder(Vertx vertx, SecretOperator secretOperator, Supplie this.backOffSupplier = backOffSupplier; } - /*test*/ NetClientOptions clientOptions(Secret coCertKeySecret, Secret clusterCaCertificateSecret) { + /*test*/ NetClientOptions clientOptions(Reconciliation reconciliation, Secret coCertKeySecret, Secret clusterCaCertificateSecret) { return new NetClientOptions() .setConnectTimeout(10_000) .setSsl(true) .setHostnameVerificationAlgorithm("HTTPS") .setPemKeyCertOptions(keyCertOptions(coCertKeySecret)) - .setPemTrustOptions(trustOptions(clusterCaCertificateSecret)); + .setPemTrustOptions(trustOptions(reconciliation, clusterCaCertificateSecret)); } private CertificateFactory x509Factory() { @@ -82,14 +82,14 @@ private CertificateFactory x509Factory() { * Validate the cluster CA certificate(s) passed in the given Secret * and return the PemTrustOptions for trusting them. 
*/ - protected PemTrustOptions trustOptions(Secret clusterCaCertificateSecret) { + protected PemTrustOptions trustOptions(Reconciliation reconciliation, Secret clusterCaCertificateSecret) { Base64.Decoder decoder = Base64.getDecoder(); CertificateFactory x509 = x509Factory(); PemTrustOptions pto = new PemTrustOptions(); for (Map.Entry entry : clusterCaCertificateSecret.getData().entrySet()) { String entryName = entry.getKey(); if (entryName.endsWith(".crt")) { - log.info("Trusting certificate {} from Secret {}", entryName, clusterCaCertificateSecret.getMetadata().getName()); + LOGGER.infoCr(reconciliation, "Trusting certificate {} from Secret {}", entryName, clusterCaCertificateSecret.getMetadata().getName()); byte[] certBytes = decoder.decode(entry.getValue()); try { x509.generateCertificate(new ByteArrayInputStream(certBytes)); @@ -98,7 +98,7 @@ protected PemTrustOptions trustOptions(Secret clusterCaCertificateSecret) { } pto.addCertValue(Buffer.buffer(certBytes)); } else { - log.warn("Ignoring non-certificate {} in Secret {}", entryName, clusterCaCertificateSecret.getMetadata().getName()); + LOGGER.warnCr(reconciliation, "Ignoring non-certificate {} in Secret {}", entryName, clusterCaCertificateSecret.getMetadata().getName()); } } return pto; @@ -136,7 +136,7 @@ protected PemKeyCertOptions keyCertOptions(Secret coCertKeySecret) { * An exponential backoff is used if no ZK node is leader on the attempt to find it. * If there is no leader after 3 attempts then the returned Future completes with {@link #UNKNOWN_LEADER}. */ - Future findZookeeperLeader(String cluster, String namespace, List pods, Secret coKeySecret) { + Future findZookeeperLeader(Reconciliation reconciliation, String cluster, String namespace, List pods, Secret coKeySecret) { if (pods.size() <= 1) { return Future.succeededFuture(pods.size() - 1); } @@ -147,48 +147,48 @@ Future findZookeeperLeader(String cluster, String namespace, List return Future.failedFuture(Util.missingSecretException(namespace, clusterCaSecretName)); } try { - NetClientOptions netClientOptions = clientOptions(coKeySecret, clusterCaCertificateSecret); - return zookeeperLeader(cluster, namespace, pods, netClientOptions); + NetClientOptions netClientOptions = clientOptions(reconciliation, coKeySecret, clusterCaCertificateSecret); + return zookeeperLeader(reconciliation, cluster, namespace, pods, netClientOptions); } catch (Throwable e) { return Future.failedFuture(e); } }); } - private Future zookeeperLeader(String cluster, String namespace, List pods, + private Future zookeeperLeader(Reconciliation reconciliation, String cluster, String namespace, List pods, NetClientOptions netClientOptions) { Promise result = Promise.promise(); BackOff backOff = backOffSupplier.get(); Handler handler = new Handler() { @Override public void handle(Long tid) { - zookeeperLeader(pods, netClientOptions).onComplete(leader -> { + zookeeperLeader(reconciliation, pods, netClientOptions).onComplete(leader -> { if (leader.succeeded()) { if (leader.result() != UNKNOWN_LEADER) { result.complete(leader.result()); } else { - rescheduleOrComplete(tid); + rescheduleOrComplete(reconciliation, tid); } } else { - log.debug("Ignoring error", leader.cause()); + LOGGER.debugOp("Ignoring error", leader.cause()); if (backOff.done()) { result.complete(UNKNOWN_LEADER); } else { - rescheduleOrComplete(tid); + rescheduleOrComplete(reconciliation, tid); } } }); } - void rescheduleOrComplete(Long tid) { + void rescheduleOrComplete(Reconciliation reconciliation, Long tid) { if (backOff.done()) { - 
log.warn("Giving up trying to find the leader of {}/{} after {} attempts taking {}ms", + LOGGER.warnCr(reconciliation, "Giving up trying to find the leader of {}/{} after {} attempts taking {}ms", namespace, cluster, backOff.maxAttempts(), backOff.totalDelayMs()); result.complete(UNKNOWN_LEADER); } else { // Schedule ourselves to run again long delay = backOff.delayMs(); - log.info("No leader found for cluster {} in namespace {}; " + + LOGGER.infoCr(reconciliation, "No leader found for cluster {} in namespace {}; " + "backing off for {}ms (cumulative {}ms)", cluster, namespace, delay, backOff.cumulativeDelayMs()); if (delay < 1) { @@ -205,9 +205,9 @@ void rescheduleOrComplete(Long tid) { /** * Synchronously find the leader by testing each pod in the given list - * using {@link #isLeader(Pod, NetClientOptions)}. + * using {@link #isLeader(Reconciliation, Pod, NetClientOptions)}. */ - private Future zookeeperLeader(List pods, NetClientOptions netClientOptions) { + private Future zookeeperLeader(Reconciliation reconciliation, List pods, NetClientOptions netClientOptions) { try { Future f = Future.succeededFuture(UNKNOWN_LEADER); for (int i = 0; i < pods.size(); i++) { @@ -216,13 +216,13 @@ private Future zookeeperLeader(List pods, NetClientOptions netClie String podName = pod.getMetadata().getName(); f = f.compose(leader -> { if (leader == UNKNOWN_LEADER) { - log.debug("Checker whether {} is leader", podName); - return isLeader(pod, netClientOptions).map(isLeader -> { + LOGGER.debugCr(reconciliation, "Checker whether {} is leader", podName); + return isLeader(reconciliation, pod, netClientOptions).map(isLeader -> { if (isLeader != null && isLeader) { - log.info("Pod {} is leader", podName); + LOGGER.infoCr(reconciliation, "Pod {} is leader", podName); return podNum; } else { - log.info("Pod {} is not a leader", podName); + LOGGER.infoCr(reconciliation, "Pod {} is not a leader", podName); return UNKNOWN_LEADER; } }); @@ -240,23 +240,23 @@ private Future zookeeperLeader(List pods, NetClientOptions netClie /** * Returns whether the given pod is the zookeeper leader. 
*/ - protected Future isLeader(Pod pod, NetClientOptions netClientOptions) { + protected Future isLeader(Reconciliation reconciliation, Pod pod, NetClientOptions netClientOptions) { Promise promise = Promise.promise(); String host = host(pod); int port = port(pod); - log.debug("Connecting to zookeeper on {}:{}", host, port); + LOGGER.debugCr(reconciliation, "Connecting to zookeeper on {}:{}", host, port); vertx.createNetClient(netClientOptions) .connect(port, host, ar -> { if (ar.failed()) { - log.warn("ZK {}:{}: failed to connect to zookeeper:", host, port, ar.cause().getMessage()); + LOGGER.warnCr(reconciliation, "ZK {}:{}: failed to connect to zookeeper:", host, port, ar.cause().getMessage()); promise.fail(ar.cause()); } else { - log.debug("ZK {}:{}: connected", host, port); + LOGGER.debugCr(reconciliation, "ZK {}:{}: connected", host, port); NetSocket socket = ar.result(); socket.exceptionHandler(ex -> { if (!promise.tryFail(ex)) { - log.debug("ZK {}:{}: Ignoring error, since leader status of pod {} is already known: {}", + LOGGER.debugCr(reconciliation, "ZK {}:{}: Ignoring error, since leader status of pod {} is already known: {}", host, port, pod.getMetadata().getName(), ex); } }); @@ -264,7 +264,7 @@ protected Future isLeader(Pod pod, NetClientOptions netClientOptions) { // We could use socket idle timeout, but this times out even if the server just responds // very slowly long timerId = vertx.setTimer(10_000, tid -> { - log.debug("ZK {}:{}: Timeout waiting for Zookeeper {} to close socket", + LOGGER.debugCr(reconciliation, "ZK {}:{}: Timeout waiting for Zookeeper {} to close socket", host, port, socket.remoteAddress()); socket.close(); }); @@ -272,24 +272,24 @@ protected Future isLeader(Pod pod, NetClientOptions netClientOptions) { vertx.cancelTimer(timerId); Matcher matcher = LEADER_MODE_PATTERN.matcher(sb); boolean isLeader = matcher.find(); - log.debug("ZK {}:{}: {} leader", host, port, isLeader ? "is" : "is not"); + LOGGER.debugCr(reconciliation, "ZK {}:{}: {} leader", host, port, isLeader ? 
"is" : "is not"); if (!promise.tryComplete(isLeader)) { - log.debug("ZK {}:{}: Ignoring leader result: Future is already complete", + LOGGER.debugCr(reconciliation, "ZK {}:{}: Ignoring leader result: Future is already complete", host, port); } }); - log.debug("ZK {}:{}: upgrading to TLS", host, port); + LOGGER.debugCr(reconciliation, "ZK {}:{}: upgrading to TLS", host, port); socket.handler(buffer -> { - log.trace("buffer: {}", buffer); + LOGGER.traceCr(reconciliation, "buffer: {}", buffer); sb.append(buffer.toString()); }); - log.debug("ZK {}:{}: sending stat", host, port); + LOGGER.debugCr(reconciliation, "ZK {}:{}: sending stat", host, port); socket.write("stat"); } }); return promise.future().recover(error -> { - log.debug("ZK {}:{}: Error trying to determine whether leader ({}) => not leader", host, port, error); + LOGGER.debugOp("ZK {}:{}: Error trying to determine whether leader ({}) => not leader", host, port, error); return Future.succeededFuture(Boolean.FALSE); }); } diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/ZookeeperScaler.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/ZookeeperScaler.java index 7b4dc3d93b..8e25a22eb3 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/ZookeeperScaler.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/ZookeeperScaler.java @@ -8,12 +8,12 @@ import io.strimzi.operator.cluster.model.Ca; import io.strimzi.operator.cluster.model.ZookeeperCluster; import io.strimzi.operator.common.PasswordGenerator; +import io.strimzi.operator.common.Reconciliation; +import io.strimzi.operator.common.ReconciliationLogger; import io.strimzi.operator.common.Util; import io.vertx.core.Future; import io.vertx.core.Promise; import io.vertx.core.Vertx; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import org.apache.zookeeper.KeeperException; import org.apache.zookeeper.admin.ZooKeeperAdmin; import org.apache.zookeeper.client.ZKClientConfig; @@ -31,7 +31,7 @@ * Class for scaling Zookeeper 3.5 using the ZookeeperAdmin client */ public class ZookeeperScaler implements AutoCloseable { - private static final Logger log = LogManager.getLogger(ZookeeperScaler.class); + private static final ReconciliationLogger LOGGER = ReconciliationLogger.create(ZookeeperScaler.class); private final Vertx vertx; private final ZooKeeperAdminProvider zooAdminProvider; @@ -51,9 +51,12 @@ public class ZookeeperScaler implements AutoCloseable { private final String keyStorePassword; private File keyStoreFile; + private final Reconciliation reconciliation; + /** * ZookeeperScaler constructor * + * @param reconciliation The reconciliation * @param vertx Vertx instance * @param zookeeperConnectionString Connection string to connect to the right Zookeeper * @param zkNodeAddress Function for generating the Zookeeper node addresses @@ -63,8 +66,10 @@ public class ZookeeperScaler implements AutoCloseable { * * @return ZookeeperScaler instance */ - protected ZookeeperScaler(Vertx vertx, ZooKeeperAdminProvider zooAdminProvider, String zookeeperConnectionString, Function zkNodeAddress, Secret clusterCaCertSecret, Secret coKeySecret, long operationTimeoutMs) { - log.debug("Creating Zookeeper Scaler for cluster {}", zookeeperConnectionString); + protected ZookeeperScaler(Reconciliation reconciliation, Vertx vertx, ZooKeeperAdminProvider zooAdminProvider, String zookeeperConnectionString, Function zkNodeAddress, 
Secret clusterCaCertSecret, Secret coKeySecret, long operationTimeoutMs) { + this.reconciliation = reconciliation; + + LOGGER.debugCr(reconciliation, "Creating Zookeeper Scaler for cluster {}", zookeeperConnectionString); this.vertx = vertx; this.zooAdminProvider = zooAdminProvider; @@ -121,13 +126,13 @@ public Future scale(int scaleTo) { public void close() { if (trustStoreFile != null) { if (!trustStoreFile.delete()) { - log.debug("Failed to delete file {}", trustStoreFile); + LOGGER.debugCr(reconciliation, "Failed to delete file {}", trustStoreFile); } } if (keyStoreFile != null) { if (!keyStoreFile.delete()) { - log.debug("Failed to delete file {}", keyStoreFile); + LOGGER.debugCr(reconciliation, "Failed to delete file {}", keyStoreFile); } } } @@ -144,10 +149,10 @@ private Future connect(ZKClientConfig clientConfig) { ZooKeeperAdmin zkAdmin = zooAdminProvider.createZookeeperAdmin( this.zookeeperConnectionString, 10_000, - watchedEvent -> log.debug("Received event {} from ZooKeeperAdmin client connected to {}", watchedEvent, zookeeperConnectionString), + watchedEvent -> LOGGER.debugCr(reconciliation, "Received event {} from ZooKeeperAdmin client connected to {}", watchedEvent, zookeeperConnectionString), clientConfig); - Util.waitFor(vertx, + Util.waitFor(reconciliation, vertx, String.format("ZooKeeperAdmin connection to %s", zookeeperConnectionString), "connected", 1_000, @@ -156,13 +161,13 @@ private Future connect(ZKClientConfig clientConfig) { .onSuccess(nothing -> connected.complete(zkAdmin)) .onFailure(cause -> { String message = String.format("Failed to connect to Zookeeper %s. Connection was not ready in %d ms.", zookeeperConnectionString, operationTimeoutMs); - log.warn(message); + LOGGER.warnCr(reconciliation, message); closeConnection(zkAdmin) .onComplete(nothing -> connected.fail(new ZookeeperScalingException(message, cause))); }); } catch (IOException e) { - log.warn("Failed to connect to {} to scale Zookeeper", zookeeperConnectionString, e); + LOGGER.warnCr(reconciliation, "Failed to connect to {} to scale Zookeeper", zookeeperConnectionString, e); connected.fail(new ZookeeperScalingException("Failed to connect to Zookeeper " + zookeeperConnectionString, e)); } @@ -182,10 +187,10 @@ private Future scaleTo(ZooKeeperAdmin zkAdmin, Map current Map desiredServers = generateConfig(scaleTo, zkNodeAddress); if (isDifferent(currentServers, desiredServers)) { - log.debug("The Zookeeper server configuration needs to be updated"); + LOGGER.debugCr(reconciliation, "The Zookeeper server configuration needs to be updated"); return updateConfig(zkAdmin, desiredServers).map((Void) null); } else { - log.debug("The Zookeeper server configuration is already up to date"); + LOGGER.debugCr(reconciliation, "The Zookeeper server configuration is already up to date"); return Future.succeededFuture(); } } @@ -202,10 +207,10 @@ private Future> getCurrentConfig(ZooKeeperAdmin zkAdmin) try { byte[] config = zkAdmin.getConfig(false, null); Map servers = parseConfig(config); - log.debug("Current Zookeeper configuration is {}", servers); + LOGGER.debugCr(reconciliation, "Current Zookeeper configuration is {}", servers); promise.complete(servers); } catch (KeeperException | InterruptedException e) { - log.warn("Failed to get current Zookeeper server configuration", e); + LOGGER.warnCr(reconciliation, "Failed to get current Zookeeper server configuration", e); promise.fail(new ZookeeperScalingException("Failed to get current Zookeeper server configuration", e)); } }, false, configPromise); @@ -224,14 
+229,14 @@ private Future> updateConfig(ZooKeeperAdmin zkAdmin, Map { try { - log.debug("Updating Zookeeper configuration to {}", newServers); + LOGGER.debugCr(reconciliation, "Updating Zookeeper configuration to {}", newServers); byte[] newConfig = zkAdmin.reconfigure(null, null, serversMapToList(newServers), -1, null); Map servers = parseConfig(newConfig); - log.debug("New Zookeeper configuration is {}", servers); + LOGGER.debugCr(reconciliation, "New Zookeeper configuration is {}", servers); promise.complete(servers); } catch (KeeperException | InterruptedException e) { - log.warn("Failed to update Zookeeper server configuration", e); + LOGGER.warnCr(reconciliation, "Failed to update Zookeeper server configuration", e); promise.fail(new ZookeeperScalingException("Failed to update Zookeeper server configuration", e)); } }, false, configPromise); @@ -251,7 +256,7 @@ private Future closeConnection(ZooKeeperAdmin zkAdmin) { zkAdmin.close((int) operationTimeoutMs); promise.complete(); } catch (Exception e) { - log.warn("Failed to close the ZooKeeperAdmin", e); + LOGGER.warnCr(reconciliation, "Failed to close the ZooKeeperAdmin", e); promise.fail(e); } }, false, closePromise); @@ -289,7 +294,7 @@ private Future getClientConfig() { promise.complete(clientConfig); } catch (Exception e) { - log.warn("Failed to create Zookeeper client configuration", e); + LOGGER.warnCr(reconciliation, "Failed to create Zookeeper client configuration", e); promise.fail(new ZookeeperScalingException("Failed to create Zookeeper client configuration", e)); } }, false, configPromise); diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/ZookeeperScalerProvider.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/ZookeeperScalerProvider.java index 7094b8ec0f..0857446c60 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/ZookeeperScalerProvider.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/ZookeeperScalerProvider.java @@ -5,6 +5,7 @@ package io.strimzi.operator.cluster.operator.resource; import io.fabric8.kubernetes.api.model.Secret; +import io.strimzi.operator.common.Reconciliation; import io.vertx.core.Vertx; import java.util.function.Function; @@ -16,6 +17,7 @@ public interface ZookeeperScalerProvider { /** * Creates an instance of ZookeeperScaler * + * @param reconciliation The reconciliation * @param vertx Vertx instance * @param zookeeperConnectionString Connection string to connect to the right Zookeeper * @param zkNodeAddress Function for generating the Zookeeper node addresses @@ -25,5 +27,5 @@ public interface ZookeeperScalerProvider { * * @return ZookeeperScaler instance */ - ZookeeperScaler createZookeeperScaler(Vertx vertx, String zookeeperConnectionString, Function zkNodeAddress, Secret clusterCaCertSecret, Secret coKeySecret, long operationTimeoutMs); + ZookeeperScaler createZookeeperScaler(Reconciliation reconciliation, Vertx vertx, String zookeeperConnectionString, Function zkNodeAddress, Secret clusterCaCertSecret, Secret coKeySecret, long operationTimeoutMs); } diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/ZookeeperSetOperator.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/ZookeeperSetOperator.java index df543d960f..3b9326af16 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/ZookeeperSetOperator.java +++ 
b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/ZookeeperSetOperator.java @@ -9,12 +9,12 @@ import io.fabric8.kubernetes.api.model.apps.StatefulSet; import io.fabric8.kubernetes.client.KubernetesClient; import io.strimzi.api.kafka.model.KafkaResources; +import io.strimzi.operator.common.Reconciliation; +import io.strimzi.operator.common.ReconciliationLogger; import io.strimzi.operator.common.model.Labels; import io.vertx.core.Future; import io.vertx.core.Promise; import io.vertx.core.Vertx; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import java.util.ArrayList; import java.util.List; @@ -26,7 +26,7 @@ */ public class ZookeeperSetOperator extends StatefulSetOperator { - private static final Logger log = LogManager.getLogger(ZookeeperSetOperator.class); + private static final ReconciliationLogger LOGGER = ReconciliationLogger.create(ZookeeperSetOperator.class); private final ZookeeperLeaderFinder leaderFinder; /** @@ -43,36 +43,36 @@ public ZookeeperSetOperator(Vertx vertx, KubernetesClient client, ZookeeperLeade } @Override - protected boolean shouldIncrementGeneration(StatefulSetDiff diff) { - return !diff.isEmpty() && needsRollingUpdate(diff); + protected boolean shouldIncrementGeneration(Reconciliation reconciliation, StatefulSetDiff diff) { + return !diff.isEmpty() && needsRollingUpdate(reconciliation, diff); } - public static boolean needsRollingUpdate(StatefulSetDiff diff) { + public static boolean needsRollingUpdate(Reconciliation reconciliation, StatefulSetDiff diff) { if (diff.changesLabels()) { - log.debug("Changed labels => needs rolling update"); + LOGGER.debugCr(reconciliation, "Changed labels => needs rolling update"); return true; } if (diff.changesSpecTemplate()) { - log.debug("Changed template spec => needs rolling update"); + LOGGER.debugCr(reconciliation, "Changed template spec => needs rolling update"); return true; } if (diff.changesVolumeClaimTemplates()) { - log.debug("Changed volume claim template => needs rolling update"); + LOGGER.debugCr(reconciliation, "Changed volume claim template => needs rolling update"); return true; } if (diff.changesVolumeSize()) { - log.debug("Changed size of the volume claim template => no need for rolling update"); + LOGGER.debugCr(reconciliation, "Changed size of the volume claim template => no need for rolling update"); return false; } return false; } @Override - public Future maybeRollingUpdate(StatefulSet sts, Function> podRestart, Secret clusterCaSecret, Secret coKeySecret) { + public Future maybeRollingUpdate(Reconciliation reconciliation, StatefulSet sts, Function> podRestart, Secret clusterCaSecret, Secret coKeySecret) { String namespace = sts.getMetadata().getNamespace(); String name = sts.getMetadata().getName(); final int replicas = sts.getSpec().getReplicas(); - log.debug("Considering rolling update of {}/{}", namespace, name); + LOGGER.debugCr(reconciliation, "Considering rolling update of {}/{}", namespace, name); boolean zkRoll = false; ArrayList pods = new ArrayList<>(replicas); @@ -89,20 +89,20 @@ public Future maybeRollingUpdate(StatefulSet sts, Function promise = Promise.promise(); rollFuture = promise.future(); - Future leaderFuture = leaderFinder.findZookeeperLeader(cluster, namespace, pods, coKeySecret); + Future leaderFuture = leaderFinder.findZookeeperLeader(reconciliation, cluster, namespace, pods, coKeySecret); leaderFuture.compose(leader -> { - log.debug("Zookeeper leader is " + (leader == ZookeeperLeaderFinder.UNKNOWN_LEADER ? 
"unknown" : "pod " + leader)); + LOGGER.debugCr(reconciliation, "Zookeeper leader is " + (leader == ZookeeperLeaderFinder.UNKNOWN_LEADER ? "unknown" : "pod " + leader)); Future fut = Future.succeededFuture(); // Then roll each non-leader pod for (int i = 0; i < replicas; i++) { String podName = KafkaResources.zookeeperPodName(cluster, i); if (i != leader) { - log.debug("Possibly restarting non-leader pod {}", podName); + LOGGER.debugCr(reconciliation, "Possibly restarting non-leader pod {}", podName); // roll the pod and wait until it is ready // this prevents rolling into faulty state (note: this applies just for ZK pods) - fut = fut.compose(ignore -> maybeRestartPod(sts, podName, podRestart)); + fut = fut.compose(ignore -> maybeRestartPod(reconciliation, sts, podName, podRestart)); } else { - log.debug("Deferring restart of leader {}", podName); + LOGGER.debugCr(reconciliation, "Deferring restart of leader {}", podName); } } if (leader == ZookeeperLeaderFinder.UNKNOWN_LEADER) { @@ -111,8 +111,8 @@ public Future maybeRollingUpdate(StatefulSet sts, Function { // the leader is rolled as the last - log.debug("Possibly restarting leader pod (previously deferred) {}", leader); - return maybeRestartPod(sts, KafkaResources.zookeeperPodName(cluster, leader), podRestart); + LOGGER.debugCr(reconciliation, "Possibly restarting leader pod (previously deferred) {}", leader); + return maybeRestartPod(reconciliation, sts, KafkaResources.zookeeperPodName(cluster, leader), podRestart); }); } }).onComplete(promise); diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/ClusterOperatorTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/ClusterOperatorTest.java index eb7db95e00..9ce7186f10 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/ClusterOperatorTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/ClusterOperatorTest.java @@ -25,8 +25,8 @@ import io.vertx.micrometer.MicrometerMetricsOptions; import io.vertx.micrometer.VertxPrometheusOptions; import okhttp3.OkHttpClient; -import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; @@ -54,7 +54,7 @@ @ExtendWith(VertxExtension.class) public class ClusterOperatorTest { private static Vertx vertx; - private static final Logger log = LogManager.getLogger(ClusterOperatorTest.class); + private static final Logger LOGGER = LogManager.getLogger(ClusterOperatorTest.class); private static Map buildEnv(String namespaces) { Map env = new HashMap<>(); @@ -114,6 +114,7 @@ public void testStartStopAllNamespacesOnK8s(VertxTestContext context) throws Int /** * Asserts that Cluster Operator starts and then stops a verticle in each namespace + * * @param context test context passed in for assertions * @param namespaces namespaces the operator should be watching and operating on */ @@ -176,7 +177,7 @@ private void startStop(VertxTestContext context, String namespaces, boolean open for (String deploymentId: vertx.deploymentIDs()) { vertx.undeploy(deploymentId, asyncResult -> { if (asyncResult.failed()) { - log.error("Failed to undeploy {}", deploymentId); + LOGGER.error("Failed to undeploy {}", deploymentId); context.failNow(asyncResult.cause()); } latch.countDown(); @@ -194,6 +195,7 @@ private void startStop(VertxTestContext context, String namespaces, boolean open /** * Asserts that Cluster Operator starts and 
then stops a verticle in every namespace using the namespace wildcard (*) + * * @param context test context passed in for assertions * @param namespaces namespaces the operator should be watching and operating on */ @@ -251,7 +253,7 @@ private void startStopAllNamespaces(VertxTestContext context, String namespaces, for (String deploymentId: vertx.deploymentIDs()) { vertx.undeploy(deploymentId, asyncResult -> { if (asyncResult.failed()) { - log.error("Failed to undeploy {}", deploymentId); + LOGGER.error("Failed to undeploy {}", deploymentId); context.failNow(asyncResult.cause()); } latch.countDown(); diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/ResourceUtils.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/ResourceUtils.java index ebbc1a90d5..476e5c0009 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/ResourceUtils.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/ResourceUtils.java @@ -69,6 +69,7 @@ import io.strimzi.operator.common.BackOff; import io.strimzi.operator.common.MetricsProvider; import io.strimzi.operator.common.PasswordGenerator; +import io.strimzi.operator.common.Reconciliation; import io.strimzi.operator.common.model.Labels; import io.strimzi.operator.common.operator.MockCertManager; import io.strimzi.operator.common.operator.resource.BuildConfigOperator; @@ -223,17 +224,17 @@ public static List createKafkaInitialSecrets(String namespace, String na return secrets; } - public static ClusterCa createInitialClusterCa(String clusterName, Secret initialClusterCaCert, Secret initialClusterCaKey) { - return new ClusterCa(new MockCertManager(), new PasswordGenerator(10, "a", "a"), clusterName, initialClusterCaCert, initialClusterCaKey); + public static ClusterCa createInitialClusterCa(Reconciliation reconciliation, String clusterName, Secret initialClusterCaCert, Secret initialClusterCaKey) { + return new ClusterCa(reconciliation, new MockCertManager(), new PasswordGenerator(10, "a", "a"), clusterName, initialClusterCaCert, initialClusterCaKey); } - public static ClientsCa createInitialClientsCa(String clusterName, Secret initialClientsCaCert, Secret initialClientsCaKey) { - return new ClientsCa(new MockCertManager(), new PasswordGenerator(10, "a", "a"), + public static ClientsCa createInitialClientsCa(Reconciliation reconciliation, String clusterName, Secret initialClientsCaCert, Secret initialClientsCaKey) { + return new ClientsCa(reconciliation, new MockCertManager(), + new PasswordGenerator(10, "a", "a"), KafkaCluster.clientsCaCertSecretName(clusterName), initialClientsCaCert, KafkaCluster.clientsCaKeySecretName(clusterName), - initialClientsCaKey, - 365, 30, true, null); + initialClientsCaKey, 365, 30, true, null); } public static Secret createInitialCaCertSecret(String clusterNamespace, String clusterName, String secretName, @@ -621,12 +622,12 @@ public static ZookeeperLeaderFinder zookeeperLeaderFinder(Vertx vertx, Kubernete return new ZookeeperLeaderFinder(vertx, new SecretOperator(vertx, client), () -> new BackOff(5_000, 2, 4)) { @Override - protected Future isLeader(Pod pod, NetClientOptions options) { + protected Future isLeader(Reconciliation reconciliation, Pod pod, NetClientOptions options) { return Future.succeededFuture(true); } @Override - protected PemTrustOptions trustOptions(Secret s) { + protected PemTrustOptions trustOptions(Reconciliation reconciliation, Secret s) { return new PemTrustOptions(); } @@ -692,7 +693,7 @@ public Admin createAdminClient(String 
bootstrapHostnames, Secret clusterCaCertSe public static ZookeeperScalerProvider zookeeperScalerProvider() { return new ZookeeperScalerProvider() { @Override - public ZookeeperScaler createZookeeperScaler(Vertx vertx, String zookeeperConnectionString, Function zkNodeAddress, Secret clusterCaCertSecret, Secret coKeySecret, long operationTimeoutMs) { + public ZookeeperScaler createZookeeperScaler(Reconciliation reconciliation, Vertx vertx, String zookeeperConnectionString, Function zkNodeAddress, Secret clusterCaCertSecret, Secret coKeySecret, long operationTimeoutMs) { ZookeeperScaler mockZooScaler = mock(ZookeeperScaler.class); when(mockZooScaler.scale(anyInt())).thenReturn(Future.succeededFuture()); return mockZooScaler; @@ -769,14 +770,14 @@ public static ResourceOperatorSupplier supplierWithMocks(boolean openShift) { metricsProvider(), adminClientProvider()); - when(supplier.serviceAccountOperations.reconcile(anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); - when(supplier.roleBindingOperations.reconcile(anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); - when(supplier.roleOperations.reconcile(anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); - when(supplier.clusterRoleBindingOperator.reconcile(anyString(), any())).thenReturn(Future.succeededFuture()); + when(supplier.serviceAccountOperations.reconcile(any(), anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); + when(supplier.roleBindingOperations.reconcile(any(), anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); + when(supplier.roleOperations.reconcile(any(), anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); + when(supplier.clusterRoleBindingOperator.reconcile(any(), anyString(), any())).thenReturn(Future.succeededFuture()); if (openShift) { - when(supplier.routeOperations.reconcile(anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); - when(supplier.routeOperations.hasAddress(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(supplier.routeOperations.reconcile(any(), anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); + when(supplier.routeOperations.hasAddress(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); when(supplier.routeOperations.get(anyString(), anyString())).thenAnswer(i -> { return new RouteBuilder() .withNewStatus() @@ -788,8 +789,8 @@ public static ResourceOperatorSupplier supplierWithMocks(boolean openShift) { }); } - when(supplier.serviceOperations.hasIngressAddress(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(supplier.serviceOperations.hasNodePort(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(supplier.serviceOperations.hasIngressAddress(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(supplier.serviceOperations.hasNodePort(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); when(supplier.serviceOperations.get(anyString(), anyString())).thenAnswer(i -> new ServiceBuilder() .withNewStatus() diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/AbstractConfigurationTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/AbstractConfigurationTest.java index 5f6f9050fb..5166c5e5ed 100644 --- 
a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/AbstractConfigurationTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/AbstractConfigurationTest.java @@ -9,6 +9,7 @@ import java.util.Map; import io.strimzi.operator.common.InvalidConfigParameterException; +import io.strimzi.operator.common.Reconciliation; import io.strimzi.operator.common.model.OrderedProperties; import io.strimzi.test.annotations.ParallelSuite; import io.strimzi.test.annotations.ParallelTest; @@ -232,7 +233,7 @@ public void testKafkaZookeeperTimeout() { conf.put("zookeeper.connection.timeout.ms", "42"); // valid conf.put("zookeeper.connection.timeout", "42"); // invalid - KafkaConfiguration kc = new KafkaConfiguration(conf.entrySet()); + KafkaConfiguration kc = new KafkaConfiguration(Reconciliation.DUMMY_RECONCILIATION, conf.entrySet()); assertThat(kc.asOrderedProperties().asMap().get("valid"), is("validValue")); assertThat(kc.asOrderedProperties().asMap().get("zookeeper.connection.whatever"), is(nullValue())); @@ -246,7 +247,7 @@ public void testKafkaCipherSuiteOverride() { Map conf = new HashMap<>(); conf.put("ssl.cipher.suites", "cipher1,cipher2,cipher3"); // valid - KafkaConfiguration kc = new KafkaConfiguration(conf.entrySet()); + KafkaConfiguration kc = new KafkaConfiguration(Reconciliation.DUMMY_RECONCILIATION, conf.entrySet()); assertThat(kc.asOrderedProperties().asMap().get("ssl.cipher.suites"), is("cipher1,cipher2,cipher3")); } @@ -258,7 +259,7 @@ public void testKafkaConnectHostnameVerification() { conf.put("ssl.endpoint.identification.algorithm", ""); // valid conf.put("ssl.keystore.location", "/tmp/my.keystore"); // invalid - KafkaConnectConfiguration configuration = new KafkaConnectConfiguration(conf.entrySet()); + KafkaConnectConfiguration configuration = new KafkaConnectConfiguration(Reconciliation.DUMMY_RECONCILIATION, conf.entrySet()); assertThat(configuration.asOrderedProperties().asMap().get("key.converter"), is("my.package.Converter")); assertThat(configuration.asOrderedProperties().asMap().get("ssl.keystore.location"), is(nullValue())); @@ -272,7 +273,7 @@ public void testKafkaMirrorMakerConsumerHostnameVerification() { conf.put("ssl.endpoint.identification.algorithm", ""); // valid conf.put("ssl.keystore.location", "/tmp/my.keystore"); // invalid - KafkaMirrorMakerConsumerConfiguration configuration = new KafkaMirrorMakerConsumerConfiguration(conf.entrySet()); + KafkaMirrorMakerConsumerConfiguration configuration = new KafkaMirrorMakerConsumerConfiguration(Reconciliation.DUMMY_RECONCILIATION, conf.entrySet()); assertThat(configuration.asOrderedProperties().asMap().get("compression.type"), is("zstd")); assertThat(configuration.asOrderedProperties().asMap().get("ssl.keystore.location"), is(nullValue())); @@ -286,7 +287,7 @@ public void testKafkaMirrorMakerProducerHostnameVerification() { conf.put("ssl.endpoint.identification.algorithm", ""); // valid conf.put("ssl.keystore.location", "/tmp/my.keystore"); // invalid - KafkaMirrorMakerProducerConfiguration configuration = new KafkaMirrorMakerProducerConfiguration(conf.entrySet()); + KafkaMirrorMakerProducerConfiguration configuration = new KafkaMirrorMakerProducerConfiguration(Reconciliation.DUMMY_RECONCILIATION, conf.entrySet()); assertThat(configuration.asOrderedProperties().asMap().get("compression.type"), is("zstd")); assertThat(configuration.asOrderedProperties().asMap().get("ssl.keystore.location"), is(nullValue())); @@ -322,7 +323,7 @@ class TestConfiguration extends AbstractConfiguration { * 
pairs. */ public TestConfiguration(String configuration) { - super(configuration, FORBIDDEN_PREFIXES, DEFAULTS); + super(Reconciliation.DUMMY_RECONCILIATION, configuration, FORBIDDEN_PREFIXES, DEFAULTS); } /** @@ -332,7 +333,7 @@ public TestConfiguration(String configuration) { * @param jsonOptions Json object with configuration options as key ad value pairs. */ public TestConfiguration(JsonObject jsonOptions) { - super(jsonOptions, FORBIDDEN_PREFIXES, DEFAULTS); + super(Reconciliation.DUMMY_RECONCILIATION, jsonOptions, FORBIDDEN_PREFIXES, DEFAULTS); } } @@ -352,7 +353,7 @@ class TestConfigurationWithoutDefaults extends AbstractConfiguration { * pairs. */ public TestConfigurationWithoutDefaults(String configuration) { - super(configuration, FORBIDDEN_PREFIXES); + super(Reconciliation.DUMMY_RECONCILIATION, configuration, FORBIDDEN_PREFIXES); } /** @@ -362,6 +363,6 @@ public TestConfigurationWithoutDefaults(String configuration) { * @param jsonOptions Json object with configuration options as key ad value pairs. */ public TestConfigurationWithoutDefaults(JsonObject jsonOptions) { - super(jsonOptions, FORBIDDEN_PREFIXES); + super(Reconciliation.DUMMY_RECONCILIATION, jsonOptions, FORBIDDEN_PREFIXES); } } diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/AbstractModelTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/AbstractModelTest.java index d11bad96ba..bdb2bad069 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/AbstractModelTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/AbstractModelTest.java @@ -13,6 +13,7 @@ import io.strimzi.api.kafka.model.JvmOptions; import io.strimzi.api.kafka.model.Kafka; import io.strimzi.api.kafka.model.KafkaBuilder; +import io.strimzi.operator.common.Reconciliation; import io.strimzi.operator.common.model.Labels; import io.strimzi.test.TestUtils; @@ -35,7 +36,7 @@ public class AbstractModelTest { // Implement AbstractModel to test the abstract class private class Model extends AbstractModel { public Model(HasMetadata resource) { - super(resource, "model-app"); + super(new Reconciliation("test", resource.getKind(), resource.getMetadata().getNamespace(), resource.getMetadata().getName()), resource, "model-app"); } @Override diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/CaRenewalTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/CaRenewalTest.java index 58aefde006..f92c4bce3b 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/CaRenewalTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/CaRenewalTest.java @@ -8,6 +8,7 @@ import io.fabric8.kubernetes.api.model.SecretBuilder; import io.strimzi.certs.CertAndKey; import io.strimzi.certs.Subject; +import io.strimzi.operator.common.Reconciliation; import io.strimzi.test.annotations.ParallelSuite; import io.strimzi.test.annotations.ParallelTest; import io.vertx.junit5.VertxExtension; @@ -29,7 +30,7 @@ public class CaRenewalTest { @ParallelTest public void renewalOfStatefulSetCertificatesWithNullSecret() throws IOException { - Ca mockedCa = new Ca(null, null, null, null, null, null, null, 2, 1, true, null) { + Ca mockedCa = new Ca(Reconciliation.DUMMY_RECONCILIATION, null, null, null, null, null, null, null, 2, 1, true, null) { private AtomicInteger invocationCount = new AtomicInteger(0); @Override @@ -62,7 +63,7 @@ protected CertAndKey generateSignedCert(Subject subject, Function podNameFn = i -> 
"pod" + i; boolean isMaintenanceTimeWindowsSatisfied = true; - Map newCerts = mockedCa.maybeCopyOrGenerateCerts(replicas, + Map newCerts = mockedCa.maybeCopyOrGenerateCerts(Reconciliation.DUMMY_RECONCILIATION, replicas, subjectFn, null, podNameFn, @@ -86,7 +87,7 @@ protected CertAndKey generateSignedCert(Subject subject, @ParallelTest public void renewalOfStatefulSetCertificatesWithCaRenewal() throws IOException { - Ca mockedCa = new Ca(null, null, null, null, null, null, null, 2, 1, true, null) { + Ca mockedCa = new Ca(Reconciliation.DUMMY_RECONCILIATION, null, null, null, null, null, null, null, 2, 1, true, null) { private AtomicInteger invocationCount = new AtomicInteger(0); @Override @@ -137,7 +138,8 @@ protected CertAndKey generateSignedCert(Subject subject, Function podNameFn = i -> "pod" + i; boolean isMaintenanceTimeWindowsSatisfied = true; - Map newCerts = mockedCa.maybeCopyOrGenerateCerts(replicas, + Map newCerts = mockedCa.maybeCopyOrGenerateCerts(Reconciliation.DUMMY_RECONCILIATION, + replicas, subjectFn, initialSecret, podNameFn, @@ -161,7 +163,7 @@ protected CertAndKey generateSignedCert(Subject subject, @ParallelTest public void renewalOfStatefulSetCertificatesDelayedRenewalInWindow() throws IOException { - Ca mockedCa = new Ca(null, null, null, null, null, null, null, 2, 1, true, null) { + Ca mockedCa = new Ca(Reconciliation.DUMMY_RECONCILIATION, null, null, null, null, null, null, null, 2, 1, true, null) { private AtomicInteger invocationCount = new AtomicInteger(0); @Override @@ -222,7 +224,8 @@ protected CertAndKey generateSignedCert(Subject subject, Function podNameFn = i -> "pod" + i; boolean isMaintenanceTimeWindowsSatisfied = true; - Map newCerts = mockedCa.maybeCopyOrGenerateCerts(replicas, + Map newCerts = mockedCa.maybeCopyOrGenerateCerts(Reconciliation.DUMMY_RECONCILIATION, + replicas, subjectFn, initialSecret, podNameFn, @@ -246,7 +249,7 @@ protected CertAndKey generateSignedCert(Subject subject, @ParallelTest public void renewalOfStatefulSetCertificatesDelayedRenewalOutsideWindow() throws IOException { - Ca mockedCa = new Ca(null, null, null, null, null, null, null, 2, 1, true, null) { + Ca mockedCa = new Ca(Reconciliation.DUMMY_RECONCILIATION, null, null, null, null, null, null, null, 2, 1, true, null) { private AtomicInteger invocationCount = new AtomicInteger(0); @Override @@ -307,7 +310,8 @@ protected CertAndKey generateSignedCert(Subject subject, Function podNameFn = i -> "pod" + i; boolean isMaintenanceTimeWindowsSatisfied = false; - Map newCerts = mockedCa.maybeCopyOrGenerateCerts(replicas, + Map newCerts = mockedCa.maybeCopyOrGenerateCerts(Reconciliation.DUMMY_RECONCILIATION, + replicas, subjectFn, initialSecret, podNameFn, diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/CruiseControlTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/CruiseControlTest.java index d404175840..45a82f43fe 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/CruiseControlTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/CruiseControlTest.java @@ -52,6 +52,7 @@ import io.strimzi.operator.cluster.KafkaVersionTestUtils; import io.strimzi.operator.cluster.ResourceUtils; import io.strimzi.operator.cluster.model.cruisecontrol.Capacity; +import io.strimzi.operator.common.Reconciliation; import io.strimzi.operator.common.model.Labels; import io.strimzi.test.TestUtils; import io.strimzi.test.annotations.ParallelSuite; @@ -111,7 +112,7 @@ public class CruiseControlTest { 
private final Map kafkaConfig = singletonMap(CruiseControl.MIN_INSYNC_REPLICAS, minInsyncReplicas); private final Map zooConfig = singletonMap("foo", "bar"); - CruiseControlConfiguration configuration = new CruiseControlConfiguration(new HashMap() {{ + CruiseControlConfiguration configuration = new CruiseControlConfiguration(Reconciliation.DUMMY_RECONCILIATION, new HashMap() {{ putAll(CruiseControlConfiguration.getCruiseControlDefaultPropertiesMap()); put("num.partition.metrics.windows", "2"); }}.entrySet() @@ -146,7 +147,7 @@ public class CruiseControlTest { .endSpec() .build(); - private final CruiseControl cc = CruiseControl.fromCrd(resource, VERSIONS); + private final CruiseControl cc = CruiseControl.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); private Map expectedLabels(String name) { return TestUtils.map(Labels.STRIMZI_CLUSTER_LABEL, this.cluster, @@ -184,7 +185,7 @@ private List getExpectedEnvVars() { } public String getCapacityConfigurationFromEnvVar(Kafka resource, String envVar) { - CruiseControl cc = CruiseControl.fromCrd(resource, VERSIONS); + CruiseControl cc = CruiseControl.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); Deployment dep = cc.generateDeployment(true, null, null, null); List containers = dep.getSpec().getTemplate().getSpec().getContainers(); @@ -391,7 +392,7 @@ public void testContainerTemplateEnvVars() { Kafka resource = ResourceUtils.createKafka(namespace, cluster, replicas, image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, kafkaConfig, zooConfig, kafkaStorage, zkStorage, kafkaLogJson, zooLogJson, null, cruiseControlSpec); - CruiseControl cc = CruiseControl.fromCrd(resource, VERSIONS); + CruiseControl cc = CruiseControl.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); List envVarList = cc.getEnvVars(); @@ -425,7 +426,7 @@ public void testContainerTemplateEnvVarsWithKeyConflict() { Kafka resource = ResourceUtils.createKafka(namespace, cluster, replicas, image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, kafkaConfig, zooConfig, kafkaStorage, zkStorage, kafkaLogJson, zooLogJson, null, cruiseControlSpec); - CruiseControl cc = CruiseControl.fromCrd(resource, VERSIONS); + CruiseControl cc = CruiseControl.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); List envVarList = cc.getEnvVars(); @@ -438,7 +439,7 @@ public void testCruiseControlNotDeployed() { Kafka resource = ResourceUtils.createKafka(namespace, cluster, replicas, image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, kafkaConfig, zooConfig, kafkaStorage, zkStorage, kafkaLogJson, zooLogJson, null, null); - CruiseControl cc = CruiseControl.fromCrd(resource, VERSIONS); + CruiseControl cc = CruiseControl.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); try { assertThat(cc.generateDeployment(true, null, null, null), is(nullValue())); @@ -471,7 +472,7 @@ public void testGenerateServiceWhenDisabled() { Kafka resource = ResourceUtils.createKafka(namespace, cluster, replicas, image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, kafkaConfig, zooConfig, kafkaStorage, zkStorage, kafkaLogJson, zooLogJson, null, null); - CruiseControl cc = CruiseControl.fromCrd(resource, VERSIONS); + CruiseControl cc = CruiseControl.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); assertThrows(NullPointerException.class, () -> cc.generateService()); } @@ -561,7 +562,7 @@ public void testTemplate() { .endSpec() .build(); - CruiseControl cc = CruiseControl.fromCrd(resource, VERSIONS); + 
CruiseControl cc = CruiseControl.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); // Check Deployment Deployment dep = cc.generateDeployment(true, depAnots, null, null); @@ -615,7 +616,7 @@ public void testPodDisruptionBudget() { .endSpec() .build(); - CruiseControl cc = CruiseControl.fromCrd(resource, VERSIONS); + CruiseControl cc = CruiseControl.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); Deployment dep = cc.generateDeployment(true, null, null, null); List containers = dep.getSpec().getTemplate().getSpec().getContainers(); Container ccContainer = containers.stream().filter(container -> ccImage.equals(container.getImage())).findFirst().get(); @@ -649,7 +650,7 @@ public void testResources() { .endSpec() .build(); - CruiseControl cc = CruiseControl.fromCrd(resource, VERSIONS); + CruiseControl cc = CruiseControl.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); Deployment dep = cc.generateDeployment(true, null, null, null); List containers = dep.getSpec().getTemplate().getSpec().getContainers(); Container ccContainer = containers.stream().filter(container -> ccImage.equals(container.getImage())).findFirst().get(); @@ -682,7 +683,7 @@ public void testProbeConfiguration() { .endSpec() .build(); - CruiseControl cc = CruiseControl.fromCrd(resource, VERSIONS); + CruiseControl cc = CruiseControl.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); Deployment dep = cc.generateDeployment(true, null, null, null); List containers = dep.getSpec().getTemplate().getSpec().getContainers(); @@ -717,7 +718,7 @@ public void testSecurityContext() { .endSpec() .build(); - CruiseControl cc = CruiseControl.fromCrd(resource, VERSIONS); + CruiseControl cc = CruiseControl.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); Deployment dep = cc.generateDeployment(true, null, null, null); assertThat(dep.getSpec().getTemplate().getSpec().getSecurityContext(), is(notNullValue())); @@ -764,7 +765,7 @@ public void testCruiseControlContainerSecurityContext() { .endSpec() .build(); - CruiseControl cc = CruiseControl.fromCrd(resource, VERSIONS); + CruiseControl cc = CruiseControl.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); Deployment dep = cc.generateDeployment(true, null, null, null); @@ -807,7 +808,7 @@ public void testTlsSidecarContainerSecurityContext() { .endSpec() .build(); - CruiseControl cc = CruiseControl.fromCrd(resource, VERSIONS); + CruiseControl cc = CruiseControl.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); Deployment dep = cc.generateDeployment(true, null, null, null); @@ -901,7 +902,7 @@ public void testGoalsCheck() { .endSpec() .build(); - CruiseControl cruiseControlWithCustomGoals = CruiseControl.fromCrd(resourceWithCustomGoals, VERSIONS); + CruiseControl cruiseControlWithCustomGoals = CruiseControl.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resourceWithCustomGoals, VERSIONS); String anomalyDetectionGoals = cruiseControlWithCustomGoals .getConfiguration().asOrderedProperties().asMap() @@ -927,7 +928,7 @@ public void testMetricsParsingFromConfigMap() { .endSpec() .build(); - CruiseControl cc = CruiseControl.fromCrd(kafkaAssembly, VERSIONS); + CruiseControl cc = CruiseControl.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS); assertThat(cc.isMetricsEnabled(), is(true)); assertThat(cc.getMetricsConfigInCm(), is(metrics)); @@ -943,7 +944,7 @@ public void testMetricsParsingNoMetrics() { .endSpec() .build(); - CruiseControl cc = CruiseControl.fromCrd(kafkaAssembly, 
VERSIONS); + CruiseControl cc = CruiseControl.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS); assertThat(cc.isMetricsEnabled(), is(false)); assertThat(cc.getMetricsConfigInCm(), is(nullValue())); diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/EntityOperatorTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/EntityOperatorTest.java index 93073be7d8..540b41a663 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/EntityOperatorTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/EntityOperatorTest.java @@ -38,6 +38,7 @@ import io.strimzi.api.kafka.model.template.ContainerTemplate; import io.strimzi.operator.cluster.KafkaVersionTestUtils; import io.strimzi.operator.cluster.ResourceUtils; +import io.strimzi.operator.common.Reconciliation; import io.strimzi.operator.common.model.Labels; import io.strimzi.test.TestUtils; import io.strimzi.test.annotations.ParallelSuite; @@ -105,7 +106,7 @@ static Map volumeMounts(List mounts) { .endSpec() .build(); - private final EntityOperator entityOperator = EntityOperator.fromCrd(resource, VERSIONS); + private final EntityOperator entityOperator = EntityOperator.fromCrd(new Reconciliation("test", resource.getKind(), resource.getMetadata().getNamespace(), resource.getMetadata().getName()), resource, VERSIONS); @ParallelTest public void testGenerateDeployment() { @@ -155,7 +156,7 @@ public void testFromCrdNoTopicAndUserOperatorInEntityOperator() { .withEntityOperator(entityOperatorSpec) .endSpec() .build(); - EntityOperator entityOperator = EntityOperator.fromCrd(resource, VERSIONS); + EntityOperator entityOperator = EntityOperator.fromCrd(new Reconciliation("test", resource.getKind(), resource.getMetadata().getNamespace(), resource.getMetadata().getName()), resource, VERSIONS); assertThat(entityOperator.getTopicOperator(), is(nullValue())); assertThat(entityOperator.getUserOperator(), is(nullValue())); @@ -163,7 +164,7 @@ public void testFromCrdNoTopicAndUserOperatorInEntityOperator() { @ParallelTest public void withAffinityAndTolerations() throws IOException { - ResourceTester helper = new ResourceTester<>(Kafka.class, VERSIONS, EntityOperator::fromCrd, this.getClass().getSimpleName() + ".withAffinityAndTolerations"); + ResourceTester helper = new ResourceTester<>(Kafka.class, VERSIONS, (kAssembly, versions) -> EntityOperator.fromCrd(new Reconciliation("test", resource.getKind(), resource.getMetadata().getNamespace(), resource.getMetadata().getName()), kAssembly, versions), this.getClass().getSimpleName() + ".withAffinityAndTolerations"); helper.assertDesiredResource("-DeploymentAffinity.yaml", zc -> zc.generateDeployment(true, Collections.EMPTY_MAP, null, null).getSpec().getTemplate().getSpec().getAffinity()); helper.assertDesiredResource("-DeploymentTolerations.yaml", zc -> zc.generateDeployment(true, Collections.EMPTY_MAP, null, null).getSpec().getTemplate().getSpec().getTolerations()); } @@ -241,7 +242,7 @@ public void testTemplate() { .endEntityOperator() .endSpec() .build(); - EntityOperator entityOperator = EntityOperator.fromCrd(resource, VERSIONS); + EntityOperator entityOperator = EntityOperator.fromCrd(new Reconciliation("test", resource.getKind(), resource.getMetadata().getNamespace(), resource.getMetadata().getName()), resource, VERSIONS); // Check Deployment Deployment dep = entityOperator.generateDeployment(true, Collections.EMPTY_MAP, null, null); @@ -278,7 +279,7 @@ public void testGracePeriod() { 
.endEntityOperator() .endSpec() .build(); - EntityOperator eo = EntityOperator.fromCrd(resource, VERSIONS); + EntityOperator eo = EntityOperator.fromCrd(new Reconciliation("test", resource.getKind(), resource.getMetadata().getNamespace(), resource.getMetadata().getName()), resource, VERSIONS); Deployment dep = eo.generateDeployment(true, Collections.EMPTY_MAP, null, null); assertThat(dep.getSpec().getTemplate().getSpec().getTerminationGracePeriodSeconds(), is(Long.valueOf(123))); @@ -296,7 +297,7 @@ public void testDefaultGracePeriod() { .endEntityOperator() .endSpec() .build(); - EntityOperator eo = EntityOperator.fromCrd(resource, VERSIONS); + EntityOperator eo = EntityOperator.fromCrd(new Reconciliation("test", resource.getKind(), resource.getMetadata().getNamespace(), resource.getMetadata().getName()), resource, VERSIONS); Deployment dep = eo.generateDeployment(true, Collections.EMPTY_MAP, null, null); assertThat(dep.getSpec().getTemplate().getSpec().getTerminationGracePeriodSeconds(), is(Long.valueOf(30))); @@ -322,7 +323,7 @@ public void testImagePullSecrets() { .endEntityOperator() .endSpec() .build(); - EntityOperator eo = EntityOperator.fromCrd(resource, VERSIONS); + EntityOperator eo = EntityOperator.fromCrd(new Reconciliation("test", resource.getKind(), resource.getMetadata().getNamespace(), resource.getMetadata().getName()), resource, VERSIONS); Deployment dep = eo.generateDeployment(true, Collections.EMPTY_MAP, null, null); assertThat(dep.getSpec().getTemplate().getSpec().getImagePullSecrets().size(), is(2)); @@ -347,7 +348,7 @@ public void testImagePullSecretsFromCo() { .endEntityOperator() .endSpec() .build(); - EntityOperator eo = EntityOperator.fromCrd(resource, VERSIONS); + EntityOperator eo = EntityOperator.fromCrd(new Reconciliation("test", resource.getKind(), resource.getMetadata().getNamespace(), resource.getMetadata().getName()), resource, VERSIONS); Deployment dep = eo.generateDeployment(true, Collections.EMPTY_MAP, null, secrets); assertThat(dep.getSpec().getTemplate().getSpec().getImagePullSecrets().size(), is(2)); @@ -373,7 +374,7 @@ public void testImagePullSecretsFromBoth() { .endEntityOperator() .endSpec() .build(); - EntityOperator eo = EntityOperator.fromCrd(resource, VERSIONS); + EntityOperator eo = EntityOperator.fromCrd(new Reconciliation("test", resource.getKind(), resource.getMetadata().getNamespace(), resource.getMetadata().getName()), resource, VERSIONS); Deployment dep = eo.generateDeployment(true, Collections.EMPTY_MAP, null, singletonList(secret1)); assertThat(dep.getSpec().getTemplate().getSpec().getImagePullSecrets().size(), is(1)); @@ -391,7 +392,7 @@ public void testDefaultImagePullSecrets() { .endEntityOperator() .endSpec() .build(); - EntityOperator eo = EntityOperator.fromCrd(resource, VERSIONS); + EntityOperator eo = EntityOperator.fromCrd(new Reconciliation("test", resource.getKind(), resource.getMetadata().getNamespace(), resource.getMetadata().getName()), resource, VERSIONS); Deployment dep = eo.generateDeployment(true, Collections.EMPTY_MAP, null, null); assertThat(dep.getSpec().getTemplate().getSpec().getImagePullSecrets(), is(nullValue())); @@ -412,7 +413,7 @@ public void testSecurityContext() { .endEntityOperator() .endSpec() .build(); - EntityOperator eo = EntityOperator.fromCrd(resource, VERSIONS); + EntityOperator eo = EntityOperator.fromCrd(new Reconciliation("test", resource.getKind(), resource.getMetadata().getNamespace(), resource.getMetadata().getName()), resource, VERSIONS); Deployment dep = eo.generateDeployment(true, 
Collections.EMPTY_MAP, null, null); assertThat(dep.getSpec().getTemplate().getSpec().getSecurityContext(), is(notNullValue())); @@ -431,7 +432,7 @@ public void testDefaultSecurityContext() { .endEntityOperator() .endSpec() .build(); - EntityOperator eo = EntityOperator.fromCrd(resource, VERSIONS); + EntityOperator eo = EntityOperator.fromCrd(new Reconciliation("test", resource.getKind(), resource.getMetadata().getNamespace(), resource.getMetadata().getName()), resource, VERSIONS); Deployment dep = eo.generateDeployment(true, Collections.EMPTY_MAP, null, null); assertThat(dep.getSpec().getTemplate().getSpec().getSecurityContext(), is(nullValue())); @@ -457,7 +458,7 @@ public void testStunnelImage() { .endKafka() .endSpec() .build(); - assertThat(EntityOperator.fromCrd(kafka, VERSIONS).getContainers(ImagePullPolicy.ALWAYS).get(2).getImage(), is("foo1")); + assertThat(EntityOperator.fromCrd(new Reconciliation("test", kafka.getKind(), kafka.getMetadata().getNamespace(), kafka.getMetadata().getName()), kafka, VERSIONS).getContainers(ImagePullPolicy.ALWAYS).get(2).getImage(), is("foo1")); kafka = new KafkaBuilder(resource) .editSpec() @@ -471,7 +472,7 @@ public void testStunnelImage() { .endKafka() .endSpec() .build(); - assertThat(EntityOperator.fromCrd(kafka, VERSIONS).getContainers(ImagePullPolicy.ALWAYS).get(2).getImage(), is("foo2")); + assertThat(EntityOperator.fromCrd(new Reconciliation("test", kafka.getKind(), kafka.getMetadata().getNamespace(), kafka.getMetadata().getName()), kafka, VERSIONS).getContainers(ImagePullPolicy.ALWAYS).get(2).getImage(), is("foo2")); kafka = new KafkaBuilder(resource) .editSpec() @@ -486,7 +487,7 @@ public void testStunnelImage() { .endKafka() .endSpec() .build(); - assertThat(EntityOperator.fromCrd(kafka, VERSIONS).getContainers(ImagePullPolicy.ALWAYS).get(2).getImage(), is(KafkaVersionTestUtils.DEFAULT_KAFKA_IMAGE)); + assertThat(EntityOperator.fromCrd(new Reconciliation("test", kafka.getKind(), kafka.getMetadata().getNamespace(), kafka.getMetadata().getName()), kafka, VERSIONS).getContainers(ImagePullPolicy.ALWAYS).get(2).getImage(), is(KafkaVersionTestUtils.DEFAULT_KAFKA_IMAGE)); kafka = new KafkaBuilder(resource) .editSpec() @@ -501,7 +502,7 @@ public void testStunnelImage() { .endKafka() .endSpec() .build(); - assertThat(EntityOperator.fromCrd(kafka, VERSIONS).getContainers(ImagePullPolicy.ALWAYS).get(2).getImage(), is(KafkaVersionTestUtils.DEFAULT_KAFKA_IMAGE)); + assertThat(EntityOperator.fromCrd(new Reconciliation("test", kafka.getKind(), kafka.getMetadata().getNamespace(), kafka.getMetadata().getName()), kafka, VERSIONS).getContainers(ImagePullPolicy.ALWAYS).get(2).getImage(), is(KafkaVersionTestUtils.DEFAULT_KAFKA_IMAGE)); } @ParallelTest @@ -514,7 +515,7 @@ public void testImagePullPolicy() { .endEntityOperator() .endSpec() .build(); - EntityOperator eo = EntityOperator.fromCrd(resource, VERSIONS); + EntityOperator eo = EntityOperator.fromCrd(new Reconciliation("test", resource.getKind(), resource.getMetadata().getNamespace(), resource.getMetadata().getName()), resource, VERSIONS); Deployment dep = eo.generateDeployment(true, Collections.EMPTY_MAP, ImagePullPolicy.ALWAYS, null); assertThat(dep.getSpec().getTemplate().getSpec().getContainers().get(0).getImagePullPolicy(), is(ImagePullPolicy.ALWAYS.toString())); @@ -567,7 +568,7 @@ public void testTopicOperatorContainerEnvVars() { .endSpec() .build(); - List containerEnvVars = EntityOperator.fromCrd(resource, VERSIONS).getTopicOperator().getEnvVars(); + List containerEnvVars = 
EntityOperator.fromCrd(new Reconciliation("test", resource.getKind(), resource.getMetadata().getNamespace(), resource.getMetadata().getName()), resource, VERSIONS).getTopicOperator().getEnvVars(); assertThat("Failed to correctly set container environment variable: " + testEnvOneKey, containerEnvVars.stream().filter(env -> testEnvOneKey.equals(env.getName())) @@ -612,7 +613,7 @@ public void testTopicOperatorContainerEnvVarsConflict() { .endSpec() .build(); - List containerEnvVars = EntityOperator.fromCrd(resource, VERSIONS).getTopicOperator().getEnvVars(); + List containerEnvVars = EntityOperator.fromCrd(new Reconciliation("test", resource.getKind(), resource.getMetadata().getNamespace(), resource.getMetadata().getName()), resource, VERSIONS).getTopicOperator().getEnvVars(); assertThat("Failed to prevent over writing existing container environment variable: " + testEnvOneKey, containerEnvVars.stream().filter(env -> testEnvOneKey.equals(env.getName())) @@ -658,7 +659,7 @@ public void testUserOperatorContainerEnvVars() { .endSpec() .build(); - List containerEnvVars = EntityOperator.fromCrd(resource, VERSIONS).getUserOperator().getEnvVars(); + List containerEnvVars = EntityOperator.fromCrd(new Reconciliation("test", resource.getKind(), resource.getMetadata().getNamespace(), resource.getMetadata().getName()), resource, VERSIONS).getUserOperator().getEnvVars(); assertThat("Failed to correctly set container environment variable: " + testEnvOneKey, containerEnvVars.stream().filter(env -> testEnvOneKey.equals(env.getName())) @@ -702,7 +703,7 @@ public void testUserOperatorContainerEnvVarsConflict() { .endSpec() .build(); - List containerEnvVars = EntityOperator.fromCrd(resource, VERSIONS).getUserOperator().getEnvVars(); + List containerEnvVars = EntityOperator.fromCrd(new Reconciliation("test", resource.getKind(), resource.getMetadata().getNamespace(), resource.getMetadata().getName()), resource, VERSIONS).getUserOperator().getEnvVars(); assertThat("Failed to prevent over writing existing container environment variable: " + testEnvOneKey, containerEnvVars.stream().filter(env -> testEnvOneKey.equals(env.getName())) @@ -747,7 +748,7 @@ public void testTlsSideCarContainerEnvVars() { .build(); - List containerEnvVars = EntityOperator.fromCrd(resource, VERSIONS).getTlsSidecarEnvVars(); + List containerEnvVars = EntityOperator.fromCrd(new Reconciliation("test", resource.getKind(), resource.getMetadata().getNamespace(), resource.getMetadata().getName()), resource, VERSIONS).getTlsSidecarEnvVars(); assertThat("Failed to correctly set container environment variable: " + testEnvOneKey, containerEnvVars.stream().filter(env -> testEnvOneKey.equals(env.getName())) @@ -786,7 +787,7 @@ public void testTlsSidecarContainerEnvVarsConflict() { .build(); - List containerEnvVars = EntityOperator.fromCrd(resource, VERSIONS).getTlsSidecarEnvVars(); + List containerEnvVars = EntityOperator.fromCrd(new Reconciliation("test", resource.getKind(), resource.getMetadata().getNamespace(), resource.getMetadata().getName()), resource, VERSIONS).getTlsSidecarEnvVars(); assertThat("Failed to prevent over writing existing container environment variable: " + testEnvOneKey, containerEnvVars.stream().filter(env -> testEnvOneKey.equals(env.getName())) @@ -820,7 +821,7 @@ public void testUserOperatorContainerSecurityContext() { .endSpec() .build(); - EntityOperator eo = EntityOperator.fromCrd(resource, VERSIONS); + EntityOperator eo = EntityOperator.fromCrd(new Reconciliation("test", resource.getKind(), 
resource.getMetadata().getNamespace(), resource.getMetadata().getName()), resource, VERSIONS); Deployment deployment = eo.generateDeployment(false, null, null, null); assertThat(deployment.getSpec().getTemplate().getSpec().getContainers(), @@ -857,7 +858,7 @@ public void testTopicOperatorContainerSecurityContext() { .endSpec() .build(); - EntityOperator eo = EntityOperator.fromCrd(resource, VERSIONS); + EntityOperator eo = EntityOperator.fromCrd(new Reconciliation("test", resource.getKind(), resource.getMetadata().getNamespace(), resource.getMetadata().getName()), resource, VERSIONS); Deployment deployment = eo.generateDeployment(false, null, null, null); assertThat(deployment.getSpec().getTemplate().getSpec().getContainers(), @@ -894,7 +895,7 @@ public void testTlsSidecarContainerSecurityContext() { .endSpec() .build(); - EntityOperator eo = EntityOperator.fromCrd(resource, VERSIONS); + EntityOperator eo = EntityOperator.fromCrd(new Reconciliation("test", resource.getKind(), resource.getMetadata().getNamespace(), resource.getMetadata().getName()), resource, VERSIONS); Deployment deployment = eo.generateDeployment(false, null, null, null); assertThat(deployment.getSpec().getTemplate().getSpec().getContainers(), @@ -913,7 +914,7 @@ public void testRole() { .endSpec() .build(); - EntityOperator eo = EntityOperator.fromCrd(resource, VERSIONS); + EntityOperator eo = EntityOperator.fromCrd(new Reconciliation("test", resource.getKind(), resource.getMetadata().getNamespace(), resource.getMetadata().getName()), resource, VERSIONS); Role role = eo.generateRole(namespace, namespace); assertThat(role.getMetadata().getName(), is("foo-entity-operator")); @@ -947,7 +948,7 @@ public void testRoleInDifferentNamespace() { .endSpec() .build(); - EntityOperator eo = EntityOperator.fromCrd(resource, VERSIONS); + EntityOperator eo = EntityOperator.fromCrd(new Reconciliation("test", resource.getKind(), resource.getMetadata().getNamespace(), resource.getMetadata().getName()), resource, VERSIONS); Role role = eo.generateRole(namespace, namespace); assertThat(role.getMetadata().getOwnerReferences().get(0), is(entityOperator.createOwnerReference())); diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/EntityTopicOperatorTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/EntityTopicOperatorTest.java index 65cfe8180f..b94a4e2935 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/EntityTopicOperatorTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/EntityTopicOperatorTest.java @@ -19,6 +19,7 @@ import io.strimzi.api.kafka.model.SystemProperty; import io.strimzi.api.kafka.model.SystemPropertyBuilder; import io.strimzi.operator.cluster.ResourceUtils; +import io.strimzi.operator.common.Reconciliation; import io.strimzi.test.annotations.ParallelSuite; import io.strimzi.test.annotations.ParallelTest; @@ -101,7 +102,7 @@ public class EntityTopicOperatorTest { .endSpec() .build(); - private final EntityTopicOperator entityTopicOperator = EntityTopicOperator.fromCrd(resource); + private final EntityTopicOperator entityTopicOperator = EntityTopicOperator.fromCrd(new Reconciliation("test", resource.getKind(), resource.getMetadata().getNamespace(), resource.getMetadata().getName()), resource); private List getExpectedEnvVars() { List expected = new ArrayList<>(); @@ -163,7 +164,7 @@ public void testFromCrdDefault() { .withEntityOperator(entityOperatorSpec) .endSpec() .build(); - EntityTopicOperator entityTopicOperator = 
EntityTopicOperator.fromCrd(resource); + EntityTopicOperator entityTopicOperator = EntityTopicOperator.fromCrd(new Reconciliation("test", resource.getKind(), resource.getMetadata().getNamespace(), resource.getMetadata().getName()), resource); assertThat(entityTopicOperator.getWatchedNamespace(), is(namespace)); assertThat(entityTopicOperator.getImage(), is("quay.io/strimzi/operator:latest")); @@ -184,7 +185,7 @@ public void testFromCrdDefault() { public void testFromCrdNoEntityOperator() { Kafka resource = ResourceUtils.createKafka(namespace, cluster, replicas, image, healthDelay, healthTimeout); - EntityTopicOperator entityTopicOperator = EntityTopicOperator.fromCrd(resource); + EntityTopicOperator entityTopicOperator = EntityTopicOperator.fromCrd(new Reconciliation("test", resource.getKind(), resource.getMetadata().getNamespace(), resource.getMetadata().getName()), resource); assertThat(entityTopicOperator, is(nullValue())); } @@ -197,7 +198,7 @@ public void testFromCrdNoTopicOperatorInEntityOperator() { .withEntityOperator(entityOperatorSpec) .endSpec() .build(); - EntityTopicOperator entityTopicOperator = EntityTopicOperator.fromCrd(resource); + EntityTopicOperator entityTopicOperator = EntityTopicOperator.fromCrd(new Reconciliation("test", resource.getKind(), resource.getMetadata().getNamespace(), resource.getMetadata().getName()), resource); assertThat(entityTopicOperator, is(nullValue())); } diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/EntityUserOperatorTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/EntityUserOperatorTest.java index bc177e11e3..03e61e2b47 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/EntityUserOperatorTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/EntityUserOperatorTest.java @@ -22,6 +22,7 @@ import io.strimzi.api.kafka.model.SystemProperty; import io.strimzi.api.kafka.model.SystemPropertyBuilder; import io.strimzi.operator.cluster.ResourceUtils; +import io.strimzi.operator.common.Reconciliation; import io.strimzi.test.annotations.ParallelSuite; import io.strimzi.test.annotations.ParallelTest; @@ -108,7 +109,7 @@ public class EntityUserOperatorTest { .endSpec() .build(); - private final EntityUserOperator entityUserOperator = EntityUserOperator.fromCrd(resource); + private final EntityUserOperator entityUserOperator = EntityUserOperator.fromCrd(new Reconciliation("test", resource.getKind(), resource.getMetadata().getNamespace(), resource.getMetadata().getName()), resource); private List getExpectedEnvVars() { List expected = new ArrayList<>(); @@ -184,7 +185,7 @@ public void testFromCrdDefault() { .withEntityOperator(entityOperatorSpec) .endSpec() .build(); - EntityUserOperator entityUserOperator = EntityUserOperator.fromCrd(resource); + EntityUserOperator entityUserOperator = EntityUserOperator.fromCrd(new Reconciliation("test", resource.getKind(), resource.getMetadata().getNamespace(), resource.getMetadata().getName()), resource); assertThat(entityUserOperator.getWatchedNamespace(), is(namespace)); assertThat(entityUserOperator.getImage(), is("quay.io/strimzi/operator:latest")); @@ -204,7 +205,7 @@ public void testFromCrdDefault() { public void testFromCrdNoEntityOperator() { Kafka resource = ResourceUtils.createKafka(namespace, cluster, replicas, image, healthDelay, healthTimeout); - EntityUserOperator entityUserOperator = EntityUserOperator.fromCrd(resource); + EntityUserOperator entityUserOperator = EntityUserOperator.fromCrd(new 
Reconciliation("test", resource.getKind(), resource.getMetadata().getNamespace(), resource.getMetadata().getName()), resource); assertThat(entityUserOperator, is(nullValue())); } @@ -217,7 +218,7 @@ public void testFromCrdNoUserOperatorInEntityOperator() { .withEntityOperator(entityOperatorSpec) .endSpec() .build(); - EntityUserOperator entityUserOperator = EntityUserOperator.fromCrd(resource); + EntityUserOperator entityUserOperator = EntityUserOperator.fromCrd(new Reconciliation("test", resource.getKind(), resource.getMetadata().getNamespace(), resource.getMetadata().getName()), resource); assertThat(entityUserOperator, is(nullValue())); } @@ -262,7 +263,7 @@ public void testFromCrdCaValidityAndRenewal() { .withClientsCa(ca) .endSpec() .build(); - EntityUserOperator entityUserOperator = EntityUserOperator.fromCrd(customValues); + EntityUserOperator entityUserOperator = EntityUserOperator.fromCrd(new Reconciliation("test", resource.getKind(), resource.getMetadata().getNamespace(), resource.getMetadata().getName()), customValues); Kafka defaultValues = new KafkaBuilder(ResourceUtils.createKafka(namespace, cluster, replicas, image, healthDelay, healthTimeout)) @@ -270,7 +271,7 @@ public void testFromCrdCaValidityAndRenewal() { .withEntityOperator(entityOperatorSpec) .endSpec() .build(); - EntityUserOperator entityUserOperator2 = EntityUserOperator.fromCrd(defaultValues); + EntityUserOperator entityUserOperator2 = EntityUserOperator.fromCrd(new Reconciliation("test", resource.getKind(), resource.getMetadata().getNamespace(), resource.getMetadata().getName()), defaultValues); assertThat(entityUserOperator.getClientsCaValidityDays(), is(42L)); assertThat(entityUserOperator.getClientsCaRenewalDays(), is(69L)); @@ -297,7 +298,7 @@ image, healthDelay, healthTimeout, singletonMap("animal", "wombat"), jmxMetricsC .endSpec() .build(); - EntityUserOperator f = EntityUserOperator.fromCrd(kafkaAssembly); + EntityUserOperator f = EntityUserOperator.fromCrd(new Reconciliation("test", resource.getKind(), resource.getMetadata().getNamespace(), resource.getMetadata().getName()), kafkaAssembly); List envvar = f.getEnvVars(); assertThat(Integer.parseInt(envvar.stream().filter(a -> a.getName().equals(EntityUserOperator.ENV_VAR_CLIENTS_CA_VALIDITY)).findFirst().get().getValue()), is(validity)); assertThat(Integer.parseInt(envvar.stream().filter(a -> a.getName().equals(EntityUserOperator.ENV_VAR_CLIENTS_CA_RENEWAL)).findFirst().get().getValue()), is(renewal)); diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/JmxTransTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/JmxTransTest.java index 088b8d82b2..6c0930dfb3 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/JmxTransTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/JmxTransTest.java @@ -33,6 +33,7 @@ import io.strimzi.operator.cluster.model.components.JmxTransOutputWriter; import io.strimzi.operator.cluster.model.components.JmxTransQueries; import io.strimzi.operator.cluster.model.components.JmxTransServer; +import io.strimzi.operator.common.Reconciliation; import io.strimzi.operator.common.model.Labels; import io.strimzi.test.TestUtils; import io.strimzi.test.annotations.ParallelSuite; @@ -94,7 +95,7 @@ public class JmxTransTest { .endSpec() .build(); - private final JmxTrans jmxTrans = JmxTrans.fromCrd(kafkaAssembly, VERSIONS); + private final JmxTrans jmxTrans = JmxTrans.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS); 
@ParallelTest public void testOutputDefinitionWriterDeserialization() { @@ -256,7 +257,7 @@ public void testTemplate() { .endJmxTrans() .endSpec() .build(); - JmxTrans jmxTrans = JmxTrans.fromCrd(resource, VERSIONS); + JmxTrans jmxTrans = JmxTrans.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); // Check Deployment Deployment dep = jmxTrans.generateDeployment(null, null); @@ -311,7 +312,7 @@ public void testContainerEnvVars() { .endSpec() .build(); - List envVars = JmxTrans.fromCrd(resource, VERSIONS).getEnvVars(); + List envVars = JmxTrans.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS).getEnvVars(); assertThat("Failed to correctly set container environment variable: " + testEnvOneKey, envVars.stream().filter(env -> testEnvOneKey.equals(env.getName())) @@ -346,7 +347,7 @@ public void testContainerEnvVarsConflict() { .endSpec() .build(); - List envVars = JmxTrans.fromCrd(resource, VERSIONS).getEnvVars(); + List envVars = JmxTrans.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS).getEnvVars(); assertThat("Failed to prevent over writing existing container environment variable: " + testEnvOneKey, envVars.stream().filter(env -> testEnvOneKey.equals(env.getName())) @@ -378,7 +379,7 @@ public void testContainerSecurityContext() { .endSpec() .build(); - JmxTrans jmxTrans = JmxTrans.fromCrd(resource, VERSIONS); + JmxTrans jmxTrans = JmxTrans.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); assertThat(jmxTrans.templateContainerSecurityContext, is(securityContext)); Deployment deployment = jmxTrans.generateDeployment(null, null); diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaBridgeClusterTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaBridgeClusterTest.java index d7047471ed..a670b344b3 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaBridgeClusterTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaBridgeClusterTest.java @@ -44,6 +44,7 @@ import io.strimzi.kafka.oauth.server.ServerConfig; import io.strimzi.operator.cluster.KafkaVersionTestUtils; import io.strimzi.operator.cluster.ResourceUtils; +import io.strimzi.operator.common.Reconciliation; import io.strimzi.operator.common.model.Labels; import io.strimzi.test.TestUtils; import io.strimzi.test.annotations.ParallelSuite; @@ -90,7 +91,7 @@ public class KafkaBridgeClusterTest { .withNewHttp(8080) .endSpec() .build(); - private final KafkaBridgeCluster kbc = KafkaBridgeCluster.fromCrd(resource, VERSIONS); + private final KafkaBridgeCluster kbc = KafkaBridgeCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); private Map expectedLabels(String name) { return TestUtils.map(Labels.STRIMZI_CLUSTER_LABEL, this.cluster, @@ -133,7 +134,7 @@ protected List getExpectedEnvVars() { @ParallelTest public void testDefaultValues() { - KafkaBridgeCluster kbc = KafkaBridgeCluster.fromCrd(ResourceUtils.createEmptyKafkaBridge(namespace, cluster), VERSIONS); + KafkaBridgeCluster kbc = KafkaBridgeCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, ResourceUtils.createEmptyKafkaBridge(namespace, cluster), VERSIONS); assertThat(kbc.image, is("quay.io/strimzi/kafka-bridge:latest")); assertThat(kbc.replicas, is(KafkaBridgeCluster.DEFAULT_REPLICAS)); @@ -218,7 +219,7 @@ public void testGenerateDeploymentWithTls() { .endTls() .endSpec() .build(); - KafkaBridgeCluster kbc = KafkaBridgeCluster.fromCrd(resource, VERSIONS); + KafkaBridgeCluster kbc = 
KafkaBridgeCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); Deployment dep = kbc.generateDeployment(emptyMap(), true, null, null); assertThat(dep.getSpec().getTemplate().getSpec().getVolumes().get(2).getName(), is("my-secret")); @@ -250,7 +251,7 @@ public void testGenerateDeploymentWithTlsAuth() { .build()) .endSpec() .build(); - KafkaBridgeCluster kbc = KafkaBridgeCluster.fromCrd(resource, VERSIONS); + KafkaBridgeCluster kbc = KafkaBridgeCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); Deployment dep = kbc.generateDeployment(emptyMap(), true, null, null); assertThat(dep.getSpec().getTemplate().getSpec().getVolumes().get(3).getName(), is("user-secret")); @@ -281,7 +282,7 @@ public void testGenerateDeploymentWithTlsSameSecret() { .build()) .endSpec() .build(); - KafkaBridgeCluster kbc = KafkaBridgeCluster.fromCrd(resource, VERSIONS); + KafkaBridgeCluster kbc = KafkaBridgeCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); Deployment dep = kbc.generateDeployment(emptyMap(), true, null, null); // 2 = 1 volume from logging/metrics + just 1 from above certs Secret @@ -302,7 +303,7 @@ public void testGenerateDeploymentWithScramSha512Auth() { .endKafkaClientAuthenticationScramSha512() .endSpec() .build(); - KafkaBridgeCluster kbc = KafkaBridgeCluster.fromCrd(resource, VERSIONS); + KafkaBridgeCluster kbc = KafkaBridgeCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); Deployment dep = kbc.generateDeployment(emptyMap(), true, null, null); assertThat(dep.getSpec().getTemplate().getSpec().getVolumes().get(2).getName(), is("user1-secret")); @@ -329,7 +330,7 @@ public void testGenerateDeploymentWithPlainAuth() { .endKafkaClientAuthenticationPlain() .endSpec() .build(); - KafkaBridgeCluster kbc = KafkaBridgeCluster.fromCrd(resource, VERSIONS); + KafkaBridgeCluster kbc = KafkaBridgeCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); Deployment dep = kbc.generateDeployment(emptyMap(), true, null, null); assertThat(dep.getSpec().getTemplate().getSpec().getVolumes().get(2).getName(), is("user1-secret")); @@ -444,7 +445,7 @@ public void testTemplate() { .endTemplate() .endSpec() .build(); - KafkaBridgeCluster kbc = KafkaBridgeCluster.fromCrd(resource, VERSIONS); + KafkaBridgeCluster kbc = KafkaBridgeCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); // Check Deployment Deployment dep = kbc.generateDeployment(emptyMap(), true, null, null); @@ -497,7 +498,7 @@ public void testGracePeriod() { .endTemplate() .endSpec() .build(); - KafkaBridgeCluster kbc = KafkaBridgeCluster.fromCrd(resource, VERSIONS); + KafkaBridgeCluster kbc = KafkaBridgeCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); Deployment dep = kbc.generateDeployment(emptyMap(), true, null, null); assertThat(dep.getSpec().getTemplate().getSpec().getTerminationGracePeriodSeconds(), is(Long.valueOf(123))); @@ -506,7 +507,7 @@ public void testGracePeriod() { @ParallelTest public void testDefaultGracePeriod() { KafkaBridge resource = new KafkaBridgeBuilder(this.resource).build(); - KafkaBridgeCluster kbc = KafkaBridgeCluster.fromCrd(resource, VERSIONS); + KafkaBridgeCluster kbc = KafkaBridgeCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); Deployment dep = kbc.generateDeployment(emptyMap(), true, null, null); assertThat(dep.getSpec().getTemplate().getSpec().getTerminationGracePeriodSeconds(), is(Long.valueOf(30))); @@ -526,7 +527,7 @@ public void testImagePullSecrets() { 
.endTemplate() .endSpec() .build(); - KafkaBridgeCluster kbc = KafkaBridgeCluster.fromCrd(resource, VERSIONS); + KafkaBridgeCluster kbc = KafkaBridgeCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); Deployment dep = kbc.generateDeployment(emptyMap(), true, null, null); assertThat(dep.getSpec().getTemplate().getSpec().getImagePullSecrets().size(), is(2)); @@ -543,7 +544,7 @@ public void testImagePullSecretsCO() { secrets.add(secret1); secrets.add(secret2); - KafkaBridgeCluster kbc = KafkaBridgeCluster.fromCrd(this.resource, VERSIONS); + KafkaBridgeCluster kbc = KafkaBridgeCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, this.resource, VERSIONS); Deployment dep = kbc.generateDeployment(emptyMap(), true, null, secrets); assertThat(dep.getSpec().getTemplate().getSpec().getImagePullSecrets().size(), is(2)); @@ -565,7 +566,7 @@ public void testImagePullSecretsBoth() { .endTemplate() .endSpec() .build(); - KafkaBridgeCluster kbc = KafkaBridgeCluster.fromCrd(resource, VERSIONS); + KafkaBridgeCluster kbc = KafkaBridgeCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); Deployment dep = kbc.generateDeployment(emptyMap(), true, null, singletonList(secret1)); assertThat(dep.getSpec().getTemplate().getSpec().getImagePullSecrets().size(), is(1)); @@ -576,7 +577,7 @@ public void testImagePullSecretsBoth() { @ParallelTest public void testDefaultImagePullSecrets() { KafkaBridge resource = new KafkaBridgeBuilder(this.resource).build(); - KafkaBridgeCluster kbc = KafkaBridgeCluster.fromCrd(resource, VERSIONS); + KafkaBridgeCluster kbc = KafkaBridgeCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); Deployment dep = kbc.generateDeployment(emptyMap(), true, null, null); assertThat(dep.getSpec().getTemplate().getSpec().getImagePullSecrets(), is(nullValue())); @@ -593,7 +594,7 @@ public void testSecurityContext() { .endTemplate() .endSpec() .build(); - KafkaBridgeCluster kbc = KafkaBridgeCluster.fromCrd(resource, VERSIONS); + KafkaBridgeCluster kbc = KafkaBridgeCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); Deployment dep = kbc.generateDeployment(emptyMap(), true, null, null); assertThat(dep.getSpec().getTemplate().getSpec().getSecurityContext(), is(notNullValue())); @@ -605,7 +606,7 @@ public void testSecurityContext() { @ParallelTest public void testDefaultSecurityContext() { KafkaBridge resource = new KafkaBridgeBuilder(this.resource).build(); - KafkaBridgeCluster kbc = KafkaBridgeCluster.fromCrd(resource, VERSIONS); + KafkaBridgeCluster kbc = KafkaBridgeCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); Deployment dep = kbc.generateDeployment(emptyMap(), true, null, null); assertThat(dep.getSpec().getTemplate().getSpec().getSecurityContext(), is(nullValue())); @@ -622,7 +623,7 @@ public void testPodDisruptionBudget() { .endTemplate() .endSpec() .build(); - KafkaBridgeCluster kbc = KafkaBridgeCluster.fromCrd(resource, VERSIONS); + KafkaBridgeCluster kbc = KafkaBridgeCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); PodDisruptionBudget pdb = kbc.generatePodDisruptionBudget(); assertThat(pdb.getSpec().getMaxUnavailable(), is(new IntOrString(2))); @@ -631,7 +632,7 @@ public void testPodDisruptionBudget() { @ParallelTest public void testDefaultPodDisruptionBudget() { KafkaBridge resource = new KafkaBridgeBuilder(this.resource).build(); - KafkaBridgeCluster kbc = KafkaBridgeCluster.fromCrd(resource, VERSIONS); + KafkaBridgeCluster kbc = 
KafkaBridgeCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); PodDisruptionBudget pdb = kbc.generatePodDisruptionBudget(); assertThat(pdb.getSpec().getMaxUnavailable(), is(new IntOrString(1))); @@ -639,7 +640,7 @@ public void testDefaultPodDisruptionBudget() { @ParallelTest public void testImagePullPolicy() { - KafkaBridgeCluster kbc = KafkaBridgeCluster.fromCrd(resource, VERSIONS); + KafkaBridgeCluster kbc = KafkaBridgeCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); Deployment dep = kbc.generateDeployment(Collections.EMPTY_MAP, true, ImagePullPolicy.ALWAYS, null); assertThat(dep.getSpec().getTemplate().getSpec().getContainers().get(0).getImagePullPolicy(), is(ImagePullPolicy.ALWAYS.toString())); @@ -663,7 +664,7 @@ public void testResources() { .withResources(new ResourceRequirementsBuilder().withLimits(limits).withRequests(requests).build()) .endSpec() .build(); - KafkaBridgeCluster kbc = KafkaBridgeCluster.fromCrd(resource, VERSIONS); + KafkaBridgeCluster kbc = KafkaBridgeCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); Deployment dep = kbc.generateDeployment(Collections.EMPTY_MAP, true, null, null); Container cont = dep.getSpec().getTemplate().getSpec().getContainers().get(0); @@ -700,7 +701,7 @@ public void testKafkaBridgeContainerEnvVars() { .endSpec() .build(); - List kafkaEnvVars = KafkaBridgeCluster.fromCrd(resource, VERSIONS).getEnvVars(); + List kafkaEnvVars = KafkaBridgeCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS).getEnvVars(); assertThat("Failed to correctly set container environment variable: " + testEnvOneKey, kafkaEnvVars.stream().filter(env -> testEnvOneKey.equals(env.getName())) @@ -738,7 +739,7 @@ public void testKafkaBridgeContainerEnvVarsConflict() { .endSpec() .build(); - List kafkaEnvVars = KafkaBridgeCluster.fromCrd(resource, VERSIONS).getEnvVars(); + List kafkaEnvVars = KafkaBridgeCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS).getEnvVars(); assertThat("Failed to prevent over writing existing container environment variable: " + testEnvOneKey, kafkaEnvVars.stream().filter(env -> testEnvOneKey.equals(env.getName())) @@ -762,7 +763,7 @@ public void testGenerateDeploymentWithOAuthWithAccessToken() { .endSpec() .build(); - KafkaBridgeCluster kb = KafkaBridgeCluster.fromCrd(resource, VERSIONS); + KafkaBridgeCluster kb = KafkaBridgeCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); Deployment dep = kb.generateDeployment(emptyMap(), true, null, null); Container cont = dep.getSpec().getTemplate().getSpec().getContainers().get(0); @@ -788,7 +789,7 @@ public void testGenerateDeploymentWithOAuthWithRefreshToken() { .endSpec() .build(); - KafkaBridgeCluster kb = KafkaBridgeCluster.fromCrd(resource, VERSIONS); + KafkaBridgeCluster kb = KafkaBridgeCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); Deployment dep = kb.generateDeployment(emptyMap(), true, null, null); Container cont = dep.getSpec().getTemplate().getSpec().getContainers().get(0); @@ -815,7 +816,7 @@ public void testGenerateDeploymentWithOAuthWithClientSecret() { .endSpec() .build(); - KafkaBridgeCluster kb = KafkaBridgeCluster.fromCrd(resource, VERSIONS); + KafkaBridgeCluster kb = KafkaBridgeCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); Deployment dep = kb.generateDeployment(emptyMap(), true, null, null); Container cont = dep.getSpec().getTemplate().getSpec().getContainers().get(0); @@ -839,7 +840,7 @@ public void 
testGenerateDeploymentWithOAuthWithMissingClientSecret() { .endSpec() .build(); - KafkaBridgeCluster.fromCrd(resource, VERSIONS); + KafkaBridgeCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); }); } @@ -859,7 +860,7 @@ public void testGenerateDeploymentWithOAuthWithMissingUri() { .endSpec() .build(); - KafkaBridgeCluster.fromCrd(resource, VERSIONS); + KafkaBridgeCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); }); } @@ -897,7 +898,7 @@ public void testGenerateDeploymentWithOAuthWithTls() { .endSpec() .build(); - KafkaBridgeCluster kb = KafkaBridgeCluster.fromCrd(resource, VERSIONS); + KafkaBridgeCluster kb = KafkaBridgeCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); Deployment dep = kb.generateDeployment(emptyMap(), true, null, null); Container cont = dep.getSpec().getTemplate().getSpec().getContainers().get(0); @@ -944,7 +945,7 @@ public void testGenerateDeploymentWithOAuthUsingOpaqueTokens() { .endSpec() .build(); - KafkaBridgeCluster kb = KafkaBridgeCluster.fromCrd(resource, VERSIONS); + KafkaBridgeCluster kb = KafkaBridgeCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); Deployment dep = kb.generateDeployment(emptyMap(), true, null, null); Container cont = dep.getSpec().getTemplate().getSpec().getContainers().get(0); @@ -963,7 +964,7 @@ public void testDifferentHttpPort() { .endSpec() .build(); - KafkaBridgeCluster kb = KafkaBridgeCluster.fromCrd(resource, VERSIONS); + KafkaBridgeCluster kb = KafkaBridgeCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); // Check ports in container Deployment dep = kb.generateDeployment(emptyMap(), true, null, null); @@ -1005,7 +1006,7 @@ public void testProbeConfiguration() { .endSpec() .build(); - KafkaBridgeCluster kb = KafkaBridgeCluster.fromCrd(resource, VERSIONS); + KafkaBridgeCluster kb = KafkaBridgeCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); Deployment dep = kb.generateDeployment(new HashMap(), true, null, null); Container cont = dep.getSpec().getTemplate().getSpec().getContainers().get(0); @@ -1025,7 +1026,7 @@ public void testTracingConfiguration() { .endSpec() .build(); - KafkaBridgeCluster kb = KafkaBridgeCluster.fromCrd(resource, VERSIONS); + KafkaBridgeCluster kb = KafkaBridgeCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); Deployment deployment = kb.generateDeployment(new HashMap<>(), true, null, null); Container container = deployment.getSpec().getTemplate().getSpec().getContainers().get(0); @@ -1045,7 +1046,7 @@ public void testCorsConfiguration() { .endSpec() .build(); - KafkaBridgeCluster kb = KafkaBridgeCluster.fromCrd(resource, VERSIONS); + KafkaBridgeCluster kb = KafkaBridgeCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); Deployment deployment = kb.generateDeployment(new HashMap<>(), true, null, null); Container container = deployment.getSpec().getTemplate().getSpec().getContainers().get(0); diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaBrokerConfigurationBuilderTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaBrokerConfigurationBuilderTest.java index f36ef5f94b..dacfe6d29a 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaBrokerConfigurationBuilderTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaBrokerConfigurationBuilderTest.java @@ -27,6 +27,7 @@ import 
io.strimzi.api.kafka.model.storage.Storage; import io.strimzi.kafka.oauth.server.ServerConfig; import io.strimzi.operator.cluster.operator.resource.cruisecontrol.CruiseControlConfigurationParameters; +import io.strimzi.operator.common.Reconciliation; import io.strimzi.test.annotations.ParallelSuite; import io.strimzi.test.annotations.ParallelTest; import org.hamcrest.Description; @@ -302,7 +303,7 @@ public void testNullUserConfiguration() { @ParallelTest public void testEmptyUserConfiguration() { Map userConfiguration = new HashMap<>(); - KafkaConfiguration kafkaConfiguration = new KafkaConfiguration(userConfiguration.entrySet()); + KafkaConfiguration kafkaConfiguration = new KafkaConfiguration(Reconciliation.DUMMY_RECONCILIATION, userConfiguration.entrySet()); String configuration = new KafkaBrokerConfigurationBuilder() .withUserConfiguration(kafkaConfiguration) @@ -319,7 +320,7 @@ public void testUserConfiguration() { userConfiguration.put("transaction.state.log.replication.factor", 3); userConfiguration.put("transaction.state.log.min.isr", 2); - KafkaConfiguration kafkaConfiguration = new KafkaConfiguration(userConfiguration.entrySet()); + KafkaConfiguration kafkaConfiguration = new KafkaConfiguration(Reconciliation.DUMMY_RECONCILIATION, userConfiguration.entrySet()); String configuration = new KafkaBrokerConfigurationBuilder() .withUserConfiguration(kafkaConfiguration) diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaClusterOAuthValidationTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaClusterOAuthValidationTest.java index 63bcc80af5..db4f8cda0a 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaClusterOAuthValidationTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaClusterOAuthValidationTest.java @@ -16,6 +16,7 @@ import io.strimzi.api.kafka.model.listener.arraylistener.KafkaListenerType; import io.strimzi.api.kafka.model.storage.EphemeralStorage; import io.strimzi.operator.cluster.KafkaVersionTestUtils; +import io.strimzi.operator.common.Reconciliation; import io.strimzi.test.annotations.ParallelSuite; import io.strimzi.test.annotations.ParallelTest; @@ -51,7 +52,7 @@ public void testOAuthValidationWithIntrospectionMinimalPlain() { .endClientSecret() .build(); - ListenersValidator.validate(3, getListeners(auth)); + ListenersValidator.validate(Reconciliation.DUMMY_RECONCILIATION, 3, getListeners(auth)); } @ParallelTest @@ -102,7 +103,7 @@ public void testOAuthAuthnAuthz() { .endSpec() .build(); - KafkaCluster.fromCrd(kafkaAssembly, VERSIONS); + KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS); } @ParallelTest @@ -143,7 +144,7 @@ public void testOAuthAuthzWithoutAuthn() { .endSpec() .build(); - KafkaCluster.fromCrd(kafkaAssembly, VERSIONS); + KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS); }); } @@ -161,7 +162,7 @@ public void testOAuthValidationWithJwksMinRefreshPauseAndIntrospection() { .endClientSecret() .build(); - ListenersValidator.validate(3, getListeners(auth)); + ListenersValidator.validate(Reconciliation.DUMMY_RECONCILIATION, 3, getListeners(auth)); }); } @@ -179,7 +180,7 @@ public void testOAuthValidationWithJwksExpiryAndIntrospection() { .endClientSecret() .build(); - ListenersValidator.validate(3, getListeners(auth)); + ListenersValidator.validate(Reconciliation.DUMMY_RECONCILIATION, 3, getListeners(auth)); }); } @@ -197,7 +198,7 @@ public void 
testOAuthValidationWithJwksRefreshAndIntrospection() { .endClientSecret() .build(); - ListenersValidator.validate(3, getListeners(auth)); + ListenersValidator.validate(Reconciliation.DUMMY_RECONCILIATION, 3, getListeners(auth)); }); } @@ -214,7 +215,7 @@ public void testOAuthValidationWithReauthAndIntrospection() { .endClientSecret() .build(); - ListenersValidator.validate(3, getListeners(auth)); + ListenersValidator.validate(Reconciliation.DUMMY_RECONCILIATION, 3, getListeners(auth)); } @ParallelTest @@ -229,7 +230,7 @@ public void testOAuthValidationMissingValidIssuerUri() { .endClientSecret() .build(); - ListenersValidator.validate(3, getListeners(auth)); + ListenersValidator.validate(Reconciliation.DUMMY_RECONCILIATION, 3, getListeners(auth)); }); } @@ -243,7 +244,7 @@ public void testOAuthValidationRefreshSecondsRelationWithExpirySeconds() { .withJwksExpirySeconds(89) .build(); - ListenersValidator.validate(3, getListeners(auth)); + ListenersValidator.validate(Reconciliation.DUMMY_RECONCILIATION, 3, getListeners(auth)); }); } @@ -256,7 +257,7 @@ public void testOAuthValidationRefreshSecondsSetWithExpirySecondsNotSet() { .withJwksRefreshSeconds(333) .build(); - ListenersValidator.validate(3, getListeners(auth)); + ListenersValidator.validate(Reconciliation.DUMMY_RECONCILIATION, 3, getListeners(auth)); }); } @@ -269,7 +270,7 @@ public void testOAuthValidationRefreshSecondsNotSetWithExpirySecondsSet() { .withJwksExpirySeconds(150) .build(); - ListenersValidator.validate(3, getListeners(auth)); + ListenersValidator.validate(Reconciliation.DUMMY_RECONCILIATION, 3, getListeners(auth)); }); } @@ -278,7 +279,7 @@ public void testOAuthValidationNoUriSpecified() { assertThrows(InvalidResourceException.class, () -> { KafkaListenerAuthenticationOAuth auth = new KafkaListenerAuthenticationOAuthBuilder().build(); - ListenersValidator.validate(3, getListeners(auth)); + ListenersValidator.validate(Reconciliation.DUMMY_RECONCILIATION, 3, getListeners(auth)); }); } @@ -293,7 +294,7 @@ public void testOAuthValidationIntrospectionEndpointUriWithoutClientId() { .endClientSecret() .build(); - ListenersValidator.validate(3, getListeners(auth)); + ListenersValidator.validate(Reconciliation.DUMMY_RECONCILIATION, 3, getListeners(auth)); }); } @@ -305,7 +306,7 @@ public void testOAuthValidationIntrospectionEndpointUriWithoutClientSecret() { .withIntrospectionEndpointUri("http://introspection") .build(); - ListenersValidator.validate(3, getListeners(auth)); + ListenersValidator.validate(Reconciliation.DUMMY_RECONCILIATION, 3, getListeners(auth)); }); } @@ -318,7 +319,7 @@ public void testOAuthValidationExpirySecondsWithoutEndpointUri() { .withJwksExpirySeconds(100) .build(); - ListenersValidator.validate(3, getListeners(auth)); + ListenersValidator.validate(Reconciliation.DUMMY_RECONCILIATION, 3, getListeners(auth)); }); } @@ -331,7 +332,7 @@ public void testOAuthValidationRefreshSecondsWithoutEndpointUri() { .withJwksRefreshSeconds(40) .build(); - ListenersValidator.validate(3, getListeners(auth)); + ListenersValidator.validate(Reconciliation.DUMMY_RECONCILIATION, 3, getListeners(auth)); }); } @@ -349,7 +350,7 @@ public void testOAuthValidationWithOAuthWithIntrospectionWithNoTypeCheck() { .withCheckAccessTokenType(false) .build(); - ListenersValidator.validate(3, getListeners(auth)); + ListenersValidator.validate(Reconciliation.DUMMY_RECONCILIATION, 3, getListeners(auth)); }); } @@ -365,7 +366,7 @@ public void testOAuthValidationWithOAuthWithJwksWithNotJwt() { .withAccessTokenIsJwt(false) .build(); - 
ListenersValidator.validate(3, getListeners(auth)); + ListenersValidator.validate(Reconciliation.DUMMY_RECONCILIATION, 3, getListeners(auth)); }); } } \ No newline at end of file diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaClusterTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaClusterTest.java index 123af4db27..5b7b070449 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaClusterTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaClusterTest.java @@ -79,6 +79,7 @@ import io.strimzi.operator.common.MetricsAndLogging; import io.strimzi.operator.cluster.operator.resource.cruisecontrol.CruiseControlConfigurationParameters; import io.strimzi.operator.common.PasswordGenerator; +import io.strimzi.operator.common.Reconciliation; import io.strimzi.operator.common.Util; import io.strimzi.operator.common.model.Labels; import io.strimzi.test.TestUtils; @@ -156,7 +157,7 @@ public class KafkaClusterTest { .endSpec() .build(); - private final KafkaCluster kc = KafkaCluster.fromCrd(kafkaAssembly, VERSIONS); + private final KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS); private void checkOwnerReference(OwnerReference ownerRef, HasMetadata resource) { assertThat(resource.getMetadata().getOwnerReferences().size(), is(1)); @@ -233,7 +234,7 @@ public void testGenerateServiceWithoutMetrics() { .endKafka() .endSpec() .build(); - KafkaCluster kc = KafkaCluster.fromCrd(kafka, VERSIONS); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, VERSIONS); Service headful = kc.generateService(); assertThat(headful.getSpec().getType(), is("ClusterIP")); @@ -268,7 +269,7 @@ public void testGenerateHeadlessServiceWithJmxMetrics() { .endKafka() .endSpec() .build(); - KafkaCluster kc = KafkaCluster.fromCrd(kafka, VERSIONS); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, VERSIONS); Service headless = kc.generateHeadlessService(); assertThat(headless.getSpec().getType(), is("ClusterIP")); @@ -345,7 +346,7 @@ image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configuration, e .endKafka() .endSpec() .build(); - KafkaCluster kc = KafkaCluster.fromCrd(kafkaAssembly, VERSIONS); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS); StatefulSet sts = kc.generateStatefulSet(false, null, null); assertThat(sts.getSpec().getVolumeClaimTemplates().get(0).getSpec().getSelector().getMatchLabels(), is(selector)); } @@ -360,7 +361,7 @@ image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configuration, e .endKafka() .endSpec() .build(); - KafkaCluster kc = KafkaCluster.fromCrd(kafkaAssembly, VERSIONS); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS); StatefulSet sts = kc.generateStatefulSet(false, null, null); assertThat(sts.getSpec().getVolumeClaimTemplates().get(0).getSpec().getSelector(), is(nullValue())); } @@ -376,7 +377,7 @@ image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configuration, e .endKafka() .endSpec() .build(); - KafkaCluster kc = KafkaCluster.fromCrd(kafkaAssembly, VERSIONS); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS); StatefulSet sts = kc.generateStatefulSet(false, null, null); 
assertThat(sts.getSpec().getTemplate().getSpec().getVolumes().get(0).getEmptyDir().getSizeLimit(), is(new Quantity("1", "Gi"))); } @@ -391,7 +392,7 @@ image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configuration, e .endKafka() .endSpec() .build(); - KafkaCluster kc = KafkaCluster.fromCrd(kafkaAssembly, VERSIONS); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS); StatefulSet sts = kc.generateStatefulSet(false, null, null); assertThat(sts.getSpec().getTemplate().getSpec().getVolumes().get(0).getEmptyDir().getSizeLimit(), is(nullValue())); } @@ -405,7 +406,7 @@ public void testGenerateStatefulSetWithRack() { .endKafka() .endSpec() .build(); - KafkaCluster kc = KafkaCluster.fromCrd(editKafkaAssembly, VERSIONS); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, editKafkaAssembly, VERSIONS); StatefulSet sts = kc.generateStatefulSet(true, null, null); checkStatefulSet(sts, editKafkaAssembly, true); } @@ -420,7 +421,7 @@ public void testGenerateStatefulSetWithInitContainers() { .withNewRack().withTopologyKey("rack-key").endRack() .endKafka() .endSpec().build(); - KafkaCluster kc = KafkaCluster.fromCrd(editKafkaAssembly, VERSIONS); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, editKafkaAssembly, VERSIONS); StatefulSet sts = kc.generateStatefulSet(false, null, null); checkStatefulSet(sts, editKafkaAssembly, false); } @@ -438,7 +439,7 @@ public void testGenerateStatefulSetWithPodManagementPolicy() { .endTemplate() .endKafka() .endSpec().build(); - KafkaCluster kc = KafkaCluster.fromCrd(editKafkaAssembly, VERSIONS); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, editKafkaAssembly, VERSIONS); StatefulSet sts = kc.generateStatefulSet(false, null, null); assertThat(sts.getSpec().getPodManagementPolicy(), is(PodManagementPolicy.ORDERED_READY.toValue())); } @@ -535,7 +536,7 @@ image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configuration, e .endKafka() .endSpec() .build(); - KafkaCluster kc = KafkaCluster.fromCrd(assembly, VERSIONS); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, assembly, VERSIONS); List pvcs = kc.getVolumeClaims(); @@ -555,7 +556,7 @@ image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configuration, e .endKafka() .endSpec() .build(); - kc = KafkaCluster.fromCrd(assembly, VERSIONS); + kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, assembly, VERSIONS); pvcs = kc.getVolumeClaims(); @@ -570,35 +571,35 @@ image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configuration, e @ParallelTest public void withAffinityWithoutRack() throws IOException { - ResourceTester resourceTester = new ResourceTester<>(Kafka.class, VERSIONS, KafkaCluster::fromCrd, this.getClass().getSimpleName() + ".withAffinityWithoutRack"); + ResourceTester resourceTester = new ResourceTester<>(Kafka.class, VERSIONS, (kafkaAssembly1, versions) -> KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly1, versions), this.getClass().getSimpleName() + ".withAffinityWithoutRack"); resourceTester.assertDesiredResource("-STS.yaml", kc -> kc.generateStatefulSet(true, null, null).getSpec().getTemplate().getSpec().getAffinity()); } @ParallelTest public void withRackWithoutAffinity() throws IOException { - ResourceTester resourceTester = new ResourceTester<>(Kafka.class, VERSIONS, KafkaCluster::fromCrd, this.getClass().getSimpleName() + ".withRackWithoutAffinity"); + 
ResourceTester<Kafka, KafkaCluster> resourceTester = new ResourceTester<>(Kafka.class, VERSIONS, (kafkaAssembly1, versions) -> KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly1, versions), this.getClass().getSimpleName() + ".withRackWithoutAffinity");
 resourceTester.assertDesiredResource("-STS.yaml", kc -> kc.generateStatefulSet(true, null, null).getSpec().getTemplate().getSpec().getAffinity());
 }
 @ParallelTest
 public void withRackAndAffinityWithMoreTerms() throws IOException {
- ResourceTester<Kafka, KafkaCluster> resourceTester = new ResourceTester<>(Kafka.class, VERSIONS, KafkaCluster::fromCrd, this.getClass().getSimpleName() + ".withRackAndAffinityWithMoreTerms");
+ ResourceTester<Kafka, KafkaCluster> resourceTester = new ResourceTester<>(Kafka.class, VERSIONS, (kafkaAssembly1, versions) -> KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly1, versions), this.getClass().getSimpleName() + ".withRackAndAffinityWithMoreTerms");
 resourceTester.assertDesiredResource("-STS.yaml", kc -> kc.generateStatefulSet(true, null, null).getSpec().getTemplate().getSpec().getAffinity());
 }
 @ParallelTest
 public void withRackAndAffinity() throws IOException {
- ResourceTester<Kafka, KafkaCluster> resourceTester = new ResourceTester<>(Kafka.class, VERSIONS, KafkaCluster::fromCrd, this.getClass().getSimpleName() + ".withRackAndAffinity");
+ ResourceTester<Kafka, KafkaCluster> resourceTester = new ResourceTester<>(Kafka.class, VERSIONS, (kafkaAssembly1, versions) -> KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly1, versions), this.getClass().getSimpleName() + ".withRackAndAffinity");
 resourceTester.assertDesiredResource("-STS.yaml", kc -> kc.generateStatefulSet(true, null, null).getSpec().getTemplate().getSpec().getAffinity());
 }
 @ParallelTest
 public void withTolerations() throws IOException {
- ResourceTester<Kafka, KafkaCluster> resourceTester = new ResourceTester<>(Kafka.class, VERSIONS, KafkaCluster::fromCrd, this.getClass().getSimpleName() + ".withTolerations");
+ ResourceTester<Kafka, KafkaCluster> resourceTester = new ResourceTester<>(Kafka.class, VERSIONS, (kafkaAssembly1, versions) -> KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly1, versions), this.getClass().getSimpleName() + ".withTolerations");
 resourceTester.assertDesiredResource("-STS.yaml", kc -> kc.generateStatefulSet(true, null, null).getSpec().getTemplate().getSpec().getTolerations());
 }
@@ -623,7 +624,7 @@ image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configuration, e
 .endSpec()
 .build();
- KafkaCluster kc = KafkaCluster.fromCrd(kafkaAssembly, VERSIONS);
+ KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS);
 // Check StatefulSet changes
 StatefulSet sts = kc.generateStatefulSet(true, null, null);
@@ -708,7 +709,7 @@ image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configuration, e
 .endSpec()
 .build();
- KafkaCluster kc = KafkaCluster.fromCrd(kafkaAssembly, VERSIONS);
+ KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS);
 // Check bootstrap route
 Route brt = kc.generateExternalBootstrapRoutes().get(0);
@@ -765,7 +766,7 @@ image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configuration, e
 .endSpec()
 .build();
- KafkaCluster kc = KafkaCluster.fromCrd(kafkaAssembly, VERSIONS);
+ KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS);
 // Check bootstrap route
 Route brt = kc.generateExternalBootstrapRoutes().get(0);
@@ -801,7 +802,7 @@ image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configuration,
e .endKafka() .endSpec() .build(); - KafkaCluster kc = KafkaCluster.fromCrd(kafkaAssembly, VERSIONS); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS); // Check StatefulSet changes StatefulSet sts = kc.generateStatefulSet(true, null, null); @@ -856,7 +857,7 @@ image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configuration, e .endKafka() .endSpec() .build(); - KafkaCluster kc = KafkaCluster.fromCrd(kafkaAssembly, VERSIONS); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS); // Check external bootstrap service Service ext = kc.generateExternalBootstrapServices().get(0); @@ -889,7 +890,7 @@ image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configuration, e .endKafka() .endSpec() .build(); - KafkaCluster kc = KafkaCluster.fromCrd(kafkaAssembly, VERSIONS); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS); // Check external bootstrap service Service ext = kc.generateExternalBootstrapServices().get(0); @@ -924,7 +925,7 @@ image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configuration, e .endKafka() .endSpec() .build(); - KafkaCluster kc = KafkaCluster.fromCrd(kafkaAssembly, VERSIONS); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS); // Check external bootstrap service Service ext = kc.generateExternalBootstrapServices().get(0); @@ -961,7 +962,7 @@ image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configuration, e .endKafka() .endSpec() .build(); - KafkaCluster kc = KafkaCluster.fromCrd(kafkaAssembly, VERSIONS); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS); // Check external bootstrap service Service ext = kc.generateExternalBootstrapServices().get(0); @@ -1012,7 +1013,7 @@ image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configuration, e .endKafka() .endSpec() .build(); - KafkaCluster kc = KafkaCluster.fromCrd(kafkaAssembly, VERSIONS); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS); // Check annotations assertThat(kc.generateExternalBootstrapServices().get(0).getMetadata().getAnnotations(), is(Collections.singletonMap("external-dns.alpha.kubernetes.io/hostname", "bootstrap.myingress.com."))); @@ -1060,7 +1061,7 @@ image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configuration, e .endKafka() .endSpec() .build(); - KafkaCluster kc = KafkaCluster.fromCrd(kafkaAssembly, VERSIONS); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS); // Check annotations assertThat(kc.generateExternalBootstrapServices().get(0).getSpec().getLoadBalancerIP(), is("10.0.0.1")); @@ -1107,7 +1108,7 @@ image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configuration, e .endKafka() .endSpec() .build(); - KafkaCluster kc = KafkaCluster.fromCrd(kafkaAssembly, VERSIONS); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS); // Check annotations assertThat(kc.generateExternalBootstrapServices().get(0).getMetadata().getAnnotations(), is(Collections.singletonMap("external-dns.alpha.kubernetes.io/hostname", "bootstrap.myingress.com."))); @@ -1140,7 +1141,7 @@ image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configuration, e .endKafka() .endSpec() .build(); - KafkaCluster kc = 
KafkaCluster.fromCrd(kafkaAssembly, VERSIONS); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS); // Check StatefulSet changes StatefulSet sts = kc.generateStatefulSet(true, null, null); @@ -1187,7 +1188,7 @@ image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configuration, e .endKafka() .endSpec() .build(); - KafkaCluster kc = KafkaCluster.fromCrd(kafkaAssembly, VERSIONS); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS); // Check StatefulSet changes StatefulSet sts = kc.generateStatefulSet(true, null, null); @@ -1225,7 +1226,7 @@ image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configuration, e .endKafka() .endSpec() .build(); - KafkaCluster kc = KafkaCluster.fromCrd(kafkaAssembly, VERSIONS); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS); // Check StatefulSet changes StatefulSet sts = kc.generateStatefulSet(true, null, null); @@ -1283,7 +1284,7 @@ image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configuration, e .endKafka() .endSpec() .build(); - KafkaCluster kc = KafkaCluster.fromCrd(kafkaAssembly, VERSIONS); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS); assertThat(kc.generateExternalServices(0).get(0).getSpec().getPorts().get(0).getNodePort(), is(32101)); assertThat(kc.generateExternalBootstrapServices().get(0).getSpec().getPorts().get(0).getNodePort(), is(32001)); @@ -1319,7 +1320,7 @@ image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configuration, e .endKafka() .endSpec() .build(); - KafkaCluster kc = KafkaCluster.fromCrd(kafkaAssembly, VERSIONS); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS); assertThat(kc.generateExternalServices(0).get(0).getSpec().getPorts().get(0).getNodePort(), is(32101)); assertThat(kc.generateExternalBootstrapServices().get(0).getSpec().getPorts().get(0).getNodePort(), is(32001)); @@ -1413,7 +1414,7 @@ public void testGenerateBrokerSecretExternalWithManyDNS() throws CertificatePars } private Secret generateBrokerSecret(Set externalBootstrapAddress, Map> externalAddresses) { - ClusterCa clusterCa = new ClusterCa(new OpenSslCertManager(), new PasswordGenerator(10, "a", "a"), cluster, null, null); + ClusterCa clusterCa = new ClusterCa(Reconciliation.DUMMY_RECONCILIATION, new OpenSslCertManager(), new PasswordGenerator(10, "a", "a"), cluster, null, null); clusterCa.createRenewOrReplace(namespace, cluster, emptyMap(), emptyMap(), emptyMap(), null, true); kc.generateCertificates(kafkaAssembly, clusterCa, externalBootstrapAddress, externalAddresses, true); @@ -1582,7 +1583,7 @@ image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configuration, e .endKafka() .endSpec() .build(); - KafkaCluster kc = KafkaCluster.fromCrd(kafkaAssembly, VERSIONS); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS); // Check StatefulSet StatefulSet sts = kc.generateStatefulSet(true, null, null); @@ -1658,7 +1659,7 @@ public void testControlPlanePortNetworkPolicy() { Kafka kafkaAssembly = ResourceUtils.createKafka(namespace, cluster, replicas, image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configuration, emptyMap()); - KafkaCluster k = KafkaCluster.fromCrd(kafkaAssembly, VERSIONS); + KafkaCluster k = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, 
VERSIONS); // Check Network Policies => Different namespace NetworkPolicy np = k.generateNetworkPolicy("operator-namespace", null); @@ -1721,7 +1722,7 @@ public void testReplicationPortNetworkPolicy() { Kafka kafkaAssembly = ResourceUtils.createKafka(namespace, cluster, replicas, image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configuration, emptyMap()); - KafkaCluster k = KafkaCluster.fromCrd(kafkaAssembly, VERSIONS); + KafkaCluster k = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS); // Check Network Policies => Different namespace NetworkPolicy np = k.generateNetworkPolicy("operator-namespace", null); @@ -1810,7 +1811,7 @@ image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configuration, e .endKafka() .endSpec() .build(); - KafkaCluster k = KafkaCluster.fromCrd(kafkaAssembly, VERSIONS); + KafkaCluster k = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS); // Check Network Policies NetworkPolicy np = k.generateNetworkPolicy(null, null); @@ -1859,7 +1860,7 @@ image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configuration, e .endKafka() .endSpec() .build(); - KafkaCluster k = KafkaCluster.fromCrd(kafkaAssembly, VERSIONS); + KafkaCluster k = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS); // Check Network Policies NetworkPolicy np = k.generateNetworkPolicy(null, null); @@ -1891,7 +1892,7 @@ image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configuration, e .endKafka() .endSpec() .build(); - KafkaCluster kc = KafkaCluster.fromCrd(kafkaAssembly, VERSIONS); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS); StatefulSet sts = kc.generateStatefulSet(true, null, null); assertThat(sts.getSpec().getTemplate().getSpec().getTerminationGracePeriodSeconds(), is(Long.valueOf(123))); @@ -1902,7 +1903,7 @@ public void testDefaultGracePeriod() { Kafka kafkaAssembly = new KafkaBuilder(ResourceUtils.createKafka(namespace, cluster, replicas, image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configuration, emptyMap())) .build(); - KafkaCluster kc = KafkaCluster.fromCrd(kafkaAssembly, VERSIONS); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS); StatefulSet sts = kc.generateStatefulSet(true, null, null); assertThat(sts.getSpec().getTemplate().getSpec().getTerminationGracePeriodSeconds(), is(Long.valueOf(30))); @@ -1925,7 +1926,7 @@ image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configuration, e .endKafka() .endSpec() .build(); - KafkaCluster kc = KafkaCluster.fromCrd(kafkaAssembly, VERSIONS); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS); StatefulSet sts = kc.generateStatefulSet(true, null, null); assertThat(sts.getSpec().getTemplate().getSpec().getImagePullSecrets().size(), is(2)); @@ -1944,7 +1945,7 @@ public void testImagePullSecretsFromCO() { Kafka kafkaAssembly = ResourceUtils.createKafka(namespace, cluster, replicas, image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configuration, emptyMap()); - KafkaCluster kc = KafkaCluster.fromCrd(kafkaAssembly, VERSIONS); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS); StatefulSet sts = kc.generateStatefulSet(true, null, secrets); assertThat(sts.getSpec().getTemplate().getSpec().getImagePullSecrets().size(), is(2)); @@ -1969,7 +1970,7 @@ image, 
healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configuration, e .endKafka() .endSpec() .build(); - KafkaCluster kc = KafkaCluster.fromCrd(kafkaAssembly, VERSIONS); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS); StatefulSet sts = kc.generateStatefulSet(true, null, singletonList(secret1)); assertThat(sts.getSpec().getTemplate().getSpec().getImagePullSecrets().size(), is(1)); @@ -1982,7 +1983,7 @@ public void testDefaultImagePullSecrets() { Kafka kafkaAssembly = new KafkaBuilder(ResourceUtils.createKafka(namespace, cluster, replicas, image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configuration, emptyMap())) .build(); - KafkaCluster kc = KafkaCluster.fromCrd(kafkaAssembly, VERSIONS); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS); StatefulSet sts = kc.generateStatefulSet(true, null, null); assertThat(sts.getSpec().getTemplate().getSpec().getImagePullSecrets(), is(nullValue())); @@ -2002,7 +2003,7 @@ image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configuration, e .endKafka() .endSpec() .build(); - KafkaCluster kc = KafkaCluster.fromCrd(kafkaAssembly, VERSIONS); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS); StatefulSet sts = kc.generateStatefulSet(true, null, null); assertThat(sts.getSpec().getTemplate().getSpec().getSecurityContext(), is(notNullValue())); @@ -2016,7 +2017,7 @@ public void testDefaultSecurityContext() { Kafka kafkaAssembly = new KafkaBuilder(ResourceUtils.createKafka(namespace, cluster, replicas, image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configuration, emptyMap())) .build(); - KafkaCluster kc = KafkaCluster.fromCrd(kafkaAssembly, VERSIONS); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS); StatefulSet sts = kc.generateStatefulSet(true, null, null); assertThat(sts.getSpec().getTemplate().getSpec().getSecurityContext(), is(nullValue())); @@ -2036,7 +2037,7 @@ image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configuration, e .endKafka() .endSpec() .build(); - KafkaCluster kc = KafkaCluster.fromCrd(kafkaAssembly, VERSIONS); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS); PodDisruptionBudget pdb = kc.generatePodDisruptionBudget(); assertThat(pdb.getSpec().getMaxUnavailable(), is(new IntOrString(2))); @@ -2047,7 +2048,7 @@ public void testDefaultPodDisruptionBudget() { Kafka kafkaAssembly = new KafkaBuilder(ResourceUtils.createKafka(namespace, cluster, replicas, image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configuration, emptyMap())) .build(); - KafkaCluster kc = KafkaCluster.fromCrd(kafkaAssembly, VERSIONS); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS); PodDisruptionBudget pdb = kc.generatePodDisruptionBudget(); assertThat(pdb.getSpec().getMaxUnavailable(), is(new IntOrString(1))); @@ -2058,7 +2059,7 @@ public void testImagePullPolicy() { Kafka kafkaAssembly = ResourceUtils.createKafka(namespace, cluster, replicas, image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configuration, emptyMap()); kafkaAssembly.getSpec().getKafka().setRack(new RackBuilder().withTopologyKey("topology-key").build()); - KafkaCluster kc = KafkaCluster.fromCrd(kafkaAssembly, VERSIONS); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, 
kafkaAssembly, VERSIONS); StatefulSet sts = kc.generateStatefulSet(true, ImagePullPolicy.ALWAYS, null); assertThat(sts.getSpec().getTemplate().getSpec().getInitContainers().get(0).getImagePullPolicy(), is(ImagePullPolicy.ALWAYS.toString())); @@ -2101,7 +2102,7 @@ image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configuration, e .endSpec() .build(); - KafkaCluster kc = KafkaCluster.fromCrd(kafkaAssembly, VERSIONS); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS); List services = new ArrayList<>(); services.addAll(kc.generateExternalBootstrapServices()); @@ -2151,7 +2152,7 @@ image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configuration, e .endSpec() .build(); - KafkaCluster kc = KafkaCluster.fromCrd(kafkaAssembly, VERSIONS); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS); assertThat(ListenersUtils.brokerAdvertisedPort(kc.getListeners().get(0), 0), is(10000)); assertThat(ListenersUtils.brokerAdvertisedHost(kc.getListeners().get(0), 0), is("my-host-0.cz")); @@ -2181,7 +2182,7 @@ image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configuration, e .endSpec() .build(); - KafkaCluster kc = KafkaCluster.fromCrd(kafkaAssembly, VERSIONS); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS); assertThat(ListenersUtils.brokerAdvertisedPort(kc.getListeners().get(0), 0), is(nullValue())); assertThat(ListenersUtils.brokerAdvertisedHost(kc.getListeners().get(0), 0), is(nullValue())); @@ -2219,7 +2220,7 @@ image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configuration, e .endSpec() .build(); - KafkaCluster kc = KafkaCluster.fromCrd(kafkaAssembly, VERSIONS); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS); assertThat(kc.getAdvertisedHostname(kc.getListeners().get(0), 0, "some-host.com"), is("EXTERNAL_9094_0://my-host-0.cz")); assertThat(kc.getAdvertisedHostname(kc.getListeners().get(0), 0, ""), is("EXTERNAL_9094_0://my-host-0.cz")); @@ -2250,7 +2251,7 @@ image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configuration, e .endSpec() .build(); - KafkaCluster kc = KafkaCluster.fromCrd(kafkaAssembly, VERSIONS); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS); assertThat(kc.getAdvertisedHostname(kc.getListeners().get(0), 0, "some-host.com"), is("EXTERNAL_9094_0://some-host.com")); assertThat(kc.getAdvertisedHostname(kc.getListeners().get(0), 0, ""), is("EXTERNAL_9094_0://")); @@ -2273,7 +2274,7 @@ image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configuration, e .endKafka() .endSpec() .build(); - KafkaCluster kc = KafkaCluster.fromCrd(kafkaAssembly, VERSIONS); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS); // Check Storage annotation on STS assertThat(kc.generateStatefulSet(true, ImagePullPolicy.NEVER, null).getMetadata().getAnnotations().get(AbstractModel.ANNO_STRIMZI_IO_STORAGE), @@ -2303,7 +2304,7 @@ image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configuration, e .endKafka() .endSpec() .build(); - KafkaCluster kc = KafkaCluster.fromCrd(kafkaAssembly, VERSIONS); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS); // Check Storage annotation on STS assertThat(kc.generateStatefulSet(true, ImagePullPolicy.NEVER, 
null).getMetadata().getAnnotations().get(AbstractModel.ANNO_STRIMZI_IO_STORAGE), @@ -2341,7 +2342,7 @@ image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configuration, e .endKafka() .endSpec() .build(); - KafkaCluster kc = KafkaCluster.fromCrd(kafkaAssembly, VERSIONS); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS); // Check Storage annotation on STS assertThat(kc.generateStatefulSet(true, ImagePullPolicy.NEVER, null).getMetadata().getAnnotations().get(AbstractModel.ANNO_STRIMZI_IO_STORAGE), @@ -2393,7 +2394,7 @@ image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configuration, e .endKafka() .endSpec() .build(); - KafkaCluster kc = KafkaCluster.fromCrd(kafkaAssembly, VERSIONS); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS); // Check Storage annotation on STS assertThat(kc.generateStatefulSet(true, ImagePullPolicy.NEVER, null).getMetadata().getAnnotations().get(AbstractModel.ANNO_STRIMZI_IO_STORAGE), @@ -2467,7 +2468,7 @@ image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configuration, e .endKafka() .endSpec() .build(); - KafkaCluster kc = KafkaCluster.fromCrd(kafkaAssembly, VERSIONS); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS); // Check PVCs List pvcs = kc.generatePersistentVolumeClaims(kc.getStorage()); @@ -2494,7 +2495,7 @@ image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configuration, e .endKafka() .endSpec() .build(); - KafkaCluster kc = KafkaCluster.fromCrd(kafkaAssembly, VERSIONS); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS); // Check Storage annotation on STS assertThat(kc.generateStatefulSet(true, ImagePullPolicy.NEVER, null).getMetadata().getAnnotations().get(AbstractModel.ANNO_STRIMZI_IO_STORAGE), @@ -2536,7 +2537,7 @@ image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configuration, e .endKafka() .endSpec() .build(); - KafkaCluster.fromCrd(kafkaAssembly, VERSIONS); + KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS); }); } @@ -2556,7 +2557,7 @@ image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configuration, e .endKafka() .endSpec() .build(); - KafkaCluster.fromCrd(kafkaAssembly, VERSIONS, oldStorage, replicas); + KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS, oldStorage, replicas); }); } @@ -2570,7 +2571,7 @@ image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configuration, e .endKafka() .endSpec() .build(); - KafkaCluster kc = KafkaCluster.fromCrd(kafkaAssembly, VERSIONS); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS); // Check Storage annotation on STS assertThat(kc.generateStatefulSet(true, ImagePullPolicy.NEVER, null).getMetadata().getAnnotations().get(AbstractModel.ANNO_STRIMZI_IO_STORAGE), @@ -2603,7 +2604,7 @@ image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configuration, e .endKafka() .endSpec() .build(); - KafkaCluster kc = KafkaCluster.fromCrd(kafkaAssembly, VERSIONS, ephemeral, replicas); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS, ephemeral, replicas); // Storage is reverted assertThat(kc.getStorage(), is(ephemeral)); @@ -2620,7 +2621,7 @@ image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configuration, e .endKafka() .endSpec() 
.build(); - kc = KafkaCluster.fromCrd(kafkaAssembly, VERSIONS, persistent, replicas); + kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS, persistent, replicas); // Storage is reverted assertThat(kc.getStorage(), is(persistent)); @@ -2637,7 +2638,7 @@ image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configuration, e .endKafka() .endSpec() .build(); - kc = KafkaCluster.fromCrd(kafkaAssembly, VERSIONS, jbod, replicas); + kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS, jbod, replicas); // Storage is reverted assertThat(kc.getStorage(), is(jbod)); @@ -2654,7 +2655,7 @@ image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configuration, e .endKafka() .endSpec() .build(); - kc = KafkaCluster.fromCrd(kafkaAssembly, VERSIONS, jbod, replicas); + kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS, jbod, replicas); // Storage is reverted assertThat(kc.getStorage(), is(jbod)); @@ -2711,7 +2712,7 @@ image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configuration, e .endSpec() .build(); - KafkaCluster kc = KafkaCluster.fromCrd(kafkaAssembly, VERSIONS); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS); assertThat(kc.isExposedWithIngress(), is(true)); @@ -2849,7 +2850,7 @@ image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configuration, e .endSpec() .build(); - KafkaCluster kc = KafkaCluster.fromCrd(kafkaAssembly, VERSIONS); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS); // Check bootstrap ingress Ingress bing = kc.generateExternalBootstrapIngresses().get(0); @@ -2899,7 +2900,7 @@ image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configuration, e .build(); try { - KafkaCluster kc = KafkaCluster.fromCrd(kafkaAssembly, VERSIONS); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS); fail("Expected exception was not thrown"); } catch (InvalidResourceException e) { // pass @@ -2928,7 +2929,7 @@ image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configuration, e .endSpec() .build(); - KafkaCluster kc = KafkaCluster.fromCrd(kafkaAssembly, VERSIONS); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS); ClusterRoleBinding crb = kc.generateClusterRoleBinding(testNamespace); assertThat(crb.getMetadata().getName(), is(KafkaResources.initContainerClusterRoleBindingName(cluster, testNamespace))); @@ -2950,7 +2951,7 @@ image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configuration, e .endSpec() .build(); - KafkaCluster kc = KafkaCluster.fromCrd(kafkaAssembly, VERSIONS); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS); ClusterRoleBinding crb = kc.generateClusterRoleBinding(testNamespace); assertThat(crb.getMetadata().getName(), is(KafkaResources.initContainerClusterRoleBindingName(cluster, testNamespace))); @@ -2966,7 +2967,7 @@ public void testNullClusterRoleBinding() { Kafka kafkaAssembly = ResourceUtils.createKafka(testNamespace, cluster, replicas, image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configuration, emptyMap()); - KafkaCluster kc = KafkaCluster.fromCrd(kafkaAssembly, VERSIONS); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS); ClusterRoleBinding crb = 
kc.generateClusterRoleBinding(testNamespace); assertThat(crb, is(nullValue())); @@ -3004,7 +3005,7 @@ image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configuration, e .endSpec() .build(); - KafkaCluster kc = KafkaCluster.fromCrd(kafkaAssembly, VERSIONS); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS); List kafkaEnvVars = kc.getEnvVars(); @@ -3052,7 +3053,7 @@ image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configuration, e .endSpec() .build(); - KafkaCluster kc = KafkaCluster.fromCrd(kafkaAssembly, VERSIONS); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS); List kafkaEnvVars = kc.getEnvVars(); @@ -3097,7 +3098,7 @@ image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configuration, e .endSpec() .build(); - List kafkaEnvVars = KafkaCluster.fromCrd(kafkaAssembly, VERSIONS).getInitContainerEnvVars(); + List kafkaEnvVars = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS).getInitContainerEnvVars(); assertThat("Failed to correctly set container environment variable: " + testEnvOneKey, kafkaEnvVars.stream().filter(env -> testEnvOneKey.equals(env.getName())) @@ -3149,7 +3150,7 @@ image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configuration, e .endSpec() .build(); - List kafkaEnvVars = KafkaCluster.fromCrd(kafkaAssembly, VERSIONS).getInitContainerEnvVars(); + List kafkaEnvVars = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS).getInitContainerEnvVars(); assertThat("Failed to prevent over writing existing container environment variable: " + testEnvOneKey, kafkaEnvVars.stream().filter(env -> testEnvOneKey.equals(env.getName())) @@ -3186,7 +3187,7 @@ image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configuration, e .endSpec() .build(); - KafkaCluster kc = KafkaCluster.fromCrd(kafkaAssembly, VERSIONS); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS); assertThat(kc.templateKafkaContainerSecurityContext, is(securityContext)); StatefulSet sts = kc.generateStatefulSet(false, null, null); @@ -3228,7 +3229,7 @@ image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configuration, e .endSpec() .build(); - KafkaCluster kc = KafkaCluster.fromCrd(kafkaAssembly, VERSIONS); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS); StatefulSet sts = kc.generateStatefulSet(false, null, null); assertThat(sts.getSpec().getTemplate().getSpec().getInitContainers(), @@ -3266,7 +3267,7 @@ image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configuration, e .endSpec() .build(); - KafkaCluster kc = KafkaCluster.fromCrd(kafkaAssembly, VERSIONS); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS); StatefulSet sts = kc.generateStatefulSet(true, null, null); Container cont = sts.getSpec().getTemplate().getSpec().getContainers().get(0); @@ -3319,7 +3320,7 @@ image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configuration, e .endSpec() .build(); - KafkaCluster kc = KafkaCluster.fromCrd(kafkaAssembly, VERSIONS); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS); StatefulSet sts = kc.generateStatefulSet(true, null, null); Container cont = sts.getSpec().getTemplate().getSpec().getContainers().get(0); @@ -3426,7 +3427,7 @@ image, healthDelay, 
healthTimeout, metricsCm, jmxMetricsConfig, configuration, e .endSpec() .build(); - KafkaCluster kc = KafkaCluster.fromCrd(kafkaAssembly, VERSIONS); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS); StatefulSet sts = kc.generateStatefulSet(true, null, null); Container cont = sts.getSpec().getTemplate().getSpec().getContainers().get(0); @@ -3513,7 +3514,7 @@ image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configuration, e .endSpec() .build(); - KafkaCluster kc = KafkaCluster.fromCrd(kafkaAssembly, VERSIONS); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS); StatefulSet sts = kc.generateStatefulSet(true, null, null); @@ -3562,7 +3563,7 @@ image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configuration, e .endSpec() .build(); - KafkaCluster kc = KafkaCluster.fromCrd(kafkaAssembly, VERSIONS); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS); StatefulSet sts = kc.generateStatefulSet(true, null, null); Volume vol = sts.getSpec().getTemplate().getSpec().getVolumes().stream().filter(v -> "custom-tls-9093-certs".equals(v.getName())).findFirst().orElse(null); @@ -3595,7 +3596,7 @@ image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configuration, e .endSpec() .build(); - KafkaCluster.fromCrd(kafkaAssembly, VERSIONS); + KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS); }); } @@ -3650,7 +3651,7 @@ image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configuration, e .endSpec() .build(); - KafkaCluster kc = KafkaCluster.fromCrd(kafkaAssembly, VERSIONS); + KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS); StatefulSet sts = kc.generateStatefulSet(true, null, null); Container cont = sts.getSpec().getTemplate().getSpec().getContainers().get(0); @@ -3718,7 +3719,7 @@ image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configuration, e .endSpec() .build(); InvalidResourceException ex = assertThrows(InvalidResourceException.class, () -> { - KafkaCluster.fromCrd(kafkaAssembly, VERSIONS); + KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS); }); assertThat(ex.getMessage(), is("Kafka " + namespace + "/" + cluster + " has invalid configuration. " + "Cruise Control cannot be deployed with a single-node Kafka cluster. 
It requires at least two Kafka nodes."));
@@ -3742,7 +3743,7 @@ image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configuration, e
 .endSpec()
 .build();
- assertThrows(IllegalArgumentException.class, () -> KafkaCluster.fromCrd(kafkaAssembly, VERSIONS));
+ assertThrows(IllegalArgumentException.class, () -> KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS));
 }
 @ParallelTest
@@ -3762,7 +3763,7 @@ public void testMetricsParsingFromConfigMap() {
 .endSpec()
 .build();
- KafkaCluster kc = KafkaCluster.fromCrd(kafkaAssembly, VERSIONS);
+ KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS);
 assertThat(kc.isMetricsEnabled(), is(true));
 assertThat(kc.getMetricsConfigInCm(), is(metrics));
@@ -3770,7 +3771,7 @@ public void testMetricsParsingFromConfigMap() {
 @ParallelTest
 public void testMetricsParsingNoMetrics() {
- KafkaCluster kc = KafkaCluster.fromCrd(ResourceUtils.createKafka(namespace, cluster, replicas,
+ KafkaCluster kc = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, ResourceUtils.createKafka(namespace, cluster, replicas,
 image, healthDelay, healthTimeout), VERSIONS);
 assertThat(kc.isMetricsEnabled(), is(false));
diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaConfigurationTests.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaConfigurationTests.java
index 3766f6ed97..0b16aeb021 100644
--- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaConfigurationTests.java
+++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaConfigurationTests.java
@@ -5,6 +5,7 @@ package io.strimzi.operator.cluster.model;
 import io.strimzi.operator.cluster.KafkaVersionTestUtils;
+import io.strimzi.operator.common.Reconciliation;
 import io.strimzi.test.annotations.ParallelSuite;
 import io.strimzi.test.annotations.ParallelTest;
@@ -19,7 +20,7 @@ public class KafkaConfigurationTests {
 KafkaVersion kafkaVersion = KafkaVersionTestUtils.getKafkaVersionLookup().defaultVersion();
 void assertConfigError(String key, Object value, String errorMsg) {
- KafkaConfiguration kafkaConfiguration = new KafkaConfiguration(singletonMap(key, value).entrySet());
+ KafkaConfiguration kafkaConfiguration = new KafkaConfiguration(Reconciliation.DUMMY_RECONCILIATION, singletonMap(key, value).entrySet());
 assertThat(kafkaConfiguration.validate(kafkaVersion), is(singletonList(errorMsg)));
 }
@@ -29,7 +30,7 @@ public void unknownConfigIsNotAnError() {
 }
 private void assertNoError(String foo, Object value) {
- KafkaConfiguration kafkaConfiguration = new KafkaConfiguration(singletonMap(foo, value).entrySet());
+ KafkaConfiguration kafkaConfiguration = new KafkaConfiguration(Reconciliation.DUMMY_RECONCILIATION, singletonMap(foo, value).entrySet());
 kafkaConfiguration.validate(kafkaVersion);
 }
diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaConnectBuildTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaConnectBuildTest.java
index 0297d5dd43..04290b3590 100644
--- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaConnectBuildTest.java
+++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaConnectBuildTest.java
@@ -18,6 +18,7 @@ import io.strimzi.api.kafka.model.connect.build.JarArtifactBuilder;
 import io.strimzi.api.kafka.model.connect.build.PluginBuilder;
 import io.strimzi.operator.cluster.KafkaVersionTestUtils;
+import
io.strimzi.operator.common.Reconciliation; import io.strimzi.operator.common.model.Labels; import io.strimzi.test.TestUtils; import io.strimzi.test.annotations.ParallelSuite; @@ -75,7 +76,7 @@ public void testFromCrd() { .endSpec() .build(); - KafkaConnectBuild.fromCrd(kc, VERSIONS); + KafkaConnectBuild.fromCrd(new Reconciliation("test", kc.getKind(), kc.getMetadata().getNamespace(), kc.getMetadata().getName()), kc, VERSIONS); } @ParallelTest @@ -97,7 +98,7 @@ public void testValidationPluginsExist() { .build(); assertThrows(InvalidResourceException.class, () -> { - KafkaConnectBuild.fromCrd(kc, VERSIONS); + KafkaConnectBuild.fromCrd(new Reconciliation("test", kc.getKind(), kc.getMetadata().getNamespace(), kc.getMetadata().getName()), kc, VERSIONS); }); } @@ -121,7 +122,7 @@ public void testValidationArtifactsExist() { .build(); assertThrows(InvalidResourceException.class, () -> { - KafkaConnectBuild.fromCrd(kc, VERSIONS); + KafkaConnectBuild.fromCrd(new Reconciliation("test", kc.getKind(), kc.getMetadata().getNamespace(), kc.getMetadata().getName()), kc, VERSIONS); }); } @@ -146,7 +147,7 @@ public void testValidationUniqueNames() { .build(); assertThrows(InvalidResourceException.class, () -> { - KafkaConnectBuild.fromCrd(kc, VERSIONS); + KafkaConnectBuild.fromCrd(new Reconciliation("test", kc.getKind(), kc.getMetadata().getNamespace(), kc.getMetadata().getName()), kc, VERSIONS); }); } @@ -179,7 +180,7 @@ public void testDeployment() { .endSpec() .build(); - KafkaConnectBuild build = KafkaConnectBuild.fromCrd(kc, VERSIONS); + KafkaConnectBuild build = KafkaConnectBuild.fromCrd(new Reconciliation("test", kc.getKind(), kc.getMetadata().getNamespace(), kc.getMetadata().getName()), kc, VERSIONS); Pod pod = build.generateBuilderPod(true, ImagePullPolicy.IFNOTPRESENT, null, null); assertThat(pod.getMetadata().getName(), is(KafkaConnectResources.buildPodName(cluster))); @@ -238,7 +239,7 @@ public void testDeploymentWithoutPushSecret() { .endSpec() .build(); - KafkaConnectBuild build = KafkaConnectBuild.fromCrd(kc, VERSIONS); + KafkaConnectBuild build = KafkaConnectBuild.fromCrd(new Reconciliation("test", kc.getKind(), kc.getMetadata().getNamespace(), kc.getMetadata().getName()), kc, VERSIONS); Pod pod = build.generateBuilderPod(true, ImagePullPolicy.IFNOTPRESENT, null, null); assertThat(pod.getSpec().getVolumes().size(), is(2)); @@ -274,7 +275,7 @@ public void testConfigMap() { .endSpec() .build(); - KafkaConnectBuild build = KafkaConnectBuild.fromCrd(kc, VERSIONS); + KafkaConnectBuild build = KafkaConnectBuild.fromCrd(new Reconciliation("test", kc.getKind(), kc.getMetadata().getNamespace(), kc.getMetadata().getName()), kc, VERSIONS); KafkaConnectDockerfile dockerfile = new KafkaConnectDockerfile("my-image:latest", kc.getSpec().getBuild()); ConfigMap cm = build.generateDockerfileConfigMap(dockerfile); @@ -315,7 +316,7 @@ public void testBuildconfigWithDockerOutput() { .endSpec() .build(); - KafkaConnectBuild build = KafkaConnectBuild.fromCrd(kc, VERSIONS); + KafkaConnectBuild build = KafkaConnectBuild.fromCrd(new Reconciliation("test", kc.getKind(), kc.getMetadata().getNamespace(), kc.getMetadata().getName()), kc, VERSIONS); KafkaConnectDockerfile dockerfile = new KafkaConnectDockerfile("my-image:latest", kc.getSpec().getBuild()); BuildConfig bc = build.generateBuildConfig(dockerfile); @@ -360,7 +361,7 @@ public void testBuildconfigWithImageStreamOutput() { .endSpec() .build(); - KafkaConnectBuild build = KafkaConnectBuild.fromCrd(kc, VERSIONS); + KafkaConnectBuild build = 
KafkaConnectBuild.fromCrd(new Reconciliation("test", kc.getKind(), kc.getMetadata().getNamespace(), kc.getMetadata().getName()), kc, VERSIONS); KafkaConnectDockerfile dockerfile = new KafkaConnectDockerfile("my-image:latest", kc.getSpec().getBuild()); BuildConfig bc = build.generateBuildConfig(dockerfile); @@ -438,7 +439,7 @@ public void testTemplate() { .endSpec() .build(); - KafkaConnectBuild build = KafkaConnectBuild.fromCrd(kc, VERSIONS); + KafkaConnectBuild build = KafkaConnectBuild.fromCrd(new Reconciliation("test", kc.getKind(), kc.getMetadata().getNamespace(), kc.getMetadata().getName()), kc, VERSIONS); Pod pod = build.generateBuilderPod(true, ImagePullPolicy.IFNOTPRESENT, null, null); assertThat(pod.getMetadata().getLabels().entrySet().containsAll(buildPodLabels.entrySet()), is(true)); @@ -485,7 +486,7 @@ public void testValidKanikoOptions() { .endSpec() .build(); - KafkaConnectBuild build = KafkaConnectBuild.fromCrd(kc, VERSIONS); + KafkaConnectBuild build = KafkaConnectBuild.fromCrd(new Reconciliation("test", kc.getKind(), kc.getMetadata().getNamespace(), kc.getMetadata().getName()), kc, VERSIONS); Pod pod = build.generateBuilderPod(true, ImagePullPolicy.IFNOTPRESENT, null, null); assertThat(pod.getSpec().getContainers().get(0).getArgs(), is(expectedArgs)); @@ -512,7 +513,7 @@ public void testInvalidKanikoOptions() { .endSpec() .build(); - InvalidResourceException e = assertThrows(InvalidResourceException.class, () -> KafkaConnectBuild.fromCrd(kc, VERSIONS)); + InvalidResourceException e = assertThrows(InvalidResourceException.class, () -> KafkaConnectBuild.fromCrd(new Reconciliation("test", kc.getKind(), kc.getMetadata().getNamespace(), kc.getMetadata().getName()), kc, VERSIONS)); assertThat(e.getMessage(), containsString(".spec.build.additionalKanikoOptions contains forbidden options: [--reproducible-something, --build-arg, --digest-file]")); } diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaConnectClusterTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaConnectClusterTest.java index 3ab8324864..2eece06c7c 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaConnectClusterTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaConnectClusterTest.java @@ -61,6 +61,7 @@ import io.strimzi.operator.cluster.KafkaVersionTestUtils; import io.strimzi.operator.cluster.ResourceUtils; import io.strimzi.operator.common.MetricsAndLogging; +import io.strimzi.operator.common.Reconciliation; import io.strimzi.operator.common.model.Labels; import io.strimzi.operator.common.model.OrderedProperties; import io.strimzi.test.TestUtils; @@ -140,7 +141,7 @@ public class KafkaConnectClusterTest { .endSpec() .build(); - private final KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(resourceWithMetrics, VERSIONS); + private final KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resourceWithMetrics, VERSIONS); @ParallelTest public void testMetricsConfigMap() { @@ -184,7 +185,7 @@ protected List getExpectedEnvVars() { @ParallelTest public void testDefaultValues() { - KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(ResourceUtils.createEmptyKafkaConnect(namespace, cluster), VERSIONS); + KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, ResourceUtils.createEmptyKafkaConnect(namespace, cluster), VERSIONS); assertThat(kc.image, is(KafkaVersionTestUtils.DEFAULT_KAFKA_CONNECT_IMAGE)); 
assertThat(kc.replicas, is(KafkaConnectCluster.DEFAULT_REPLICAS)); @@ -237,7 +238,7 @@ public void testGenerateServiceWithoutMetrics() { .withMetrics(null) .endSpec() .build(); - KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(resource, VERSIONS); + KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); Service svc = kc.generateService(); assertThat(svc.getSpec().getType(), is("ClusterIP")); @@ -272,7 +273,7 @@ public void testGenerateDeploymentWithRack() { .endSpec() .build(); - KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(resource, VERSIONS); + KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); Deployment deployment = kc.generateDeployment(new HashMap<>(), false, null, null); checkDeployment(deployment, resource); @@ -280,14 +281,14 @@ public void testGenerateDeploymentWithRack() { @ParallelTest public void withAffinity() throws IOException { - ResourceTester resourceTester = new ResourceTester<>(KafkaConnect.class, VERSIONS, KafkaConnectCluster::fromCrd, this.getClass().getSimpleName() + ".withAffinity"); + ResourceTester resourceTester = new ResourceTester<>(KafkaConnect.class, VERSIONS, (connect, lookup) -> KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, connect, lookup), this.getClass().getSimpleName() + ".withAffinity"); resourceTester .assertDesiredResource("-Deployment.yaml", kcc -> kcc.generateDeployment(new HashMap(), true, null, null).getSpec().getTemplate().getSpec().getAffinity()); } @ParallelTest public void withTolerations() throws IOException { - ResourceTester resourceTester = new ResourceTester<>(KafkaConnect.class, VERSIONS, KafkaConnectCluster::fromCrd, this.getClass().getSimpleName() + ".withTolerations"); + ResourceTester resourceTester = new ResourceTester<>(KafkaConnect.class, VERSIONS, (connect, lookup) -> KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, connect, lookup), this.getClass().getSimpleName() + ".withTolerations"); resourceTester .assertDesiredResource("-Deployment.yaml", kcc -> kcc.generateDeployment(new HashMap(), true, null, null).getSpec().getTemplate().getSpec().getTolerations()); } @@ -303,7 +304,7 @@ public void testGenerateDeploymentWithTls() { .endTls() .endSpec() .build(); - KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(resource, VERSIONS); + KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); Deployment dep = kc.generateDeployment(emptyMap(), true, null, null); assertThat(dep.getSpec().getTemplate().getSpec().getVolumes().get(2).getName(), is("my-secret")); @@ -336,7 +337,7 @@ public void testGenerateDeploymentWithTlsAuth() { .build()) .endSpec() .build(); - KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(resource, VERSIONS); + KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); Deployment dep = kc.generateDeployment(emptyMap(), true, null, null); assertThat(dep.getSpec().getTemplate().getSpec().getVolumes().get(3).getName(), is("user-secret")); @@ -367,7 +368,7 @@ public void testGenerateDeploymentWithTlsSameSecret() { .build()) .endSpec() .build(); - KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(resource, VERSIONS); + KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); Deployment dep = kc.generateDeployment(emptyMap(), true, null, null); // 3 = 1 temp volume + 1 volume from logging/metrics 
+ just 1 from above certs Secret @@ -388,7 +389,7 @@ public void testGenerateDeploymentWithScramSha512Auth() { .endKafkaClientAuthenticationScramSha512() .endSpec() .build(); - KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(resource, VERSIONS); + KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); Deployment dep = kc.generateDeployment(emptyMap(), true, null, null); assertThat(dep.getSpec().getTemplate().getSpec().getVolumes().get(2).getName(), is("user1-secret")); @@ -423,7 +424,7 @@ public void testGenerateDeploymentWithScramSha512AuthAndTLSSameSecret() { .endKafkaClientAuthenticationScramSha512() .endSpec() .build(); - KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(resource, VERSIONS); + KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); Deployment dep = kc.generateDeployment(emptyMap(), true, null, null); assertThat(dep.getSpec().getTemplate().getSpec().getVolumes().size(), is(3)); @@ -462,7 +463,7 @@ public void testGenerateDeploymentWithPlainAuth() { .endKafkaClientAuthenticationPlain() .endSpec() .build(); - KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(resource, VERSIONS); + KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); Deployment dep = kc.generateDeployment(emptyMap(), true, null, null); assertThat(dep.getSpec().getTemplate().getSpec().getVolumes().get(2).getName(), is("user1-secret")); @@ -497,7 +498,7 @@ public void testGenerateDeploymentWithPlainAuthAndTLSSameSecret() { .endKafkaClientAuthenticationPlain() .endSpec() .build(); - KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(resource, VERSIONS); + KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); Deployment dep = kc.generateDeployment(emptyMap(), true, null, null); assertThat(dep.getSpec().getTemplate().getSpec().getVolumes().size(), is(3)); @@ -621,7 +622,7 @@ public void testTemplate() { .endTemplate() .endSpec() .build(); - KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(resource, VERSIONS); + KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); // Check Deployment Deployment dep = kc.generateDeployment(emptyMap(), true, null, null); @@ -679,7 +680,7 @@ public void testExternalConfigurationSecretEnvs() { .endSpec() .build(); - KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(resource, VERSIONS); + KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); // Check Deployment Deployment dep = kc.generateDeployment(emptyMap(), true, null, null); @@ -706,7 +707,7 @@ public void testExternalConfigurationConfigEnvs() { .endExternalConfiguration() .endSpec() .build(); - KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(resource, VERSIONS); + KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); // Check Deployment Deployment dep = kc.generateDeployment(emptyMap(), true, null, null); @@ -731,7 +732,7 @@ public void testExternalConfigurationSecretVolumes() { .endExternalConfiguration() .endSpec() .build(); - KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(resource, VERSIONS); + KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); // Check Deployment Deployment dep = kc.generateDeployment(emptyMap(), true, null, null); @@ -762,7 
+763,7 @@ public void testExternalConfigurationSecretVolumesWithDots() { .endExternalConfiguration() .endSpec() .build(); - KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(resource, VERSIONS); + KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); // Check Deployment Deployment dep = kc.generateDeployment(emptyMap(), true, null, null); @@ -793,7 +794,7 @@ public void testExternalConfigurationConfigVolumes() { .endExternalConfiguration() .endSpec() .build(); - KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(resource, VERSIONS); + KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); // Check Deployment Deployment dep = kc.generateDeployment(emptyMap(), true, null, null); @@ -824,7 +825,7 @@ public void testExternalConfigurationConfigVolumesWithDots() { .endExternalConfiguration() .endSpec() .build(); - KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(resource, VERSIONS); + KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); // Check Deployment Deployment dep = kc.generateDeployment(emptyMap(), true, null, null); @@ -856,7 +857,7 @@ public void testExternalConfigurationInvalidVolumes() { .endExternalConfiguration() .endSpec() .build(); - KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(resource, VERSIONS); + KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); // Check Deployment Deployment dep = kc.generateDeployment(emptyMap(), true, null, null); @@ -865,7 +866,7 @@ public void testExternalConfigurationInvalidVolumes() { assertThat(selected.size(), is(0)); List volumeMounths = dep.getSpec().getTemplate().getSpec().getContainers().get(0).getVolumeMounts(); - List selectedVolumeMounths = volumeMounths.stream().filter(vol -> vol.getName().equals(KafkaConnectCluster.EXTERNAL_CONFIGURATION_VOLUME_NAME_PREFIX + "my-volume")).collect(Collectors.toList()); + List selectedVolumeMounts = volumeMounths.stream().filter(vol -> vol.getName().equals(KafkaConnectCluster.EXTERNAL_CONFIGURATION_VOLUME_NAME_PREFIX + "my-volume")).collect(Collectors.toList()); assertThat(selected.size(), is(0)); } @@ -882,7 +883,7 @@ public void testNoExternalConfigurationVolumes() { .endExternalConfiguration() .endSpec() .build(); - KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(resource, VERSIONS); + KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); // Check Deployment Deployment dep = kc.generateDeployment(emptyMap(), true, null, null); @@ -912,7 +913,7 @@ public void testInvalidExternalConfigurationEnvs() { .endExternalConfiguration() .endSpec() .build(); - KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(resource, VERSIONS); + KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); // Check Deployment Deployment dep = kc.generateDeployment(emptyMap(), true, null, null); @@ -936,7 +937,7 @@ public void testNoExternalConfigurationEnvs() { .endExternalConfiguration() .endSpec() .build(); - KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(resource, VERSIONS); + KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); // Check Deployment Deployment dep = kc.generateDeployment(emptyMap(), true, null, null); @@ -956,7 +957,7 @@ public void testGracePeriod() { .endTemplate() .endSpec() .build(); - 
KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(resource, VERSIONS); + KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); Deployment dep = kc.generateDeployment(emptyMap(), true, null, null); assertThat(dep.getSpec().getTemplate().getSpec().getTerminationGracePeriodSeconds(), is(Long.valueOf(123))); @@ -965,7 +966,7 @@ public void testGracePeriod() { @ParallelTest public void testDefaultGracePeriod() { KafkaConnect resource = new KafkaConnectBuilder(this.resource).build(); - KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(resource, VERSIONS); + KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); Deployment dep = kc.generateDeployment(emptyMap(), true, null, null); assertThat(dep.getSpec().getTemplate().getSpec().getTerminationGracePeriodSeconds(), is(Long.valueOf(30))); @@ -985,7 +986,7 @@ public void testImagePullSecrets() { .endTemplate() .endSpec() .build(); - KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(resource, VERSIONS); + KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); Deployment dep = kc.generateDeployment(emptyMap(), true, null, null); assertThat(dep.getSpec().getTemplate().getSpec().getImagePullSecrets().size(), is(2)); @@ -1002,7 +1003,7 @@ public void testImagePullSecretsCO() { secrets.add(secret1); secrets.add(secret2); - KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(this.resource, VERSIONS); + KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, this.resource, VERSIONS); Deployment dep = kc.generateDeployment(emptyMap(), true, null, secrets); assertThat(dep.getSpec().getTemplate().getSpec().getImagePullSecrets().size(), is(2)); @@ -1024,7 +1025,7 @@ public void testImagePullSecretsBoth() { .endTemplate() .endSpec() .build(); - KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(resource, VERSIONS); + KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); Deployment dep = kc.generateDeployment(emptyMap(), true, null, singletonList(secret1)); assertThat(dep.getSpec().getTemplate().getSpec().getImagePullSecrets().size(), is(1)); @@ -1035,7 +1036,7 @@ public void testImagePullSecretsBoth() { @ParallelTest public void testDefaultImagePullSecrets() { KafkaConnect resource = new KafkaConnectBuilder(this.resource).build(); - KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(resource, VERSIONS); + KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); Deployment dep = kc.generateDeployment(emptyMap(), true, null, null); assertThat(dep.getSpec().getTemplate().getSpec().getImagePullSecrets(), is(nullValue())); @@ -1052,7 +1053,7 @@ public void testSecurityContext() { .endTemplate() .endSpec() .build(); - KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(resource, VERSIONS); + KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); Deployment dep = kc.generateDeployment(emptyMap(), true, null, null); assertThat(dep.getSpec().getTemplate().getSpec().getSecurityContext(), is(notNullValue())); @@ -1064,7 +1065,7 @@ public void testSecurityContext() { @ParallelTest public void testDefaultSecurityContext() { KafkaConnect resource = new KafkaConnectBuilder(this.resource).build(); - KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(resource, VERSIONS); + KafkaConnectCluster kc = 
KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); Deployment dep = kc.generateDeployment(emptyMap(), true, null, null); assertThat(dep.getSpec().getTemplate().getSpec().getSecurityContext(), is(nullValue())); @@ -1081,7 +1082,7 @@ public void testPodDisruptionBudget() { .endTemplate() .endSpec() .build(); - KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(resource, VERSIONS); + KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); PodDisruptionBudget pdb = kc.generatePodDisruptionBudget(); assertThat(pdb.getSpec().getMaxUnavailable(), is(new IntOrString(2))); @@ -1090,7 +1091,7 @@ public void testPodDisruptionBudget() { @ParallelTest public void testDefaultPodDisruptionBudget() { KafkaConnect resource = new KafkaConnectBuilder(this.resource).build(); - KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(resource, VERSIONS); + KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); PodDisruptionBudget pdb = kc.generatePodDisruptionBudget(); assertThat(pdb.getSpec().getMaxUnavailable(), is(new IntOrString(1))); @@ -1098,7 +1099,7 @@ public void testDefaultPodDisruptionBudget() { @ParallelTest public void testImagePullPolicy() { - KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(resource, VERSIONS); + KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); Deployment dep = kc.generateDeployment(Collections.EMPTY_MAP, true, ImagePullPolicy.ALWAYS, null); assertThat(dep.getSpec().getTemplate().getSpec().getContainers().get(0).getImagePullPolicy(), is(ImagePullPolicy.ALWAYS.toString())); @@ -1122,7 +1123,7 @@ public void testResources() { .withResources(new ResourceRequirementsBuilder().withLimits(limits).withRequests(requests).build()) .endSpec() .build(); - KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(resource, VERSIONS); + KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); Deployment dep = kc.generateDeployment(Collections.EMPTY_MAP, true, null, null); Container cont = dep.getSpec().getTemplate().getSpec().getContainers().get(0); @@ -1145,7 +1146,7 @@ public void testJvmOptions() { .endJvmOptions() .endSpec() .build(); - KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(resource, VERSIONS); + KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); Deployment dep = kc.generateDeployment(Collections.EMPTY_MAP, true, null, null); Container cont = dep.getSpec().getTemplate().getSpec().getContainers().get(0); @@ -1184,7 +1185,7 @@ public void testKafkaConnectContainerEnvVars() { .endSpec() .build(); - List kafkaEnvVars = KafkaConnectCluster.fromCrd(resource, VERSIONS).getEnvVars(); + List kafkaEnvVars = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS).getEnvVars(); assertThat("Failed to correctly set container environment variable: " + testEnvOneKey, kafkaEnvVars.stream().filter(env -> testEnvOneKey.equals(env.getName())) @@ -1222,7 +1223,7 @@ public void testKafkaContainerEnvVarsConflict() { .endSpec() .build(); - List kafkaEnvVars = KafkaConnectCluster.fromCrd(resource, VERSIONS).getEnvVars(); + List kafkaEnvVars = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS).getEnvVars(); assertThat("Failed to prevent over writing existing container environment variable: " + testEnvOneKey, 
kafkaEnvVars.stream().filter(env -> testEnvOneKey.equals(env.getName())) @@ -1255,7 +1256,7 @@ public void testKafkaConnectContainerSecurityContext() { .endSpec() .build(); - KafkaConnectCluster kcc = KafkaConnectCluster.fromCrd(resource, VERSIONS); + KafkaConnectCluster kcc = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); Deployment deployment = kcc.generateDeployment(null, false, null, null); assertThat(deployment.getSpec().getTemplate().getSpec().getContainers(), @@ -1273,7 +1274,7 @@ public void testTracing() { .endJaegerTracing() .endSpec() .build(); - KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(resource, VERSIONS); + KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); Deployment dep = kc.generateDeployment(Collections.EMPTY_MAP, true, null, null); Container cont = dep.getSpec().getTemplate().getSpec().getContainers().get(0); @@ -1296,7 +1297,7 @@ public void testGenerateDeploymentWithOAuthWithAccessToken() { .endSpec() .build(); - KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(resource, VERSIONS); + KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); Deployment dep = kc.generateDeployment(emptyMap(), true, null, null); Container cont = dep.getSpec().getTemplate().getSpec().getContainers().get(0); @@ -1322,7 +1323,7 @@ public void testGenerateDeploymentWithOAuthWithRefreshToken() { .endSpec() .build(); - KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(resource, VERSIONS); + KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); Deployment dep = kc.generateDeployment(emptyMap(), true, null, null); Container cont = dep.getSpec().getTemplate().getSpec().getContainers().get(0); @@ -1349,7 +1350,7 @@ public void testGenerateDeploymentWithOAuthWithClientSecret() { .endSpec() .build(); - KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(resource, VERSIONS); + KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); Deployment dep = kc.generateDeployment(emptyMap(), true, null, null); Container cont = dep.getSpec().getTemplate().getSpec().getContainers().get(0); @@ -1373,7 +1374,7 @@ public void testGenerateDeploymentWithOAuthWithMissingClientSecret() { .endSpec() .build(); - KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(resource, VERSIONS); + KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); }); } @@ -1393,7 +1394,7 @@ public void testGenerateDeploymentWithOAuthWithMissingUri() { .endSpec() .build(); - KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(resource, VERSIONS); + KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); }); } @@ -1430,7 +1431,7 @@ public void testGenerateDeploymentWithOAuthWithTls() { .endSpec() .build(); - KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(resource, VERSIONS); + KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); Deployment dep = kc.generateDeployment(emptyMap(), true, null, null); Container cont = dep.getSpec().getTemplate().getSpec().getContainers().get(0); @@ -1463,7 +1464,7 @@ public void testGenerateDeploymentWithOAuthWithTls() { public void testNetworkPolicyWithConnectorOperator() { KafkaConnect resource = new KafkaConnectBuilder(this.resourceWithMetrics) .build(); - 
KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(resource, VERSIONS); + KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); NetworkPolicy np = kc.generateNetworkPolicy(true, "operator-namespace", null); @@ -1485,7 +1486,7 @@ public void testNetworkPolicyWithConnectorOperator() { public void testNetworkPolicyWithConnectorOperatorSameNamespace() { KafkaConnect resource = new KafkaConnectBuilder(this.resourceWithMetrics) .build(); - KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(resource, VERSIONS); + KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); NetworkPolicy np = kc.generateNetworkPolicy(true, namespace, null); @@ -1507,7 +1508,7 @@ public void testNetworkPolicyWithConnectorOperatorSameNamespace() { public void testNetworkPolicyWithConnectorOperatorWithNamespaceLabels() { KafkaConnect resource = new KafkaConnectBuilder(this.resourceWithMetrics) .build(); - KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(resource, VERSIONS); + KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); NetworkPolicy np = kc.generateNetworkPolicy(true, "operator-namespace", Labels.fromMap(Collections.singletonMap("nsLabelKey", "nsLabelValue"))); @@ -1529,7 +1530,7 @@ public void testNetworkPolicyWithConnectorOperatorWithNamespaceLabels() { public void testNetworkPolicyWithoutConnectorOperator() { KafkaConnect resource = new KafkaConnectBuilder(this.resource) .build(); - KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(resource, VERSIONS); + KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); assertThat(kc.generateNetworkPolicy(false, null, null), is(nullValue())); } @@ -1547,7 +1548,7 @@ public void testClusterRoleBindingRack() { .endSpec() .build(); - KafkaConnectCluster kafkaConnectCluster = KafkaConnectCluster.fromCrd(kafkaConnect, VERSIONS); + KafkaConnectCluster kafkaConnectCluster = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaConnect, VERSIONS); ClusterRoleBinding crb = kafkaConnectCluster.generateClusterRoleBinding(); assertThat(crb.getMetadata().getName(), is(KafkaConnectResources.initContainerClusterRoleBindingName(cluster, testNamespace))); @@ -1566,7 +1567,7 @@ public void testNullClusterRoleBinding() { .endMetadata() .build(); - KafkaConnectCluster kafkaConnectCluster = KafkaConnectCluster.fromCrd(kafkaConnect, VERSIONS); + KafkaConnectCluster kafkaConnectCluster = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaConnect, VERSIONS); ClusterRoleBinding crb = kafkaConnectCluster.generateClusterRoleBinding(); assertThat(crb, is(nullValue())); @@ -1637,7 +1638,7 @@ public void testMetricsParsingFromConfigMap() { .endSpec() .build(); - KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(kafkaConnect, VERSIONS); + KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaConnect, VERSIONS); assertThat(kc.isMetricsEnabled(), is(true)); assertThat(kc.getMetricsConfigInCm(), is(metrics)); @@ -1645,7 +1646,7 @@ public void testMetricsParsingFromConfigMap() { @ParallelTest public void testMetricsParsingNoMetrics() { - KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(this.resource, VERSIONS); + KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, this.resource, VERSIONS); assertThat(kc.isMetricsEnabled(), is(false)); 
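// Illustrative sketch, not part of the patch: the KafkaConnectClusterTest hunks above all apply the
// same mechanical change. fromCrd() now takes a Reconciliation as its first argument, and the tests
// pass the shared Reconciliation.DUMMY_RECONCILIATION placeholder. Condensed, with identifiers as in
// the diff (generic parameters on ResourceTester are assumed from context, not shown in the hunks):
KafkaConnectCluster kc = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS);
// ResourceTester previously accepted the two-argument method reference KafkaConnectCluster::fromCrd;
// that reference no longer matches the widened signature, so an explicit lambda supplies the placeholder:
ResourceTester<KafkaConnect, KafkaConnectCluster> resourceTester = new ResourceTester<>(KafkaConnect.class, VERSIONS,
        (connect, lookup) -> KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, connect, lookup),
        "KafkaConnectClusterTest.withAffinity");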
assertThat(kc.getMetricsConfigInCm(), is(nullValue())); diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaConnectS2IClusterTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaConnectS2IClusterTest.java index 2e920b2767..4fe4e379bd 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaConnectS2IClusterTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaConnectS2IClusterTest.java @@ -63,6 +63,7 @@ import io.strimzi.operator.cluster.KafkaVersionTestUtils; import io.strimzi.operator.cluster.ResourceUtils; import io.strimzi.operator.common.MetricsAndLogging; +import io.strimzi.operator.common.Reconciliation; import io.strimzi.operator.common.model.Labels; import io.strimzi.operator.common.model.OrderedProperties; import io.strimzi.test.TestUtils; @@ -147,7 +148,7 @@ public class KafkaConnectS2IClusterTest { .endSpec() .build(); - private final KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(resourceWithMetrics, VERSIONS); + private final KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resourceWithMetrics, VERSIONS); @ParallelTest public void testMetricsConfigMap() { @@ -190,7 +191,7 @@ protected List getExpectedEnvVars() { @ParallelTest public void testDefaultValues() { - KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(ResourceUtils.createEmptyKafkaConnectS2I(namespace, cluster), VERSIONS); + KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, ResourceUtils.createEmptyKafkaConnectS2I(namespace, cluster), VERSIONS); assertThat(kc.image, is(KafkaConnectS2IResources.deploymentName(cluster) + ":latest")); assertThat(kc.replicas, is(KafkaConnectS2ICluster.DEFAULT_REPLICAS)); @@ -247,7 +248,7 @@ public void testGenerateServiceWithoutMetrics() { .withMetrics(null) .endSpec() .build(); - KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(resource, VERSIONS); + KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); Service svc = kc.generateService(); assertThat(svc.getSpec().getType(), is("ClusterIP")); @@ -350,7 +351,7 @@ public void testGenerateSourceImageStream() { @ParallelTest public void testInsecureSourceRepo() { - KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(ResourceUtils.createKafkaConnectS2I(namespace, cluster, replicas, image, + KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, ResourceUtils.createKafkaConnectS2I(namespace, cluster, replicas, image, healthDelay, healthTimeout, jmxMetricsConfig, metricsCmJson, configurationJson, true, bootstrapServers, buildResourceRequirements), VERSIONS); assertThat(kc.isInsecureSourceRepository(), is(true)); @@ -383,17 +384,19 @@ public void testGenerateTargetImageStream() { @ParallelTest public void withAffinity() throws IOException { ResourceTester resourceTester = new ResourceTester<>(KafkaConnectS2I.class, - x -> KafkaConnectS2ICluster.fromCrd(x, VERSIONS), this.getClass().getSimpleName() + ".withAffinity"); + x -> KafkaConnectS2ICluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, x, VERSIONS), this.getClass().getSimpleName() + ".withAffinity"); resourceTester - .assertDesiredResource("-DeploymentConfig.yaml", kcc -> kcc.generateDeploymentConfig(Collections.EMPTY_MAP, true, null, null).getSpec().getTemplate().getSpec().getAffinity()); + .assertDesiredResource("-DeploymentConfig.yaml", kcc 
-> kcc.generateDeploymentConfig( + Collections.EMPTY_MAP, true, null, null).getSpec().getTemplate().getSpec().getAffinity()); } @ParallelTest public void withTolerations() throws IOException { ResourceTester resourceTester = new ResourceTester<>(KafkaConnectS2I.class, - x -> KafkaConnectS2ICluster.fromCrd(x, VERSIONS), this.getClass().getSimpleName() + ".withTolerations"); + x -> KafkaConnectS2ICluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, x, VERSIONS), this.getClass().getSimpleName() + ".withTolerations"); resourceTester - .assertDesiredResource("-DeploymentConfig.yaml", kcc -> kcc.generateDeploymentConfig(Collections.EMPTY_MAP, true, null, null).getSpec().getTemplate().getSpec().getTolerations()); + .assertDesiredResource("-DeploymentConfig.yaml", kcc -> kcc.generateDeploymentConfig( + Collections.EMPTY_MAP, true, null, null).getSpec().getTemplate().getSpec().getTolerations()); } @ParallelTest @@ -407,8 +410,9 @@ public void testGenerateDeploymentConfigWithTls() { .endTls() .endSpec() .build(); - KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(resource, VERSIONS); - DeploymentConfig dep = kc.generateDeploymentConfig(Collections.EMPTY_MAP, true, null, null); + KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); + DeploymentConfig dep = kc.generateDeploymentConfig( + Collections.EMPTY_MAP, true, null, null); assertThat(dep.getSpec().getTemplate().getSpec().getVolumes().get(1).getName(), is("my-secret")); assertThat(dep.getSpec().getTemplate().getSpec().getVolumes().get(2).getName(), is("my-another-secret")); @@ -440,8 +444,9 @@ public void testGenerateDeploymentConfigWithTlsAuth() { .build()) .endSpec() .build(); - KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(resource, VERSIONS); - DeploymentConfig dep = kc.generateDeploymentConfig(Collections.EMPTY_MAP, true, null, null); + KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); + DeploymentConfig dep = kc.generateDeploymentConfig( + Collections.EMPTY_MAP, true, null, null); assertThat(dep.getSpec().getTemplate().getSpec().getVolumes().get(2).getName(), is("user-secret")); @@ -471,8 +476,9 @@ public void testGenerateDeploymentWithTlsSameSecret() { .build()) .endSpec() .build(); - KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(resource, VERSIONS); - DeploymentConfig dep = kc.generateDeploymentConfig(Collections.EMPTY_MAP, true, null, null); + KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); + DeploymentConfig dep = kc.generateDeploymentConfig( + Collections.EMPTY_MAP, true, null, null); // 2 = 1 volume from logging/metrics + just 1 from above certs Secret assertThat(dep.getSpec().getTemplate().getSpec().getVolumes().size(), is(2)); @@ -494,8 +500,9 @@ public void testGenerateDeploymentWithScramSha512Auth() { ) .endSpec() .build(); - KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(resource, VERSIONS); - DeploymentConfig dep = kc.generateDeploymentConfig(Collections.EMPTY_MAP, true, null, null); + KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); + DeploymentConfig dep = kc.generateDeploymentConfig( + Collections.EMPTY_MAP, true, null, null); assertThat(dep.getSpec().getTemplate().getSpec().getVolumes().get(1).getName(), is("user1-secret")); @@ -610,10 +617,11 @@ public void testTemplate() { .endTemplate() .endSpec() .build(); - 
KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(resource, VERSIONS); + KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); // Check Deployment - DeploymentConfig dep = kc.generateDeploymentConfig(Collections.EMPTY_MAP, true, null, null); + DeploymentConfig dep = kc.generateDeploymentConfig( + Collections.EMPTY_MAP, true, null, null); assertThat(dep.getMetadata().getLabels().entrySet().containsAll(expectedDepLabels.entrySet()), is(true)); assertThat(dep.getMetadata().getAnnotations().entrySet().containsAll(depAnots.entrySet()), is(true)); assertThat(dep.getSpec().getTemplate().getSpec().getPriorityClassName(), is("top-priority")); @@ -667,10 +675,11 @@ public void testExternalConfigurationSecretEnvs() { .endExternalConfiguration() .endSpec() .build(); - KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(resource, VERSIONS); + KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); // Check DeploymentConfig - DeploymentConfig dep = kc.generateDeploymentConfig(Collections.EMPTY_MAP, true, null, null); + DeploymentConfig dep = kc.generateDeploymentConfig( + Collections.EMPTY_MAP, true, null, null); List envs = dep.getSpec().getTemplate().getSpec().getContainers().get(0).getEnv(); List selected = envs.stream().filter(var -> var.getName().equals("MY_ENV_VAR")).collect(Collectors.toList()); assertThat(selected.size(), is(1)); @@ -694,10 +703,11 @@ public void testExternalConfigurationConfigEnvs() { .endExternalConfiguration() .endSpec() .build(); - KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(resource, VERSIONS); + KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); // Check DeploymentConfig - DeploymentConfig dep = kc.generateDeploymentConfig(Collections.EMPTY_MAP, true, null, null); + DeploymentConfig dep = kc.generateDeploymentConfig( + Collections.EMPTY_MAP, true, null, null); List envs = dep.getSpec().getTemplate().getSpec().getContainers().get(0).getEnv(); List selected = envs.stream().filter(var -> var.getName().equals("MY_ENV_VAR")).collect(Collectors.toList()); assertThat(selected.size(), is(1)); @@ -719,10 +729,11 @@ public void testExternalConfigurationSecretVolumes() { .endExternalConfiguration() .endSpec() .build(); - KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(resource, VERSIONS); + KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); // Check DeploymentConfig - DeploymentConfig dep = kc.generateDeploymentConfig(Collections.EMPTY_MAP, true, null, null); + DeploymentConfig dep = kc.generateDeploymentConfig( + Collections.EMPTY_MAP, true, null, null); List volumes = dep.getSpec().getTemplate().getSpec().getVolumes(); List selected = volumes.stream().filter(vol -> vol.getName().equals(KafkaConnectCluster.EXTERNAL_CONFIGURATION_VOLUME_NAME_PREFIX + "my-volume")).collect(Collectors.toList()); assertThat(selected.size(), is(1)); @@ -750,10 +761,11 @@ public void testExternalConfigurationSecretVolumesWithDots() { .endExternalConfiguration() .endSpec() .build(); - KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(resource, VERSIONS); + KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); // Check DeploymentConfig - DeploymentConfig dep = kc.generateDeploymentConfig(Collections.EMPTY_MAP, true, null, null); + 
DeploymentConfig dep = kc.generateDeploymentConfig( + Collections.EMPTY_MAP, true, null, null); List volumes = dep.getSpec().getTemplate().getSpec().getVolumes(); List selected = volumes.stream().filter(vol -> vol.getName().startsWith(KafkaConnectCluster.EXTERNAL_CONFIGURATION_VOLUME_NAME_PREFIX + "my-volume")).collect(Collectors.toList()); assertThat(selected.size(), is(1)); @@ -781,10 +793,11 @@ public void testExternalConfigurationConfigVolumes() { .endExternalConfiguration() .endSpec() .build(); - KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(resource, VERSIONS); + KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); // Check DeploymentConfig - DeploymentConfig dep = kc.generateDeploymentConfig(Collections.EMPTY_MAP, true, null, null); + DeploymentConfig dep = kc.generateDeploymentConfig( + Collections.EMPTY_MAP, true, null, null); List volumes = dep.getSpec().getTemplate().getSpec().getVolumes(); List selected = volumes.stream().filter(vol -> vol.getName().equals(KafkaConnectCluster.EXTERNAL_CONFIGURATION_VOLUME_NAME_PREFIX + "my-volume")).collect(Collectors.toList()); assertThat(selected.size(), is(1)); @@ -812,10 +825,11 @@ public void testExternalConfigurationConfigVolumesWithDots() { .endExternalConfiguration() .endSpec() .build(); - KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(resource, VERSIONS); + KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); // Check DeploymentConfig - DeploymentConfig dep = kc.generateDeploymentConfig(Collections.EMPTY_MAP, true, null, null); + DeploymentConfig dep = kc.generateDeploymentConfig( + Collections.EMPTY_MAP, true, null, null); List volumes = dep.getSpec().getTemplate().getSpec().getVolumes(); List selected = volumes.stream().filter(vol -> vol.getName().startsWith(KafkaConnectCluster.EXTERNAL_CONFIGURATION_VOLUME_NAME_PREFIX + "my-volume")).collect(Collectors.toList()); assertThat(selected.size(), is(1)); @@ -844,10 +858,11 @@ public void testExternalConfigurationInvalidVolumes() { .endExternalConfiguration() .endSpec() .build(); - KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(resource, VERSIONS); + KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); // Check Deployment - DeploymentConfig dep = kc.generateDeploymentConfig(Collections.EMPTY_MAP, true, null, null); + DeploymentConfig dep = kc.generateDeploymentConfig( + Collections.EMPTY_MAP, true, null, null); List volumes = dep.getSpec().getTemplate().getSpec().getVolumes(); List selected = volumes.stream().filter(vol -> vol.getName().equals(KafkaConnectCluster.EXTERNAL_CONFIGURATION_VOLUME_NAME_PREFIX + "my-volume")).collect(Collectors.toList()); assertThat(selected.size(), is(0)); @@ -870,10 +885,11 @@ public void testNoExternalConfigurationVolumes() { .endExternalConfiguration() .endSpec() .build(); - KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(resource, VERSIONS); + KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); // Check Deployment - DeploymentConfig dep = kc.generateDeploymentConfig(Collections.EMPTY_MAP, true, null, null); + DeploymentConfig dep = kc.generateDeploymentConfig( + Collections.EMPTY_MAP, true, null, null); List volumes = dep.getSpec().getTemplate().getSpec().getVolumes(); List selected = volumes.stream().filter(vol -> 
vol.getName().equals(KafkaConnectCluster.EXTERNAL_CONFIGURATION_VOLUME_NAME_PREFIX + "my-volume")).collect(Collectors.toList()); assertThat(selected.size(), is(0)); @@ -900,10 +916,11 @@ public void testInvalidExternalConfigurationEnvs() { .endExternalConfiguration() .endSpec() .build(); - KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(resource, VERSIONS); + KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); // Check Deployment - DeploymentConfig dep = kc.generateDeploymentConfig(Collections.EMPTY_MAP, true, null, null); + DeploymentConfig dep = kc.generateDeploymentConfig( + Collections.EMPTY_MAP, true, null, null); List envs = dep.getSpec().getTemplate().getSpec().getContainers().get(0).getEnv(); List selected = envs.stream().filter(var -> var.getName().equals("MY_ENV_VAR")).collect(Collectors.toList()); assertThat(selected.size(), is(0)); @@ -924,10 +941,11 @@ public void testNoExternalConfigurationEnvs() { .endExternalConfiguration() .endSpec() .build(); - KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(resource, VERSIONS); + KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); // Check Deployment - DeploymentConfig dep = kc.generateDeploymentConfig(Collections.EMPTY_MAP, true, null, null); + DeploymentConfig dep = kc.generateDeploymentConfig( + Collections.EMPTY_MAP, true, null, null); List envs = dep.getSpec().getTemplate().getSpec().getContainers().get(0).getEnv(); List selected = envs.stream().filter(var -> var.getName().equals("MY_ENV_VAR")).collect(Collectors.toList()); assertThat(selected.size(), is(0)); @@ -944,18 +962,20 @@ public void testGracePeriod() { .endTemplate() .endSpec() .build(); - KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(resource, VERSIONS); + KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); - DeploymentConfig dep = kc.generateDeploymentConfig(Collections.EMPTY_MAP, true, null, null); + DeploymentConfig dep = kc.generateDeploymentConfig( + Collections.EMPTY_MAP, true, null, null); assertThat(dep.getSpec().getTemplate().getSpec().getTerminationGracePeriodSeconds(), is(Long.valueOf(123))); } @ParallelTest public void testDefaultGracePeriod() { KafkaConnectS2I resource = new KafkaConnectS2IBuilder(this.resource).build(); - KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(resource, VERSIONS); + KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); - DeploymentConfig dep = kc.generateDeploymentConfig(Collections.EMPTY_MAP, true, null, null); + DeploymentConfig dep = kc.generateDeploymentConfig( + Collections.EMPTY_MAP, true, null, null); assertThat(dep.getSpec().getTemplate().getSpec().getTerminationGracePeriodSeconds(), is(Long.valueOf(30))); } @@ -973,9 +993,10 @@ public void testImagePullSecrets() { .endTemplate() .endSpec() .build(); - KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(resource, VERSIONS); + KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); - DeploymentConfig dep = kc.generateDeploymentConfig(Collections.EMPTY_MAP, true, null, null); + DeploymentConfig dep = kc.generateDeploymentConfig( + Collections.EMPTY_MAP, true, null, null); assertThat(dep.getSpec().getTemplate().getSpec().getImagePullSecrets().size(), is(2)); 
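// Illustrative sketch, not part of the patch: the KafkaConnectS2IClusterTest hunks follow the same
// pattern, threading Reconciliation.DUMMY_RECONCILIATION through fromCrd(). The placeholder stands in
// wherever a test has no real reconciliation context. Judging from the four-argument constructor
// visible in the KafkaExporterTest hunks further below (trigger, kind, namespace, name), it is
// presumably declared roughly as follows in io.strimzi.operator.common.Reconciliation; the literal
// values here are assumptions, not taken from the patch:
public static final Reconciliation DUMMY_RECONCILIATION =
        new Reconciliation("dummy", "dummy", "dummy", "dummy");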
assertThat(dep.getSpec().getTemplate().getSpec().getImagePullSecrets().contains(secret1), is(true)); assertThat(dep.getSpec().getTemplate().getSpec().getImagePullSecrets().contains(secret2), is(true)); @@ -990,9 +1011,10 @@ public void testImagePullSecretsCO() { secrets.add(secret1); secrets.add(secret2); - KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(this.resource, VERSIONS); + KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, this.resource, VERSIONS); - Deployment dep = kc.generateDeployment(emptyMap(), true, null, secrets); + Deployment dep = kc.generateDeployment( + emptyMap(), true, null, secrets); assertThat(dep.getSpec().getTemplate().getSpec().getImagePullSecrets().size(), is(2)); assertThat(dep.getSpec().getTemplate().getSpec().getImagePullSecrets().contains(secret1), is(true)); assertThat(dep.getSpec().getTemplate().getSpec().getImagePullSecrets().contains(secret2), is(true)); @@ -1012,9 +1034,10 @@ public void testImagePullSecretsBoth() { .endTemplate() .endSpec() .build(); - KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(resource, VERSIONS); + KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); - Deployment dep = kc.generateDeployment(emptyMap(), true, null, singletonList(secret1)); + Deployment dep = kc.generateDeployment( + emptyMap(), true, null, singletonList(secret1)); assertThat(dep.getSpec().getTemplate().getSpec().getImagePullSecrets().size(), is(1)); assertThat(dep.getSpec().getTemplate().getSpec().getImagePullSecrets().contains(secret1), is(false)); assertThat(dep.getSpec().getTemplate().getSpec().getImagePullSecrets().contains(secret2), is(true)); @@ -1023,9 +1046,10 @@ public void testImagePullSecretsBoth() { @ParallelTest public void testDefaultImagePullSecrets() { KafkaConnectS2I resource = new KafkaConnectS2IBuilder(this.resource).build(); - KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(resource, VERSIONS); + KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); - DeploymentConfig dep = kc.generateDeploymentConfig(Collections.EMPTY_MAP, true, null, null); + DeploymentConfig dep = kc.generateDeploymentConfig( + Collections.EMPTY_MAP, true, null, null); assertThat(dep.getSpec().getTemplate().getSpec().getImagePullSecrets(), is(nullValue())); } @@ -1040,9 +1064,10 @@ public void testSecurityContext() { .endTemplate() .endSpec() .build(); - KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(resource, VERSIONS); + KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); - DeploymentConfig dep = kc.generateDeploymentConfig(Collections.EMPTY_MAP, true, null, null); + DeploymentConfig dep = kc.generateDeploymentConfig( + Collections.EMPTY_MAP, true, null, null); assertThat(dep.getSpec().getTemplate().getSpec().getSecurityContext(), is(notNullValue())); assertThat(dep.getSpec().getTemplate().getSpec().getSecurityContext().getFsGroup(), is(Long.valueOf(123))); assertThat(dep.getSpec().getTemplate().getSpec().getSecurityContext().getRunAsGroup(), is(Long.valueOf(456))); @@ -1052,9 +1077,10 @@ public void testSecurityContext() { @ParallelTest public void testDefaultSecurityContext() { KafkaConnectS2I resource = new KafkaConnectS2IBuilder(this.resource).build(); - KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(resource, VERSIONS); + KafkaConnectS2ICluster kc = 
KafkaConnectS2ICluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); - DeploymentConfig dep = kc.generateDeploymentConfig(Collections.EMPTY_MAP, true, null, null); + DeploymentConfig dep = kc.generateDeploymentConfig( + Collections.EMPTY_MAP, true, null, null); assertThat(dep.getSpec().getTemplate().getSpec().getSecurityContext(), is(nullValue())); } @@ -1069,7 +1095,7 @@ public void testPodDisruptionBudget() { .endTemplate() .endSpec() .build(); - KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(resource, VERSIONS); + KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); PodDisruptionBudget pdb = kc.generatePodDisruptionBudget(); assertThat(pdb.getSpec().getMaxUnavailable(), is(new IntOrString(2))); @@ -1078,7 +1104,7 @@ public void testPodDisruptionBudget() { @ParallelTest public void testDefaultPodDisruptionBudget() { KafkaConnectS2I resource = new KafkaConnectS2IBuilder(this.resource).build(); - KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(resource, VERSIONS); + KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); PodDisruptionBudget pdb = kc.generatePodDisruptionBudget(); assertThat(pdb.getSpec().getMaxUnavailable(), is(new IntOrString(1))); @@ -1086,12 +1112,14 @@ public void testDefaultPodDisruptionBudget() { @ParallelTest public void testImagePullPolicy() { - KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(resource, VERSIONS); + KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); - DeploymentConfig dep = kc.generateDeploymentConfig(Collections.EMPTY_MAP, true, ImagePullPolicy.ALWAYS, null); + DeploymentConfig dep = kc.generateDeploymentConfig( + Collections.EMPTY_MAP, true, ImagePullPolicy.ALWAYS, null); assertThat(dep.getSpec().getTemplate().getSpec().getContainers().get(0).getImagePullPolicy(), is(ImagePullPolicy.ALWAYS.toString())); - dep = kc.generateDeploymentConfig(Collections.EMPTY_MAP, true, ImagePullPolicy.IFNOTPRESENT, null); + dep = kc.generateDeploymentConfig( + Collections.EMPTY_MAP, true, ImagePullPolicy.IFNOTPRESENT, null); assertThat(dep.getSpec().getTemplate().getSpec().getContainers().get(0).getImagePullPolicy(), is(ImagePullPolicy.IFNOTPRESENT.toString())); } @@ -1110,9 +1138,10 @@ public void testResources() { .withResources(new ResourceRequirementsBuilder().withLimits(limits).withRequests(requests).build()) .endSpec() .build(); - KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(resource, VERSIONS); + KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); - DeploymentConfig dep = kc.generateDeploymentConfig(Collections.EMPTY_MAP, true, null, null); + DeploymentConfig dep = kc.generateDeploymentConfig( + Collections.EMPTY_MAP, true, null, null); Container cont = dep.getSpec().getTemplate().getSpec().getContainers().get(0); assertThat(cont.getResources().getLimits(), is(limits)); assertThat(cont.getResources().getRequests(), is(requests)); @@ -1133,9 +1162,10 @@ public void testJvmOptions() { .endJvmOptions() .endSpec() .build(); - KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(resource, VERSIONS); + KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); - DeploymentConfig dep = kc.generateDeploymentConfig(Collections.EMPTY_MAP, true, null, null); + DeploymentConfig dep = 
kc.generateDeploymentConfig( + Collections.EMPTY_MAP, true, null, null); Container cont = dep.getSpec().getTemplate().getSpec().getContainers().get(0); assertThat(cont.getEnv().stream().filter(env -> "KAFKA_JVM_PERFORMANCE_OPTS".equals(env.getName())).map(EnvVar::getValue).findFirst().orElse("").contains("-XX:+UseG1GC"), is(true)); assertThat(cont.getEnv().stream().filter(env -> "KAFKA_JVM_PERFORMANCE_OPTS".equals(env.getName())).map(EnvVar::getValue).findFirst().orElse("").contains("-XX:MaxGCPauseMillis=20"), is(true)); @@ -1172,7 +1202,7 @@ public void testKafkaConnectContainerEnvVars() { .endSpec() .build(); - List kafkaEnvVars = KafkaConnectS2ICluster.fromCrd(resource, VERSIONS).getEnvVars(); + List kafkaEnvVars = KafkaConnectS2ICluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS).getEnvVars(); assertThat("Failed to correctly set container environment variable: " + testEnvOneKey, kafkaEnvVars.stream().filter(env -> testEnvOneKey.equals(env.getName())) @@ -1210,7 +1240,7 @@ public void testKafkaContainerEnvVarsConflict() { .endSpec() .build(); - List kafkaEnvVars = KafkaConnectS2ICluster.fromCrd(resource, VERSIONS).getEnvVars(); + List kafkaEnvVars = KafkaConnectS2ICluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS).getEnvVars(); assertThat("Failed to prevent over writing existing container environment variable: " + testEnvOneKey, kafkaEnvVars.stream().filter(env -> testEnvOneKey.equals(env.getName())) @@ -1228,9 +1258,10 @@ public void testTracing() { .endJaegerTracing() .endSpec() .build(); - KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(resource, VERSIONS); + KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); - DeploymentConfig dep = kc.generateDeploymentConfig(Collections.EMPTY_MAP, true, null, null); + DeploymentConfig dep = kc.generateDeploymentConfig( + Collections.EMPTY_MAP, true, null, null); Container cont = dep.getSpec().getTemplate().getSpec().getContainers().get(0); assertThat(cont.getEnv().stream().filter(env -> KafkaConnectCluster.ENV_VAR_STRIMZI_TRACING.equals(env.getName())).map(EnvVar::getValue).findFirst().orElse("").equals("jaeger"), is(true)); assertThat(cont.getEnv().stream().filter(env -> KafkaConnectCluster.ENV_VAR_KAFKA_CONNECT_CONFIGURATION.equals(env.getName())).map(EnvVar::getValue).findFirst().orElse("").contains("consumer.interceptor.classes=io.opentracing.contrib.kafka.TracingConsumerInterceptor"), is(true)); @@ -1251,8 +1282,9 @@ public void testGenerateDeploymentWithOAuthWithAccessToken() { .endSpec() .build(); - KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(resource, VERSIONS); - DeploymentConfig dep = kc.generateDeploymentConfig(emptyMap(), true, null, null); + KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); + DeploymentConfig dep = kc.generateDeploymentConfig( + emptyMap(), true, null, null); Container cont = dep.getSpec().getTemplate().getSpec().getContainers().get(0); assertThat(cont.getEnv().stream().filter(var -> KafkaConnectCluster.ENV_VAR_KAFKA_CONNECT_SASL_MECHANISM.equals(var.getName())).findFirst().orElse(null).getValue(), is("oauth")); @@ -1277,8 +1309,9 @@ public void testGenerateDeploymentWithOAuthWithRefreshToken() { .endSpec() .build(); - KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(resource, VERSIONS); - DeploymentConfig dep = kc.generateDeploymentConfig(emptyMap(), true, null, null); + KafkaConnectS2ICluster kc = 
KafkaConnectS2ICluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); + DeploymentConfig dep = kc.generateDeploymentConfig( + emptyMap(), true, null, null); Container cont = dep.getSpec().getTemplate().getSpec().getContainers().get(0); assertThat(cont.getEnv().stream().filter(var -> KafkaConnectCluster.ENV_VAR_KAFKA_CONNECT_SASL_MECHANISM.equals(var.getName())).findFirst().orElse(null).getValue(), is("oauth")); @@ -1303,8 +1336,9 @@ public void testGenerateDeploymentWithOAuthWithClientSecret() { .endSpec() .build(); - KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(resource, VERSIONS); - DeploymentConfig dep = kc.generateDeploymentConfig(emptyMap(), true, null, null); + KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); + DeploymentConfig dep = kc.generateDeploymentConfig( + emptyMap(), true, null, null); Container cont = dep.getSpec().getTemplate().getSpec().getContainers().get(0); assertThat(cont.getEnv().stream().filter(var -> KafkaConnectCluster.ENV_VAR_KAFKA_CONNECT_SASL_MECHANISM.equals(var.getName())).findFirst().orElse(null).getValue(), is("oauth")); @@ -1326,7 +1360,7 @@ public void testGenerateDeploymentWithOAuthWithMissingClientSecret() { .endSpec() .build(); - KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(resource, VERSIONS); + KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); }); } @@ -1346,7 +1380,7 @@ public void testGenerateDeploymentWithOAuthWithMissingUri() { .endSpec() .build(); - KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(resource, VERSIONS); + KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); }); } @@ -1383,8 +1417,9 @@ public void testGenerateDeploymentWithOAuthWithTls() { .endSpec() .build(); - KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(resource, VERSIONS); - DeploymentConfig dep = kc.generateDeploymentConfig(emptyMap(), true, null, null); + KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); + DeploymentConfig dep = kc.generateDeploymentConfig( + emptyMap(), true, null, null); Container cont = dep.getSpec().getTemplate().getSpec().getContainers().get(0); assertThat(cont.getEnv().stream().filter(var -> KafkaConnectCluster.ENV_VAR_KAFKA_CONNECT_SASL_MECHANISM.equals(var.getName())).findFirst().orElse(null).getValue(), is("oauth")); @@ -1416,7 +1451,7 @@ public void testGenerateDeploymentWithOAuthWithTls() { public void testNetworkPolicyWithConnectorOperator() { KafkaConnectS2I resource = new KafkaConnectS2IBuilder(this.resourceWithMetrics) .build(); - KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(resource, VERSIONS); + KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); NetworkPolicy np = kc.generateNetworkPolicy(true, "operator-namespace", null); @@ -1438,7 +1473,7 @@ public void testNetworkPolicyWithConnectorOperator() { public void testNetworkPolicyWithConnectorOperatorSameNamespace() { KafkaConnectS2I resource = new KafkaConnectS2IBuilder(this.resourceWithMetrics) .build(); - KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(resource, VERSIONS); + KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); NetworkPolicy np = kc.generateNetworkPolicy(true, namespace, null); @@ -1460,7 
+1495,7 @@ public void testNetworkPolicyWithConnectorOperatorSameNamespace() { public void testNetworkPolicyWithConnectorOperatorWithNamespaceLabels() { KafkaConnectS2I resource = new KafkaConnectS2IBuilder(this.resourceWithMetrics) .build(); - KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(resource, VERSIONS); + KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); NetworkPolicy np = kc.generateNetworkPolicy(true, "operator-namespace", Labels.fromMap(Collections.singletonMap("nsLabelKey", "nsLabelValue"))); @@ -1482,7 +1517,7 @@ public void testNetworkPolicyWithConnectorOperatorWithNamespaceLabels() { public void testNetworkPolicyWithoutConnectorOperator() { KafkaConnectS2I resource = new KafkaConnectS2IBuilder(this.resourceWithMetrics) .build(); - KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(resource, VERSIONS); + KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); assertThat(kc.generateNetworkPolicy(false, null, null), is(nullValue())); } @@ -1501,7 +1536,7 @@ public void testMetricsParsingFromConfigMap() { .endSpec() .build(); - KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(kafkaConnect, VERSIONS); + KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaConnect, VERSIONS); assertThat(kc.isMetricsEnabled(), is(true)); assertThat(kc.getMetricsConfigInCm(), is(metrics)); @@ -1509,7 +1544,7 @@ public void testMetricsParsingFromConfigMap() { @ParallelTest public void testMetricsParsingNoMetrics() { - KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(this.resource, VERSIONS); + KafkaConnectS2ICluster kc = KafkaConnectS2ICluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, this.resource, VERSIONS); assertThat(kc.isMetricsEnabled(), is(false)); assertThat(kc.getMetricsConfigInCm(), is(nullValue())); diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaExporterTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaExporterTest.java index bb96f2d4d8..e09a2c9a44 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaExporterTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaExporterTest.java @@ -35,6 +35,7 @@ import io.strimzi.api.kafka.model.storage.Storage; import io.strimzi.operator.cluster.KafkaVersionTestUtils; import io.strimzi.operator.cluster.ResourceUtils; +import io.strimzi.operator.common.Reconciliation; import io.strimzi.operator.common.model.Labels; import io.strimzi.test.TestUtils; import io.strimzi.test.annotations.ParallelSuite; @@ -101,7 +102,7 @@ public class KafkaExporterTest { .withKafkaExporter(exporterOperator) .endSpec() .build(); - private final KafkaExporter ke = KafkaExporter.fromCrd(resource, VERSIONS); + private final KafkaExporter ke = KafkaExporter.fromCrd(new Reconciliation("test", resource.getKind(), resource.getMetadata().getNamespace(), resource.getMetadata().getName()), resource, VERSIONS); public void checkOwnerReference(OwnerReference ownerRef, HasMetadata resource) { assertThat(resource.getMetadata().getOwnerReferences().size(), is(1)); @@ -143,7 +144,7 @@ public void testFromConfigMapDefaultConfig() { Kafka resource = ResourceUtils.createKafka(namespace, cluster, replicas, null, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, kafkaConfig, zooConfig, kafkaStorage, zkStorage, kafkaLogJson, zooLogJson, new 
KafkaExporterSpec(), null); - KafkaExporter ke = KafkaExporter.fromCrd(resource, VERSIONS); + KafkaExporter ke = KafkaExporter.fromCrd(new Reconciliation("test", resource.getKind(), resource.getMetadata().getNamespace(), resource.getMetadata().getName()), resource, VERSIONS); assertThat(ke.getImage(), is(KafkaVersionTestUtils.DEFAULT_KAFKA_IMAGE)); assertThat(ke.logging, is("info")); assertThat(ke.groupRegex, is(".*")); @@ -260,7 +261,7 @@ public void testContainerTemplateEnvVars() { Kafka resource = ResourceUtils.createKafka(namespace, cluster, replicas, image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, kafkaConfig, zooConfig, kafkaStorage, zkStorage, kafkaLogJson, zooLogJson, exporterSpec, null); - KafkaExporter ke = KafkaExporter.fromCrd(resource, VERSIONS); + KafkaExporter ke = KafkaExporter.fromCrd(new Reconciliation("test", resource.getKind(), resource.getMetadata().getNamespace(), resource.getMetadata().getName()), resource, VERSIONS); List kafkaEnvVars = ke.getEnvVars(); assertThat(kafkaEnvVars.stream().filter(var -> testEnvOneKey.equals(var.getName())).map(EnvVar::getValue).findFirst().get(), is(testEnvOneValue)); @@ -296,7 +297,7 @@ public void testContainerTemplateEnvVarsWithKeyConflict() { Kafka resource = ResourceUtils.createKafka(namespace, cluster, replicas, image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, kafkaConfig, zooConfig, kafkaStorage, zkStorage, kafkaLogJson, zooLogJson, exporterSpec, null); - KafkaExporter ke = KafkaExporter.fromCrd(resource, VERSIONS); + KafkaExporter ke = KafkaExporter.fromCrd(new Reconciliation("test", resource.getKind(), resource.getMetadata().getNamespace(), resource.getMetadata().getName()), resource, VERSIONS); List kafkaEnvVars = ke.getEnvVars(); assertThat(kafkaEnvVars.stream().filter(var -> testEnvOneKey.equals(var.getName())).map(EnvVar::getValue).findFirst().get(), is(testEnvOneValue)); @@ -308,7 +309,7 @@ public void testExporterNotDeployed() { Kafka resource = ResourceUtils.createKafka(namespace, cluster, replicas, image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, kafkaConfig, zooConfig, kafkaStorage, zkStorage, kafkaLogJson, zooLogJson, null, null); - KafkaExporter ke = KafkaExporter.fromCrd(resource, VERSIONS); + KafkaExporter ke = KafkaExporter.fromCrd(new Reconciliation("test", resource.getKind(), resource.getMetadata().getNamespace(), resource.getMetadata().getName()), resource, VERSIONS); assertThat(ke.generateDeployment(true, null, null), is(nullValue())); assertThat(ke.generateSecret(null, true), is(nullValue())); @@ -319,7 +320,7 @@ public void testGenerateDeploymentWhenDisabled() { Kafka resource = ResourceUtils.createKafka(namespace, cluster, replicas, image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, kafkaConfig, zooConfig, kafkaStorage, zkStorage, kafkaLogJson, zooLogJson, null, null); - KafkaExporter ke = KafkaExporter.fromCrd(resource, VERSIONS); + KafkaExporter ke = KafkaExporter.fromCrd(new Reconciliation("test", resource.getKind(), resource.getMetadata().getNamespace(), resource.getMetadata().getName()), resource, VERSIONS); assertThat(ke.generateDeployment(true, null, null), is(nullValue())); } @@ -407,7 +408,7 @@ public void testTemplate() { .endKafkaExporter() .endSpec() .build(); - KafkaExporter ke = KafkaExporter.fromCrd(resource, VERSIONS); + KafkaExporter ke = KafkaExporter.fromCrd(new Reconciliation("test", resource.getKind(), resource.getMetadata().getNamespace(), resource.getMetadata().getName()), resource, VERSIONS); // Check Deployment Deployment 
dep = ke.generateDeployment(true, null, null); diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaMirrorMaker2ClusterTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaMirrorMaker2ClusterTest.java index b4ef8d2314..7cc50de652 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaMirrorMaker2ClusterTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaMirrorMaker2ClusterTest.java @@ -55,6 +55,7 @@ import io.strimzi.operator.cluster.KafkaVersionTestUtils; import io.strimzi.operator.cluster.ResourceUtils; import io.strimzi.operator.common.MetricsAndLogging; +import io.strimzi.operator.common.Reconciliation; import io.strimzi.operator.common.model.Labels; import io.strimzi.operator.common.model.OrderedProperties; import io.strimzi.test.TestUtils; @@ -139,7 +140,7 @@ public class KafkaMirrorMaker2ClusterTest { .endSpec() .build(); - private final KafkaMirrorMaker2Cluster kmm2 = KafkaMirrorMaker2Cluster.fromCrd(resourceWithMetrics, VERSIONS); + private final KafkaMirrorMaker2Cluster kmm2 = KafkaMirrorMaker2Cluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resourceWithMetrics, VERSIONS); { // we were setting metricsEnabled in fromCrd, which was just checking it for non-null. With metrics in CM, we have to check // its content, what is done in generateMetricsAndLogConfigMap @@ -192,7 +193,7 @@ private Container getContainer(Deployment dep) { @ParallelTest public void testDefaultValues() { - KafkaMirrorMaker2Cluster kmm2 = KafkaMirrorMaker2Cluster.fromCrd(ResourceUtils.createEmptyKafkaMirrorMaker2(namespace, cluster), VERSIONS); + KafkaMirrorMaker2Cluster kmm2 = KafkaMirrorMaker2Cluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, ResourceUtils.createEmptyKafkaMirrorMaker2(namespace, cluster), VERSIONS); assertThat(kmm2.image, is(KafkaVersionTestUtils.DEFAULT_KAFKA_CONNECT_IMAGE)); assertThat(kmm2.replicas, is(KafkaMirrorMaker2Cluster.DEFAULT_REPLICAS)); @@ -245,7 +246,7 @@ public void testGenerateServiceWithoutMetrics() { .withMetrics(null) .endSpec() .build(); - KafkaMirrorMaker2Cluster kmm2 = KafkaMirrorMaker2Cluster.fromCrd(resource, VERSIONS); + KafkaMirrorMaker2Cluster kmm2 = KafkaMirrorMaker2Cluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); Service svc = kmm2.generateService(); assertThat(svc.getSpec().getType(), is("ClusterIP")); @@ -265,7 +266,8 @@ public void testGenerateServiceWithoutMetrics() { @ParallelTest public void testGenerateDeployment() { - Deployment dep = kmm2.generateDeployment(new HashMap(), true, null, null); + Deployment dep = kmm2.generateDeployment( + new HashMap(), true, null, null); assertThat(dep.getMetadata().getName(), is(KafkaMirrorMaker2Resources.deploymentName(cluster))); assertThat(dep.getMetadata().getNamespace(), is(namespace)); @@ -296,16 +298,18 @@ public void testGenerateDeployment() { @ParallelTest public void withAffinity() throws IOException { - ResourceTester resourceTester = new ResourceTester<>(KafkaMirrorMaker2.class, VERSIONS, KafkaMirrorMaker2Cluster::fromCrd, this.getClass().getSimpleName() + ".withAffinity"); + ResourceTester resourceTester = new ResourceTester<>(KafkaMirrorMaker2.class, VERSIONS, (kafkaMirrorMaker2, versions) -> KafkaMirrorMaker2Cluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaMirrorMaker2, versions), this.getClass().getSimpleName() + ".withAffinity"); resourceTester - .assertDesiredResource("-Deployment.yaml", kmm2c -> kmm2c.generateDeployment(new HashMap(), true, null, 
null).getSpec().getTemplate().getSpec().getAffinity()); + .assertDesiredResource("-Deployment.yaml", kmm2c -> kmm2c.generateDeployment( + new HashMap(), true, null, null).getSpec().getTemplate().getSpec().getAffinity()); } @ParallelTest public void withTolerations() throws IOException { - ResourceTester resourceTester = new ResourceTester<>(KafkaMirrorMaker2.class, VERSIONS, KafkaMirrorMaker2Cluster::fromCrd, this.getClass().getSimpleName() + ".withTolerations"); + ResourceTester resourceTester = new ResourceTester<>(KafkaMirrorMaker2.class, VERSIONS, (kafkaMirrorMaker2, versions) -> KafkaMirrorMaker2Cluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaMirrorMaker2, versions), this.getClass().getSimpleName() + ".withTolerations"); resourceTester - .assertDesiredResource("-Deployment.yaml", kmm2c -> kmm2c.generateDeployment(new HashMap(), true, null, null).getSpec().getTemplate().getSpec().getTolerations()); + .assertDesiredResource("-Deployment.yaml", kmm2c -> kmm2c.generateDeployment( + new HashMap(), true, null, null).getSpec().getTemplate().getSpec().getTolerations()); } @ParallelTest @@ -322,7 +326,7 @@ public void testGenerateDeploymentWithTls() { .withClusters(targetClusterWithTls) .endSpec() .build(); - KafkaMirrorMaker2Cluster kmm2 = KafkaMirrorMaker2Cluster.fromCrd(resource, VERSIONS); + KafkaMirrorMaker2Cluster kmm2 = KafkaMirrorMaker2Cluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); Deployment dep = kmm2.generateDeployment(emptyMap(), true, null, null); assertThat(dep.getSpec().getTemplate().getSpec().getVolumes().get(2).getName(), is("my-secret")); @@ -353,8 +357,9 @@ public void testGenerateDeploymentWithTlsWithoutCerts() { .endSpec() .build(); - KafkaMirrorMaker2Cluster kmm2 = KafkaMirrorMaker2Cluster.fromCrd(resource, VERSIONS); - Deployment dep = kmm2.generateDeployment(emptyMap(), true, null, null); + KafkaMirrorMaker2Cluster kmm2 = KafkaMirrorMaker2Cluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); + Deployment dep = kmm2.generateDeployment( + emptyMap(), true, null, null); Container cont = getContainer(dep); assertThat(AbstractModel.containerEnvVars(cont).get(KafkaMirrorMaker2Cluster.ENV_VAR_KAFKA_CONNECT_TRUSTED_CERTS), @@ -386,8 +391,9 @@ public void testGenerateDeploymentWithTlsAuth() { .withClusters(targetClusterWithTlsAuth) .endSpec() .build(); - KafkaMirrorMaker2Cluster kmm2 = KafkaMirrorMaker2Cluster.fromCrd(resource, VERSIONS); - Deployment dep = kmm2.generateDeployment(emptyMap(), true, null, null); + KafkaMirrorMaker2Cluster kmm2 = KafkaMirrorMaker2Cluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); + Deployment dep = kmm2.generateDeployment( + emptyMap(), true, null, null); assertThat(dep.getSpec().getTemplate().getSpec().getVolumes().get(3).getName(), is("user-secret")); @@ -421,8 +427,9 @@ public void testGenerateDeploymentWithTlsSameSecret() { .withClusters(targetClusterWithTlsAuth) .endSpec() .build(); - KafkaMirrorMaker2Cluster kmm2 = KafkaMirrorMaker2Cluster.fromCrd(resource, VERSIONS); - Deployment dep = kmm2.generateDeployment(emptyMap(), true, null, null); + KafkaMirrorMaker2Cluster kmm2 = KafkaMirrorMaker2Cluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); + Deployment dep = kmm2.generateDeployment( + emptyMap(), true, null, null); // 3 = 1 volume from logging/metrics + 2 from above cert mounted for connect and for connectors assertThat(dep.getSpec().getTemplate().getSpec().getVolumes().size(), is(4)); @@ -445,8 +452,9 @@ public void 
testGenerateDeploymentWithScramSha512Auth() { .withClusters(targetClusterWithScramSha512Auth) .endSpec() .build(); - KafkaMirrorMaker2Cluster kmm2 = KafkaMirrorMaker2Cluster.fromCrd(resource, VERSIONS); - Deployment dep = kmm2.generateDeployment(emptyMap(), true, null, null); + KafkaMirrorMaker2Cluster kmm2 = KafkaMirrorMaker2Cluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); + Deployment dep = kmm2.generateDeployment( + emptyMap(), true, null, null); assertThat(dep.getSpec().getTemplate().getSpec().getVolumes().get(2).getName(), is("user1-secret")); @@ -483,8 +491,9 @@ public void testGenerateDeploymentWithScramSha512AuthAndTLSSameSecret() { .withClusters(targetClusterWithScramSha512Auth) .endSpec() .build(); - KafkaMirrorMaker2Cluster kmm2 = KafkaMirrorMaker2Cluster.fromCrd(resource, VERSIONS); - Deployment dep = kmm2.generateDeployment(emptyMap(), true, null, null); + KafkaMirrorMaker2Cluster kmm2 = KafkaMirrorMaker2Cluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); + Deployment dep = kmm2.generateDeployment( + emptyMap(), true, null, null); assertThat(dep.getSpec().getTemplate().getSpec().getVolumes().size(), is(4)); assertThat(dep.getSpec().getTemplate().getSpec().getVolumes().get(0).getName(), is(AbstractModel.STRIMZI_TMP_DIRECTORY_DEFAULT_VOLUME_NAME)); @@ -543,8 +552,9 @@ public void testGenerateDeploymentWithMultipleClustersScramSha512AuthAndTLSSameS .endSpec() .build(); - KafkaMirrorMaker2Cluster kmm2 = KafkaMirrorMaker2Cluster.fromCrd(resource, VERSIONS); - Deployment dep = kmm2.generateDeployment(emptyMap(), true, null, null); + KafkaMirrorMaker2Cluster kmm2 = KafkaMirrorMaker2Cluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); + Deployment dep = kmm2.generateDeployment( + emptyMap(), true, null, null); assertThat(dep.getSpec().getTemplate().getSpec().getVolumes().size(), is(5)); assertThat(dep.getSpec().getTemplate().getSpec().getVolumes().get(0).getName(), is(AbstractModel.STRIMZI_TMP_DIRECTORY_DEFAULT_VOLUME_NAME)); @@ -595,8 +605,9 @@ public void testGenerateDeploymentWithPlainAuth() { .withClusters(targetClusterWithPlainAuth) .endSpec() .build(); - KafkaMirrorMaker2Cluster kmm2 = KafkaMirrorMaker2Cluster.fromCrd(resource, VERSIONS); - Deployment dep = kmm2.generateDeployment(emptyMap(), true, null, null); + KafkaMirrorMaker2Cluster kmm2 = KafkaMirrorMaker2Cluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); + Deployment dep = kmm2.generateDeployment( + emptyMap(), true, null, null); assertThat(dep.getSpec().getTemplate().getSpec().getVolumes().get(2).getName(), is("user1-secret")); @@ -634,8 +645,9 @@ public void testGenerateDeploymentWithPlainAuthAndTLSSameSecret() { .withClusters(targetClusterWithPlainAuth) .endSpec() .build(); - KafkaMirrorMaker2Cluster kmm2 = KafkaMirrorMaker2Cluster.fromCrd(resource, VERSIONS); - Deployment dep = kmm2.generateDeployment(emptyMap(), true, null, null); + KafkaMirrorMaker2Cluster kmm2 = KafkaMirrorMaker2Cluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); + Deployment dep = kmm2.generateDeployment( + emptyMap(), true, null, null); assertThat(dep.getSpec().getTemplate().getSpec().getVolumes().toString(), dep.getSpec().getTemplate().getSpec().getVolumes().size(), is(4)); assertThat(dep.getSpec().getTemplate().getSpec().getVolumes().get(0).getName(), is(AbstractModel.STRIMZI_TMP_DIRECTORY_DEFAULT_VOLUME_NAME)); @@ -738,10 +750,11 @@ public void testTemplate() { .endTemplate() .endSpec() .build(); - KafkaMirrorMaker2Cluster kmm2 = 
KafkaMirrorMaker2Cluster.fromCrd(resource, VERSIONS); + KafkaMirrorMaker2Cluster kmm2 = KafkaMirrorMaker2Cluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); // Check Deployment - Deployment dep = kmm2.generateDeployment(emptyMap(), true, null, null); + Deployment dep = kmm2.generateDeployment( + emptyMap(), true, null, null); assertThat(dep.getMetadata().getLabels().entrySet().containsAll(expectedDepLabels.entrySet()), is(true)); assertThat(dep.getMetadata().getAnnotations().entrySet().containsAll(depAnots.entrySet()), is(true)); assertThat(dep.getSpec().getTemplate().getSpec().getPriorityClassName(), is("top-priority")); @@ -795,10 +808,11 @@ public void testExternalConfigurationSecretEnvs() { .endSpec() .build(); - KafkaMirrorMaker2Cluster kmm2 = KafkaMirrorMaker2Cluster.fromCrd(resource, VERSIONS); + KafkaMirrorMaker2Cluster kmm2 = KafkaMirrorMaker2Cluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); // Check Deployment - Deployment dep = kmm2.generateDeployment(emptyMap(), true, null, null); + Deployment dep = kmm2.generateDeployment( + emptyMap(), true, null, null); List envs = getContainer(dep).getEnv(); List selected = envs.stream().filter(var -> var.getName().equals("MY_ENV_VAR")).collect(Collectors.toList()); assertThat(selected.size(), is(1)); @@ -822,10 +836,11 @@ public void testExternalConfigurationConfigEnvs() { .endExternalConfiguration() .endSpec() .build(); - KafkaMirrorMaker2Cluster kmm2 = KafkaMirrorMaker2Cluster.fromCrd(resource, VERSIONS); + KafkaMirrorMaker2Cluster kmm2 = KafkaMirrorMaker2Cluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); // Check Deployment - Deployment dep = kmm2.generateDeployment(emptyMap(), true, null, null); + Deployment dep = kmm2.generateDeployment( + emptyMap(), true, null, null); List envs = getContainer(dep).getEnv(); List selected = envs.stream().filter(var -> var.getName().equals("MY_ENV_VAR")).collect(Collectors.toList()); assertThat(selected.size(), is(1)); @@ -847,10 +862,11 @@ public void testExternalConfigurationSecretVolumes() { .endExternalConfiguration() .endSpec() .build(); - KafkaMirrorMaker2Cluster kmm2 = KafkaMirrorMaker2Cluster.fromCrd(resource, VERSIONS); + KafkaMirrorMaker2Cluster kmm2 = KafkaMirrorMaker2Cluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); // Check Deployment - Deployment dep = kmm2.generateDeployment(emptyMap(), true, null, null); + Deployment dep = kmm2.generateDeployment( + emptyMap(), true, null, null); List volumes = dep.getSpec().getTemplate().getSpec().getVolumes(); List selected = volumes.stream().filter(vol -> vol.getName().equals(KafkaMirrorMaker2Cluster.EXTERNAL_CONFIGURATION_VOLUME_NAME_PREFIX + "my-volume")).collect(Collectors.toList()); assertThat(selected.size(), is(1)); @@ -878,10 +894,11 @@ public void testExternalConfigurationConfigVolumes() { .endExternalConfiguration() .endSpec() .build(); - KafkaMirrorMaker2Cluster kmm2 = KafkaMirrorMaker2Cluster.fromCrd(resource, VERSIONS); + KafkaMirrorMaker2Cluster kmm2 = KafkaMirrorMaker2Cluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); // Check Deployment - Deployment dep = kmm2.generateDeployment(emptyMap(), true, null, null); + Deployment dep = kmm2.generateDeployment( + emptyMap(), true, null, null); List volumes = dep.getSpec().getTemplate().getSpec().getVolumes(); List selected = volumes.stream().filter(vol -> vol.getName().equals(KafkaMirrorMaker2Cluster.EXTERNAL_CONFIGURATION_VOLUME_NAME_PREFIX + 
"my-volume")).collect(Collectors.toList()); assertThat(selected.size(), is(1)); @@ -910,10 +927,11 @@ public void testExternalConfigurationInvalidVolumes() { .endExternalConfiguration() .endSpec() .build(); - KafkaMirrorMaker2Cluster kmm2 = KafkaMirrorMaker2Cluster.fromCrd(resource, VERSIONS); + KafkaMirrorMaker2Cluster kmm2 = KafkaMirrorMaker2Cluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); // Check Deployment - Deployment dep = kmm2.generateDeployment(emptyMap(), true, null, null); + Deployment dep = kmm2.generateDeployment( + emptyMap(), true, null, null); List volumes = dep.getSpec().getTemplate().getSpec().getVolumes(); List selected = volumes.stream().filter(vol -> vol.getName().equals(KafkaMirrorMaker2Cluster.EXTERNAL_CONFIGURATION_VOLUME_NAME_PREFIX + "my-volume")).collect(Collectors.toList()); assertThat(selected.size(), is(0)); @@ -936,10 +954,11 @@ public void testNoExternalConfigurationVolumes() { .endExternalConfiguration() .endSpec() .build(); - KafkaMirrorMaker2Cluster kmm2 = KafkaMirrorMaker2Cluster.fromCrd(resource, VERSIONS); + KafkaMirrorMaker2Cluster kmm2 = KafkaMirrorMaker2Cluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); // Check Deployment - Deployment dep = kmm2.generateDeployment(emptyMap(), true, null, null); + Deployment dep = kmm2.generateDeployment( + emptyMap(), true, null, null); List volumes = dep.getSpec().getTemplate().getSpec().getVolumes(); List selected = volumes.stream().filter(vol -> vol.getName().equals(KafkaMirrorMaker2Cluster.EXTERNAL_CONFIGURATION_VOLUME_NAME_PREFIX + "my-volume")).collect(Collectors.toList()); assertThat(selected.size(), is(0)); @@ -966,10 +985,11 @@ public void testInvalidExternalConfigurationEnvs() { .endExternalConfiguration() .endSpec() .build(); - KafkaMirrorMaker2Cluster kmm2 = KafkaMirrorMaker2Cluster.fromCrd(resource, VERSIONS); + KafkaMirrorMaker2Cluster kmm2 = KafkaMirrorMaker2Cluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); // Check Deployment - Deployment dep = kmm2.generateDeployment(emptyMap(), true, null, null); + Deployment dep = kmm2.generateDeployment( + emptyMap(), true, null, null); List envs = getContainer(dep).getEnv(); List selected = envs.stream().filter(var -> var.getName().equals("MY_ENV_VAR")).collect(Collectors.toList()); assertThat(selected.size(), is(0)); @@ -990,10 +1010,11 @@ public void testNoExternalConfigurationEnvs() { .endExternalConfiguration() .endSpec() .build(); - KafkaMirrorMaker2Cluster kmm2 = KafkaMirrorMaker2Cluster.fromCrd(resource, VERSIONS); + KafkaMirrorMaker2Cluster kmm2 = KafkaMirrorMaker2Cluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); // Check Deployment - Deployment dep = kmm2.generateDeployment(emptyMap(), true, null, null); + Deployment dep = kmm2.generateDeployment( + emptyMap(), true, null, null); List envs = getContainer(dep).getEnv(); List selected = envs.stream().filter(var -> var.getName().equals("MY_ENV_VAR")).collect(Collectors.toList()); assertThat(selected.size(), is(0)); @@ -1010,16 +1031,17 @@ public void testGracePeriod() { .endTemplate() .endSpec() .build(); - KafkaMirrorMaker2Cluster kmm2 = KafkaMirrorMaker2Cluster.fromCrd(resource, VERSIONS); + KafkaMirrorMaker2Cluster kmm2 = KafkaMirrorMaker2Cluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); - Deployment dep = kmm2.generateDeployment(emptyMap(), true, null, null); + Deployment dep = kmm2.generateDeployment( + emptyMap(), true, null, null); 
assertThat(dep.getSpec().getTemplate().getSpec().getTerminationGracePeriodSeconds(), is(Long.valueOf(123))); } @ParallelTest public void testDefaultGracePeriod() { KafkaMirrorMaker2 resource = new KafkaMirrorMaker2Builder(this.resource).build(); - KafkaMirrorMaker2Cluster kmm2 = KafkaMirrorMaker2Cluster.fromCrd(resource, VERSIONS); + KafkaMirrorMaker2Cluster kmm2 = KafkaMirrorMaker2Cluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); Deployment dep = kmm2.generateDeployment(emptyMap(), true, null, null); assertThat(dep.getSpec().getTemplate().getSpec().getTerminationGracePeriodSeconds(), is(Long.valueOf(30))); @@ -1039,9 +1061,10 @@ public void testImagePullSecrets() { .endTemplate() .endSpec() .build(); - KafkaMirrorMaker2Cluster kmm2 = KafkaMirrorMaker2Cluster.fromCrd(resource, VERSIONS); + KafkaMirrorMaker2Cluster kmm2 = KafkaMirrorMaker2Cluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); - Deployment dep = kmm2.generateDeployment(emptyMap(), true, null, null); + Deployment dep = kmm2.generateDeployment( + emptyMap(), true, null, null); assertThat(dep.getSpec().getTemplate().getSpec().getImagePullSecrets().size(), is(2)); assertThat(dep.getSpec().getTemplate().getSpec().getImagePullSecrets().contains(secret1), is(true)); assertThat(dep.getSpec().getTemplate().getSpec().getImagePullSecrets().contains(secret2), is(true)); @@ -1056,9 +1079,10 @@ public void testImagePullSecretsCO() { secrets.add(secret1); secrets.add(secret2); - KafkaMirrorMaker2Cluster kmm2 = KafkaMirrorMaker2Cluster.fromCrd(this.resource, VERSIONS); + KafkaMirrorMaker2Cluster kmm2 = KafkaMirrorMaker2Cluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, this.resource, VERSIONS); - Deployment dep = kmm2.generateDeployment(emptyMap(), true, null, secrets); + Deployment dep = kmm2.generateDeployment( + emptyMap(), true, null, secrets); assertThat(dep.getSpec().getTemplate().getSpec().getImagePullSecrets().size(), is(2)); assertThat(dep.getSpec().getTemplate().getSpec().getImagePullSecrets().contains(secret1), is(true)); assertThat(dep.getSpec().getTemplate().getSpec().getImagePullSecrets().contains(secret2), is(true)); @@ -1078,9 +1102,10 @@ public void testImagePullSecretsBoth() { .endTemplate() .endSpec() .build(); - KafkaMirrorMaker2Cluster kmm2 = KafkaMirrorMaker2Cluster.fromCrd(resource, VERSIONS); + KafkaMirrorMaker2Cluster kmm2 = KafkaMirrorMaker2Cluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); - Deployment dep = kmm2.generateDeployment(emptyMap(), true, null, singletonList(secret1)); + Deployment dep = kmm2.generateDeployment( + emptyMap(), true, null, singletonList(secret1)); assertThat(dep.getSpec().getTemplate().getSpec().getImagePullSecrets().size(), is(1)); assertThat(dep.getSpec().getTemplate().getSpec().getImagePullSecrets().contains(secret1), is(false)); assertThat(dep.getSpec().getTemplate().getSpec().getImagePullSecrets().contains(secret2), is(true)); @@ -1089,9 +1114,10 @@ public void testImagePullSecretsBoth() { @ParallelTest public void testDefaultImagePullSecrets() { KafkaMirrorMaker2 resource = new KafkaMirrorMaker2Builder(this.resource).build(); - KafkaMirrorMaker2Cluster kmm2 = KafkaMirrorMaker2Cluster.fromCrd(resource, VERSIONS); + KafkaMirrorMaker2Cluster kmm2 = KafkaMirrorMaker2Cluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); - Deployment dep = kmm2.generateDeployment(emptyMap(), true, null, null); + Deployment dep = kmm2.generateDeployment( + emptyMap(), true, null, null); 
assertThat(dep.getSpec().getTemplate().getSpec().getImagePullSecrets(), is(nullValue())); } @@ -1106,9 +1132,10 @@ public void testSecurityContext() { .endTemplate() .endSpec() .build(); - KafkaMirrorMaker2Cluster kmm2 = KafkaMirrorMaker2Cluster.fromCrd(resource, VERSIONS); + KafkaMirrorMaker2Cluster kmm2 = KafkaMirrorMaker2Cluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); - Deployment dep = kmm2.generateDeployment(emptyMap(), true, null, null); + Deployment dep = kmm2.generateDeployment( + emptyMap(), true, null, null); assertThat(dep.getSpec().getTemplate().getSpec().getSecurityContext(), is(notNullValue())); assertThat(dep.getSpec().getTemplate().getSpec().getSecurityContext().getFsGroup(), is(Long.valueOf(123))); assertThat(dep.getSpec().getTemplate().getSpec().getSecurityContext().getRunAsGroup(), is(Long.valueOf(456))); @@ -1118,9 +1145,10 @@ public void testSecurityContext() { @ParallelTest public void testDefaultSecurityContext() { KafkaMirrorMaker2 resource = new KafkaMirrorMaker2Builder(this.resource).build(); - KafkaMirrorMaker2Cluster kmm2 = KafkaMirrorMaker2Cluster.fromCrd(resource, VERSIONS); + KafkaMirrorMaker2Cluster kmm2 = KafkaMirrorMaker2Cluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); - Deployment dep = kmm2.generateDeployment(emptyMap(), true, null, null); + Deployment dep = kmm2.generateDeployment( + emptyMap(), true, null, null); assertThat(dep.getSpec().getTemplate().getSpec().getSecurityContext(), is(nullValue())); } @@ -1135,7 +1163,7 @@ public void testPodDisruptionBudget() { .endTemplate() .endSpec() .build(); - KafkaMirrorMaker2Cluster kmm2 = KafkaMirrorMaker2Cluster.fromCrd(resource, VERSIONS); + KafkaMirrorMaker2Cluster kmm2 = KafkaMirrorMaker2Cluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); PodDisruptionBudget pdb = kmm2.generatePodDisruptionBudget(); assertThat(pdb.getSpec().getMaxUnavailable(), is(new IntOrString(2))); @@ -1144,7 +1172,7 @@ public void testPodDisruptionBudget() { @ParallelTest public void testDefaultPodDisruptionBudget() { KafkaMirrorMaker2 resource = new KafkaMirrorMaker2Builder(this.resource).build(); - KafkaMirrorMaker2Cluster kmm2 = KafkaMirrorMaker2Cluster.fromCrd(resource, VERSIONS); + KafkaMirrorMaker2Cluster kmm2 = KafkaMirrorMaker2Cluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); PodDisruptionBudget pdb = kmm2.generatePodDisruptionBudget(); assertThat(pdb.getSpec().getMaxUnavailable(), is(new IntOrString(1))); @@ -1152,12 +1180,14 @@ public void testDefaultPodDisruptionBudget() { @ParallelTest public void testImagePullPolicy() { - KafkaMirrorMaker2Cluster kmm2 = KafkaMirrorMaker2Cluster.fromCrd(resource, VERSIONS); + KafkaMirrorMaker2Cluster kmm2 = KafkaMirrorMaker2Cluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); - Deployment dep = kmm2.generateDeployment(Collections.EMPTY_MAP, true, ImagePullPolicy.ALWAYS, null); + Deployment dep = kmm2.generateDeployment( + Collections.EMPTY_MAP, true, ImagePullPolicy.ALWAYS, null); assertThat(getContainer(dep).getImagePullPolicy(), is(ImagePullPolicy.ALWAYS.toString())); - dep = kmm2.generateDeployment(Collections.EMPTY_MAP, true, ImagePullPolicy.IFNOTPRESENT, null); + dep = kmm2.generateDeployment( + Collections.EMPTY_MAP, true, ImagePullPolicy.IFNOTPRESENT, null); assertThat(getContainer(dep).getImagePullPolicy(), is(ImagePullPolicy.IFNOTPRESENT.toString())); } @@ -1176,9 +1206,10 @@ public void testResources() { .withResources(new 
ResourceRequirementsBuilder().withLimits(limits).withRequests(requests).build()) .endSpec() .build(); - KafkaMirrorMaker2Cluster kmm2 = KafkaMirrorMaker2Cluster.fromCrd(resource, VERSIONS); + KafkaMirrorMaker2Cluster kmm2 = KafkaMirrorMaker2Cluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); - Deployment dep = kmm2.generateDeployment(Collections.EMPTY_MAP, true, null, null); + Deployment dep = kmm2.generateDeployment( + Collections.EMPTY_MAP, true, null, null); Container cont = getContainer(dep); assertThat(cont.getResources().getLimits(), is(limits)); assertThat(cont.getResources().getRequests(), is(requests)); @@ -1199,9 +1230,10 @@ public void testJvmOptions() { .endJvmOptions() .endSpec() .build(); - KafkaMirrorMaker2Cluster kmm2 = KafkaMirrorMaker2Cluster.fromCrd(resource, VERSIONS); + KafkaMirrorMaker2Cluster kmm2 = KafkaMirrorMaker2Cluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); - Deployment dep = kmm2.generateDeployment(Collections.EMPTY_MAP, true, null, null); + Deployment dep = kmm2.generateDeployment( + Collections.EMPTY_MAP, true, null, null); Container cont = getContainer(dep); assertThat(cont.getEnv().stream().filter(env -> "KAFKA_JVM_PERFORMANCE_OPTS".equals(env.getName())).map(EnvVar::getValue).findFirst().orElse("").contains("-XX:+UseG1GC"), is(true)); assertThat(cont.getEnv().stream().filter(env -> "KAFKA_JVM_PERFORMANCE_OPTS".equals(env.getName())).map(EnvVar::getValue).findFirst().orElse("").contains("-XX:MaxGCPauseMillis=20"), is(true)); @@ -1238,7 +1270,7 @@ public void testKafkaMirrorMaker2ContainerEnvVars() { .endSpec() .build(); - List kafkaEnvVars = KafkaMirrorMaker2Cluster.fromCrd(resource, VERSIONS).getEnvVars(); + List kafkaEnvVars = KafkaMirrorMaker2Cluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS).getEnvVars(); assertThat("Failed to correctly set container environment variable: " + testEnvOneKey, kafkaEnvVars.stream().filter(env -> testEnvOneKey.equals(env.getName())) @@ -1276,7 +1308,7 @@ public void testKafkaContainerEnvVarsConflict() { .endSpec() .build(); - List kafkaEnvVars = KafkaMirrorMaker2Cluster.fromCrd(resource, VERSIONS).getEnvVars(); + List kafkaEnvVars = KafkaMirrorMaker2Cluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS).getEnvVars(); assertThat("Failed to prevent over writing existing container environment variable: " + testEnvOneKey, kafkaEnvVars.stream().filter(env -> testEnvOneKey.equals(env.getName())) @@ -1294,9 +1326,10 @@ public void testTracing() { .endJaegerTracing() .endSpec() .build(); - KafkaMirrorMaker2Cluster kmm2 = KafkaMirrorMaker2Cluster.fromCrd(resource, VERSIONS); + KafkaMirrorMaker2Cluster kmm2 = KafkaMirrorMaker2Cluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); - Deployment dep = kmm2.generateDeployment(Collections.EMPTY_MAP, true, null, null); + Deployment dep = kmm2.generateDeployment( + Collections.EMPTY_MAP, true, null, null); Container cont = getContainer(dep); assertThat(cont.getEnv().stream().filter(env -> KafkaMirrorMaker2Cluster.ENV_VAR_STRIMZI_TRACING.equals(env.getName())).map(EnvVar::getValue).findFirst().orElse("").equals("jaeger"), is(true)); assertThat(cont.getEnv().stream().filter(env -> KafkaMirrorMaker2Cluster.ENV_VAR_KAFKA_CONNECT_CONFIGURATION.equals(env.getName())).map(EnvVar::getValue).findFirst().orElse("").contains("consumer.interceptor.classes=io.opentracing.contrib.kafka.TracingConsumerInterceptor"), is(true)); @@ -1320,8 +1353,9 @@ public void 
testGenerateDeploymentWithOAuthWithAccessToken() { .endSpec() .build(); - KafkaMirrorMaker2Cluster kmm2 = KafkaMirrorMaker2Cluster.fromCrd(resource, VERSIONS); - Deployment dep = kmm2.generateDeployment(emptyMap(), true, null, null); + KafkaMirrorMaker2Cluster kmm2 = KafkaMirrorMaker2Cluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); + Deployment dep = kmm2.generateDeployment( + emptyMap(), true, null, null); Container cont = getContainer(dep); assertThat(cont.getEnv().stream().filter(var -> KafkaMirrorMaker2Cluster.ENV_VAR_KAFKA_CONNECT_SASL_MECHANISM.equals(var.getName())).findFirst().orElse(null).getValue(), is("oauth")); @@ -1349,8 +1383,9 @@ public void testGenerateDeploymentWithOAuthWithRefreshToken() { .endSpec() .build(); - KafkaMirrorMaker2Cluster kmm2 = KafkaMirrorMaker2Cluster.fromCrd(resource, VERSIONS); - Deployment dep = kmm2.generateDeployment(emptyMap(), true, null, null); + KafkaMirrorMaker2Cluster kmm2 = KafkaMirrorMaker2Cluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); + Deployment dep = kmm2.generateDeployment( + emptyMap(), true, null, null); Container cont = getContainer(dep); assertThat(cont.getEnv().stream().filter(var -> KafkaMirrorMaker2Cluster.ENV_VAR_KAFKA_CONNECT_SASL_MECHANISM.equals(var.getName())).findFirst().orElse(null).getValue(), is("oauth")); @@ -1379,8 +1414,9 @@ public void testGenerateDeploymentWithOAuthWithClientSecret() { .endSpec() .build(); - KafkaMirrorMaker2Cluster kmm2 = KafkaMirrorMaker2Cluster.fromCrd(resource, VERSIONS); - Deployment dep = kmm2.generateDeployment(emptyMap(), true, null, null); + KafkaMirrorMaker2Cluster kmm2 = KafkaMirrorMaker2Cluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); + Deployment dep = kmm2.generateDeployment( + emptyMap(), true, null, null); Container cont = getContainer(dep); assertThat(cont.getEnv().stream().filter(var -> KafkaMirrorMaker2Cluster.ENV_VAR_KAFKA_CONNECT_SASL_MECHANISM.equals(var.getName())).findFirst().orElse(null).getValue(), is("oauth")); @@ -1406,7 +1442,7 @@ public void testGenerateDeploymentWithOAuthWithMissingClientSecret() { .endSpec() .build(); - KafkaMirrorMaker2Cluster kmm2 = KafkaMirrorMaker2Cluster.fromCrd(resource, VERSIONS); + KafkaMirrorMaker2Cluster kmm2 = KafkaMirrorMaker2Cluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); }); } @@ -1429,7 +1465,7 @@ public void testGenerateDeploymentWithOAuthWithMissingUri() { .endSpec() .build(); - KafkaMirrorMaker2Cluster kmm2 = KafkaMirrorMaker2Cluster.fromCrd(resource, VERSIONS); + KafkaMirrorMaker2Cluster kmm2 = KafkaMirrorMaker2Cluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); }); } @@ -1469,8 +1505,9 @@ public void testGenerateDeploymentWithOAuthWithTls() { .endSpec() .build(); - KafkaMirrorMaker2Cluster kmm2 = KafkaMirrorMaker2Cluster.fromCrd(resource, VERSIONS); - Deployment dep = kmm2.generateDeployment(emptyMap(), true, null, null); + KafkaMirrorMaker2Cluster kmm2 = KafkaMirrorMaker2Cluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); + Deployment dep = kmm2.generateDeployment( + emptyMap(), true, null, null); Container cont = getContainer(dep); assertThat(cont.getEnv().stream().filter(var -> KafkaConnectCluster.ENV_VAR_KAFKA_CONNECT_SASL_MECHANISM.equals(var.getName())).findFirst().orElse(null).getValue(), is("oauth")); @@ -1507,7 +1544,7 @@ public void testGenerateDeploymentWithOldVersion() { .endSpec() .build(); - KafkaMirrorMaker2Cluster.fromCrd(resource, VERSIONS); + 
KafkaMirrorMaker2Cluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); }); } @@ -1515,7 +1552,7 @@ public void testGenerateDeploymentWithOldVersion() { public void testNetworkPolicy() { KafkaMirrorMaker2 resource = new KafkaMirrorMaker2Builder(this.resourceWithMetrics) .build(); - KafkaMirrorMaker2Cluster kc = KafkaMirrorMaker2Cluster.fromCrd(resource, VERSIONS); + KafkaMirrorMaker2Cluster kc = KafkaMirrorMaker2Cluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); kc.generateMetricsAndLogConfigMap(new MetricsAndLogging(metricsCM, null)); NetworkPolicy np = kc.generateNetworkPolicy(true, "operator-namespace", null); @@ -1539,7 +1576,7 @@ public void testNetworkPolicy() { public void testNetworkPolicyWithConnectorOperatorSameNamespace() { KafkaMirrorMaker2 resource = new KafkaMirrorMaker2Builder(this.resourceWithMetrics) .build(); - KafkaMirrorMaker2Cluster kc = KafkaMirrorMaker2Cluster.fromCrd(resource, VERSIONS); + KafkaMirrorMaker2Cluster kc = KafkaMirrorMaker2Cluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); kc.generateMetricsAndLogConfigMap(new MetricsAndLogging(metricsCM, null)); NetworkPolicy np = kc.generateNetworkPolicy(true, namespace, null); @@ -1562,7 +1599,7 @@ public void testNetworkPolicyWithConnectorOperatorSameNamespace() { public void testNetworkPolicyWithConnectorOperatorWithNamespaceLabels() { KafkaMirrorMaker2 resource = new KafkaMirrorMaker2Builder(this.resourceWithMetrics) .build(); - KafkaMirrorMaker2Cluster kc = KafkaMirrorMaker2Cluster.fromCrd(resource, VERSIONS); + KafkaMirrorMaker2Cluster kc = KafkaMirrorMaker2Cluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); kc.generateMetricsAndLogConfigMap(new MetricsAndLogging(metricsCM, null)); NetworkPolicy np = kc.generateNetworkPolicy(true, "operator-namespace", Labels.fromMap(Collections.singletonMap("nsLabelKey", "nsLabelValue"))); @@ -1595,7 +1632,7 @@ public void testMetricsParsingFromConfigMap() { .endSpec() .build(); - KafkaMirrorMaker2Cluster kmm = KafkaMirrorMaker2Cluster.fromCrd(kafkaMirrorMaker2, VERSIONS); + KafkaMirrorMaker2Cluster kmm = KafkaMirrorMaker2Cluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaMirrorMaker2, VERSIONS); assertThat(kmm.isMetricsEnabled(), is(true)); assertThat(kmm.getMetricsConfigInCm(), is(metrics)); @@ -1603,7 +1640,7 @@ public void testMetricsParsingFromConfigMap() { @ParallelTest public void testMetricsParsingNoMetrics() { - KafkaMirrorMaker2Cluster kmm = KafkaMirrorMaker2Cluster.fromCrd(this.resource, VERSIONS); + KafkaMirrorMaker2Cluster kmm = KafkaMirrorMaker2Cluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, this.resource, VERSIONS); assertThat(kmm.isMetricsEnabled(), is(false)); assertThat(kmm.getMetricsConfigInCm(), is(nullValue())); diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaMirrorMakerClusterTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaMirrorMakerClusterTest.java index 739e029a58..9de117d585 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaMirrorMakerClusterTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/KafkaMirrorMakerClusterTest.java @@ -44,6 +44,7 @@ import io.strimzi.operator.cluster.KafkaVersionTestUtils; import io.strimzi.operator.cluster.ResourceUtils; import io.strimzi.operator.common.MetricsAndLogging; +import io.strimzi.operator.common.Reconciliation; import io.strimzi.operator.common.model.Labels; import 
io.strimzi.test.TestUtils; import io.strimzi.test.annotations.ParallelSuite; @@ -65,6 +66,7 @@ import static org.hamcrest.Matchers.containsInAnyOrder; import static org.junit.jupiter.api.Assertions.assertThrows; +@SuppressWarnings({"checkstyle:ClassDataAbstractionCoupling"}) @ParallelSuite public class KafkaMirrorMakerClusterTest { private static final KafkaVersion.Lookup VERSIONS = KafkaVersionTestUtils.getKafkaVersionLookup(); @@ -122,7 +124,7 @@ public class KafkaMirrorMakerClusterTest { .endSpec() .build(); - private final KafkaMirrorMakerCluster mm = KafkaMirrorMakerCluster.fromCrd(resourceWithMetrics, VERSIONS); + private final KafkaMirrorMakerCluster mm = KafkaMirrorMakerCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resourceWithMetrics, VERSIONS); @ParallelTest public void testMetricsConfigMap() { @@ -194,10 +196,10 @@ public void testDefaultValues() { .withInclude(".*") .endSpec() .build(); - KafkaMirrorMakerCluster mm = KafkaMirrorMakerCluster.fromCrd(resource, VERSIONS); + KafkaMirrorMakerCluster mm = KafkaMirrorMakerCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); assertThat(mm.image, is(KafkaVersionTestUtils.DEFAULT_KAFKA_MIRROR_MAKER_IMAGE)); - assertThat(new KafkaMirrorMakerConsumerConfiguration(mm.consumer.getConfig().entrySet()).getConfiguration(), is(defaultConsumerConfiguration)); - assertThat(new KafkaMirrorMakerProducerConfiguration(mm.producer.getConfig().entrySet()).getConfiguration(), is(defaultProducerConfiguration)); + assertThat(new KafkaMirrorMakerConsumerConfiguration(Reconciliation.DUMMY_RECONCILIATION, mm.consumer.getConfig().entrySet()).getConfiguration(), is(defaultConsumerConfiguration)); + assertThat(new KafkaMirrorMakerProducerConfiguration(Reconciliation.DUMMY_RECONCILIATION, mm.producer.getConfig().entrySet()).getConfiguration(), is(defaultProducerConfiguration)); } @ParallelTest @@ -219,7 +221,7 @@ public void testIncludeHandling() { .withWhitelist("alternative.*") .endSpec() .build(); - KafkaMirrorMakerCluster cluster = KafkaMirrorMakerCluster.fromCrd(both, VERSIONS); + KafkaMirrorMakerCluster cluster = KafkaMirrorMakerCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, both, VERSIONS); assertThat(cluster.getInclude(), is(include)); @@ -229,7 +231,7 @@ public void testIncludeHandling() { .withInclude(null) .endSpec() .build(); - cluster = KafkaMirrorMakerCluster.fromCrd(legacy, VERSIONS); + cluster = KafkaMirrorMakerCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, legacy, VERSIONS); assertThat(cluster.getInclude(), is("alternative.*")); @@ -240,7 +242,7 @@ public void testIncludeHandling() { .endSpec() .build(); - assertThrows(InvalidResourceException.class, () -> KafkaMirrorMakerCluster.fromCrd(none, VERSIONS)); + assertThrows(InvalidResourceException.class, () -> KafkaMirrorMakerCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, none, VERSIONS)); } @ParallelTest @@ -290,7 +292,7 @@ public void testGenerateDeploymentWithTls() { .endProducer() .endSpec() .build(); - KafkaMirrorMakerCluster kc = KafkaMirrorMakerCluster.fromCrd(resource, VERSIONS); + KafkaMirrorMakerCluster kc = KafkaMirrorMakerCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); Deployment dep = kc.generateDeployment(emptyMap(), true, null, null); assertThat(dep.getSpec().getTemplate().getSpec().getVolumes().get(2).getName(), is("my-secret-p")); @@ -348,7 +350,7 @@ public void testGenerateDeploymentWithTlsAuth() { .endProducer() .endSpec() .build(); - KafkaMirrorMakerCluster mmc = KafkaMirrorMakerCluster.fromCrd(resource, 
VERSIONS); + KafkaMirrorMakerCluster mmc = KafkaMirrorMakerCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); Deployment dep = mmc.generateDeployment(emptyMap(), true, null, null); assertThat(dep.getSpec().getTemplate().getSpec().getVolumes().get(5).getName(), is("user-secret-c")); @@ -400,7 +402,7 @@ public void testGenerateDeploymentWithTlsSameSecret() { .endProducer() .endSpec() .build(); - KafkaMirrorMakerCluster mmc = KafkaMirrorMakerCluster.fromCrd(resource, VERSIONS); + KafkaMirrorMakerCluster mmc = KafkaMirrorMakerCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); Deployment dep = mmc.generateDeployment(emptyMap(), true, null, null); assertThat(dep.getSpec().getTemplate().getSpec().getVolumes().size(), is(4)); @@ -433,7 +435,7 @@ public void testGenerateDeploymentWithScramSha512Auth() { .endConsumer() .endSpec() .build(); - KafkaMirrorMakerCluster mmc = KafkaMirrorMakerCluster.fromCrd(resource, VERSIONS); + KafkaMirrorMakerCluster mmc = KafkaMirrorMakerCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); Deployment dep = mmc.generateDeployment(emptyMap(), true, null, null); assertThat(dep.getSpec().getTemplate().getSpec().getVolumes().get(2).getName(), is("producer-secret")); @@ -480,7 +482,7 @@ public void testGenerateDeploymentWithPlain() { .endConsumer() .endSpec() .build(); - KafkaMirrorMakerCluster mmc = KafkaMirrorMakerCluster.fromCrd(resource, VERSIONS); + KafkaMirrorMakerCluster mmc = KafkaMirrorMakerCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); Deployment dep = mmc.generateDeployment(emptyMap(), true, null, null); assertThat(dep.getSpec().getTemplate().getSpec().getVolumes().get(2).getName(), is("producer-secret")); @@ -570,7 +572,7 @@ public void testTemplate() { .endTemplate() .endSpec() .build(); - KafkaMirrorMakerCluster mmc = KafkaMirrorMakerCluster.fromCrd(resource, VERSIONS); + KafkaMirrorMakerCluster mmc = KafkaMirrorMakerCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); // Check Deployment Deployment dep = mmc.generateDeployment(emptyMap(), true, null, null); @@ -609,7 +611,7 @@ public void testGracePeriod() { .endTemplate() .endSpec() .build(); - KafkaMirrorMakerCluster mmc = KafkaMirrorMakerCluster.fromCrd(resource, VERSIONS); + KafkaMirrorMakerCluster mmc = KafkaMirrorMakerCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); Deployment dep = mmc.generateDeployment(emptyMap(), true, null, null); assertThat(dep.getSpec().getTemplate().getSpec().getTerminationGracePeriodSeconds(), is(Long.valueOf(123))); @@ -618,7 +620,7 @@ public void testGracePeriod() { @ParallelTest public void testDefaultGracePeriod() { KafkaMirrorMaker resource = new KafkaMirrorMakerBuilder(this.resource).build(); - KafkaMirrorMakerCluster mmc = KafkaMirrorMakerCluster.fromCrd(resource, VERSIONS); + KafkaMirrorMakerCluster mmc = KafkaMirrorMakerCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); Deployment dep = mmc.generateDeployment(emptyMap(), true, null, null); assertThat(dep.getSpec().getTemplate().getSpec().getTerminationGracePeriodSeconds(), is(Long.valueOf(30))); @@ -638,7 +640,7 @@ public void testImagePullSecrets() { .endTemplate() .endSpec() .build(); - KafkaMirrorMakerCluster mmc = KafkaMirrorMakerCluster.fromCrd(resource, VERSIONS); + KafkaMirrorMakerCluster mmc = KafkaMirrorMakerCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); Deployment dep = mmc.generateDeployment(emptyMap(), true, null, 
null); assertThat(dep.getSpec().getTemplate().getSpec().getImagePullSecrets().size(), is(2)); @@ -655,7 +657,7 @@ public void testImagePullSecretsFromCo() { secrets.add(secret1); secrets.add(secret2); - KafkaMirrorMakerCluster mmc = KafkaMirrorMakerCluster.fromCrd(this.resource, VERSIONS); + KafkaMirrorMakerCluster mmc = KafkaMirrorMakerCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, this.resource, VERSIONS); Deployment dep = mmc.generateDeployment(emptyMap(), true, null, secrets); assertThat(dep.getSpec().getTemplate().getSpec().getImagePullSecrets().size(), is(2)); @@ -677,7 +679,7 @@ public void testImagePullSecretsFromBoth() { .endTemplate() .endSpec() .build(); - KafkaMirrorMakerCluster mmc = KafkaMirrorMakerCluster.fromCrd(resource, VERSIONS); + KafkaMirrorMakerCluster mmc = KafkaMirrorMakerCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); Deployment dep = mmc.generateDeployment(emptyMap(), true, null, singletonList(secret1)); assertThat(dep.getSpec().getTemplate().getSpec().getImagePullSecrets().size(), is(1)); @@ -688,7 +690,7 @@ public void testImagePullSecretsFromBoth() { @ParallelTest public void testDefaultImagePullSecrets() { KafkaMirrorMaker resource = new KafkaMirrorMakerBuilder(this.resource).build(); - KafkaMirrorMakerCluster mmc = KafkaMirrorMakerCluster.fromCrd(resource, VERSIONS); + KafkaMirrorMakerCluster mmc = KafkaMirrorMakerCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); Deployment dep = mmc.generateDeployment(emptyMap(), true, null, null); assertThat(dep.getSpec().getTemplate().getSpec().getImagePullSecrets(), is(nullValue())); @@ -705,7 +707,7 @@ public void testSecurityContext() { .endTemplate() .endSpec() .build(); - KafkaMirrorMakerCluster mmc = KafkaMirrorMakerCluster.fromCrd(resource, VERSIONS); + KafkaMirrorMakerCluster mmc = KafkaMirrorMakerCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); Deployment dep = mmc.generateDeployment(emptyMap(), true, null, null); assertThat(dep.getSpec().getTemplate().getSpec().getSecurityContext(), is(notNullValue())); @@ -717,7 +719,7 @@ public void testSecurityContext() { @ParallelTest public void testDefaultSecurityContext() { KafkaMirrorMaker resource = new KafkaMirrorMakerBuilder(this.resource).build(); - KafkaMirrorMakerCluster mmc = KafkaMirrorMakerCluster.fromCrd(resource, VERSIONS); + KafkaMirrorMakerCluster mmc = KafkaMirrorMakerCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); Deployment dep = mmc.generateDeployment(emptyMap(), true, null, null); assertThat(dep.getSpec().getTemplate().getSpec().getSecurityContext(), is(nullValue())); @@ -734,7 +736,7 @@ public void testPodDisruptionBudget() { .endTemplate() .endSpec() .build(); - KafkaMirrorMakerCluster mmc = KafkaMirrorMakerCluster.fromCrd(resource, VERSIONS); + KafkaMirrorMakerCluster mmc = KafkaMirrorMakerCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); PodDisruptionBudget pdb = mmc.generatePodDisruptionBudget(); assertThat(pdb.getSpec().getMaxUnavailable(), is(new IntOrString(2))); @@ -743,7 +745,7 @@ public void testPodDisruptionBudget() { @ParallelTest public void testDefaultPodDisruptionBudget() { KafkaMirrorMaker resource = new KafkaMirrorMakerBuilder(this.resource).build(); - KafkaMirrorMakerCluster mmc = KafkaMirrorMakerCluster.fromCrd(resource, VERSIONS); + KafkaMirrorMakerCluster mmc = KafkaMirrorMakerCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); PodDisruptionBudget pdb = 
mmc.generatePodDisruptionBudget(); assertThat(pdb.getSpec().getMaxUnavailable(), is(new IntOrString(1))); @@ -751,7 +753,7 @@ public void testDefaultPodDisruptionBudget() { @ParallelTest public void testImagePullPolicy() { - KafkaMirrorMakerCluster kc = KafkaMirrorMakerCluster.fromCrd(resource, VERSIONS); + KafkaMirrorMakerCluster kc = KafkaMirrorMakerCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); Deployment dep = kc.generateDeployment(Collections.EMPTY_MAP, true, ImagePullPolicy.ALWAYS, null); assertThat(dep.getSpec().getTemplate().getSpec().getContainers().get(0).getImagePullPolicy(), is(ImagePullPolicy.ALWAYS.toString())); @@ -775,7 +777,7 @@ public void testResources() { .withResources(new ResourceRequirementsBuilder().withLimits(limits).withRequests(requests).build()) .endSpec() .build(); - KafkaMirrorMakerCluster mmc = KafkaMirrorMakerCluster.fromCrd(resource, VERSIONS); + KafkaMirrorMakerCluster mmc = KafkaMirrorMakerCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); Deployment dep = mmc.generateDeployment(emptyMap(), true, null, null); Container cont = dep.getSpec().getTemplate().getSpec().getContainers().get(0); @@ -798,7 +800,7 @@ public void testJvmOptions() { .endJvmOptions() .endSpec() .build(); - KafkaMirrorMakerCluster mmc = KafkaMirrorMakerCluster.fromCrd(resource, VERSIONS); + KafkaMirrorMakerCluster mmc = KafkaMirrorMakerCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); Deployment dep = mmc.generateDeployment(Collections.EMPTY_MAP, true, null, null); Container cont = dep.getSpec().getTemplate().getSpec().getContainers().get(0); @@ -810,7 +812,7 @@ public void testJvmOptions() { @ParallelTest public void testDefaultProbes() { - KafkaMirrorMakerCluster mmc = KafkaMirrorMakerCluster.fromCrd(this.resource, VERSIONS); + KafkaMirrorMakerCluster mmc = KafkaMirrorMakerCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, this.resource, VERSIONS); Deployment dep = mmc.generateDeployment(Collections.EMPTY_MAP, true, null, null); Container cont = dep.getSpec().getTemplate().getSpec().getContainers().get(0); @@ -848,7 +850,7 @@ public void testConfiguredProbes() { .endReadinessProbe() .endSpec() .build(); - KafkaMirrorMakerCluster mmc = KafkaMirrorMakerCluster.fromCrd(resource, VERSIONS); + KafkaMirrorMakerCluster mmc = KafkaMirrorMakerCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); Deployment dep = mmc.generateDeployment(Collections.EMPTY_MAP, true, null, null); Container cont = dep.getSpec().getTemplate().getSpec().getContainers().get(0); @@ -900,7 +902,7 @@ public void testKafkaMMContainerEnvVars() { .endSpec() .build(); - List kafkaEnvVars = KafkaMirrorMakerCluster.fromCrd(resource, VERSIONS).getEnvVars(); + List kafkaEnvVars = KafkaMirrorMakerCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS).getEnvVars(); assertThat("Failed to correctly set container environment variable: " + testEnvOneKey, kafkaEnvVars.stream().filter(env -> testEnvOneKey.equals(env.getName())) @@ -938,7 +940,7 @@ public void testKafkaMMContainerEnvVarsConflict() { .endSpec() .build(); - List kafkaEnvVars = KafkaMirrorMakerCluster.fromCrd(resource, VERSIONS).getEnvVars(); + List kafkaEnvVars = KafkaMirrorMakerCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS).getEnvVars(); assertThat("Failed to prevent over writing existing container environment variable: " + testEnvOneKey, kafkaEnvVars.stream().filter(env -> testEnvOneKey.equals(env.getName())) @@ -956,7 +958,7 @@ 
public void testTracing() { .endJaegerTracing() .endSpec() .build(); - KafkaMirrorMakerCluster mmc = KafkaMirrorMakerCluster.fromCrd(resource, VERSIONS); + KafkaMirrorMakerCluster mmc = KafkaMirrorMakerCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); Deployment dep = mmc.generateDeployment(Collections.EMPTY_MAP, true, null, null); Container cont = dep.getSpec().getTemplate().getSpec().getContainers().get(0); @@ -981,7 +983,7 @@ public void testGenerateDeploymentWithConsumerOAuthWithAccessToken() { .endSpec() .build(); - KafkaMirrorMakerCluster kc = KafkaMirrorMakerCluster.fromCrd(resource, VERSIONS); + KafkaMirrorMakerCluster kc = KafkaMirrorMakerCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); Deployment dep = kc.generateDeployment(emptyMap(), true, null, null); Container cont = dep.getSpec().getTemplate().getSpec().getContainers().get(0); @@ -1009,7 +1011,7 @@ public void testGenerateDeploymentWithConsumerOAuthWithRefreshToken() { .endSpec() .build(); - KafkaMirrorMakerCluster kc = KafkaMirrorMakerCluster.fromCrd(resource, VERSIONS); + KafkaMirrorMakerCluster kc = KafkaMirrorMakerCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); Deployment dep = kc.generateDeployment(emptyMap(), true, null, null); Container cont = dep.getSpec().getTemplate().getSpec().getContainers().get(0); @@ -1038,7 +1040,7 @@ public void testGenerateDeploymentWithConsumerOAuthWithClientSecret() { .endSpec() .build(); - KafkaMirrorMakerCluster kc = KafkaMirrorMakerCluster.fromCrd(resource, VERSIONS); + KafkaMirrorMakerCluster kc = KafkaMirrorMakerCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); Deployment dep = kc.generateDeployment(emptyMap(), true, null, null); Container cont = dep.getSpec().getTemplate().getSpec().getContainers().get(0); @@ -1064,7 +1066,7 @@ public void testGenerateDeploymentWithConsumerOAuthWithMissingClientSecret() { .endSpec() .build(); - KafkaMirrorMakerCluster kc = KafkaMirrorMakerCluster.fromCrd(resource, VERSIONS); + KafkaMirrorMakerCluster kc = KafkaMirrorMakerCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); }); } @@ -1086,7 +1088,7 @@ public void testGenerateDeploymentWithConsumerOAuthWithMissingUri() { .endSpec() .build(); - KafkaMirrorMakerCluster kc = KafkaMirrorMakerCluster.fromCrd(resource, VERSIONS); + KafkaMirrorMakerCluster kc = KafkaMirrorMakerCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); }); } @@ -1125,7 +1127,7 @@ public void testGenerateDeploymentWithConsumerOAuthWithTls() { .endSpec() .build(); - KafkaMirrorMakerCluster kc = KafkaMirrorMakerCluster.fromCrd(resource, VERSIONS); + KafkaMirrorMakerCluster kc = KafkaMirrorMakerCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); Deployment dep = kc.generateDeployment(emptyMap(), true, null, null); Container cont = dep.getSpec().getTemplate().getSpec().getContainers().get(0); @@ -1170,7 +1172,7 @@ public void testGenerateDeploymentWithProducerOAuthWithAccessToken() { .endSpec() .build(); - KafkaMirrorMakerCluster kc = KafkaMirrorMakerCluster.fromCrd(resource, VERSIONS); + KafkaMirrorMakerCluster kc = KafkaMirrorMakerCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); Deployment dep = kc.generateDeployment(emptyMap(), true, null, null); Container cont = dep.getSpec().getTemplate().getSpec().getContainers().get(0); @@ -1198,7 +1200,7 @@ public void testGenerateDeploymentWithProducerOAuthWithRefreshToken() { .endSpec() .build(); - 
KafkaMirrorMakerCluster kc = KafkaMirrorMakerCluster.fromCrd(resource, VERSIONS); + KafkaMirrorMakerCluster kc = KafkaMirrorMakerCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); Deployment dep = kc.generateDeployment(emptyMap(), true, null, null); Container cont = dep.getSpec().getTemplate().getSpec().getContainers().get(0); @@ -1227,7 +1229,7 @@ public void testGenerateDeploymentWithProducerOAuthWithClientSecret() { .endSpec() .build(); - KafkaMirrorMakerCluster kc = KafkaMirrorMakerCluster.fromCrd(resource, VERSIONS); + KafkaMirrorMakerCluster kc = KafkaMirrorMakerCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); Deployment dep = kc.generateDeployment(emptyMap(), true, null, null); Container cont = dep.getSpec().getTemplate().getSpec().getContainers().get(0); @@ -1253,7 +1255,7 @@ public void testGenerateDeploymentWithProducerOAuthWithMissingClientSecret() { .endSpec() .build(); - KafkaMirrorMakerCluster kc = KafkaMirrorMakerCluster.fromCrd(resource, VERSIONS); + KafkaMirrorMakerCluster kc = KafkaMirrorMakerCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); }); } @@ -1275,7 +1277,7 @@ public void testGenerateDeploymentWithProducerOAuthWithMissingUri() { .endSpec() .build(); - KafkaMirrorMakerCluster kc = KafkaMirrorMakerCluster.fromCrd(resource, VERSIONS); + KafkaMirrorMakerCluster kc = KafkaMirrorMakerCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); }); } @@ -1314,7 +1316,7 @@ public void testGenerateDeploymentWithProducerOAuthWithTls() { .endSpec() .build(); - KafkaMirrorMakerCluster kc = KafkaMirrorMakerCluster.fromCrd(resource, VERSIONS); + KafkaMirrorMakerCluster kc = KafkaMirrorMakerCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); Deployment dep = kc.generateDeployment(emptyMap(), true, null, null); Container cont = dep.getSpec().getTemplate().getSpec().getContainers().get(0); @@ -1391,7 +1393,7 @@ public void testGenerateDeploymentWithBothSidedOAuthWithTls() { .endSpec() .build(); - KafkaMirrorMakerCluster kc = KafkaMirrorMakerCluster.fromCrd(resource, VERSIONS); + KafkaMirrorMakerCluster kc = KafkaMirrorMakerCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); Deployment dep = kc.generateDeployment(emptyMap(), true, null, null); Container cont = dep.getSpec().getTemplate().getSpec().getContainers().get(0); @@ -1474,7 +1476,7 @@ public void testGenerateDeploymentWithOAuthUsingOpaqueTokens() { .endSpec() .build(); - KafkaMirrorMakerCluster kc = KafkaMirrorMakerCluster.fromCrd(resource, VERSIONS); + KafkaMirrorMakerCluster kc = KafkaMirrorMakerCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, resource, VERSIONS); Deployment dep = kc.generateDeployment(emptyMap(), true, null, null); Container cont = dep.getSpec().getTemplate().getSpec().getContainers().get(0); @@ -1505,7 +1507,7 @@ public void testMetricsParsingFromConfigMap() { .endSpec() .build(); - KafkaMirrorMakerCluster kmm = KafkaMirrorMakerCluster.fromCrd(mirrorMaker, VERSIONS); + KafkaMirrorMakerCluster kmm = KafkaMirrorMakerCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, mirrorMaker, VERSIONS); assertThat(kmm.isMetricsEnabled(), is(true)); assertThat(kmm.getMetricsConfigInCm(), is(metrics)); @@ -1513,7 +1515,7 @@ public void testMetricsParsingFromConfigMap() { @ParallelTest public void testMetricsParsingNoMetrics() { - KafkaMirrorMakerCluster kmm = KafkaMirrorMakerCluster.fromCrd(this.resource, VERSIONS); + KafkaMirrorMakerCluster kmm = 
KafkaMirrorMakerCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, this.resource, VERSIONS); assertThat(kmm.isMetricsEnabled(), is(false)); assertThat(kmm.getMetricsConfigInCm(), is(nullValue())); diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/ListenersValidatorTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/ListenersValidatorTest.java index d571185a72..73716f006c 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/ListenersValidatorTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/ListenersValidatorTest.java @@ -21,6 +21,7 @@ import io.strimzi.api.kafka.model.template.ExternalTrafficPolicy; import io.strimzi.api.kafka.model.template.IpFamily; import io.strimzi.api.kafka.model.template.IpFamilyPolicy; +import io.strimzi.operator.common.Reconciliation; import io.strimzi.test.annotations.ParallelSuite; import io.strimzi.test.annotations.ParallelTest; @@ -55,7 +56,7 @@ public void testValidateOldListener() { .build(); List newListeners = ListenersConvertor.convertToNewFormat(oldListeners); - ListenersValidator.validate(3, newListeners); + ListenersValidator.validate(Reconciliation.DUMMY_RECONCILIATION, 3, newListeners); } @ParallelTest @@ -73,7 +74,7 @@ public void testValidateNewListeners() { .build(); List listeners = asList(listener1, listener2); - ListenersValidator.validate(3, listeners); + ListenersValidator.validate(Reconciliation.DUMMY_RECONCILIATION, 3, listeners); } @ParallelTest @@ -92,7 +93,7 @@ public void testValidateThrowsException() { List listeners = asList(listener1, listener2); - Exception exception = assertThrows(InvalidResourceException.class, () -> ListenersValidator.validate(3, listeners)); + Exception exception = assertThrows(InvalidResourceException.class, () -> ListenersValidator.validate(Reconciliation.DUMMY_RECONCILIATION, 3, listeners)); assertThat(exception.getMessage(), containsString("every listener needs to have a unique port number")); } @@ -677,7 +678,7 @@ public void testValidateOauth() { List listeners = asList(listener1); - Exception exception = assertThrows(InvalidResourceException.class, () -> ListenersValidator.validate(3, listeners)); + Exception exception = assertThrows(InvalidResourceException.class, () -> ListenersValidator.validate(Reconciliation.DUMMY_RECONCILIATION, 3, listeners)); assertThat(exception.getMessage(), allOf( containsString("listener listener1: Introspection endpoint URI or JWKS endpoint URI has to be specified"), containsString("listener listener1: Valid Issuer URI has to be specified or 'checkIssuer' set to 'false'"))); @@ -699,7 +700,7 @@ public void testValidateBrokerCertChainAndKey() { List listeners = asList(listener1); - Exception exception = assertThrows(InvalidResourceException.class, () -> ListenersValidator.validate(3, listeners)); + Exception exception = assertThrows(InvalidResourceException.class, () -> ListenersValidator.validate(Reconciliation.DUMMY_RECONCILIATION, 3, listeners)); assertThat(exception.getMessage(), allOf( containsString("listener 'listener1' cannot have an empty secret name in the brokerCertChainAndKey"), containsString("listener 'listener1' cannot have an empty key in the brokerCertChainAndKey"), @@ -778,7 +779,7 @@ public void testValidateOauthPlain() { List listeners = asList(listener); - Exception exception = assertThrows(InvalidResourceException.class, () -> ListenersValidator.validate(3, listeners)); + Exception exception = assertThrows(InvalidResourceException.class, () -> 
ListenersValidator.validate(Reconciliation.DUMMY_RECONCILIATION, 3, listeners)); assertThat(exception.getMessage(), allOf( containsString("listener listener1: At least one of 'enablePlain', 'enableOauthBearer' has to be set to 'true'"))); @@ -788,7 +789,7 @@ public void testValidateOauthPlain() { .build(); List listeners2 = asList(listener); - exception = assertThrows(InvalidResourceException.class, () -> ListenersValidator.validate(3, listeners2)); + exception = assertThrows(InvalidResourceException.class, () -> ListenersValidator.validate(Reconciliation.DUMMY_RECONCILIATION, 3, listeners2)); assertThat(exception.getMessage(), allOf( containsString("listener listener1: Introspection endpoint URI or JWKS endpoint URI has to be specified"))); @@ -798,7 +799,7 @@ public void testValidateOauthPlain() { .build(); List listeners3 = asList(listener); - assertDoesNotThrow(() -> ListenersValidator.validate(3, listeners3)); + assertDoesNotThrow(() -> ListenersValidator.validate(Reconciliation.DUMMY_RECONCILIATION, 3, listeners3)); } @ParallelTest @@ -817,7 +818,7 @@ public void testValidateAudienceOauth() { List listeners = asList(listener); - Exception exception = assertThrows(InvalidResourceException.class, () -> ListenersValidator.validate(3, listeners)); + Exception exception = assertThrows(InvalidResourceException.class, () -> ListenersValidator.validate(Reconciliation.DUMMY_RECONCILIATION, 3, listeners)); assertThat(exception.getMessage(), allOf( containsString("listener listener1: 'clientId' has to be configured when 'checkAudience' is 'true'"))); @@ -828,7 +829,7 @@ public void testValidateAudienceOauth() { .build(); List listeners2 = asList(listener); - exception = assertThrows(InvalidResourceException.class, () -> ListenersValidator.validate(3, listeners2)); + exception = assertThrows(InvalidResourceException.class, () -> ListenersValidator.validate(Reconciliation.DUMMY_RECONCILIATION, 3, listeners2)); assertThat(exception.getMessage(), allOf( not(containsString("listener listener1: 'clientId' has to be configured when 'checkAudience' is 'true'")))); } @@ -849,7 +850,7 @@ public void testValidateCustomClaimCheckOauth() { List listeners = asList(listener); - Exception exception = assertThrows(InvalidResourceException.class, () -> ListenersValidator.validate(3, listeners)); + Exception exception = assertThrows(InvalidResourceException.class, () -> ListenersValidator.validate(Reconciliation.DUMMY_RECONCILIATION, 3, listeners)); assertThat(exception.getMessage(), allOf( containsString("listener listener1: 'customClaimCheck' value not a valid JsonPath filter query - Failed to parse filter query: \"invalid\""))); @@ -860,7 +861,7 @@ public void testValidateCustomClaimCheckOauth() { .build(); List listeners2 = asList(listener); - exception = assertThrows(InvalidResourceException.class, () -> ListenersValidator.validate(3, listeners2)); + exception = assertThrows(InvalidResourceException.class, () -> ListenersValidator.validate(Reconciliation.DUMMY_RECONCILIATION, 3, listeners2)); assertThat(exception.getMessage(), allOf( not(containsString("listener listener1: 'customClaimCheck' value not a valid JsonPath filter query - Failed to parse query: \"invalid\" at position: 0")))); } diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/ModelUtilsTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/ModelUtilsTest.java index 060e62b369..582dba5e38 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/ModelUtilsTest.java +++ 
b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/ModelUtilsTest.java @@ -38,6 +38,7 @@ import io.strimzi.api.kafka.model.template.PodTemplate; import io.strimzi.api.kafka.model.template.PodTemplateBuilder; import io.strimzi.operator.cluster.KafkaVersionTestUtils; +import io.strimzi.operator.common.Reconciliation; import io.strimzi.operator.common.model.Labels; import io.strimzi.test.annotations.ParallelSuite; import io.strimzi.test.annotations.ParallelTest; @@ -111,7 +112,7 @@ public void testParsePodDisruptionBudgetTemplate() { .withMaxUnavailable(2) .build(); - Model model = new Model(kafka); + Model model = new Model(Reconciliation.DUMMY_RECONCILIATION, kafka); ModelUtils.parsePodDisruptionBudgetTemplate(model, template); assertThat(model.templatePodDisruptionBudgetLabels, is(Collections.singletonMap("labelKey", "labelValue"))); @@ -128,7 +129,7 @@ public void testParseNullPodDisruptionBudgetTemplate() { .endMetadata() .build(); - Model model = new Model(kafka); + Model model = new Model(Reconciliation.DUMMY_RECONCILIATION, kafka); ModelUtils.parsePodDisruptionBudgetTemplate(model, null); assertThat(model.templatePodDisruptionBudgetLabels, is(nullValue())); @@ -181,7 +182,7 @@ public void testParsePodTemplate() { .withTolerations(tolerations) .build(); - Model model = new Model(kafka); + Model model = new Model(Reconciliation.DUMMY_RECONCILIATION, kafka); ModelUtils.parsePodTemplate(model, template); assertThat(model.templatePodLabels, is(Collections.singletonMap("labelKey", "labelValue"))); @@ -207,7 +208,7 @@ public void testParseNullPodTemplate() { .endMetadata() .build(); - Model model = new Model(kafka); + Model model = new Model(Reconciliation.DUMMY_RECONCILIATION, kafka); ModelUtils.parsePodTemplate(model, null); assertThat(model.templatePodLabels, is(nullValue())); @@ -234,7 +235,7 @@ public void testParseDeploymentTemplate() { .withDeploymentStrategy(DeploymentStrategy.RECREATE) .build(); - Model model = new Model(connect); + Model model = new Model(Reconciliation.DUMMY_RECONCILIATION, connect); ModelUtils.parseDeploymentTemplate(model, template); assertThat(model.templateDeploymentLabels, is(Collections.singletonMap("labelKey", "labelValue"))); @@ -251,7 +252,7 @@ public void testParseNullDeploymentTemplate() { .endMetadata() .build(); - Model model = new Model(connect); + Model model = new Model(Reconciliation.DUMMY_RECONCILIATION, connect); ModelUtils.parseDeploymentTemplate(model, null); assertThat(model.templateDeploymentAnnotations, is(nullValue())); @@ -277,7 +278,7 @@ public void testParseInternalServiceTemplate() { .withIpFamilies(IpFamily.IPV6, IpFamily.IPV4) .build(); - Model model = new Model(kafka); + Model model = new Model(Reconciliation.DUMMY_RECONCILIATION, kafka); ModelUtils.parseInternalServiceTemplate(model, template); assertThat(model.templateServiceLabels, is(Collections.singletonMap("labelKey", "labelValue"))); @@ -295,7 +296,7 @@ public void testParseNullInternalServiceTemplate() { .endMetadata() .build(); - Model model = new Model(kafka); + Model model = new Model(Reconciliation.DUMMY_RECONCILIATION, kafka); ModelUtils.parseInternalServiceTemplate(model, null); assertThat(model.templateServiceLabels, is(nullValue())); @@ -322,7 +323,7 @@ public void testParseInternalHeadlessServiceTemplate() { .withIpFamilies(IpFamily.IPV6, IpFamily.IPV4) .build(); - Model model = new Model(kafka); + Model model = new Model(Reconciliation.DUMMY_RECONCILIATION, kafka); ModelUtils.parseInternalHeadlessServiceTemplate(model, template); 
assertThat(model.templateHeadlessServiceLabels, is(Collections.singletonMap("labelKey", "labelValue"))); @@ -340,7 +341,7 @@ public void testParseNullInternalHeadlessServiceTemplate() { .endMetadata() .build(); - Model model = new Model(kafka); + Model model = new Model(Reconciliation.DUMMY_RECONCILIATION, kafka); ModelUtils.parseInternalHeadlessServiceTemplate(model, null); assertThat(model.templateHeadlessServiceLabels, is(nullValue())); @@ -350,8 +351,8 @@ public void testParseNullInternalHeadlessServiceTemplate() { } private class Model extends AbstractModel { - public Model(HasMetadata resource) { - super(resource, "model-app"); + public Model(Reconciliation reconciliation, HasMetadata resource) { + super(reconciliation, resource, "model-app"); } @Override @@ -510,7 +511,7 @@ public void testEmptyTolerations() { .endSpec() .build(); - KafkaCluster model1 = KafkaCluster.fromCrd(kafka, KafkaVersionTestUtils.getKafkaVersionLookup()); + KafkaCluster model1 = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, KafkaVersionTestUtils.getKafkaVersionLookup()); /*AbstractModel model1 = new AbstractModel(kafka, "test") { @Override protected String getDefaultLogConfigFileName() { @@ -524,7 +525,7 @@ protected List getContainers(ImagePullPolicy imagePullPolicy) { };*/ ModelUtils.parsePodTemplate(model1, pt1); - KafkaCluster model2 = KafkaCluster.fromCrd(kafka, KafkaVersionTestUtils.getKafkaVersionLookup()); + KafkaCluster model2 = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, KafkaVersionTestUtils.getKafkaVersionLookup()); /*AbstractModel model2 = new AbstractModel(kafka, "test") { @Override protected String getDefaultLogConfigFileName() { diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/StorageDiffTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/StorageDiffTest.java index f8c33948a8..43afe88fb2 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/StorageDiffTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/StorageDiffTest.java @@ -9,6 +9,7 @@ import io.strimzi.api.kafka.model.storage.PersistentClaimStorageBuilder; import io.strimzi.api.kafka.model.storage.PersistentClaimStorageOverrideBuilder; import io.strimzi.api.kafka.model.storage.Storage; +import io.strimzi.operator.common.Reconciliation; import io.strimzi.test.annotations.ParallelSuite; import io.strimzi.test.annotations.ParallelTest; @@ -29,13 +30,13 @@ public void testJbodDiff() { new PersistentClaimStorageBuilder().withStorageClass("gp2-ssd").withDeleteClaim(true).withId(1).withSize("1000Gi").build()) .build(); - StorageDiff diff = new StorageDiff(jbod, jbod, 3, 3); + StorageDiff diff = new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, jbod, jbod, 3, 3); assertThat(diff.changesType(), is(false)); assertThat(diff.isEmpty(), is(true)); assertThat(diff.shrinkSize(), is(false)); assertThat(diff.isVolumesAddedOrRemoved(), is(false)); - diff = new StorageDiff(jbod, jbod2, 3, 3); + diff = new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, jbod, jbod2, 3, 3); assertThat(diff.changesType(), is(false)); assertThat(diff.isEmpty(), is(false)); assertThat(diff.shrinkSize(), is(false)); @@ -47,13 +48,13 @@ public void testPersistentDiff() { Storage persistent = new PersistentClaimStorageBuilder().withStorageClass("gp2-ssd").withDeleteClaim(false).withId(0).withSize("100Gi").build(); Storage persistent2 = new 
PersistentClaimStorageBuilder().withStorageClass("gp2-st1").withDeleteClaim(false).withId(0).withSize("1000Gi").build(); - assertThat(new StorageDiff(persistent, persistent, 3, 3).changesType(), is(false)); - assertThat(new StorageDiff(persistent, persistent, 3, 3).isEmpty(), is(true)); - assertThat(new StorageDiff(persistent, persistent, 3, 3).shrinkSize(), is(false)); + assertThat(new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, persistent, persistent, 3, 3).changesType(), is(false)); + assertThat(new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, persistent, persistent, 3, 3).isEmpty(), is(true)); + assertThat(new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, persistent, persistent, 3, 3).shrinkSize(), is(false)); - assertThat(new StorageDiff(persistent, persistent2, 3, 3).changesType(), is(false)); - assertThat(new StorageDiff(persistent, persistent2, 3, 3).isEmpty(), is(false)); - assertThat(new StorageDiff(persistent, persistent2, 3, 3).shrinkSize(), is(false)); + assertThat(new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, persistent, persistent2, 3, 3).changesType(), is(false)); + assertThat(new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, persistent, persistent2, 3, 3).isEmpty(), is(false)); + assertThat(new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, persistent, persistent2, 3, 3).shrinkSize(), is(false)); } @ParallelTest @@ -85,17 +86,17 @@ public void testPersistentDiffWithOverrides() { .build()) .build(); - assertThat(new StorageDiff(persistent, persistent, 3, 3).changesType(), is(false)); - assertThat(new StorageDiff(persistent, persistent, 3, 3).isEmpty(), is(true)); - assertThat(new StorageDiff(persistent, persistent, 3, 3).shrinkSize(), is(false)); + assertThat(new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, persistent, persistent, 3, 3).changesType(), is(false)); + assertThat(new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, persistent, persistent, 3, 3).isEmpty(), is(true)); + assertThat(new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, persistent, persistent, 3, 3).shrinkSize(), is(false)); - assertThat(new StorageDiff(persistent, persistent2, 3, 3).changesType(), is(false)); - assertThat(new StorageDiff(persistent, persistent2, 3, 3).isEmpty(), is(false)); - assertThat(new StorageDiff(persistent, persistent2, 3, 3).shrinkSize(), is(false)); + assertThat(new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, persistent, persistent2, 3, 3).changesType(), is(false)); + assertThat(new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, persistent, persistent2, 3, 3).isEmpty(), is(false)); + assertThat(new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, persistent, persistent2, 3, 3).shrinkSize(), is(false)); - assertThat(new StorageDiff(persistent2, persistent3, 3, 3).changesType(), is(false)); - assertThat(new StorageDiff(persistent2, persistent3, 3, 3).isEmpty(), is(false)); - assertThat(new StorageDiff(persistent2, persistent3, 3, 3).shrinkSize(), is(false)); + assertThat(new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, persistent2, persistent3, 3, 3).changesType(), is(false)); + assertThat(new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, persistent2, persistent3, 3, 3).isEmpty(), is(false)); + assertThat(new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, persistent2, persistent3, 3, 3).shrinkSize(), is(false)); } @ParallelTest @@ -104,18 +105,18 @@ public void testSizeChanges() { Storage persistent2 = new PersistentClaimStorageBuilder().withStorageClass("gp2-ssd").withDeleteClaim(false).withId(0).withSize("1000Gi").build(); Storage 
persistent3 = new PersistentClaimStorageBuilder().withStorageClass("gp2-ssd").withDeleteClaim(false).withId(0).withSize("10Gi").build(); - assertThat(new StorageDiff(persistent, persistent, 3, 3).shrinkSize(), is(false)); - assertThat(new StorageDiff(persistent, persistent2, 3, 3).shrinkSize(), is(false)); - assertThat(new StorageDiff(persistent, persistent3, 3, 3).shrinkSize(), is(true)); + assertThat(new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, persistent, persistent, 3, 3).shrinkSize(), is(false)); + assertThat(new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, persistent, persistent2, 3, 3).shrinkSize(), is(false)); + assertThat(new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, persistent, persistent3, 3, 3).shrinkSize(), is(true)); } @ParallelTest public void testEphemeralDiff() { Storage ephemeral = new EphemeralStorageBuilder().build(); - assertThat(new StorageDiff(ephemeral, ephemeral, 3, 3).changesType(), is(false)); - assertThat(new StorageDiff(ephemeral, ephemeral, 3, 3).isEmpty(), is(true)); - assertThat(new StorageDiff(ephemeral, ephemeral, 3, 3).shrinkSize(), is(false)); + assertThat(new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, ephemeral, ephemeral, 3, 3).changesType(), is(false)); + assertThat(new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, ephemeral, ephemeral, 3, 3).isEmpty(), is(true)); + assertThat(new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, ephemeral, ephemeral, 3, 3).shrinkSize(), is(false)); } @ParallelTest @@ -129,9 +130,9 @@ public void testCrossDiff() { Storage persistent = new PersistentClaimStorageBuilder().withStorageClass("gp2-ssd").withDeleteClaim(false).withId(0).withSize("100Gi").build(); - StorageDiff diffJbodEphemeral = new StorageDiff(jbod, ephemeral, 3, 3); - StorageDiff diffPersistentEphemeral = new StorageDiff(persistent, ephemeral, 3, 3); - StorageDiff diffJbodPersistent = new StorageDiff(jbod, persistent, 3, 3); + StorageDiff diffJbodEphemeral = new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, jbod, ephemeral, 3, 3); + StorageDiff diffPersistentEphemeral = new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, persistent, ephemeral, 3, 3); + StorageDiff diffJbodPersistent = new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, jbod, persistent, 3, 3); assertThat(diffJbodEphemeral.changesType(), is(true)); assertThat(diffPersistentEphemeral.changesType(), is(true)); @@ -176,63 +177,63 @@ public void testJbodDiffWithNewVolume() { .build(); // Volume added - StorageDiff diff = new StorageDiff(jbod, jbod2, 3, 3); + StorageDiff diff = new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, jbod, jbod2, 3, 3); assertThat(diff.changesType(), is(false)); assertThat(diff.isEmpty(), is(true)); assertThat(diff.shrinkSize(), is(false)); assertThat(diff.isVolumesAddedOrRemoved(), is(true)); // Volume removed - diff = new StorageDiff(jbod2, jbod, 3, 3); + diff = new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, jbod2, jbod, 3, 3); assertThat(diff.changesType(), is(false)); assertThat(diff.isEmpty(), is(true)); assertThat(diff.shrinkSize(), is(false)); assertThat(diff.isVolumesAddedOrRemoved(), is(true)); // Volume added with changes - diff = new StorageDiff(jbod, jbod3, 3, 3); + diff = new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, jbod, jbod3, 3, 3); assertThat(diff.changesType(), is(false)); assertThat(diff.isEmpty(), is(false)); assertThat(diff.shrinkSize(), is(true)); assertThat(diff.isVolumesAddedOrRemoved(), is(true)); // No volume added, but with changes - diff = new StorageDiff(jbod2, jbod3, 3, 3); + diff = new 
StorageDiff(Reconciliation.DUMMY_RECONCILIATION, jbod2, jbod3, 3, 3); assertThat(diff.changesType(), is(false)); assertThat(diff.isEmpty(), is(false)); assertThat(diff.shrinkSize(), is(true)); assertThat(diff.isVolumesAddedOrRemoved(), is(false)); // Volume removed from the beginning - diff = new StorageDiff(jbod3, jbod5, 3, 3); + diff = new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, jbod3, jbod5, 3, 3); assertThat(diff.changesType(), is(false)); assertThat(diff.isEmpty(), is(true)); assertThat(diff.shrinkSize(), is(false)); assertThat(diff.isVolumesAddedOrRemoved(), is(true)); // Volume added to the beginning - diff = new StorageDiff(jbod5, jbod3, 3, 3); + diff = new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, jbod5, jbod3, 3, 3); assertThat(diff.changesType(), is(false)); assertThat(diff.isEmpty(), is(true)); assertThat(diff.shrinkSize(), is(false)); assertThat(diff.isVolumesAddedOrRemoved(), is(true)); // Volume replaced with another ID and another volume which is kept changed - diff = new StorageDiff(jbod3, jbod6, 3, 3); + diff = new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, jbod3, jbod6, 3, 3); assertThat(diff.changesType(), is(false)); assertThat(diff.isEmpty(), is(false)); assertThat(diff.shrinkSize(), is(false)); assertThat(diff.isVolumesAddedOrRemoved(), is(true)); // Volume replaced with another ID in single volume broker - diff = new StorageDiff(jbod, jbod4, 3, 3); + diff = new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, jbod, jbod4, 3, 3); assertThat(diff.changesType(), is(false)); assertThat(diff.isEmpty(), is(true)); assertThat(diff.shrinkSize(), is(false)); assertThat(diff.isVolumesAddedOrRemoved(), is(true)); // Volume replaced with another ID without chenging the volumes which are kept - diff = new StorageDiff(jbod2, jbod6, 3, 3); + diff = new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, jbod2, jbod6, 3, 3); assertThat(diff.changesType(), is(false)); assertThat(diff.isEmpty(), is(true)); assertThat(diff.shrinkSize(), is(false)); @@ -256,9 +257,9 @@ public void testSizeChangesInJbod() { new PersistentClaimStorageBuilder().withStorageClass("gp2-ssd").withDeleteClaim(true).withId(1).withSize("500Gi").build()) .build(); - assertThat(new StorageDiff(jbod, jbod, 3, 3).shrinkSize(), is(false)); - assertThat(new StorageDiff(jbod, jbod2, 3, 3).shrinkSize(), is(false)); - assertThat(new StorageDiff(jbod, jbod3, 3, 3).shrinkSize(), is(true)); + assertThat(new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, jbod, jbod, 3, 3).shrinkSize(), is(false)); + assertThat(new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, jbod, jbod2, 3, 3).shrinkSize(), is(false)); + assertThat(new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, jbod, jbod3, 3, 3).shrinkSize(), is(true)); } @ParallelTest @@ -296,20 +297,20 @@ public void testPersistentDiffWithOverridesChangesToExistingOverrides() { .build(); // Test no changes when the diff is the same - assertThat(new StorageDiff(persistent, persistent, 2, 2).isEmpty(), is(true)); - assertThat(new StorageDiff(persistent2, persistent2, 2, 2).isEmpty(), is(true)); + assertThat(new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, persistent, persistent, 2, 2).isEmpty(), is(true)); + assertThat(new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, persistent2, persistent2, 2, 2).isEmpty(), is(true)); // Override changed for node which does not exist => is allowed - assertThat(new StorageDiff(persistent, persistent2, 1, 1).isEmpty(), is(true)); + assertThat(new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, persistent, persistent2, 
1, 1).isEmpty(), is(true)); // Override changed for node which is being scaled up => is allowed - assertThat(new StorageDiff(persistent, persistent2, 1, 2).isEmpty(), is(true)); + assertThat(new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, persistent, persistent2, 1, 2).isEmpty(), is(true)); // Override changed for existing node => is not allowed - assertThat(new StorageDiff(persistent, persistent2, 2, 2).isEmpty(), is(false)); + assertThat(new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, persistent, persistent2, 2, 2).isEmpty(), is(false)); // Override changed for node being scaled down => is allowed - assertThat(new StorageDiff(persistent, persistent2, 2, 1).isEmpty(), is(true)); + assertThat(new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, persistent, persistent2, 2, 1).isEmpty(), is(true)); } @ParallelTest @@ -350,23 +351,23 @@ public void testPersistentDiffWithOverridesBeingAdded() { .build(); // Test no changes - assertThat(new StorageDiff(persistent, persistent, 2, 2).isEmpty(), is(true)); - assertThat(new StorageDiff(persistent2, persistent2, 2, 2).isEmpty(), is(true)); - assertThat(new StorageDiff(persistent3, persistent3, 2, 2).isEmpty(), is(true)); + assertThat(new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, persistent, persistent, 2, 2).isEmpty(), is(true)); + assertThat(new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, persistent2, persistent2, 2, 2).isEmpty(), is(true)); + assertThat(new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, persistent3, persistent3, 2, 2).isEmpty(), is(true)); // Overrides added for existing nodes => not allowed - assertThat(new StorageDiff(persistent, persistent2, 2, 2).isEmpty(), is(false)); - assertThat(new StorageDiff(persistent, persistent3, 2, 2).isEmpty(), is(false)); - assertThat(new StorageDiff(persistent2, persistent3, 2, 2).isEmpty(), is(false)); + assertThat(new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, persistent, persistent2, 2, 2).isEmpty(), is(false)); + assertThat(new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, persistent, persistent3, 2, 2).isEmpty(), is(false)); + assertThat(new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, persistent2, persistent3, 2, 2).isEmpty(), is(false)); // Overrides added for new nodes => allowed - assertThat(new StorageDiff(persistent2, persistent3, 1, 2).isEmpty(), is(true)); + assertThat(new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, persistent2, persistent3, 1, 2).isEmpty(), is(true)); // Overrides added for removed nodes => allowed - assertThat(new StorageDiff(persistent2, persistent3, 2, 1).isEmpty(), is(true)); + assertThat(new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, persistent2, persistent3, 2, 1).isEmpty(), is(true)); // Overrides added for non-existing nodes => allowed - assertThat(new StorageDiff(persistent2, persistent3, 1, 1).isEmpty(), is(true)); + assertThat(new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, persistent2, persistent3, 1, 1).isEmpty(), is(true)); } @ParallelTest @@ -407,23 +408,23 @@ public void testPersistentDiffWithOverridesBeingRemoved() { .build(); // Test no changes - assertThat(new StorageDiff(persistent, persistent, 2, 2).isEmpty(), is(true)); - assertThat(new StorageDiff(persistent2, persistent2, 2, 2).isEmpty(), is(true)); - assertThat(new StorageDiff(persistent3, persistent3, 2, 2).isEmpty(), is(true)); + assertThat(new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, persistent, persistent, 2, 2).isEmpty(), is(true)); + assertThat(new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, persistent2, persistent2, 2, 
2).isEmpty(), is(true)); + assertThat(new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, persistent3, persistent3, 2, 2).isEmpty(), is(true)); // Overrides removed for existing nodes => not allowed - assertThat(new StorageDiff(persistent3, persistent, 2, 2).isEmpty(), is(false)); - assertThat(new StorageDiff(persistent3, persistent2, 2, 2).isEmpty(), is(false)); - assertThat(new StorageDiff(persistent2, persistent, 2, 2).isEmpty(), is(false)); + assertThat(new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, persistent3, persistent, 2, 2).isEmpty(), is(false)); + assertThat(new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, persistent3, persistent2, 2, 2).isEmpty(), is(false)); + assertThat(new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, persistent2, persistent, 2, 2).isEmpty(), is(false)); // Overrides removed for new nodes => allowed - assertThat(new StorageDiff(persistent3, persistent2, 1, 2).isEmpty(), is(true)); + assertThat(new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, persistent3, persistent2, 1, 2).isEmpty(), is(true)); // Overrides removed for removed nodes => allowed - assertThat(new StorageDiff(persistent3, persistent2, 2, 1).isEmpty(), is(true)); + assertThat(new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, persistent3, persistent2, 2, 1).isEmpty(), is(true)); // Overrides removed for non-existing nodes => allowed - assertThat(new StorageDiff(persistent3, persistent2, 1, 1).isEmpty(), is(true)); + assertThat(new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, persistent3, persistent2, 1, 1).isEmpty(), is(true)); } @ParallelTest @@ -453,17 +454,17 @@ public void testPersistentDiffWithOverridesBeingAddedAndRemoved() { .build(); // Test no changes - assertThat(new StorageDiff(persistent, persistent, 2, 2).isEmpty(), is(true)); - assertThat(new StorageDiff(persistent2, persistent2, 2, 2).isEmpty(), is(true)); + assertThat(new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, persistent, persistent, 2, 2).isEmpty(), is(true)); + assertThat(new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, persistent2, persistent2, 2, 2).isEmpty(), is(true)); // Overrides added and removed for existing nodes => not allowed - assertThat(new StorageDiff(persistent2, persistent, 2, 2).isEmpty(), is(false)); - assertThat(new StorageDiff(persistent, persistent2, 2, 2).isEmpty(), is(false)); + assertThat(new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, persistent2, persistent, 2, 2).isEmpty(), is(false)); + assertThat(new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, persistent, persistent2, 2, 2).isEmpty(), is(false)); // Overrides added for new nodes but removed for old => not allowed - assertThat(new StorageDiff(persistent, persistent2, 1, 2).isEmpty(), is(false)); + assertThat(new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, persistent, persistent2, 1, 2).isEmpty(), is(false)); // Overrides removed for new nodes but added for old => not allowed - assertThat(new StorageDiff(persistent2, persistent, 1, 2).isEmpty(), is(false)); + assertThat(new StorageDiff(Reconciliation.DUMMY_RECONCILIATION, persistent2, persistent, 1, 2).isEmpty(), is(false)); } } diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/ZookeeperClusterTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/ZookeeperClusterTest.java index a95d76de4f..7a56761586 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/ZookeeperClusterTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/model/ZookeeperClusterTest.java @@ 
-53,6 +53,7 @@ import io.strimzi.operator.cluster.ResourceUtils; import io.strimzi.operator.common.MetricsAndLogging; import io.strimzi.operator.common.PasswordGenerator; +import io.strimzi.operator.common.Reconciliation; import io.strimzi.operator.common.model.Labels; import io.strimzi.operator.common.model.OrderedProperties; import io.strimzi.test.TestUtils; @@ -117,7 +118,7 @@ public class ZookeeperClusterTest { private final Kafka ka = ResourceUtils.createKafka(namespace, cluster, replicas, image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configurationJson, zooConfigurationJson, null, null, kafkaLogConfigJson, zooLogConfigJson, null, null); - private final ZookeeperCluster zc = ZookeeperCluster.fromCrd(ka, VERSIONS); + private final ZookeeperCluster zc = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, ka, VERSIONS); @ParallelTest public void testMetricsConfigMap() { @@ -171,7 +172,7 @@ public void testGenerateServiceWithoutMetrics() { .endZookeeper() .endSpec() .build(); - ZookeeperCluster zc = ZookeeperCluster.fromCrd(kafka, VERSIONS); + ZookeeperCluster zc = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, VERSIONS); Service headful = zc.generateService(); assertThat(headful.getSpec().getType(), is("ClusterIP")); @@ -230,7 +231,7 @@ public void testGenerateStatefulSetWithPodManagementPolicy() { .endTemplate() .endZookeeper() .endSpec().build(); - ZookeeperCluster zc = ZookeeperCluster.fromCrd(editZooAssembly, VERSIONS); + ZookeeperCluster zc = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, editZooAssembly, VERSIONS); StatefulSet sts = zc.generateStatefulSet(false, null, null); assertThat(sts.getSpec().getPodManagementPolicy(), is(PodManagementPolicy.ORDERED_READY.toValue())); } @@ -250,7 +251,7 @@ public void testInvalidVersion() { .endSpec() .build(); - ZookeeperCluster.fromCrd(ka, VERSIONS); + ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, ka, VERSIONS); }); } @@ -314,7 +315,7 @@ public void testPvcNames() { .endZookeeper() .endSpec() .build(); - ZookeeperCluster zc = ZookeeperCluster.fromCrd(ka, VERSIONS); + ZookeeperCluster zc = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, ka, VERSIONS); PersistentVolumeClaim pvc = zc.getVolumeClaims().get(0); @@ -326,13 +327,13 @@ public void testPvcNames() { @ParallelTest public void withAffinity() throws IOException { - ResourceTester resourceTester = new ResourceTester<>(Kafka.class, VERSIONS, ZookeeperCluster::fromCrd, this.getClass().getSimpleName() + ".withAffinity"); + ResourceTester resourceTester = new ResourceTester<>(Kafka.class, VERSIONS, (kafkaAssembly, versions) -> ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, versions), this.getClass().getSimpleName() + ".withAffinity"); resourceTester.assertDesiredResource("-STS.yaml", zc -> zc.generateStatefulSet(true, null, null).getSpec().getTemplate().getSpec().getAffinity()); } @ParallelTest public void withTolerations() throws IOException { - ResourceTester resourceTester = new ResourceTester<>(Kafka.class, VERSIONS, ZookeeperCluster::fromCrd, this.getClass().getSimpleName() + ".withTolerations"); + ResourceTester resourceTester = new ResourceTester<>(Kafka.class, VERSIONS, (kafkaAssembly, versions) -> ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, versions), this.getClass().getSimpleName() + ".withTolerations"); resourceTester.assertDesiredResource("-STS.yaml", zc -> zc.generateStatefulSet(true, null, 
null).getSpec().getTemplate().getSpec().getTolerations()); } @@ -342,7 +343,7 @@ public void checkOwnerReference(OwnerReference ownerRef, HasMetadata resource) } private Secret generateNodeSecret() { - ClusterCa clusterCa = new ClusterCa(new OpenSslCertManager(), new PasswordGenerator(10, "a", "a"), cluster, null, null); + ClusterCa clusterCa = new ClusterCa(Reconciliation.DUMMY_RECONCILIATION, new OpenSslCertManager(), new PasswordGenerator(10, "a", "a"), cluster, null, null); clusterCa.createRenewOrReplace(namespace, cluster, emptyMap(), emptyMap(), emptyMap(), null, true); zc.generateCertificates(ka, clusterCa, true); @@ -473,7 +474,7 @@ image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configurationJso .endZookeeper() .endSpec() .build(); - ZookeeperCluster zc = ZookeeperCluster.fromCrd(kafkaAssembly, VERSIONS); + ZookeeperCluster zc = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS); // Check StatefulSet StatefulSet sts = zc.generateStatefulSet(true, null, null); @@ -528,7 +529,7 @@ image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configurationJso .endZookeeper() .endSpec() .build(); - ZookeeperCluster zc = ZookeeperCluster.fromCrd(kafkaAssembly, VERSIONS); + ZookeeperCluster zc = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS); StatefulSet sts = zc.generateStatefulSet(true, null, null); assertThat(sts.getSpec().getTemplate().getSpec().getTerminationGracePeriodSeconds(), is(Long.valueOf(123))); @@ -539,7 +540,7 @@ public void testDefaultGracePeriod() { Kafka kafkaAssembly = new KafkaBuilder(ResourceUtils.createKafka(namespace, cluster, replicas, image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configurationJson, emptyMap())) .build(); - ZookeeperCluster zc = ZookeeperCluster.fromCrd(kafkaAssembly, VERSIONS); + ZookeeperCluster zc = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS); StatefulSet sts = zc.generateStatefulSet(true, null, null); assertThat(sts.getSpec().getTemplate().getSpec().getTerminationGracePeriodSeconds(), is(Long.valueOf(30))); @@ -562,7 +563,7 @@ image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configurationJso .endZookeeper() .endSpec() .build(); - ZookeeperCluster zc = ZookeeperCluster.fromCrd(kafkaAssembly, VERSIONS); + ZookeeperCluster zc = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS); StatefulSet sts = zc.generateStatefulSet(true, null, null); assertThat(sts.getSpec().getTemplate().getSpec().getImagePullSecrets().size(), is(2)); @@ -581,7 +582,7 @@ public void testImagePullSecretsFromCO() { Kafka kafkaAssembly = ResourceUtils.createKafka(namespace, cluster, replicas, image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configurationJson, emptyMap()); - ZookeeperCluster zc = ZookeeperCluster.fromCrd(kafkaAssembly, VERSIONS); + ZookeeperCluster zc = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS); StatefulSet sts = zc.generateStatefulSet(true, null, secrets); assertThat(sts.getSpec().getTemplate().getSpec().getImagePullSecrets().size(), is(2)); @@ -606,7 +607,7 @@ image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configurationJso .endZookeeper() .endSpec() .build(); - ZookeeperCluster zc = ZookeeperCluster.fromCrd(kafkaAssembly, VERSIONS); + ZookeeperCluster zc = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS); StatefulSet sts = 
zc.generateStatefulSet(true, null, singletonList(secret1)); assertThat(sts.getSpec().getTemplate().getSpec().getImagePullSecrets().size(), is(1)); @@ -619,7 +620,7 @@ public void testDefaultImagePullSecrets() { Kafka kafkaAssembly = new KafkaBuilder(ResourceUtils.createKafka(namespace, cluster, replicas, image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configurationJson, emptyMap())) .build(); - ZookeeperCluster zc = ZookeeperCluster.fromCrd(kafkaAssembly, VERSIONS); + ZookeeperCluster zc = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS); StatefulSet sts = zc.generateStatefulSet(true, null, null); assertThat(sts.getSpec().getTemplate().getSpec().getImagePullSecrets(), is(nullValue())); @@ -639,7 +640,7 @@ image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configurationJso .endZookeeper() .endSpec() .build(); - ZookeeperCluster zc = ZookeeperCluster.fromCrd(kafkaAssembly, VERSIONS); + ZookeeperCluster zc = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS); StatefulSet sts = zc.generateStatefulSet(true, null, null); assertThat(sts.getSpec().getTemplate().getSpec().getSecurityContext(), is(notNullValue())); @@ -653,7 +654,7 @@ public void testDefaultSecurityContext() { Kafka kafkaAssembly = new KafkaBuilder(ResourceUtils.createKafka(namespace, cluster, replicas, image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configurationJson, emptyMap())) .build(); - ZookeeperCluster zc = ZookeeperCluster.fromCrd(kafkaAssembly, VERSIONS); + ZookeeperCluster zc = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS); StatefulSet sts = zc.generateStatefulSet(true, null, null); assertThat(sts.getSpec().getTemplate().getSpec().getSecurityContext(), is(nullValue())); @@ -673,7 +674,7 @@ image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configurationJso .endZookeeper() .endSpec() .build(); - ZookeeperCluster zc = ZookeeperCluster.fromCrd(kafkaAssembly, VERSIONS); + ZookeeperCluster zc = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS); PodDisruptionBudget pdb = zc.generatePodDisruptionBudget(); assertThat(pdb.getSpec().getMaxUnavailable(), is(new IntOrString(2))); @@ -684,7 +685,7 @@ public void testDefaultPodDisruptionBudget() { Kafka kafkaAssembly = new KafkaBuilder(ResourceUtils.createKafka(namespace, cluster, replicas, image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configurationJson, emptyMap())) .build(); - ZookeeperCluster zc = ZookeeperCluster.fromCrd(kafkaAssembly, VERSIONS); + ZookeeperCluster zc = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS); PodDisruptionBudget pdb = zc.generatePodDisruptionBudget(); assertThat(pdb.getSpec().getMaxUnavailable(), is(new IntOrString(1))); @@ -695,7 +696,7 @@ public void testImagePullPolicy() { Kafka kafkaAssembly = ResourceUtils.createKafka(namespace, cluster, replicas, image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configurationJson, emptyMap()); kafkaAssembly.getSpec().getKafka().setRack(new RackBuilder().withTopologyKey("topology-key").build()); - ZookeeperCluster kc = ZookeeperCluster.fromCrd(kafkaAssembly, VERSIONS); + ZookeeperCluster kc = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS); StatefulSet sts = zc.generateStatefulSet(true, ImagePullPolicy.ALWAYS, null); 
assertThat(sts.getSpec().getTemplate().getSpec().getContainers().get(0).getImagePullPolicy(), is(ImagePullPolicy.ALWAYS.toString())); @@ -709,7 +710,7 @@ public void testNetworkPolicyNewKubernetesVersions() { Kafka kafkaAssembly = ResourceUtils.createKafka(namespace, cluster, replicas, image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configurationJson, emptyMap()); kafkaAssembly.getSpec().getKafka().setRack(new RackBuilder().withTopologyKey("topology-key").build()); - ZookeeperCluster zc = ZookeeperCluster.fromCrd(kafkaAssembly, VERSIONS); + ZookeeperCluster zc = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS); // Check Network Policies => Other namespace NetworkPolicy np = zc.generateNetworkPolicy("operator-namespace", null); @@ -795,7 +796,7 @@ public void testGeneratePersistentVolumeClaimsPersistentWithClaimDeletion() { .endZookeeper() .endSpec() .build(); - ZookeeperCluster zc = ZookeeperCluster.fromCrd(ka, VERSIONS); + ZookeeperCluster zc = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, ka, VERSIONS); // Check Storage annotation on STS assertThat(zc.generateStatefulSet(true, ImagePullPolicy.NEVER, null).getMetadata().getAnnotations().get(AbstractModel.ANNO_STRIMZI_IO_STORAGE), @@ -824,7 +825,7 @@ public void testGeneratePersistentVolumeClaimsPersistentWithoutClaimDeletion() { .endZookeeper() .endSpec() .build(); - ZookeeperCluster zc = ZookeeperCluster.fromCrd(ka, VERSIONS); + ZookeeperCluster zc = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, ka, VERSIONS); // Check Storage annotation on STS assertThat(zc.generateStatefulSet(true, ImagePullPolicy.NEVER, null).getMetadata().getAnnotations().get(AbstractModel.ANNO_STRIMZI_IO_STORAGE), @@ -861,7 +862,7 @@ public void testGeneratePersistentVolumeClaimsPersistentWithOverride() { .endZookeeper() .endSpec() .build(); - ZookeeperCluster zc = ZookeeperCluster.fromCrd(ka, VERSIONS); + ZookeeperCluster zc = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, ka, VERSIONS); // Check Storage annotation on STS assertThat(zc.generateStatefulSet(true, ImagePullPolicy.NEVER, null).getMetadata().getAnnotations().get(AbstractModel.ANNO_STRIMZI_IO_STORAGE), @@ -911,7 +912,7 @@ public void testGeneratePersistentVolumeClaimsWithTemplate() { .endZookeeper() .endSpec() .build(); - ZookeeperCluster zc = ZookeeperCluster.fromCrd(ka, VERSIONS); + ZookeeperCluster zc = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, ka, VERSIONS); // Check PVCs List pvcs = zc.generatePersistentVolumeClaims(); @@ -934,7 +935,7 @@ public void testGeneratePersistentVolumeClaimsEphemeral() { .endZookeeper() .endSpec() .build(); - ZookeeperCluster zc = ZookeeperCluster.fromCrd(ka, VERSIONS); + ZookeeperCluster zc = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, ka, VERSIONS); // Check Storage annotation on STS assertThat(zc.generateStatefulSet(true, ImagePullPolicy.NEVER, null).getMetadata().getAnnotations().get(AbstractModel.ANNO_STRIMZI_IO_STORAGE), @@ -955,7 +956,7 @@ public void testGenerateSTSWithPersistentVolumeEphemeral() { .endZookeeper() .endSpec() .build(); - ZookeeperCluster zc = ZookeeperCluster.fromCrd(ka, VERSIONS); + ZookeeperCluster zc = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, ka, VERSIONS); StatefulSet sts = zc.generateStatefulSet(false, null, null); assertThat(sts.getSpec().getTemplate().getSpec().getVolumes().get(0).getEmptyDir().getSizeLimit(), is(nullValue())); @@ -971,7 +972,7 @@ public void 
testGenerateSTSWithPersistentVolumeEphemeralWithSizeLimit() { .endZookeeper() .endSpec() .build(); - ZookeeperCluster zc = ZookeeperCluster.fromCrd(ka, VERSIONS); + ZookeeperCluster zc = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, ka, VERSIONS); StatefulSet sts = zc.generateStatefulSet(false, null, null); assertThat(sts.getSpec().getTemplate().getSpec().getVolumes().get(0).getEmptyDir().getSizeLimit(), is(new Quantity("1", "Gi"))); @@ -991,7 +992,7 @@ public void testStorageReverting() { .endZookeeper() .endSpec() .build(); - ZookeeperCluster zc = ZookeeperCluster.fromCrd(ka, VERSIONS, persistent, replicas); + ZookeeperCluster zc = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, ka, VERSIONS, persistent, replicas); assertThat(zc.getStorage(), is(persistent)); ka = new KafkaBuilder(ResourceUtils.createKafka(namespace, cluster, replicas, image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configurationJson, zooConfigurationJson)) @@ -1001,7 +1002,7 @@ public void testStorageReverting() { .endZookeeper() .endSpec() .build(); - zc = ZookeeperCluster.fromCrd(ka, VERSIONS, ephemeral, replicas); + zc = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, ka, VERSIONS, ephemeral, replicas); // Storage is reverted assertThat(zc.getStorage(), is(ephemeral)); @@ -1026,7 +1027,7 @@ public void testStorageValidationAfterInitialDeployment() { .endZookeeper() .endSpec() .build(); - ZookeeperCluster.fromCrd(kafkaAssembly, VERSIONS, oldStorage, replicas); + ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS, oldStorage, replicas); }); } @@ -1062,7 +1063,7 @@ image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configurationJso .endSpec() .build(); - ZookeeperCluster zc = ZookeeperCluster.fromCrd(kafkaAssembly, VERSIONS); + ZookeeperCluster zc = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS); List zkEnvVars = zc.getEnvVars(); @@ -1106,7 +1107,7 @@ image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configurationJso .endSpec() .build(); - ZookeeperCluster zc = ZookeeperCluster.fromCrd(kafkaAssembly, VERSIONS); + ZookeeperCluster zc = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS); List zkEnvVars = zc.getEnvVars(); assertThat("Failed to prevent over writing existing container environment variable: " + testEnvOneKey, @@ -1144,7 +1145,7 @@ image, healthDelay, healthTimeout, metricsCm, jmxMetricsConfig, configurationJso .endSpec() .build(); - ZookeeperCluster zc = ZookeeperCluster.fromCrd(kafkaAssembly, VERSIONS); + ZookeeperCluster zc = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS); StatefulSet sts = zc.generateStatefulSet(false, null, null); assertThat(sts.getSpec().getTemplate().getSpec().getContainers(), @@ -1171,7 +1172,7 @@ public void testMetricsParsingFromConfigMap() { .endSpec() .build(); - ZookeeperCluster zc = ZookeeperCluster.fromCrd(kafkaAssembly, VERSIONS); + ZookeeperCluster zc = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafkaAssembly, VERSIONS); assertThat(zc.isMetricsEnabled(), is(true)); assertThat(zc.getMetricsConfigInCm(), is(metrics)); @@ -1179,7 +1180,7 @@ public void testMetricsParsingFromConfigMap() { @ParallelTest public void testMetricsParsingNoMetrics() { - ZookeeperCluster zc = ZookeeperCluster.fromCrd(ResourceUtils.createKafka(namespace, cluster, replicas, + ZookeeperCluster zc = 
ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, ResourceUtils.createKafka(namespace, cluster, replicas, image, healthDelay, healthTimeout), VERSIONS); assertThat(zc.isMetricsEnabled(), is(false)); diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/CertificateRenewalTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/CertificateRenewalTest.java index 6660c9d49a..45bd4db995 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/CertificateRenewalTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/CertificateRenewalTest.java @@ -147,10 +147,10 @@ private Future> reconcileCa(VertxTestContext context, Kaf }).collect(Collectors.toList()); }); ArgumentCaptor c = ArgumentCaptor.forClass(Secret.class); - when(secretOps.reconcile(eq(NAMESPACE), eq(AbstractModel.clusterCaCertSecretName(NAME)), c.capture())).thenAnswer(i -> Future.succeededFuture(ReconcileResult.noop(i.getArgument(0)))); - when(secretOps.reconcile(eq(NAMESPACE), eq(AbstractModel.clusterCaKeySecretName(NAME)), c.capture())).thenAnswer(i -> Future.succeededFuture(ReconcileResult.noop(i.getArgument(0)))); - when(secretOps.reconcile(eq(NAMESPACE), eq(KafkaCluster.clientsCaCertSecretName(NAME)), c.capture())).thenAnswer(i -> Future.succeededFuture(ReconcileResult.noop(i.getArgument(0)))); - when(secretOps.reconcile(eq(NAMESPACE), eq(KafkaCluster.clientsCaKeySecretName(NAME)), c.capture())).thenAnswer(i -> Future.succeededFuture(ReconcileResult.noop(i.getArgument(0)))); + when(secretOps.reconcile(any(), eq(NAMESPACE), eq(AbstractModel.clusterCaCertSecretName(NAME)), c.capture())).thenAnswer(i -> Future.succeededFuture(ReconcileResult.noop(i.getArgument(0)))); + when(secretOps.reconcile(any(), eq(NAMESPACE), eq(AbstractModel.clusterCaKeySecretName(NAME)), c.capture())).thenAnswer(i -> Future.succeededFuture(ReconcileResult.noop(i.getArgument(0)))); + when(secretOps.reconcile(any(), eq(NAMESPACE), eq(KafkaCluster.clientsCaCertSecretName(NAME)), c.capture())).thenAnswer(i -> Future.succeededFuture(ReconcileResult.noop(i.getArgument(0)))); + when(secretOps.reconcile(any(), eq(NAMESPACE), eq(KafkaCluster.clientsCaKeySecretName(NAME)), c.capture())).thenAnswer(i -> Future.succeededFuture(ReconcileResult.noop(i.getArgument(0)))); KafkaAssemblyOperator op = new KafkaAssemblyOperator(vertx, new PlatformFeaturesAvailability(false, KubernetesVersion.V1_16), certManager, passwordGenerator, supplier, ResourceUtils.dummyClusterOperatorConfig(1L)); @@ -1215,7 +1215,7 @@ public void testRenewalOfDeploymentCertificatesWithNullSecret() throws IOExcepti OwnerReference ownerReference = new OwnerReference(); boolean isMaintenanceTimeWindowsSatisfied = true; - Secret newSecret = ModelUtils.buildSecret(clusterCaMock, null, namespace, secretName, commonName, + Secret newSecret = ModelUtils.buildSecret(Reconciliation.DUMMY_RECONCILIATION, clusterCaMock, null, namespace, secretName, commonName, keyCertName, labels, ownerReference, isMaintenanceTimeWindowsSatisfied); assertThat(newSecret.getData(), hasEntry("deployment.crt", newCertAndKey.certAsBase64String())); @@ -1249,7 +1249,7 @@ public void testRenewalOfDeploymentCertificatesWithRenewingCa() throws IOExcepti OwnerReference ownerReference = new OwnerReference(); boolean isMaintenanceTimeWindowsSatisfied = true; - Secret newSecret = ModelUtils.buildSecret(clusterCaMock, initialSecret, namespace, secretName, commonName, + Secret newSecret = 
ModelUtils.buildSecret(Reconciliation.DUMMY_RECONCILIATION, clusterCaMock, initialSecret, namespace, secretName, commonName, keyCertName, labels, ownerReference, isMaintenanceTimeWindowsSatisfied); assertThat(newSecret.getData(), hasEntry("deployment.crt", newCertAndKey.certAsBase64String())); @@ -1283,7 +1283,7 @@ public void testRenewalOfDeploymentCertificatesDelayedRenewal() throws IOExcepti OwnerReference ownerReference = new OwnerReference(); boolean isMaintenanceTimeWindowsSatisfied = true; - Secret newSecret = ModelUtils.buildSecret(clusterCaMock, initialSecret, namespace, secretName, commonName, + Secret newSecret = ModelUtils.buildSecret(Reconciliation.DUMMY_RECONCILIATION, clusterCaMock, initialSecret, namespace, secretName, commonName, keyCertName, labels, ownerReference, isMaintenanceTimeWindowsSatisfied); assertThat(newSecret.getData(), hasEntry("deployment.crt", newCertAndKey.certAsBase64String())); @@ -1317,7 +1317,7 @@ public void testRenewalOfDeploymentCertificatesDelayedRenewalOutsideOfMaintenanc OwnerReference ownerReference = new OwnerReference(); boolean isMaintenanceTimeWindowsSatisfied = false; - Secret newSecret = ModelUtils.buildSecret(clusterCaMock, initialSecret, namespace, secretName, commonName, + Secret newSecret = ModelUtils.buildSecret(Reconciliation.DUMMY_RECONCILIATION, clusterCaMock, initialSecret, namespace, secretName, commonName, keyCertName, labels, ownerReference, isMaintenanceTimeWindowsSatisfied); assertThat(newSecret.getData(), hasEntry("deployment.crt", Base64.getEncoder().encodeToString("old-cert".getBytes()))); diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/ConnectorMockTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/ConnectorMockTest.java index 7b3a900c4d..c55cd80fca 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/ConnectorMockTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/ConnectorMockTest.java @@ -47,8 +47,8 @@ import io.vertx.junit5.Checkpoint; import io.vertx.junit5.VertxExtension; import io.vertx.junit5.VertxTestContext; -import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -87,7 +87,7 @@ @ExtendWith(VertxExtension.class) public class ConnectorMockTest { - private static final Logger log = LogManager.getLogger(ConnectorMockTest.class.getName()); + private static final Logger LOGGER = LogManager.getLogger(ConnectorMockTest.class.getName()); private static final String NAMESPACE = "ns"; @@ -199,7 +199,7 @@ private void setupMockConnectAPI() { .map(s -> s.substring(matchingKeyPrefix.length())) .collect(Collectors.toList())); }); - when(api.listConnectorPlugins(any(), anyInt())).thenAnswer(i -> { + when(api.listConnectorPlugins(any(), any(), anyInt())).thenAnswer(i -> { ConnectorPlugin connectorPlugin = new ConnectorPluginBuilder() .withConnectorClass("io.strimzi.MyClass") .withType("sink") @@ -207,10 +207,10 @@ private void setupMockConnectAPI() { .build(); return Future.succeededFuture(Collections.singletonList(connectorPlugin)); }); - when(api.updateConnectLoggers(anyString(), anyInt(), anyString(), any(OrderedProperties.class))).thenReturn(Future.succeededFuture()); - when(api.getConnectorConfig(any(), any(), anyInt(), any())).thenAnswer(invocation -> { - String host = 
invocation.getArgument(1); - String connectorName = invocation.getArgument(3); + when(api.updateConnectLoggers(any(), anyString(), anyInt(), anyString(), any(OrderedProperties.class))).thenReturn(Future.succeededFuture()); + when(api.getConnectorConfig(any(), any(), any(), anyInt(), any())).thenAnswer(invocation -> { + String host = invocation.getArgument(2); + String connectorName = invocation.getArgument(4); ConnectorState connectorState = runningConnectors.get(key(host, connectorName)); if (connectorState != null) { Map map = new HashMap<>(); @@ -225,9 +225,9 @@ private void setupMockConnectAPI() { return Future.failedFuture(new ConnectRestException("GET", String.format("/connectors/%s/config", connectorName), 404, "Not Found", "")); } }); - when(api.getConnector(any(), anyInt(), any())).thenAnswer(invocation -> { - String host = invocation.getArgument(0); - String connectorName = invocation.getArgument(2); + when(api.getConnector(any(), any(), anyInt(), any())).thenAnswer(invocation -> { + String host = invocation.getArgument(1); + String connectorName = invocation.getArgument(3); ConnectorState connectorState = runningConnectors.get(key(host, connectorName)); if (connectorState == null) { return Future.failedFuture(new ConnectRestException("GET", String.format("/connectors/%s", connectorName), 404, "Not Found", "")); @@ -237,32 +237,32 @@ private void setupMockConnectAPI() { "config", connectorState.config, "tasks", emptyMap())); }); - when(api.createOrUpdatePutRequest(any(), anyInt(), anyString(), any())).thenAnswer(invocation -> { - log.info((String) invocation.getArgument(0) + invocation.getArgument(1) + invocation.getArgument(2) + invocation.getArgument(3)); - String host = invocation.getArgument(0); - log.info("###### create " + host); - String connectorName = invocation.getArgument(2); - JsonObject connectorConfig = invocation.getArgument(3); + when(api.createOrUpdatePutRequest(any(), any(), anyInt(), anyString(), any())).thenAnswer(invocation -> { + LOGGER.info((String) invocation.getArgument(1) + invocation.getArgument(2) + invocation.getArgument(3) + invocation.getArgument(4)); + String host = invocation.getArgument(1); + LOGGER.info("###### create " + host); + String connectorName = invocation.getArgument(3); + JsonObject connectorConfig = invocation.getArgument(4); runningConnectors.putIfAbsent(key(host, connectorName), new ConnectorState(false, connectorConfig)); return Future.succeededFuture(); }); - when(api.delete(any(), anyInt(), anyString())).thenAnswer(invocation -> { - String host = invocation.getArgument(0); - log.info("###### delete " + host); - String connectorName = invocation.getArgument(2); + when(api.delete(any(), any(), anyInt(), anyString())).thenAnswer(invocation -> { + String host = invocation.getArgument(1); + LOGGER.info("###### delete " + host); + String connectorName = invocation.getArgument(3); ConnectorState remove = runningConnectors.remove(key(host, connectorName)); return remove != null ? 
Future.succeededFuture() : Future.failedFuture("No such connector " + connectorName); }); - when(api.statusWithBackOff(any(), any(), anyInt(), anyString())).thenAnswer(invocation -> { - String host = invocation.getArgument(1); - log.info("###### status " + host); - String connectorName = invocation.getArgument(3); + when(api.statusWithBackOff(any(), any(), any(), anyInt(), anyString())).thenAnswer(invocation -> { + String host = invocation.getArgument(2); + LOGGER.info("###### status " + host); + String connectorName = invocation.getArgument(4); return kafkaConnectApiStatusMock(host, connectorName); }); - when(api.status(any(), anyInt(), anyString())).thenAnswer(invocation -> { - String host = invocation.getArgument(0); - log.info("###### status " + host); - String connectorName = invocation.getArgument(2); + when(api.status(any(), any(), anyInt(), anyString())).thenAnswer(invocation -> { + String host = invocation.getArgument(1); + LOGGER.info("###### status " + host); + String connectorName = invocation.getArgument(3); return kafkaConnectApiStatusMock(host, connectorName); }); when(api.pause(any(), anyInt(), anyString())).thenAnswer(invocation -> { @@ -307,9 +307,9 @@ private void setupMockConnectAPI() { } return Future.succeededFuture(); }); - when(api.getConnectorTopics(any(), anyInt(), anyString())).thenAnswer(invocation -> { - String host = invocation.getArgument(0); - String connectorName = invocation.getArgument(2); + when(api.getConnectorTopics(any(), any(), anyInt(), anyString())).thenAnswer(invocation -> { + String host = invocation.getArgument(1); + String connectorName = invocation.getArgument(3); ConnectorState connectorState = runningConnectors.get(key(host, connectorName)); if (connectorState == null) { return Future.failedFuture(new ConnectRestException("GET", String.format("/connectors/%s/topics", connectorName), 404, "Not Found", "")); @@ -577,7 +577,7 @@ public void testConnectConnectorConnectorConnect() { verify(api, atLeastOnce()).list( eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT)); - verify(api, never()).createOrUpdatePutRequest( + verify(api, never()).createOrUpdatePutRequest(any(), eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName), any()); @@ -598,7 +598,7 @@ public void testConnectConnectorConnectorConnect() { verify(api, times(2)).list( eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT)); - verify(api, times(1)).createOrUpdatePutRequest( + verify(api, times(1)).createOrUpdatePutRequest(any(), eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName), any()); assertThat(runningConnectors.keySet(), is(Collections.singleton(key("cluster-connect-api.ns.svc", connectorName)))); @@ -608,7 +608,7 @@ public void testConnectConnectorConnectorConnect() { waitFor("delete call on connect REST api", 1_000, 30_000, () -> runningConnectors.isEmpty()); // Verify connector is deleted from the connect via REST api - verify(api).delete( + verify(api).delete(any(), eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName)); @@ -638,7 +638,7 @@ public void testConnectorConnectConnectorConnect() { verify(api, never()).list( eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT)); - 
verify(api, never()).createOrUpdatePutRequest( + verify(api, never()).createOrUpdatePutRequest(any(), eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName), any()); assertThat(runningConnectors.keySet(), is(empty())); @@ -660,7 +660,7 @@ public void testConnectorConnectConnectorConnect() { verify(api, atLeastOnce()).list( eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT)); // triggered three times (Connect creation, Connector Status update, Connect Status update) - verify(api, times(3)).createOrUpdatePutRequest( + verify(api, times(3)).createOrUpdatePutRequest(any(), eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName), any()); assertThat(runningConnectors.keySet(), is(Collections.singleton(key("cluster-connect-api.ns.svc", connectorName)))); @@ -669,7 +669,7 @@ public void testConnectorConnectConnectorConnect() { assertThat(connectorDeleted, is(true)); waitFor("delete call on connect REST api", 1_000, 30_000, () -> runningConnectors.isEmpty()); - verify(api).delete( + verify(api).delete(any(), eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName)); @@ -700,7 +700,7 @@ public void testConnectConnectorConnectConnector() { // could be triggered twice (creation followed by status update) but waitForConnectReady could be satisfied with single verify(api, atLeastOnce()).list( eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT)); - verify(api, never()).createOrUpdatePutRequest( + verify(api, never()).createOrUpdatePutRequest(any(), eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName), any()); @@ -723,7 +723,7 @@ public void testConnectConnectorConnectConnector() { verify(api, atLeastOnce()).list( eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT)); // triggered twice (Connect creation, Connector Status update) - verify(api, times(1)).createOrUpdatePutRequest( + verify(api, times(1)).createOrUpdatePutRequest(any(), eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName), any()); assertThat(runningConnectors.keySet(), is(Collections.singleton(key("cluster-connect-api.ns.svc", connectorName)))); @@ -736,7 +736,7 @@ public void testConnectConnectorConnectConnector() { boolean connectorDeleted = Crds.kafkaConnectorOperation(client).inNamespace(NAMESPACE).withName(connectorName).delete(); assertThat(connectorDeleted, is(true)); - verify(api, never()).delete( + verify(api, never()).delete(any(), eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName)); } @@ -763,7 +763,7 @@ public void testConnectorConnectConnectConnector() { verify(api, never()).list( eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT)); - verify(api, never()).createOrUpdatePutRequest( + verify(api, never()).createOrUpdatePutRequest(any(), eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName), any()); assertThat(runningConnectors.keySet(), is(empty())); @@ -787,7 +787,7 @@ public void 
testConnectorConnectConnectConnector() { verify(api, atLeastOnce()).list( eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT)); // triggered at least two times (Connect creation, Connector Status update) - verify(api, atLeast(2)).createOrUpdatePutRequest( + verify(api, atLeast(2)).createOrUpdatePutRequest(any(), eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName), any()); assertThat(runningConnectors.keySet(), is(Collections.singleton(key("cluster-connect-api.ns.svc", connectorName)))); @@ -800,7 +800,7 @@ public void testConnectorConnectConnectConnector() { boolean connectorDeleted = Crds.kafkaConnectorOperation(client).inNamespace(NAMESPACE).withName(connectorName).delete(); assertThat(connectorDeleted, is(true)); // Verify the connector was never deleted from connect as the cluster was deleted first - verify(api, never()).delete( + verify(api, never()).delete(any(), eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName)); } @@ -858,11 +858,11 @@ public void testChangeStrimziClusterLabel(VertxTestContext context) throws Inter waitForConnectorReady(connectorName); // triggered twice (Connect creation, Connector Status update) for the first cluster - verify(api, times(1)).createOrUpdatePutRequest( + verify(api, times(1)).createOrUpdatePutRequest(any(), eq(KafkaConnectResources.qualifiedServiceName(oldConnectClusterName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName), any()); // never triggered for the second cluster as connector's Strimzi cluster label does not match cluster 2 - verify(api, never()).createOrUpdatePutRequest( + verify(api, never()).createOrUpdatePutRequest(any(), eq(KafkaConnectResources.qualifiedServiceName(newConnectClusterName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName), any()); @@ -882,10 +882,10 @@ public void testChangeStrimziClusterLabel(VertxTestContext context) throws Inter // Note: The connector does not get deleted immediately from the first cluster, only on the next timed reconciliation - verify(api, never()).delete( + verify(api, never()).delete(any(), eq(KafkaConnectResources.qualifiedServiceName(oldConnectClusterName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName)); - verify(api, times(1)).createOrUpdatePutRequest( + verify(api, times(1)).createOrUpdatePutRequest(any(), eq(KafkaConnectResources.qualifiedServiceName(newConnectClusterName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName), any()); @@ -893,7 +893,7 @@ public void testChangeStrimziClusterLabel(VertxTestContext context) throws Inter Checkpoint async = context.checkpoint(); kafkaConnectOperator.reconcile(new Reconciliation("test", "KafkaConnect", NAMESPACE, oldConnectClusterName)) .onComplete(context.succeeding(v -> context.verify(() -> { - verify(api, times(1)).delete( + verify(api, times(1)).delete(any(), eq(KafkaConnectResources.qualifiedServiceName(oldConnectClusterName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName)); async.flag(); @@ -906,7 +906,7 @@ public void testConnectorNotReadyWhenExceptionFromConnectRestApi() { String connectName = "cluster"; String connectorName = "connector"; - when(api.createOrUpdatePutRequest(any(), anyInt(), anyString(), any())) + when(api.createOrUpdatePutRequest(any(), any(), anyInt(), anyString(), any())) .thenAnswer(invocation -> 
Future.failedFuture(new ConnectRestException("GET", "/foo", 500, "Internal server error", "Bad stuff happened"))); // NOTE: Clear runningConnectors as re-mocking it causes an entry to be added runningConnectors.clear(); @@ -929,7 +929,7 @@ public void testConnectorNotReadyWhenExceptionFromConnectRestApi() { // triggered atleast once (Connect creation) verify(api, atLeastOnce()).list( eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT)); - verify(api, never()).createOrUpdatePutRequest( + verify(api, never()).createOrUpdatePutRequest(any(), eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName), any()); @@ -949,7 +949,7 @@ public void testConnectorNotReadyWhenExceptionFromConnectRestApi() { verify(api, times(2)).list( eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT)); - verify(api, times(2)).createOrUpdatePutRequest( + verify(api, times(2)).createOrUpdatePutRequest(any(), eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName), any()); assertThat(runningConnectors.keySet(), is(empty())); @@ -978,7 +978,7 @@ public void testConnectorPauseResume() { // could be triggered twice (creation followed by status update) but waitForConnectReady could be satisfied with single verify(api, atLeastOnce()).list( eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT)); - verify(api, never()).createOrUpdatePutRequest( + verify(api, never()).createOrUpdatePutRequest(any(), eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName), any()); @@ -1000,7 +1000,7 @@ public void testConnectorPauseResume() { verify(api, times(2)).list( eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT)); - verify(api, times(1)).createOrUpdatePutRequest( + verify(api, times(1)).createOrUpdatePutRequest(any(), eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName), any()); assertThat(runningConnectors.keySet(), is(Collections.singleton(key("cluster-connect-api.ns.svc", connectorName)))); @@ -1065,8 +1065,8 @@ public void testConnectorRestart() { // could be triggered twice (creation followed by status update) but waitForConnectReady could be satisfied with single verify(api, atLeastOnce()).list( eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT)); - verify(api, never()).createOrUpdatePutRequest( - eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), + verify(api, never()).createOrUpdatePutRequest(any(), + eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName), any()); // Create KafkaConnector and wait till it's ready @@ -1086,8 +1086,8 @@ public void testConnectorRestart() { verify(api, times(2)).list( eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT)); - verify(api, times(1)).createOrUpdatePutRequest( - eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), + verify(api, times(1)).createOrUpdatePutRequest(any(), + 
eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName), any()); assertThat(runningConnectors.keySet(), is(Collections.singleton(key("cluster-connect-api.ns.svc", connectorName)))); @@ -1142,8 +1142,8 @@ public void testConnectorRestartFail() { // could be triggered twice (creation followed by status update) but waitForConnectReady could be satisfied with single verify(api, atLeastOnce()).list( eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT)); - verify(api, never()).createOrUpdatePutRequest( - eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), + verify(api, never()).createOrUpdatePutRequest(any(), + eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName), any()); // Create KafkaConnector and wait till it's ready @@ -1163,8 +1163,8 @@ public void testConnectorRestartFail() { verify(api, times(2)).list( eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT)); - verify(api, times(1)).createOrUpdatePutRequest( - eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), + verify(api, times(1)).createOrUpdatePutRequest(any(), + eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName), any()); assertThat(runningConnectors.keySet(), is(Collections.singleton(key("cluster-connect-api.ns.svc", connectorName)))); @@ -1217,8 +1217,8 @@ public void testConnectorRestartTask() { // could be triggered twice (creation followed by status update) but waitForConnectReady could be satisfied with single verify(api, atLeastOnce()).list( eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT)); - verify(api, never()).createOrUpdatePutRequest( - eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), + verify(api, never()).createOrUpdatePutRequest(any(), + eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName), any()); // Create KafkaConnector and wait till it's ready @@ -1238,8 +1238,8 @@ public void testConnectorRestartTask() { verify(api, times(2)).list( eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT)); - verify(api, times(1)).createOrUpdatePutRequest( - eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), + verify(api, times(1)).createOrUpdatePutRequest(any(), + eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName), any()); assertThat(runningConnectors.keySet(), is(Collections.singleton(key("cluster-connect-api.ns.svc", connectorName)))); @@ -1293,8 +1293,8 @@ public void testConnectorRestartTaskFail() { // could be triggered twice (creation followed by status update) but waitForConnectReady could be satisfied with single verify(api, atLeastOnce()).list( eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT)); - verify(api, never()).createOrUpdatePutRequest( - eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), 
eq(KafkaConnectCluster.REST_API_PORT), + verify(api, never()).createOrUpdatePutRequest(any(), + eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName), any()); // Create KafkaConnector and wait till it's ready @@ -1314,8 +1314,8 @@ public void testConnectorRestartTaskFail() { verify(api, times(2)).list( eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT)); - verify(api, times(1)).createOrUpdatePutRequest( - eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), + verify(api, times(1)).createOrUpdatePutRequest(any(), + eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName), any()); assertThat(runningConnectors.keySet(), is(Collections.singleton(key("cluster-connect-api.ns.svc", connectorName)))); @@ -1369,7 +1369,7 @@ public void testConnectScaleToZero() { verify(api, atLeastOnce()).list( eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT)); - verify(api, never()).createOrUpdatePutRequest( + verify(api, never()).createOrUpdatePutRequest(any(), eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName), any()); @@ -1390,16 +1390,16 @@ public void testConnectScaleToZero() { verify(api, times(2)).list( eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT)); - verify(api, times(1)).createOrUpdatePutRequest( + verify(api, times(1)).createOrUpdatePutRequest(any(), eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName), any()); assertThat(runningConnectors.keySet(), is(Collections.singleton(key("cluster-connect-api.ns.svc", connectorName)))); when(api.list(any(), anyInt())).thenReturn(Future.failedFuture(new ConnectTimeoutException("connection timed out"))); - when(api.listConnectorPlugins(any(), anyInt())).thenReturn(Future.failedFuture(new ConnectTimeoutException("connection timed out"))); - when(api.createOrUpdatePutRequest(any(), anyInt(), anyString(), any())).thenReturn(Future.failedFuture(new ConnectTimeoutException("connection timed out"))); + when(api.listConnectorPlugins(any(), any(), anyInt())).thenReturn(Future.failedFuture(new ConnectTimeoutException("connection timed out"))); + when(api.createOrUpdatePutRequest(any(), any(), anyInt(), anyString(), any())).thenReturn(Future.failedFuture(new ConnectTimeoutException("connection timed out"))); when(api.getConnectorConfig(any(), any(), anyInt(), any())).thenReturn(Future.failedFuture(new ConnectTimeoutException("connection timed out"))); - when(api.getConnector(any(), anyInt(), any())).thenReturn(Future.failedFuture(new ConnectTimeoutException("connection timed out"))); + when(api.getConnector(any(), any(), anyInt(), any())).thenReturn(Future.failedFuture(new ConnectTimeoutException("connection timed out"))); Crds.kafkaConnectOperation(client).inNamespace(NAMESPACE).withName(connectName).edit(spec -> new KafkaConnectBuilder(spec) .editSpec() @@ -1435,7 +1435,7 @@ public void testConnectRestAPIIssues() { verify(api, atLeastOnce()).list( eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT)); - verify(api, never()).createOrUpdatePutRequest( + verify(api, 
never()).createOrUpdatePutRequest(any(), eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName), any()); @@ -1456,16 +1456,16 @@ public void testConnectRestAPIIssues() { verify(api, times(2)).list( eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT)); - verify(api, times(1)).createOrUpdatePutRequest( + verify(api, times(1)).createOrUpdatePutRequest(any(), eq(KafkaConnectResources.qualifiedServiceName(connectName, NAMESPACE)), eq(KafkaConnectCluster.REST_API_PORT), eq(connectorName), any()); assertThat(runningConnectors.keySet(), is(Collections.singleton(key("cluster-connect-api.ns.svc", connectorName)))); when(api.list(any(), anyInt())).thenReturn(Future.failedFuture(new ConnectTimeoutException("connection timed out"))); - when(api.listConnectorPlugins(any(), anyInt())).thenReturn(Future.failedFuture(new ConnectTimeoutException("connection timed out"))); - when(api.createOrUpdatePutRequest(any(), anyInt(), anyString(), any())).thenReturn(Future.failedFuture(new ConnectTimeoutException("connection timed out"))); - when(api.getConnectorConfig(any(), any(), anyInt(), any())).thenReturn(Future.failedFuture(new ConnectTimeoutException("connection timed out"))); - when(api.getConnector(any(), anyInt(), any())).thenReturn(Future.failedFuture(new ConnectTimeoutException("connection timed out"))); + when(api.listConnectorPlugins(any(), any(), anyInt())).thenReturn(Future.failedFuture(new ConnectTimeoutException("connection timed out"))); + when(api.createOrUpdatePutRequest(any(), any(), anyInt(), anyString(), any())).thenReturn(Future.failedFuture(new ConnectTimeoutException("connection timed out"))); + when(api.getConnectorConfig(any(), any(), any(), anyInt(), any())).thenReturn(Future.failedFuture(new ConnectTimeoutException("connection timed out"))); + when(api.getConnector(any(), any(), anyInt(), any())).thenReturn(Future.failedFuture(new ConnectTimeoutException("connection timed out"))); Crds.kafkaConnectOperation(client).inNamespace(NAMESPACE).withName(connectName).edit(sp -> new KafkaConnectBuilder(sp) .editSpec() diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorManualRollingUpdatesTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorManualRollingUpdatesTest.java index 804c80bb97..96d1c6f53d 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorManualRollingUpdatesTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorManualRollingUpdatesTest.java @@ -106,8 +106,8 @@ public void testNoManualRollingUpdate(VertxTestContext context) throws ParseExce .endZookeeper() .endSpec() .build(); - KafkaCluster kafkaCluster = KafkaCluster.fromCrd(kafka, VERSIONS); - ZookeeperCluster zkCluster = ZookeeperCluster.fromCrd(kafka, VERSIONS); + KafkaCluster kafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, VERSIONS); + ZookeeperCluster zkCluster = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, VERSIONS); ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); @@ -125,8 +125,8 @@ public void testNoManualRollingUpdate(VertxTestContext context) throws ParseExce CrdOperator mockKafkaOps = supplier.kafkaOperator; when(mockKafkaOps.getAsync(eq(namespace), 
eq(clusterName))).thenReturn(Future.succeededFuture(kafka)); when(mockKafkaOps.get(eq(namespace), eq(clusterName))).thenReturn(kafka); - when(mockKafkaOps.updateStatusAsync(any())).thenReturn(Future.succeededFuture()); - when(mockKafkaOps.updateStatusAsync(any())).thenReturn(Future.succeededFuture()); + when(mockKafkaOps.updateStatusAsync(any(), any())).thenReturn(Future.succeededFuture()); + when(mockKafkaOps.updateStatusAsync(any(), any())).thenReturn(Future.succeededFuture()); MockKafkaAssemblyOperator kao = new MockKafkaAssemblyOperator( vertx, new PlatformFeaturesAvailability(false, kubernetesVersion), @@ -138,8 +138,8 @@ vertx, new PlatformFeaturesAvailability(false, kubernetesVersion), Checkpoint async = context.checkpoint(); kao.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, clusterName)) .onComplete(context.succeeding(v -> context.verify(() -> { - Mockito.verify(mockKafkaSetOps, never()).maybeRollingUpdate(any(), any()); - Mockito.verify(mockZkSetOps, never()).maybeRollingUpdate(any(), any()); + Mockito.verify(mockKafkaSetOps, never()).maybeRollingUpdate(any(), any(), any()); + Mockito.verify(mockZkSetOps, never()).maybeRollingUpdate(any(), any(), any()); async.flag(); }))); @@ -174,8 +174,8 @@ public void testStatefulSetManualRollingUpdate(VertxTestContext context) throws .endZookeeper() .endSpec() .build(); - KafkaCluster kafkaCluster = KafkaCluster.fromCrd(kafka, VERSIONS); - ZookeeperCluster zkCluster = ZookeeperCluster.fromCrd(kafka, VERSIONS); + KafkaCluster kafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, VERSIONS); + ZookeeperCluster zkCluster = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, VERSIONS); ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); @@ -193,7 +193,7 @@ public void testStatefulSetManualRollingUpdate(VertxTestContext context) throws return Future.succeededFuture(sts); }); ArgumentCaptor>> zkNeedsRestartCaptor = ArgumentCaptor.forClass(Function.class); - when(mockZkSetOps.maybeRollingUpdate(any(), zkNeedsRestartCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockZkSetOps.maybeRollingUpdate(any(), any(), zkNeedsRestartCaptor.capture())).thenReturn(Future.succeededFuture()); PodOperator mockPodOps = supplier.podOperations; when(mockPodOps.listAsync(any(), eq(zkCluster.getSelectorLabels()))).thenReturn(Future.succeededFuture(Collections.emptyList())); @@ -202,7 +202,7 @@ public void testStatefulSetManualRollingUpdate(VertxTestContext context) throws CrdOperator mockKafkaOps = supplier.kafkaOperator; when(mockKafkaOps.getAsync(eq(namespace), eq(clusterName))).thenReturn(Future.succeededFuture(kafka)); when(mockKafkaOps.get(eq(namespace), eq(clusterName))).thenReturn(kafka); - when(mockKafkaOps.updateStatusAsync(any())).thenReturn(Future.succeededFuture()); + when(mockKafkaOps.updateStatusAsync(any(), any())).thenReturn(Future.succeededFuture()); MockKafkaAssemblyOperator kao = new MockKafkaAssemblyOperator( vertx, new PlatformFeaturesAvailability(false, kubernetesVersion), @@ -215,7 +215,7 @@ vertx, new PlatformFeaturesAvailability(false, kubernetesVersion), kao.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, clusterName)) .onComplete(context.succeeding(v -> context.verify(() -> { // Verify Zookeeper rolling updates - Mockito.verify(mockZkSetOps, times(1)).maybeRollingUpdate(any(), any()); + Mockito.verify(mockZkSetOps, times(1)).maybeRollingUpdate(any(), any(), any()); Function> zkPodNeedsRestart = 
zkNeedsRestartCaptor.getValue(); assertThat(zkPodNeedsRestart.apply(podWithName("my-cluster-zookeeper-0")), is(Collections.singletonList("manual rolling update"))); assertThat(zkPodNeedsRestart.apply(podWithName("my-cluster-zookeeper-1")), is(Collections.singletonList("manual rolling update"))); @@ -260,8 +260,8 @@ public void testPodManualRollingUpdate(VertxTestContext context) throws ParseExc .endZookeeper() .endSpec() .build(); - KafkaCluster kafkaCluster = KafkaCluster.fromCrd(kafka, VERSIONS); - ZookeeperCluster zkCluster = ZookeeperCluster.fromCrd(kafka, VERSIONS); + KafkaCluster kafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, VERSIONS); + ZookeeperCluster zkCluster = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, VERSIONS); ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); @@ -271,7 +271,7 @@ public void testPodManualRollingUpdate(VertxTestContext context) throws ParseExc StatefulSetOperator mockZkSetOps = supplier.zkSetOperations; when(mockZkSetOps.getAsync(any(), any())).thenReturn(Future.succeededFuture(zkCluster.generateStatefulSet(false, null, null))); ArgumentCaptor>> zkNeedsRestartCaptor = ArgumentCaptor.forClass(Function.class); - when(mockZkSetOps.maybeRollingUpdate(any(), zkNeedsRestartCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockZkSetOps.maybeRollingUpdate(any(), any(), zkNeedsRestartCaptor.capture())).thenReturn(Future.succeededFuture()); PodOperator mockPodOps = supplier.podOperations; when(mockPodOps.listAsync(any(), eq(zkCluster.getSelectorLabels()))).thenAnswer(i -> { @@ -294,7 +294,7 @@ public void testPodManualRollingUpdate(VertxTestContext context) throws ParseExc CrdOperator mockKafkaOps = supplier.kafkaOperator; when(mockKafkaOps.getAsync(eq(namespace), eq(clusterName))).thenReturn(Future.succeededFuture(kafka)); when(mockKafkaOps.get(eq(namespace), eq(clusterName))).thenReturn(kafka); - when(mockKafkaOps.updateStatusAsync(any())).thenReturn(Future.succeededFuture()); + when(mockKafkaOps.updateStatusAsync(any(), any())).thenReturn(Future.succeededFuture()); MockKafkaAssemblyOperator kao = new MockKafkaAssemblyOperator( vertx, new PlatformFeaturesAvailability(false, kubernetesVersion), @@ -307,7 +307,7 @@ vertx, new PlatformFeaturesAvailability(false, kubernetesVersion), kao.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, clusterName)) .onComplete(context.succeeding(v -> context.verify(() -> { // Verify Zookeeper rolling updates - Mockito.verify(mockZkSetOps, times(1)).maybeRollingUpdate(any(), any()); + Mockito.verify(mockZkSetOps, times(1)).maybeRollingUpdate(any(), any(), any()); Function> zkPodNeedsRestart = zkNeedsRestartCaptor.getValue(); assertThat(zkPodNeedsRestart.apply(podWithName("my-cluster-zookeeper-0")), is(nullValue())); assertThat(zkPodNeedsRestart.apply(podWithName("my-cluster-zookeeper-1")), is(Collections.singletonList("manual rolling update annotation on a pod"))); diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorNonParametrizedTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorNonParametrizedTest.java index 2893265993..9476f87a4d 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorNonParametrizedTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorNonParametrizedTest.java @@ 
-138,10 +138,10 @@ public void testCustomLabelsAndAnnotations(VertxTestContext context) { ArgumentCaptor clusterCaKey = ArgumentCaptor.forClass(Secret.class); ArgumentCaptor clientsCaCert = ArgumentCaptor.forClass(Secret.class); ArgumentCaptor clientsCaKey = ArgumentCaptor.forClass(Secret.class); - when(secretOps.reconcile(eq(NAMESPACE), eq(AbstractModel.clusterCaCertSecretName(NAME)), clusterCaCert.capture())).thenAnswer(i -> Future.succeededFuture(ReconcileResult.created(i.getArgument(0)))); - when(secretOps.reconcile(eq(NAMESPACE), eq(AbstractModel.clusterCaKeySecretName(NAME)), clusterCaKey.capture())).thenAnswer(i -> Future.succeededFuture(ReconcileResult.created(i.getArgument(0)))); - when(secretOps.reconcile(eq(NAMESPACE), eq(KafkaCluster.clientsCaCertSecretName(NAME)), clientsCaCert.capture())).thenAnswer(i -> Future.succeededFuture(ReconcileResult.created(i.getArgument(0)))); - when(secretOps.reconcile(eq(NAMESPACE), eq(KafkaCluster.clientsCaKeySecretName(NAME)), clientsCaKey.capture())).thenAnswer(i -> Future.succeededFuture(ReconcileResult.created(i.getArgument(0)))); + when(secretOps.reconcile(any(), eq(NAMESPACE), eq(AbstractModel.clusterCaCertSecretName(NAME)), clusterCaCert.capture())).thenAnswer(i -> Future.succeededFuture(ReconcileResult.created(i.getArgument(0)))); + when(secretOps.reconcile(any(), eq(NAMESPACE), eq(AbstractModel.clusterCaKeySecretName(NAME)), clusterCaKey.capture())).thenAnswer(i -> Future.succeededFuture(ReconcileResult.created(i.getArgument(0)))); + when(secretOps.reconcile(any(), eq(NAMESPACE), eq(KafkaCluster.clientsCaCertSecretName(NAME)), clientsCaCert.capture())).thenAnswer(i -> Future.succeededFuture(ReconcileResult.created(i.getArgument(0)))); + when(secretOps.reconcile(any(), eq(NAMESPACE), eq(KafkaCluster.clientsCaKeySecretName(NAME)), clientsCaKey.capture())).thenAnswer(i -> Future.succeededFuture(ReconcileResult.created(i.getArgument(0)))); KafkaAssemblyOperator op = new KafkaAssemblyOperator(vertx, new PlatformFeaturesAvailability(false, KubernetesVersion.V1_16), certManager, passwordGenerator, supplier, ResourceUtils.dummyClusterOperatorConfig(1L)); @@ -218,10 +218,10 @@ public void testClusterCASecretsWithoutOwnerReference(VertxTestContext context) ArgumentCaptor clusterCaKey = ArgumentCaptor.forClass(Secret.class); ArgumentCaptor clientsCaCert = ArgumentCaptor.forClass(Secret.class); ArgumentCaptor clientsCaKey = ArgumentCaptor.forClass(Secret.class); - when(secretOps.reconcile(eq(NAMESPACE), eq(AbstractModel.clusterCaCertSecretName(NAME)), clusterCaCert.capture())).thenAnswer(i -> Future.succeededFuture(ReconcileResult.created(i.getArgument(0)))); - when(secretOps.reconcile(eq(NAMESPACE), eq(AbstractModel.clusterCaKeySecretName(NAME)), clusterCaKey.capture())).thenAnswer(i -> Future.succeededFuture(ReconcileResult.created(i.getArgument(0)))); - when(secretOps.reconcile(eq(NAMESPACE), eq(KafkaCluster.clientsCaCertSecretName(NAME)), clientsCaCert.capture())).thenAnswer(i -> Future.succeededFuture(ReconcileResult.created(i.getArgument(0)))); - when(secretOps.reconcile(eq(NAMESPACE), eq(KafkaCluster.clientsCaKeySecretName(NAME)), clientsCaKey.capture())).thenAnswer(i -> Future.succeededFuture(ReconcileResult.created(i.getArgument(0)))); + when(secretOps.reconcile(any(), eq(NAMESPACE), eq(AbstractModel.clusterCaCertSecretName(NAME)), clusterCaCert.capture())).thenAnswer(i -> Future.succeededFuture(ReconcileResult.created(i.getArgument(0)))); + when(secretOps.reconcile(any(), eq(NAMESPACE), eq(AbstractModel.clusterCaKeySecretName(NAME)), 
clusterCaKey.capture())).thenAnswer(i -> Future.succeededFuture(ReconcileResult.created(i.getArgument(0)))); + when(secretOps.reconcile(any(), eq(NAMESPACE), eq(KafkaCluster.clientsCaCertSecretName(NAME)), clientsCaCert.capture())).thenAnswer(i -> Future.succeededFuture(ReconcileResult.created(i.getArgument(0)))); + when(secretOps.reconcile(any(), eq(NAMESPACE), eq(KafkaCluster.clientsCaKeySecretName(NAME)), clientsCaKey.capture())).thenAnswer(i -> Future.succeededFuture(ReconcileResult.created(i.getArgument(0)))); KafkaAssemblyOperator op = new KafkaAssemblyOperator(vertx, new PlatformFeaturesAvailability(false, KubernetesVersion.V1_16), certManager, passwordGenerator, supplier, ResourceUtils.dummyClusterOperatorConfig(1L)); @@ -292,10 +292,10 @@ public void testClientsCASecretsWithoutOwnerReference(VertxTestContext context) ArgumentCaptor clusterCaKey = ArgumentCaptor.forClass(Secret.class); ArgumentCaptor clientsCaCert = ArgumentCaptor.forClass(Secret.class); ArgumentCaptor clientsCaKey = ArgumentCaptor.forClass(Secret.class); - when(secretOps.reconcile(eq(NAMESPACE), eq(AbstractModel.clusterCaCertSecretName(NAME)), clusterCaCert.capture())).thenAnswer(i -> Future.succeededFuture(ReconcileResult.created(i.getArgument(0)))); - when(secretOps.reconcile(eq(NAMESPACE), eq(AbstractModel.clusterCaKeySecretName(NAME)), clusterCaKey.capture())).thenAnswer(i -> Future.succeededFuture(ReconcileResult.created(i.getArgument(0)))); - when(secretOps.reconcile(eq(NAMESPACE), eq(KafkaCluster.clientsCaCertSecretName(NAME)), clientsCaCert.capture())).thenAnswer(i -> Future.succeededFuture(ReconcileResult.created(i.getArgument(0)))); - when(secretOps.reconcile(eq(NAMESPACE), eq(KafkaCluster.clientsCaKeySecretName(NAME)), clientsCaKey.capture())).thenAnswer(i -> Future.succeededFuture(ReconcileResult.created(i.getArgument(0)))); + when(secretOps.reconcile(any(), eq(NAMESPACE), eq(AbstractModel.clusterCaCertSecretName(NAME)), clusterCaCert.capture())).thenAnswer(i -> Future.succeededFuture(ReconcileResult.created(i.getArgument(0)))); + when(secretOps.reconcile(any(), eq(NAMESPACE), eq(AbstractModel.clusterCaKeySecretName(NAME)), clusterCaKey.capture())).thenAnswer(i -> Future.succeededFuture(ReconcileResult.created(i.getArgument(0)))); + when(secretOps.reconcile(any(), eq(NAMESPACE), eq(KafkaCluster.clientsCaCertSecretName(NAME)), clientsCaCert.capture())).thenAnswer(i -> Future.succeededFuture(ReconcileResult.created(i.getArgument(0)))); + when(secretOps.reconcile(any(), eq(NAMESPACE), eq(KafkaCluster.clientsCaKeySecretName(NAME)), clientsCaKey.capture())).thenAnswer(i -> Future.succeededFuture(ReconcileResult.created(i.getArgument(0)))); KafkaAssemblyOperator op = new KafkaAssemblyOperator(vertx, new PlatformFeaturesAvailability(false, KubernetesVersion.V1_16), certManager, passwordGenerator, supplier, ResourceUtils.dummyClusterOperatorConfig(1L)); @@ -332,7 +332,7 @@ public void testDeleteClusterRoleBindings(VertxTestContext context) { ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); ClusterRoleBindingOperator mockCrbOps = supplier.clusterRoleBindingOperator; ArgumentCaptor desiredCrb = ArgumentCaptor.forClass(ClusterRoleBinding.class); - when(mockCrbOps.reconcile(eq(KafkaResources.initContainerClusterRoleBindingName(NAME, NAMESPACE)), desiredCrb.capture())).thenReturn(Future.succeededFuture()); + when(mockCrbOps.reconcile(any(), eq(KafkaResources.initContainerClusterRoleBindingName(NAME, NAMESPACE)), desiredCrb.capture())).thenReturn(Future.succeededFuture()); 
KafkaAssemblyOperator op = new KafkaAssemblyOperator(vertx, new PlatformFeaturesAvailability(false, KubernetesVersion.V1_16), certManager, passwordGenerator, supplier, ResourceUtils.dummyClusterOperatorConfig(1L)); @@ -343,7 +343,7 @@ public void testDeleteClusterRoleBindings(VertxTestContext context) { op.delete(reconciliation) .onComplete(context.succeeding(c -> context.verify(() -> { assertThat(desiredCrb.getValue(), is(nullValue())); - Mockito.verify(mockCrbOps, times(1)).reconcile(any(), any()); + Mockito.verify(mockCrbOps, times(1)).reconcile(any(), any(), any()); async.flag(); }))); @@ -376,7 +376,7 @@ public void testSelectorLabels(VertxTestContext context) { CrdOperator mockKafkaOps = supplier.kafkaOperator; when(mockKafkaOps.getAsync(eq(NAMESPACE), eq(NAME))).thenReturn(Future.succeededFuture(kafka)); when(mockKafkaOps.get(eq(NAMESPACE), eq(NAME))).thenReturn(kafka); - when(mockKafkaOps.updateStatusAsync(any(Kafka.class))).thenReturn(Future.succeededFuture()); + when(mockKafkaOps.updateStatusAsync(any(), any(Kafka.class))).thenReturn(Future.succeededFuture()); ClusterOperatorConfig config = new ClusterOperatorConfig( singleton("dummy"), @@ -468,7 +468,7 @@ public void testIngressV1Beta1(VertxTestContext context) { CrdOperator mockKafkaOps = supplier.kafkaOperator; when(mockKafkaOps.getAsync(eq(NAMESPACE), eq(NAME))).thenReturn(Future.succeededFuture(kafka)); when(mockKafkaOps.get(eq(NAMESPACE), eq(NAME))).thenReturn(kafka); - when(mockKafkaOps.updateStatusAsync(any(Kafka.class))).thenReturn(Future.succeededFuture()); + when(mockKafkaOps.updateStatusAsync(any(), any(Kafka.class))).thenReturn(Future.succeededFuture()); // Mock the KafkaSet operations KafkaSetOperator mockKafkaSetOps = supplier.kafkaSetOperations; @@ -482,15 +482,15 @@ public void testIngressV1Beta1(VertxTestContext context) { IngressV1Beta1Operator mockIngressV1Beta1ops = supplier.ingressV1Beta1Operations; ArgumentCaptor ingressV1Beta1Captor = ArgumentCaptor.forClass(io.fabric8.kubernetes.api.model.networking.v1beta1.Ingress.class); when(mockIngressV1Beta1ops.listAsync(eq(NAMESPACE), any(Labels.class))).thenReturn(Future.succeededFuture(emptyList())); - when(mockIngressV1Beta1ops.reconcile(anyString(), anyString(), ingressV1Beta1Captor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.created(new io.fabric8.kubernetes.api.model.networking.v1beta1.Ingress()))); - when(mockIngressV1Beta1ops.hasIngressAddress(eq(NAMESPACE), any(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockIngressV1Beta1ops.reconcile(any(), anyString(), anyString(), ingressV1Beta1Captor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.created(new io.fabric8.kubernetes.api.model.networking.v1beta1.Ingress()))); + when(mockIngressV1Beta1ops.hasIngressAddress(any(), eq(NAMESPACE), any(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); // Mock ingress v1 ops IngressOperator mockIngressOps = supplier.ingressOperations; ArgumentCaptor ingressCaptor = ArgumentCaptor.forClass(Ingress.class); when(mockIngressOps.listAsync(eq(NAMESPACE), any(Labels.class))).thenReturn(Future.succeededFuture(emptyList())); - when(mockIngressOps.reconcile(anyString(), anyString(), ingressCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.created(new Ingress()))); - when(mockIngressOps.hasIngressAddress(eq(NAMESPACE), any(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockIngressOps.reconcile(any(), anyString(), anyString(), 
ingressCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.created(new Ingress()))); + when(mockIngressOps.hasIngressAddress(any(), eq(NAMESPACE), any(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); KafkaAssemblyOperator op = new MockKafkaAssemblyOperatorForIngressTests(vertx, new PlatformFeaturesAvailability(false, KubernetesVersion.V1_16), certManager, passwordGenerator, supplier, ResourceUtils.dummyClusterOperatorConfig(KafkaVersionTestUtils.getKafkaVersionLookup())); @@ -503,8 +503,8 @@ public void testIngressV1Beta1(VertxTestContext context) { assertThat(ingressV1Beta1Captor.getAllValues().size(), is(4)); verify(mockIngressOps, never()).list(any(), any()); - verify(mockIngressOps, never()).reconcile(any(), any(), any()); - verify(mockIngressOps, never()).hasIngressAddress(any(), any(), anyLong(), anyLong()); + verify(mockIngressOps, never()).reconcile(any(), any(), any(), any()); + verify(mockIngressOps, never()).hasIngressAddress(any(), any(), any(), anyLong(), anyLong()); async.flag(); }))); @@ -559,7 +559,7 @@ public void testIngressV1(VertxTestContext context) { CrdOperator mockKafkaOps = supplier.kafkaOperator; when(mockKafkaOps.getAsync(eq(NAMESPACE), eq(NAME))).thenReturn(Future.succeededFuture(kafka)); when(mockKafkaOps.get(eq(NAMESPACE), eq(NAME))).thenReturn(kafka); - when(mockKafkaOps.updateStatusAsync(any(Kafka.class))).thenReturn(Future.succeededFuture()); + when(mockKafkaOps.updateStatusAsync(any(), any(Kafka.class))).thenReturn(Future.succeededFuture()); // Mock the KafkaSet operations KafkaSetOperator mockKafkaSetOps = supplier.kafkaSetOperations; @@ -573,15 +573,15 @@ public void testIngressV1(VertxTestContext context) { IngressV1Beta1Operator mockIngressV1Beta1ops = supplier.ingressV1Beta1Operations; ArgumentCaptor ingressV1Beta1Captor = ArgumentCaptor.forClass(io.fabric8.kubernetes.api.model.networking.v1beta1.Ingress.class); when(mockIngressV1Beta1ops.listAsync(eq(NAMESPACE), any(Labels.class))).thenReturn(Future.succeededFuture(emptyList())); - when(mockIngressV1Beta1ops.reconcile(anyString(), anyString(), ingressV1Beta1Captor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.created(new io.fabric8.kubernetes.api.model.networking.v1beta1.Ingress()))); - when(mockIngressV1Beta1ops.hasIngressAddress(eq(NAMESPACE), any(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockIngressV1Beta1ops.reconcile(any(), anyString(), anyString(), ingressV1Beta1Captor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.created(new io.fabric8.kubernetes.api.model.networking.v1beta1.Ingress()))); + when(mockIngressV1Beta1ops.hasIngressAddress(any(), eq(NAMESPACE), any(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); // Mock ingress v1 ops IngressOperator mockIngressOps = supplier.ingressOperations; ArgumentCaptor ingressCaptor = ArgumentCaptor.forClass(Ingress.class); when(mockIngressOps.listAsync(eq(NAMESPACE), any(Labels.class))).thenReturn(Future.succeededFuture(emptyList())); - when(mockIngressOps.reconcile(anyString(), anyString(), ingressCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.created(new Ingress()))); - when(mockIngressOps.hasIngressAddress(eq(NAMESPACE), any(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockIngressOps.reconcile(any(), anyString(), anyString(), ingressCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.created(new Ingress()))); + when(mockIngressOps.hasIngressAddress(any(), eq(NAMESPACE), any(), 
anyLong(), anyLong())).thenReturn(Future.succeededFuture()); KafkaAssemblyOperator op = new MockKafkaAssemblyOperatorForIngressTests(vertx, new PlatformFeaturesAvailability(false, KubernetesVersion.V1_19), certManager, passwordGenerator, supplier, ResourceUtils.dummyClusterOperatorConfig(KafkaVersionTestUtils.getKafkaVersionLookup())); @@ -594,8 +594,8 @@ public void testIngressV1(VertxTestContext context) { assertThat(ingressV1Beta1Captor.getAllValues().size(), is(0)); verify(mockIngressV1Beta1ops, never()).list(any(), any()); - verify(mockIngressV1Beta1ops, never()).reconcile(any(), any(), any()); - verify(mockIngressV1Beta1ops, never()).hasIngressAddress(any(), any(), anyLong(), anyLong()); + verify(mockIngressV1Beta1ops, never()).reconcile(any(), any(), any(), any()); + verify(mockIngressV1Beta1ops, never()).hasIngressAddress(any(), any(), any(), anyLong(), anyLong()); async.flag(); }))); diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorRbacScopeTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorRbacScopeTest.java index d9624feae3..d5fdf20fb1 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorRbacScopeTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorRbacScopeTest.java @@ -137,14 +137,14 @@ public void testRolesDeployedWhenNamespaceRbacScope(VertxTestContext context) { CrdOperator mockKafkaOps = supplier.kafkaOperator; when(mockKafkaOps.getAsync(eq(namespace), eq(clusterName))).thenReturn(Future.succeededFuture(kafka)); when(mockKafkaOps.get(eq(namespace), eq(clusterName))).thenReturn(kafka); - when(mockKafkaOps.updateStatusAsync(any(Kafka.class))).thenReturn(Future.succeededFuture()); + when(mockKafkaOps.updateStatusAsync(any(), any(Kafka.class))).thenReturn(Future.succeededFuture()); // Mock the operations for RoleBindings RoleBindingOperator mockRoleBindingOps = supplier.roleBindingOperations; // Capture the names of reconciled rolebindings and their patched state ArgumentCaptor roleBindingNameCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor roleBindingCaptor = ArgumentCaptor.forClass(RoleBinding.class); - when(mockRoleBindingOps.reconcile(eq(namespace), roleBindingNameCaptor.capture(), roleBindingCaptor.capture())) + when(mockRoleBindingOps.reconcile(any(), eq(namespace), roleBindingNameCaptor.capture(), roleBindingCaptor.capture())) .thenReturn(Future.succeededFuture()); KafkaAssemblyOperatorRolesSubset kao = new KafkaAssemblyOperatorRolesSubset( @@ -179,7 +179,7 @@ public void testRolesDeployedWhenNamespaceRbacScope(VertxTestContext context) { .withName("test-instance-entity-operator") .build())); - verify(supplier.clusterRoleBindingOperator, never()).reconcile(anyString(), any()); + verify(supplier.clusterRoleBindingOperator, never()).reconcile(any(), anyString(), any()); async.flag(); }))); @@ -218,14 +218,14 @@ public void testRolesDeployedWhenClusterRbacScope(VertxTestContext context) { CrdOperator mockKafkaOps = supplier.kafkaOperator; when(mockKafkaOps.getAsync(eq(namespace), eq(clusterName))).thenReturn(Future.succeededFuture(kafka)); when(mockKafkaOps.get(eq(namespace), eq(clusterName))).thenReturn(kafka); - when(mockKafkaOps.updateStatusAsync(any(Kafka.class))).thenReturn(Future.succeededFuture()); + when(mockKafkaOps.updateStatusAsync(any(), any(Kafka.class))).thenReturn(Future.succeededFuture()); // Mock the 
operations for RoleBindings RoleBindingOperator mockRoleBindingOps = supplier.roleBindingOperations; // Capture the names of reconciled rolebindings and their patched state ArgumentCaptor roleBindingNameCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor roleBindingCaptor = ArgumentCaptor.forClass(RoleBinding.class); - when(mockRoleBindingOps.reconcile(eq(namespace), roleBindingNameCaptor.capture(), roleBindingCaptor.capture())) + when(mockRoleBindingOps.reconcile(any(), eq(namespace), roleBindingNameCaptor.capture(), roleBindingCaptor.capture())) .thenReturn(Future.succeededFuture()); KafkaAssemblyOperatorRolesSubset kao = new KafkaAssemblyOperatorRolesSubset( @@ -299,14 +299,14 @@ public void testRolesDeployedWhenNamespaceRbacScopeAndMultiWatchNamespace(VertxT CrdOperator mockKafkaOps = supplier.kafkaOperator; when(mockKafkaOps.getAsync(eq(namespace), eq(clusterName))).thenReturn(Future.succeededFuture(kafka)); when(mockKafkaOps.get(eq(namespace), eq(clusterName))).thenReturn(kafka); - when(mockKafkaOps.updateStatusAsync(any(Kafka.class))).thenReturn(Future.succeededFuture()); + when(mockKafkaOps.updateStatusAsync(any(), any(Kafka.class))).thenReturn(Future.succeededFuture()); // Mock the operations for Roles RoleOperator mockRoleOps = supplier.roleOperations; // Capture the names of reconciled Roles and their patched state ArgumentCaptor roleNameCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor roleCaptor = ArgumentCaptor.forClass(Role.class); - when(mockRoleOps.reconcile(anyString(), roleNameCaptor.capture(), roleCaptor.capture())) + when(mockRoleOps.reconcile(any(), anyString(), roleNameCaptor.capture(), roleCaptor.capture())) .thenReturn(Future.succeededFuture()); // Mock the operations for RoleBindings @@ -314,7 +314,7 @@ public void testRolesDeployedWhenNamespaceRbacScopeAndMultiWatchNamespace(VertxT // Capture the names of reconciled RoleBindings and their patched state ArgumentCaptor roleBindingNameCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor roleBindingCaptor = ArgumentCaptor.forClass(RoleBinding.class); - when(mockRoleBindingOps.reconcile(anyString(), roleBindingNameCaptor.capture(), roleBindingCaptor.capture())) + when(mockRoleBindingOps.reconcile(any(), anyString(), roleBindingNameCaptor.capture(), roleBindingCaptor.capture())) .thenReturn(Future.succeededFuture()); KafkaAssemblyOperatorRolesSubset kao = new KafkaAssemblyOperatorRolesSubset( diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorTest.java index 1b3744f6a7..43da0eb612 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorTest.java @@ -416,9 +416,9 @@ private Map createPvcs(String namespace, Storage } private void createCluster(VertxTestContext context, Kafka kafka, List secrets) { - KafkaCluster kafkaCluster = KafkaCluster.fromCrd(kafka, VERSIONS); - ZookeeperCluster zookeeperCluster = ZookeeperCluster.fromCrd(kafka, VERSIONS); - EntityOperator entityOperator = EntityOperator.fromCrd(kafka, VERSIONS); + KafkaCluster kafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, VERSIONS); + ZookeeperCluster zookeeperCluster = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, VERSIONS); + EntityOperator 
entityOperator = EntityOperator.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, VERSIONS); // create CM, Service, headless service, statefulset and so on ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(openShift); @@ -443,17 +443,17 @@ private void createCluster(VertxTestContext context, Kafka kafka, List s String kafkaNamespace = kafka.getMetadata().getNamespace(); when(mockKafkaOps.get(kafkaNamespace, kafkaName)).thenReturn(null); when(mockKafkaOps.getAsync(eq(kafkaNamespace), eq(kafkaName))).thenReturn(Future.succeededFuture(kafka)); - when(mockKafkaOps.updateStatusAsync(any(Kafka.class))).thenReturn(Future.succeededFuture()); + when(mockKafkaOps.updateStatusAsync(any(), any(Kafka.class))).thenReturn(Future.succeededFuture()); ArgumentCaptor serviceCaptor = ArgumentCaptor.forClass(Service.class); ArgumentCaptor policyCaptor = ArgumentCaptor.forClass(NetworkPolicy.class); ArgumentCaptor pdbCaptor = ArgumentCaptor.forClass(PodDisruptionBudget.class); ArgumentCaptor ssCaptor = ArgumentCaptor.forClass(StatefulSet.class); - when(mockZsOps.reconcile(anyString(), anyString(), ssCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.created(new StatefulSet()))); - when(mockZsOps.scaleDown(anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(null)); - when(mockZsOps.maybeRollingUpdate(any(), any(Function.class))).thenReturn(Future.succeededFuture()); - when(mockZsOps.scaleUp(anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); + when(mockZsOps.reconcile(any(), anyString(), anyString(), ssCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.created(new StatefulSet()))); + when(mockZsOps.scaleDown(any(), anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(null)); + when(mockZsOps.maybeRollingUpdate(any(), any(), any(Function.class))).thenReturn(Future.succeededFuture()); + when(mockZsOps.scaleUp(any(), anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); AtomicReference ref = new AtomicReference<>(); - when(mockKsOps.reconcile(anyString(), anyString(), ssCaptor.capture())).thenAnswer(i -> { + when(mockKsOps.reconcile(any(), anyString(), anyString(), ssCaptor.capture())).thenAnswer(i -> { StatefulSet sts = new StatefulSetBuilder().withNewMetadata() .withName(kafkaName + "kafka") .withNamespace(kafkaNamespace) @@ -464,14 +464,14 @@ private void createCluster(VertxTestContext context, Kafka kafka, List s ref.set(sts); return Future.succeededFuture(ReconcileResult.created(sts)); }); - when(mockKsOps.scaleDown(anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(null)); - when(mockKsOps.maybeRollingUpdate(any(), any(Function.class))).thenReturn(Future.succeededFuture()); - when(mockKsOps.scaleUp(anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); - when(mockPolicyOps.reconcile(anyString(), anyString(), policyCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); + when(mockKsOps.scaleDown(any(), anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(null)); + when(mockKsOps.maybeRollingUpdate(any(), any(), any(Function.class))).thenReturn(Future.succeededFuture()); + when(mockKsOps.scaleUp(any(), anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); + when(mockPolicyOps.reconcile(any(), anyString(), anyString(), policyCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); 
when(mockZsOps.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture()); when(mockKsOps.getAsync(anyString(), anyString())).thenAnswer(i -> Future.succeededFuture(ref.get())); - when(mockPdbOps.reconcile(anyString(), anyString(), pdbCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.created(new PodDisruptionBudget()))); + when(mockPdbOps.reconcile(any(), anyString(), anyString(), pdbCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.created(new PodDisruptionBudget()))); // Service mocks Set createdServices = new HashSet<>(); @@ -496,8 +496,8 @@ private void createCluster(VertxTestContext context, Kafka kafka, List s return Future.succeededFuture(svc); }); - when(mockServiceOps.reconcile(anyString(), anyString(), serviceCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.created(new Service()))); - when(mockServiceOps.endpointReadiness(anyString(), any(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockServiceOps.reconcile(any(), anyString(), anyString(), serviceCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.created(new Service()))); + when(mockServiceOps.endpointReadiness(any(), anyString(), any(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); when(mockServiceOps.listAsync(eq(kafkaNamespace), any(Labels.class))).thenReturn(Future.succeededFuture(emptyList())); // Ingress mocks @@ -536,7 +536,7 @@ private void createCluster(VertxTestContext context, Kafka kafka, List s } // Mock pod readiness - when(mockPodOps.readiness(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockPodOps.readiness(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); when(mockPodOps.listAsync(anyString(), any(Labels.class))).thenReturn(Future.succeededFuture(emptyList())); // Mock node ops @@ -579,7 +579,7 @@ private void createCluster(VertxTestContext context, Kafka kafka, List s Set expectedPvcs = new HashSet<>(zkPvcs.keySet()); expectedPvcs.addAll(kafkaPvcs.keySet()); ArgumentCaptor pvcCaptor = ArgumentCaptor.forClass(PersistentVolumeClaim.class); - when(mockPvcOps.reconcile(anyString(), anyString(), pvcCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockPvcOps.reconcile(any(), anyString(), anyString(), pvcCaptor.capture())).thenReturn(Future.succeededFuture()); Set expectedSecrets = set( KafkaCluster.clientsCaKeySecretName(kafkaName), @@ -600,9 +600,9 @@ private void createCluster(VertxTestContext context, Kafka kafka, List s expectedSecrets.add(EntityOperator.secretName(kafkaName)); } - when(mockDepOps.reconcile(anyString(), anyString(), any())).thenAnswer(invocation -> { - String name = invocation.getArgument(1); - Deployment desired = invocation.getArgument(2); + when(mockDepOps.reconcile(any(), anyString(), anyString(), any())).thenAnswer(invocation -> { + String name = invocation.getArgument(2); + Deployment desired = invocation.getArgument(3); if (desired != null) { if (name.contains("operator")) { if (entityOperator != null) { @@ -617,10 +617,10 @@ private void createCluster(VertxTestContext context, Kafka kafka, List s when(mockDepOps.getAsync(anyString(), anyString())).thenReturn( Future.succeededFuture() ); - when(mockDepOps.waitForObserved(anyString(), anyString(), anyLong(), anyLong())).thenReturn( + when(mockDepOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn( Future.succeededFuture() ); - when(mockDepOps.readiness(anyString(), 
anyString(), anyLong(), anyLong())).thenReturn( + when(mockDepOps.readiness(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn( Future.succeededFuture() ); @@ -639,8 +639,8 @@ private void createCluster(VertxTestContext context, Kafka kafka, List s Future.succeededFuture(secretsMap.get(i.getArgument(1))) ); - when(mockSecretOps.reconcile(anyString(), anyString(), any())).thenAnswer(invocation -> { - Secret desired = invocation.getArgument(2); + when(mockSecretOps.reconcile(any(), anyString(), anyString(), any())).thenAnswer(invocation -> { + Secret desired = invocation.getArgument(3); if (desired != null) { secretsMap.put(desired.getMetadata().getName(), desired); } @@ -649,11 +649,11 @@ private void createCluster(VertxTestContext context, Kafka kafka, List s ArgumentCaptor metricsCaptor = ArgumentCaptor.forClass(ConfigMap.class); ArgumentCaptor metricsNameCaptor = ArgumentCaptor.forClass(String.class); - when(mockCmOps.reconcile(anyString(), metricsNameCaptor.capture(), metricsCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); + when(mockCmOps.reconcile(any(), anyString(), metricsNameCaptor.capture(), metricsCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); ArgumentCaptor logCaptor = ArgumentCaptor.forClass(ConfigMap.class); ArgumentCaptor logNameCaptor = ArgumentCaptor.forClass(String.class); - when(mockCmOps.reconcile(anyString(), logNameCaptor.capture(), logCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); + when(mockCmOps.reconcile(any(), anyString(), logNameCaptor.capture(), logCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); ConfigMap metricsCm = kafkaCluster.generateAncillaryConfigMap(new MetricsAndLogging(metricsCM, null), emptySet(), emptySet(), false); when(mockCmOps.getAsync(kafkaNamespace, KafkaCluster.metricAndLogConfigsName(kafkaName))).thenReturn(Future.succeededFuture(metricsCm)); @@ -669,7 +669,7 @@ private void createCluster(VertxTestContext context, Kafka kafka, List s ArgumentCaptor routeCaptor = ArgumentCaptor.forClass(Route.class); ArgumentCaptor routeNameCaptor = ArgumentCaptor.forClass(String.class); if (openShift) { - when(mockRouteOps.reconcile(eq(kafkaNamespace), routeNameCaptor.capture(), routeCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.created(new Route()))); + when(mockRouteOps.reconcile(any(), eq(kafkaNamespace), routeNameCaptor.capture(), routeCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.created(new Route()))); } KafkaAssemblyOperator ops = new KafkaAssemblyOperator(vertx, new PlatformFeaturesAvailability(openShift, kubernetesVersion), @@ -911,13 +911,13 @@ public void testUpdateZkClusterLogConfig(Params params, VertxTestContext context } private void updateCluster(VertxTestContext context, Kafka originalAssembly, Kafka updatedAssembly) { - KafkaCluster originalKafkaCluster = KafkaCluster.fromCrd(originalAssembly, VERSIONS); - KafkaCluster updatedKafkaCluster = KafkaCluster.fromCrd(updatedAssembly, VERSIONS); - ZookeeperCluster originalZookeeperCluster = ZookeeperCluster.fromCrd(originalAssembly, VERSIONS); - ZookeeperCluster updatedZookeeperCluster = ZookeeperCluster.fromCrd(updatedAssembly, VERSIONS); - EntityOperator originalEntityOperator = EntityOperator.fromCrd(originalAssembly, VERSIONS); - KafkaExporter originalKafkaExporter = KafkaExporter.fromCrd(originalAssembly, VERSIONS); - CruiseControl 
originalCruiseControl = CruiseControl.fromCrd(originalAssembly, VERSIONS); + KafkaCluster originalKafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, originalAssembly, VERSIONS); + KafkaCluster updatedKafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, updatedAssembly, VERSIONS); + ZookeeperCluster originalZookeeperCluster = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, originalAssembly, VERSIONS); + ZookeeperCluster updatedZookeeperCluster = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, updatedAssembly, VERSIONS); + EntityOperator originalEntityOperator = EntityOperator.fromCrd(new Reconciliation("test", originalAssembly.getKind(), originalAssembly.getMetadata().getNamespace(), originalAssembly.getMetadata().getName()), originalAssembly, VERSIONS); + KafkaExporter originalKafkaExporter = KafkaExporter.fromCrd(new Reconciliation("test", originalAssembly.getKind(), originalAssembly.getMetadata().getNamespace(), originalAssembly.getMetadata().getName()), originalAssembly, VERSIONS); + CruiseControl originalCruiseControl = CruiseControl.fromCrd(Reconciliation.DUMMY_RECONCILIATION, originalAssembly, VERSIONS); // create CM, Service, headless service, statefulset and so on ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(openShift); @@ -991,12 +991,12 @@ private void updateCluster(VertxTestContext context, Kafka originalAssembly, Kaf return Future.succeededFuture(Collections.EMPTY_LIST); }); - when(mockPvcOps.reconcile(anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); + when(mockPvcOps.reconcile(any(), anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); // Mock CM get when(mockKafkaOps.get(clusterNamespace, clusterName)).thenReturn(updatedAssembly); when(mockKafkaOps.getAsync(eq(clusterNamespace), eq(clusterName))).thenReturn(Future.succeededFuture(updatedAssembly)); - when(mockKafkaOps.updateStatusAsync(any(Kafka.class))).thenReturn(Future.succeededFuture()); + when(mockKafkaOps.updateStatusAsync(any(), any(Kafka.class))).thenReturn(Future.succeededFuture()); ConfigMap metricsCm = new ConfigMapBuilder() .withNewMetadata() .withName("metrics-cm") @@ -1036,7 +1036,7 @@ private void updateCluster(VertxTestContext context, Kafka originalAssembly, Kaf when(mockCmOps.getAsync(clusterNamespace, differentMetricsCMName)).thenReturn(Future.succeededFuture(metricsCM)); // Mock pod ops - when(mockPodOps.readiness(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockPodOps.readiness(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); when(mockPodOps.listAsync(anyString(), any(Labels.class))).thenReturn(Future.succeededFuture(emptyList())); // Mock node ops @@ -1055,7 +1055,7 @@ private void updateCluster(VertxTestContext context, Kafka originalAssembly, Kaf Map expectedServicesMap = expectedServices.stream().collect(Collectors.toMap(s -> s.getMetadata().getName(), s -> s)); - when(mockServiceOps.endpointReadiness(eq(clusterNamespace), any(), anyLong(), anyLong())).thenReturn( + when(mockServiceOps.endpointReadiness(any(), eq(clusterNamespace), any(), anyLong(), anyLong())).thenReturn( Future.succeededFuture() ); when(mockServiceOps.get(eq(clusterNamespace), anyString())).thenAnswer(i -> Future.succeededFuture(expectedServicesMap.get(i.getArgument(1)))); @@ -1074,7 +1074,7 @@ private void updateCluster(VertxTestContext context, Kafka originalAssembly, Kaf 
originalKafkaCluster.generateHeadlessService() )) ); - when(mockServiceOps.hasNodePort(eq(clusterNamespace), any(), anyLong(), anyLong())).thenReturn( + when(mockServiceOps.hasNodePort(any(), eq(clusterNamespace), any(), anyLong(), anyLong())).thenReturn( Future.succeededFuture() ); @@ -1111,7 +1111,7 @@ private void updateCluster(VertxTestContext context, Kafka originalAssembly, Kaf when(mockRouteOps.listAsync(eq(clusterNamespace), any(Labels.class))).thenReturn( Future.succeededFuture(emptyList()) ); - when(mockRouteOps.hasAddress(eq(clusterNamespace), any(), anyLong(), anyLong())).thenReturn( + when(mockRouteOps.hasAddress(any(), eq(clusterNamespace), any(), anyLong(), anyLong())).thenReturn( Future.succeededFuture() ); } @@ -1168,10 +1168,10 @@ private void updateCluster(VertxTestContext context, Kafka originalAssembly, Kaf when(mockDepOps.getAsync(clusterNamespace, EntityOperator.entityOperatorName(clusterName))).thenReturn( Future.succeededFuture(originalEntityOperator.generateDeployment(true, Collections.EMPTY_MAP, null, null)) ); - when(mockDepOps.waitForObserved(anyString(), anyString(), anyLong(), anyLong())).thenReturn( + when(mockDepOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn( Future.succeededFuture() ); - when(mockDepOps.readiness(anyString(), anyString(), anyLong(), anyLong())).thenReturn( + when(mockDepOps.readiness(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn( Future.succeededFuture() ); } @@ -1183,10 +1183,10 @@ private void updateCluster(VertxTestContext context, Kafka originalAssembly, Kaf when(mockDepOps.getAsync(clusterNamespace, EntityOperator.entityOperatorName(clusterName))).thenReturn( Future.succeededFuture(originalCruiseControl.generateDeployment(true, Collections.EMPTY_MAP, null, null)) ); - when(mockDepOps.waitForObserved(anyString(), anyString(), anyLong(), anyLong())).thenReturn( + when(mockDepOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn( Future.succeededFuture() ); - when(mockDepOps.readiness(anyString(), anyString(), anyLong(), anyLong())).thenReturn( + when(mockDepOps.readiness(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn( Future.succeededFuture() ); } @@ -1198,10 +1198,10 @@ private void updateCluster(VertxTestContext context, Kafka originalAssembly, Kaf when(mockDepOps.getAsync(clusterNamespace, KafkaExporter.kafkaExporterName(clusterName))).thenReturn( Future.succeededFuture(originalKafkaExporter.generateDeployment(true, null, null)) ); - when(mockDepOps.waitForObserved(anyString(), anyString(), anyLong(), anyLong())).thenReturn( + when(mockDepOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn( Future.succeededFuture() ); - when(mockDepOps.readiness(anyString(), anyString(), anyLong(), anyLong())).thenReturn( + when(mockDepOps.readiness(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn( Future.succeededFuture() ); } @@ -1211,37 +1211,37 @@ private void updateCluster(VertxTestContext context, Kafka originalAssembly, Kaf doAnswer(invocation -> { metricsCms.add(invocation.getArgument(1)); return Future.succeededFuture(); - }).when(mockCmOps).reconcile(eq(clusterNamespace), any(), any()); + }).when(mockCmOps).reconcile(any(), eq(clusterNamespace), any(), any()); Set logCms = set(); doAnswer(invocation -> { logCms.add(invocation.getArgument(1)); return Future.succeededFuture(); - }).when(mockCmOps).reconcile(eq(clusterNamespace), any(), any()); + 
}).when(mockCmOps).reconcile(any(), eq(clusterNamespace), any(), any()); // Mock Service patch (both service and headless service ArgumentCaptor patchedServicesCaptor = ArgumentCaptor.forClass(String.class); - when(mockServiceOps.reconcile(eq(clusterNamespace), patchedServicesCaptor.capture(), any())).thenReturn(Future.succeededFuture()); + when(mockServiceOps.reconcile(any(), eq(clusterNamespace), patchedServicesCaptor.capture(), any())).thenReturn(Future.succeededFuture()); // Mock Secrets patch - when(mockSecretOps.reconcile(eq(clusterNamespace), any(), any())).thenReturn(Future.succeededFuture()); + when(mockSecretOps.reconcile(any(), eq(clusterNamespace), any(), any())).thenReturn(Future.succeededFuture()); // Mock NetworkPolicy patch - when(mockPolicyOps.reconcile(eq(clusterNamespace), any(), any())).thenReturn(Future.succeededFuture()); + when(mockPolicyOps.reconcile(any(), eq(clusterNamespace), any(), any())).thenReturn(Future.succeededFuture()); // Mock PodDisruptionBudget patch - when(mockPdbOps.reconcile(eq(clusterNamespace), any(), any())).thenReturn(Future.succeededFuture()); + when(mockPdbOps.reconcile(any(), eq(clusterNamespace), any(), any())).thenReturn(Future.succeededFuture()); // Mock StatefulSet patch - when(mockZsOps.reconcile(anyString(), anyString(), any())).thenAnswer(invocation -> { - StatefulSet sts = invocation.getArgument(2); + when(mockZsOps.reconcile(any(), anyString(), anyString(), any())).thenAnswer(invocation -> { + StatefulSet sts = invocation.getArgument(3); return Future.succeededFuture(ReconcileResult.patched(sts)); }); - when(mockKsOps.reconcile(anyString(), anyString(), any())).thenAnswer(invocation -> { - StatefulSet sts = invocation.getArgument(2); + when(mockKsOps.reconcile(any(), anyString(), anyString(), any())).thenAnswer(invocation -> { + StatefulSet sts = invocation.getArgument(3); return Future.succeededFuture(ReconcileResult.patched(sts)); }); - when(mockZsOps.maybeRollingUpdate(any(), any(Function.class))).thenReturn(Future.succeededFuture()); - when(mockKsOps.maybeRollingUpdate(any(), any(Function.class))).thenReturn(Future.succeededFuture()); + when(mockZsOps.maybeRollingUpdate(any(), any(), any(Function.class))).thenReturn(Future.succeededFuture()); + when(mockKsOps.maybeRollingUpdate(any(), any(), any(Function.class))).thenReturn(Future.succeededFuture()); when(mockZsOps.getAsync(clusterNamespace, ZookeeperCluster.zookeeperClusterName(clusterName))).thenReturn( Future.succeededFuture(originalZookeeperCluster.generateStatefulSet(openShift, null, null)) @@ -1250,27 +1250,27 @@ private void updateCluster(VertxTestContext context, Kafka originalAssembly, Kaf // Mock StatefulSet scaleUp ArgumentCaptor scaledUpCaptor = ArgumentCaptor.forClass(String.class); - when(mockZsOps.scaleUp(anyString(), scaledUpCaptor.capture(), anyInt())).thenReturn( + when(mockZsOps.scaleUp(any(), anyString(), scaledUpCaptor.capture(), anyInt())).thenReturn( Future.succeededFuture(42) ); // Mock StatefulSet scaleDown ArgumentCaptor scaledDownCaptor = ArgumentCaptor.forClass(String.class); - when(mockZsOps.scaleDown(anyString(), scaledDownCaptor.capture(), anyInt())).thenReturn( + when(mockZsOps.scaleDown(any(), anyString(), scaledDownCaptor.capture(), anyInt())).thenReturn( Future.succeededFuture(42) ); //ArgumentCaptor scaledUpCaptor = ArgumentCaptor.forClass(String.class); - when(mockKsOps.scaleUp(anyString(), scaledUpCaptor.capture(), anyInt())).thenReturn( + when(mockKsOps.scaleUp(any(), anyString(), scaledUpCaptor.capture(), anyInt())).thenReturn( 
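// --- Editorial sketch (illustrative, not part of the patch): with the extra Reconciliation
// --- parameter, Mockito stubs on the resource operators add a leading any() matcher and any
// --- invocation.getArgument(n) index shifts by one. Rough shape of the updated Secret stub
// --- from the hunks above, assuming mockSecretOps and secretsMap come from the surrounding
// --- test; the returned ReconcileResult is illustrative only:
when(mockSecretOps.reconcile(any(), anyString(), anyString(), any())).thenAnswer(invocation -> {
    Secret desired = invocation.getArgument(3);   // was getArgument(2) before the new first parameter
    if (desired != null) {
        secretsMap.put(desired.getMetadata().getName(), desired);
    }
    return Future.succeededFuture(ReconcileResult.patched(desired));
});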
Future.succeededFuture(42) ); // Mock StatefulSet scaleDown //ArgumentCaptor scaledDownCaptor = ArgumentCaptor.forClass(String.class); - when(mockKsOps.scaleDown(anyString(), scaledDownCaptor.capture(), anyInt())).thenReturn( + when(mockKsOps.scaleDown(any(), anyString(), scaledDownCaptor.capture(), anyInt())).thenReturn( Future.succeededFuture(42) ); // Mock Deployment patch ArgumentCaptor depCaptor = ArgumentCaptor.forClass(String.class); - when(mockDepOps.reconcile(anyString(), depCaptor.capture(), any())).thenReturn(Future.succeededFuture()); + when(mockDepOps.reconcile(any(), anyString(), depCaptor.capture(), any())).thenReturn(Future.succeededFuture()); KafkaAssemblyOperator ops = new KafkaAssemblyOperator(vertx, new PlatformFeaturesAvailability(openShift, kubernetesVersion), certManager, @@ -1295,23 +1295,23 @@ public Future waitForQuiescence(StatefulSet sts) { .onComplete(context.succeeding(v -> context.verify(() -> { // rolling restart Set expectedRollingRestarts = set(); - if (KafkaSetOperator.needsRollingUpdate( - new StatefulSetDiff(originalKafkaCluster.generateStatefulSet(openShift, null, null), + if (KafkaSetOperator.needsRollingUpdate(Reconciliation.DUMMY_RECONCILIATION, + new StatefulSetDiff(Reconciliation.DUMMY_RECONCILIATION, originalKafkaCluster.generateStatefulSet(openShift, null, null), updatedKafkaCluster.generateStatefulSet(openShift, null, null)))) { expectedRollingRestarts.add(originalKafkaCluster.getName()); } - if (ZookeeperSetOperator.needsRollingUpdate( - new StatefulSetDiff(originalZookeeperCluster.generateStatefulSet(openShift, null, null), + if (ZookeeperSetOperator.needsRollingUpdate(Reconciliation.DUMMY_RECONCILIATION, + new StatefulSetDiff(Reconciliation.DUMMY_RECONCILIATION, originalZookeeperCluster.generateStatefulSet(openShift, null, null), updatedZookeeperCluster.generateStatefulSet(openShift, null, null)))) { expectedRollingRestarts.add(originalZookeeperCluster.getName()); } // Check that ZK scale-up happens when it should boolean zkScaledUp = updatedAssembly.getSpec().getZookeeper().getReplicas() > originalAssembly.getSpec().getZookeeper().getReplicas(); - verify(mockZsOps, times(zkScaledUp ? 1 : 0)).scaleUp(anyString(), scaledUpCaptor.capture(), anyInt()); + verify(mockZsOps, times(zkScaledUp ? 
1 : 0)).scaleUp(any(), anyString(), scaledUpCaptor.capture(), anyInt()); // No metrics config => no CMs created - verify(mockCmOps, never()).createOrUpdate(any()); + verify(mockCmOps, never()).createOrUpdate(any(), any()); async.flag(); }))); @@ -1341,7 +1341,7 @@ public void testReconcile(Params params, VertxTestContext context) { when(mockKafkaOps.get(eq(kafkaNamespace), eq("bar"))).thenReturn(bar); when(mockKafkaOps.getAsync(eq(kafkaNamespace), eq("foo"))).thenReturn(Future.succeededFuture(foo)); when(mockKafkaOps.getAsync(eq(kafkaNamespace), eq("bar"))).thenReturn(Future.succeededFuture(bar)); - when(mockKafkaOps.updateStatusAsync(any(Kafka.class))).thenReturn(Future.succeededFuture()); + when(mockKafkaOps.updateStatusAsync(any(), any(Kafka.class))).thenReturn(Future.succeededFuture()); // providing certificates Secrets for existing clusters List fooSecrets = ResourceUtils.createKafkaInitialSecrets(kafkaNamespace, "foo"); @@ -1349,27 +1349,27 @@ public void testReconcile(Params params, VertxTestContext context) { List barSecrets = ResourceUtils.createKafkaSecretsWithReplicas(kafkaNamespace, "bar", bar.getSpec().getKafka().getReplicas(), bar.getSpec().getZookeeper().getReplicas()); - ClusterCa barClusterCa = ResourceUtils.createInitialClusterCa("bar", - findSecretWithName(barSecrets, AbstractModel.clusterCaCertSecretName("bar")), - findSecretWithName(barSecrets, AbstractModel.clusterCaKeySecretName("bar"))); - ClientsCa barClientsCa = ResourceUtils.createInitialClientsCa("bar", - findSecretWithName(barSecrets, KafkaCluster.clientsCaCertSecretName("bar")), - findSecretWithName(barSecrets, KafkaCluster.clientsCaKeySecretName("bar"))); + ClusterCa barClusterCa = ResourceUtils.createInitialClusterCa(Reconciliation.DUMMY_RECONCILIATION, + "bar", + findSecretWithName(barSecrets, AbstractModel.clusterCaCertSecretName("bar")), findSecretWithName(barSecrets, AbstractModel.clusterCaKeySecretName("bar"))); + ClientsCa barClientsCa = ResourceUtils.createInitialClientsCa(Reconciliation.DUMMY_RECONCILIATION, + "bar", + findSecretWithName(barSecrets, KafkaCluster.clientsCaCertSecretName("bar")), findSecretWithName(barSecrets, KafkaCluster.clientsCaKeySecretName("bar"))); // providing the list of ALL StatefulSets for all the Kafka clusters Labels newLabels = Labels.forStrimziKind(Kafka.RESOURCE_KIND); when(mockKsOps.list(eq(kafkaNamespace), eq(newLabels))).thenReturn( - asList(KafkaCluster.fromCrd(bar, VERSIONS).generateStatefulSet(openShift, null, null)) + asList(KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, bar, VERSIONS).generateStatefulSet(openShift, null, null)) ); when(mockSecretOps.get(eq(kafkaNamespace), eq(AbstractModel.clusterCaCertSecretName(foo.getMetadata().getName())))) .thenReturn( fooSecrets.get(0)); - when(mockSecretOps.reconcile(eq(kafkaNamespace), eq(AbstractModel.clusterCaCertSecretName(foo.getMetadata().getName())), any(Secret.class))).thenReturn(Future.succeededFuture()); + when(mockSecretOps.reconcile(any(), eq(kafkaNamespace), eq(AbstractModel.clusterCaCertSecretName(foo.getMetadata().getName())), any(Secret.class))).thenReturn(Future.succeededFuture()); // providing the list StatefulSets for already "existing" Kafka clusters Labels barLabels = Labels.forStrimziCluster("bar"); - KafkaCluster barCluster = KafkaCluster.fromCrd(bar, VERSIONS); + KafkaCluster barCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, bar, VERSIONS); when(mockKsOps.list(eq(kafkaNamespace), eq(barLabels))).thenReturn( asList(barCluster.generateStatefulSet(openShift, null, 
null)) ); @@ -1381,7 +1381,7 @@ public void testReconcile(Params params, VertxTestContext context) { barClusterCa.caCertSecret())) ); when(mockSecretOps.get(eq(kafkaNamespace), eq(AbstractModel.clusterCaCertSecretName(bar.getMetadata().getName())))).thenReturn(barSecrets.get(0)); - when(mockSecretOps.reconcile(eq(kafkaNamespace), eq(AbstractModel.clusterCaCertSecretName(bar.getMetadata().getName())), any(Secret.class))).thenReturn(Future.succeededFuture()); + when(mockSecretOps.reconcile(any(), eq(kafkaNamespace), eq(AbstractModel.clusterCaCertSecretName(bar.getMetadata().getName())), any(Secret.class))).thenReturn(Future.succeededFuture()); Checkpoint fooAsync = context.checkpoint(); Checkpoint barAsync = context.checkpoint(); @@ -1435,29 +1435,29 @@ public void testReconcileAllNamespaces(Params params, VertxTestContext context) when(mockKafkaOps.get(eq("namespace2"), eq("bar"))).thenReturn(bar); when(mockKafkaOps.getAsync(eq("namespace1"), eq("foo"))).thenReturn(Future.succeededFuture(foo)); when(mockKafkaOps.getAsync(eq("namespace2"), eq("bar"))).thenReturn(Future.succeededFuture(bar)); - when(mockKafkaOps.updateStatusAsync(any(Kafka.class))).thenReturn(Future.succeededFuture()); + when(mockKafkaOps.updateStatusAsync(any(), any(Kafka.class))).thenReturn(Future.succeededFuture()); // providing certificates Secrets for existing clusters List fooSecrets = ResourceUtils.createKafkaInitialSecrets("namespace1", "foo"); List barSecrets = ResourceUtils.createKafkaSecretsWithReplicas("namespace2", "bar", bar.getSpec().getKafka().getReplicas(), bar.getSpec().getZookeeper().getReplicas()); - ClusterCa barClusterCa = ResourceUtils.createInitialClusterCa("bar", - findSecretWithName(barSecrets, AbstractModel.clusterCaCertSecretName("bar")), - findSecretWithName(barSecrets, AbstractModel.clusterCaKeySecretName("bar"))); - ClientsCa barClientsCa = ResourceUtils.createInitialClientsCa("bar", - findSecretWithName(barSecrets, KafkaCluster.clientsCaCertSecretName("bar")), - findSecretWithName(barSecrets, KafkaCluster.clientsCaKeySecretName("bar"))); + ClusterCa barClusterCa = ResourceUtils.createInitialClusterCa(Reconciliation.DUMMY_RECONCILIATION, + "bar", + findSecretWithName(barSecrets, AbstractModel.clusterCaCertSecretName("bar")), findSecretWithName(barSecrets, AbstractModel.clusterCaKeySecretName("bar"))); + ClientsCa barClientsCa = ResourceUtils.createInitialClientsCa(Reconciliation.DUMMY_RECONCILIATION, + "bar", + findSecretWithName(barSecrets, KafkaCluster.clientsCaCertSecretName("bar")), findSecretWithName(barSecrets, KafkaCluster.clientsCaKeySecretName("bar"))); // providing the list of ALL StatefulSets for all the Kafka clusters Labels newLabels = Labels.forStrimziKind(Kafka.RESOURCE_KIND); when(mockKsOps.list(eq("*"), eq(newLabels))).thenReturn( - asList(KafkaCluster.fromCrd(bar, VERSIONS).generateStatefulSet(openShift, null, null)) + asList(KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, bar, VERSIONS).generateStatefulSet(openShift, null, null)) ); // providing the list StatefulSets for already "existing" Kafka clusters Labels barLabels = Labels.forStrimziCluster("bar"); - KafkaCluster barCluster = KafkaCluster.fromCrd(bar, VERSIONS); + KafkaCluster barCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, bar, VERSIONS); when(mockKsOps.list(eq("*"), eq(barLabels))).thenReturn( asList(barCluster.generateStatefulSet(openShift, null, null)) ); diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaBridgeAssemblyOperatorTest.java 
b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaBridgeAssemblyOperatorTest.java index 1729552656..bb326e982b 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaBridgeAssemblyOperatorTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaBridgeAssemblyOperatorTest.java @@ -79,7 +79,7 @@ public class KafkaBridgeAssemblyOperatorTest { protected static Vertx vertx; private static final String METRICS_CONFIG = "{\"foo\":\"bar\"}"; - private static final String LOGGING_CONFIG = AbstractModel.getOrderedProperties("kafkaBridgeDefaultLoggingProperties") + private static final String LOGGING_CONFIG = AbstractModel.getOrderedProperties(Reconciliation.DUMMY_RECONCILIATION, "kafkaBridgeDefaultLoggingProperties") .asPairsWithComment("Do not change this generated file. Logging can be configured in the corresponding Kubernetes resource."); private static final String BOOTSTRAP_SERVERS = "foo-kafka:9092"; @@ -121,29 +121,29 @@ public void testCreateOrUpdateCreatesCluster(VertxTestContext context) { when(mockBridgeOps.get(anyString(), anyString())).thenReturn(kb); ArgumentCaptor serviceCaptor = ArgumentCaptor.forClass(Service.class); - when(mockServiceOps.reconcile(anyString(), anyString(), serviceCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockServiceOps.reconcile(any(), anyString(), anyString(), serviceCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor dcCaptor = ArgumentCaptor.forClass(Deployment.class); - when(mockDcOps.reconcile(anyString(), anyString(), dcCaptor.capture())).thenReturn(Future.succeededFuture()); - when(mockDcOps.scaleUp(anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); - when(mockDcOps.scaleDown(anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); - when(mockDcOps.readiness(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockDcOps.waitForObserved(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDcOps.reconcile(any(), anyString(), anyString(), dcCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockDcOps.scaleUp(any(), anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); + when(mockDcOps.scaleDown(any(), anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); + when(mockDcOps.readiness(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDcOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); ArgumentCaptor pdbCaptor = ArgumentCaptor.forClass(PodDisruptionBudget.class); - when(mockPdbOps.reconcile(anyString(), any(), pdbCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockPdbOps.reconcile(any(), anyString(), any(), pdbCaptor.capture())).thenReturn(Future.succeededFuture()); - when(mockCmOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); + when(mockCmOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); ArgumentCaptor bridgeCaptor = ArgumentCaptor.forClass(KafkaBridge.class); - when(mockBridgeOps.updateStatusAsync(bridgeCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockBridgeOps.updateStatusAsync(any(), 
bridgeCaptor.capture())).thenReturn(Future.succeededFuture()); KafkaBridgeAssemblyOperator ops = new KafkaBridgeAssemblyOperator(vertx, new PlatformFeaturesAvailability(true, kubernetesVersion), new MockCertManager(), new PasswordGenerator(10, "a", "a"), supplier, ResourceUtils.dummyClusterOperatorConfig(VERSIONS)); - KafkaBridgeCluster bridge = KafkaBridgeCluster.fromCrd(kb, + KafkaBridgeCluster bridge = KafkaBridgeCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kb, VERSIONS); Checkpoint async = context.checkpoint(); @@ -205,36 +205,36 @@ public void testCreateOrUpdateWithNoDiffCausesNoChanges(VertxTestContext context KafkaBridge kb = ResourceUtils.createKafkaBridge(kbNamespace, kbName, image, 1, BOOTSTRAP_SERVERS, KAFKA_BRIDGE_PRODUCER_SPEC, KAFKA_BRIDGE_CONSUMER_SPEC, KAFKA_BRIDGE_HTTP_SPEC, true); - KafkaBridgeCluster bridge = KafkaBridgeCluster.fromCrd(kb, + KafkaBridgeCluster bridge = KafkaBridgeCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kb, VERSIONS); when(mockBridgeOps.get(kbNamespace, kbName)).thenReturn(kb); when(mockBridgeOps.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture(kb)); - when(mockBridgeOps.updateStatusAsync(any(KafkaBridge.class))).thenReturn(Future.succeededFuture()); + when(mockBridgeOps.updateStatusAsync(any(), any(KafkaBridge.class))).thenReturn(Future.succeededFuture()); when(mockServiceOps.get(kbNamespace, bridge.getName())).thenReturn(bridge.generateService()); when(mockDcOps.get(kbNamespace, bridge.getName())).thenReturn(bridge.generateDeployment(new HashMap(), true, null, null)); - when(mockDcOps.readiness(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockDcOps.waitForObserved(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDcOps.readiness(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDcOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); ArgumentCaptor serviceNameCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor serviceCaptor = ArgumentCaptor.forClass(Service.class); - when(mockServiceOps.reconcile(eq(kbNamespace), serviceNameCaptor.capture(), serviceCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockServiceOps.reconcile(any(), eq(kbNamespace), serviceNameCaptor.capture(), serviceCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor dcNameCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor dcCaptor = ArgumentCaptor.forClass(Deployment.class); - when(mockDcOps.reconcile(eq(kbNamespace), dcNameCaptor.capture(), dcCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockDcOps.reconcile(any(), eq(kbNamespace), dcNameCaptor.capture(), dcCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor dcScaleUpNameCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor dcScaleUpReplicasCaptor = ArgumentCaptor.forClass(Integer.class); - when(mockDcOps.scaleUp(eq(kbNamespace), dcScaleUpNameCaptor.capture(), dcScaleUpReplicasCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockDcOps.scaleUp(any(), eq(kbNamespace), dcScaleUpNameCaptor.capture(), dcScaleUpReplicasCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor dcScaleDownNameCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor dcScaleDownReplicasCaptor = ArgumentCaptor.forClass(Integer.class); - when(mockDcOps.scaleDown(eq(kbNamespace), 
dcScaleDownNameCaptor.capture(), dcScaleDownReplicasCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockDcOps.scaleDown(any(), eq(kbNamespace), dcScaleDownNameCaptor.capture(), dcScaleDownReplicasCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor pdbCaptor = ArgumentCaptor.forClass(PodDisruptionBudget.class); - when(mockPdbOps.reconcile(anyString(), any(), pdbCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockPdbOps.reconcile(any(), anyString(), any(), pdbCaptor.capture())).thenReturn(Future.succeededFuture()); - when(mockCmOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); + when(mockCmOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); KafkaBridgeAssemblyOperator ops = new KafkaBridgeAssemblyOperator(vertx, new PlatformFeaturesAvailability(true, kubernetesVersion), new MockCertManager(), new PasswordGenerator(10, "a", "a"), @@ -284,38 +284,38 @@ public void testCreateOrUpdateUpdatesCluster(VertxTestContext context) { metricsCmP.put("foo", "bar"); KafkaBridge kb = ResourceUtils.createKafkaBridge(kbNamespace, kbName, image, 1, BOOTSTRAP_SERVERS, KAFKA_BRIDGE_PRODUCER_SPEC, KAFKA_BRIDGE_CONSUMER_SPEC, KAFKA_BRIDGE_HTTP_SPEC, true); - KafkaBridgeCluster bridge = KafkaBridgeCluster.fromCrd(kb, + KafkaBridgeCluster bridge = KafkaBridgeCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kb, VERSIONS); kb.getSpec().setImage("some/different:image"); // Change the image to generate some diff when(mockBridgeOps.get(kbNamespace, kbName)).thenReturn(kb); when(mockBridgeOps.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture(kb)); - when(mockBridgeOps.updateStatusAsync(any(KafkaBridge.class))).thenReturn(Future.succeededFuture()); + when(mockBridgeOps.updateStatusAsync(any(), any(KafkaBridge.class))).thenReturn(Future.succeededFuture()); when(mockServiceOps.get(kbNamespace, bridge.getName())).thenReturn(bridge.generateService()); when(mockDcOps.get(kbNamespace, bridge.getName())).thenReturn(bridge.generateDeployment(new HashMap(), true, null, null)); - when(mockDcOps.readiness(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockDcOps.waitForObserved(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDcOps.readiness(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDcOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); ArgumentCaptor serviceNameCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor serviceCaptor = ArgumentCaptor.forClass(Service.class); - when(mockServiceOps.reconcile(eq(kbNamespace), serviceNameCaptor.capture(), serviceCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockServiceOps.reconcile(any(), eq(kbNamespace), serviceNameCaptor.capture(), serviceCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor dcNameCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor dcCaptor = ArgumentCaptor.forClass(Deployment.class); - when(mockDcOps.reconcile(eq(kbNamespace), dcNameCaptor.capture(), dcCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockDcOps.reconcile(any(), eq(kbNamespace), dcNameCaptor.capture(), dcCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor dcScaleUpNameCaptor = 
ArgumentCaptor.forClass(String.class); ArgumentCaptor dcScaleUpReplicasCaptor = ArgumentCaptor.forClass(Integer.class); - when(mockDcOps.scaleUp(eq(kbNamespace), dcScaleUpNameCaptor.capture(), dcScaleUpReplicasCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockDcOps.scaleUp(any(), eq(kbNamespace), dcScaleUpNameCaptor.capture(), dcScaleUpReplicasCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor dcScaleDownNameCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor dcScaleDownReplicasCaptor = ArgumentCaptor.forClass(Integer.class); - when(mockDcOps.scaleDown(eq(kbNamespace), dcScaleDownNameCaptor.capture(), dcScaleDownReplicasCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockDcOps.scaleDown(any(), eq(kbNamespace), dcScaleDownNameCaptor.capture(), dcScaleDownReplicasCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor pdbCaptor = ArgumentCaptor.forClass(PodDisruptionBudget.class); - when(mockPdbOps.reconcile(anyString(), any(), pdbCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockPdbOps.reconcile(any(), anyString(), any(), pdbCaptor.capture())).thenReturn(Future.succeededFuture()); - when(mockCmOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); + when(mockCmOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); // Mock CM get when(mockBridgeOps.get(kbNamespace, kbName)).thenReturn(kb); @@ -341,7 +341,7 @@ public void testCreateOrUpdateUpdatesCluster(VertxTestContext context) { doAnswer(invocation -> { metricsCms.add(invocation.getArgument(1)); return Future.succeededFuture(); - }).when(mockCmOps).reconcile(eq(kbNamespace), anyString(), any()); + }).when(mockCmOps).reconcile(any(), eq(kbNamespace), anyString(), any()); KafkaBridgeAssemblyOperator ops = new KafkaBridgeAssemblyOperator(vertx, new PlatformFeaturesAvailability(true, kubernetesVersion), @@ -353,7 +353,7 @@ public void testCreateOrUpdateUpdatesCluster(VertxTestContext context) { ops.createOrUpdate(new Reconciliation("test-trigger", KafkaBridge.RESOURCE_KIND, kbNamespace, kbName), kb) .onComplete(context.succeeding(v -> context.verify(() -> { - KafkaBridgeCluster compareTo = KafkaBridgeCluster.fromCrd(kb, + KafkaBridgeCluster compareTo = KafkaBridgeCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kb, VERSIONS); // Verify service @@ -382,7 +382,7 @@ public void testCreateOrUpdateUpdatesCluster(VertxTestContext context) { assertThat(dcScaleUpNameCaptor.getAllValues(), hasSize(1)); // No metrics config => no CMs created - verify(mockCmOps, never()).createOrUpdate(any()); + verify(mockCmOps, never()).createOrUpdate(any(), any()); async.flag(); }))); } @@ -403,42 +403,42 @@ public void testCreateOrUpdateThrowsWhenCreateServiceThrows(VertxTestContext con metricsCm.put("foo", "bar"); KafkaBridge kb = ResourceUtils.createKafkaBridge(kbNamespace, kbName, image, 1, BOOTSTRAP_SERVERS, KAFKA_BRIDGE_PRODUCER_SPEC, KAFKA_BRIDGE_CONSUMER_SPEC, KAFKA_BRIDGE_HTTP_SPEC, true); - KafkaBridgeCluster bridge = KafkaBridgeCluster.fromCrd(kb, + KafkaBridgeCluster bridge = KafkaBridgeCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kb, VERSIONS); kb.getSpec().setImage("some/different:image"); // Change the image to generate some differences when(mockBridgeOps.get(kbNamespace, kbName)).thenReturn(kb); when(mockBridgeOps.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture(kb)); - 
when(mockBridgeOps.updateStatusAsync(any(KafkaBridge.class))).thenReturn(Future.succeededFuture()); + when(mockBridgeOps.updateStatusAsync(any(), any(KafkaBridge.class))).thenReturn(Future.succeededFuture()); when(mockServiceOps.get(kbNamespace, bridge.getName())).thenReturn(bridge.generateService()); when(mockDcOps.get(kbNamespace, bridge.getName())).thenReturn(bridge.generateDeployment(new HashMap(), true, null, null)); - when(mockDcOps.readiness(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockDcOps.waitForObserved(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDcOps.readiness(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDcOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); ArgumentCaptor serviceNamespaceCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor serviceNameCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor serviceCaptor = ArgumentCaptor.forClass(Service.class); - when(mockServiceOps.reconcile(serviceNamespaceCaptor.capture(), serviceNameCaptor.capture(), serviceCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockServiceOps.reconcile(any(), serviceNamespaceCaptor.capture(), serviceNameCaptor.capture(), serviceCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor dcNamespaceCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor dcNameCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor dcCaptor = ArgumentCaptor.forClass(Deployment.class); - when(mockDcOps.reconcile(dcNamespaceCaptor.capture(), dcNameCaptor.capture(), dcCaptor.capture())).thenReturn(Future.failedFuture("Failed")); + when(mockDcOps.reconcile(any(), dcNamespaceCaptor.capture(), dcNameCaptor.capture(), dcCaptor.capture())).thenReturn(Future.failedFuture("Failed")); ArgumentCaptor dcScaleUpNamespaceCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor dcScaleUpNameCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor dcScaleUpReplicasCaptor = ArgumentCaptor.forClass(Integer.class); - when(mockDcOps.scaleUp(dcScaleUpNamespaceCaptor.capture(), dcScaleUpNameCaptor.capture(), dcScaleUpReplicasCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockDcOps.scaleUp(any(), dcScaleUpNamespaceCaptor.capture(), dcScaleUpNameCaptor.capture(), dcScaleUpReplicasCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor dcScaleDownNamespaceCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor dcScaleDownNameCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor dcScaleDownReplicasCaptor = ArgumentCaptor.forClass(Integer.class); - when(mockDcOps.scaleDown(dcScaleDownNamespaceCaptor.capture(), dcScaleDownNameCaptor.capture(), dcScaleDownReplicasCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockDcOps.scaleDown(any(), dcScaleDownNamespaceCaptor.capture(), dcScaleDownNameCaptor.capture(), dcScaleDownReplicasCaptor.capture())).thenReturn(Future.succeededFuture()); - when(mockPdbOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture()); + when(mockPdbOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture()); - when(mockBridgeOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new KafkaBridge()))); - when(mockCmOps.reconcile(anyString(), any(), 
any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); + when(mockBridgeOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new KafkaBridge()))); + when(mockCmOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); KafkaBridgeAssemblyOperator ops = new KafkaBridgeAssemblyOperator(vertx, new PlatformFeaturesAvailability(true, kubernetesVersion), @@ -467,31 +467,31 @@ public void testCreateOrUpdateWithReplicasScaleUpToOne(VertxTestContext context) KafkaBridge kb = ResourceUtils.createEmptyKafkaBridge(kbNamespace, kbName); kb.getSpec().setReplicas(0); - KafkaBridgeCluster bridge = KafkaBridgeCluster.fromCrd(kb, VERSIONS); + KafkaBridgeCluster bridge = KafkaBridgeCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kb, VERSIONS); kb.getSpec().setReplicas(scaleTo); // Change replicas to create ScaleUp when(mockBridgeOps.get(kbNamespace, kbName)).thenReturn(kb); when(mockBridgeOps.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture(kb)); - when(mockBridgeOps.updateStatusAsync(any(KafkaBridge.class))).thenReturn(Future.succeededFuture()); + when(mockBridgeOps.updateStatusAsync(any(), any(KafkaBridge.class))).thenReturn(Future.succeededFuture()); when(mockServiceOps.get(kbNamespace, bridge.getName())).thenReturn(bridge.generateService()); Deployment dep = bridge.generateDeployment(new HashMap<>(), true, null, null); when(mockDcOps.get(kbNamespace, bridge.getName())).thenReturn(dep); - when(mockDcOps.readiness(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockDcOps.waitForObserved(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDcOps.readiness(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDcOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockServiceOps.reconcile(eq(kbNamespace), any(), any())).thenReturn(Future.succeededFuture()); + when(mockServiceOps.reconcile(any(), eq(kbNamespace), any(), any())).thenReturn(Future.succeededFuture()); - when(mockDcOps.reconcile(eq(kbNamespace), any(), any())).thenReturn(Future.succeededFuture()); + when(mockDcOps.reconcile(any(), eq(kbNamespace), any(), any())).thenReturn(Future.succeededFuture()); doAnswer(i -> Future.succeededFuture(scaleTo)) - .when(mockDcOps).scaleUp(kbNamespace, bridge.getName(), scaleTo); + .when(mockDcOps).scaleUp(any(), eq(kbNamespace), eq(bridge.getName()), eq(scaleTo)); doAnswer(i -> Future.succeededFuture(scaleTo)) - .when(mockDcOps).scaleDown(kbNamespace, bridge.getName(), scaleTo); + .when(mockDcOps).scaleDown(any(), eq(kbNamespace), eq(bridge.getName()), eq(scaleTo)); - when(mockBridgeOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new KafkaBridge()))); - when(mockCmOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); - when(mockPdbOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new PodDisruptionBudget()))); + when(mockBridgeOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new KafkaBridge()))); + when(mockCmOps.reconcile(any(), anyString(), any(), 
any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); + when(mockPdbOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new PodDisruptionBudget()))); KafkaBridgeAssemblyOperator ops = new KafkaBridgeAssemblyOperator(vertx, new PlatformFeaturesAvailability(true, kubernetesVersion), new MockCertManager(), new PasswordGenerator(10, "a", "a"), supplier, ResourceUtils.dummyClusterOperatorConfig(VERSIONS)); @@ -499,7 +499,7 @@ public void testCreateOrUpdateWithReplicasScaleUpToOne(VertxTestContext context) Checkpoint async = context.checkpoint(); ops.createOrUpdate(new Reconciliation("test-trigger", KafkaBridge.RESOURCE_KIND, kbNamespace, kbName), kb) .onComplete(context.succeeding(v -> context.verify(() -> { - verify(mockDcOps).scaleUp(kbNamespace, bridge.getName(), scaleTo); + verify(mockDcOps).scaleUp(any(), eq(kbNamespace), eq(bridge.getName()), eq(scaleTo)); async.flag(); }))); @@ -528,30 +528,30 @@ public void testCreateOrUpdateWithScaleDown(VertxTestContext context) { .withReplicas(scaleTo) .endSpec() .build(); - KafkaBridgeCluster bridge = KafkaBridgeCluster.fromCrd(kb, VERSIONS); + KafkaBridgeCluster bridge = KafkaBridgeCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kb, VERSIONS); when(mockBridgeOps.get(kbNamespace, kbName)).thenReturn(kb); when(mockBridgeOps.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture(kb)); - when(mockBridgeOps.updateStatusAsync(any(KafkaBridge.class))).thenReturn(Future.succeededFuture()); + when(mockBridgeOps.updateStatusAsync(any(), any(KafkaBridge.class))).thenReturn(Future.succeededFuture()); when(mockServiceOps.get(kbNamespace, bridge.getName())).thenReturn(bridge.generateService()); Deployment dep = bridge.generateDeployment(new HashMap<>(), true, null, null); when(mockDcOps.get(kbNamespace, bridge.getName())).thenReturn(dep); - when(mockDcOps.readiness(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockDcOps.waitForObserved(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDcOps.readiness(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDcOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockServiceOps.reconcile(eq(kbNamespace), any(), any())).thenReturn(Future.succeededFuture()); + when(mockServiceOps.reconcile(any(), eq(kbNamespace), any(), any())).thenReturn(Future.succeededFuture()); - when(mockDcOps.reconcile(eq(kbNamespace), any(), any())).thenReturn(Future.succeededFuture()); + when(mockDcOps.reconcile(any(), eq(kbNamespace), any(), any())).thenReturn(Future.succeededFuture()); doAnswer(i -> Future.succeededFuture(scaleTo)) - .when(mockDcOps).scaleUp(kbNamespace, bridge.getName(), scaleTo); + .when(mockDcOps).scaleUp(any(), eq(kbNamespace), eq(bridge.getName()), eq(scaleTo)); doAnswer(i -> Future.succeededFuture(scaleTo)) - .when(mockDcOps).scaleDown(kbNamespace, bridge.getName(), scaleTo); + .when(mockDcOps).scaleDown(any(), eq(kbNamespace), eq(bridge.getName()), eq(scaleTo)); - when(mockBridgeOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new KafkaBridge()))); - when(mockCmOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); - when(mockPdbOps.reconcile(anyString(), any(), 
any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new PodDisruptionBudget()))); + when(mockBridgeOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new KafkaBridge()))); + when(mockCmOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); + when(mockPdbOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new PodDisruptionBudget()))); KafkaBridgeAssemblyOperator ops = new KafkaBridgeAssemblyOperator(vertx, new PlatformFeaturesAvailability(true, kubernetesVersion), new MockCertManager(), new PasswordGenerator(10, "a", "a"), supplier, ResourceUtils.dummyClusterOperatorConfig(VERSIONS)); @@ -559,8 +559,8 @@ public void testCreateOrUpdateWithScaleDown(VertxTestContext context) { Checkpoint async = context.checkpoint(); ops.createOrUpdate(new Reconciliation("test-trigger", KafkaBridge.RESOURCE_KIND, kbNamespace, kbName), scaledDownCluster) .onComplete(context.succeeding(v -> context.verify(() -> { - verify(mockDcOps).scaleUp(kbNamespace, bridge.getName(), scaleTo); - verify(mockDcOps).scaleDown(kbNamespace, bridge.getName(), scaleTo); + verify(mockDcOps).scaleUp(any(), eq(kbNamespace), eq(bridge.getName()), eq(scaleTo)); + verify(mockDcOps).scaleDown(any(), eq(kbNamespace), eq(bridge.getName()), eq(scaleTo)); async.flag(); }))); } @@ -584,7 +584,7 @@ public void testReconcileCallsCreateOrUpdate(VertxTestContext context) { when(mockBridgeOps.listAsync(eq(kbNamespace), any(Optional.class))).thenReturn(Future.succeededFuture(asList(foo, bar))); when(mockBridgeOps.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture(bar)); - when(mockBridgeOps.updateStatusAsync(any(KafkaBridge.class))).thenReturn(Future.succeededFuture()); + when(mockBridgeOps.updateStatusAsync(any(), any(KafkaBridge.class))).thenReturn(Future.succeededFuture()); // when requested ConfigMap for a specific Kafka Bridge cluster when(mockBridgeOps.get(eq(kbNamespace), eq("foo"))).thenReturn(foo); when(mockBridgeOps.get(eq(kbNamespace), eq("bar"))).thenReturn(bar); @@ -592,19 +592,19 @@ public void testReconcileCallsCreateOrUpdate(VertxTestContext context) { // providing the list of ALL Deployments for all the Kafka Bridge clusters Labels newLabels = Labels.forStrimziKind(KafkaBridge.RESOURCE_KIND); when(mockDcOps.list(eq(kbNamespace), eq(newLabels))).thenReturn( - asList(KafkaBridgeCluster.fromCrd(bar, + asList(KafkaBridgeCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, bar, VERSIONS).generateDeployment(new HashMap(), true, null, null))); - when(mockDcOps.readiness(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockDcOps.waitForObserved(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDcOps.readiness(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDcOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); // providing the list Deployments for already "existing" Kafka Bridge clusters Labels barLabels = Labels.forStrimziCluster("bar"); when(mockDcOps.list(eq(kbNamespace), eq(barLabels))).thenReturn( - asList(KafkaBridgeCluster.fromCrd(bar, + asList(KafkaBridgeCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, bar, VERSIONS).generateDeployment(new HashMap(), true, null, null)) ); - 
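// --- Editorial note (not part of the patch): Mockito requires either all raw values or all
// --- argument matchers within a single stub or verify call, so once the leading any() is
// --- added for the Reconciliation, the remaining literal arguments have to be wrapped in
// --- eq(...). Minimal sketch of the scale-up/scale-down pattern from the hunks above,
// --- assuming mockDcOps, kbNamespace, bridge and scaleTo from the surrounding test:
doAnswer(i -> Future.succeededFuture(scaleTo))
        .when(mockDcOps).scaleUp(any(), eq(kbNamespace), eq(bridge.getName()), eq(scaleTo));

verify(mockDcOps).scaleUp(any(), eq(kbNamespace), eq(bridge.getName()), eq(scaleTo));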
when(mockSecretOps.reconcile(eq(kbNamespace), any(), any())).thenReturn(Future.succeededFuture()); + when(mockSecretOps.reconcile(any(), eq(kbNamespace), any(), any())).thenReturn(Future.succeededFuture()); Set createdOrUpdated = new CopyOnWriteArraySet<>(); @@ -658,17 +658,17 @@ public void testCreateClusterStatusNotReady(VertxTestContext context) { when(mockBridgeOps.get(kbNamespace, kbName)).thenReturn(kb); when(mockBridgeOps.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture(kb)); when(mockBridgeOps.get(anyString(), anyString())).thenReturn(kb); - when(mockServiceOps.reconcile(anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); - when(mockDcOps.reconcile(anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); - when(mockDcOps.scaleUp(anyString(), anyString(), anyInt())).thenReturn(Future.failedFuture(failureMsg)); - when(mockDcOps.scaleDown(anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); - when(mockDcOps.readiness(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockDcOps.waitForObserved(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockPdbOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture()); - when(mockCmOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); + when(mockServiceOps.reconcile(any(), anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); + when(mockDcOps.reconcile(any(), anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); + when(mockDcOps.scaleUp(any(), anyString(), anyString(), anyInt())).thenReturn(Future.failedFuture(failureMsg)); + when(mockDcOps.scaleDown(any(), anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); + when(mockDcOps.readiness(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDcOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockPdbOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture()); + when(mockCmOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); ArgumentCaptor bridgeCaptor = ArgumentCaptor.forClass(KafkaBridge.class); - when(mockBridgeOps.updateStatusAsync(bridgeCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockBridgeOps.updateStatusAsync(any(), bridgeCaptor.capture())).thenReturn(Future.succeededFuture()); KafkaBridgeAssemblyOperator ops = new KafkaBridgeAssemblyOperator(vertx, new PlatformFeaturesAvailability(true, kubernetesVersion), new MockCertManager(), new PasswordGenerator(10, "a", "a"), @@ -708,15 +708,15 @@ public void testCreateOrUpdateBridgeZeroReplica(VertxTestContext context) { when(mockBridgeOps.get(kbNamespace, kbName)).thenReturn(kb); when(mockBridgeOps.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture(kb)); when(mockBridgeOps.get(anyString(), anyString())).thenReturn(kb); - when(mockServiceOps.reconcile(anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); - when(mockDcOps.reconcile(anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); - when(mockDcOps.scaleUp(anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); - when(mockDcOps.scaleDown(anyString(), anyString(), 
anyInt())).thenReturn(Future.succeededFuture(42)); - when(mockDcOps.waitForObserved(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockPdbOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture()); - when(mockCmOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); + when(mockServiceOps.reconcile(any(), anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); + when(mockDcOps.reconcile(any(), anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); + when(mockDcOps.scaleUp(any(), anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); + when(mockDcOps.scaleDown(any(), anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); + when(mockDcOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockPdbOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture()); + when(mockCmOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); ArgumentCaptor bridgeCaptor = ArgumentCaptor.forClass(KafkaBridge.class); - when(mockBridgeOps.updateStatusAsync(bridgeCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockBridgeOps.updateStatusAsync(any(), bridgeCaptor.capture())).thenReturn(Future.succeededFuture()); KafkaBridgeAssemblyOperator ops = new KafkaBridgeAssemblyOperator(vertx, new PlatformFeaturesAvailability(true, kubernetesVersion), new MockCertManager(), new PasswordGenerator(10, "a", "a"), @@ -727,7 +727,7 @@ public void testCreateOrUpdateBridgeZeroReplica(VertxTestContext context) { ops.reconcile(new Reconciliation("test-trigger", KafkaBridge.RESOURCE_KIND, kbNamespace, kbName)) .onComplete(context.succeeding(v -> context.verify(() -> { // 0 Replicas - readiness should never get called. 
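// --- Editorial note (not part of the patch): negative verifications follow the same shift,
// --- the leading Reconciliation matcher is added and everything else stays a matcher.
// --- Sketch of the two never() checks appearing in these test classes, assuming mockDcOps
// --- and mockCmOps from the surrounding tests:
verify(mockDcOps, never()).readiness(any(), anyString(), anyString(), anyLong(), anyLong());
verify(mockCmOps, never()).createOrUpdate(any(), any());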
- verify(mockDcOps, never()).readiness(anyString(), anyString(), anyLong(), anyLong()); + verify(mockDcOps, never()).readiness(any(), anyString(), anyString(), anyLong(), anyLong()); assertNull(bridgeCaptor.getValue().getStatus().getUrl()); async.flag(); diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectApiMockTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectApiMockTest.java index 93f4967c1b..eb06d6eefe 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectApiMockTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectApiMockTest.java @@ -5,6 +5,7 @@ package io.strimzi.operator.cluster.operator.assembly; import io.strimzi.operator.common.BackOff; +import io.strimzi.operator.common.Reconciliation; import io.vertx.core.Future; import io.vertx.core.Vertx; import io.vertx.junit5.Checkpoint; @@ -43,7 +44,7 @@ public void testStatusWithBackOffSucceedingImmediately(VertxTestContext context) KafkaConnectApi api = new MockKafkaConnectApi(vertx, statusResults); Checkpoint async = context.checkpoint(); - api.statusWithBackOff(backOff, "some-host", 8083, "some-connector") + api.statusWithBackOff(Reconciliation.DUMMY_RECONCILIATION, backOff, "some-host", 8083, "some-connector") .onComplete(context.succeeding(res -> async.flag())); } @@ -57,7 +58,7 @@ public void testStatusWithBackOffSuccedingEventually(VertxTestContext context) { KafkaConnectApi api = new MockKafkaConnectApi(vertx, statusResults); Checkpoint async = context.checkpoint(); - api.statusWithBackOff(backOff, "some-host", 8083, "some-connector") + api.statusWithBackOff(Reconciliation.DUMMY_RECONCILIATION, backOff, "some-host", 8083, "some-connector") .onComplete(context.succeeding(res -> async.flag())); } @@ -72,7 +73,7 @@ public void testStatusWithBackOffFailingRepeatedly(VertxTestContext context) { KafkaConnectApi api = new MockKafkaConnectApi(vertx, statusResults); Checkpoint async = context.checkpoint(); - api.statusWithBackOff(backOff, "some-host", 8083, "some-connector") + api.statusWithBackOff(Reconciliation.DUMMY_RECONCILIATION, backOff, "some-host", 8083, "some-connector") .onComplete(context.failing(res -> async.flag())); } @@ -84,7 +85,7 @@ public void testStatusWithBackOffOtherExceptionStillFails(VertxTestContext conte KafkaConnectApi api = new MockKafkaConnectApi(vertx, statusResults); Checkpoint async = context.checkpoint(); - api.statusWithBackOff(backOff, "some-host", 8083, "some-connector") + api.statusWithBackOff(Reconciliation.DUMMY_RECONCILIATION, backOff, "some-host", 8083, "some-connector") .onComplete(context.failing(res -> async.flag())); } @@ -97,7 +98,7 @@ public MockKafkaConnectApi(Vertx vertx, Queue>> statu } @Override - public Future> status(String host, int port, String connectorName) { + public Future> status(Reconciliation reconciliation, String host, int port, String connectorName) { return statusResults.remove(); } } diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectApiTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectApiTest.java index 6f2f857503..a597a24682 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectApiTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectApiTest.java @@ -6,6 +6,7 @@ import 
io.strimzi.api.kafka.model.connect.ConnectorPlugin; import io.strimzi.operator.common.BackOff; +import io.strimzi.operator.common.Reconciliation; import io.strimzi.operator.common.model.OrderedProperties; import io.strimzi.test.TestUtils; import io.strimzi.test.annotations.IsolatedTest; @@ -111,7 +112,7 @@ public static void after() { public void test(VertxTestContext context) { KafkaConnectApi client = new KafkaConnectApiImpl(vertx); Checkpoint async = context.checkpoint(); - client.listConnectorPlugins("localhost", PORT) + client.listConnectorPlugins(Reconciliation.DUMMY_RECONCILIATION, "localhost", PORT) .onComplete(context.succeeding(connectorPlugins -> context.verify(() -> { assertThat(connectorPlugins.size(), greaterThanOrEqualTo(2)); @@ -137,7 +138,7 @@ public void test(VertxTestContext context) { .put("tasks.max", "1") .put("file", "/dev/null") .put("topic", "my-topic"); - return client.createOrUpdatePutRequest("localhost", PORT, "test", o); + return client.createOrUpdatePutRequest(Reconciliation.DUMMY_RECONCILIATION, "localhost", PORT, "test", o); }) .onComplete(context.succeeding()) .compose(created -> { @@ -147,7 +148,7 @@ public void test(VertxTestContext context) { Handler handler = new Handler() { @Override public void handle(Long timerId) { - client.status("localhost", PORT, "test").onComplete(result -> { + client.status(Reconciliation.DUMMY_RECONCILIATION, "localhost", PORT, "test").onComplete(result -> { if (result.succeeded()) { Map status = result.result(); if ("RUNNING".equals(((Map) status.getOrDefault("connector", emptyMap())).get("state"))) { @@ -179,7 +180,7 @@ public void handle(Long timerId) { assertThat(an.get("worker_id"), is("localhost:18083")); } }))) - .compose(status -> client.getConnectorConfig(new BackOff(10), "localhost", PORT, "test")) + .compose(status -> client.getConnectorConfig(Reconciliation.DUMMY_RECONCILIATION, new BackOff(10), "localhost", PORT, "test")) .onComplete(context.succeeding(config -> context.verify(() -> { assertThat(config, is(TestUtils.map("connector.class", "FileStreamSource", "file", "/dev/null", @@ -187,7 +188,7 @@ public void handle(Long timerId) { "name", "test", "topic", "my-topic"))); }))) - .compose(config -> client.getConnectorConfig(new BackOff(10), "localhost", PORT, "does-not-exist")) + .compose(config -> client.getConnectorConfig(Reconciliation.DUMMY_RECONCILIATION, new BackOff(10), "localhost", PORT, "does-not-exist")) .onComplete(context.failing(error -> context.verify(() -> { assertThat(error, instanceOf(ConnectRestException.class)); assertThat(((ConnectRestException) error).getStatusCode(), is(404)); @@ -212,7 +213,7 @@ public void handle(Long timerId) { .put("tasks.max", "1") .put("file", "/dev/null") .put("topic", "my-topic"); - return client.createOrUpdatePutRequest("localhost", PORT, "broken", o); + return client.createOrUpdatePutRequest(Reconciliation.DUMMY_RECONCILIATION, "localhost", PORT, "broken", o); }) .onComplete(context.failing(error -> context.verify(() -> { assertThat(error, instanceOf(ConnectRestException.class)); @@ -227,7 +228,7 @@ public void handle(Long timerId) { .put("tasks.max", "dog") .put("file", "/dev/null") .put("topic", "my-topic"); - return client.createOrUpdatePutRequest("localhost", PORT, "broken2", o); + return client.createOrUpdatePutRequest(Reconciliation.DUMMY_RECONCILIATION, "localhost", PORT, "broken2", o); }) .onComplete(context.failing(error -> context.verify(() -> { assertThat(error, instanceOf(ConnectRestException.class)); @@ -238,11 +239,11 @@ public void handle(Long timerId) 
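// --- Editorial sketch (illustrative, not part of the patch): the KafkaConnectApi calls updated
// --- in these hunks (status, statusWithBackOff, listConnectorPlugins, createOrUpdatePutRequest,
// --- getConnectorConfig, delete, updateConnectLoggers) all gained a leading Reconciliation
// --- parameter, and the tests thread the dummy instance through. Assuming vertx and PORT from
// --- the surrounding test class:
KafkaConnectApi client = new KafkaConnectApiImpl(vertx);
client.listConnectorPlugins(Reconciliation.DUMMY_RECONCILIATION, "localhost", PORT)
        .compose(plugins -> client.status(Reconciliation.DUMMY_RECONCILIATION, "localhost", PORT, "test"))
        .onComplete(result -> { /* assertions as in the surrounding test */ });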
{ .compose(createResponse -> client.list("localhost", PORT)) .onComplete(context.succeeding(connectorNames -> context.verify(() -> assertThat(connectorNames, is(singletonList("test")))))) - .compose(connectorNames -> client.delete("localhost", PORT, "test")) + .compose(connectorNames -> client.delete(Reconciliation.DUMMY_RECONCILIATION, "localhost", PORT, "test")) .onComplete(context.succeeding()) .compose(deletedConnector -> client.list("localhost", PORT)) .onComplete(context.succeeding(connectorNames -> assertThat(connectorNames, is(empty())))) - .compose(connectorNames -> client.delete("localhost", PORT, "never-existed")) + .compose(connectorNames -> client.delete(Reconciliation.DUMMY_RECONCILIATION, "localhost", PORT, "never-existed")) .onComplete(context.failing(error -> { assertThat(error, instanceOf(ConnectRestException.class)); assertThat(error.getMessage(), @@ -265,9 +266,9 @@ public void testChangeLoggers(VertxTestContext context) throws InterruptedExcept OrderedProperties ops = new OrderedProperties(); ops.addStringPairs(desired); - client.updateConnectLoggers("localhost", PORT, desired, ops) + client.updateConnectLoggers(Reconciliation.DUMMY_RECONCILIATION, "localhost", PORT, desired, ops) .onComplete(context.succeeding()) - .compose(a -> client.listConnectLoggers("localhost", PORT) + .compose(a -> client.listConnectLoggers(Reconciliation.DUMMY_RECONCILIATION, "localhost", PORT) .onComplete(context.succeeding(map -> context.verify(() -> { assertThat(map.get("org.apache.zookeeper").get("level"), is("WARN")); assertThat(map.get("org.I0Itec.zkclient").get("level"), is("INFO")); diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectAssemblyOperatorMockTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectAssemblyOperatorMockTest.java index 8ec20a846d..dc56fee079 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectAssemblyOperatorMockTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectAssemblyOperatorMockTest.java @@ -59,6 +59,7 @@ import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.fail; +import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyInt; import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.Mockito.mock; @@ -159,7 +160,7 @@ public void testReconcileCreateAndUpdate(VertxTestContext context) { .build()); KafkaConnectApi mock = mock(KafkaConnectApi.class); when(mock.list(anyString(), anyInt())).thenReturn(Future.succeededFuture(emptyList())); - when(mock.listConnectorPlugins(anyString(), anyInt())).thenReturn(Future.succeededFuture(emptyList())); + when(mock.listConnectorPlugins(any(), anyString(), anyInt())).thenReturn(Future.succeededFuture(emptyList())); Checkpoint async = context.checkpoint(); createConnectCluster(context, mock, false) @@ -187,7 +188,7 @@ public void testPauseReconcileUnpause(VertxTestContext context) { .build()); KafkaConnectApi mock = mock(KafkaConnectApi.class); when(mock.list(anyString(), anyInt())).thenReturn(Future.succeededFuture(emptyList())); - when(mock.listConnectorPlugins(anyString(), anyInt())).thenReturn(Future.succeededFuture(emptyList())); + when(mock.listConnectorPlugins(any(), anyString(), anyInt())).thenReturn(Future.succeededFuture(emptyList())); Checkpoint async = 
context.checkpoint(); createConnectCluster(context, mock, true) diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectAssemblyOperatorTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectAssemblyOperatorTest.java index dfa44eedf6..7df783de27 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectAssemblyOperatorTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectAssemblyOperatorTest.java @@ -97,7 +97,7 @@ public class KafkaConnectAssemblyOperatorTest { private static final KafkaVersion.Lookup VERSIONS = KafkaVersionTestUtils.getKafkaVersionLookup(); protected static Vertx vertx; private static final String METRICS_CONFIG = "{\"foo\":\"bar\"}"; - private static final String LOGGING_CONFIG = AbstractModel.getOrderedProperties("kafkaConnectDefaultLoggingProperties") + private static final String LOGGING_CONFIG = AbstractModel.getOrderedProperties(Reconciliation.DUMMY_RECONCILIATION, "kafkaConnectDefaultLoggingProperties") .asPairsWithComment("Do not change this generated file. Logging can be configured in the corresponding Kubernetes resource."); private final KubernetesVersion kubernetesVersion = KubernetesVersion.V1_16; @@ -133,31 +133,31 @@ public void createKafkaConnectCluster(VertxTestContext context, KafkaConnect kc, when(mockConnectS2IOps.getAsync(kc.getMetadata().getNamespace(), kc.getMetadata().getName())).thenReturn(Future.succeededFuture(null)); ArgumentCaptor serviceCaptor = ArgumentCaptor.forClass(Service.class); - when(mockServiceOps.reconcile(anyString(), anyString(), serviceCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockServiceOps.reconcile(any(), anyString(), anyString(), serviceCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor dcCaptor = ArgumentCaptor.forClass(Deployment.class); - when(mockDcOps.reconcile(anyString(), anyString(), dcCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockDcOps.reconcile(any(), anyString(), anyString(), dcCaptor.capture())).thenReturn(Future.succeededFuture()); when(mockDcOps.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture()); - when(mockDcOps.scaleUp(anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); - when(mockDcOps.scaleDown(anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); - when(mockDcOps.readiness(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockDcOps.waitForObserved(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDcOps.scaleUp(any(), anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); + when(mockDcOps.scaleDown(any(), anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); + when(mockDcOps.readiness(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDcOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockCmOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); - when(mockSecretOps.reconcile(anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); + when(mockCmOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new 
ConfigMap()))); + when(mockSecretOps.reconcile(any(), anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); - when(mockPodOps.reconcile(eq(kc.getMetadata().getNamespace()), eq(KafkaConnectResources.buildPodName(kc.getMetadata().getName())), eq(null))).thenReturn(Future.succeededFuture(ReconcileResult.noop(null))); + when(mockPodOps.reconcile(any(), eq(kc.getMetadata().getNamespace()), eq(KafkaConnectResources.buildPodName(kc.getMetadata().getName())), eq(null))).thenReturn(Future.succeededFuture(ReconcileResult.noop(null))); - when(mockBcOps.reconcile(eq(kc.getMetadata().getNamespace()), eq(KafkaConnectResources.buildConfigName(kc.getMetadata().getName())), eq(null))).thenReturn(Future.succeededFuture(ReconcileResult.noop(null))); + when(mockBcOps.reconcile(any(), eq(kc.getMetadata().getNamespace()), eq(KafkaConnectResources.buildConfigName(kc.getMetadata().getName())), eq(null))).thenReturn(Future.succeededFuture(ReconcileResult.noop(null))); ArgumentCaptor npCaptor = ArgumentCaptor.forClass(NetworkPolicy.class); - when(mockNetPolOps.reconcile(eq(kc.getMetadata().getNamespace()), eq(KafkaConnectResources.deploymentName(kc.getMetadata().getName())), npCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); + when(mockNetPolOps.reconcile(any(), eq(kc.getMetadata().getNamespace()), eq(KafkaConnectResources.deploymentName(kc.getMetadata().getName())), npCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); ArgumentCaptor pdbCaptor = ArgumentCaptor.forClass(PodDisruptionBudget.class); - when(mockPdbOps.reconcile(anyString(), any(), pdbCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockPdbOps.reconcile(any(), anyString(), any(), pdbCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor connectCaptor = ArgumentCaptor.forClass(KafkaConnect.class); - when(mockConnectOps.updateStatusAsync(connectCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockConnectOps.updateStatusAsync(any(), connectCaptor.capture())).thenReturn(Future.succeededFuture()); KafkaConnectApi mockConnectClient = mock(KafkaConnectApi.class); when(mockConnectClient.list(anyString(), anyInt())).thenReturn(Future.succeededFuture(emptyList())); @@ -167,13 +167,13 @@ public void createKafkaConnectCluster(VertxTestContext context, KafkaConnect kc, .withType("sink") .withVersion("1.0.0") .build(); - when(mockConnectClient.listConnectorPlugins(anyString(), anyInt())).thenReturn(Future.succeededFuture(singletonList(plugin1))); - when(mockConnectClient.updateConnectLoggers(anyString(), anyInt(), anyString(), any(OrderedProperties.class))).thenReturn(Future.succeededFuture()); + when(mockConnectClient.listConnectorPlugins(any(), anyString(), anyInt())).thenReturn(Future.succeededFuture(singletonList(plugin1))); + when(mockConnectClient.updateConnectLoggers(any(), anyString(), anyInt(), anyString(), any(OrderedProperties.class))).thenReturn(Future.succeededFuture()); KafkaConnectAssemblyOperator ops = new KafkaConnectAssemblyOperator(vertx, new PlatformFeaturesAvailability(true, kubernetesVersion), supplier, ResourceUtils.dummyClusterOperatorConfig(VERSIONS), x -> mockConnectClient); - KafkaConnectCluster connect = KafkaConnectCluster.fromCrd(kc, VERSIONS); + KafkaConnectCluster connect = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kc, VERSIONS); Checkpoint async = context.checkpoint(); ops.reconcile(new Reconciliation("test-trigger", KafkaConnect.RESOURCE_KIND, 
kc.getMetadata().getNamespace(), kc.getMetadata().getName())) @@ -272,44 +272,44 @@ public void testCreateOrUpdateDoesNotUpdateWithNoDiff(VertxTestContext context) String kcNamespace = "test"; KafkaConnect kc = ResourceUtils.createEmptyKafkaConnect(kcNamespace, kcName); - KafkaConnectCluster connect = KafkaConnectCluster.fromCrd(kc, VERSIONS); + KafkaConnectCluster connect = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kc, VERSIONS); when(mockConnectorOps.listAsync(anyString(), any(Optional.class))).thenReturn(Future.succeededFuture(emptyList())); when(mockConnectOps.get(kcNamespace, kcName)).thenReturn(kc); when(mockConnectOps.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture(kc)); - when(mockConnectOps.updateStatusAsync(any(KafkaConnect.class))).thenReturn(Future.succeededFuture()); + when(mockConnectOps.updateStatusAsync(any(), any(KafkaConnect.class))).thenReturn(Future.succeededFuture()); when(mockConnectS2IOps.getAsync(kcNamespace, kcName)).thenReturn(Future.succeededFuture(null)); when(mockServiceOps.get(kcNamespace, connect.getName())).thenReturn(connect.generateService()); when(mockDcOps.getAsync(kcNamespace, connect.getName())).thenReturn(Future.succeededFuture(connect.generateDeployment(new HashMap(), true, null, null))); - when(mockDcOps.readiness(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockDcOps.waitForObserved(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockSecretOps.reconcile(anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); + when(mockDcOps.readiness(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDcOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockSecretOps.reconcile(any(), anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); ArgumentCaptor serviceNameCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor serviceCaptor = ArgumentCaptor.forClass(Service.class); - when(mockServiceOps.reconcile(eq(kcNamespace), serviceNameCaptor.capture(), serviceCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockServiceOps.reconcile(any(), eq(kcNamespace), serviceNameCaptor.capture(), serviceCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor dcNameCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor dcCaptor = ArgumentCaptor.forClass(Deployment.class); - when(mockDcOps.reconcile(eq(kcNamespace), dcNameCaptor.capture(), dcCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockDcOps.reconcile(any(), eq(kcNamespace), dcNameCaptor.capture(), dcCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor dcScaleUpNameCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor dcScaleUpReplicasCaptor = ArgumentCaptor.forClass(Integer.class); - when(mockDcOps.scaleUp(eq(kcNamespace), dcScaleUpNameCaptor.capture(), dcScaleUpReplicasCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockDcOps.scaleUp(any(), eq(kcNamespace), dcScaleUpNameCaptor.capture(), dcScaleUpReplicasCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor dcScaleDownNameCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor dcScaleDownReplicasCaptor = ArgumentCaptor.forClass(Integer.class); - when(mockDcOps.scaleDown(eq(kcNamespace), dcScaleDownNameCaptor.capture(), 
dcScaleDownReplicasCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockDcOps.scaleDown(any(), eq(kcNamespace), dcScaleDownNameCaptor.capture(), dcScaleDownReplicasCaptor.capture())).thenReturn(Future.succeededFuture()); - when(mockCmOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); + when(mockCmOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); - when(mockNetPolOps.reconcile(eq(kc.getMetadata().getNamespace()), eq(KafkaConnectResources.deploymentName(kc.getMetadata().getName())), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); + when(mockNetPolOps.reconcile(any(), eq(kc.getMetadata().getNamespace()), eq(KafkaConnectResources.deploymentName(kc.getMetadata().getName())), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); - when(mockPodOps.reconcile(eq(kc.getMetadata().getNamespace()), eq(KafkaConnectResources.buildPodName(kc.getMetadata().getName())), eq(null))).thenReturn(Future.succeededFuture(ReconcileResult.noop(null))); + when(mockPodOps.reconcile(any(), eq(kc.getMetadata().getNamespace()), eq(KafkaConnectResources.buildPodName(kc.getMetadata().getName())), eq(null))).thenReturn(Future.succeededFuture(ReconcileResult.noop(null))); - when(mockBcOps.reconcile(eq(kc.getMetadata().getNamespace()), eq(KafkaConnectResources.buildConfigName(kc.getMetadata().getName())), eq(null))).thenReturn(Future.succeededFuture(ReconcileResult.noop(null))); + when(mockBcOps.reconcile(any(), eq(kc.getMetadata().getNamespace()), eq(KafkaConnectResources.buildConfigName(kc.getMetadata().getName())), eq(null))).thenReturn(Future.succeededFuture(ReconcileResult.noop(null))); ArgumentCaptor pdbCaptor = ArgumentCaptor.forClass(PodDisruptionBudget.class); - when(mockPdbOps.reconcile(anyString(), any(), pdbCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockPdbOps.reconcile(any(), anyString(), any(), pdbCaptor.capture())).thenReturn(Future.succeededFuture()); KafkaConnectApi mockConnectClient = mock(KafkaConnectApi.class); when(mockConnectClient.list(anyString(), anyInt())).thenReturn(Future.succeededFuture(emptyList())); @@ -319,8 +319,8 @@ public void testCreateOrUpdateDoesNotUpdateWithNoDiff(VertxTestContext context) .withType("sink") .withVersion("1.0.0") .build(); - when(mockConnectClient.listConnectorPlugins(anyString(), anyInt())).thenReturn(Future.succeededFuture(singletonList(plugin1))); - when(mockConnectClient.updateConnectLoggers(anyString(), anyInt(), anyString(), any(OrderedProperties.class))).thenReturn(Future.succeededFuture()); + when(mockConnectClient.listConnectorPlugins(any(), anyString(), anyInt())).thenReturn(Future.succeededFuture(singletonList(plugin1))); + when(mockConnectClient.updateConnectLoggers(any(), anyString(), anyInt(), anyString(), any(OrderedProperties.class))).thenReturn(Future.succeededFuture()); KafkaConnectAssemblyOperator ops = new KafkaConnectAssemblyOperator(vertx, new PlatformFeaturesAvailability(true, kubernetesVersion), supplier, ResourceUtils.dummyClusterOperatorConfig(VERSIONS), x -> mockConnectClient); @@ -371,45 +371,45 @@ public void testCreateOrUpdateUpdatesCluster(VertxTestContext context) { String kcNamespace = "test"; KafkaConnect kc = ResourceUtils.createEmptyKafkaConnect(kcNamespace, kcName); - KafkaConnectCluster connect = KafkaConnectCluster.fromCrd(kc, VERSIONS); + KafkaConnectCluster connect = 
KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kc, VERSIONS); kc.getSpec().setImage("some/different:image"); // Change the image to generate some diff when(mockConnectorOps.listAsync(anyString(), any(Optional.class))).thenReturn(Future.succeededFuture(emptyList())); when(mockConnectOps.get(kcNamespace, kcName)).thenReturn(kc); when(mockConnectOps.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture(kc)); - when(mockConnectOps.updateStatusAsync(any(KafkaConnect.class))).thenReturn(Future.succeededFuture()); + when(mockConnectOps.updateStatusAsync(any(), any(KafkaConnect.class))).thenReturn(Future.succeededFuture()); when(mockConnectS2IOps.getAsync(kcNamespace, kcName)).thenReturn(Future.succeededFuture(null)); when(mockServiceOps.get(kcNamespace, connect.getName())).thenReturn(connect.generateService()); when(mockDcOps.getAsync(kcNamespace, connect.getName())).thenReturn(Future.succeededFuture(connect.generateDeployment(new HashMap(), true, null, null))); - when(mockDcOps.readiness(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockDcOps.waitForObserved(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockSecretOps.reconcile(anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); + when(mockDcOps.readiness(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDcOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockSecretOps.reconcile(any(), anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); ArgumentCaptor serviceNameCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor serviceCaptor = ArgumentCaptor.forClass(Service.class); - when(mockServiceOps.reconcile(eq(kcNamespace), serviceNameCaptor.capture(), serviceCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockServiceOps.reconcile(any(), eq(kcNamespace), serviceNameCaptor.capture(), serviceCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor dcNameCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor dcCaptor = ArgumentCaptor.forClass(Deployment.class); - when(mockDcOps.reconcile(eq(kcNamespace), dcNameCaptor.capture(), dcCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockDcOps.reconcile(any(), eq(kcNamespace), dcNameCaptor.capture(), dcCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor dcScaleUpNameCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor dcScaleUpReplicasCaptor = ArgumentCaptor.forClass(Integer.class); - when(mockDcOps.scaleUp(eq(kcNamespace), dcScaleUpNameCaptor.capture(), dcScaleUpReplicasCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockDcOps.scaleUp(any(), eq(kcNamespace), dcScaleUpNameCaptor.capture(), dcScaleUpReplicasCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor dcScaleDownNameCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor dcScaleDownReplicasCaptor = ArgumentCaptor.forClass(Integer.class); - when(mockDcOps.scaleDown(eq(kcNamespace), dcScaleDownNameCaptor.capture(), dcScaleDownReplicasCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockDcOps.scaleDown(any(), eq(kcNamespace), dcScaleDownNameCaptor.capture(), dcScaleDownReplicasCaptor.capture())).thenReturn(Future.succeededFuture()); - when(mockCmOps.reconcile(anyString(), any(), 
any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); + when(mockCmOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); - when(mockNetPolOps.reconcile(eq(kc.getMetadata().getNamespace()), eq(KafkaConnectResources.deploymentName(kc.getMetadata().getName())), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); + when(mockNetPolOps.reconcile(any(), eq(kc.getMetadata().getNamespace()), eq(KafkaConnectResources.deploymentName(kc.getMetadata().getName())), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); - when(mockPodOps.reconcile(eq(kc.getMetadata().getNamespace()), eq(KafkaConnectResources.buildPodName(kc.getMetadata().getName())), eq(null))).thenReturn(Future.succeededFuture(ReconcileResult.noop(null))); + when(mockPodOps.reconcile(any(), eq(kc.getMetadata().getNamespace()), eq(KafkaConnectResources.buildPodName(kc.getMetadata().getName())), eq(null))).thenReturn(Future.succeededFuture(ReconcileResult.noop(null))); - when(mockBcOps.reconcile(eq(kc.getMetadata().getNamespace()), eq(KafkaConnectResources.buildConfigName(kc.getMetadata().getName())), eq(null))).thenReturn(Future.succeededFuture(ReconcileResult.noop(null))); + when(mockBcOps.reconcile(any(), eq(kc.getMetadata().getNamespace()), eq(KafkaConnectResources.buildConfigName(kc.getMetadata().getName())), eq(null))).thenReturn(Future.succeededFuture(ReconcileResult.noop(null))); ArgumentCaptor pdbCaptor = ArgumentCaptor.forClass(PodDisruptionBudget.class); - when(mockPdbOps.reconcile(anyString(), any(), pdbCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockPdbOps.reconcile(any(), anyString(), any(), pdbCaptor.capture())).thenReturn(Future.succeededFuture()); // Mock CM get when(mockConnectOps.get(kcNamespace, kcName)).thenReturn(kc); @@ -435,7 +435,7 @@ public void testCreateOrUpdateUpdatesCluster(VertxTestContext context) { doAnswer(invocation -> { metricsCms.add(invocation.getArgument(1)); return Future.succeededFuture(); - }).when(mockCmOps).reconcile(eq(kcNamespace), anyString(), any()); + }).when(mockCmOps).reconcile(any(), eq(kcNamespace), anyString(), any()); KafkaConnectApi mockConnectClient = mock(KafkaConnectApi.class); when(mockConnectClient.list(anyString(), anyInt())).thenReturn(Future.succeededFuture(emptyList())); @@ -445,8 +445,8 @@ public void testCreateOrUpdateUpdatesCluster(VertxTestContext context) { .withType("sink") .withVersion("1.0.0") .build(); - when(mockConnectClient.listConnectorPlugins(anyString(), anyInt())).thenReturn(Future.succeededFuture(singletonList(plugin1))); - when(mockConnectClient.updateConnectLoggers(anyString(), anyInt(), anyString(), any(OrderedProperties.class))).thenReturn(Future.succeededFuture()); + when(mockConnectClient.listConnectorPlugins(any(), anyString(), anyInt())).thenReturn(Future.succeededFuture(singletonList(plugin1))); + when(mockConnectClient.updateConnectLoggers(any(), anyString(), anyInt(), anyString(), any(OrderedProperties.class))).thenReturn(Future.succeededFuture()); KafkaConnectAssemblyOperator ops = new KafkaConnectAssemblyOperator(vertx, new PlatformFeaturesAvailability(true, kubernetesVersion), supplier, ResourceUtils.dummyClusterOperatorConfig(VERSIONS), x -> mockConnectClient); @@ -454,7 +454,7 @@ public void testCreateOrUpdateUpdatesCluster(VertxTestContext context) { Checkpoint async = context.checkpoint(); ops.createOrUpdate(new Reconciliation("test-trigger", 
KafkaConnect.RESOURCE_KIND, kcNamespace, kcName), kc) .onComplete(context.succeeding(v -> context.verify(() -> { - KafkaConnectCluster compareTo = KafkaConnectCluster.fromCrd(kc, VERSIONS); + KafkaConnectCluster compareTo = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kc, VERSIONS); // Verify service List capturedServices = serviceCaptor.getAllValues(); @@ -484,7 +484,7 @@ public void testCreateOrUpdateUpdatesCluster(VertxTestContext context) { assertThat(dcScaleUpNameCaptor.getAllValues(), hasSize(1)); // No metrics config => no CMs created - verify(mockCmOps, never()).createOrUpdate(any()); + verify(mockCmOps, never()).createOrUpdate(any(), any()); async.flag(); }))); } @@ -505,43 +505,43 @@ public void testCreateOrUpdateFailsWhenDeploymentUpdateFails(VertxTestContext co String kcNamespace = "test"; KafkaConnect kc = ResourceUtils.createEmptyKafkaConnect(kcNamespace, kcName); - KafkaConnectCluster connect = KafkaConnectCluster.fromCrd(kc, VERSIONS); + KafkaConnectCluster connect = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kc, VERSIONS); kc.getSpec().setImage("some/different:image"); // Change the image to generate some diff when(mockConnectOps.get(kcNamespace, kcName)).thenReturn(kc); when(mockConnectOps.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture(kc)); - when(mockConnectOps.updateStatusAsync(any(KafkaConnect.class))).thenReturn(Future.succeededFuture()); + when(mockConnectOps.updateStatusAsync(any(), any(KafkaConnect.class))).thenReturn(Future.succeededFuture()); when(mockConnectS2IOps.getAsync(kcNamespace, kcName)).thenReturn(Future.succeededFuture(null)); when(mockServiceOps.get(kcNamespace, connect.getName())).thenReturn(connect.generateService()); when(mockDcOps.getAsync(kcNamespace, connect.getName())).thenReturn(Future.succeededFuture(connect.generateDeployment(new HashMap(), true, null, null))); - when(mockDcOps.readiness(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockDcOps.waitForObserved(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockSecretOps.reconcile(anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); + when(mockDcOps.readiness(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDcOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockSecretOps.reconcile(any(), anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); ArgumentCaptor serviceNamespaceCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor serviceNameCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor serviceCaptor = ArgumentCaptor.forClass(Service.class); - when(mockServiceOps.reconcile(serviceNamespaceCaptor.capture(), serviceNameCaptor.capture(), serviceCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockServiceOps.reconcile(any(), serviceNamespaceCaptor.capture(), serviceNameCaptor.capture(), serviceCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor dcNamespaceCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor dcNameCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor dcCaptor = ArgumentCaptor.forClass(Deployment.class); - when(mockDcOps.reconcile(dcNamespaceCaptor.capture(), dcNameCaptor.capture(), dcCaptor.capture())).thenReturn(Future.failedFuture("Failed")); + when(mockDcOps.reconcile(any(), 
dcNamespaceCaptor.capture(), dcNameCaptor.capture(), dcCaptor.capture())).thenReturn(Future.failedFuture("Failed")); ArgumentCaptor dcScaleUpNamespaceCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor dcScaleUpNameCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor dcScaleUpReplicasCaptor = ArgumentCaptor.forClass(Integer.class); - when(mockDcOps.scaleUp(dcScaleUpNamespaceCaptor.capture(), dcScaleUpNameCaptor.capture(), dcScaleUpReplicasCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockDcOps.scaleUp(any(), dcScaleUpNamespaceCaptor.capture(), dcScaleUpNameCaptor.capture(), dcScaleUpReplicasCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor dcScaleDownNamespaceCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor dcScaleDownNameCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor dcScaleDownReplicasCaptor = ArgumentCaptor.forClass(Integer.class); - when(mockDcOps.scaleDown(dcScaleDownNamespaceCaptor.capture(), dcScaleDownNameCaptor.capture(), dcScaleDownReplicasCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockDcOps.scaleDown(any(), dcScaleDownNamespaceCaptor.capture(), dcScaleDownNameCaptor.capture(), dcScaleDownReplicasCaptor.capture())).thenReturn(Future.succeededFuture()); - when(mockNetPolOps.reconcile(eq(kc.getMetadata().getNamespace()), eq(KafkaConnectResources.deploymentName(kc.getMetadata().getName())), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); - when(mockConnectOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new KafkaConnect()))); - when(mockCmOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); - when(mockPdbOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new PodDisruptionBudget()))); + when(mockNetPolOps.reconcile(any(), eq(kc.getMetadata().getNamespace()), eq(KafkaConnectResources.deploymentName(kc.getMetadata().getName())), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); + when(mockConnectOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new KafkaConnect()))); + when(mockCmOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); + when(mockPdbOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new PodDisruptionBudget()))); KafkaConnectAssemblyOperator ops = new KafkaConnectAssemblyOperator(vertx, new PlatformFeaturesAvailability(true, kubernetesVersion), supplier, ResourceUtils.dummyClusterOperatorConfig(VERSIONS)); @@ -572,36 +572,36 @@ public void testUpdateClusterScaleUp(VertxTestContext context) { String kcNamespace = "test"; KafkaConnect kc = ResourceUtils.createEmptyKafkaConnect(kcNamespace, kcName); - KafkaConnectCluster connect = KafkaConnectCluster.fromCrd(kc, VERSIONS); + KafkaConnectCluster connect = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kc, VERSIONS); kc.getSpec().setReplicas(scaleTo); // Change replicas to create ScaleUp when(mockConnectorOps.listAsync(anyString(), any(Optional.class))).thenReturn(Future.succeededFuture(emptyList())); when(mockConnectOps.get(kcNamespace, kcName)).thenReturn(kc); when(mockConnectOps.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture(kc)); - 
when(mockConnectOps.updateStatusAsync(any(KafkaConnect.class))).thenReturn(Future.succeededFuture()); + when(mockConnectOps.updateStatusAsync(any(), any(KafkaConnect.class))).thenReturn(Future.succeededFuture()); when(mockConnectS2IOps.getAsync(kcNamespace, kcName)).thenReturn(Future.succeededFuture(null)); when(mockServiceOps.get(kcNamespace, connect.getName())).thenReturn(connect.generateService()); when(mockDcOps.getAsync(kcNamespace, connect.getName())).thenReturn(Future.succeededFuture(connect.generateDeployment(new HashMap(), true, null, null))); - when(mockDcOps.readiness(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockDcOps.waitForObserved(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockSecretOps.reconcile(anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); + when(mockDcOps.readiness(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDcOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockSecretOps.reconcile(any(), anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); - when(mockServiceOps.reconcile(eq(kcNamespace), any(), any())).thenReturn(Future.succeededFuture()); + when(mockServiceOps.reconcile(any(), eq(kcNamespace), any(), any())).thenReturn(Future.succeededFuture()); - when(mockDcOps.reconcile(eq(kcNamespace), any(), any())).thenReturn(Future.succeededFuture()); + when(mockDcOps.reconcile(any(), eq(kcNamespace), any(), any())).thenReturn(Future.succeededFuture()); doAnswer(i -> Future.succeededFuture(scaleTo)) - .when(mockDcOps).scaleUp(kcNamespace, connect.getName(), scaleTo); + .when(mockDcOps).scaleUp(any(), eq(kcNamespace), eq(connect.getName()), eq(scaleTo)); doAnswer(i -> Future.succeededFuture(scaleTo)) - .when(mockDcOps).scaleDown(kcNamespace, connect.getName(), scaleTo); + .when(mockDcOps).scaleDown(any(), eq(kcNamespace), eq(connect.getName()), eq(scaleTo)); - when(mockNetPolOps.reconcile(eq(kc.getMetadata().getNamespace()), eq(KafkaConnectResources.deploymentName(kc.getMetadata().getName())), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); - when(mockPodOps.reconcile(eq(kc.getMetadata().getNamespace()), eq(KafkaConnectResources.buildPodName(kc.getMetadata().getName())), eq(null))).thenReturn(Future.succeededFuture(ReconcileResult.noop(null))); - when(mockBcOps.reconcile(eq(kc.getMetadata().getNamespace()), eq(KafkaConnectResources.buildConfigName(kc.getMetadata().getName())), eq(null))).thenReturn(Future.succeededFuture(ReconcileResult.noop(null))); - when(mockConnectOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new KafkaConnect()))); - when(mockCmOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); - when(mockPdbOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new PodDisruptionBudget()))); + when(mockNetPolOps.reconcile(any(), eq(kc.getMetadata().getNamespace()), eq(KafkaConnectResources.deploymentName(kc.getMetadata().getName())), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); + when(mockPodOps.reconcile(any(), eq(kc.getMetadata().getNamespace()), eq(KafkaConnectResources.buildPodName(kc.getMetadata().getName())), 
eq(null))).thenReturn(Future.succeededFuture(ReconcileResult.noop(null))); + when(mockBcOps.reconcile(any(), eq(kc.getMetadata().getNamespace()), eq(KafkaConnectResources.buildConfigName(kc.getMetadata().getName())), eq(null))).thenReturn(Future.succeededFuture(ReconcileResult.noop(null))); + when(mockConnectOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new KafkaConnect()))); + when(mockCmOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); + when(mockPdbOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new PodDisruptionBudget()))); KafkaConnectApi mockConnectClient = mock(KafkaConnectApi.class); when(mockConnectClient.list(anyString(), anyInt())).thenReturn(Future.succeededFuture(emptyList())); @@ -611,8 +611,8 @@ public void testUpdateClusterScaleUp(VertxTestContext context) { .withType("sink") .withVersion("1.0.0") .build(); - when(mockConnectClient.listConnectorPlugins(anyString(), anyInt())).thenReturn(Future.succeededFuture(singletonList(plugin1))); - when(mockConnectClient.updateConnectLoggers(anyString(), anyInt(), anyString(), any(OrderedProperties.class))).thenReturn(Future.succeededFuture()); + when(mockConnectClient.listConnectorPlugins(any(), anyString(), anyInt())).thenReturn(Future.succeededFuture(singletonList(plugin1))); + when(mockConnectClient.updateConnectLoggers(any(), anyString(), anyInt(), anyString(), any(OrderedProperties.class))).thenReturn(Future.succeededFuture()); KafkaConnectAssemblyOperator ops = new KafkaConnectAssemblyOperator(vertx, new PlatformFeaturesAvailability(true, kubernetesVersion), supplier, ResourceUtils.dummyClusterOperatorConfig(VERSIONS), x -> mockConnectClient); @@ -620,7 +620,7 @@ public void testUpdateClusterScaleUp(VertxTestContext context) { Checkpoint async = context.checkpoint(); ops.createOrUpdate(new Reconciliation("test-trigger", KafkaConnect.RESOURCE_KIND, kcNamespace, kcName), kc) .onComplete(context.succeeding(v -> context.verify(() -> { - verify(mockDcOps).scaleUp(kcNamespace, connect.getName(), scaleTo); + verify(mockDcOps).scaleUp(any(), eq(kcNamespace), eq(connect.getName()), eq(scaleTo)); async.flag(); }))); } @@ -647,36 +647,36 @@ public void testUpdateClusterScaleDown(VertxTestContext context) { String kcNamespace = "test"; KafkaConnect kc = ResourceUtils.createEmptyKafkaConnect(kcNamespace, kcName); - KafkaConnectCluster connect = KafkaConnectCluster.fromCrd(kc, VERSIONS); + KafkaConnectCluster connect = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kc, VERSIONS); kc.getSpec().setReplicas(scaleTo); // Change replicas to create ScaleDown when(mockConnectorOps.listAsync(anyString(), any(Optional.class))).thenReturn(Future.succeededFuture(emptyList())); when(mockConnectOps.get(kcNamespace, kcName)).thenReturn(kc); when(mockConnectOps.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture(kc)); - when(mockConnectOps.updateStatusAsync(any(KafkaConnect.class))).thenReturn(Future.succeededFuture()); + when(mockConnectOps.updateStatusAsync(any(), any(KafkaConnect.class))).thenReturn(Future.succeededFuture()); when(mockConnectS2IOps.getAsync(kcNamespace, kcName)).thenReturn(Future.succeededFuture(null)); when(mockServiceOps.get(kcNamespace, connect.getName())).thenReturn(connect.generateService()); when(mockDcOps.getAsync(kcNamespace, 
connect.getName())).thenReturn(Future.succeededFuture(connect.generateDeployment(new HashMap(), true, null, null))); - when(mockDcOps.readiness(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockDcOps.waitForObserved(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockSecretOps.reconcile(anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); + when(mockDcOps.readiness(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDcOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockSecretOps.reconcile(any(), anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); - when(mockServiceOps.reconcile(eq(kcNamespace), any(), any())).thenReturn(Future.succeededFuture()); + when(mockServiceOps.reconcile(any(), eq(kcNamespace), any(), any())).thenReturn(Future.succeededFuture()); - when(mockDcOps.reconcile(eq(kcNamespace), any(), any())).thenReturn(Future.succeededFuture()); + when(mockDcOps.reconcile(any(), eq(kcNamespace), any(), any())).thenReturn(Future.succeededFuture()); doAnswer(i -> Future.succeededFuture(scaleTo)) - .when(mockDcOps).scaleUp(kcNamespace, connect.getName(), scaleTo); + .when(mockDcOps).scaleUp(any(), eq(kcNamespace), eq(connect.getName()), eq(scaleTo)); doAnswer(i -> Future.succeededFuture(scaleTo)) - .when(mockDcOps).scaleDown(kcNamespace, connect.getName(), scaleTo); + .when(mockDcOps).scaleDown(any(), eq(kcNamespace), eq(connect.getName()), eq(scaleTo)); - when(mockNetPolOps.reconcile(eq(kc.getMetadata().getNamespace()), eq(KafkaConnectResources.deploymentName(kc.getMetadata().getName())), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); - when(mockPodOps.reconcile(eq(kc.getMetadata().getNamespace()), eq(KafkaConnectResources.buildPodName(kc.getMetadata().getName())), eq(null))).thenReturn(Future.succeededFuture(ReconcileResult.noop(null))); - when(mockBcOps.reconcile(eq(kc.getMetadata().getNamespace()), eq(KafkaConnectResources.buildConfigName(kc.getMetadata().getName())), eq(null))).thenReturn(Future.succeededFuture(ReconcileResult.noop(null))); - when(mockConnectOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new KafkaConnect()))); - when(mockCmOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); - when(mockPdbOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new PodDisruptionBudget()))); + when(mockNetPolOps.reconcile(any(), eq(kc.getMetadata().getNamespace()), eq(KafkaConnectResources.deploymentName(kc.getMetadata().getName())), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); + when(mockPodOps.reconcile(any(), eq(kc.getMetadata().getNamespace()), eq(KafkaConnectResources.buildPodName(kc.getMetadata().getName())), eq(null))).thenReturn(Future.succeededFuture(ReconcileResult.noop(null))); + when(mockBcOps.reconcile(any(), eq(kc.getMetadata().getNamespace()), eq(KafkaConnectResources.buildConfigName(kc.getMetadata().getName())), eq(null))).thenReturn(Future.succeededFuture(ReconcileResult.noop(null))); + when(mockConnectOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new KafkaConnect()))); + when(mockCmOps.reconcile(any(), 
anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); + when(mockPdbOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new PodDisruptionBudget()))); KafkaConnectApi mockConnectClient = mock(KafkaConnectApi.class); when(mockConnectClient.list(anyString(), anyInt())).thenReturn(Future.succeededFuture(emptyList())); @@ -686,8 +686,8 @@ public void testUpdateClusterScaleDown(VertxTestContext context) { .withType("sink") .withVersion("1.0.0") .build(); - when(mockConnectClient.listConnectorPlugins(anyString(), anyInt())).thenReturn(Future.succeededFuture(singletonList(plugin1))); - when(mockConnectClient.updateConnectLoggers(anyString(), anyInt(), anyString(), any(OrderedProperties.class))).thenReturn(Future.succeededFuture()); + when(mockConnectClient.listConnectorPlugins(any(), anyString(), anyInt())).thenReturn(Future.succeededFuture(singletonList(plugin1))); + when(mockConnectClient.updateConnectLoggers(any(), anyString(), anyInt(), anyString(), any(OrderedProperties.class))).thenReturn(Future.succeededFuture()); KafkaConnectAssemblyOperator ops = new KafkaConnectAssemblyOperator(vertx, new PlatformFeaturesAvailability(true, kubernetesVersion), supplier, ResourceUtils.dummyClusterOperatorConfig(VERSIONS), x -> mockConnectClient); @@ -695,7 +695,7 @@ public void testUpdateClusterScaleDown(VertxTestContext context) { Checkpoint async = context.checkpoint(); ops.createOrUpdate(new Reconciliation("test-trigger", KafkaConnect.RESOURCE_KIND, kcNamespace, kcName), kc) .onComplete(context.succeeding(v -> context.verify(() -> { - verify(mockDcOps).scaleUp(kcNamespace, connect.getName(), scaleTo); + verify(mockDcOps).scaleUp(any(), eq(kcNamespace), eq(connect.getName()), eq(scaleTo)); async.flag(); }))); @@ -725,19 +725,19 @@ public void testReconcile(VertxTestContext context) { // providing the list of ALL Deployments for all the Kafka Connect clusters Labels newLabels = Labels.forStrimziKind(KafkaConnect.RESOURCE_KIND); when(mockDcOps.list(eq(kcNamespace), eq(newLabels))).thenReturn( - asList(KafkaConnectCluster.fromCrd(bar, VERSIONS).generateDeployment(new HashMap(), true, null, null))); + asList(KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, bar, VERSIONS).generateDeployment(new HashMap(), true, null, null))); // providing the list Deployments for already "existing" Kafka Connect clusters Labels barLabels = Labels.forStrimziCluster("bar"); when(mockDcOps.list(eq(kcNamespace), eq(barLabels))).thenReturn( - asList(KafkaConnectCluster.fromCrd(bar, VERSIONS).generateDeployment(new HashMap(), true, null, null)) + asList(KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, bar, VERSIONS).generateDeployment(new HashMap(), true, null, null)) ); - when(mockDcOps.readiness(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockDcOps.waitForObserved(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDcOps.readiness(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDcOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockNetPolOps.reconcile(eq(kcNamespace), eq(KafkaConnectResources.deploymentName(bar.getMetadata().getName())), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); - when(mockSecretOps.reconcile(eq(kcNamespace), 
any(), any())).thenReturn(Future.succeededFuture()); - when(mockPdbOps.reconcile(eq(kcNamespace), any(), any())).thenReturn(Future.succeededFuture()); + when(mockNetPolOps.reconcile(any(), eq(kcNamespace), eq(KafkaConnectResources.deploymentName(bar.getMetadata().getName())), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); + when(mockSecretOps.reconcile(any(), eq(kcNamespace), any(), any())).thenReturn(Future.succeededFuture()); + when(mockPdbOps.reconcile(any(), eq(kcNamespace), any(), any())).thenReturn(Future.succeededFuture()); Set createdOrUpdated = new CopyOnWriteArraySet<>(); @@ -783,28 +783,28 @@ public void testUpdateClusterWithFailedScaleDownSetsStatusNotReady(VertxTestCont String kcNamespace = "test"; String failureMsg = "failure"; KafkaConnect kc = ResourceUtils.createEmptyKafkaConnect(kcNamespace, kcName); - KafkaConnectCluster connect = KafkaConnectCluster.fromCrd(kc, VERSIONS); + KafkaConnectCluster connect = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kc, VERSIONS); when(mockConnectOps.get(kcNamespace, kcName)).thenReturn(kc); when(mockConnectOps.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture(kc)); when(mockConnectS2IOps.getAsync(kcNamespace, kcName)).thenReturn(Future.succeededFuture(null)); - when(mockServiceOps.reconcile(anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); - when(mockDcOps.reconcile(anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); + when(mockServiceOps.reconcile(any(), anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); + when(mockDcOps.reconcile(any(), anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); when(mockDcOps.getAsync(kcNamespace, connect.getName())).thenReturn(Future.succeededFuture(connect.generateDeployment(new HashMap(), true, null, null))); - when(mockDcOps.scaleUp(anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); - when(mockDcOps.scaleDown(anyString(), anyString(), anyInt())).thenReturn(Future.failedFuture(failureMsg)); - when(mockDcOps.readiness(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockDcOps.waitForObserved(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockCmOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); - when(mockNetPolOps.reconcile(eq(kc.getMetadata().getNamespace()), eq(KafkaConnectResources.deploymentName(kc.getMetadata().getName())), any())) + when(mockDcOps.scaleUp(any(), anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); + when(mockDcOps.scaleDown(any(), anyString(), anyString(), anyInt())).thenReturn(Future.failedFuture(failureMsg)); + when(mockDcOps.readiness(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDcOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockCmOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); + when(mockNetPolOps.reconcile(any(), eq(kc.getMetadata().getNamespace()), eq(KafkaConnectResources.deploymentName(kc.getMetadata().getName())), any())) .thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); - when(mockPodOps.reconcile(eq(kc.getMetadata().getNamespace()), 
eq(KafkaConnectResources.buildPodName(kc.getMetadata().getName())), eq(null))).thenReturn(Future.succeededFuture(ReconcileResult.noop(null))); - when(mockBcOps.reconcile(eq(kc.getMetadata().getNamespace()), eq(KafkaConnectResources.buildConfigName(kc.getMetadata().getName())), eq(null))).thenReturn(Future.succeededFuture(ReconcileResult.noop(null))); - when(mockPdbOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture()); - when(mockSecretOps.reconcile(anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); + when(mockPodOps.reconcile(any(), eq(kc.getMetadata().getNamespace()), eq(KafkaConnectResources.buildPodName(kc.getMetadata().getName())), eq(null))).thenReturn(Future.succeededFuture(ReconcileResult.noop(null))); + when(mockBcOps.reconcile(any(), eq(kc.getMetadata().getNamespace()), eq(KafkaConnectResources.buildConfigName(kc.getMetadata().getName())), eq(null))).thenReturn(Future.succeededFuture(ReconcileResult.noop(null))); + when(mockPdbOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture()); + when(mockSecretOps.reconcile(any(), anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); ArgumentCaptor connectCaptor = ArgumentCaptor.forClass(KafkaConnect.class); - when(mockConnectOps.updateStatusAsync(connectCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockConnectOps.updateStatusAsync(any(), connectCaptor.capture())).thenReturn(Future.succeededFuture()); KafkaConnectAssemblyOperator ops = new KafkaConnectAssemblyOperator(vertx, new PlatformFeaturesAvailability(true, kubernetesVersion), supplier, ResourceUtils.dummyClusterOperatorConfig(VERSIONS)); @@ -851,31 +851,31 @@ public void assertCreateClusterWithDuplicateOlderConnect(VertxTestContext contex when(mockConnectS2IOps.getAsync(kc.getMetadata().getNamespace(), kc.getMetadata().getName())).thenReturn(Future.succeededFuture(conflictingConnectS2I)); ArgumentCaptor serviceCaptor = ArgumentCaptor.forClass(Service.class); - when(mockServiceOps.reconcile(anyString(), anyString(), serviceCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockServiceOps.reconcile(any(), anyString(), anyString(), serviceCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor dcCaptor = ArgumentCaptor.forClass(Deployment.class); - when(mockDcOps.reconcile(anyString(), anyString(), dcCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockDcOps.reconcile(any(), anyString(), anyString(), dcCaptor.capture())).thenReturn(Future.succeededFuture()); when(mockDcOps.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture()); - when(mockDcOps.scaleUp(anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); - when(mockDcOps.scaleDown(anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); - when(mockDcOps.readiness(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockDcOps.waitForObserved(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDcOps.scaleUp(any(), anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); + when(mockDcOps.scaleDown(any(), anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); + when(mockDcOps.readiness(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDcOps.waitForObserved(any(), anyString(), anyString(), anyLong(), 
anyLong())).thenReturn(Future.succeededFuture()); - when(mockCmOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); - when(mockSecretOps.reconcile(anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); + when(mockCmOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); + when(mockSecretOps.reconcile(any(), anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); when(mockSecretOps.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture()); - when(mockNetPolOps.reconcile(eq(kc.getMetadata().getNamespace()), eq(KafkaConnectResources.deploymentName(kc.getMetadata().getName())), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); + when(mockNetPolOps.reconcile(any(), eq(kc.getMetadata().getNamespace()), eq(KafkaConnectResources.deploymentName(kc.getMetadata().getName())), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); - when(mockPodOps.reconcile(eq(kc.getMetadata().getNamespace()), eq(KafkaConnectResources.buildPodName(kc.getMetadata().getName())), eq(null))).thenReturn(Future.succeededFuture(ReconcileResult.noop(null))); + when(mockPodOps.reconcile(any(), eq(kc.getMetadata().getNamespace()), eq(KafkaConnectResources.buildPodName(kc.getMetadata().getName())), eq(null))).thenReturn(Future.succeededFuture(ReconcileResult.noop(null))); - when(mockBcOps.reconcile(eq(kc.getMetadata().getNamespace()), eq(KafkaConnectResources.buildConfigName(kc.getMetadata().getName())), eq(null))).thenReturn(Future.succeededFuture(ReconcileResult.noop(null))); + when(mockBcOps.reconcile(any(), eq(kc.getMetadata().getNamespace()), eq(KafkaConnectResources.buildConfigName(kc.getMetadata().getName())), eq(null))).thenReturn(Future.succeededFuture(ReconcileResult.noop(null))); ArgumentCaptor pdbCaptor = ArgumentCaptor.forClass(PodDisruptionBudget.class); - when(mockPdbOps.reconcile(anyString(), any(), pdbCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockPdbOps.reconcile(any(), anyString(), any(), pdbCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor connectCaptor = ArgumentCaptor.forClass(KafkaConnect.class); - when(mockConnectOps.updateStatusAsync(connectCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockConnectOps.updateStatusAsync(any(), connectCaptor.capture())).thenReturn(Future.succeededFuture()); KafkaConnectApi mockConnectClient = mock(KafkaConnectApi.class); when(mockConnectClient.list(anyString(), anyInt())).thenReturn(Future.succeededFuture(emptyList())); @@ -885,13 +885,13 @@ public void assertCreateClusterWithDuplicateOlderConnect(VertxTestContext contex .withType("sink") .withVersion("1.0.0") .build(); - when(mockConnectClient.listConnectorPlugins(anyString(), anyInt())).thenReturn(Future.succeededFuture(singletonList(plugin1))); - when(mockConnectClient.updateConnectLoggers(anyString(), anyInt(), anyString(), any(OrderedProperties.class))).thenReturn(Future.succeededFuture()); + when(mockConnectClient.listConnectorPlugins(any(), anyString(), anyInt())).thenReturn(Future.succeededFuture(singletonList(plugin1))); + when(mockConnectClient.updateConnectLoggers(any(), anyString(), anyInt(), anyString(), any(OrderedProperties.class))).thenReturn(Future.succeededFuture()); KafkaConnectAssemblyOperator ops = new KafkaConnectAssemblyOperator(vertx, new PlatformFeaturesAvailability(true, 
                kubernetesVersion), supplier, ResourceUtils.dummyClusterOperatorConfig(VERSIONS), x -> mockConnectClient);
-        KafkaConnectCluster connect = KafkaConnectCluster.fromCrd(kc, VERSIONS);
+        KafkaConnectCluster connect = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kc, VERSIONS);
         Checkpoint async = context.checkpoint();
         ops.reconcile(new Reconciliation("test-trigger", KafkaConnect.RESOURCE_KIND, kc.getMetadata().getNamespace(), kc.getMetadata().getName()))
@@ -988,7 +988,7 @@ public void testCreateClusterWitDuplicateNeverConnectHasNotReadyStatus(VertxTest
         KafkaConnectApi mockConnectClient = mock(KafkaConnectApi.class);
         ArgumentCaptor connectCaptor = ArgumentCaptor.forClass(KafkaConnect.class);
-        when(mockConnectOps.updateStatusAsync(connectCaptor.capture())).thenReturn(Future.succeededFuture());
+        when(mockConnectOps.updateStatusAsync(any(), connectCaptor.capture())).thenReturn(Future.succeededFuture());
         KafkaConnectAssemblyOperator ops = new KafkaConnectAssemblyOperator(vertx, new PlatformFeaturesAvailability(true, kubernetesVersion),
                 supplier, ResourceUtils.dummyClusterOperatorConfig(VERSIONS), x -> mockConnectClient);
@@ -1036,26 +1036,26 @@ public void testCreateOrUpdateFailsWhenClusterRoleBindingRightsAreMissingButRequ
         KafkaConnect kc = ResourceUtils.createEmptyKafkaConnect(kcNamespace, kcName);
         kc.getSpec().setRack(new RackBuilder().withNewTopologyKey("some-node-label").build());
-        KafkaConnectCluster connect = KafkaConnectCluster.fromCrd(kc, VERSIONS);
+        KafkaConnectCluster connect = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kc, VERSIONS);
         when(mockConnectOps.get(kcNamespace, kcName)).thenReturn(kc);
         when(mockConnectOps.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture(kc));
-        when(mockConnectOps.updateStatusAsync(any(KafkaConnect.class))).thenReturn(Future.succeededFuture());
+        when(mockConnectOps.updateStatusAsync(any(), any(KafkaConnect.class))).thenReturn(Future.succeededFuture());
         when(mockConnectS2IOps.getAsync(kcNamespace, kcName)).thenReturn(Future.succeededFuture(null));
         when(mockServiceOps.get(kcNamespace, connect.getName())).thenReturn(connect.generateService());
-        when(mockDcOps.readiness(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
-        when(mockDcOps.waitForObserved(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
-        when(mockSecretOps.reconcile(anyString(), anyString(), any())).thenReturn(Future.succeededFuture());
+        when(mockDcOps.readiness(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
+        when(mockDcOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
+        when(mockSecretOps.reconcile(any(), anyString(), anyString(), any())).thenReturn(Future.succeededFuture());
-        when(mockCrbOps.reconcile(any(), any())).thenReturn(Future.failedFuture("Message: Forbidden!"));
-        when(mockServiceOps.reconcile(any(), any(), any())).thenReturn(Future.succeededFuture());
-        when(mockDcOps.reconcile(any(), any(), any())).thenReturn(Future.succeededFuture());
-        when(mockDcOps.scaleUp(any(), any(), anyInt())).thenReturn(Future.succeededFuture());
-        when(mockDcOps.scaleDown(any(), any(), anyInt())).thenReturn(Future.succeededFuture());
-        when(mockNetPolOps.reconcile(eq(kc.getMetadata().getNamespace()), eq(KafkaConnectResources.deploymentName(kc.getMetadata().getName())), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy())));
-        when(mockConnectOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new KafkaConnect())));
-        when(mockCmOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap())));
-        when(mockPdbOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new PodDisruptionBudget())));
+        when(mockCrbOps.reconcile(any(), any(), any())).thenReturn(Future.failedFuture("Message: Forbidden!"));
+        when(mockServiceOps.reconcile(any(), any(), any(), any())).thenReturn(Future.succeededFuture());
+        when(mockDcOps.reconcile(any(), any(), any(), any())).thenReturn(Future.succeededFuture());
+        when(mockDcOps.scaleUp(any(), any(), any(), anyInt())).thenReturn(Future.succeededFuture());
+        when(mockDcOps.scaleDown(any(), any(), any(), anyInt())).thenReturn(Future.succeededFuture());
+        when(mockNetPolOps.reconcile(any(), eq(kc.getMetadata().getNamespace()), eq(KafkaConnectResources.deploymentName(kc.getMetadata().getName())), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy())));
+        when(mockConnectOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new KafkaConnect())));
+        when(mockCmOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap())));
+        when(mockPdbOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new PodDisruptionBudget())));
         KafkaConnectAssemblyOperator ops = new KafkaConnectAssemblyOperator(vertx, new PlatformFeaturesAvailability(true, kubernetesVersion),
@@ -1088,29 +1088,29 @@ public void testCreateOrUpdatePassesWhenClusterRoleBindingRightsAreMissingAndNot
         String kcNamespace = "test";
         KafkaConnect kc = ResourceUtils.createEmptyKafkaConnect(kcNamespace, kcName);
-        KafkaConnectCluster connect = KafkaConnectCluster.fromCrd(kc, VERSIONS);
+        KafkaConnectCluster connect = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kc, VERSIONS);
         when(mockConnectOps.get(kcNamespace, kcName)).thenReturn(kc);
         when(mockConnectOps.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture(kc));
-        when(mockConnectOps.updateStatusAsync(any(KafkaConnect.class))).thenReturn(Future.succeededFuture());
+        when(mockConnectOps.updateStatusAsync(any(), any(KafkaConnect.class))).thenReturn(Future.succeededFuture());
         when(mockConnectS2IOps.getAsync(kcNamespace, kcName)).thenReturn(Future.succeededFuture(null));
         when(mockServiceOps.get(kcNamespace, connect.getName())).thenReturn(connect.generateService());
         when(mockDcOps.getAsync(kcNamespace, connect.getName())).thenReturn(Future.succeededFuture(connect.generateDeployment(new HashMap(), true, null, null)));
-        when(mockDcOps.readiness(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
-        when(mockDcOps.waitForObserved(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
-        when(mockSecretOps.reconcile(anyString(), anyString(), any())).thenReturn(Future.succeededFuture());
-
-        when(mockCrbOps.reconcile(any(), any())).thenReturn(Future.failedFuture("Message: Forbidden!"));
-        when(mockServiceOps.reconcile(any(), any(), any())).thenReturn(Future.succeededFuture());
-        when(mockDcOps.reconcile(any(), any(), any())).thenReturn(Future.succeededFuture());
-        when(mockDcOps.scaleUp(any(), any(), anyInt())).thenReturn(Future.succeededFuture());
-        when(mockDcOps.scaleDown(any(), any(), anyInt())).thenReturn(Future.succeededFuture());
-        when(mockNetPolOps.reconcile(eq(kc.getMetadata().getNamespace()), eq(KafkaConnectResources.deploymentName(kc.getMetadata().getName())), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy())));
-        when(mockPodOps.reconcile(eq(kc.getMetadata().getNamespace()), eq(KafkaConnectResources.buildPodName(kc.getMetadata().getName())), eq(null))).thenReturn(Future.succeededFuture(ReconcileResult.noop(null)));
-        when(mockBcOps.reconcile(eq(kc.getMetadata().getNamespace()), eq(KafkaConnectResources.buildConfigName(kc.getMetadata().getName())), eq(null))).thenReturn(Future.succeededFuture(ReconcileResult.noop(null)));
-        when(mockConnectOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new KafkaConnect())));
-        when(mockCmOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap())));
-        when(mockPdbOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new PodDisruptionBudget())));
+        when(mockDcOps.readiness(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
+        when(mockDcOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
+        when(mockSecretOps.reconcile(any(), anyString(), anyString(), any())).thenReturn(Future.succeededFuture());
+
+        when(mockCrbOps.reconcile(any(), any(), any())).thenReturn(Future.failedFuture("Message: Forbidden!"));
+        when(mockServiceOps.reconcile(any(), any(), any(), any())).thenReturn(Future.succeededFuture());
+        when(mockDcOps.reconcile(any(), any(), any(), any())).thenReturn(Future.succeededFuture());
+        when(mockDcOps.scaleUp(any(), any(), any(), anyInt())).thenReturn(Future.succeededFuture());
+        when(mockDcOps.scaleDown(any(), any(), any(), anyInt())).thenReturn(Future.succeededFuture());
+        when(mockNetPolOps.reconcile(any(), eq(kc.getMetadata().getNamespace()), eq(KafkaConnectResources.deploymentName(kc.getMetadata().getName())), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy())));
+        when(mockPodOps.reconcile(any(), eq(kc.getMetadata().getNamespace()), eq(KafkaConnectResources.buildPodName(kc.getMetadata().getName())), eq(null))).thenReturn(Future.succeededFuture(ReconcileResult.noop(null)));
+        when(mockBcOps.reconcile(any(), eq(kc.getMetadata().getNamespace()), eq(KafkaConnectResources.buildConfigName(kc.getMetadata().getName())), eq(null))).thenReturn(Future.succeededFuture(ReconcileResult.noop(null)));
+        when(mockConnectOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new KafkaConnect())));
+        when(mockCmOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap())));
+        when(mockPdbOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new PodDisruptionBudget())));
         KafkaConnectAssemblyOperator ops = new KafkaConnectAssemblyOperator(vertx, new PlatformFeaturesAvailability(true, kubernetesVersion),
                 supplier, ResourceUtils.dummyClusterOperatorConfig(VERSIONS));
@@ -1131,7 +1131,7 @@ public void testDeleteClusterRoleBindings(VertxTestContext context) {
         ClusterRoleBindingOperator mockCrbOps = supplier.clusterRoleBindingOperator;
         ArgumentCaptor desiredCrb = ArgumentCaptor.forClass(ClusterRoleBinding.class);
-        when(mockCrbOps.reconcile(eq(KafkaConnectResources.initContainerClusterRoleBindingName(kcName, kcNamespace)), desiredCrb.capture())).thenReturn(Future.succeededFuture());
+        when(mockCrbOps.reconcile(any(), eq(KafkaConnectResources.initContainerClusterRoleBindingName(kcName, kcNamespace)), desiredCrb.capture())).thenReturn(Future.succeededFuture());
         CrdOperator mockCntrOps = supplier.kafkaConnectorOperator;
         when(mockCntrOps.listAsync(any(), any(Labels.class))).thenReturn(Future.succeededFuture(emptyList()));
@@ -1145,7 +1145,7 @@ public void testDeleteClusterRoleBindings(VertxTestContext context) {
         op.delete(reconciliation)
             .onComplete(context.succeeding(c -> context.verify(() -> {
                 assertThat(desiredCrb.getValue(), is(nullValue()));
-                Mockito.verify(mockCrbOps, times(1)).reconcile(any(), any());
+                Mockito.verify(mockCrbOps, times(1)).reconcile(any(), any(), any());
                 async.flag();
             })));
diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectBuildAssemblyOperatorKubeTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectBuildAssemblyOperatorKubeTest.java
index 91c8dc5109..7c0427640f 100644
--- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectBuildAssemblyOperatorKubeTest.java
+++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectBuildAssemblyOperatorKubeTest.java
@@ -118,7 +118,7 @@ public void testBuildOnKube(VertxTestContext context) {
                 .endSpec()
                 .build();
-        KafkaConnectBuild build = KafkaConnectBuild.fromCrd(kc, VERSIONS);
+        KafkaConnectBuild build = KafkaConnectBuild.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kc, VERSIONS);
         // Prepare and get mocks
         ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(true);
@@ -147,30 +147,30 @@ public void testBuildOnKube(VertxTestContext context) {
         // Mock and capture service ops
         ArgumentCaptor serviceCaptor = ArgumentCaptor.forClass(Service.class);
-        when(mockServiceOps.reconcile(anyString(), anyString(), serviceCaptor.capture())).thenReturn(Future.succeededFuture());
+        when(mockServiceOps.reconcile(any(), anyString(), anyString(), serviceCaptor.capture())).thenReturn(Future.succeededFuture());
         // Mock and capture deployment ops
         ArgumentCaptor depCaptor = ArgumentCaptor.forClass(Deployment.class);
-        when(mockDepOps.reconcile(anyString(), anyString(), depCaptor.capture())).thenReturn(Future.succeededFuture());
+        when(mockDepOps.reconcile(any(), anyString(), anyString(), depCaptor.capture())).thenReturn(Future.succeededFuture());
         when(mockDepOps.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture());
-        when(mockDepOps.scaleUp(anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42));
-        when(mockDepOps.scaleDown(anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42));
-        when(mockDepOps.readiness(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
-        when(mockDepOps.waitForObserved(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
-        when(mockSecretOps.reconcile(anyString(), anyString(), any())).thenReturn(Future.succeededFuture());
+        when(mockDepOps.scaleUp(any(), anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42));
+        when(mockDepOps.scaleDown(any(), anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42));
+        when(mockDepOps.readiness(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
+        when(mockDepOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
+        when(mockSecretOps.reconcile(any(), anyString(), anyString(), any())).thenReturn(Future.succeededFuture());
         // Mock and capture CM ops
-        when(mockCmOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap())));
+        when(mockCmOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap())));
         ArgumentCaptor dockerfileCaptor = ArgumentCaptor.forClass(ConfigMap.class);
-        when(mockCmOps.reconcile(anyString(), eq(KafkaConnectResources.dockerFileConfigMapName(NAME)), dockerfileCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap())));
+        when(mockCmOps.reconcile(any(), anyString(), eq(KafkaConnectResources.dockerFileConfigMapName(NAME)), dockerfileCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap())));
         // Mock and capture SA ops
         ArgumentCaptor saCaptor = ArgumentCaptor.forClass(ServiceAccount.class);
-        when(mockSaOps.reconcile(anyString(), anyString(), saCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ServiceAccount())));
+        when(mockSaOps.reconcile(any(), anyString(), anyString(), saCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ServiceAccount())));
         // Mock and capture Pod ops
         ArgumentCaptor builderPodCaptor = ArgumentCaptor.forClass(Pod.class);
-        when(mockPodOps.reconcile(eq(NAMESPACE), eq(KafkaConnectResources.buildPodName(NAME)), builderPodCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.noop(null)));
+        when(mockPodOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.buildPodName(NAME)), builderPodCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.noop(null)));
         Pod terminatedPod = new PodBuilder()
                 .withNewMetadata()
@@ -183,21 +183,21 @@ public void testBuildOnKube(VertxTestContext context) {
                 .withContainerStatuses(new ContainerStatusBuilder().withNewState().withNewTerminated().withExitCode(0).withMessage("my-connect-build@sha256:blablabla").endTerminated().endState().build())
                 .endStatus()
                 .build();
-        when(mockPodOps.waitFor(eq(NAMESPACE), eq(KafkaConnectResources.buildPodName(NAME)), anyString(), anyLong(), anyLong(), any(BiPredicate.class))).thenReturn(Future.succeededFuture((Void) null));
+        when(mockPodOps.waitFor(any(), eq(NAMESPACE), eq(KafkaConnectResources.buildPodName(NAME)), anyString(), anyLong(), anyLong(), any(BiPredicate.class))).thenReturn(Future.succeededFuture((Void) null));
         when(mockPodOps.getAsync(eq(NAMESPACE), eq(KafkaConnectResources.buildPodName(NAME)))).thenReturn(Future.succeededFuture(null), Future.succeededFuture(terminatedPod));
         // Mock and capture BuildConfig ops
-        when(mockBcOps.reconcile(eq(NAMESPACE), eq(KafkaConnectResources.buildConfigName(NAME)), eq(null))).thenReturn(Future.succeededFuture(ReconcileResult.noop(null)));
+        when(mockBcOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.buildConfigName(NAME)), eq(null))).thenReturn(Future.succeededFuture(ReconcileResult.noop(null)));
         // Mock and capture NP ops
-        when(mockNetPolOps.reconcile(eq(NAMESPACE), eq(KafkaConnectResources.deploymentName(NAME)), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy())));
+        when(mockNetPolOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.deploymentName(NAME)), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy())));
         // Mock and capture PDB ops
-        when(mockPdbOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture());
+        when(mockPdbOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture());
         // Mock and capture KafkaConnect ops for status update
         ArgumentCaptor connectCaptor = ArgumentCaptor.forClass(KafkaConnect.class);
-        when(mockConnectOps.updateStatusAsync(connectCaptor.capture())).thenReturn(Future.succeededFuture());
+        when(mockConnectOps.updateStatusAsync(any(), connectCaptor.capture())).thenReturn(Future.succeededFuture());
         // Mock KafkaConnect API client
         KafkaConnectApi mockConnectClient = mock(KafkaConnectApi.class);
@@ -206,7 +206,7 @@ public void testBuildOnKube(VertxTestContext context) {
         KafkaConnectAssemblyOperator ops = new KafkaConnectAssemblyOperator(vertx, new PlatformFeaturesAvailability(false, kubernetesVersion),
                 supplier, ResourceUtils.dummyClusterOperatorConfig(VERSIONS), x -> mockConnectClient);
-        KafkaConnectCluster connect = KafkaConnectCluster.fromCrd(kc, VERSIONS);
+        KafkaConnectCluster connect = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kc, VERSIONS);
         Checkpoint async = context.checkpoint();
         ops.reconcile(new Reconciliation("test-trigger", KafkaConnect.RESOURCE_KIND, NAMESPACE, NAME))
@@ -275,7 +275,7 @@ public void testBuildFailureOnKube(VertxTestContext context) {
                 .endSpec()
                 .build();
-        KafkaConnectBuild build = KafkaConnectBuild.fromCrd(kc, VERSIONS);
+        KafkaConnectBuild build = KafkaConnectBuild.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kc, VERSIONS);
         // Prepare and get mocks
         ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(true);
@@ -303,26 +303,26 @@ public void testBuildFailureOnKube(VertxTestContext context) {
         // Mock and capture service ops
         ArgumentCaptor serviceCaptor = ArgumentCaptor.forClass(Service.class);
-        when(mockServiceOps.reconcile(anyString(), anyString(), serviceCaptor.capture())).thenReturn(Future.succeededFuture());
+        when(mockServiceOps.reconcile(any(), anyString(), anyString(), serviceCaptor.capture())).thenReturn(Future.succeededFuture());
         // Mock and capture deployment ops
         ArgumentCaptor depCaptor = ArgumentCaptor.forClass(Deployment.class);
-        when(mockDepOps.reconcile(anyString(), anyString(), depCaptor.capture())).thenReturn(Future.succeededFuture());
+        when(mockDepOps.reconcile(any(), anyString(), anyString(), depCaptor.capture())).thenReturn(Future.succeededFuture());
         when(mockDepOps.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture());
-        when(mockDepOps.scaleUp(anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42));
-        when(mockDepOps.scaleDown(anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42));
-        when(mockDepOps.readiness(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
-        when(mockDepOps.waitForObserved(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
-        when(mockSecretOps.reconcile(anyString(), anyString(), any())).thenReturn(Future.succeededFuture());
+        when(mockDepOps.scaleUp(any(), anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42));
+        when(mockDepOps.scaleDown(any(), anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42));
+        when(mockDepOps.readiness(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
+        when(mockDepOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
+        when(mockSecretOps.reconcile(any(), anyString(), anyString(), any())).thenReturn(Future.succeededFuture());
         // Mock and capture CM ops
-        when(mockCmOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap())));
+        when(mockCmOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap())));
         ArgumentCaptor dockerfileCaptor = ArgumentCaptor.forClass(ConfigMap.class);
-        when(mockCmOps.reconcile(anyString(), eq(KafkaConnectResources.dockerFileConfigMapName(NAME)), dockerfileCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap())));
+        when(mockCmOps.reconcile(any(), anyString(), eq(KafkaConnectResources.dockerFileConfigMapName(NAME)), dockerfileCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap())));
         // Mock and capture Pod ops
         ArgumentCaptor builderPodCaptor = ArgumentCaptor.forClass(Pod.class);
-        when(mockPodOps.reconcile(eq(NAMESPACE), eq(KafkaConnectResources.buildPodName(NAME)), builderPodCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.noop(null)));
+        when(mockPodOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.buildPodName(NAME)), builderPodCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.noop(null)));
         Pod terminatedPod = new PodBuilder()
                 .withNewMetadata()
@@ -335,21 +335,21 @@ public void testBuildFailureOnKube(VertxTestContext context) {
                 .withContainerStatuses(new ContainerStatusBuilder().withNewState().withNewTerminated().withExitCode(1).endTerminated().endState().build())
                 .endStatus()
                 .build();
-        when(mockPodOps.waitFor(eq(NAMESPACE), eq(KafkaConnectResources.buildPodName(NAME)), anyString(), anyLong(), anyLong(), any(BiPredicate.class))).thenReturn(Future.succeededFuture((Void) null));
+        when(mockPodOps.waitFor(any(), eq(NAMESPACE), eq(KafkaConnectResources.buildPodName(NAME)), anyString(), anyLong(), anyLong(), any(BiPredicate.class))).thenReturn(Future.succeededFuture((Void) null));
         when(mockPodOps.getAsync(eq(NAMESPACE), eq(KafkaConnectResources.buildPodName(NAME)))).thenReturn(Future.succeededFuture(null), Future.succeededFuture(terminatedPod));
         // Mock and capture BuildConfig ops
-        when(mockBcOps.reconcile(eq(NAMESPACE), eq(KafkaConnectResources.buildConfigName(NAME)), eq(null))).thenReturn(Future.succeededFuture(ReconcileResult.noop(null)));
+        when(mockBcOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.buildConfigName(NAME)), eq(null))).thenReturn(Future.succeededFuture(ReconcileResult.noop(null)));
         // Mock and capture NP ops
-        when(mockNetPolOps.reconcile(eq(NAMESPACE), eq(KafkaConnectResources.deploymentName(NAME)), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy())));
+        when(mockNetPolOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.deploymentName(NAME)), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy())));
         // Mock and capture PDB ops
-        when(mockPdbOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture());
+        when(mockPdbOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture());
         // Mock and capture KafkaConnect ops for status update
         ArgumentCaptor connectCaptor = ArgumentCaptor.forClass(KafkaConnect.class);
-        when(mockConnectOps.updateStatusAsync(connectCaptor.capture())).thenReturn(Future.succeededFuture());
+        when(mockConnectOps.updateStatusAsync(any(), connectCaptor.capture())).thenReturn(Future.succeededFuture());
         // Mock KafkaConnect API client
         KafkaConnectApi mockConnectClient = mock(KafkaConnectApi.class);
@@ -401,8 +401,8 @@ public void testUpdateWithRebuildOnKube(VertxTestContext context) {
                 .endSpec()
                 .build();
-        KafkaConnectCluster oldConnect = KafkaConnectCluster.fromCrd(oldKc, VERSIONS);
-        KafkaConnectBuild oldBuild = KafkaConnectBuild.fromCrd(oldKc, VERSIONS);
+        KafkaConnectCluster oldConnect = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, oldKc, VERSIONS);
+        KafkaConnectBuild oldBuild = KafkaConnectBuild.fromCrd(Reconciliation.DUMMY_RECONCILIATION, oldKc, VERSIONS);
         KafkaConnect kc = new KafkaConnectBuilder(oldKc)
                 .editSpec()
@@ -412,7 +412,7 @@ public void testUpdateWithRebuildOnKube(VertxTestContext context) {
                 .endSpec()
                 .build();
-        KafkaConnectBuild build = KafkaConnectBuild.fromCrd(kc, VERSIONS);
+        KafkaConnectBuild build = KafkaConnectBuild.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kc, VERSIONS);
         // Prepare and get mocks
         ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(true);
@@ -440,31 +440,31 @@ public void testUpdateWithRebuildOnKube(VertxTestContext context) {
         // Mock and capture service ops
         ArgumentCaptor serviceCaptor = ArgumentCaptor.forClass(Service.class);
-        when(mockServiceOps.reconcile(anyString(), anyString(), serviceCaptor.capture())).thenReturn(Future.succeededFuture());
+        when(mockServiceOps.reconcile(any(), anyString(), anyString(), serviceCaptor.capture())).thenReturn(Future.succeededFuture());
         // Mock and capture deployment ops
         ArgumentCaptor depCaptor = ArgumentCaptor.forClass(Deployment.class);
-        when(mockDepOps.reconcile(anyString(), anyString(), depCaptor.capture())).thenReturn(Future.succeededFuture());
+        when(mockDepOps.reconcile(any(), anyString(), anyString(), depCaptor.capture())).thenReturn(Future.succeededFuture());
         when(mockDepOps.getAsync(eq(NAMESPACE), eq(KafkaConnectResources.deploymentName(NAME)))).thenAnswer(inv -> {
             Deployment dep = oldConnect.generateDeployment(emptyMap(), false, null, null);
            dep.getSpec().getTemplate().getMetadata().getAnnotations().put(Annotations.STRIMZI_IO_CONNECT_BUILD_REVISION, oldBuild.generateDockerfile().hashStub());
            dep.getSpec().getTemplate().getSpec().getContainers().get(0).setImage("my-connect-build@sha256:olddigest");
            return Future.succeededFuture(dep);
        });
-        when(mockDepOps.scaleUp(anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42));
-        when(mockDepOps.scaleDown(anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42));
-        when(mockDepOps.readiness(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
-        when(mockDepOps.waitForObserved(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
-        when(mockSecretOps.reconcile(anyString(), anyString(), any())).thenReturn(Future.succeededFuture());
+        when(mockDepOps.scaleUp(any(), anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42));
+        when(mockDepOps.scaleDown(any(), anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42));
+        when(mockDepOps.readiness(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
+        when(mockDepOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
+        when(mockSecretOps.reconcile(any(), anyString(), anyString(), any())).thenReturn(Future.succeededFuture());
         // Mock and capture CM ops
-        when(mockCmOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap())));
+        when(mockCmOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap())));
         ArgumentCaptor dockerfileCaptor = ArgumentCaptor.forClass(ConfigMap.class);
-        when(mockCmOps.reconcile(anyString(), eq(KafkaConnectResources.dockerFileConfigMapName(NAME)), dockerfileCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap())));
+        when(mockCmOps.reconcile(any(), anyString(), eq(KafkaConnectResources.dockerFileConfigMapName(NAME)), dockerfileCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap())));
         // Mock and capture Pod ops
         ArgumentCaptor builderPodCaptor = ArgumentCaptor.forClass(Pod.class);
-        when(mockPodOps.reconcile(eq(NAMESPACE), eq(KafkaConnectResources.buildPodName(NAME)), builderPodCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.noop(null)));
+        when(mockPodOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.buildPodName(NAME)), builderPodCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.noop(null)));
         Pod terminatedPod = new PodBuilder()
                 .withNewMetadata()
@@ -477,21 +477,21 @@ public void testUpdateWithRebuildOnKube(VertxTestContext context) {
                 .withContainerStatuses(new ContainerStatusBuilder().withNewState().withNewTerminated().withExitCode(0).withMessage("my-connect-build@sha256:blablabla").endTerminated().endState().build())
                 .endStatus()
                 .build();
-        when(mockPodOps.waitFor(eq(NAMESPACE), eq(KafkaConnectResources.buildPodName(NAME)), anyString(), anyLong(), anyLong(), any(BiPredicate.class))).thenReturn(Future.succeededFuture((Void) null));
+        when(mockPodOps.waitFor(any(), eq(NAMESPACE), eq(KafkaConnectResources.buildPodName(NAME)), anyString(), anyLong(), anyLong(), any(BiPredicate.class))).thenReturn(Future.succeededFuture((Void) null));
         when(mockPodOps.getAsync(eq(NAMESPACE), eq(KafkaConnectResources.buildPodName(NAME)))).thenReturn(Future.succeededFuture(null), Future.succeededFuture(terminatedPod));
         // Mock and capture BuildConfig ops
-        when(mockBcOps.reconcile(eq(NAMESPACE), eq(KafkaConnectResources.buildConfigName(NAME)), eq(null))).thenReturn(Future.succeededFuture(ReconcileResult.noop(null)));
+        when(mockBcOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.buildConfigName(NAME)), eq(null))).thenReturn(Future.succeededFuture(ReconcileResult.noop(null)));
         // Mock and capture NP ops
-        when(mockNetPolOps.reconcile(eq(NAMESPACE), eq(KafkaConnectResources.deploymentName(NAME)), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy())));
+        when(mockNetPolOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.deploymentName(NAME)), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy())));
         // Mock and capture PDB ops
-        when(mockPdbOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture());
+        when(mockPdbOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture());
         // Mock and capture KafkaConnect ops for status update
         ArgumentCaptor connectCaptor = ArgumentCaptor.forClass(KafkaConnect.class);
-        when(mockConnectOps.updateStatusAsync(connectCaptor.capture())).thenReturn(Future.succeededFuture());
+        when(mockConnectOps.updateStatusAsync(any(), connectCaptor.capture())).thenReturn(Future.succeededFuture());
         // Mock KafkaConnect API client
         KafkaConnectApi mockConnectClient = mock(KafkaConnectApi.class);
@@ -500,7 +500,7 @@ public void testUpdateWithRebuildOnKube(VertxTestContext context) {
         KafkaConnectAssemblyOperator ops = new KafkaConnectAssemblyOperator(vertx, new PlatformFeaturesAvailability(false, kubernetesVersion),
                 supplier, ResourceUtils.dummyClusterOperatorConfig(VERSIONS), x -> mockConnectClient);
-        KafkaConnectCluster connect = KafkaConnectCluster.fromCrd(kc, VERSIONS);
+        KafkaConnectCluster connect = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kc, VERSIONS);
         Checkpoint async = context.checkpoint();
         ops.reconcile(new Reconciliation("test-trigger", KafkaConnect.RESOURCE_KIND, NAMESPACE, NAME))
@@ -567,8 +567,8 @@ public void testContinueWithPreviousBuildOnKube(VertxTestContext context) {
                 .endSpec()
                 .build();
-        KafkaConnectCluster oldConnect = KafkaConnectCluster.fromCrd(oldKc, VERSIONS);
-        KafkaConnectBuild oldBuild = KafkaConnectBuild.fromCrd(oldKc, VERSIONS);
+        KafkaConnectCluster oldConnect = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, oldKc, VERSIONS);
+        KafkaConnectBuild oldBuild = KafkaConnectBuild.fromCrd(Reconciliation.DUMMY_RECONCILIATION, oldKc, VERSIONS);
         KafkaConnect kc = new KafkaConnectBuilder(oldKc)
                 .editSpec()
@@ -578,7 +578,7 @@ public void testContinueWithPreviousBuildOnKube(VertxTestContext context) {
                 .endSpec()
                 .build();
-        KafkaConnectBuild build = KafkaConnectBuild.fromCrd(kc, VERSIONS);
+        KafkaConnectBuild build = KafkaConnectBuild.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kc, VERSIONS);
         // Prepare and get mocks
         ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(true);
@@ -606,31 +606,31 @@ public void testContinueWithPreviousBuildOnKube(VertxTestContext context) {
         // Mock and capture service ops
         ArgumentCaptor serviceCaptor = ArgumentCaptor.forClass(Service.class);
-        when(mockServiceOps.reconcile(anyString(), anyString(), serviceCaptor.capture())).thenReturn(Future.succeededFuture());
+        when(mockServiceOps.reconcile(any(), anyString(), anyString(), serviceCaptor.capture())).thenReturn(Future.succeededFuture());
         // Mock and capture deployment ops
         ArgumentCaptor depCaptor = ArgumentCaptor.forClass(Deployment.class);
-        when(mockDepOps.reconcile(anyString(), anyString(), depCaptor.capture())).thenReturn(Future.succeededFuture());
+        when(mockDepOps.reconcile(any(), anyString(), anyString(), depCaptor.capture())).thenReturn(Future.succeededFuture());
         when(mockDepOps.getAsync(eq(NAMESPACE), eq(KafkaConnectResources.deploymentName(NAME)))).thenAnswer(inv -> {
             Deployment dep = oldConnect.generateDeployment(emptyMap(), false, null, null);
            dep.getSpec().getTemplate().getMetadata().getAnnotations().put(Annotations.STRIMZI_IO_CONNECT_BUILD_REVISION, "oldhashstub");
            dep.getSpec().getTemplate().getSpec().getContainers().get(0).setImage("my-connect-build@sha256:olddigest");
            return Future.succeededFuture(dep);
        });
-        when(mockDepOps.scaleUp(anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42));
-        when(mockDepOps.scaleDown(anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42));
-        when(mockDepOps.readiness(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
-        when(mockDepOps.waitForObserved(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
-        when(mockSecretOps.reconcile(anyString(), anyString(), any())).thenReturn(Future.succeededFuture());
+        when(mockDepOps.scaleUp(any(), anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42));
+        when(mockDepOps.scaleDown(any(), anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42));
+        when(mockDepOps.readiness(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
+        when(mockDepOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
+        when(mockSecretOps.reconcile(any(), anyString(), anyString(), any())).thenReturn(Future.succeededFuture());
         // Mock and capture CM ops
-        when(mockCmOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap())));
+        when(mockCmOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap())));
         ArgumentCaptor dockerfileCaptor = ArgumentCaptor.forClass(ConfigMap.class);
-        when(mockCmOps.reconcile(anyString(), eq(KafkaConnectResources.dockerFileConfigMapName(NAME)), dockerfileCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap())));
+        when(mockCmOps.reconcile(any(), anyString(), eq(KafkaConnectResources.dockerFileConfigMapName(NAME)), dockerfileCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap())));
         // Mock and capture Pod ops
         ArgumentCaptor builderPodCaptor = ArgumentCaptor.forClass(Pod.class);
-        when(mockPodOps.reconcile(eq(NAMESPACE), eq(KafkaConnectResources.buildPodName(NAME)), builderPodCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.noop(null)));
+        when(mockPodOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.buildPodName(NAME)), builderPodCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.noop(null)));
         Pod runningBuild = new PodBuilder()
                 .withNewMetadata()
@@ -654,21 +654,21 @@ public void testContinueWithPreviousBuildOnKube(VertxTestContext context) {
                 .withContainerStatuses(new ContainerStatusBuilder().withNewState().withNewTerminated().withExitCode(0).withMessage("my-connect-build@sha256:blablabla").endTerminated().endState().build())
                 .endStatus()
                 .build();
-        when(mockPodOps.waitFor(eq(NAMESPACE), eq(KafkaConnectResources.buildPodName(NAME)), anyString(), anyLong(), anyLong(), any(BiPredicate.class))).thenReturn(Future.succeededFuture((Void) null));
+        when(mockPodOps.waitFor(any(), eq(NAMESPACE), eq(KafkaConnectResources.buildPodName(NAME)), anyString(), anyLong(), anyLong(), any(BiPredicate.class))).thenReturn(Future.succeededFuture((Void) null));
         when(mockPodOps.getAsync(eq(NAMESPACE), eq(KafkaConnectResources.buildPodName(NAME)))).thenReturn(Future.succeededFuture(runningBuild), Future.succeededFuture(terminatedPod));
         // Mock and capture BuildConfig ops
-        when(mockBcOps.reconcile(eq(NAMESPACE), eq(KafkaConnectResources.buildConfigName(NAME)), eq(null))).thenReturn(Future.succeededFuture(ReconcileResult.noop(null)));
+        when(mockBcOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.buildConfigName(NAME)), eq(null))).thenReturn(Future.succeededFuture(ReconcileResult.noop(null)));
         // Mock and capture NP ops
-        when(mockNetPolOps.reconcile(eq(NAMESPACE), eq(KafkaConnectResources.deploymentName(NAME)), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy())));
+        when(mockNetPolOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.deploymentName(NAME)), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy())));
         // Mock and capture PDB ops
-        when(mockPdbOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture());
+        when(mockPdbOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture());
         // Mock and capture KafkaConnect ops for status update
         ArgumentCaptor connectCaptor = ArgumentCaptor.forClass(KafkaConnect.class);
-        when(mockConnectOps.updateStatusAsync(connectCaptor.capture())).thenReturn(Future.succeededFuture());
+        when(mockConnectOps.updateStatusAsync(any(), connectCaptor.capture())).thenReturn(Future.succeededFuture());
         // Mock KafkaConnect API client
         KafkaConnectApi mockConnectClient = mock(KafkaConnectApi.class);
@@ -1032,7 +1032,7 @@ public void testRestartPreviousBuildDueToFailureOnKube(VertxTestContext context)
         KafkaConnectAssemblyOperator ops = new KafkaConnectAssemblyOperator(vertx, new PlatformFeaturesAvailability(false, kubernetesVersion),
                 supplier, ResourceUtils.dummyClusterOperatorConfig(VERSIONS), x -> mockConnectClient);
-        KafkaConnectCluster connect = KafkaConnectCluster.fromCrd(kc, VERSIONS);
+        KafkaConnectCluster connect = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kc, VERSIONS);
         Checkpoint async = context.checkpoint();
         ops.reconcile(new Reconciliation("test-trigger", KafkaConnect.RESOURCE_KIND, NAMESPACE, NAME))
@@ -1093,8 +1093,8 @@ public void testUpdateWithoutRebuildOnKube(VertxTestContext context) {
                 .endSpec()
                 .build();
-        KafkaConnectCluster connect = KafkaConnectCluster.fromCrd(kc, VERSIONS);
-        KafkaConnectBuild build = KafkaConnectBuild.fromCrd(kc, VERSIONS);
+        KafkaConnectCluster connect = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kc, VERSIONS);
+        KafkaConnectBuild build = KafkaConnectBuild.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kc, VERSIONS);
         // Prepare and get mocks
         ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(true);
@@ -1122,44 +1122,44 @@ public void testUpdateWithoutRebuildOnKube(VertxTestContext context) {
         // Mock and capture service ops
         ArgumentCaptor serviceCaptor = ArgumentCaptor.forClass(Service.class);
-        when(mockServiceOps.reconcile(anyString(), anyString(), serviceCaptor.capture())).thenReturn(Future.succeededFuture());
+        when(mockServiceOps.reconcile(any(), anyString(), anyString(), serviceCaptor.capture())).thenReturn(Future.succeededFuture());
         // Mock and capture deployment ops
         ArgumentCaptor depCaptor = ArgumentCaptor.forClass(Deployment.class);
-        when(mockDepOps.reconcile(anyString(), anyString(), depCaptor.capture())).thenReturn(Future.succeededFuture());
+        when(mockDepOps.reconcile(any(), anyString(), anyString(), depCaptor.capture())).thenReturn(Future.succeededFuture());
         when(mockDepOps.getAsync(eq(NAMESPACE), eq(KafkaConnectResources.deploymentName(NAME)))).thenAnswer(inv -> {
             Deployment dep = connect.generateDeployment(emptyMap(), false, null, null);
            dep.getSpec().getTemplate().getMetadata().getAnnotations().put(Annotations.STRIMZI_IO_CONNECT_BUILD_REVISION, build.generateDockerfile().hashStub());
            dep.getSpec().getTemplate().getSpec().getContainers().get(0).setImage("my-connect-build@sha256:blablabla");
            return Future.succeededFuture(dep);
        });
-        when(mockDepOps.scaleUp(anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42));
-        when(mockDepOps.scaleDown(anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42));
-        when(mockDepOps.readiness(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
-        when(mockDepOps.waitForObserved(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
-        when(mockSecretOps.reconcile(anyString(), anyString(), any())).thenReturn(Future.succeededFuture());
+        when(mockDepOps.scaleUp(any(), anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42));
+        when(mockDepOps.scaleDown(any(), anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42));
+        when(mockDepOps.readiness(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
+        when(mockDepOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
+        when(mockSecretOps.reconcile(any(), anyString(), anyString(), any())).thenReturn(Future.succeededFuture());
         // Mock and capture CM ops
-        when(mockCmOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap())));
+        when(mockCmOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap())));
         ArgumentCaptor dockerfileCaptor = ArgumentCaptor.forClass(ConfigMap.class);
-        when(mockCmOps.reconcile(anyString(), eq(KafkaConnectResources.dockerFileConfigMapName(NAME)), dockerfileCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap())));
+        when(mockCmOps.reconcile(any(), anyString(), eq(KafkaConnectResources.dockerFileConfigMapName(NAME)), dockerfileCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap())));
         // Mock and capture Pod ops
         ArgumentCaptor builderPodCaptor = ArgumentCaptor.forClass(Pod.class);
-        when(mockPodOps.reconcile(eq(NAMESPACE), eq(KafkaConnectResources.buildPodName(NAME)), builderPodCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.noop(null)));
+        when(mockPodOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.buildPodName(NAME)), builderPodCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.noop(null)));
         // Mock and capture BuildConfig ops
-        when(mockBcOps.reconcile(eq(NAMESPACE), eq(KafkaConnectResources.buildConfigName(NAME)), eq(null))).thenReturn(Future.succeededFuture(ReconcileResult.noop(null)));
+        when(mockBcOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.buildConfigName(NAME)), eq(null))).thenReturn(Future.succeededFuture(ReconcileResult.noop(null)));
         // Mock and capture NP ops
-        when(mockNetPolOps.reconcile(eq(NAMESPACE), eq(KafkaConnectResources.deploymentName(NAME)), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy())));
+        when(mockNetPolOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.deploymentName(NAME)), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy())));
         // Mock and capture PDB ops
-        when(mockPdbOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture());
+        when(mockPdbOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture());
         // Mock and capture KafkaConnect ops for status update
         ArgumentCaptor connectCaptor = ArgumentCaptor.forClass(KafkaConnect.class);
-        when(mockConnectOps.updateStatusAsync(connectCaptor.capture())).thenReturn(Future.succeededFuture());
+        when(mockConnectOps.updateStatusAsync(any(), connectCaptor.capture())).thenReturn(Future.succeededFuture());
         // Mock KafkaConnect API client
         KafkaConnectApi mockConnectClient = mock(KafkaConnectApi.class);
@@ -1223,8 +1223,8 @@ public void testUpdateWithForcedRebuildOnKube(VertxTestContext context) {
                 .endSpec()
                 .build();
-        KafkaConnectCluster connect = KafkaConnectCluster.fromCrd(kc, VERSIONS);
-        KafkaConnectBuild build = KafkaConnectBuild.fromCrd(kc, VERSIONS);
+        KafkaConnectCluster connect = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kc, VERSIONS);
+        KafkaConnectBuild build = KafkaConnectBuild.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kc, VERSIONS);
         // Prepare and get mocks
         ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(true);
@@ -1252,11 +1252,11 @@ public void testUpdateWithForcedRebuildOnKube(VertxTestContext context) {
         // Mock and capture service ops
         ArgumentCaptor serviceCaptor = ArgumentCaptor.forClass(Service.class);
-        when(mockServiceOps.reconcile(anyString(), anyString(), serviceCaptor.capture())).thenReturn(Future.succeededFuture());
+        when(mockServiceOps.reconcile(any(), anyString(), anyString(), serviceCaptor.capture())).thenReturn(Future.succeededFuture());
         // Mock and capture deployment ops
         ArgumentCaptor depCaptor = ArgumentCaptor.forClass(Deployment.class);
-        when(mockDepOps.reconcile(anyString(), anyString(), depCaptor.capture())).thenReturn(Future.succeededFuture());
+        when(mockDepOps.reconcile(any(), anyString(), anyString(), depCaptor.capture())).thenReturn(Future.succeededFuture());
         when(mockDepOps.getAsync(eq(NAMESPACE), eq(KafkaConnectResources.deploymentName(NAME)))).thenAnswer(inv -> {
             Deployment dep = connect.generateDeployment(emptyMap(), false, null, null);
            dep.getSpec().getTemplate().getMetadata().getAnnotations().put(Annotations.STRIMZI_IO_CONNECT_BUILD_REVISION, build.generateDockerfile().hashStub());
@@ -1264,20 +1264,20 @@ public void testUpdateWithForcedRebuildOnKube(VertxTestContext context) {
            dep.getSpec().getTemplate().getSpec().getContainers().get(0).setImage("my-connect-build@sha256:blablabla");
            return Future.succeededFuture(dep);
        });
-        when(mockDepOps.scaleUp(anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42));
anyInt())).thenReturn(Future.succeededFuture(42)); + when(mockDepOps.scaleDown(any(), anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); + when(mockDepOps.readiness(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDepOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockSecretOps.reconcile(any(), anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); // Mock and capture CM ops - when(mockCmOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); + when(mockCmOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); ArgumentCaptor dockerfileCaptor = ArgumentCaptor.forClass(ConfigMap.class); - when(mockCmOps.reconcile(anyString(), eq(KafkaConnectResources.dockerFileConfigMapName(NAME)), dockerfileCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); + when(mockCmOps.reconcile(any(), anyString(), eq(KafkaConnectResources.dockerFileConfigMapName(NAME)), dockerfileCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); // Mock and capture Pod ops ArgumentCaptor builderPodCaptor = ArgumentCaptor.forClass(Pod.class); - when(mockPodOps.reconcile(eq(NAMESPACE), eq(KafkaConnectResources.buildPodName(NAME)), builderPodCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.noop(null))); + when(mockPodOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.buildPodName(NAME)), builderPodCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.noop(null))); Pod terminatedPod = new PodBuilder() .withNewMetadata() @@ -1290,21 +1290,21 @@ public void testUpdateWithForcedRebuildOnKube(VertxTestContext context) { .withContainerStatuses(new ContainerStatusBuilder().withNewState().withNewTerminated().withExitCode(0).withMessage("my-connect-build@sha256:rebuiltblablabla").endTerminated().endState().build()) .endStatus() .build(); - when(mockPodOps.waitFor(eq(NAMESPACE), eq(KafkaConnectResources.buildPodName(NAME)), anyString(), anyLong(), anyLong(), any(BiPredicate.class))).thenReturn(Future.succeededFuture((Void) null)); + when(mockPodOps.waitFor(any(), eq(NAMESPACE), eq(KafkaConnectResources.buildPodName(NAME)), anyString(), anyLong(), anyLong(), any(BiPredicate.class))).thenReturn(Future.succeededFuture((Void) null)); when(mockPodOps.getAsync(eq(NAMESPACE), eq(KafkaConnectResources.buildPodName(NAME)))).thenReturn(Future.succeededFuture(terminatedPod)); // Mock and capture BuildConfig ops - when(mockBcOps.reconcile(eq(NAMESPACE), eq(KafkaConnectResources.buildConfigName(NAME)), eq(null))).thenReturn(Future.succeededFuture(ReconcileResult.noop(null))); + when(mockBcOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.buildConfigName(NAME)), eq(null))).thenReturn(Future.succeededFuture(ReconcileResult.noop(null))); // Mock and capture NP ops - when(mockNetPolOps.reconcile(eq(NAMESPACE), eq(KafkaConnectResources.deploymentName(NAME)), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); + when(mockNetPolOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.deploymentName(NAME)), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); // Mock and capture PDB ops - when(mockPdbOps.reconcile(anyString(), any(), 
any())).thenReturn(Future.succeededFuture()); + when(mockPdbOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture()); // Mock and capture KafkaConnect ops for status update ArgumentCaptor connectCaptor = ArgumentCaptor.forClass(KafkaConnect.class); - when(mockConnectOps.updateStatusAsync(connectCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockConnectOps.updateStatusAsync(any(), connectCaptor.capture())).thenReturn(Future.succeededFuture()); // Mock KafkaConnect API client KafkaConnectApi mockConnectClient = mock(KafkaConnectApi.class); diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectBuildAssemblyOperatorOpenShiftTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectBuildAssemblyOperatorOpenShiftTest.java index 9ead5ba274..f2ffa3708c 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectBuildAssemblyOperatorOpenShiftTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectBuildAssemblyOperatorOpenShiftTest.java @@ -117,7 +117,7 @@ public void testBuildOnOpenShift(VertxTestContext context) { .endSpec() .build(); - KafkaConnectBuild build = KafkaConnectBuild.fromCrd(kc, VERSIONS); + KafkaConnectBuild build = KafkaConnectBuild.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kc, VERSIONS); // Prepare and get mocks ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(true); @@ -146,27 +146,27 @@ public void testBuildOnOpenShift(VertxTestContext context) { // Mock and capture service ops ArgumentCaptor serviceCaptor = ArgumentCaptor.forClass(Service.class); - when(mockServiceOps.reconcile(anyString(), anyString(), serviceCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockServiceOps.reconcile(any(), anyString(), anyString(), serviceCaptor.capture())).thenReturn(Future.succeededFuture()); // Mock and capture deployment ops ArgumentCaptor depCaptor = ArgumentCaptor.forClass(Deployment.class); - when(mockDepOps.reconcile(anyString(), anyString(), depCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockDepOps.reconcile(any(), anyString(), anyString(), depCaptor.capture())).thenReturn(Future.succeededFuture()); when(mockDepOps.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture()); - when(mockDepOps.scaleUp(anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); - when(mockDepOps.scaleDown(anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); - when(mockDepOps.readiness(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockDepOps.waitForObserved(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockSecretOps.reconcile(anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); + when(mockDepOps.scaleUp(any(), anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); + when(mockDepOps.scaleDown(any(), anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); + when(mockDepOps.readiness(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDepOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockSecretOps.reconcile(any(), anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); // Mock and 
capture CM ops - when(mockCmOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); + when(mockCmOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); // Mock and capture Pod ops - when(mockPodOps.reconcile(eq(NAMESPACE), eq(KafkaConnectResources.buildPodName(NAME)), eq(null))).thenReturn(Future.succeededFuture(ReconcileResult.noop(null))); + when(mockPodOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.buildPodName(NAME)), eq(null))).thenReturn(Future.succeededFuture(ReconcileResult.noop(null))); // Mock and capture BuildConfig ops ArgumentCaptor buildConfigCaptor = ArgumentCaptor.forClass(BuildConfig.class); - when(mockBcOps.reconcile(eq(NAMESPACE), eq(KafkaConnectResources.buildConfigName(NAME)), buildConfigCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.noop(null))); + when(mockBcOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.buildConfigName(NAME)), buildConfigCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.noop(null))); when(mockBcOps.getAsync(eq(NAMESPACE), eq(KafkaConnectResources.buildConfigName(NAME)))).thenReturn(Future.succeededFuture(null)); Build builder = new BuildBuilder() @@ -191,18 +191,18 @@ public void testBuildOnOpenShift(VertxTestContext context) { when(mockBcOps.startBuild(eq(NAMESPACE), eq(KafkaConnectResources.buildConfigName(NAME)), buildRequestCaptor.capture())).thenReturn(Future.succeededFuture(builder)); // Mock and capture Build ops - when(mockBuildOps.waitFor(eq(NAMESPACE), eq("build-1"), anyString(), anyLong(), anyLong(), any(BiPredicate.class))).thenReturn(Future.succeededFuture()); + when(mockBuildOps.waitFor(any(), eq(NAMESPACE), eq("build-1"), anyString(), anyLong(), anyLong(), any(BiPredicate.class))).thenReturn(Future.succeededFuture()); when(mockBuildOps.getAsync(eq(NAMESPACE), eq("build-1"))).thenReturn(Future.succeededFuture(builder)); // Mock and capture NP ops - when(mockNetPolOps.reconcile(eq(NAMESPACE), eq(KafkaConnectResources.deploymentName(NAME)), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); + when(mockNetPolOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.deploymentName(NAME)), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); // Mock and capture PDB ops - when(mockPdbOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture()); + when(mockPdbOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture()); // Mock and capture KafkaConnect ops for status update ArgumentCaptor connectCaptor = ArgumentCaptor.forClass(KafkaConnect.class); - when(mockConnectOps.updateStatusAsync(connectCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockConnectOps.updateStatusAsync(any(), connectCaptor.capture())).thenReturn(Future.succeededFuture()); // Mock KafkaConnect API client KafkaConnectApi mockConnectClient = mock(KafkaConnectApi.class); @@ -211,7 +211,7 @@ public void testBuildOnOpenShift(VertxTestContext context) { KafkaConnectAssemblyOperator ops = new KafkaConnectAssemblyOperator(vertx, new PlatformFeaturesAvailability(true, kubernetesVersion), supplier, ResourceUtils.dummyClusterOperatorConfig(VERSIONS), x -> mockConnectClient); - KafkaConnectCluster connect = KafkaConnectCluster.fromCrd(kc, VERSIONS); + KafkaConnectCluster connect = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, 
kc, VERSIONS); Checkpoint async = context.checkpoint(); ops.reconcile(new Reconciliation("test-trigger", KafkaConnect.RESOURCE_KIND, NAMESPACE, NAME)) @@ -266,7 +266,7 @@ public void testBuildFailureOnOpenShift(VertxTestContext context) { .endSpec() .build(); - KafkaConnectBuild build = KafkaConnectBuild.fromCrd(kc, VERSIONS); + KafkaConnectBuild build = KafkaConnectBuild.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kc, VERSIONS); // Prepare and get mocks ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(true); @@ -295,27 +295,27 @@ public void testBuildFailureOnOpenShift(VertxTestContext context) { // Mock and capture service ops ArgumentCaptor serviceCaptor = ArgumentCaptor.forClass(Service.class); - when(mockServiceOps.reconcile(anyString(), anyString(), serviceCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockServiceOps.reconcile(any(), anyString(), anyString(), serviceCaptor.capture())).thenReturn(Future.succeededFuture()); // Mock and capture deployment ops ArgumentCaptor depCaptor = ArgumentCaptor.forClass(Deployment.class); - when(mockDepOps.reconcile(anyString(), anyString(), depCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockDepOps.reconcile(any(), anyString(), anyString(), depCaptor.capture())).thenReturn(Future.succeededFuture()); when(mockDepOps.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture()); - when(mockDepOps.scaleUp(anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); - when(mockDepOps.scaleDown(anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); - when(mockDepOps.readiness(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockDepOps.waitForObserved(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockSecretOps.reconcile(anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); + when(mockDepOps.scaleUp(any(), anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); + when(mockDepOps.scaleDown(any(), anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); + when(mockDepOps.readiness(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDepOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockSecretOps.reconcile(any(), anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); // Mock and capture CM ops - when(mockCmOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); + when(mockCmOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); // Mock and capture Pod ops - when(mockPodOps.reconcile(eq(NAMESPACE), eq(KafkaConnectResources.buildPodName(NAME)), eq(null))).thenReturn(Future.succeededFuture(ReconcileResult.noop(null))); + when(mockPodOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.buildPodName(NAME)), eq(null))).thenReturn(Future.succeededFuture(ReconcileResult.noop(null))); // Mock and capture BuildConfig ops ArgumentCaptor buildConfigCaptor = ArgumentCaptor.forClass(BuildConfig.class); - when(mockBcOps.reconcile(eq(NAMESPACE), eq(KafkaConnectResources.buildConfigName(NAME)), buildConfigCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.noop(null))); + when(mockBcOps.reconcile(any(), eq(NAMESPACE), 
eq(KafkaConnectResources.buildConfigName(NAME)), buildConfigCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.noop(null))); Build builder = new BuildBuilder() .withNewMetadata() @@ -333,18 +333,18 @@ public void testBuildFailureOnOpenShift(VertxTestContext context) { when(mockBcOps.startBuild(eq(NAMESPACE), eq(KafkaConnectResources.buildConfigName(NAME)), buildRequestCaptor.capture())).thenReturn(Future.succeededFuture(builder)); // Mock and capture Build ops - when(mockBuildOps.waitFor(eq(NAMESPACE), eq("build-1"), anyString(), anyLong(), anyLong(), any(BiPredicate.class))).thenReturn(Future.succeededFuture()); + when(mockBuildOps.waitFor(any(), eq(NAMESPACE), eq("build-1"), anyString(), anyLong(), anyLong(), any(BiPredicate.class))).thenReturn(Future.succeededFuture()); when(mockBuildOps.getAsync(eq(NAMESPACE), eq("build-1"))).thenReturn(Future.succeededFuture(builder)); // Mock and capture NP ops - when(mockNetPolOps.reconcile(eq(NAMESPACE), eq(KafkaConnectResources.deploymentName(NAME)), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); + when(mockNetPolOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.deploymentName(NAME)), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); // Mock and capture PDB ops - when(mockPdbOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture()); + when(mockPdbOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture()); // Mock and capture KafkaConnect ops for status update ArgumentCaptor connectCaptor = ArgumentCaptor.forClass(KafkaConnect.class); - when(mockConnectOps.updateStatusAsync(connectCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockConnectOps.updateStatusAsync(any(), connectCaptor.capture())).thenReturn(Future.succeededFuture()); // Mock KafkaConnect API client KafkaConnectApi mockConnectClient = mock(KafkaConnectApi.class); @@ -391,8 +391,8 @@ public void testUpdateWithRebuildOnOpenShift(VertxTestContext context) { .endSpec() .build(); - KafkaConnectCluster oldConnect = KafkaConnectCluster.fromCrd(oldKc, VERSIONS); - KafkaConnectBuild oldBuild = KafkaConnectBuild.fromCrd(oldKc, VERSIONS); + KafkaConnectCluster oldConnect = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, oldKc, VERSIONS); + KafkaConnectBuild oldBuild = KafkaConnectBuild.fromCrd(Reconciliation.DUMMY_RECONCILIATION, oldKc, VERSIONS); KafkaConnect kc = new KafkaConnectBuilder(oldKc) .editSpec() @@ -402,7 +402,7 @@ public void testUpdateWithRebuildOnOpenShift(VertxTestContext context) { .endSpec() .build(); - KafkaConnectBuild build = KafkaConnectBuild.fromCrd(kc, VERSIONS); + KafkaConnectBuild build = KafkaConnectBuild.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kc, VERSIONS); // Prepare and get mocks ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(true); @@ -431,32 +431,32 @@ public void testUpdateWithRebuildOnOpenShift(VertxTestContext context) { // Mock and capture service ops ArgumentCaptor serviceCaptor = ArgumentCaptor.forClass(Service.class); - when(mockServiceOps.reconcile(anyString(), anyString(), serviceCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockServiceOps.reconcile(any(), anyString(), anyString(), serviceCaptor.capture())).thenReturn(Future.succeededFuture()); // Mock and capture deployment ops ArgumentCaptor depCaptor = ArgumentCaptor.forClass(Deployment.class); - when(mockDepOps.reconcile(anyString(), anyString(), 
depCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockDepOps.reconcile(any(), anyString(), anyString(), depCaptor.capture())).thenReturn(Future.succeededFuture()); when(mockDepOps.getAsync(eq(NAMESPACE), eq(KafkaConnectResources.deploymentName(NAME)))).thenAnswer(inv -> { Deployment dep = oldConnect.generateDeployment(emptyMap(), false, null, null); dep.getSpec().getTemplate().getMetadata().getAnnotations().put(Annotations.STRIMZI_IO_CONNECT_BUILD_REVISION, oldBuild.generateDockerfile().hashStub()); dep.getSpec().getTemplate().getSpec().getContainers().get(0).setImage("my-connect-build@sha256:olddigest"); return Future.succeededFuture(dep); }); - when(mockDepOps.scaleUp(anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); - when(mockDepOps.scaleDown(anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); - when(mockDepOps.readiness(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockDepOps.waitForObserved(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockSecretOps.reconcile(anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); + when(mockDepOps.scaleUp(any(), anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); + when(mockDepOps.scaleDown(any(), anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); + when(mockDepOps.readiness(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDepOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockSecretOps.reconcile(any(), anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); // Mock and capture CM ops - when(mockCmOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); + when(mockCmOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); // Mock and capture Pod ops - when(mockPodOps.reconcile(eq(NAMESPACE), eq(KafkaConnectResources.buildPodName(NAME)), eq(null))).thenReturn(Future.succeededFuture(ReconcileResult.noop(null))); + when(mockPodOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.buildPodName(NAME)), eq(null))).thenReturn(Future.succeededFuture(ReconcileResult.noop(null))); // Mock and capture BuildConfig ops ArgumentCaptor buildConfigCaptor = ArgumentCaptor.forClass(BuildConfig.class); - when(mockBcOps.reconcile(eq(NAMESPACE), eq(KafkaConnectResources.buildConfigName(NAME)), buildConfigCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.noop(null))); + when(mockBcOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.buildConfigName(NAME)), buildConfigCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.noop(null))); when(mockBcOps.getAsync(eq(NAMESPACE), eq(KafkaConnectResources.buildConfigName(NAME)))).thenReturn(Future.succeededFuture(null)); Build builder = new BuildBuilder() @@ -481,18 +481,18 @@ public void testUpdateWithRebuildOnOpenShift(VertxTestContext context) { when(mockBcOps.startBuild(eq(NAMESPACE), eq(KafkaConnectResources.buildConfigName(NAME)), buildRequestCaptor.capture())).thenReturn(Future.succeededFuture(builder)); // Mock and capture Build ops - when(mockBuildOps.waitFor(eq(NAMESPACE), eq("build-1"), anyString(), anyLong(), anyLong(), 
any(BiPredicate.class))).thenReturn(Future.succeededFuture()); + when(mockBuildOps.waitFor(any(), eq(NAMESPACE), eq("build-1"), anyString(), anyLong(), anyLong(), any(BiPredicate.class))).thenReturn(Future.succeededFuture()); when(mockBuildOps.getAsync(eq(NAMESPACE), eq("build-1"))).thenReturn(Future.succeededFuture(builder)); // Mock and capture NP ops - when(mockNetPolOps.reconcile(eq(NAMESPACE), eq(KafkaConnectResources.deploymentName(NAME)), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); + when(mockNetPolOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.deploymentName(NAME)), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); // Mock and capture PDB ops - when(mockPdbOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture()); + when(mockPdbOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture()); // Mock and capture KafkaConnect ops for status update ArgumentCaptor connectCaptor = ArgumentCaptor.forClass(KafkaConnect.class); - when(mockConnectOps.updateStatusAsync(connectCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockConnectOps.updateStatusAsync(any(), connectCaptor.capture())).thenReturn(Future.succeededFuture()); // Mock KafkaConnect API client KafkaConnectApi mockConnectClient = mock(KafkaConnectApi.class); @@ -501,7 +501,7 @@ public void testUpdateWithRebuildOnOpenShift(VertxTestContext context) { KafkaConnectAssemblyOperator ops = new KafkaConnectAssemblyOperator(vertx, new PlatformFeaturesAvailability(true, kubernetesVersion), supplier, ResourceUtils.dummyClusterOperatorConfig(VERSIONS), x -> mockConnectClient); - KafkaConnectCluster connect = KafkaConnectCluster.fromCrd(kc, VERSIONS); + KafkaConnectCluster connect = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kc, VERSIONS); Checkpoint async = context.checkpoint(); ops.reconcile(new Reconciliation("test-trigger", KafkaConnect.RESOURCE_KIND, NAMESPACE, NAME)) @@ -556,8 +556,8 @@ public void testUpdateWithoutRebuildOnOpenShift(VertxTestContext context) { .endSpec() .build(); - KafkaConnectCluster connect = KafkaConnectCluster.fromCrd(kc, VERSIONS); - KafkaConnectBuild build = KafkaConnectBuild.fromCrd(kc, VERSIONS); + KafkaConnectCluster connect = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kc, VERSIONS); + KafkaConnectBuild build = KafkaConnectBuild.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kc, VERSIONS); // Prepare and get mocks ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(true); @@ -586,42 +586,42 @@ public void testUpdateWithoutRebuildOnOpenShift(VertxTestContext context) { // Mock and capture service ops ArgumentCaptor serviceCaptor = ArgumentCaptor.forClass(Service.class); - when(mockServiceOps.reconcile(anyString(), anyString(), serviceCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockServiceOps.reconcile(any(), anyString(), anyString(), serviceCaptor.capture())).thenReturn(Future.succeededFuture()); // Mock and capture deployment ops ArgumentCaptor depCaptor = ArgumentCaptor.forClass(Deployment.class); - when(mockDepOps.reconcile(anyString(), anyString(), depCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockDepOps.reconcile(any(), anyString(), anyString(), depCaptor.capture())).thenReturn(Future.succeededFuture()); when(mockDepOps.getAsync(eq(NAMESPACE), eq(KafkaConnectResources.deploymentName(NAME)))).thenAnswer(inv -> { Deployment dep = 
connect.generateDeployment(emptyMap(), false, null, null); dep.getSpec().getTemplate().getMetadata().getAnnotations().put(Annotations.STRIMZI_IO_CONNECT_BUILD_REVISION, build.generateDockerfile().hashStub()); dep.getSpec().getTemplate().getSpec().getContainers().get(0).setImage("my-connect-build@sha256:blablabla"); return Future.succeededFuture(dep); }); - when(mockDepOps.scaleUp(anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); - when(mockDepOps.scaleDown(anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); - when(mockDepOps.readiness(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockDepOps.waitForObserved(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockSecretOps.reconcile(anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); + when(mockDepOps.scaleUp(any(), anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); + when(mockDepOps.scaleDown(any(), anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); + when(mockDepOps.readiness(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDepOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockSecretOps.reconcile(any(), anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); // Mock and capture CM ops - when(mockCmOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); + when(mockCmOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); // Mock and capture Pod ops - when(mockPodOps.reconcile(eq(NAMESPACE), eq(KafkaConnectResources.buildPodName(NAME)), eq(null))).thenReturn(Future.succeededFuture(ReconcileResult.noop(null))); + when(mockPodOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.buildPodName(NAME)), eq(null))).thenReturn(Future.succeededFuture(ReconcileResult.noop(null))); // Mock and capture BuildConfig ops ArgumentCaptor buildConfigCaptor = ArgumentCaptor.forClass(BuildConfig.class); - when(mockBcOps.reconcile(eq(NAMESPACE), eq(KafkaConnectResources.buildConfigName(NAME)), buildConfigCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.noop(null))); + when(mockBcOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.buildConfigName(NAME)), buildConfigCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.noop(null))); // Mock and capture NP ops - when(mockNetPolOps.reconcile(eq(NAMESPACE), eq(KafkaConnectResources.deploymentName(NAME)), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); + when(mockNetPolOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.deploymentName(NAME)), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); // Mock and capture PDB ops - when(mockPdbOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture()); + when(mockPdbOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture()); // Mock and capture KafkaConnect ops for status update ArgumentCaptor connectCaptor = ArgumentCaptor.forClass(KafkaConnect.class); - when(mockConnectOps.updateStatusAsync(connectCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockConnectOps.updateStatusAsync(any(), 
connectCaptor.capture())).thenReturn(Future.succeededFuture()); // Mock KafkaConnect API client KafkaConnectApi mockConnectClient = mock(KafkaConnectApi.class); @@ -681,8 +681,8 @@ public void testUpdateWithForcedRebuildOnOpenShift(VertxTestContext context) { .endSpec() .build(); - KafkaConnectCluster connect = KafkaConnectCluster.fromCrd(kc, VERSIONS); - KafkaConnectBuild build = KafkaConnectBuild.fromCrd(kc, VERSIONS); + KafkaConnectCluster connect = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kc, VERSIONS); + KafkaConnectBuild build = KafkaConnectBuild.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kc, VERSIONS); // Prepare and get mocks ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(true); @@ -711,11 +711,11 @@ public void testUpdateWithForcedRebuildOnOpenShift(VertxTestContext context) { // Mock and capture service ops ArgumentCaptor serviceCaptor = ArgumentCaptor.forClass(Service.class); - when(mockServiceOps.reconcile(anyString(), anyString(), serviceCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockServiceOps.reconcile(any(), anyString(), anyString(), serviceCaptor.capture())).thenReturn(Future.succeededFuture()); // Mock and capture deployment ops ArgumentCaptor depCaptor = ArgumentCaptor.forClass(Deployment.class); - when(mockDepOps.reconcile(anyString(), anyString(), depCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockDepOps.reconcile(any(), anyString(), anyString(), depCaptor.capture())).thenReturn(Future.succeededFuture()); when(mockDepOps.getAsync(eq(NAMESPACE), eq(KafkaConnectResources.deploymentName(NAME)))).thenAnswer(inv -> { Deployment dep = connect.generateDeployment(emptyMap(), false, null, null); dep.getSpec().getTemplate().getMetadata().getAnnotations().put(Annotations.STRIMZI_IO_CONNECT_BUILD_REVISION, build.generateDockerfile().hashStub()); @@ -723,21 +723,21 @@ public void testUpdateWithForcedRebuildOnOpenShift(VertxTestContext context) { dep.getSpec().getTemplate().getSpec().getContainers().get(0).setImage("my-connect-build@sha256:blablabla"); return Future.succeededFuture(dep); }); - when(mockDepOps.scaleUp(anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); - when(mockDepOps.scaleDown(anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); - when(mockDepOps.readiness(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockDepOps.waitForObserved(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockSecretOps.reconcile(anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); + when(mockDepOps.scaleUp(any(), anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); + when(mockDepOps.scaleDown(any(), anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); + when(mockDepOps.readiness(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDepOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockSecretOps.reconcile(any(), anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); // Mock and capture CM ops - when(mockCmOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); + when(mockCmOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); // 
Mock and capture Pod ops - when(mockPodOps.reconcile(eq(NAMESPACE), eq(KafkaConnectResources.buildPodName(NAME)), eq(null))).thenReturn(Future.succeededFuture(ReconcileResult.noop(null))); + when(mockPodOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.buildPodName(NAME)), eq(null))).thenReturn(Future.succeededFuture(ReconcileResult.noop(null))); // Mock and capture BuildConfig ops ArgumentCaptor buildConfigCaptor = ArgumentCaptor.forClass(BuildConfig.class); - when(mockBcOps.reconcile(eq(NAMESPACE), eq(KafkaConnectResources.buildConfigName(NAME)), buildConfigCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.noop(null))); + when(mockBcOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.buildConfigName(NAME)), buildConfigCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.noop(null))); when(mockBcOps.getAsync(eq(NAMESPACE), eq(KafkaConnectResources.buildConfigName(NAME)))).thenReturn(Future.succeededFuture(null)); Build builder = new BuildBuilder() @@ -760,18 +760,18 @@ public void testUpdateWithForcedRebuildOnOpenShift(VertxTestContext context) { when(mockBcOps.startBuild(eq(NAMESPACE), eq(KafkaConnectResources.buildConfigName(NAME)), buildRequestCaptor.capture())).thenReturn(Future.succeededFuture(builder)); // Mock and capture Build ops - when(mockBuildOps.waitFor(eq(NAMESPACE), eq("build-1"), anyString(), anyLong(), anyLong(), any(BiPredicate.class))).thenReturn(Future.succeededFuture()); + when(mockBuildOps.waitFor(any(), eq(NAMESPACE), eq("build-1"), anyString(), anyLong(), anyLong(), any(BiPredicate.class))).thenReturn(Future.succeededFuture()); when(mockBuildOps.getAsync(eq(NAMESPACE), eq("build-1"))).thenReturn(Future.succeededFuture(builder)); // Mock and capture NP ops - when(mockNetPolOps.reconcile(eq(NAMESPACE), eq(KafkaConnectResources.deploymentName(NAME)), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); + when(mockNetPolOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.deploymentName(NAME)), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); // Mock and capture PDB ops - when(mockPdbOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture()); + when(mockPdbOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture()); // Mock and capture KafkaConnect ops for status update ArgumentCaptor connectCaptor = ArgumentCaptor.forClass(KafkaConnect.class); - when(mockConnectOps.updateStatusAsync(connectCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockConnectOps.updateStatusAsync(any(), connectCaptor.capture())).thenReturn(Future.succeededFuture()); // Mock KafkaConnect API client KafkaConnectApi mockConnectClient = mock(KafkaConnectApi.class); @@ -839,8 +839,8 @@ public void testContinueWithPreviousBuildOnOpenShift(VertxTestContext context) { .endSpec() .build(); - KafkaConnectCluster oldConnect = KafkaConnectCluster.fromCrd(oldKc, VERSIONS); - KafkaConnectBuild oldBuild = KafkaConnectBuild.fromCrd(oldKc, VERSIONS); + KafkaConnectCluster oldConnect = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, oldKc, VERSIONS); + KafkaConnectBuild oldBuild = KafkaConnectBuild.fromCrd(Reconciliation.DUMMY_RECONCILIATION, oldKc, VERSIONS); KafkaConnect kc = new KafkaConnectBuilder(oldKc) .editSpec() @@ -850,7 +850,7 @@ public void testContinueWithPreviousBuildOnOpenShift(VertxTestContext context) { .endSpec() .build(); - KafkaConnectBuild build = KafkaConnectBuild.fromCrd(kc, 
VERSIONS); + KafkaConnectBuild build = KafkaConnectBuild.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kc, VERSIONS); // Prepare and get mocks ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(true); @@ -879,28 +879,28 @@ public void testContinueWithPreviousBuildOnOpenShift(VertxTestContext context) { // Mock and capture service ops ArgumentCaptor serviceCaptor = ArgumentCaptor.forClass(Service.class); - when(mockServiceOps.reconcile(anyString(), anyString(), serviceCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockServiceOps.reconcile(any(), anyString(), anyString(), serviceCaptor.capture())).thenReturn(Future.succeededFuture()); // Mock and capture deployment ops ArgumentCaptor depCaptor = ArgumentCaptor.forClass(Deployment.class); - when(mockDepOps.reconcile(anyString(), anyString(), depCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockDepOps.reconcile(any(), anyString(), anyString(), depCaptor.capture())).thenReturn(Future.succeededFuture()); when(mockDepOps.getAsync(eq(NAMESPACE), eq(KafkaConnectResources.deploymentName(NAME)))).thenAnswer(inv -> { Deployment dep = oldConnect.generateDeployment(emptyMap(), false, null, null); dep.getSpec().getTemplate().getMetadata().getAnnotations().put(Annotations.STRIMZI_IO_CONNECT_BUILD_REVISION, oldBuild.generateDockerfile().hashStub()); dep.getSpec().getTemplate().getSpec().getContainers().get(0).setImage("my-connect-build@sha256:olddigest"); return Future.succeededFuture(dep); }); - when(mockDepOps.scaleUp(anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); - when(mockDepOps.scaleDown(anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); - when(mockDepOps.readiness(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockDepOps.waitForObserved(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockSecretOps.reconcile(anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); + when(mockDepOps.scaleUp(any(), anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); + when(mockDepOps.scaleDown(any(), anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); + when(mockDepOps.readiness(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDepOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockSecretOps.reconcile(any(), anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); // Mock and capture CM ops - when(mockCmOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); + when(mockCmOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); // Mock and capture Pod ops - when(mockPodOps.reconcile(eq(NAMESPACE), eq(KafkaConnectResources.buildPodName(NAME)), eq(null))).thenReturn(Future.succeededFuture(ReconcileResult.noop(null))); + when(mockPodOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.buildPodName(NAME)), eq(null))).thenReturn(Future.succeededFuture(ReconcileResult.noop(null))); // Mock and capture Build ops Build oldBuilder = new BuildBuilder() @@ -936,12 +936,12 @@ public void testContinueWithPreviousBuildOnOpenShift(VertxTestContext context) { .build(); when(mockBuildOps.getAsync(eq(NAMESPACE), 
eq(KafkaConnectResources.buildName(NAME, 1L)))).thenReturn(Future.succeededFuture(oldBuilder)); - when(mockBuildOps.waitFor(eq(NAMESPACE), eq(KafkaConnectResources.buildName(NAME, 1L)), anyString(), anyLong(), anyLong(), any(BiPredicate.class))).thenReturn(Future.succeededFuture()); + when(mockBuildOps.waitFor(any(), eq(NAMESPACE), eq(KafkaConnectResources.buildName(NAME, 1L)), anyString(), anyLong(), anyLong(), any(BiPredicate.class))).thenReturn(Future.succeededFuture()); when(mockBuildOps.getAsync(eq(NAMESPACE), eq(KafkaConnectResources.buildName(NAME, 1L)))).thenReturn(Future.succeededFuture(newBuilder)); // Mock and capture BuildConfig ops ArgumentCaptor buildConfigCaptor = ArgumentCaptor.forClass(BuildConfig.class); - when(mockBcOps.reconcile(eq(NAMESPACE), eq(KafkaConnectResources.buildConfigName(NAME)), buildConfigCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.noop(null))); + when(mockBcOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.buildConfigName(NAME)), buildConfigCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.noop(null))); BuildConfig oldBuildConfig = new BuildConfigBuilder(oldBuild.generateBuildConfig(oldBuild.generateDockerfile())) .withNewStatus() @@ -956,14 +956,14 @@ public void testContinueWithPreviousBuildOnOpenShift(VertxTestContext context) { // Mock and capture NP ops - when(mockNetPolOps.reconcile(eq(NAMESPACE), eq(KafkaConnectResources.deploymentName(NAME)), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); + when(mockNetPolOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.deploymentName(NAME)), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); // Mock and capture PDB ops - when(mockPdbOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture()); + when(mockPdbOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture()); // Mock and capture KafkaConnect ops for status update ArgumentCaptor connectCaptor = ArgumentCaptor.forClass(KafkaConnect.class); - when(mockConnectOps.updateStatusAsync(connectCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockConnectOps.updateStatusAsync(any(), connectCaptor.capture())).thenReturn(Future.succeededFuture()); // Mock KafkaConnect API client KafkaConnectApi mockConnectClient = mock(KafkaConnectApi.class); @@ -972,7 +972,7 @@ public void testContinueWithPreviousBuildOnOpenShift(VertxTestContext context) { KafkaConnectAssemblyOperator ops = new KafkaConnectAssemblyOperator(vertx, new PlatformFeaturesAvailability(true, kubernetesVersion), supplier, ResourceUtils.dummyClusterOperatorConfig(VERSIONS), x -> mockConnectClient); - KafkaConnectCluster connect = KafkaConnectCluster.fromCrd(kc, VERSIONS); + KafkaConnectCluster connect = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kc, VERSIONS); Checkpoint async = context.checkpoint(); ops.reconcile(new Reconciliation("test-trigger", KafkaConnect.RESOURCE_KIND, NAMESPACE, NAME)) @@ -1031,8 +1031,8 @@ public void testRestartPreviousBuildOnOpenShift(VertxTestContext context) { .endSpec() .build(); - KafkaConnectCluster oldConnect = KafkaConnectCluster.fromCrd(oldKc, VERSIONS); - KafkaConnectBuild oldBuild = KafkaConnectBuild.fromCrd(oldKc, VERSIONS); + KafkaConnectCluster oldConnect = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, oldKc, VERSIONS); + KafkaConnectBuild oldBuild = KafkaConnectBuild.fromCrd(Reconciliation.DUMMY_RECONCILIATION, oldKc, 
VERSIONS); KafkaConnect kc = new KafkaConnectBuilder(oldKc) .editSpec() @@ -1042,7 +1042,7 @@ public void testRestartPreviousBuildOnOpenShift(VertxTestContext context) { .endSpec() .build(); - KafkaConnectBuild build = KafkaConnectBuild.fromCrd(kc, VERSIONS); + KafkaConnectBuild build = KafkaConnectBuild.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kc, VERSIONS); // Prepare and get mocks ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(true); @@ -1071,28 +1071,28 @@ public void testRestartPreviousBuildOnOpenShift(VertxTestContext context) { // Mock and capture service ops ArgumentCaptor serviceCaptor = ArgumentCaptor.forClass(Service.class); - when(mockServiceOps.reconcile(anyString(), anyString(), serviceCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockServiceOps.reconcile(any(), anyString(), anyString(), serviceCaptor.capture())).thenReturn(Future.succeededFuture()); // Mock and capture deployment ops ArgumentCaptor depCaptor = ArgumentCaptor.forClass(Deployment.class); - when(mockDepOps.reconcile(anyString(), anyString(), depCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockDepOps.reconcile(any(), anyString(), anyString(), depCaptor.capture())).thenReturn(Future.succeededFuture()); when(mockDepOps.getAsync(eq(NAMESPACE), eq(KafkaConnectResources.deploymentName(NAME)))).thenAnswer(inv -> { Deployment dep = oldConnect.generateDeployment(emptyMap(), false, null, null); dep.getSpec().getTemplate().getMetadata().getAnnotations().put(Annotations.STRIMZI_IO_CONNECT_BUILD_REVISION, oldBuild.generateDockerfile().hashStub()); dep.getSpec().getTemplate().getSpec().getContainers().get(0).setImage("my-connect-build@sha256:olddigest"); return Future.succeededFuture(dep); }); - when(mockDepOps.scaleUp(anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); - when(mockDepOps.scaleDown(anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); - when(mockDepOps.readiness(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockDepOps.waitForObserved(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockSecretOps.reconcile(anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); + when(mockDepOps.scaleUp(any(), anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); + when(mockDepOps.scaleDown(any(), anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); + when(mockDepOps.readiness(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDepOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockSecretOps.reconcile(any(), anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); // Mock and capture CM ops - when(mockCmOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); + when(mockCmOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); // Mock and capture Pod ops - when(mockPodOps.reconcile(eq(NAMESPACE), eq(KafkaConnectResources.buildPodName(NAME)), eq(null))).thenReturn(Future.succeededFuture(ReconcileResult.noop(null))); + when(mockPodOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.buildPodName(NAME)), eq(null))).thenReturn(Future.succeededFuture(ReconcileResult.noop(null))); // Mock and 
capture Build ops Build oldBuilder = new BuildBuilder() @@ -1128,12 +1128,12 @@ public void testRestartPreviousBuildOnOpenShift(VertxTestContext context) { .build(); when(mockBuildOps.getAsync(eq(NAMESPACE), eq(KafkaConnectResources.buildName(NAME, 1L)))).thenReturn(Future.succeededFuture(oldBuilder)); - when(mockBuildOps.waitFor(eq(NAMESPACE), eq(KafkaConnectResources.buildName(NAME, 2L)), anyString(), anyLong(), anyLong(), any(BiPredicate.class))).thenReturn(Future.succeededFuture()); + when(mockBuildOps.waitFor(any(), eq(NAMESPACE), eq(KafkaConnectResources.buildName(NAME, 2L)), anyString(), anyLong(), anyLong(), any(BiPredicate.class))).thenReturn(Future.succeededFuture()); when(mockBuildOps.getAsync(eq(NAMESPACE), eq(KafkaConnectResources.buildName(NAME, 2L)))).thenReturn(Future.succeededFuture(newBuilder)); // Mock and capture BuildConfig ops ArgumentCaptor buildConfigCaptor = ArgumentCaptor.forClass(BuildConfig.class); - when(mockBcOps.reconcile(eq(NAMESPACE), eq(KafkaConnectResources.buildConfigName(NAME)), buildConfigCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.noop(null))); + when(mockBcOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.buildConfigName(NAME)), buildConfigCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.noop(null))); BuildConfig oldBuildConfig = new BuildConfigBuilder(oldBuild.generateBuildConfig(oldBuild.generateDockerfile())) .withNewStatus() @@ -1148,14 +1148,14 @@ public void testRestartPreviousBuildOnOpenShift(VertxTestContext context) { // Mock and capture NP ops - when(mockNetPolOps.reconcile(eq(NAMESPACE), eq(KafkaConnectResources.deploymentName(NAME)), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); + when(mockNetPolOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.deploymentName(NAME)), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); // Mock and capture PDB ops - when(mockPdbOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture()); + when(mockPdbOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture()); // Mock and capture KafkaConnect ops for status update ArgumentCaptor connectCaptor = ArgumentCaptor.forClass(KafkaConnect.class); - when(mockConnectOps.updateStatusAsync(connectCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockConnectOps.updateStatusAsync(any(), connectCaptor.capture())).thenReturn(Future.succeededFuture()); // Mock KafkaConnect API client KafkaConnectApi mockConnectClient = mock(KafkaConnectApi.class); @@ -1164,7 +1164,7 @@ public void testRestartPreviousBuildOnOpenShift(VertxTestContext context) { KafkaConnectAssemblyOperator ops = new KafkaConnectAssemblyOperator(vertx, new PlatformFeaturesAvailability(true, kubernetesVersion), supplier, ResourceUtils.dummyClusterOperatorConfig(VERSIONS), x -> mockConnectClient); - KafkaConnectCluster connect = KafkaConnectCluster.fromCrd(kc, VERSIONS); + KafkaConnectCluster connect = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kc, VERSIONS); Checkpoint async = context.checkpoint(); ops.reconcile(new Reconciliation("test-trigger", KafkaConnect.RESOURCE_KIND, NAMESPACE, NAME)) @@ -1225,8 +1225,8 @@ public void testRestartPreviousBuildDueToFailureOnOpenShift(VertxTestContext con .endSpec() .build(); - KafkaConnectCluster oldConnect = KafkaConnectCluster.fromCrd(oldKc, VERSIONS); - KafkaConnectBuild oldBuild = KafkaConnectBuild.fromCrd(oldKc, VERSIONS); + 
KafkaConnectCluster oldConnect = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, oldKc, VERSIONS); + KafkaConnectBuild oldBuild = KafkaConnectBuild.fromCrd(Reconciliation.DUMMY_RECONCILIATION, oldKc, VERSIONS); KafkaConnect kc = new KafkaConnectBuilder(oldKc) .editSpec() @@ -1236,7 +1236,7 @@ public void testRestartPreviousBuildDueToFailureOnOpenShift(VertxTestContext con .endSpec() .build(); - KafkaConnectBuild build = KafkaConnectBuild.fromCrd(kc, VERSIONS); + KafkaConnectBuild build = KafkaConnectBuild.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kc, VERSIONS); // Prepare and get mocks ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(true); @@ -1265,28 +1265,28 @@ public void testRestartPreviousBuildDueToFailureOnOpenShift(VertxTestContext con // Mock and capture service ops ArgumentCaptor serviceCaptor = ArgumentCaptor.forClass(Service.class); - when(mockServiceOps.reconcile(anyString(), anyString(), serviceCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockServiceOps.reconcile(any(), anyString(), anyString(), serviceCaptor.capture())).thenReturn(Future.succeededFuture()); // Mock and capture deployment ops ArgumentCaptor depCaptor = ArgumentCaptor.forClass(Deployment.class); - when(mockDepOps.reconcile(anyString(), anyString(), depCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockDepOps.reconcile(any(), anyString(), anyString(), depCaptor.capture())).thenReturn(Future.succeededFuture()); when(mockDepOps.getAsync(eq(NAMESPACE), eq(KafkaConnectResources.deploymentName(NAME)))).thenAnswer(inv -> { Deployment dep = oldConnect.generateDeployment(emptyMap(), false, null, null); dep.getSpec().getTemplate().getMetadata().getAnnotations().put(Annotations.STRIMZI_IO_CONNECT_BUILD_REVISION, oldBuild.generateDockerfile().hashStub()); dep.getSpec().getTemplate().getSpec().getContainers().get(0).setImage("my-connect-build@sha256:olddigest"); return Future.succeededFuture(dep); }); - when(mockDepOps.scaleUp(anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); - when(mockDepOps.scaleDown(anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); - when(mockDepOps.readiness(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockDepOps.waitForObserved(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockSecretOps.reconcile(anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); + when(mockDepOps.scaleUp(any(), anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); + when(mockDepOps.scaleDown(any(), anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); + when(mockDepOps.readiness(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDepOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockSecretOps.reconcile(any(), anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); // Mock and capture CM ops - when(mockCmOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); + when(mockCmOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); // Mock and capture Pod ops - when(mockPodOps.reconcile(eq(NAMESPACE), eq(KafkaConnectResources.buildPodName(NAME)), 
eq(null))).thenReturn(Future.succeededFuture(ReconcileResult.noop(null))); + when(mockPodOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.buildPodName(NAME)), eq(null))).thenReturn(Future.succeededFuture(ReconcileResult.noop(null))); // Mock and capture Build ops Build oldBuilder = new BuildBuilder() @@ -1322,12 +1322,12 @@ public void testRestartPreviousBuildDueToFailureOnOpenShift(VertxTestContext con .build(); when(mockBuildOps.getAsync(eq(NAMESPACE), eq(KafkaConnectResources.buildName(NAME, 1L)))).thenReturn(Future.succeededFuture(oldBuilder)); - when(mockBuildOps.waitFor(eq(NAMESPACE), eq(KafkaConnectResources.buildName(NAME, 2L)), anyString(), anyLong(), anyLong(), any(BiPredicate.class))).thenReturn(Future.succeededFuture()); + when(mockBuildOps.waitFor(any(), eq(NAMESPACE), eq(KafkaConnectResources.buildName(NAME, 2L)), anyString(), anyLong(), anyLong(), any(BiPredicate.class))).thenReturn(Future.succeededFuture()); when(mockBuildOps.getAsync(eq(NAMESPACE), eq(KafkaConnectResources.buildName(NAME, 2L)))).thenReturn(Future.succeededFuture(newBuilder)); // Mock and capture BuildConfig ops ArgumentCaptor buildConfigCaptor = ArgumentCaptor.forClass(BuildConfig.class); - when(mockBcOps.reconcile(eq(NAMESPACE), eq(KafkaConnectResources.buildConfigName(NAME)), buildConfigCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.noop(null))); + when(mockBcOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.buildConfigName(NAME)), buildConfigCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.noop(null))); BuildConfig oldBuildConfig = new BuildConfigBuilder(oldBuild.generateBuildConfig(oldBuild.generateDockerfile())) .withNewStatus() @@ -1342,14 +1342,14 @@ public void testRestartPreviousBuildDueToFailureOnOpenShift(VertxTestContext con // Mock and capture NP ops - when(mockNetPolOps.reconcile(eq(NAMESPACE), eq(KafkaConnectResources.deploymentName(NAME)), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); + when(mockNetPolOps.reconcile(any(), eq(NAMESPACE), eq(KafkaConnectResources.deploymentName(NAME)), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); // Mock and capture PDB ops - when(mockPdbOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture()); + when(mockPdbOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture()); // Mock and capture KafkaConnect ops for status update ArgumentCaptor connectCaptor = ArgumentCaptor.forClass(KafkaConnect.class); - when(mockConnectOps.updateStatusAsync(connectCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockConnectOps.updateStatusAsync(any(), connectCaptor.capture())).thenReturn(Future.succeededFuture()); // Mock KafkaConnect API client KafkaConnectApi mockConnectClient = mock(KafkaConnectApi.class); @@ -1358,7 +1358,7 @@ public void testRestartPreviousBuildDueToFailureOnOpenShift(VertxTestContext con KafkaConnectAssemblyOperator ops = new KafkaConnectAssemblyOperator(vertx, new PlatformFeaturesAvailability(true, kubernetesVersion), supplier, ResourceUtils.dummyClusterOperatorConfig(VERSIONS), x -> mockConnectClient); - KafkaConnectCluster connect = KafkaConnectCluster.fromCrd(kc, VERSIONS); + KafkaConnectCluster connect = KafkaConnectCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kc, VERSIONS); Checkpoint async = context.checkpoint(); ops.reconcile(new Reconciliation("test-trigger", KafkaConnect.RESOURCE_KIND, NAMESPACE, NAME)) diff --git 
a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectS2IAssemblyOperatorTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectS2IAssemblyOperatorTest.java index 30702be935..0dff078f45 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectS2IAssemblyOperatorTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectS2IAssemblyOperatorTest.java @@ -92,7 +92,7 @@ public class KafkaConnectS2IAssemblyOperatorTest { private static final KafkaVersion.Lookup VERSIONS = KafkaVersionTestUtils.getKafkaVersionLookup(); protected static Vertx vertx; private static final String METRICS_CONFIG = "{\"foo\":\"bar\"}"; - private static final String LOGGING_CONFIG = AbstractModel.getOrderedProperties("kafkaConnectDefaultLoggingProperties") + private static final String LOGGING_CONFIG = AbstractModel.getOrderedProperties(Reconciliation.DUMMY_RECONCILIATION, "kafkaConnectDefaultLoggingProperties") .asPairsWithComment("Do not change this generated file. Logging can be configured in the corresponding Kubernetes resource."); private final KubernetesVersion kubernetesVersion = KubernetesVersion.V1_16; @@ -128,34 +128,34 @@ public void createCluster(VertxTestContext context, KafkaConnectS2I kcs2i, boole when(mockConnectOps.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture(null)); ArgumentCaptor serviceCaptor = ArgumentCaptor.forClass(Service.class); - when(mockServiceOps.reconcile(anyString(), anyString(), serviceCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockServiceOps.reconcile(any(), anyString(), anyString(), serviceCaptor.capture())).thenReturn(Future.succeededFuture()); - when(mockSecretOps.reconcile(anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); + when(mockSecretOps.reconcile(any(), anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); when(mockSecretOps.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture()); ArgumentCaptor dcCaptor = ArgumentCaptor.forClass(DeploymentConfig.class); - when(mockDcOps.reconcile(anyString(), anyString(), dcCaptor.capture())).thenReturn(Future.succeededFuture()); - when(mockDcOps.scaleDown(anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); - when(mockDcOps.scaleUp(anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); - when(mockDcOps.readiness(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockDcOps.waitForObserved(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDcOps.reconcile(any(), anyString(), anyString(), dcCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockDcOps.scaleDown(any(), anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); + when(mockDcOps.scaleUp(any(), anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); + when(mockDcOps.readiness(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDcOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); ArgumentCaptor isCaptor = ArgumentCaptor.forClass(ImageStream.class); - when(mockIsOps.reconcile(anyString(), anyString(), isCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockIsOps.reconcile(any(), anyString(), 
anyString(), isCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor bcCaptor = ArgumentCaptor.forClass(BuildConfig.class); - when(mockBcOps.reconcile(anyString(), anyString(), bcCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockBcOps.reconcile(any(), anyString(), anyString(), bcCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor pdbNamespaceCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor pdbNameCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor pdbCaptor = ArgumentCaptor.forClass(PodDisruptionBudget.class); - when(mockPdbOps.reconcile(pdbNamespaceCaptor.capture(), pdbNameCaptor.capture(), pdbCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockPdbOps.reconcile(any(), pdbNamespaceCaptor.capture(), pdbNameCaptor.capture(), pdbCaptor.capture())).thenReturn(Future.succeededFuture()); - when(mockConnectS2IOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new KafkaConnectS2I()))); - when(mockCmOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); + when(mockConnectS2IOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new KafkaConnectS2I()))); + when(mockCmOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); ArgumentCaptor npCaptor = ArgumentCaptor.forClass(NetworkPolicy.class); - when(mockNetPolOps.reconcile(eq(kcs2i.getMetadata().getNamespace()), eq(KafkaConnectS2IResources.deploymentName(kcs2i.getMetadata().getName())), npCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); + when(mockNetPolOps.reconcile(any(), eq(kcs2i.getMetadata().getNamespace()), eq(KafkaConnectS2IResources.deploymentName(kcs2i.getMetadata().getName())), npCaptor.capture())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); KafkaConnectApi mockConnectClient = mock(KafkaConnectApi.class); when(mockConnectClient.list(anyString(), anyInt())).thenReturn(Future.succeededFuture(emptyList())); @@ -165,17 +165,17 @@ public void createCluster(VertxTestContext context, KafkaConnectS2I kcs2i, boole .withType("sink") .withVersion("1.0.0") .build(); - when(mockConnectClient.listConnectorPlugins(anyString(), anyInt())).thenReturn(Future.succeededFuture(singletonList(plugin1))); - when(mockConnectClient.updateConnectLoggers(anyString(), anyInt(), anyString(), any(OrderedProperties.class))).thenReturn(Future.succeededFuture()); + when(mockConnectClient.listConnectorPlugins(any(), anyString(), anyInt())).thenReturn(Future.succeededFuture(singletonList(plugin1))); + when(mockConnectClient.updateConnectLoggers(any(), anyString(), anyInt(), anyString(), any(OrderedProperties.class))).thenReturn(Future.succeededFuture()); PlatformFeaturesAvailability pfa = new PlatformFeaturesAvailability(true, kubernetesVersion); ArgumentCaptor connectCaptor = ArgumentCaptor.forClass(KafkaConnectS2I.class); - when(mockConnectS2IOps.updateStatusAsync(connectCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockConnectS2IOps.updateStatusAsync(any(), connectCaptor.capture())).thenReturn(Future.succeededFuture()); KafkaConnectS2IAssemblyOperator ops = new KafkaConnectS2IAssemblyOperator(vertx, pfa, supplier, ResourceUtils.dummyClusterOperatorConfig(VERSIONS), x -> mockConnectClient); - KafkaConnectS2ICluster connect = 
KafkaConnectS2ICluster.fromCrd(kcs2i, VERSIONS); + KafkaConnectS2ICluster connect = KafkaConnectS2ICluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kcs2i, VERSIONS); Checkpoint async = context.checkpoint(); ops.reconcile(new Reconciliation("test-trigger", KafkaConnectS2I.RESOURCE_KIND, kcs2i.getMetadata().getNamespace(), kcs2i.getMetadata().getName())) @@ -285,60 +285,60 @@ public void testCreateOrUpdateDoesNotUpdateWithNoDiff(VertxTestContext context) String kcs2iNamespace = "test"; KafkaConnectS2I kcs2i = ResourceUtils.createEmptyKafkaConnectS2I(kcs2iNamespace, kcs2iName); - KafkaConnectS2ICluster connect = KafkaConnectS2ICluster.fromCrd(kcs2i, VERSIONS); + KafkaConnectS2ICluster connect = KafkaConnectS2ICluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kcs2i, VERSIONS); when(mockConnectorOps.listAsync(anyString(), any(Optional.class))).thenReturn(Future.succeededFuture(emptyList())); when(mockConnectS2IOps.get(kcs2iNamespace, kcs2iName)).thenReturn(kcs2i); when(mockConnectS2IOps.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture(kcs2i)); - when(mockConnectS2IOps.updateStatusAsync(any(KafkaConnectS2I.class))).thenReturn(Future.succeededFuture()); + when(mockConnectS2IOps.updateStatusAsync(any(), any(KafkaConnectS2I.class))).thenReturn(Future.succeededFuture()); when(mockConnectOps.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture(null)); when(mockServiceOps.get(kcs2iNamespace, connect.getName())).thenReturn(connect.generateService()); when(mockDcOps.get(kcs2iNamespace, connect.getName())).thenReturn(connect.generateDeploymentConfig(new HashMap(), true, null, null)); - when(mockDcOps.readiness(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockDcOps.waitForObserved(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDcOps.readiness(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDcOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); when(mockIsOps.get(kcs2iNamespace, KafkaConnectS2IResources.sourceImageStreamName(connect.getCluster()))).thenReturn(connect.generateSourceImageStream()); when(mockIsOps.get(kcs2iNamespace, connect.getName())).thenReturn(connect.generateTargetImageStream()); when(mockBcOps.get(kcs2iNamespace, connect.getName())).thenReturn(connect.generateBuildConfig()); when(mockPdbOps.get(kcs2iNamespace, connect.getName())).thenReturn(connect.generatePodDisruptionBudget()); - when(mockSecretOps.reconcile(eq(kcs2iNamespace), any(), any())).thenReturn(Future.succeededFuture()); + when(mockSecretOps.reconcile(any(), eq(kcs2iNamespace), any(), any())).thenReturn(Future.succeededFuture()); ArgumentCaptor serviceNamespaceCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor serviceNameCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor serviceCaptor = ArgumentCaptor.forClass(Service.class); - when(mockServiceOps.reconcile(serviceNamespaceCaptor.capture(), serviceNameCaptor.capture(), serviceCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockServiceOps.reconcile(any(), serviceNamespaceCaptor.capture(), serviceNameCaptor.capture(), serviceCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor dcNamespaceCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor dcNameCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor dcCaptor = 
ArgumentCaptor.forClass(DeploymentConfig.class); - when(mockDcOps.reconcile(dcNamespaceCaptor.capture(), dcNameCaptor.capture(), dcCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockDcOps.reconcile(any(), dcNamespaceCaptor.capture(), dcNameCaptor.capture(), dcCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor dcScaleUpNamespaceCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor dcScaleUpNameCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor dcScaleUpReplicasCaptor = ArgumentCaptor.forClass(Integer.class); - when(mockDcOps.scaleUp(dcScaleUpNamespaceCaptor.capture(), dcScaleUpNameCaptor.capture(), dcScaleUpReplicasCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockDcOps.scaleUp(any(), dcScaleUpNamespaceCaptor.capture(), dcScaleUpNameCaptor.capture(), dcScaleUpReplicasCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor dcScaleDownNamespaceCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor dcScaleDownNameCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor dcScaleDownReplicasCaptor = ArgumentCaptor.forClass(Integer.class); - when(mockDcOps.scaleDown(dcScaleDownNamespaceCaptor.capture(), dcScaleDownNameCaptor.capture(), dcScaleDownReplicasCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockDcOps.scaleDown(any(), dcScaleDownNamespaceCaptor.capture(), dcScaleDownNameCaptor.capture(), dcScaleDownReplicasCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor isNamespaceCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor isNameCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor isCaptor = ArgumentCaptor.forClass(ImageStream.class); - when(mockIsOps.reconcile(isNamespaceCaptor.capture(), isNameCaptor.capture(), isCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockIsOps.reconcile(any(), isNamespaceCaptor.capture(), isNameCaptor.capture(), isCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor bcNamespaceCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor bcNameCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor bcCaptor = ArgumentCaptor.forClass(BuildConfig.class); - when(mockBcOps.reconcile(bcNamespaceCaptor.capture(), bcNameCaptor.capture(), bcCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockBcOps.reconcile(any(), bcNamespaceCaptor.capture(), bcNameCaptor.capture(), bcCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor pdbNamespaceCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor pdbNameCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor pdbCaptor = ArgumentCaptor.forClass(PodDisruptionBudget.class); - when(mockPdbOps.reconcile(pdbNamespaceCaptor.capture(), pdbNameCaptor.capture(), pdbCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockPdbOps.reconcile(any(), pdbNamespaceCaptor.capture(), pdbNameCaptor.capture(), pdbCaptor.capture())).thenReturn(Future.succeededFuture()); - when(mockConnectS2IOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new KafkaConnectS2I()))); - when(mockCmOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); - when(mockNetPolOps.reconcile(eq(kcs2i.getMetadata().getNamespace()), eq(KafkaConnectS2IResources.deploymentName(kcs2i.getMetadata().getName())), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); + 
when(mockConnectS2IOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new KafkaConnectS2I()))); + when(mockCmOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); + when(mockNetPolOps.reconcile(any(), eq(kcs2i.getMetadata().getNamespace()), eq(KafkaConnectS2IResources.deploymentName(kcs2i.getMetadata().getName())), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); KafkaConnectApi mockConnectClient = mock(KafkaConnectApi.class); when(mockConnectClient.list(anyString(), anyInt())).thenReturn(Future.succeededFuture(emptyList())); @@ -348,8 +348,8 @@ public void testCreateOrUpdateDoesNotUpdateWithNoDiff(VertxTestContext context) .withType("sink") .withVersion("1.0.0") .build(); - when(mockConnectClient.listConnectorPlugins(anyString(), anyInt())).thenReturn(Future.succeededFuture(singletonList(plugin1))); - when(mockConnectClient.updateConnectLoggers(anyString(), anyInt(), anyString(), any(OrderedProperties.class))).thenReturn(Future.succeededFuture()); + when(mockConnectClient.listConnectorPlugins(any(), anyString(), anyInt())).thenReturn(Future.succeededFuture(singletonList(plugin1))); + when(mockConnectClient.updateConnectLoggers(any(), anyString(), anyInt(), anyString(), any(OrderedProperties.class))).thenReturn(Future.succeededFuture()); PlatformFeaturesAvailability pfa = new PlatformFeaturesAvailability(true, kubernetesVersion); KafkaConnectS2IAssemblyOperator ops = new KafkaConnectS2IAssemblyOperator(vertx, pfa, @@ -406,62 +406,62 @@ public void testUpdateCluster(VertxTestContext context) { String kcs2iNamespace = "test"; KafkaConnectS2I kcs2i = ResourceUtils.createEmptyKafkaConnectS2I(kcs2iNamespace, kcs2iName); - KafkaConnectS2ICluster connect = KafkaConnectS2ICluster.fromCrd(kcs2i, VERSIONS); + KafkaConnectS2ICluster connect = KafkaConnectS2ICluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kcs2i, VERSIONS); kcs2i.getSpec().setImage("some/different:image"); // Change the image to generate some diff when(mockConnectorOps.listAsync(anyString(), any(Optional.class))).thenReturn(Future.succeededFuture(emptyList())); when(mockConnectS2IOps.get(kcs2iNamespace, kcs2iName)).thenReturn(kcs2i); when(mockConnectS2IOps.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture(kcs2i)); - when(mockConnectS2IOps.updateStatusAsync(any(KafkaConnectS2I.class))).thenReturn(Future.succeededFuture()); + when(mockConnectS2IOps.updateStatusAsync(any(), any(KafkaConnectS2I.class))).thenReturn(Future.succeededFuture()); when(mockConnectOps.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture(null)); when(mockServiceOps.get(kcs2iNamespace, connect.getName())).thenReturn(connect.generateService()); when(mockDcOps.get(kcs2iNamespace, connect.getName())).thenReturn(connect.generateDeploymentConfig(new HashMap(), true, null, null)); - when(mockDcOps.readiness(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockDcOps.waitForObserved(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDcOps.readiness(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDcOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); when(mockIsOps.get(kcs2iNamespace, 
KafkaConnectS2IResources.sourceImageStreamName(connect.getCluster()))).thenReturn(connect.generateSourceImageStream()); when(mockIsOps.get(kcs2iNamespace, connect.getName())).thenReturn(connect.generateTargetImageStream()); when(mockBcOps.get(kcs2iNamespace, connect.getName())).thenReturn(connect.generateBuildConfig()); when(mockPdbOps.get(kcs2iNamespace, connect.getName())).thenReturn(connect.generatePodDisruptionBudget()); - when(mockSecretOps.reconcile(eq(kcs2iNamespace), any(), any())).thenReturn(Future.succeededFuture()); + when(mockSecretOps.reconcile(any(), eq(kcs2iNamespace), any(), any())).thenReturn(Future.succeededFuture()); ArgumentCaptor serviceNamespaceCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor serviceNameCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor serviceCaptor = ArgumentCaptor.forClass(Service.class); - when(mockServiceOps.reconcile(serviceNamespaceCaptor.capture(), serviceNameCaptor.capture(), serviceCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockServiceOps.reconcile(any(), serviceNamespaceCaptor.capture(), serviceNameCaptor.capture(), serviceCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor dcNamespaceCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor dcNameCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor dcCaptor = ArgumentCaptor.forClass(DeploymentConfig.class); - when(mockDcOps.reconcile(dcNamespaceCaptor.capture(), dcNameCaptor.capture(), dcCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockDcOps.reconcile(any(), dcNamespaceCaptor.capture(), dcNameCaptor.capture(), dcCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor dcScaleUpNamespaceCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor dcScaleUpNameCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor dcScaleUpReplicasCaptor = ArgumentCaptor.forClass(Integer.class); - when(mockDcOps.scaleUp(dcScaleUpNamespaceCaptor.capture(), dcScaleUpNameCaptor.capture(), dcScaleUpReplicasCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockDcOps.scaleUp(any(), dcScaleUpNamespaceCaptor.capture(), dcScaleUpNameCaptor.capture(), dcScaleUpReplicasCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor dcScaleDownNamespaceCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor dcScaleDownNameCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor dcScaleDownReplicasCaptor = ArgumentCaptor.forClass(Integer.class); - when(mockDcOps.scaleDown(dcScaleDownNamespaceCaptor.capture(), dcScaleDownNameCaptor.capture(), dcScaleDownReplicasCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockDcOps.scaleDown(any(), dcScaleDownNamespaceCaptor.capture(), dcScaleDownNameCaptor.capture(), dcScaleDownReplicasCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor isNamespaceCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor isNameCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor isCaptor = ArgumentCaptor.forClass(ImageStream.class); - when(mockIsOps.reconcile(isNamespaceCaptor.capture(), isNameCaptor.capture(), isCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockIsOps.reconcile(any(), isNamespaceCaptor.capture(), isNameCaptor.capture(), isCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor bcNamespaceCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor bcNameCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor bcCaptor = 
ArgumentCaptor.forClass(BuildConfig.class); - when(mockBcOps.reconcile(bcNamespaceCaptor.capture(), bcNameCaptor.capture(), bcCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockBcOps.reconcile(any(), bcNamespaceCaptor.capture(), bcNameCaptor.capture(), bcCaptor.capture())).thenReturn(Future.succeededFuture()); - when(mockNetPolOps.reconcile(eq(kcs2i.getMetadata().getNamespace()), eq(KafkaConnectS2IResources.deploymentName(kcs2i.getMetadata().getName())), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); - when(mockConnectS2IOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new KafkaConnectS2I()))); + when(mockNetPolOps.reconcile(any(), eq(kcs2i.getMetadata().getNamespace()), eq(KafkaConnectS2IResources.deploymentName(kcs2i.getMetadata().getName())), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); + when(mockConnectS2IOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new KafkaConnectS2I()))); ArgumentCaptor pdbNamespaceCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor pdbNameCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor pdbCaptor = ArgumentCaptor.forClass(PodDisruptionBudget.class); - when(mockPdbOps.reconcile(pdbNamespaceCaptor.capture(), pdbNameCaptor.capture(), pdbCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockPdbOps.reconcile(any(), pdbNamespaceCaptor.capture(), pdbNameCaptor.capture(), pdbCaptor.capture())).thenReturn(Future.succeededFuture()); // Mock CM get when(mockConnectS2IOps.get(kcs2iNamespace, kcs2iName)).thenReturn(kcs2i); @@ -478,7 +478,7 @@ public void testUpdateCluster(VertxTestContext context) { doAnswer(invocation -> { metricsCms.add(invocation.getArgument(1)); return Future.succeededFuture(); - }).when(mockCmOps).reconcile(eq(kcs2iNamespace), anyString(), any()); + }).when(mockCmOps).reconcile(any(), eq(kcs2iNamespace), anyString(), any()); KafkaConnectApi mockConnectClient = mock(KafkaConnectApi.class); when(mockConnectClient.list(anyString(), anyInt())).thenReturn(Future.succeededFuture(emptyList())); @@ -488,8 +488,8 @@ public void testUpdateCluster(VertxTestContext context) { .withType("sink") .withVersion("1.0.0") .build(); - when(mockConnectClient.listConnectorPlugins(anyString(), anyInt())).thenReturn(Future.succeededFuture(singletonList(plugin1))); - when(mockConnectClient.updateConnectLoggers(anyString(), anyInt(), anyString(), any(OrderedProperties.class))).thenReturn(Future.succeededFuture()); + when(mockConnectClient.listConnectorPlugins(any(), anyString(), anyInt())).thenReturn(Future.succeededFuture(singletonList(plugin1))); + when(mockConnectClient.updateConnectLoggers(any(), anyString(), anyInt(), anyString(), any(OrderedProperties.class))).thenReturn(Future.succeededFuture()); PlatformFeaturesAvailability pfa = new PlatformFeaturesAvailability(true, kubernetesVersion); KafkaConnectS2IAssemblyOperator ops = new KafkaConnectS2IAssemblyOperator(vertx, pfa, @@ -498,7 +498,7 @@ public void testUpdateCluster(VertxTestContext context) { Checkpoint async = context.checkpoint(); ops.createOrUpdate(new Reconciliation("test-trigger", KafkaConnectS2I.RESOURCE_KIND, kcs2iNamespace, kcs2iName), kcs2i) .onComplete(context.succeeding(v -> context.verify(() -> { - KafkaConnectS2ICluster compareTo = KafkaConnectS2ICluster.fromCrd(kcs2i, VERSIONS); + KafkaConnectS2ICluster compareTo = 
KafkaConnectS2ICluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kcs2i, VERSIONS); // Verify service List capturedServices = serviceCaptor.getAllValues(); @@ -566,58 +566,58 @@ public void testCreateOrUpdateFailsWhenDeploymentUpdateFails(VertxTestContext co String kcs2iNamespace = "test"; KafkaConnectS2I kcs2i = ResourceUtils.createEmptyKafkaConnectS2I(kcs2iNamespace, kcs2iName); - KafkaConnectS2ICluster connect = KafkaConnectS2ICluster.fromCrd(kcs2i, VERSIONS); + KafkaConnectS2ICluster connect = KafkaConnectS2ICluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kcs2i, VERSIONS); kcs2i.getSpec().setImage("some/different:image"); // Change the image to generate some diff when(mockConnectS2IOps.get(kcs2iNamespace, kcs2iName)).thenReturn(kcs2i); when(mockServiceOps.get(kcs2iNamespace, connect.getName())).thenReturn(connect.generateService()); when(mockDcOps.get(kcs2iNamespace, connect.getName())).thenReturn(connect.generateDeploymentConfig(new HashMap(), true, null, null)); - when(mockDcOps.readiness(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockDcOps.waitForObserved(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDcOps.readiness(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDcOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); when(mockIsOps.get(kcs2iNamespace, KafkaConnectS2IResources.sourceImageStreamName(connect.getCluster()))).thenReturn(connect.generateSourceImageStream()); when(mockIsOps.get(kcs2iNamespace, KafkaConnectS2IResources.targetImageStreamName(connect.getCluster()))).thenReturn(connect.generateTargetImageStream()); when(mockBcOps.get(kcs2iNamespace, KafkaConnectS2IResources.buildConfigName(connect.getCluster()))).thenReturn(connect.generateBuildConfig()); when(mockPdbOps.get(kcs2iNamespace, connect.getName())).thenReturn(connect.generatePodDisruptionBudget()); when(mockConnectOps.getAsync(kcs2iNamespace, kcs2iName)).thenReturn(Future.succeededFuture(null)); - when(mockSecretOps.reconcile(eq(kcs2iNamespace), any(), any())).thenReturn(Future.succeededFuture()); + when(mockSecretOps.reconcile(any(), eq(kcs2iNamespace), any(), any())).thenReturn(Future.succeededFuture()); ArgumentCaptor serviceNamespaceCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor serviceNameCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor serviceCaptor = ArgumentCaptor.forClass(Service.class); - when(mockServiceOps.reconcile(serviceNamespaceCaptor.capture(), serviceNameCaptor.capture(), serviceCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockServiceOps.reconcile(any(), serviceNamespaceCaptor.capture(), serviceNameCaptor.capture(), serviceCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor dcNamespaceCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor dcNameCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor dcCaptor = ArgumentCaptor.forClass(DeploymentConfig.class); - when(mockDcOps.reconcile(dcNamespaceCaptor.capture(), dcNameCaptor.capture(), dcCaptor.capture())).thenReturn(Future.failedFuture("Failed")); + when(mockDcOps.reconcile(any(), dcNamespaceCaptor.capture(), dcNameCaptor.capture(), dcCaptor.capture())).thenReturn(Future.failedFuture("Failed")); ArgumentCaptor dcScaleUpNamespaceCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor dcScaleUpNameCaptor = 
ArgumentCaptor.forClass(String.class); ArgumentCaptor dcScaleUpReplicasCaptor = ArgumentCaptor.forClass(Integer.class); - when(mockDcOps.scaleUp(dcScaleUpNamespaceCaptor.capture(), dcScaleUpNameCaptor.capture(), dcScaleUpReplicasCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockDcOps.scaleUp(any(), dcScaleUpNamespaceCaptor.capture(), dcScaleUpNameCaptor.capture(), dcScaleUpReplicasCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor dcScaleDownNamespaceCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor dcScaleDownNameCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor dcScaleDownReplicasCaptor = ArgumentCaptor.forClass(Integer.class); - when(mockDcOps.scaleDown(dcScaleDownNamespaceCaptor.capture(), dcScaleDownNameCaptor.capture(), dcScaleDownReplicasCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockDcOps.scaleDown(any(), dcScaleDownNamespaceCaptor.capture(), dcScaleDownNameCaptor.capture(), dcScaleDownReplicasCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor isNamespaceCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor isNameCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor isCaptor = ArgumentCaptor.forClass(ImageStream.class); - when(mockIsOps.reconcile(isNamespaceCaptor.capture(), isNameCaptor.capture(), isCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockIsOps.reconcile(any(), isNamespaceCaptor.capture(), isNameCaptor.capture(), isCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor bcNamespaceCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor bcNameCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor bcCaptor = ArgumentCaptor.forClass(BuildConfig.class); - when(mockBcOps.reconcile(bcNamespaceCaptor.capture(), bcNameCaptor.capture(), bcCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockBcOps.reconcile(any(), bcNamespaceCaptor.capture(), bcNameCaptor.capture(), bcCaptor.capture())).thenReturn(Future.succeededFuture()); - when(mockConnectS2IOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new KafkaConnectS2I()))); + when(mockConnectS2IOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new KafkaConnectS2I()))); when(mockConnectS2IOps.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture(kcs2i)); - when(mockConnectS2IOps.updateStatusAsync(any(KafkaConnectS2I.class))).thenReturn(Future.succeededFuture()); - when(mockCmOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); - when(mockPdbOps.reconcile(any(), any(), any())).thenReturn(Future.succeededFuture()); - when(mockNetPolOps.reconcile(eq(kcs2i.getMetadata().getNamespace()), eq(KafkaConnectS2IResources.deploymentName(kcs2i.getMetadata().getName())), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); + when(mockConnectS2IOps.updateStatusAsync(any(), any(KafkaConnectS2I.class))).thenReturn(Future.succeededFuture()); + when(mockCmOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); + when(mockPdbOps.reconcile(any(), any(), any(), any())).thenReturn(Future.succeededFuture()); + when(mockNetPolOps.reconcile(any(), eq(kcs2i.getMetadata().getNamespace()), eq(KafkaConnectS2IResources.deploymentName(kcs2i.getMetadata().getName())), 
any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); PlatformFeaturesAvailability pfa = new PlatformFeaturesAvailability(true, kubernetesVersion); KafkaConnectS2IAssemblyOperator ops = new KafkaConnectS2IAssemblyOperator(vertx, pfa, @@ -649,40 +649,40 @@ public void testUpdateClusterScaleUp(VertxTestContext context) { String kcs2iNamespace = "test"; KafkaConnectS2I kcs2i = ResourceUtils.createEmptyKafkaConnectS2I(kcs2iNamespace, kcs2iName); - KafkaConnectS2ICluster connect = KafkaConnectS2ICluster.fromCrd(kcs2i, VERSIONS); + KafkaConnectS2ICluster connect = KafkaConnectS2ICluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kcs2i, VERSIONS); kcs2i.getSpec().setReplicas(scaleTo); // Change replicas to create ScaleUp when(mockConnectorOps.listAsync(anyString(), any(Optional.class))).thenReturn(Future.succeededFuture(emptyList())); when(mockConnectS2IOps.get(kcs2iNamespace, kcs2iName)).thenReturn(kcs2i); when(mockConnectS2IOps.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture(kcs2i)); when(mockConnectOps.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture(null)); - when(mockConnectS2IOps.updateStatusAsync(any(KafkaConnectS2I.class))).thenReturn(Future.succeededFuture()); + when(mockConnectS2IOps.updateStatusAsync(any(), any(KafkaConnectS2I.class))).thenReturn(Future.succeededFuture()); when(mockServiceOps.get(kcs2iNamespace, connect.getName())).thenReturn(connect.generateService()); when(mockDcOps.get(kcs2iNamespace, connect.getName())).thenReturn(connect.generateDeploymentConfig(new HashMap(), true, null, null)); - when(mockDcOps.readiness(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockDcOps.waitForObserved(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDcOps.readiness(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDcOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); when(mockIsOps.get(kcs2iNamespace, KafkaConnectS2IResources.sourceImageStreamName(connect.getCluster()))).thenReturn(connect.generateSourceImageStream()); when(mockIsOps.get(kcs2iNamespace, connect.getName())).thenReturn(connect.generateTargetImageStream()); when(mockBcOps.get(kcs2iNamespace, connect.getName())).thenReturn(connect.generateBuildConfig()); when(mockPdbOps.get(kcs2iNamespace, connect.getName())).thenReturn(connect.generatePodDisruptionBudget()); - when(mockSecretOps.reconcile(eq(kcs2iNamespace), any(), any())).thenReturn(Future.succeededFuture()); + when(mockSecretOps.reconcile(any(), eq(kcs2iNamespace), any(), any())).thenReturn(Future.succeededFuture()); - when(mockServiceOps.reconcile(any(), any(), any())).thenReturn(Future.succeededFuture()); - when(mockDcOps.reconcile(any(), any(), any())).thenReturn(Future.succeededFuture()); + when(mockServiceOps.reconcile(any(), any(), any(), any())).thenReturn(Future.succeededFuture()); + when(mockDcOps.reconcile(any(), any(), any(), any())).thenReturn(Future.succeededFuture()); doAnswer(i -> Future.succeededFuture(scaleTo)) - .when(mockDcOps).scaleUp(kcs2iNamespace, connect.getName(), scaleTo); + .when(mockDcOps).scaleUp(any(), eq(kcs2iNamespace), eq(connect.getName()), eq(scaleTo)); doAnswer(i -> Future.succeededFuture(scaleTo)) - .when(mockDcOps).scaleDown(kcs2iNamespace, connect.getName(), scaleTo); + .when(mockDcOps).scaleDown(any(), eq(kcs2iNamespace), eq(connect.getName()), 
eq(scaleTo)); - when(mockConnectS2IOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new KafkaConnectS2I()))); - when(mockCmOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); - when(mockIsOps.reconcile(any(), any(), any())).thenReturn(Future.succeededFuture()); - when(mockBcOps.reconcile(any(), any(), any())).thenReturn(Future.succeededFuture()); - when(mockPdbOps.reconcile(any(), any(), any())).thenReturn(Future.succeededFuture()); - when(mockNetPolOps.reconcile(eq(kcs2i.getMetadata().getNamespace()), eq(KafkaConnectS2IResources.deploymentName(kcs2i.getMetadata().getName())), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); + when(mockConnectS2IOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new KafkaConnectS2I()))); + when(mockCmOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); + when(mockIsOps.reconcile(any(), any(), any(), any())).thenReturn(Future.succeededFuture()); + when(mockBcOps.reconcile(any(), any(), any(), any())).thenReturn(Future.succeededFuture()); + when(mockPdbOps.reconcile(any(), any(), any(), any())).thenReturn(Future.succeededFuture()); + when(mockNetPolOps.reconcile(any(), eq(kcs2i.getMetadata().getNamespace()), eq(KafkaConnectS2IResources.deploymentName(kcs2i.getMetadata().getName())), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); KafkaConnectApi mockConnectClient = mock(KafkaConnectApi.class); when(mockConnectClient.list(anyString(), anyInt())).thenReturn(Future.succeededFuture(emptyList())); @@ -692,8 +692,8 @@ public void testUpdateClusterScaleUp(VertxTestContext context) { .withType("sink") .withVersion("1.0.0") .build(); - when(mockConnectClient.listConnectorPlugins(anyString(), anyInt())).thenReturn(Future.succeededFuture(singletonList(plugin1))); - when(mockConnectClient.updateConnectLoggers(anyString(), anyInt(), anyString(), any(OrderedProperties.class))).thenReturn(Future.succeededFuture()); + when(mockConnectClient.listConnectorPlugins(any(), anyString(), anyInt())).thenReturn(Future.succeededFuture(singletonList(plugin1))); + when(mockConnectClient.updateConnectLoggers(any(), anyString(), anyInt(), anyString(), any(OrderedProperties.class))).thenReturn(Future.succeededFuture()); PlatformFeaturesAvailability pfa = new PlatformFeaturesAvailability(true, kubernetesVersion); KafkaConnectS2IAssemblyOperator ops = new KafkaConnectS2IAssemblyOperator(vertx, pfa, @@ -702,7 +702,7 @@ public void testUpdateClusterScaleUp(VertxTestContext context) { Checkpoint async = context.checkpoint(); ops.createOrUpdate(new Reconciliation("test-trigger", KafkaConnectS2I.RESOURCE_KIND, kcs2iNamespace, kcs2iName), kcs2i) .onComplete(context.succeeding(v -> context.verify(() -> { - verify(mockDcOps).scaleUp(kcs2iNamespace, connect.getName(), scaleTo); + verify(mockDcOps).scaleUp(any(), eq(kcs2iNamespace), eq(connect.getName()), eq(scaleTo)); async.flag(); }))); } @@ -728,41 +728,41 @@ public void testUpdateClusterScaleDown(VertxTestContext context) { String kcs2iNamespace = "test"; KafkaConnectS2I kcs2i = ResourceUtils.createEmptyKafkaConnectS2I(kcs2iNamespace, kcs2iName); - KafkaConnectS2ICluster connect = KafkaConnectS2ICluster.fromCrd(kcs2i, VERSIONS); + KafkaConnectS2ICluster connect = 
KafkaConnectS2ICluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kcs2i, VERSIONS); kcs2i.getSpec().setReplicas(scaleTo); // Change replicas to create ScaleDown when(mockConnectorOps.listAsync(anyString(), any(Optional.class))).thenReturn(Future.succeededFuture(emptyList())); when(mockConnectS2IOps.get(kcs2iNamespace, kcs2iName)).thenReturn(kcs2i); when(mockConnectS2IOps.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture(kcs2i)); when(mockConnectOps.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture(null)); - when(mockConnectS2IOps.updateStatusAsync(any(KafkaConnectS2I.class))).thenReturn(Future.succeededFuture()); + when(mockConnectS2IOps.updateStatusAsync(any(), any(KafkaConnectS2I.class))).thenReturn(Future.succeededFuture()); when(mockServiceOps.get(kcs2iNamespace, connect.getName())).thenReturn(connect.generateService()); when(mockDcOps.get(kcs2iNamespace, connect.getName())).thenReturn(connect.generateDeploymentConfig(new HashMap(), true, null, null)); - when(mockDcOps.readiness(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockDcOps.waitForObserved(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDcOps.readiness(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDcOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); when(mockIsOps.get(kcs2iNamespace, KafkaConnectS2IResources.sourceImageStreamName(connect.getCluster()))).thenReturn(connect.generateSourceImageStream()); when(mockIsOps.get(kcs2iNamespace, connect.getName())).thenReturn(connect.generateTargetImageStream()); when(mockBcOps.get(kcs2iNamespace, connect.getName())).thenReturn(connect.generateBuildConfig()); when(mockPdbOps.get(kcs2iNamespace, connect.getName())).thenReturn(connect.generatePodDisruptionBudget()); - when(mockSecretOps.reconcile(eq(kcs2iNamespace), any(), any())).thenReturn(Future.succeededFuture()); + when(mockSecretOps.reconcile(any(), eq(kcs2iNamespace), any(), any())).thenReturn(Future.succeededFuture()); - when(mockServiceOps.reconcile(any(), any(), any())).thenReturn(Future.succeededFuture()); + when(mockServiceOps.reconcile(any(), any(), any(), any())).thenReturn(Future.succeededFuture()); - when(mockDcOps.reconcile(any(), any(), any())).thenReturn(Future.succeededFuture()); + when(mockDcOps.reconcile(any(), any(), any(), any())).thenReturn(Future.succeededFuture()); doAnswer(i -> Future.succeededFuture(scaleTo)) - .when(mockDcOps).scaleUp(kcs2iNamespace, connect.getName(), 2); + .when(mockDcOps).scaleUp(any(), eq(kcs2iNamespace), eq(connect.getName()), eq(scaleTo)); doAnswer(i -> Future.succeededFuture(scaleTo)) - .when(mockDcOps).scaleDown(kcs2iNamespace, connect.getName(), scaleTo); + .when(mockDcOps).scaleDown(any(), eq(kcs2iNamespace), eq(connect.getName()), eq(scaleTo)); - when(mockConnectS2IOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new KafkaConnectS2I()))); - when(mockCmOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); - when(mockIsOps.reconcile(any(), any(), any())).thenReturn(Future.succeededFuture()); - when(mockBcOps.reconcile(any(), any(), any())).thenReturn(Future.succeededFuture()); - when(mockPdbOps.reconcile(any(), any(), any())).thenReturn(Future.succeededFuture()); - 
when(mockNetPolOps.reconcile(eq(kcs2i.getMetadata().getNamespace()), eq(KafkaConnectS2IResources.deploymentName(kcs2i.getMetadata().getName())), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); + when(mockConnectS2IOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new KafkaConnectS2I()))); + when(mockCmOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); + when(mockIsOps.reconcile(any(), any(), any(), any())).thenReturn(Future.succeededFuture()); + when(mockBcOps.reconcile(any(), any(), any(), any())).thenReturn(Future.succeededFuture()); + when(mockPdbOps.reconcile(any(), any(), any(), any())).thenReturn(Future.succeededFuture()); + when(mockNetPolOps.reconcile(any(), eq(kcs2i.getMetadata().getNamespace()), eq(KafkaConnectS2IResources.deploymentName(kcs2i.getMetadata().getName())), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); KafkaConnectApi mockConnectClient = mock(KafkaConnectApi.class); when(mockConnectClient.list(anyString(), anyInt())).thenReturn(Future.succeededFuture(emptyList())); @@ -772,8 +772,8 @@ public void testUpdateClusterScaleDown(VertxTestContext context) { .withType("sink") .withVersion("1.0.0") .build(); - when(mockConnectClient.listConnectorPlugins(anyString(), anyInt())).thenReturn(Future.succeededFuture(singletonList(plugin1))); - when(mockConnectClient.updateConnectLoggers(anyString(), anyInt(), anyString(), any(OrderedProperties.class))).thenReturn(Future.succeededFuture()); + when(mockConnectClient.listConnectorPlugins(any(), anyString(), anyInt())).thenReturn(Future.succeededFuture(singletonList(plugin1))); + when(mockConnectClient.updateConnectLoggers(any(), anyString(), anyInt(), anyString(), any(OrderedProperties.class))).thenReturn(Future.succeededFuture()); PlatformFeaturesAvailability pfa = new PlatformFeaturesAvailability(true, kubernetesVersion); KafkaConnectS2IAssemblyOperator ops = new KafkaConnectS2IAssemblyOperator(vertx, pfa, @@ -783,7 +783,7 @@ public void testUpdateClusterScaleDown(VertxTestContext context) { ops.createOrUpdate(new Reconciliation("test-trigger", KafkaConnectS2I.RESOURCE_KIND, kcs2iNamespace, kcs2iName), kcs2i) .onComplete(context.succeeding(v -> context.verify(() -> { // Verify ScaleDown - verify(mockDcOps).scaleDown(kcs2iNamespace, connect.getName(), scaleTo); + verify(mockDcOps).scaleDown(any(), eq(kcs2iNamespace), eq(connect.getName()), eq(scaleTo)); async.flag(); }))); } @@ -804,22 +804,22 @@ public void testReconcile(VertxTestContext context) { when(mockConnectS2IOps.get(eq(kcs2iNamespace), eq("foo"))).thenReturn(foo); when(mockConnectS2IOps.get(eq(kcs2iNamespace), eq("bar"))).thenReturn(bar); when(mockConnectS2IOps.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture(bar)); - when(mockConnectS2IOps.updateStatusAsync(any(KafkaConnectS2I.class))).thenReturn(Future.succeededFuture()); + when(mockConnectS2IOps.updateStatusAsync(any(), any(KafkaConnectS2I.class))).thenReturn(Future.succeededFuture()); // providing the list of ALL DeploymentConfigs for all the Kafka Connect S2I clusters Labels newLabels = Labels.forStrimziKind(KafkaConnectS2I.RESOURCE_KIND); when(mockDcOps.list(eq(kcs2iNamespace), eq(newLabels))).thenReturn( - asList(KafkaConnectS2ICluster.fromCrd(bar, VERSIONS).generateDeploymentConfig(new HashMap(), true, null, null))); + 
asList(KafkaConnectS2ICluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, bar, VERSIONS).generateDeploymentConfig(new HashMap(), true, null, null))); // providing the list DeploymentConfigs for already "existing" Kafka Connect S2I clusters Labels barLabels = Labels.forStrimziCluster("bar"); when(mockDcOps.list(eq(kcs2iNamespace), eq(barLabels))).thenReturn( - asList(KafkaConnectS2ICluster.fromCrd(bar, VERSIONS).generateDeploymentConfig(new HashMap(), true, null, null)) + asList(KafkaConnectS2ICluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, bar, VERSIONS).generateDeploymentConfig(new HashMap(), true, null, null)) ); - when(mockDcOps.readiness(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockDcOps.waitForObserved(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDcOps.readiness(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDcOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockSecretOps.reconcile(eq(kcs2iNamespace), any(), any())).thenReturn(Future.succeededFuture()); + when(mockSecretOps.reconcile(any(), eq(kcs2iNamespace), any(), any())).thenReturn(Future.succeededFuture()); Set createdOrUpdated = new CopyOnWriteArraySet<>(); @@ -868,25 +868,25 @@ public void testCreateClusterStatusNotReady(VertxTestContext context) { when(mockS2IConnectOps.get(kcs2iNamespace, kcs2iName)).thenReturn(kcs2i); when(mockS2IConnectOps.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture(kcs2i)); when(mockConnectOps.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture(null)); - when(mockServiceOps.reconcile(anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); - when(mockDcOps.reconcile(anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); - when(mockDcOps.scaleDown(anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); - when(mockDcOps.scaleUp(anyString(), anyString(), anyInt())).thenReturn(Future.failedFuture(failureMessage)); - when(mockDcOps.readiness(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockDcOps.waitForObserved(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockIsOps.reconcile(anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); - when(mockBcOps.reconcile(anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); - when(mockPdbOps.reconcile(any(), any(), any())).thenReturn(Future.succeededFuture()); - when(mockS2IConnectOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new KafkaConnectS2I()))); - when(mockCmOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); - when(mockNetPolOps.reconcile(eq(kcs2i.getMetadata().getNamespace()), eq(KafkaConnectS2IResources.deploymentName(kcs2i.getMetadata().getName())), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); - - when(mockSecretOps.reconcile(eq(kcs2iNamespace), any(), any())).thenReturn(Future.succeededFuture()); + when(mockServiceOps.reconcile(any(), anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); + when(mockDcOps.reconcile(any(), anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); + 
when(mockDcOps.scaleDown(any(), anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); + when(mockDcOps.scaleUp(any(), anyString(), anyString(), anyInt())).thenReturn(Future.failedFuture(failureMessage)); + when(mockDcOps.readiness(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDcOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockIsOps.reconcile(any(), anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); + when(mockBcOps.reconcile(any(), anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); + when(mockPdbOps.reconcile(any(), any(), any(), any())).thenReturn(Future.succeededFuture()); + when(mockS2IConnectOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new KafkaConnectS2I()))); + when(mockCmOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); + when(mockNetPolOps.reconcile(any(), eq(kcs2i.getMetadata().getNamespace()), eq(KafkaConnectS2IResources.deploymentName(kcs2i.getMetadata().getName())), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); + + when(mockSecretOps.reconcile(any(), eq(kcs2iNamespace), any(), any())).thenReturn(Future.succeededFuture()); PlatformFeaturesAvailability pfa = new PlatformFeaturesAvailability(true, kubernetesVersion); ArgumentCaptor connectCaptor = ArgumentCaptor.forClass(KafkaConnectS2I.class); - when(mockS2IConnectOps.updateStatusAsync(connectCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockS2IConnectOps.updateStatusAsync(any(), connectCaptor.capture())).thenReturn(Future.succeededFuture()); KafkaConnectS2IAssemblyOperator ops = new KafkaConnectS2IAssemblyOperator(vertx, pfa, supplier, ResourceUtils.dummyClusterOperatorConfig(VERSIONS)); @@ -931,29 +931,29 @@ public void createClusterWithDuplicateOlderConnect(VertxTestContext context, Kaf when(mockConnectOps.getAsync(kcs2i.getMetadata().getNamespace(), kcs2i.getMetadata().getName())).thenReturn(Future.succeededFuture(conflictingConnect)); ArgumentCaptor serviceCaptor = ArgumentCaptor.forClass(Service.class); - when(mockServiceOps.reconcile(anyString(), anyString(), serviceCaptor.capture())).thenReturn(Future.succeededFuture()); - when(mockSecretOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture()); + when(mockServiceOps.reconcile(any(), anyString(), anyString(), serviceCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockSecretOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture()); ArgumentCaptor dcCaptor = ArgumentCaptor.forClass(DeploymentConfig.class); - when(mockDcOps.reconcile(anyString(), anyString(), dcCaptor.capture())).thenReturn(Future.succeededFuture()); - when(mockDcOps.scaleDown(anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); - when(mockDcOps.scaleUp(anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); - when(mockDcOps.readiness(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockDcOps.waitForObserved(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDcOps.reconcile(any(), anyString(), anyString(), dcCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockDcOps.scaleDown(any(), anyString(), 
anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); + when(mockDcOps.scaleUp(any(), anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); + when(mockDcOps.readiness(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDcOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); ArgumentCaptor isCaptor = ArgumentCaptor.forClass(ImageStream.class); - when(mockIsOps.reconcile(anyString(), anyString(), isCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockIsOps.reconcile(any(), anyString(), anyString(), isCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor bcCaptor = ArgumentCaptor.forClass(BuildConfig.class); - when(mockBcOps.reconcile(anyString(), anyString(), bcCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockBcOps.reconcile(any(), anyString(), anyString(), bcCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor pdbNamespaceCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor pdbNameCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor pdbCaptor = ArgumentCaptor.forClass(PodDisruptionBudget.class); - when(mockPdbOps.reconcile(pdbNamespaceCaptor.capture(), pdbNameCaptor.capture(), pdbCaptor.capture())).thenReturn(Future.succeededFuture()); - when(mockNetPolOps.reconcile(eq(kcs2i.getMetadata().getNamespace()), eq(KafkaConnectS2IResources.deploymentName(kcs2i.getMetadata().getName())), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); - when(mockConnectS2IOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new KafkaConnectS2I()))); - when(mockCmOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); + when(mockPdbOps.reconcile(any(), pdbNamespaceCaptor.capture(), pdbNameCaptor.capture(), pdbCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockNetPolOps.reconcile(any(), eq(kcs2i.getMetadata().getNamespace()), eq(KafkaConnectS2IResources.deploymentName(kcs2i.getMetadata().getName())), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); + when(mockConnectS2IOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new KafkaConnectS2I()))); + when(mockCmOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); KafkaConnectApi mockConnectClient = mock(KafkaConnectApi.class); when(mockConnectClient.list(anyString(), anyInt())).thenReturn(Future.succeededFuture(emptyList())); @@ -963,17 +963,17 @@ public void createClusterWithDuplicateOlderConnect(VertxTestContext context, Kaf .withType("sink") .withVersion("1.0.0") .build(); - when(mockConnectClient.listConnectorPlugins(anyString(), anyInt())).thenReturn(Future.succeededFuture(singletonList(plugin1))); - when(mockConnectClient.updateConnectLoggers(anyString(), anyInt(), anyString(), any(OrderedProperties.class))).thenReturn(Future.succeededFuture()); + when(mockConnectClient.listConnectorPlugins(any(), anyString(), anyInt())).thenReturn(Future.succeededFuture(singletonList(plugin1))); + when(mockConnectClient.updateConnectLoggers(any(), anyString(), anyInt(), anyString(), any(OrderedProperties.class))).thenReturn(Future.succeededFuture()); PlatformFeaturesAvailability pfa = new 
PlatformFeaturesAvailability(true, kubernetesVersion); ArgumentCaptor connectCaptor = ArgumentCaptor.forClass(KafkaConnectS2I.class); - when(mockConnectS2IOps.updateStatusAsync(connectCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockConnectS2IOps.updateStatusAsync(any(), connectCaptor.capture())).thenReturn(Future.succeededFuture()); KafkaConnectS2IAssemblyOperator ops = new KafkaConnectS2IAssemblyOperator(vertx, pfa, supplier, ResourceUtils.dummyClusterOperatorConfig(VERSIONS), x -> mockConnectClient); - KafkaConnectS2ICluster connect = KafkaConnectS2ICluster.fromCrd(kcs2i, VERSIONS); + KafkaConnectS2ICluster connect = KafkaConnectS2ICluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kcs2i, VERSIONS); Checkpoint async = context.checkpoint(); ops.reconcile(new Reconciliation("test-trigger", KafkaConnectS2I.RESOURCE_KIND, kcs2i.getMetadata().getNamespace(), kcs2i.getMetadata().getName())) @@ -1081,7 +1081,7 @@ public void testCreateClusterWithSameNameAsConnectFails(VertxTestContext context PlatformFeaturesAvailability pfa = new PlatformFeaturesAvailability(true, kubernetesVersion); ArgumentCaptor connectCaptor = ArgumentCaptor.forClass(KafkaConnectS2I.class); - when(mockConnectS2IOps.updateStatusAsync(connectCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockConnectS2IOps.updateStatusAsync(any(), connectCaptor.capture())).thenReturn(Future.succeededFuture()); KafkaConnectS2IAssemblyOperator ops = new KafkaConnectS2IAssemblyOperator(vertx, pfa, supplier, ResourceUtils.dummyClusterOperatorConfig(VERSIONS), x -> mockConnectClient); diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectorIT.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectorIT.java index ab0e3ac7c3..e174720117 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectorIT.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectorIT.java @@ -124,12 +124,12 @@ public void test(VertxTestContext context) { // Intercept status updates at CrdOperator level // This is to bridge limitations between MockKube and the CrdOperator, as there are currently no Fabric8 APIs for status update CrdOperator connectCrdOperator = mock(CrdOperator.class); - when(connectCrdOperator.updateStatusAsync(any())).thenAnswer(invocation -> { + when(connectCrdOperator.updateStatusAsync(any(), any())).thenAnswer(invocation -> { try { return Future.succeededFuture(Crds.kafkaConnectorOperation(client) .inNamespace(namespace) .withName(connectorName) - .patch(invocation.getArgument(0))); + .patch(invocation.getArgument(1))); } catch (Exception e) { return Future.failedFuture(e); } diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaMirrorMaker2AssemblyOperatorMockTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaMirrorMaker2AssemblyOperatorMockTest.java index 1c118ea508..ecb952c662 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaMirrorMaker2AssemblyOperatorMockTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaMirrorMaker2AssemblyOperatorMockTest.java @@ -163,7 +163,7 @@ public void testReconcileUpdate(VertxTestContext context) { .build()); KafkaConnectApi mock = mock(KafkaConnectApi.class); when(mock.list(anyString(), 
anyInt())).thenReturn(Future.succeededFuture(emptyList())); - when(mock.updateConnectLoggers(anyString(), anyInt(), anyString(), any(OrderedProperties.class))).thenReturn(Future.succeededFuture()); + when(mock.updateConnectLoggers(any(), anyString(), anyInt(), anyString(), any(OrderedProperties.class))).thenReturn(Future.succeededFuture()); Checkpoint async = context.checkpoint(); createMirrorMaker2Cluster(context, mock, false) @@ -190,7 +190,7 @@ public void testPauseReconcile(VertxTestContext context) { .build()); KafkaConnectApi mock = mock(KafkaConnectApi.class); when(mock.list(anyString(), anyInt())).thenReturn(Future.succeededFuture(emptyList())); - when(mock.updateConnectLoggers(anyString(), anyInt(), anyString(), any(OrderedProperties.class))).thenReturn(Future.succeededFuture()); + when(mock.updateConnectLoggers(any(), anyString(), anyInt(), anyString(), any(OrderedProperties.class))).thenReturn(Future.succeededFuture()); Checkpoint async = context.checkpoint(); createMirrorMaker2Cluster(context, mock, true) diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaMirrorMaker2AssemblyOperatorTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaMirrorMaker2AssemblyOperatorTest.java index 70ee100600..da3cd33172 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaMirrorMaker2AssemblyOperatorTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaMirrorMaker2AssemblyOperatorTest.java @@ -84,7 +84,7 @@ public class KafkaMirrorMaker2AssemblyOperatorTest { private static final KafkaVersion.Lookup VERSIONS = KafkaVersionTestUtils.getKafkaVersionLookup(); protected static Vertx vertx; private static final String METRICS_CONFIG = "{\"foo\":\"bar\"}"; - private static final String LOGGING_CONFIG = AbstractModel.getOrderedProperties("kafkaMirrorMaker2DefaultLoggingProperties") + private static final String LOGGING_CONFIG = AbstractModel.getOrderedProperties(Reconciliation.DUMMY_RECONCILIATION, "kafkaMirrorMaker2DefaultLoggingProperties") .asPairsWithComment("Do not change this generated file. 
Logging can be configured in the corresponding Kubernetes resource."); private final KubernetesVersion kubernetesVersion = KubernetesVersion.V1_16; @@ -118,32 +118,32 @@ public void testCreateCluster(VertxTestContext context) { when(mockMirrorMaker2Ops.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture(kmm2)); ArgumentCaptor serviceCaptor = ArgumentCaptor.forClass(Service.class); - when(mockServiceOps.reconcile(anyString(), anyString(), serviceCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockServiceOps.reconcile(any(), anyString(), anyString(), serviceCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor dcCaptor = ArgumentCaptor.forClass(Deployment.class); - when(mockDcOps.reconcile(anyString(), anyString(), dcCaptor.capture())).thenReturn(Future.succeededFuture()); - when(mockDcOps.scaleUp(anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); - when(mockDcOps.scaleDown(anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); - when(mockDcOps.readiness(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockDcOps.waitForObserved(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockCmOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); - when(mockNetPolOps.reconcile(eq(kmm2.getMetadata().getNamespace()), eq(KafkaMirrorMaker2Resources.deploymentName(kmm2.getMetadata().getName())), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); - when(mockSecretOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture()); + when(mockDcOps.reconcile(any(), anyString(), anyString(), dcCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockDcOps.scaleUp(any(), anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); + when(mockDcOps.scaleDown(any(), anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); + when(mockDcOps.readiness(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDcOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockCmOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); + when(mockNetPolOps.reconcile(any(), eq(kmm2.getMetadata().getNamespace()), eq(KafkaMirrorMaker2Resources.deploymentName(kmm2.getMetadata().getName())), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); + when(mockSecretOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture()); ArgumentCaptor pdbCaptor = ArgumentCaptor.forClass(PodDisruptionBudget.class); - when(mockPdbOps.reconcile(anyString(), any(), pdbCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockPdbOps.reconcile(any(), anyString(), any(), pdbCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor mirrorMaker2Captor = ArgumentCaptor.forClass(KafkaMirrorMaker2.class); - when(mockMirrorMaker2Ops.updateStatusAsync(mirrorMaker2Captor.capture())).thenReturn(Future.succeededFuture()); + when(mockMirrorMaker2Ops.updateStatusAsync(any(), mirrorMaker2Captor.capture())).thenReturn(Future.succeededFuture()); KafkaConnectApi mockConnectClient = mock(KafkaConnectApi.class); when(mockConnectClient.list(anyString(), 
anyInt())).thenReturn(Future.succeededFuture(emptyList())); - when(mockConnectClient.updateConnectLoggers(anyString(), anyInt(), anyString(), any(OrderedProperties.class))).thenReturn(Future.succeededFuture()); + when(mockConnectClient.updateConnectLoggers(any(), anyString(), anyInt(), anyString(), any(OrderedProperties.class))).thenReturn(Future.succeededFuture()); KafkaMirrorMaker2AssemblyOperator ops = new KafkaMirrorMaker2AssemblyOperator(vertx, new PlatformFeaturesAvailability(true, kubernetesVersion), supplier, ResourceUtils.dummyClusterOperatorConfig(VERSIONS), x -> mockConnectClient); - KafkaMirrorMaker2Cluster mirrorMaker2 = KafkaMirrorMaker2Cluster.fromCrd(kmm2, VERSIONS); + KafkaMirrorMaker2Cluster mirrorMaker2 = KafkaMirrorMaker2Cluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kmm2, VERSIONS); Checkpoint async = context.checkpoint(); ops.reconcile(new Reconciliation("test-trigger", KafkaMirrorMaker2.RESOURCE_KIND, kmm2Namespace, kmm2Name)) @@ -203,41 +203,41 @@ public void testUpdateClusterNoDiff(VertxTestContext context) { String kmm2Namespace = "test"; KafkaMirrorMaker2 kmm2 = ResourceUtils.createEmptyKafkaMirrorMaker2(kmm2Namespace, kmm2Name); - KafkaMirrorMaker2Cluster mirrorMaker2 = KafkaMirrorMaker2Cluster.fromCrd(kmm2, VERSIONS); + KafkaMirrorMaker2Cluster mirrorMaker2 = KafkaMirrorMaker2Cluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kmm2, VERSIONS); when(mockMirrorMaker2Ops.get(kmm2Namespace, kmm2Name)).thenReturn(kmm2); when(mockMirrorMaker2Ops.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture(kmm2)); - when(mockMirrorMaker2Ops.updateStatusAsync(any(KafkaMirrorMaker2.class))).thenReturn(Future.succeededFuture()); + when(mockMirrorMaker2Ops.updateStatusAsync(any(), any(KafkaMirrorMaker2.class))).thenReturn(Future.succeededFuture()); when(mockServiceOps.get(kmm2Namespace, mirrorMaker2.getName())).thenReturn(mirrorMaker2.generateService()); when(mockDcOps.get(kmm2Namespace, mirrorMaker2.getName())).thenReturn(mirrorMaker2.generateDeployment(new HashMap(), true, null, null)); - when(mockDcOps.readiness(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockDcOps.waitForObserved(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockSecretOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture()); + when(mockDcOps.readiness(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDcOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockSecretOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture()); ArgumentCaptor serviceNameCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor serviceCaptor = ArgumentCaptor.forClass(Service.class); - when(mockServiceOps.reconcile(eq(kmm2Namespace), serviceNameCaptor.capture(), serviceCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockServiceOps.reconcile(any(), eq(kmm2Namespace), serviceNameCaptor.capture(), serviceCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor dcNameCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor dcCaptor = ArgumentCaptor.forClass(Deployment.class); - when(mockDcOps.reconcile(eq(kmm2Namespace), dcNameCaptor.capture(), dcCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockDcOps.reconcile(any(), eq(kmm2Namespace), dcNameCaptor.capture(), 
dcCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor dcScaleUpNameCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor dcScaleUpReplicasCaptor = ArgumentCaptor.forClass(Integer.class); - when(mockDcOps.scaleUp(eq(kmm2Namespace), dcScaleUpNameCaptor.capture(), dcScaleUpReplicasCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockDcOps.scaleUp(any(), eq(kmm2Namespace), dcScaleUpNameCaptor.capture(), dcScaleUpReplicasCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor dcScaleDownNameCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor dcScaleDownReplicasCaptor = ArgumentCaptor.forClass(Integer.class); - when(mockDcOps.scaleDown(eq(kmm2Namespace), dcScaleDownNameCaptor.capture(), dcScaleDownReplicasCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockDcOps.scaleDown(any(), eq(kmm2Namespace), dcScaleDownNameCaptor.capture(), dcScaleDownReplicasCaptor.capture())).thenReturn(Future.succeededFuture()); - when(mockCmOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); - when(mockNetPolOps.reconcile(eq(kmm2.getMetadata().getNamespace()), eq(KafkaMirrorMaker2Resources.deploymentName(kmm2.getMetadata().getName())), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); + when(mockCmOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); + when(mockNetPolOps.reconcile(any(), eq(kmm2.getMetadata().getNamespace()), eq(KafkaMirrorMaker2Resources.deploymentName(kmm2.getMetadata().getName())), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); ArgumentCaptor pdbCaptor = ArgumentCaptor.forClass(PodDisruptionBudget.class); - when(mockPdbOps.reconcile(anyString(), any(), pdbCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockPdbOps.reconcile(any(), anyString(), any(), pdbCaptor.capture())).thenReturn(Future.succeededFuture()); KafkaConnectApi mockConnectClient = mock(KafkaConnectApi.class); when(mockConnectClient.list(anyString(), anyInt())).thenReturn(Future.succeededFuture(emptyList())); - when(mockConnectClient.updateConnectLoggers(anyString(), anyInt(), anyString(), any(OrderedProperties.class))).thenReturn(Future.succeededFuture()); + when(mockConnectClient.updateConnectLoggers(any(), anyString(), anyInt(), anyString(), any(OrderedProperties.class))).thenReturn(Future.succeededFuture()); KafkaMirrorMaker2AssemblyOperator ops = new KafkaMirrorMaker2AssemblyOperator(vertx, new PlatformFeaturesAvailability(true, kubernetesVersion), supplier, ResourceUtils.dummyClusterOperatorConfig(VERSIONS), x -> mockConnectClient); @@ -284,40 +284,40 @@ public void testUpdateCluster(VertxTestContext context) { String kmm2Namespace = "test"; KafkaMirrorMaker2 kmm2 = ResourceUtils.createEmptyKafkaMirrorMaker2(kmm2Namespace, kmm2Name); - KafkaMirrorMaker2Cluster mirrorMaker2 = KafkaMirrorMaker2Cluster.fromCrd(kmm2, VERSIONS); + KafkaMirrorMaker2Cluster mirrorMaker2 = KafkaMirrorMaker2Cluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kmm2, VERSIONS); kmm2.getSpec().setImage("some/different:image"); // Change the image to generate some diff when(mockMirrorMaker2Ops.get(kmm2Namespace, kmm2Name)).thenReturn(kmm2); when(mockMirrorMaker2Ops.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture(kmm2)); - 
when(mockMirrorMaker2Ops.updateStatusAsync(any(KafkaMirrorMaker2.class))).thenReturn(Future.succeededFuture()); + when(mockMirrorMaker2Ops.updateStatusAsync(any(), any(KafkaMirrorMaker2.class))).thenReturn(Future.succeededFuture()); when(mockServiceOps.get(kmm2Namespace, mirrorMaker2.getName())).thenReturn(mirrorMaker2.generateService()); when(mockDcOps.get(kmm2Namespace, mirrorMaker2.getName())) .thenReturn(mirrorMaker2.generateDeployment(new HashMap(), true, null, null)); - when(mockDcOps.readiness(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockDcOps.waitForObserved(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockSecretOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture()); + when(mockDcOps.readiness(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDcOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockSecretOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture()); ArgumentCaptor serviceNameCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor serviceCaptor = ArgumentCaptor.forClass(Service.class); - when(mockServiceOps.reconcile(eq(kmm2Namespace), serviceNameCaptor.capture(), serviceCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockServiceOps.reconcile(any(), eq(kmm2Namespace), serviceNameCaptor.capture(), serviceCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor dcNameCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor dcCaptor = ArgumentCaptor.forClass(Deployment.class); - when(mockDcOps.reconcile(eq(kmm2Namespace), dcNameCaptor.capture(), dcCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockDcOps.reconcile(any(), eq(kmm2Namespace), dcNameCaptor.capture(), dcCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor dcScaleUpNameCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor dcScaleUpReplicasCaptor = ArgumentCaptor.forClass(Integer.class); - when(mockDcOps.scaleUp(eq(kmm2Namespace), dcScaleUpNameCaptor.capture(), dcScaleUpReplicasCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockDcOps.scaleUp(any(), eq(kmm2Namespace), dcScaleUpNameCaptor.capture(), dcScaleUpReplicasCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor dcScaleDownNameCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor dcScaleDownReplicasCaptor = ArgumentCaptor.forClass(Integer.class); - when(mockDcOps.scaleDown(eq(kmm2Namespace), dcScaleDownNameCaptor.capture(), dcScaleDownReplicasCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockDcOps.scaleDown(any(), eq(kmm2Namespace), dcScaleDownNameCaptor.capture(), dcScaleDownReplicasCaptor.capture())).thenReturn(Future.succeededFuture()); - when(mockCmOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); - when(mockNetPolOps.reconcile(eq(kmm2.getMetadata().getNamespace()), eq(KafkaMirrorMaker2Resources.deploymentName(kmm2.getMetadata().getName())), any())) + when(mockCmOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); + when(mockNetPolOps.reconcile(any(), eq(kmm2.getMetadata().getNamespace()), eq(KafkaMirrorMaker2Resources.deploymentName(kmm2.getMetadata().getName())), any())) 
.thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); ArgumentCaptor pdbCaptor = ArgumentCaptor.forClass(PodDisruptionBudget.class); - when(mockPdbOps.reconcile(anyString(), any(), pdbCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockPdbOps.reconcile(any(), anyString(), any(), pdbCaptor.capture())).thenReturn(Future.succeededFuture()); // Mock CM get when(mockMirrorMaker2Ops.get(kmm2Namespace, kmm2Name)).thenReturn(kmm2); @@ -343,11 +343,11 @@ public void testUpdateCluster(VertxTestContext context) { doAnswer(invocation -> { metricsCms.add(invocation.getArgument(1)); return Future.succeededFuture(); - }).when(mockCmOps).reconcile(eq(kmm2Namespace), anyString(), any()); + }).when(mockCmOps).reconcile(any(), eq(kmm2Namespace), anyString(), any()); KafkaConnectApi mockConnectClient = mock(KafkaConnectApi.class); when(mockConnectClient.list(anyString(), anyInt())).thenReturn(Future.succeededFuture(emptyList())); - when(mockConnectClient.updateConnectLoggers(anyString(), anyInt(), anyString(), any(OrderedProperties.class))).thenReturn(Future.succeededFuture()); + when(mockConnectClient.updateConnectLoggers(any(), anyString(), anyInt(), anyString(), any(OrderedProperties.class))).thenReturn(Future.succeededFuture()); KafkaMirrorMaker2AssemblyOperator ops = new KafkaMirrorMaker2AssemblyOperator(vertx, new PlatformFeaturesAvailability(true, kubernetesVersion), supplier, ResourceUtils.dummyClusterOperatorConfig(VERSIONS), x -> mockConnectClient); @@ -355,7 +355,7 @@ public void testUpdateCluster(VertxTestContext context) { Checkpoint async = context.checkpoint(); ops.createOrUpdate(new Reconciliation("test-trigger", KafkaMirrorMaker2.RESOURCE_KIND, kmm2Namespace, kmm2Name), kmm2) .onComplete(context.succeeding(v -> context.verify(() -> { - KafkaMirrorMaker2Cluster compareTo = KafkaMirrorMaker2Cluster.fromCrd(kmm2, VERSIONS); + KafkaMirrorMaker2Cluster compareTo = KafkaMirrorMaker2Cluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kmm2, VERSIONS); // Verify service List capturedServices = serviceCaptor.getAllValues(); @@ -385,7 +385,7 @@ public void testUpdateCluster(VertxTestContext context) { assertThat(dcScaleUpNameCaptor.getAllValues(), hasSize(1)); // No metrics config => no CMs created - verify(mockCmOps, never()).createOrUpdate(any()); + verify(mockCmOps, never()).createOrUpdate(any(), any()); async.flag(); }))); } @@ -405,42 +405,42 @@ public void testUpdateClusterWithFailingDeploymentFailure(VertxTestContext conte String kmm2Namespace = "test"; KafkaMirrorMaker2 kmm2 = ResourceUtils.createEmptyKafkaMirrorMaker2(kmm2Namespace, kmm2Name); - KafkaMirrorMaker2Cluster mirrorMaker2 = KafkaMirrorMaker2Cluster.fromCrd(kmm2, VERSIONS); + KafkaMirrorMaker2Cluster mirrorMaker2 = KafkaMirrorMaker2Cluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kmm2, VERSIONS); kmm2.getSpec().setImage("some/different:image"); // Change the image to generate some diff when(mockMirrorMaker2Ops.get(kmm2Namespace, kmm2Name)).thenReturn(kmm2); when(mockMirrorMaker2Ops.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture(kmm2)); - when(mockMirrorMaker2Ops.updateStatusAsync(any(KafkaMirrorMaker2.class))).thenReturn(Future.succeededFuture()); + when(mockMirrorMaker2Ops.updateStatusAsync(any(), any(KafkaMirrorMaker2.class))).thenReturn(Future.succeededFuture()); when(mockServiceOps.get(kmm2Namespace, mirrorMaker2.getName())).thenReturn(mirrorMaker2.generateService()); when(mockDcOps.get(kmm2Namespace, 
mirrorMaker2.getName())).thenReturn(mirrorMaker2.generateDeployment(new HashMap(), true, null, null)); - when(mockDcOps.readiness(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockDcOps.waitForObserved(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockSecretOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture()); + when(mockDcOps.readiness(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDcOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockSecretOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture()); ArgumentCaptor serviceNamespaceCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor serviceNameCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor serviceCaptor = ArgumentCaptor.forClass(Service.class); - when(mockServiceOps.reconcile(serviceNamespaceCaptor.capture(), serviceNameCaptor.capture(), serviceCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockServiceOps.reconcile(any(), serviceNamespaceCaptor.capture(), serviceNameCaptor.capture(), serviceCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor dcNamespaceCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor dcNameCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor dcCaptor = ArgumentCaptor.forClass(Deployment.class); - when(mockDcOps.reconcile(dcNamespaceCaptor.capture(), dcNameCaptor.capture(), dcCaptor.capture())).thenReturn(Future.failedFuture("Failed")); + when(mockDcOps.reconcile(any(), dcNamespaceCaptor.capture(), dcNameCaptor.capture(), dcCaptor.capture())).thenReturn(Future.failedFuture("Failed")); ArgumentCaptor dcScaleUpNamespaceCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor dcScaleUpNameCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor dcScaleUpReplicasCaptor = ArgumentCaptor.forClass(Integer.class); - when(mockDcOps.scaleUp(dcScaleUpNamespaceCaptor.capture(), dcScaleUpNameCaptor.capture(), dcScaleUpReplicasCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockDcOps.scaleUp(any(), dcScaleUpNamespaceCaptor.capture(), dcScaleUpNameCaptor.capture(), dcScaleUpReplicasCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor dcScaleDownNamespaceCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor dcScaleDownNameCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor dcScaleDownReplicasCaptor = ArgumentCaptor.forClass(Integer.class); - when(mockDcOps.scaleDown(dcScaleDownNamespaceCaptor.capture(), dcScaleDownNameCaptor.capture(), dcScaleDownReplicasCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockDcOps.scaleDown(any(), dcScaleDownNamespaceCaptor.capture(), dcScaleDownNameCaptor.capture(), dcScaleDownReplicasCaptor.capture())).thenReturn(Future.succeededFuture()); - when(mockMirrorMaker2Ops.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new KafkaMirrorMaker2()))); - when(mockCmOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); - when(mockPdbOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new PodDisruptionBudget()))); - when(mockNetPolOps.reconcile(eq(kmm2.getMetadata().getNamespace()), 
eq(KafkaMirrorMaker2Resources.deploymentName(kmm2.getMetadata().getName())), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); + when(mockMirrorMaker2Ops.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new KafkaMirrorMaker2()))); + when(mockCmOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); + when(mockPdbOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new PodDisruptionBudget()))); + when(mockNetPolOps.reconcile(any(), eq(kmm2.getMetadata().getNamespace()), eq(KafkaMirrorMaker2Resources.deploymentName(kmm2.getMetadata().getName())), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); KafkaMirrorMaker2AssemblyOperator ops = new KafkaMirrorMaker2AssemblyOperator(vertx, new PlatformFeaturesAvailability(true, kubernetesVersion), supplier, ResourceUtils.dummyClusterOperatorConfig(VERSIONS)); @@ -467,36 +467,36 @@ public void testUpdateClusterScaleUp(VertxTestContext context) { String kmm2Namespace = "test"; KafkaMirrorMaker2 kmm2 = ResourceUtils.createEmptyKafkaMirrorMaker2(kmm2Namespace, kmm2Name); - KafkaMirrorMaker2Cluster mirrorMaker2 = KafkaMirrorMaker2Cluster.fromCrd(kmm2, VERSIONS); + KafkaMirrorMaker2Cluster mirrorMaker2 = KafkaMirrorMaker2Cluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kmm2, VERSIONS); kmm2.getSpec().setReplicas(scaleTo); // Change replicas to create ScaleUp when(mockMirrorMaker2Ops.get(kmm2Namespace, kmm2Name)).thenReturn(kmm2); when(mockMirrorMaker2Ops.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture(kmm2)); - when(mockMirrorMaker2Ops.updateStatusAsync(any(KafkaMirrorMaker2.class))).thenReturn(Future.succeededFuture()); + when(mockMirrorMaker2Ops.updateStatusAsync(any(), any(KafkaMirrorMaker2.class))).thenReturn(Future.succeededFuture()); when(mockServiceOps.get(kmm2Namespace, mirrorMaker2.getName())).thenReturn(mirrorMaker2.generateService()); when(mockDcOps.get(kmm2Namespace, mirrorMaker2.getName())).thenReturn(mirrorMaker2.generateDeployment(new HashMap(), true, null, null)); - when(mockDcOps.readiness(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockDcOps.waitForObserved(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockSecretOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture()); + when(mockDcOps.readiness(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDcOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockSecretOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture()); - when(mockServiceOps.reconcile(eq(kmm2Namespace), any(), any())).thenReturn(Future.succeededFuture()); + when(mockServiceOps.reconcile(any(), eq(kmm2Namespace), any(), any())).thenReturn(Future.succeededFuture()); - when(mockDcOps.reconcile(eq(kmm2Namespace), any(), any())).thenReturn(Future.succeededFuture()); + when(mockDcOps.reconcile(any(), eq(kmm2Namespace), any(), any())).thenReturn(Future.succeededFuture()); doAnswer(i -> Future.succeededFuture(scaleTo)) - .when(mockDcOps).scaleUp(kmm2Namespace, mirrorMaker2.getName(), scaleTo); + .when(mockDcOps).scaleUp(any(), eq(kmm2Namespace), eq(mirrorMaker2.getName()), eq(scaleTo)); 
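The scaleUp/scaleDown stub changes just above follow from a general Mockito rule: once any argument of a stubbed or verified call uses an argument matcher, every argument must be a matcher, otherwise Mockito throws InvalidUseOfMatchersException. That is why the plain kmm2Namespace, mirrorMaker2.getName() and scaleTo values are wrapped in eq() as soon as the new leading parameter is stubbed with any(). A minimal sketch of the rule, using a hypothetical Ops interface rather than the actual Strimzi DeploymentOperator, and assuming only standard Mockito and Vert.x APIs:

import io.vertx.core.Future;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;

public class MatcherExample {
    // Hypothetical interface standing in for an operator with a leading context argument.
    interface Ops {
        Future<Integer> scaleUp(Object reconciliation, String namespace, String name, int replicas);
    }

    public static void main(String[] args) {
        Ops mockOps = mock(Ops.class);

        // Mixing a matcher with raw values would fail at runtime with InvalidUseOfMatchersException:
        // doAnswer(i -> Future.succeededFuture(3)).when(mockOps).scaleUp(any(), "ns", "my-mm2", 3);

        // Once any() is used for the first argument, the remaining literals are wrapped in eq():
        doAnswer(i -> Future.succeededFuture(3))
                .when(mockOps).scaleUp(any(), eq("ns"), eq("my-mm2"), eq(3));

        // The stub matches a call with any first argument and exactly these remaining values.
        System.out.println(mockOps.scaleUp(null, "ns", "my-mm2", 3).result()); // prints 3
    }
}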
doAnswer(i -> Future.succeededFuture(scaleTo)) - .when(mockDcOps).scaleDown(kmm2Namespace, mirrorMaker2.getName(), scaleTo); + .when(mockDcOps).scaleDown(any(), eq(kmm2Namespace), eq(mirrorMaker2.getName()), eq(scaleTo)); - when(mockMirrorMaker2Ops.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new KafkaMirrorMaker2()))); - when(mockCmOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); - when(mockPdbOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new PodDisruptionBudget()))); - when(mockNetPolOps.reconcile(eq(kmm2.getMetadata().getNamespace()), eq(KafkaMirrorMaker2Resources.deploymentName(kmm2.getMetadata().getName())), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); + when(mockMirrorMaker2Ops.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new KafkaMirrorMaker2()))); + when(mockCmOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); + when(mockPdbOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new PodDisruptionBudget()))); + when(mockNetPolOps.reconcile(any(), eq(kmm2.getMetadata().getNamespace()), eq(KafkaMirrorMaker2Resources.deploymentName(kmm2.getMetadata().getName())), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); KafkaConnectApi mockConnectClient = mock(KafkaConnectApi.class); when(mockConnectClient.list(anyString(), anyInt())).thenReturn(Future.succeededFuture(emptyList())); - when(mockConnectClient.updateConnectLoggers(anyString(), anyInt(), anyString(), any(OrderedProperties.class))).thenReturn(Future.succeededFuture()); + when(mockConnectClient.updateConnectLoggers(any(), anyString(), anyInt(), anyString(), any(OrderedProperties.class))).thenReturn(Future.succeededFuture()); KafkaMirrorMaker2AssemblyOperator ops = new KafkaMirrorMaker2AssemblyOperator(vertx, new PlatformFeaturesAvailability(true, kubernetesVersion), supplier, ResourceUtils.dummyClusterOperatorConfig(VERSIONS), x -> mockConnectClient); @@ -504,7 +504,7 @@ public void testUpdateClusterScaleUp(VertxTestContext context) { Checkpoint async = context.checkpoint(); ops.createOrUpdate(new Reconciliation("test-trigger", KafkaMirrorMaker2.RESOURCE_KIND, kmm2Namespace, kmm2Name), kmm2) .onComplete(context.succeeding(v -> context.verify(() -> { - verify(mockDcOps).scaleUp(kmm2Namespace, mirrorMaker2.getName(), scaleTo); + verify(mockDcOps).scaleUp(any(), eq(kmm2Namespace), eq(mirrorMaker2.getName()), eq(scaleTo)); async.flag(); }))); } @@ -526,35 +526,35 @@ public void testUpdateClusterScaleDown(VertxTestContext context) { String kmm2Namespace = "test"; KafkaMirrorMaker2 kmm2 = ResourceUtils.createEmptyKafkaMirrorMaker2(kmm2Namespace, kmm2Name); - KafkaMirrorMaker2Cluster mirrorMaker2 = KafkaMirrorMaker2Cluster.fromCrd(kmm2, VERSIONS); + KafkaMirrorMaker2Cluster mirrorMaker2 = KafkaMirrorMaker2Cluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kmm2, VERSIONS); kmm2.getSpec().setReplicas(scaleTo); // Change replicas to create ScaleDown when(mockMirrorMaker2Ops.get(kmm2Namespace, kmm2Name)).thenReturn(kmm2); when(mockMirrorMaker2Ops.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture(kmm2)); - 
when(mockMirrorMaker2Ops.updateStatusAsync(any(KafkaMirrorMaker2.class))).thenReturn(Future.succeededFuture()); + when(mockMirrorMaker2Ops.updateStatusAsync(any(), any(KafkaMirrorMaker2.class))).thenReturn(Future.succeededFuture()); when(mockServiceOps.get(kmm2Namespace, mirrorMaker2.getName())).thenReturn(mirrorMaker2.generateService()); when(mockDcOps.get(kmm2Namespace, mirrorMaker2.getName())).thenReturn(mirrorMaker2.generateDeployment(new HashMap(), true, null, null)); - when(mockDcOps.readiness(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockDcOps.waitForObserved(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDcOps.readiness(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDcOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockServiceOps.reconcile(eq(kmm2Namespace), any(), any())).thenReturn(Future.succeededFuture()); - when(mockSecretOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture()); - when(mockDcOps.reconcile(eq(kmm2Namespace), any(), any())).thenReturn(Future.succeededFuture()); + when(mockServiceOps.reconcile(any(), eq(kmm2Namespace), any(), any())).thenReturn(Future.succeededFuture()); + when(mockSecretOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture()); + when(mockDcOps.reconcile(any(), eq(kmm2Namespace), any(), any())).thenReturn(Future.succeededFuture()); doAnswer(i -> Future.succeededFuture(scaleTo)) - .when(mockDcOps).scaleUp(kmm2Namespace, mirrorMaker2.getName(), scaleTo); + .when(mockDcOps).scaleUp(any(), eq(kmm2Namespace), eq(mirrorMaker2.getName()), eq(scaleTo)); doAnswer(i -> Future.succeededFuture(scaleTo)) - .when(mockDcOps).scaleDown(kmm2Namespace, mirrorMaker2.getName(), scaleTo); + .when(mockDcOps).scaleDown(any(), eq(kmm2Namespace), eq(mirrorMaker2.getName()), eq(scaleTo)); - when(mockMirrorMaker2Ops.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new KafkaMirrorMaker2()))); - when(mockCmOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); - when(mockPdbOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new PodDisruptionBudget()))); - when(mockNetPolOps.reconcile(eq(kmm2.getMetadata().getNamespace()), eq(KafkaMirrorMaker2Resources.deploymentName(kmm2.getMetadata().getName())), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); + when(mockMirrorMaker2Ops.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new KafkaMirrorMaker2()))); + when(mockCmOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); + when(mockPdbOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new PodDisruptionBudget()))); + when(mockNetPolOps.reconcile(any(), eq(kmm2.getMetadata().getNamespace()), eq(KafkaMirrorMaker2Resources.deploymentName(kmm2.getMetadata().getName())), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); KafkaConnectApi mockConnectClient = mock(KafkaConnectApi.class); when(mockConnectClient.list(anyString(), anyInt())).thenReturn(Future.succeededFuture(emptyList())); - 
when(mockConnectClient.updateConnectLoggers(anyString(), anyInt(), anyString(), any(OrderedProperties.class))).thenReturn(Future.succeededFuture()); + when(mockConnectClient.updateConnectLoggers(any(), anyString(), anyInt(), anyString(), any(OrderedProperties.class))).thenReturn(Future.succeededFuture()); KafkaMirrorMaker2AssemblyOperator ops = new KafkaMirrorMaker2AssemblyOperator(vertx, new PlatformFeaturesAvailability(true, kubernetesVersion), supplier, ResourceUtils.dummyClusterOperatorConfig(VERSIONS), x -> mockConnectClient); @@ -562,7 +562,7 @@ public void testUpdateClusterScaleDown(VertxTestContext context) { Checkpoint async = context.checkpoint(); ops.createOrUpdate(new Reconciliation("test-trigger", KafkaMirrorMaker2.RESOURCE_KIND, kmm2Namespace, kmm2Name), kmm2) .onComplete(context.succeeding(v -> context.verify(() -> { - verify(mockDcOps).scaleUp(kmm2Namespace, mirrorMaker2.getName(), scaleTo); + verify(mockDcOps).scaleUp(any(), eq(kmm2Namespace), eq(mirrorMaker2.getName()), eq(scaleTo)); async.flag(); }))); } @@ -587,18 +587,18 @@ public void testReconcile(VertxTestContext context) { // providing the list of ALL Deployments for all the Kafka MirrorMaker 2.0 clusters Labels newLabels = Labels.forStrimziKind(KafkaMirrorMaker2.RESOURCE_KIND); when(mockDcOps.list(eq(kmm2Namespace), eq(newLabels))).thenReturn( - asList(KafkaMirrorMaker2Cluster.fromCrd(bar, VERSIONS).generateDeployment(new HashMap(), true, null, null))); + asList(KafkaMirrorMaker2Cluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, bar, VERSIONS).generateDeployment(new HashMap(), true, null, null))); // providing the list Deployments for already "existing" Kafka MirrorMaker 2.0 clusters Labels barLabels = Labels.forStrimziCluster("bar"); when(mockDcOps.list(eq(kmm2Namespace), eq(barLabels))).thenReturn( - asList(KafkaMirrorMaker2Cluster.fromCrd(bar, VERSIONS).generateDeployment(new HashMap(), true, null, null)) + asList(KafkaMirrorMaker2Cluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, bar, VERSIONS).generateDeployment(new HashMap(), true, null, null)) ); - when(mockDcOps.readiness(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockDcOps.waitForObserved(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDcOps.readiness(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDcOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockSecretOps.reconcile(eq(kmm2Namespace), any(), any())).thenReturn(Future.succeededFuture()); - when(mockPdbOps.reconcile(eq(kmm2Namespace), any(), any())).thenReturn(Future.succeededFuture()); + when(mockSecretOps.reconcile(any(), eq(kmm2Namespace), any(), any())).thenReturn(Future.succeededFuture()); + when(mockPdbOps.reconcile(any(), eq(kmm2Namespace), any(), any())).thenReturn(Future.succeededFuture()); Set createdOrUpdated = new CopyOnWriteArraySet<>(); @@ -642,19 +642,19 @@ public void testCreateClusterStatusNotReady(VertxTestContext context) { when(mockMirrorMaker2Ops.get(kmm2Namespace, kmm2Name)).thenReturn(kmm2); when(mockMirrorMaker2Ops.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture(kmm2)); - when(mockServiceOps.reconcile(anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); - when(mockDcOps.reconcile(anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); - when(mockDcOps.scaleUp(anyString(), anyString(), 
anyInt())).thenReturn(Future.succeededFuture(42)); - when(mockDcOps.scaleDown(anyString(), anyString(), anyInt())).thenReturn(Future.failedFuture(failureMsg)); - when(mockDcOps.readiness(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockDcOps.waitForObserved(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockCmOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); - when(mockNetPolOps.reconcile(eq(kmm2.getMetadata().getNamespace()), eq(KafkaMirrorMaker2Resources.deploymentName(kmm2.getMetadata().getName())), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); - when(mockPdbOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture()); - when(mockSecretOps.reconcile(eq(kmm2Namespace), any(), any())).thenReturn(Future.succeededFuture()); + when(mockServiceOps.reconcile(any(), anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); + when(mockDcOps.reconcile(any(), anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); + when(mockDcOps.scaleUp(any(), anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); + when(mockDcOps.scaleDown(any(), anyString(), anyString(), anyInt())).thenReturn(Future.failedFuture(failureMsg)); + when(mockDcOps.readiness(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDcOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockCmOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); + when(mockNetPolOps.reconcile(any(), eq(kmm2.getMetadata().getNamespace()), eq(KafkaMirrorMaker2Resources.deploymentName(kmm2.getMetadata().getName())), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); + when(mockPdbOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture()); + when(mockSecretOps.reconcile(any(), eq(kmm2Namespace), any(), any())).thenReturn(Future.succeededFuture()); ArgumentCaptor mirrorMaker2Captor = ArgumentCaptor.forClass(KafkaMirrorMaker2.class); - when(mockMirrorMaker2Ops.updateStatusAsync(mirrorMaker2Captor.capture())).thenReturn(Future.succeededFuture()); + when(mockMirrorMaker2Ops.updateStatusAsync(any(), mirrorMaker2Captor.capture())).thenReturn(Future.succeededFuture()); KafkaMirrorMaker2AssemblyOperator ops = new KafkaMirrorMaker2AssemblyOperator(vertx, new PlatformFeaturesAvailability(true, kubernetesVersion), supplier, ResourceUtils.dummyClusterOperatorConfig(VERSIONS)); @@ -692,38 +692,38 @@ public void testCreateClusterWithZeroReplicas(VertxTestContext context) { when(mockMirrorMaker2Ops.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture(kmm2)); ArgumentCaptor serviceCaptor = ArgumentCaptor.forClass(Service.class); - when(mockServiceOps.reconcile(anyString(), anyString(), serviceCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockServiceOps.reconcile(any(), anyString(), anyString(), serviceCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor dcCaptor = ArgumentCaptor.forClass(Deployment.class); - when(mockDcOps.reconcile(anyString(), anyString(), dcCaptor.capture())).thenReturn(Future.succeededFuture()); - when(mockDcOps.scaleUp(anyString(), anyString(), 
anyInt())).thenReturn(Future.succeededFuture(42)); - when(mockDcOps.scaleDown(anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); - when(mockDcOps.waitForObserved(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockCmOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); - when(mockNetPolOps.reconcile(eq(kmm2.getMetadata().getNamespace()), eq(KafkaMirrorMaker2Resources.deploymentName(kmm2.getMetadata().getName())), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); - when(mockSecretOps.reconcile(eq(kmm2Namespace), any(), any())).thenReturn(Future.succeededFuture()); + when(mockDcOps.reconcile(any(), anyString(), anyString(), dcCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockDcOps.scaleUp(any(), anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); + when(mockDcOps.scaleDown(any(), anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); + when(mockDcOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockCmOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); + when(mockNetPolOps.reconcile(any(), eq(kmm2.getMetadata().getNamespace()), eq(KafkaMirrorMaker2Resources.deploymentName(kmm2.getMetadata().getName())), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); + when(mockSecretOps.reconcile(any(), eq(kmm2Namespace), any(), any())).thenReturn(Future.succeededFuture()); ArgumentCaptor pdbCaptor = ArgumentCaptor.forClass(PodDisruptionBudget.class); - when(mockPdbOps.reconcile(anyString(), any(), pdbCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockPdbOps.reconcile(any(), anyString(), any(), pdbCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor mirrorMaker2Captor = ArgumentCaptor.forClass(KafkaMirrorMaker2.class); - when(mockMirrorMaker2Ops.updateStatusAsync(mirrorMaker2Captor.capture())).thenReturn(Future.succeededFuture()); + when(mockMirrorMaker2Ops.updateStatusAsync(any(), mirrorMaker2Captor.capture())).thenReturn(Future.succeededFuture()); KafkaConnectApi mockConnectClient = mock(KafkaConnectApi.class); when(mockConnectClient.list(anyString(), anyInt())).thenReturn(Future.succeededFuture(emptyList())); - when(mockConnectClient.updateConnectLoggers(anyString(), anyInt(), anyString(), any(OrderedProperties.class))).thenReturn(Future.succeededFuture()); + when(mockConnectClient.updateConnectLoggers(any(), anyString(), anyInt(), anyString(), any(OrderedProperties.class))).thenReturn(Future.succeededFuture()); KafkaMirrorMaker2AssemblyOperator ops = new KafkaMirrorMaker2AssemblyOperator(vertx, new PlatformFeaturesAvailability(true, kubernetesVersion), supplier, ResourceUtils.dummyClusterOperatorConfig(VERSIONS), x -> mockConnectClient); - KafkaMirrorMaker2Cluster mirrorMaker2 = KafkaMirrorMaker2Cluster.fromCrd(kmm2, VERSIONS); + KafkaMirrorMaker2Cluster mirrorMaker2 = KafkaMirrorMaker2Cluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kmm2, VERSIONS); Checkpoint async = context.checkpoint(); ops.reconcile(new Reconciliation("test-trigger", KafkaMirrorMaker2.RESOURCE_KIND, kmm2Namespace, kmm2Name)) .onComplete(context.succeeding(v -> context.verify(() -> { // 0 Replicas - readiness should never get called. 
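The updateStatusAsync stubs in this hunk show the other recurring adjustment: when a mocked method gains a leading context argument, an existing ArgumentCaptor simply moves one position to the right (with the new first argument matched by any()), and thenAnswer-style stubs read the resource from invocation.getArgument(1) instead of getArgument(0), as in the KafkaConnectorIT change earlier in this diff. A minimal sketch under the same assumptions as above, with a hypothetical StatusOps interface rather than the Strimzi CrdOperator:

import io.vertx.core.Future;
import org.mockito.ArgumentCaptor;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

public class CaptorExample {
    // Hypothetical interface: the first parameter is the newly added context argument.
    interface StatusOps {
        Future<String> updateStatusAsync(Object reconciliation, String resource);
    }

    public static void main(String[] args) {
        StatusOps ops = mock(StatusOps.class);
        ArgumentCaptor<String> resourceCaptor = ArgumentCaptor.forClass(String.class);

        // The captor sits in the second position; the resource is now argument index 1.
        when(ops.updateStatusAsync(any(), resourceCaptor.capture()))
                .thenAnswer(invocation -> Future.succeededFuture(invocation.getArgument(1)));

        ops.updateStatusAsync("reconciliation-context", "my-mm2-status");
        System.out.println(resourceCaptor.getValue()); // prints my-mm2-status
    }
}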
- verify(mockDcOps, never()).readiness(anyString(), anyString(), anyLong(), anyLong()); + verify(mockDcOps, never()).readiness(any(), anyString(), anyString(), anyLong(), anyLong()); // Verify service List capturedServices = serviceCaptor.getAllValues(); @@ -782,33 +782,33 @@ public void testCreateClusterWithJmxEnabled(VertxTestContext context) { when(mockMirrorMaker2Ops.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture(kmm2)); ArgumentCaptor serviceCaptor = ArgumentCaptor.forClass(Service.class); - when(mockServiceOps.reconcile(anyString(), anyString(), serviceCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockServiceOps.reconcile(any(), anyString(), anyString(), serviceCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor dcCaptor = ArgumentCaptor.forClass(Deployment.class); - when(mockDcOps.reconcile(anyString(), anyString(), dcCaptor.capture())).thenReturn(Future.succeededFuture()); - when(mockDcOps.scaleUp(anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); - when(mockDcOps.scaleDown(anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); - when(mockDcOps.readiness(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockDcOps.waitForObserved(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockCmOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); - when(mockNetPolOps.reconcile(eq(kmm2.getMetadata().getNamespace()), eq(KafkaMirrorMaker2Resources.deploymentName(kmm2.getMetadata().getName())), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); - when(mockSecretOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture()); + when(mockDcOps.reconcile(any(), anyString(), anyString(), dcCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockDcOps.scaleUp(any(), anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); + when(mockDcOps.scaleDown(any(), anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); + when(mockDcOps.readiness(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDcOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockCmOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); + when(mockNetPolOps.reconcile(any(), eq(kmm2.getMetadata().getNamespace()), eq(KafkaMirrorMaker2Resources.deploymentName(kmm2.getMetadata().getName())), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); + when(mockSecretOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture()); when(mockSecretOps.getAsync(anyString(), any())).thenReturn(Future.succeededFuture()); ArgumentCaptor pdbCaptor = ArgumentCaptor.forClass(PodDisruptionBudget.class); - when(mockPdbOps.reconcile(anyString(), any(), pdbCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockPdbOps.reconcile(any(), anyString(), any(), pdbCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor mirrorMaker2Captor = ArgumentCaptor.forClass(KafkaMirrorMaker2.class); - when(mockMirrorMaker2Ops.updateStatusAsync(mirrorMaker2Captor.capture())).thenReturn(Future.succeededFuture()); + 
when(mockMirrorMaker2Ops.updateStatusAsync(any(), mirrorMaker2Captor.capture())).thenReturn(Future.succeededFuture()); KafkaConnectApi mockConnectClient = mock(KafkaConnectApi.class); when(mockConnectClient.list(anyString(), anyInt())).thenReturn(Future.succeededFuture(emptyList())); - when(mockConnectClient.updateConnectLoggers(anyString(), anyInt(), anyString(), any(OrderedProperties.class))).thenReturn(Future.succeededFuture()); + when(mockConnectClient.updateConnectLoggers(any(), anyString(), anyInt(), anyString(), any(OrderedProperties.class))).thenReturn(Future.succeededFuture()); KafkaMirrorMaker2AssemblyOperator ops = new KafkaMirrorMaker2AssemblyOperator(vertx, new PlatformFeaturesAvailability(true, kubernetesVersion), supplier, ResourceUtils.dummyClusterOperatorConfig(VERSIONS), x -> mockConnectClient); - KafkaMirrorMaker2Cluster mirrorMaker2 = KafkaMirrorMaker2Cluster.fromCrd(kmm2, VERSIONS); + KafkaMirrorMaker2Cluster mirrorMaker2 = KafkaMirrorMaker2Cluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kmm2, VERSIONS); Checkpoint async = context.checkpoint(); ops.reconcile(new Reconciliation("test-trigger", KafkaMirrorMaker2.RESOURCE_KIND, kmm2Namespace, kmm2Name)) @@ -896,7 +896,7 @@ public void testTopicsGroupsExclude(VertxTestContext context) { supplier, ResourceUtils.dummyClusterOperatorConfig(VERSIONS), x -> mockConnectClient); Checkpoint async = context.checkpoint(); - KafkaMirrorMaker2Cluster.fromCrd(kmm2, VERSIONS); + KafkaMirrorMaker2Cluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kmm2, VERSIONS); mm2AssemblyOperator.reconcile(new Reconciliation("test-exclude", KafkaMirrorMaker2.RESOURCE_KIND, targetNamespace, kmm2Name)) .onComplete(context.succeeding(v -> context.verify(() -> { KafkaMirrorMaker2MirrorSpec capturedMirrorConnector = mirrorMaker2Captor.getAllValues().get(0).getSpec().getMirrors().get(0); @@ -946,7 +946,7 @@ public void testTopicsGroupsBlacklist(VertxTestContext context) { supplier, ResourceUtils.dummyClusterOperatorConfig(VERSIONS), x -> mockConnectClient); Checkpoint async = context.checkpoint(); - KafkaMirrorMaker2Cluster.fromCrd(kmm2, VERSIONS); + KafkaMirrorMaker2Cluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kmm2, VERSIONS); mm2AssemblyOperator.reconcile(new Reconciliation("test-blacklist", KafkaMirrorMaker2.RESOURCE_KIND, targetNamespace, kmm2Name)) .onComplete(context.succeeding(v -> context.verify(() -> { KafkaMirrorMaker2MirrorSpec capturedMirrorConnector = mirrorMaker2Captor.getAllValues().get(0).getSpec().getMirrors().get(0); @@ -969,24 +969,24 @@ private ArgumentCaptor createMirrorMaker2CaptorMock(String ta when(mockMirrorMaker2Ops.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture(kmm2)); ArgumentCaptor serviceCaptor = ArgumentCaptor.forClass(Service.class); - when(mockServiceOps.reconcile(anyString(), anyString(), serviceCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockServiceOps.reconcile(any(), anyString(), anyString(), serviceCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor dcCaptor = ArgumentCaptor.forClass(Deployment.class); - when(mockDcOps.reconcile(anyString(), anyString(), dcCaptor.capture())).thenReturn(Future.succeededFuture()); - when(mockDcOps.scaleUp(anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); - when(mockDcOps.scaleDown(anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); - when(mockDcOps.readiness(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - 
when(mockDcOps.waitForObserved(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockCmOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); - when(mockNetPolOps.reconcile(eq(kmm2.getMetadata().getNamespace()), eq(KafkaMirrorMaker2Resources.deploymentName( + when(mockDcOps.reconcile(any(), anyString(), anyString(), dcCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockDcOps.scaleUp(any(), anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); + when(mockDcOps.scaleDown(any(), anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); + when(mockDcOps.readiness(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDcOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockCmOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); + when(mockNetPolOps.reconcile(any(), eq(kmm2.getMetadata().getNamespace()), eq(KafkaMirrorMaker2Resources.deploymentName( kmm2.getMetadata().getName())), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new NetworkPolicy()))); - when(mockSecretOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture()); + when(mockSecretOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture()); ArgumentCaptor pdbCaptor = ArgumentCaptor.forClass(PodDisruptionBudget.class); - when(mockPdbOps.reconcile(anyString(), any(), pdbCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockPdbOps.reconcile(any(), anyString(), any(), pdbCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor mirrorMaker2Captor = ArgumentCaptor.forClass(KafkaMirrorMaker2.class); - when(mockMirrorMaker2Ops.updateStatusAsync(mirrorMaker2Captor.capture())).thenReturn(Future.succeededFuture()); + when(mockMirrorMaker2Ops.updateStatusAsync(any(), mirrorMaker2Captor.capture())).thenReturn(Future.succeededFuture()); return mirrorMaker2Captor; } @@ -994,7 +994,7 @@ private ArgumentCaptor createMirrorMaker2CaptorMock(String ta private KafkaConnectApi createConnectClientMock() { KafkaConnectApi mockConnectClient = mock(KafkaConnectApi.class); when(mockConnectClient.list(anyString(), anyInt())).thenReturn(Future.succeededFuture(emptyList())); - when(mockConnectClient.updateConnectLoggers(anyString(), anyInt(), anyString(), any(OrderedProperties.class))).thenReturn(Future.succeededFuture()); + when(mockConnectClient.updateConnectLoggers(any(), anyString(), anyInt(), anyString(), any(OrderedProperties.class))).thenReturn(Future.succeededFuture()); return mockConnectClient; } } diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaMirrorMakerAssemblyOperatorTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaMirrorMakerAssemblyOperatorTest.java index 8c6176080c..fc91e3e031 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaMirrorMakerAssemblyOperatorTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaMirrorMakerAssemblyOperatorTest.java @@ -75,7 +75,7 @@ public class KafkaMirrorMakerAssemblyOperatorTest { private static final KafkaVersion.Lookup VERSIONS = KafkaVersionTestUtils.getKafkaVersionLookup(); protected 
static Vertx vertx; private static final String METRICS_CONFIG = "{\"foo\":\"bar\"}"; - private static final String LOGGING_CONFIG = AbstractModel.getOrderedProperties("mirrorMakerDefaultLoggingProperties") + private static final String LOGGING_CONFIG = AbstractModel.getOrderedProperties(Reconciliation.DUMMY_RECONCILIATION, "mirrorMakerDefaultLoggingProperties") .asPairsWithComment("Do not change this generated file. Logging can be configured in the corresponding Kubernetes resource."); private final String producerBootstrapServers = "foo-kafka:9092"; @@ -123,26 +123,26 @@ public void testCreateCluster(VertxTestContext context) { when(mockMirrorOps.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture(kmm)); ArgumentCaptor dcCaptor = ArgumentCaptor.forClass(Deployment.class); - when(mockDcOps.reconcile(anyString(), anyString(), dcCaptor.capture())).thenReturn(Future.succeededFuture()); - when(mockDcOps.scaleUp(anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); - when(mockDcOps.scaleDown(anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); - when(mockDcOps.readiness(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockDcOps.waitForObserved(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDcOps.reconcile(any(), anyString(), anyString(), dcCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockDcOps.scaleUp(any(), anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); + when(mockDcOps.scaleDown(any(), anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); + when(mockDcOps.readiness(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDcOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); ArgumentCaptor pdbCaptor = ArgumentCaptor.forClass(PodDisruptionBudget.class); - when(mockPdbOps.reconcile(anyString(), any(), pdbCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockPdbOps.reconcile(any(), anyString(), any(), pdbCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor statusCaptor = ArgumentCaptor.forClass(KafkaMirrorMaker.class); - when(mockMirrorOps.updateStatusAsync(statusCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockMirrorOps.updateStatusAsync(any(), statusCaptor.capture())).thenReturn(Future.succeededFuture()); - when(mockCmOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); + when(mockCmOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); KafkaMirrorMakerAssemblyOperator ops = new KafkaMirrorMakerAssemblyOperator(vertx, new PlatformFeaturesAvailability(true, kubernetesVersion), new MockCertManager(), new PasswordGenerator(10, "a", "a"), supplier, ResourceUtils.dummyClusterOperatorConfig(VERSIONS)); - KafkaMirrorMakerCluster mirror = KafkaMirrorMakerCluster.fromCrd(kmm, + KafkaMirrorMakerCluster mirror = KafkaMirrorMakerCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kmm, VERSIONS); Checkpoint async = context.checkpoint(); @@ -206,31 +206,31 @@ public void testUpdateClusterNoDiff(VertxTestContext context) { metricsCm.put("foo", "bar"); KafkaMirrorMaker kmm = ResourceUtils.createKafkaMirrorMaker(kmmNamespace, kmmName, image, producer, consumer, include, metricsCm); - 
KafkaMirrorMakerCluster mirror = KafkaMirrorMakerCluster.fromCrd(kmm, + KafkaMirrorMakerCluster mirror = KafkaMirrorMakerCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kmm, VERSIONS); when(mockMirrorOps.get(kmmNamespace, kmmName)).thenReturn(kmm); when(mockMirrorOps.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture(kmm)); - when(mockMirrorOps.updateStatusAsync(any(KafkaMirrorMaker.class))).thenReturn(Future.succeededFuture()); + when(mockMirrorOps.updateStatusAsync(any(), any(KafkaMirrorMaker.class))).thenReturn(Future.succeededFuture()); when(mockDcOps.get(kmmNamespace, mirror.getName())).thenReturn(mirror.generateDeployment(new HashMap(), true, null, null)); - when(mockDcOps.waitForObserved(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDcOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); ArgumentCaptor dcNameCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor dcCaptor = ArgumentCaptor.forClass(Deployment.class); - when(mockDcOps.reconcile(eq(kmmNamespace), dcNameCaptor.capture(), dcCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockDcOps.reconcile(any(), eq(kmmNamespace), dcNameCaptor.capture(), dcCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor dcScaleUpNameCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor dcScaleUpReplicasCaptor = ArgumentCaptor.forClass(Integer.class); - when(mockDcOps.scaleUp(eq(kmmNamespace), dcScaleUpNameCaptor.capture(), dcScaleUpReplicasCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockDcOps.scaleUp(any(), eq(kmmNamespace), dcScaleUpNameCaptor.capture(), dcScaleUpReplicasCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor dcScaleDownNameCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor dcScaleDownReplicasCaptor = ArgumentCaptor.forClass(Integer.class); - when(mockDcOps.scaleDown(eq(kmmNamespace), dcScaleDownNameCaptor.capture(), dcScaleDownReplicasCaptor.capture())).thenReturn(Future.succeededFuture()); - when(mockDcOps.readiness(eq(kmmNamespace), eq(mirror.getName()), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDcOps.scaleDown(any(), eq(kmmNamespace), dcScaleDownNameCaptor.capture(), dcScaleDownReplicasCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockDcOps.readiness(any(), eq(kmmNamespace), eq(mirror.getName()), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); ArgumentCaptor pdbCaptor = ArgumentCaptor.forClass(PodDisruptionBudget.class); - when(mockPdbOps.reconcile(anyString(), any(), pdbCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockPdbOps.reconcile(any(), anyString(), any(), pdbCaptor.capture())).thenReturn(Future.succeededFuture()); - when(mockCmOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); + when(mockCmOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); KafkaMirrorMakerAssemblyOperator ops = new KafkaMirrorMakerAssemblyOperator(vertx, new PlatformFeaturesAvailability(true, kubernetesVersion), new MockCertManager(), new PasswordGenerator(10, "a", "a"), @@ -281,33 +281,33 @@ public void testUpdateCluster(VertxTestContext context) { Map metricsCmP = new HashMap<>(); metricsCmP.put("foo", "bar"); KafkaMirrorMaker kmm = ResourceUtils.createKafkaMirrorMaker(kmmNamespace, kmmName, image, 
producer, consumer, include, metricsCmP); - KafkaMirrorMakerCluster mirror = KafkaMirrorMakerCluster.fromCrd(kmm, + KafkaMirrorMakerCluster mirror = KafkaMirrorMakerCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kmm, VERSIONS); kmm.getSpec().setImage("some/different:image"); // Change the image to generate some diff when(mockMirrorOps.get(kmmNamespace, kmmName)).thenReturn(kmm); when(mockMirrorOps.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture(kmm)); - when(mockMirrorOps.updateStatusAsync(any(KafkaMirrorMaker.class))).thenReturn(Future.succeededFuture()); + when(mockMirrorOps.updateStatusAsync(any(), any(KafkaMirrorMaker.class))).thenReturn(Future.succeededFuture()); when(mockDcOps.get(kmmNamespace, mirror.getName())).thenReturn(mirror.generateDeployment(new HashMap(), true, null, null)); - when(mockDcOps.readiness(eq(kmmNamespace), eq(mirror.getName()), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockDcOps.waitForObserved(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDcOps.readiness(any(), eq(kmmNamespace), eq(mirror.getName()), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDcOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); ArgumentCaptor dcNameCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor dcCaptor = ArgumentCaptor.forClass(Deployment.class); - when(mockDcOps.reconcile(eq(kmmNamespace), dcNameCaptor.capture(), dcCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockDcOps.reconcile(any(), eq(kmmNamespace), dcNameCaptor.capture(), dcCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor dcScaleUpNameCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor dcScaleUpReplicasCaptor = ArgumentCaptor.forClass(Integer.class); - when(mockDcOps.scaleUp(eq(kmmNamespace), dcScaleUpNameCaptor.capture(), dcScaleUpReplicasCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockDcOps.scaleUp(any(), eq(kmmNamespace), dcScaleUpNameCaptor.capture(), dcScaleUpReplicasCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor dcScaleDownNameCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor dcScaleDownReplicasCaptor = ArgumentCaptor.forClass(Integer.class); - when(mockDcOps.scaleDown(eq(kmmNamespace), dcScaleDownNameCaptor.capture(), dcScaleDownReplicasCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockDcOps.scaleDown(any(), eq(kmmNamespace), dcScaleDownNameCaptor.capture(), dcScaleDownReplicasCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor pdbCaptor = ArgumentCaptor.forClass(PodDisruptionBudget.class); - when(mockPdbOps.reconcile(anyString(), any(), pdbCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockPdbOps.reconcile(any(), anyString(), any(), pdbCaptor.capture())).thenReturn(Future.succeededFuture()); - when(mockCmOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); + when(mockCmOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); // Mock CM get when(mockMirrorOps.get(kmmNamespace, kmmName)).thenReturn(kmm); @@ -333,7 +333,7 @@ public void testUpdateCluster(VertxTestContext context) { doAnswer(invocation -> { metricsCms.add(invocation.getArgument(1)); return Future.succeededFuture(); - 
}).when(mockCmOps).reconcile(eq(kmmNamespace), anyString(), any()); + }).when(mockCmOps).reconcile(any(), eq(kmmNamespace), anyString(), any()); KafkaMirrorMakerAssemblyOperator ops = new KafkaMirrorMakerAssemblyOperator(vertx, new PlatformFeaturesAvailability(true, kubernetesVersion), @@ -344,7 +344,7 @@ public void testUpdateCluster(VertxTestContext context) { Checkpoint async = context.checkpoint(); ops.createOrUpdate(new Reconciliation("test-trigger", KafkaMirrorMaker.RESOURCE_KIND, kmmNamespace, kmmName), kmm) .onComplete(context.succeeding(v -> context.verify(() -> { - KafkaMirrorMakerCluster compareTo = KafkaMirrorMakerCluster.fromCrd(kmm, + KafkaMirrorMakerCluster compareTo = KafkaMirrorMakerCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kmm, VERSIONS); // Verify Deployment @@ -368,7 +368,7 @@ public void testUpdateCluster(VertxTestContext context) { assertThat(dcScaleUpNameCaptor.getAllValues(), hasSize(1)); // No metrics config => no CMs created - verify(mockCmOps, never()).createOrUpdate(any()); + verify(mockCmOps, never()).createOrUpdate(any(), any()); async.flag(); }))); } @@ -395,36 +395,36 @@ public void testUpdateClusterFailure(VertxTestContext context) { Map metricsCm = new HashMap<>(); metricsCm.put("foo", "bar"); KafkaMirrorMaker kmm = ResourceUtils.createKafkaMirrorMaker(kmmNamespace, kmmName, image, producer, consumer, include, metricsCm); - KafkaMirrorMakerCluster mirror = KafkaMirrorMakerCluster.fromCrd(kmm, + KafkaMirrorMakerCluster mirror = KafkaMirrorMakerCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kmm, VERSIONS); kmm.getSpec().setImage("some/different:image"); // Change the image to generate some diff when(mockMirrorOps.get(kmmNamespace, kmmName)).thenReturn(kmm); when(mockDcOps.get(kmmNamespace, mirror.getName())).thenReturn(mirror.generateDeployment(new HashMap(), true, null, null)); - when(mockDcOps.readiness(eq(kmmNamespace), eq(mirror.getName()), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockDcOps.waitForObserved(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDcOps.readiness(any(), eq(kmmNamespace), eq(mirror.getName()), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDcOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); ArgumentCaptor dcNamespaceCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor dcNameCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor dcCaptor = ArgumentCaptor.forClass(Deployment.class); - when(mockDcOps.reconcile(dcNamespaceCaptor.capture(), dcNameCaptor.capture(), dcCaptor.capture())).thenReturn(Future.failedFuture("Failed")); + when(mockDcOps.reconcile(any(), dcNamespaceCaptor.capture(), dcNameCaptor.capture(), dcCaptor.capture())).thenReturn(Future.failedFuture("Failed")); ArgumentCaptor dcScaleUpNamespaceCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor dcScaleUpNameCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor dcScaleUpReplicasCaptor = ArgumentCaptor.forClass(Integer.class); - when(mockDcOps.scaleUp(dcScaleUpNamespaceCaptor.capture(), dcScaleUpNameCaptor.capture(), dcScaleUpReplicasCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockDcOps.scaleUp(any(), dcScaleUpNamespaceCaptor.capture(), dcScaleUpNameCaptor.capture(), dcScaleUpReplicasCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor dcScaleDownNamespaceCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor 
dcScaleDownNameCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor dcScaleDownReplicasCaptor = ArgumentCaptor.forClass(Integer.class); - when(mockDcOps.scaleDown(dcScaleDownNamespaceCaptor.capture(), dcScaleDownNameCaptor.capture(), dcScaleDownReplicasCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockDcOps.scaleDown(any(), dcScaleDownNamespaceCaptor.capture(), dcScaleDownNameCaptor.capture(), dcScaleDownReplicasCaptor.capture())).thenReturn(Future.succeededFuture()); - when(mockPdbOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture()); + when(mockPdbOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture()); - when(mockMirrorOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new KafkaMirrorMaker()))); + when(mockMirrorOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new KafkaMirrorMaker()))); when(mockMirrorOps.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture(kmm)); - when(mockMirrorOps.updateStatusAsync(any(KafkaMirrorMaker.class))).thenReturn(Future.succeededFuture()); - when(mockCmOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); + when(mockMirrorOps.updateStatusAsync(any(), any(KafkaMirrorMaker.class))).thenReturn(Future.succeededFuture()); + when(mockCmOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); KafkaMirrorMakerAssemblyOperator ops = new KafkaMirrorMakerAssemblyOperator(vertx, new PlatformFeaturesAvailability(true, kubernetesVersion), @@ -461,28 +461,28 @@ public void testUpdateClusterScaleUp(VertxTestContext context) { Map metricsCm = new HashMap<>(); metricsCm.put("foo", "bar"); KafkaMirrorMaker kmm = ResourceUtils.createKafkaMirrorMaker(kmmNamespace, kmmName, image, producer, consumer, include, metricsCm); - KafkaMirrorMakerCluster mirror = KafkaMirrorMakerCluster.fromCrd(kmm, + KafkaMirrorMakerCluster mirror = KafkaMirrorMakerCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kmm, VERSIONS); kmm.getSpec().setReplicas(scaleTo); // Change replicas to create ScaleUp when(mockMirrorOps.get(kmmNamespace, kmmName)).thenReturn(kmm); when(mockDcOps.get(kmmNamespace, mirror.getName())).thenReturn(mirror.generateDeployment(new HashMap(), true, null, null)); - when(mockDcOps.readiness(eq(kmmNamespace), eq(mirror.getName()), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockDcOps.reconcile(eq(kmmNamespace), any(), any())).thenReturn(Future.succeededFuture()); - when(mockDcOps.waitForObserved(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDcOps.readiness(any(), eq(kmmNamespace), eq(mirror.getName()), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDcOps.reconcile(any(), eq(kmmNamespace), any(), any())).thenReturn(Future.succeededFuture()); + when(mockDcOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); doAnswer(i -> Future.succeededFuture(scaleTo)) - .when(mockDcOps).scaleUp(kmmNamespace, mirror.getName(), scaleTo); + .when(mockDcOps).scaleUp(any(), eq(kmmNamespace), eq(mirror.getName()), eq(scaleTo)); doAnswer(i -> Future.succeededFuture(scaleTo)) - .when(mockDcOps).scaleDown(kmmNamespace, mirror.getName(), scaleTo); + .when(mockDcOps).scaleDown(any(), eq(kmmNamespace), 
eq(mirror.getName()), eq(scaleTo)); - when(mockMirrorOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new KafkaMirrorMaker()))); + when(mockMirrorOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new KafkaMirrorMaker()))); when(mockMirrorOps.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture(kmm)); - when(mockMirrorOps.updateStatusAsync(any(KafkaMirrorMaker.class))).thenReturn(Future.succeededFuture()); - when(mockCmOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); + when(mockMirrorOps.updateStatusAsync(any(), any(KafkaMirrorMaker.class))).thenReturn(Future.succeededFuture()); + when(mockCmOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); - when(mockPdbOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture()); + when(mockPdbOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture()); KafkaMirrorMakerAssemblyOperator ops = new KafkaMirrorMakerAssemblyOperator(vertx, new PlatformFeaturesAvailability(true, kubernetesVersion), @@ -493,7 +493,7 @@ public void testUpdateClusterScaleUp(VertxTestContext context) { Checkpoint async = context.checkpoint(); ops.createOrUpdate(new Reconciliation("test-trigger", KafkaMirrorMaker.RESOURCE_KIND, kmmNamespace, kmmName), kmm) .onComplete(context.succeeding(v -> context.verify(() -> { - verify(mockDcOps).scaleUp(kmmNamespace, mirror.getName(), scaleTo); + verify(mockDcOps).scaleUp(any(), eq(kmmNamespace), eq(mirror.getName()), eq(scaleTo)); async.flag(); }))); } @@ -522,28 +522,28 @@ public void testUpdateClusterScaleDown(VertxTestContext context) { Map metricsCm = new HashMap<>(); metricsCm.put("foo", "bar"); KafkaMirrorMaker kmm = ResourceUtils.createKafkaMirrorMaker(kmmNamespace, kmmName, image, producer, consumer, include, metricsCm); - KafkaMirrorMakerCluster mirror = KafkaMirrorMakerCluster.fromCrd(kmm, + KafkaMirrorMakerCluster mirror = KafkaMirrorMakerCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kmm, VERSIONS); kmm.getSpec().setReplicas(scaleTo); // Change replicas to create ScaleDown when(mockMirrorOps.get(kmmNamespace, kmmName)).thenReturn(kmm); when(mockMirrorOps.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture(kmm)); - when(mockMirrorOps.updateStatusAsync(any(KafkaMirrorMaker.class))).thenReturn(Future.succeededFuture()); + when(mockMirrorOps.updateStatusAsync(any(), any(KafkaMirrorMaker.class))).thenReturn(Future.succeededFuture()); when(mockDcOps.get(kmmNamespace, mirror.getName())).thenReturn(mirror.generateDeployment(new HashMap(), true, null, null)); - when(mockDcOps.readiness(eq(kmmNamespace), eq(mirror.getName()), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockDcOps.waitForObserved(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockDcOps.reconcile(eq(kmmNamespace), any(), any())).thenReturn(Future.succeededFuture()); + when(mockDcOps.readiness(any(), eq(kmmNamespace), eq(mirror.getName()), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDcOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDcOps.reconcile(any(), eq(kmmNamespace), any(), any())).thenReturn(Future.succeededFuture()); doAnswer(i -> Future.succeededFuture(scaleTo)) - 
.when(mockDcOps).scaleUp(kmmNamespace, mirror.getName(), scaleTo); + .when(mockDcOps).scaleUp(any(), eq(kmmNamespace), eq(mirror.getName()), eq(scaleTo)); doAnswer(i -> Future.succeededFuture(scaleTo)) - .when(mockDcOps).scaleDown(kmmNamespace, mirror.getName(), scaleTo); + .when(mockDcOps).scaleDown(any(), eq(kmmNamespace), eq(mirror.getName()), eq(scaleTo)); - when(mockMirrorOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new KafkaMirrorMaker()))); - when(mockCmOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); + when(mockMirrorOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new KafkaMirrorMaker()))); + when(mockCmOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); - when(mockPdbOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture()); + when(mockPdbOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture()); KafkaMirrorMakerAssemblyOperator ops = new KafkaMirrorMakerAssemblyOperator(vertx, new PlatformFeaturesAvailability(true, kubernetesVersion), @@ -554,7 +554,7 @@ public void testUpdateClusterScaleDown(VertxTestContext context) { Checkpoint async = context.checkpoint(); ops.createOrUpdate(new Reconciliation("test-trigger", KafkaMirrorMaker.RESOURCE_KIND, kmmNamespace, kmmName), kmm) .onComplete(context.succeeding(v -> context.verify(() -> { - verify(mockDcOps).scaleUp(kmmNamespace, mirror.getName(), scaleTo); + verify(mockDcOps).scaleUp(any(), eq(kmmNamespace), eq(mirror.getName()), eq(scaleTo)); async.flag(); }))); } @@ -590,19 +590,19 @@ public void testReconcile(VertxTestContext context) { // providing the list of ALL Deployments for all the Kafka Mirror Maker clusters Labels newLabels = Labels.forStrimziKind(KafkaMirrorMaker.RESOURCE_KIND); when(mockDcOps.list(eq(kmmNamespace), eq(newLabels))).thenReturn( - asList(KafkaMirrorMakerCluster.fromCrd(bar, + asList(KafkaMirrorMakerCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, bar, VERSIONS).generateDeployment(new HashMap(), true, null, null))); // providing the list Deployments for already "existing" Kafka Mirror Maker clusters Labels barLabels = Labels.forStrimziCluster("bar"); when(mockDcOps.list(eq(kmmNamespace), eq(barLabels))).thenReturn( - asList(KafkaMirrorMakerCluster.fromCrd(bar, + asList(KafkaMirrorMakerCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, bar, VERSIONS).generateDeployment(new HashMap(), true, null, null)) ); - when(mockDcOps.readiness(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockDcOps.waitForObserved(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDcOps.readiness(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDcOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockSecretOps.reconcile(eq(kmmNamespace), any(), any())).thenReturn(Future.succeededFuture()); + when(mockSecretOps.reconcile(any(), eq(kmmNamespace), any(), any())).thenReturn(Future.succeededFuture()); Set createdOrUpdated = new CopyOnWriteArraySet<>(); @@ -657,17 +657,17 @@ public void testCreateClusterStatusNotReady(VertxTestContext context) throws Int when(mockMirrorOps.get(kmmNamespace, kmmName)).thenReturn(kmm); 
when(mockMirrorOps.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture(kmm)); - when(mockDcOps.reconcile(anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); - when(mockDcOps.scaleUp(anyString(), anyString(), anyInt())).thenReturn(Future.failedFuture(failureMsg)); - when(mockDcOps.scaleDown(anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); - when(mockDcOps.readiness(eq(kmmNamespace), eq(kmmName), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockDcOps.waitForObserved(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockPdbOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture()); + when(mockDcOps.reconcile(any(), anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); + when(mockDcOps.scaleUp(any(), anyString(), anyString(), anyInt())).thenReturn(Future.failedFuture(failureMsg)); + when(mockDcOps.scaleDown(any(), anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); + when(mockDcOps.readiness(any(), eq(kmmNamespace), eq(kmmName), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockDcOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(mockPdbOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture()); ArgumentCaptor statusCaptor = ArgumentCaptor.forClass(KafkaMirrorMaker.class); - when(mockMirrorOps.updateStatusAsync(statusCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockMirrorOps.updateStatusAsync(any(), statusCaptor.capture())).thenReturn(Future.succeededFuture()); - when(mockCmOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); + when(mockCmOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); KafkaMirrorMakerAssemblyOperator ops = new KafkaMirrorMakerAssemblyOperator(vertx, new PlatformFeaturesAvailability(true, kubernetesVersion), new MockCertManager(), new PasswordGenerator(10, "a", "a"), @@ -713,15 +713,15 @@ public void testCreateOrUpdateZeroReplica(VertxTestContext context) { when(mockMirrorOps.get(kmmNamespace, kmmName)).thenReturn(kmm); when(mockMirrorOps.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture(kmm)); - when(mockDcOps.reconcile(anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); - when(mockDcOps.scaleUp(anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); - when(mockDcOps.scaleDown(anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); - when(mockDcOps.waitForObserved(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(mockPdbOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture()); - when(mockCmOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); + when(mockDcOps.reconcile(any(), anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); + when(mockDcOps.scaleUp(any(), anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); + when(mockDcOps.scaleDown(any(), anyString(), anyString(), anyInt())).thenReturn(Future.succeededFuture(42)); + when(mockDcOps.waitForObserved(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + 
when(mockPdbOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture()); + when(mockCmOps.reconcile(any(), anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); ArgumentCaptor mirrorMakerCaptor = ArgumentCaptor.forClass(KafkaMirrorMaker.class); - when(mockMirrorOps.updateStatusAsync(mirrorMakerCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockMirrorOps.updateStatusAsync(any(), mirrorMakerCaptor.capture())).thenReturn(Future.succeededFuture()); KafkaMirrorMakerAssemblyOperator ops = new KafkaMirrorMakerAssemblyOperator(vertx, new PlatformFeaturesAvailability(true, kubernetesVersion), @@ -733,7 +733,7 @@ public void testCreateOrUpdateZeroReplica(VertxTestContext context) { ops.reconcile(new Reconciliation("test-trigger", KafkaMirrorMaker.RESOURCE_KIND, kmmNamespace, kmmName)) .onComplete(context.succeeding(v -> context.verify(() -> { // 0 Replicas - readiness should never get called. - verify(mockDcOps, never()).readiness(anyString(), anyString(), anyLong(), anyLong()); + verify(mockDcOps, never()).readiness(any(), anyString(), anyString(), anyLong(), anyLong()); assertThat(mirrorMakerCaptor.getValue().getStatus().getConditions().get(0).getType(), is("Ready")); async.flag(); diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaRebalanceAssemblyOperatorTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaRebalanceAssemblyOperatorTest.java index 0710a10a4b..05e66173d9 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaRebalanceAssemblyOperatorTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaRebalanceAssemblyOperatorTest.java @@ -783,7 +783,6 @@ public void run() { * Tests the transition from 'New' to to 'ProposalReady' * The rebalance proposal is approved and the resource moves to 'Rebalancing' then to 'Stopped' (via annotation) * - * * 1. A new KafkaRebalance resource is created; it is in the 'New' state * 2. The operator requests a rebalance proposal through the Cruise Control REST API * 3. 
The rebalance proposal is ready on the first call @@ -1240,24 +1239,24 @@ private void mockRebalanceOperator(CrdOperator { + when(mockRebalanceOps.updateStatusAsync(any(), any(KafkaRebalance.class))).thenAnswer(invocation -> { try { return Future.succeededFuture(Crds.kafkaRebalanceOperation(client) .inNamespace(namespace) .withName(resource) - .patch(invocation.getArgument(0))); + .patch(invocation.getArgument(1))); } catch (Exception e) { return Future.failedFuture(e); } }); - when(mockCmOps.reconcile(eq(CLUSTER_NAMESPACE), eq(RESOURCE_NAME), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); + when(mockCmOps.reconcile(any(), eq(CLUSTER_NAMESPACE), eq(RESOURCE_NAME), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap()))); - when(mockRebalanceOps.patchAsync(any(KafkaRebalance.class))).thenAnswer(invocation -> { + when(mockRebalanceOps.patchAsync(any(), any(KafkaRebalance.class))).thenAnswer(invocation -> { try { return Future.succeededFuture(Crds.kafkaRebalanceOperation(client) .inNamespace(namespace) .withName(resource) - .patch(invocation.getArgument(0))); + .patch(invocation.getArgument(1))); } catch (Exception e) { return Future.failedFuture(e); } diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaStatusTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaStatusTest.java index ef4bf929e3..7fb2b1c4fd 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaStatusTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaStatusTest.java @@ -144,7 +144,7 @@ public void testStatusAfterSuccessfulReconciliationWithPreviousFailure(VertxTest when(mockKafkaOps.get(eq(namespace), eq(clusterName))).thenReturn(kafka); ArgumentCaptor kafkaCaptor = ArgumentCaptor.forClass(Kafka.class); - when(mockKafkaOps.updateStatusAsync(kafkaCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockKafkaOps.updateStatusAsync(any(), kafkaCaptor.capture())).thenReturn(Future.succeededFuture()); MockWorkingKafkaAssemblyOperator kao = new MockWorkingKafkaAssemblyOperator(vertx, new PlatformFeaturesAvailability(false, kubernetesVersion), certManager, @@ -193,7 +193,7 @@ public void testPauseReconciliationsStatus(VertxTestContext context) throws Pars when(mockKafkaOps.get(eq(namespace), eq(clusterName))).thenReturn(kafka); ArgumentCaptor kafkaCaptor = ArgumentCaptor.forClass(Kafka.class); - when(mockKafkaOps.updateStatusAsync(kafkaCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockKafkaOps.updateStatusAsync(any(), kafkaCaptor.capture())).thenReturn(Future.succeededFuture()); MockWorkingKafkaAssemblyOperator kao = new MockWorkingKafkaAssemblyOperator(vertx, new PlatformFeaturesAvailability(false, kubernetesVersion), certManager, @@ -251,7 +251,7 @@ public void testStatusAfterSuccessfulReconciliationWithPreviousSuccess(VertxTest when(mockKafkaOps.getAsync(eq(namespace), eq(clusterName))).thenReturn(Future.succeededFuture(readyKafka)); ArgumentCaptor kafkaCaptor = ArgumentCaptor.forClass(Kafka.class); - when(mockKafkaOps.updateStatusAsync(kafkaCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockKafkaOps.updateStatusAsync(any(), kafkaCaptor.capture())).thenReturn(Future.succeededFuture()); MockWorkingKafkaAssemblyOperator kao = new MockWorkingKafkaAssemblyOperator(vertx, new PlatformFeaturesAvailability(false, kubernetesVersion), certManager, @@ -290,7 
+290,7 @@ public void testStatusAfterFailedReconciliationWithPreviousFailure(VertxTestCont when(mockKafkaOps.get(eq(namespace), eq(clusterName))).thenReturn(kafka); ArgumentCaptor kafkaCaptor = ArgumentCaptor.forClass(Kafka.class); - when(mockKafkaOps.updateStatusAsync(kafkaCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockKafkaOps.updateStatusAsync(any(), kafkaCaptor.capture())).thenReturn(Future.succeededFuture()); MockFailingKafkaAssemblyOperator kao = new MockFailingKafkaAssemblyOperator( exception, @@ -361,7 +361,7 @@ public void testStatusAfterFailedReconciliationWithPreviousSuccess(VertxTestCont when(mockKafkaOps.get(eq(namespace), eq(clusterName))).thenReturn(readyKafka); ArgumentCaptor kafkaCaptor = ArgumentCaptor.forClass(Kafka.class); - when(mockKafkaOps.updateStatusAsync(kafkaCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockKafkaOps.updateStatusAsync(any(), kafkaCaptor.capture())).thenReturn(Future.succeededFuture()); MockFailingKafkaAssemblyOperator kao = new MockFailingKafkaAssemblyOperator( new RuntimeException("Something went wrong"), @@ -471,7 +471,7 @@ public void testKafkaListenerNodePortAddressInStatus(VertxTestContext context) t .endKafka() .endSpec() .build(); - KafkaCluster kafkaCluster = KafkaCluster.fromCrd(kafka, VERSIONS); + KafkaCluster kafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, VERSIONS); ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); @@ -481,7 +481,7 @@ public void testKafkaListenerNodePortAddressInStatus(VertxTestContext context) t when(mockKafkaOps.get(eq(namespace), eq(clusterName))).thenReturn(kafka); ArgumentCaptor kafkaCaptor = ArgumentCaptor.forClass(Kafka.class); - when(mockKafkaOps.updateStatusAsync(kafkaCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockKafkaOps.updateStatusAsync(any(), kafkaCaptor.capture())).thenReturn(Future.succeededFuture()); // Mock the KafkaSetOperator KafkaSetOperator mockKafkaSetOps = supplier.kafkaSetOperations; @@ -590,7 +590,7 @@ public void testKafkaListenerNodePortAddressInStatusWithOverrides(VertxTestConte .endKafka() .endSpec() .build(); - KafkaCluster kafkaCluster = KafkaCluster.fromCrd(kafka, VERSIONS); + KafkaCluster kafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, VERSIONS); ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); @@ -600,7 +600,7 @@ public void testKafkaListenerNodePortAddressInStatusWithOverrides(VertxTestConte when(mockKafkaOps.get(eq(namespace), eq(clusterName))).thenReturn(kafka); ArgumentCaptor kafkaCaptor = ArgumentCaptor.forClass(Kafka.class); - when(mockKafkaOps.updateStatusAsync(kafkaCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockKafkaOps.updateStatusAsync(any(), kafkaCaptor.capture())).thenReturn(Future.succeededFuture()); // Mock the KafkaSetOperator KafkaSetOperator mockKafkaSetOps = supplier.kafkaSetOperations; @@ -699,7 +699,7 @@ public void testKafkaListenerNodePortAddressWithPreferred(VertxTestContext conte .endKafka() .endSpec() .build(); - KafkaCluster kafkaCluster = KafkaCluster.fromCrd(kafka, VERSIONS); + KafkaCluster kafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, VERSIONS); ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); @@ -709,7 +709,7 @@ public void testKafkaListenerNodePortAddressWithPreferred(VertxTestContext conte when(mockKafkaOps.get(eq(namespace), eq(clusterName))).thenReturn(kafka); ArgumentCaptor kafkaCaptor = 
ArgumentCaptor.forClass(Kafka.class); - when(mockKafkaOps.updateStatusAsync(kafkaCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockKafkaOps.updateStatusAsync(any(), kafkaCaptor.capture())).thenReturn(Future.succeededFuture()); // Mock the KafkaSetOperator KafkaSetOperator mockKafkaSetOps = supplier.kafkaSetOperations; @@ -805,7 +805,7 @@ public void testKafkaListenerNodePortAddressSameNode(VertxTestContext context) t .endKafka() .endSpec() .build(); - KafkaCluster kafkaCluster = KafkaCluster.fromCrd(kafka, VERSIONS); + KafkaCluster kafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, VERSIONS); ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); @@ -815,7 +815,7 @@ public void testKafkaListenerNodePortAddressSameNode(VertxTestContext context) t when(mockKafkaOps.get(eq(namespace), eq(clusterName))).thenReturn(kafka); ArgumentCaptor kafkaCaptor = ArgumentCaptor.forClass(Kafka.class); - when(mockKafkaOps.updateStatusAsync(kafkaCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockKafkaOps.updateStatusAsync(any(), kafkaCaptor.capture())).thenReturn(Future.succeededFuture()); // Mock the KafkaSetOperator KafkaSetOperator mockKafkaSetOps = supplier.kafkaSetOperations; @@ -910,7 +910,7 @@ public void testKafkaListenerNodePortAddressMissingNodes(VertxTestContext contex .endKafka() .endSpec() .build(); - KafkaCluster kafkaCluster = KafkaCluster.fromCrd(kafka, VERSIONS); + KafkaCluster kafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, VERSIONS); ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); @@ -920,7 +920,7 @@ public void testKafkaListenerNodePortAddressMissingNodes(VertxTestContext contex when(mockKafkaOps.get(eq(namespace), eq(clusterName))).thenReturn(kafka); ArgumentCaptor kafkaCaptor = ArgumentCaptor.forClass(Kafka.class); - when(mockKafkaOps.updateStatusAsync(kafkaCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockKafkaOps.updateStatusAsync(any(), kafkaCaptor.capture())).thenReturn(Future.succeededFuture()); // Mock the KafkaSetOperator KafkaSetOperator mockKafkaSetOps = supplier.kafkaSetOperations; @@ -989,7 +989,7 @@ public void testInitialStatusOnNewResource() throws ParseException { when(mockKafkaOps.get(eq(namespace), eq(clusterName))).thenReturn(kafka); ArgumentCaptor kafkaCaptor = ArgumentCaptor.forClass(Kafka.class); - when(mockKafkaOps.updateStatusAsync(kafkaCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockKafkaOps.updateStatusAsync(any(), kafkaCaptor.capture())).thenReturn(Future.succeededFuture()); MockInitialStatusKafkaAssemblyOperator kao = new MockInitialStatusKafkaAssemblyOperator(vertx, new PlatformFeaturesAvailability(false, kubernetesVersion), certManager, @@ -1027,7 +1027,7 @@ public void testInitialStatusOnOldResource() throws ParseException { when(mockKafkaOps.get(eq(namespace), eq(clusterName))).thenReturn(kafka); ArgumentCaptor kafkaCaptor = ArgumentCaptor.forClass(Kafka.class); - when(mockKafkaOps.updateStatusAsync(kafkaCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockKafkaOps.updateStatusAsync(any(), kafkaCaptor.capture())).thenReturn(Future.succeededFuture()); MockInitialStatusKafkaAssemblyOperator kao = new MockInitialStatusKafkaAssemblyOperator(vertx, new PlatformFeaturesAvailability(false, kubernetesVersion), certManager, @@ -1045,7 +1045,7 @@ public void testInitialStatusOnOldResource() throws ParseException { @Test public void 
testKafkaClusterIdInStatus(VertxTestContext context) throws ParseException { Kafka kafka = new KafkaBuilder(getKafkaCrd()).build(); - KafkaCluster kafkaCluster = KafkaCluster.fromCrd(kafka, VERSIONS); + KafkaCluster kafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, VERSIONS); ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); @@ -1055,7 +1055,7 @@ public void testKafkaClusterIdInStatus(VertxTestContext context) throws ParseExc when(mockKafkaOps.get(eq(namespace), eq(clusterName))).thenReturn(kafka); ArgumentCaptor kafkaCaptor = ArgumentCaptor.forClass(Kafka.class); - when(mockKafkaOps.updateStatusAsync(kafkaCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockKafkaOps.updateStatusAsync(any(), kafkaCaptor.capture())).thenReturn(Future.succeededFuture()); // Mock the KafkaSecretOperator SecretOperator mockSecretOps = supplier.secretOperations; @@ -1096,7 +1096,7 @@ public void testModelWarnings(VertxTestContext context) throws ParseException { .endKafka() .endSpec() .build(); - KafkaCluster kafkaCluster = KafkaCluster.fromCrd(oldKafka, VERSIONS); + KafkaCluster kafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, oldKafka, VERSIONS); ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); @@ -1106,7 +1106,7 @@ public void testModelWarnings(VertxTestContext context) throws ParseException { when(mockKafkaOps.get(eq(namespace), eq(clusterName))).thenReturn(kafka); ArgumentCaptor kafkaCaptor = ArgumentCaptor.forClass(Kafka.class); - when(mockKafkaOps.updateStatusAsync(kafkaCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockKafkaOps.updateStatusAsync(any(), kafkaCaptor.capture())).thenReturn(Future.succeededFuture()); // Mock the KafkaSetOperator KafkaSetOperator mockKafkaSetOps = supplier.kafkaSetOperations; diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/TestingConnector.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/TestingConnector.java index 44f557b495..c03c8888c8 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/TestingConnector.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/TestingConnector.java @@ -4,6 +4,7 @@ */ package io.strimzi.operator.cluster.operator.assembly; +import io.strimzi.operator.common.ReconciliationLogger; import org.apache.kafka.common.config.ConfigDef; import org.apache.kafka.connect.connector.Task; import org.apache.kafka.connect.data.Schema; @@ -11,8 +12,6 @@ import org.apache.kafka.connect.source.SourceConnector; import org.apache.kafka.connect.source.SourceRecord; import org.apache.kafka.connect.source.SourceTask; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import java.util.ArrayList; import java.util.Collections; @@ -25,7 +24,7 @@ */ public class TestingConnector extends SourceConnector { - private static final Logger LOGGER = LogManager.getLogger(TestingConnector.class); + private static final ReconciliationLogger LOGGER = ReconciliationLogger.create(TestingConnector.class); public static final String START_TIME_MS = "start.time.ms"; public static final String STOP_TIME_MS = "stop.time.ms"; public static final String TASK_START_TIME_MS = "task.start.time.ms"; @@ -59,22 +58,22 @@ public String version() { @Override public void start(Map map) { - LOGGER.info("Starting task {}", this); + LOGGER.infoOp("Starting task {}", this); long 
taskStartTime = getLong(map, "task.start.time.ms"); taskStopTime = getLong(map, "task.stop.time.ms"); taskPollTime = getLong(map, "task.poll.time.ms"); taskPollRecords = getLong(map, "task.poll.records"); topicName = map.get("topic.name"); numPartitions = Integer.parseInt(map.get("num.partitions")); - LOGGER.info("Sleeping for {}ms", taskStartTime); + LOGGER.infoOp("Sleeping for {}ms", taskStartTime); sleep(taskStartTime); - LOGGER.info("Started task {}", this); + LOGGER.infoOp("Started task {}", this); } @Override public List poll() throws InterruptedException { - LOGGER.info("Poll {}", this); - LOGGER.info("Sleeping for {}ms in poll", taskPollTime); + LOGGER.infoOp("Poll {}", this); + LOGGER.infoOp("Sleeping for {}ms in poll", taskPollTime); sleep(taskPollTime); Schema valueSchema = new SchemaBuilder(Schema.Type.INT64).valueSchema(); List records = new ArrayList<>(); @@ -85,20 +84,20 @@ public List poll() throws InterruptedException { null, null, valueSchema, record++)); } - LOGGER.warn("Returning {} records for topic {} from poll", taskPollRecords, topicName); + LOGGER.warnOp("Returning {} records for topic {} from poll", taskPollRecords, topicName); return records; } @Override public void stop() { - LOGGER.info("Stopping task {}", this); + LOGGER.infoOp("Stopping task {}", this); sleep(taskStopTime); } } @Override public void start(Map map) { - LOGGER.info("Starting connector {}", this); + LOGGER.infoOp("Starting connector {}", this); long startTime = getLong(map, START_TIME_MS); stopTime = getLong(map, STOP_TIME_MS); taskStartTime = getLong(map, TASK_START_TIME_MS); @@ -108,7 +107,7 @@ public void start(Map map) { topicName = map.get(TOPIC_NAME); numPartitions = Integer.parseInt(map.get(NUM_PARTITIONS)); sleep(startTime); - LOGGER.info("Started connector {}", this); + LOGGER.infoOp("Started connector {}", this); } private static void sleep(long ms) { @@ -116,7 +115,7 @@ private static void sleep(long ms) { try { Thread.sleep(ms); } catch (InterruptedException e) { - LOGGER.warn("Interrupted during sleep", e); + LOGGER.warnOp("Interrupted during sleep", e); } } } @@ -148,9 +147,9 @@ public List> taskConfigs(int count) { @Override public void stop() { - LOGGER.info("Stopping connector {}", this); + LOGGER.infoOp("Stopping connector {}", this); sleep(stopTime); - LOGGER.info("Stopped connector {}", this); + LOGGER.infoOp("Stopped connector {}", this); } @Override diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/TolerationsIT.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/TolerationsIT.java index 666bfdfd13..c0187ea4eb 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/TolerationsIT.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/TolerationsIT.java @@ -13,6 +13,7 @@ import io.fabric8.kubernetes.client.KubernetesClient; import io.strimzi.operator.cluster.model.ModelUtils; import io.strimzi.operator.cluster.operator.resource.StatefulSetDiff; +import io.strimzi.operator.common.Reconciliation; import io.strimzi.test.k8s.KubeClusterResource; import io.vertx.junit5.Checkpoint; import io.vertx.junit5.VertxExtension; @@ -86,7 +87,7 @@ public void testEmptyStringValueIntoleration(VertxTestContext context) { client.apps().statefulSets().inNamespace(namespace).create(ss); StatefulSet stsk8s = client.apps().statefulSets().inNamespace(namespace).withName("foo").get(); - StatefulSetDiff diff = new StatefulSetDiff(ss, stsk8s); + StatefulSetDiff diff 
= new StatefulSetDiff(Reconciliation.DUMMY_RECONCILIATION, ss, stsk8s); Checkpoint checkpoint = context.checkpoint(); context.verify(() -> { assertThat(diff.changesSpecTemplate(), is(false)); diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/VolumeResizingTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/VolumeResizingTest.java index eca30952ac..e752ffbda4 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/VolumeResizingTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/VolumeResizingTest.java @@ -40,6 +40,7 @@ import static java.util.Collections.singletonMap; import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.MatcherAssert.assertThat; +import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.when; @@ -106,7 +107,7 @@ public Kafka getKafkaCrd() { @Test public void testNoExistingVolumes() { Kafka kafka = getKafkaCrd(); - KafkaCluster kafkaCluster = KafkaCluster.fromCrd(kafka, VERSIONS); + KafkaCluster kafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, VERSIONS); ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); // Mock the PVC Operator @@ -119,7 +120,7 @@ public void testNoExistingVolumes() { }); ArgumentCaptor pvcCaptor = ArgumentCaptor.forClass(PersistentVolumeClaim.class); - when(mockPvcOps.reconcile(anyString(), anyString(), pvcCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockPvcOps.reconcile(any(), anyString(), anyString(), pvcCaptor.capture())).thenReturn(Future.succeededFuture()); // Mock the StorageClass Operator StorageClassOperator mockSco = supplier.storageClassOperations; @@ -153,7 +154,7 @@ public void testNoExistingVolumes() { @Test public void testNotBoundVolumes() { Kafka kafka = getKafkaCrd(); - KafkaCluster kafkaCluster = KafkaCluster.fromCrd(kafka, VERSIONS); + KafkaCluster kafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, VERSIONS); ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); // Mock the PVC Operator @@ -167,7 +168,7 @@ public void testNotBoundVolumes() { }); ArgumentCaptor pvcCaptor = ArgumentCaptor.forClass(PersistentVolumeClaim.class); - when(mockPvcOps.reconcile(anyString(), anyString(), pvcCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockPvcOps.reconcile(any(), anyString(), anyString(), pvcCaptor.capture())).thenReturn(Future.succeededFuture()); // Mock the StorageClass Operator StorageClassOperator mockSco = supplier.storageClassOperations; @@ -201,7 +202,7 @@ public void testNotBoundVolumes() { @Test public void testVolumesBoundExpandableStorageClass() { Kafka kafka = getKafkaCrd(); - KafkaCluster kafkaCluster = KafkaCluster.fromCrd(kafka, VERSIONS); + KafkaCluster kafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, VERSIONS); ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); // Mock the PVC Operator @@ -224,7 +225,7 @@ public void testVolumesBoundExpandableStorageClass() { }); ArgumentCaptor pvcCaptor = ArgumentCaptor.forClass(PersistentVolumeClaim.class); - when(mockPvcOps.reconcile(anyString(), anyString(), pvcCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockPvcOps.reconcile(any(), anyString(), anyString(), 
pvcCaptor.capture())).thenReturn(Future.succeededFuture()); // Mock the StorageClass Operator StorageClassOperator mockSco = supplier.storageClassOperations; @@ -258,7 +259,7 @@ public void testVolumesBoundExpandableStorageClass() { @Test public void testVolumesBoundNonExpandableStorageClass() { Kafka kafka = getKafkaCrd(); - KafkaCluster kafkaCluster = KafkaCluster.fromCrd(kafka, VERSIONS); + KafkaCluster kafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, VERSIONS); ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); // Mock the PVC Operator @@ -281,7 +282,7 @@ public void testVolumesBoundNonExpandableStorageClass() { }); ArgumentCaptor pvcCaptor = ArgumentCaptor.forClass(PersistentVolumeClaim.class); - when(mockPvcOps.reconcile(anyString(), anyString(), pvcCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockPvcOps.reconcile(any(), anyString(), anyString(), pvcCaptor.capture())).thenReturn(Future.succeededFuture()); // Mock the StorageClass Operator StorageClassOperator mockSco = supplier.storageClassOperations; @@ -315,7 +316,7 @@ public void testVolumesBoundNonExpandableStorageClass() { @Test public void testVolumesResizing() { Kafka kafka = getKafkaCrd(); - KafkaCluster kafkaCluster = KafkaCluster.fromCrd(kafka, VERSIONS); + KafkaCluster kafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, VERSIONS); ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); // Mock the PVC Operator @@ -341,7 +342,7 @@ public void testVolumesResizing() { }); ArgumentCaptor pvcCaptor = ArgumentCaptor.forClass(PersistentVolumeClaim.class); - when(mockPvcOps.reconcile(anyString(), anyString(), pvcCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockPvcOps.reconcile(any(), anyString(), anyString(), pvcCaptor.capture())).thenReturn(Future.succeededFuture()); // Mock the StorageClass Operator StorageClassOperator mockSco = supplier.storageClassOperations; @@ -375,7 +376,7 @@ public void testVolumesResizing() { @Test public void testVolumesWaitingForRestart() { Kafka kafka = getKafkaCrd(); - KafkaCluster kafkaCluster = KafkaCluster.fromCrd(kafka, VERSIONS); + KafkaCluster kafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, VERSIONS); ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); // Mock the PVC Operator @@ -401,7 +402,7 @@ public void testVolumesWaitingForRestart() { }); ArgumentCaptor pvcCaptor = ArgumentCaptor.forClass(PersistentVolumeClaim.class); - when(mockPvcOps.reconcile(anyString(), anyString(), pvcCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockPvcOps.reconcile(any(), anyString(), anyString(), pvcCaptor.capture())).thenReturn(Future.succeededFuture()); // Mock the StorageClass Operator StorageClassOperator mockSco = supplier.storageClassOperations; @@ -441,7 +442,7 @@ public void testVolumesWaitingForRestart() { @Test public void testVolumesResized() { Kafka kafka = getKafkaCrd(); - KafkaCluster kafkaCluster = KafkaCluster.fromCrd(kafka, VERSIONS); + KafkaCluster kafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, VERSIONS); ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); // Mock the PVC Operator @@ -463,7 +464,7 @@ public void testVolumesResized() { }); ArgumentCaptor pvcCaptor = ArgumentCaptor.forClass(PersistentVolumeClaim.class); - when(mockPvcOps.reconcile(anyString(), anyString(), 
pvcCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockPvcOps.reconcile(any(), anyString(), anyString(), pvcCaptor.capture())).thenReturn(Future.succeededFuture()); // Mock the StorageClass Operator StorageClassOperator mockSco = supplier.storageClassOperations; diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/resource/KafkaAvailabilityTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/resource/KafkaAvailabilityTest.java index 24d99f157c..51aa3a5cbf 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/resource/KafkaAvailabilityTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/resource/KafkaAvailabilityTest.java @@ -4,6 +4,7 @@ */ package io.strimzi.operator.cluster.operator.resource; +import io.strimzi.operator.common.Reconciliation; import io.vertx.junit5.Checkpoint; import io.vertx.junit5.VertxExtension; import io.vertx.junit5.VertxTestContext; @@ -292,7 +293,7 @@ public void testBelowMinIsr(VertxTestContext context) { .addBroker(4); - KafkaAvailability kafkaAvailability = new KafkaAvailability(ksb.ac(), null); + KafkaAvailability kafkaAvailability = new KafkaAvailability(new Reconciliation("dummy", "kind", "namespace", "A"), ksb.ac()); Checkpoint a = context.checkpoint(ksb.brokers.size()); for (Integer brokerId : ksb.brokers.keySet()) { @@ -331,7 +332,7 @@ public void testAtMinIsr(VertxTestContext context) { .addBroker(2); - KafkaAvailability kafkaAvailability = new KafkaAvailability(ksb.ac(), null); + KafkaAvailability kafkaAvailability = new KafkaAvailability(new Reconciliation("dummy", "kind", "namespace", "A"), ksb.ac()); Checkpoint a = context.checkpoint(ksb.brokers.size()); for (Integer brokerId : ksb.brokers.keySet()) { @@ -370,7 +371,7 @@ public void testAboveMinIsr(VertxTestContext context) { .addBroker(3); - KafkaAvailability kafkaAvailability = new KafkaAvailability(ksb.ac(), null); + KafkaAvailability kafkaAvailability = new KafkaAvailability(new Reconciliation("dummy", "kind", "namespace", "A"), ksb.ac()); Checkpoint a = context.checkpoint(ksb.brokers.size()); for (Integer brokerId : ksb.brokers.keySet()) { @@ -396,7 +397,7 @@ public void testMinIsrEqualsReplicas(VertxTestContext context) { .addBroker(3); - KafkaAvailability kafkaAvailability = new KafkaAvailability(ksb.ac(), null); + KafkaAvailability kafkaAvailability = new KafkaAvailability(new Reconciliation("dummy", "kind", "namespace", "A"), ksb.ac()); Checkpoint a = context.checkpoint(ksb.brokers.size()); for (Integer brokerId : ksb.brokers.keySet()) { @@ -423,7 +424,7 @@ public void testMinIsrEqualsReplicasWithOfflineReplicas(VertxTestContext context .addBroker(3); - KafkaAvailability kafkaAvailability = new KafkaAvailability(ksb.ac(), null); + KafkaAvailability kafkaAvailability = new KafkaAvailability(new Reconciliation("dummy", "kind", "namespace", "A"), ksb.ac()); Checkpoint a = context.checkpoint(ksb.brokers.size()); for (Integer brokerId : ksb.brokers.keySet()) { @@ -449,7 +450,7 @@ public void testMinIsrMoreThanReplicas(VertxTestContext context) { .endTopic() .addBroker(3); - KafkaAvailability kafkaAvailability = new KafkaAvailability(ksb.ac(), null); + KafkaAvailability kafkaAvailability = new KafkaAvailability(new Reconciliation("dummy", "kind", "namespace", "A"), ksb.ac()); Checkpoint a = context.checkpoint(ksb.brokers.size()); for (Integer brokerId : ksb.brokers.keySet()) { @@ -484,7 +485,7 @@ public void testNoLeader(VertxTestContext context) { .addBroker(3); - 
KafkaAvailability kafkaSorted = new KafkaAvailability(ksb.ac(), null); + KafkaAvailability kafkaSorted = new KafkaAvailability(new Reconciliation("dummy", "kind", "namespace", "A"), ksb.ac()); Checkpoint a = context.checkpoint(ksb.brokers.size()); for (Integer brokerId : ksb.brokers.keySet()) { @@ -521,7 +522,7 @@ public void testNoMinIsr(VertxTestContext context) { .addBroker(3); - KafkaAvailability kafkaAvailability = new KafkaAvailability(ksb.ac(), null); + KafkaAvailability kafkaAvailability = new KafkaAvailability(new Reconciliation("dummy", "kind", "namespace", "A"), ksb.ac()); Checkpoint a = context.checkpoint(ksb.brokers.size()); for (Integer brokerId : ksb.brokers.keySet()) { @@ -557,7 +558,7 @@ public void testCanRollThrowsTimeoutExceptionWhenTopicsListThrowsException(Vertx .addBroker(3) .listTopicsResult(new TimeoutException()); - KafkaAvailability kafkaAvailability = new KafkaAvailability(ksb.ac(), null); + KafkaAvailability kafkaAvailability = new KafkaAvailability(new Reconciliation("dummy", "kind", "namespace", "A"), ksb.ac()); Checkpoint a = context.checkpoint(ksb.brokers.size()); for (Integer brokerId : ksb.brokers.keySet()) { @@ -591,7 +592,7 @@ public void testCanRollThrowsExceptionWhenTopicDescribeThrows(VertxTestContext c .addBroker(3) .describeTopicsResult("A", new UnknownTopicOrPartitionException()); - KafkaAvailability kafkaAvailability = new KafkaAvailability(ksb.ac(), null); + KafkaAvailability kafkaAvailability = new KafkaAvailability(new Reconciliation("dummy", "kind", "namespace", "A"), ksb.ac()); Checkpoint a = context.checkpoint(ksb.brokers.size()); for (Integer brokerId : ksb.brokers.keySet()) { @@ -625,7 +626,7 @@ public void testCanRollThrowsExceptionWhenDescribeConfigsThrows(VertxTestContext .addBroker(3) .describeConfigsResult(new ConfigResource(ConfigResource.Type.TOPIC, "A"), new UnknownTopicOrPartitionException()); - KafkaAvailability kafkaAvailability = new KafkaAvailability(ksb.ac(), null); + KafkaAvailability kafkaAvailability = new KafkaAvailability(new Reconciliation("dummy", "kind", "namespace", "A"), ksb.ac()); Checkpoint a = context.checkpoint(ksb.brokers.size()); for (Integer brokerId : ksb.brokers.keySet()) { diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/resource/KafkaBrokerConfigurationDiffTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/resource/KafkaBrokerConfigurationDiffTest.java index 4b4d89b137..cda88d4608 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/resource/KafkaBrokerConfigurationDiffTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/resource/KafkaBrokerConfigurationDiffTest.java @@ -7,6 +7,7 @@ import io.strimzi.operator.cluster.KafkaVersionTestUtils; import io.strimzi.operator.cluster.model.KafkaVersion; +import io.strimzi.operator.common.Reconciliation; import io.strimzi.test.TestUtils; import org.apache.kafka.clients.admin.AlterConfigOp; import org.apache.kafka.clients.admin.Config; @@ -84,14 +85,14 @@ private void assertConfig(KafkaBrokerConfigurationDiff kcd, ConfigEntry ce) { @Test public void testDefaultValue() { - KafkaBrokerConfigurationDiff kcd = new KafkaBrokerConfigurationDiff(getCurrentConfiguration(emptyList()), getDesiredConfiguration(emptyList()), kafkaVersion, brokerId); + KafkaBrokerConfigurationDiff kcd = new KafkaBrokerConfigurationDiff(Reconciliation.DUMMY_RECONCILIATION, getCurrentConfiguration(emptyList()), getDesiredConfiguration(emptyList()), kafkaVersion, brokerId); 
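A minimal standalone sketch of the argument-index shift seen in the KafkaRebalanceAssemblyOperatorTest mocks above: once the reconciliation context is prepended as the first parameter, an Answer that previously echoed invocation.getArgument(0) must read getArgument(1) to keep returning the patched resource. CrdOps below is a stand-in interface for illustration, not the Strimzi operator type.

import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

class AnswerIndexSketch {
    // Stand-in for a CRD operator whose patchAsync now takes the reconciliation context first.
    interface CrdOps {
        String patchAsync(Object reconciliation, String resource);
    }

    void sketch() {
        CrdOps ops = mock(CrdOps.class);
        // The resource being patched is now the second argument, so the Answer reads index 1.
        when(ops.patchAsync(any(), any())).thenAnswer(invocation -> invocation.getArgument(1));

        String echoed = ops.patchAsync(null, "my-rebalance");
        assert "my-rebalance".equals(echoed);
    }
}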
assertThat(kcd.isDesiredPropertyDefaultValue("offset.metadata.max.bytes", getCurrentConfiguration(emptyList())), is(true)); } @Test public void testNonDefaultValue() { List ces = singletonList(new ConfigEntry("offset.metadata.max.bytes", "4097")); - KafkaBrokerConfigurationDiff kcd = new KafkaBrokerConfigurationDiff(getCurrentConfiguration(ces), + KafkaBrokerConfigurationDiff kcd = new KafkaBrokerConfigurationDiff(Reconciliation.DUMMY_RECONCILIATION, getCurrentConfiguration(ces), getDesiredConfiguration(emptyList()), kafkaVersion, brokerId); assertThat(kcd.isDesiredPropertyDefaultValue("offset.metadata.max.bytes", getCurrentConfiguration(ces)), is(false)); } @@ -100,7 +101,7 @@ public void testNonDefaultValue() { public void testCustomPropertyAdded() { ArrayList ces = new ArrayList<>(); ces.add(new ConfigEntry("custom.property", "42")); - KafkaBrokerConfigurationDiff kcd = new KafkaBrokerConfigurationDiff(getCurrentConfiguration(new ArrayList<>()), getDesiredConfiguration(ces), kafkaVersion, brokerId); + KafkaBrokerConfigurationDiff kcd = new KafkaBrokerConfigurationDiff(Reconciliation.DUMMY_RECONCILIATION, getCurrentConfiguration(new ArrayList<>()), getDesiredConfiguration(ces), kafkaVersion, brokerId); assertThat(kcd.getDiffSize(), is(0)); assertThat(kcd.canBeUpdatedDynamically(), is(true)); @@ -109,7 +110,7 @@ public void testCustomPropertyAdded() { @Test public void testCustomPropertyRemoved() { List ces = singletonList(new ConfigEntry("custom.property", "42", false, true, false)); - KafkaBrokerConfigurationDiff kcd = new KafkaBrokerConfigurationDiff(getCurrentConfiguration(ces), + KafkaBrokerConfigurationDiff kcd = new KafkaBrokerConfigurationDiff(Reconciliation.DUMMY_RECONCILIATION, getCurrentConfiguration(ces), getDesiredConfiguration(emptyList()), kafkaVersion, brokerId); assertThat(kcd.getDiffSize(), is(0)); assertThat(kcd.canBeUpdatedDynamically(), is(true)); @@ -119,7 +120,7 @@ public void testCustomPropertyRemoved() { @Test public void testCustomPropertyKept() { List ces = singletonList(new ConfigEntry("custom.property", "42", false, true, false)); - KafkaBrokerConfigurationDiff kcd = new KafkaBrokerConfigurationDiff(getCurrentConfiguration(ces), + KafkaBrokerConfigurationDiff kcd = new KafkaBrokerConfigurationDiff(Reconciliation.DUMMY_RECONCILIATION, getCurrentConfiguration(ces), getDesiredConfiguration(ces), kafkaVersion, brokerId); assertThat(kcd.getDiffSize(), is(0)); assertThat(kcd.canBeUpdatedDynamically(), is(true)); @@ -129,7 +130,7 @@ public void testCustomPropertyKept() { public void testCustomPropertyChanged() { List ces = singletonList(new ConfigEntry("custom.property", "42", false, true, false)); List ces2 = singletonList(new ConfigEntry("custom.property", "43", false, true, false)); - KafkaBrokerConfigurationDiff kcd = new KafkaBrokerConfigurationDiff(getCurrentConfiguration(ces), + KafkaBrokerConfigurationDiff kcd = new KafkaBrokerConfigurationDiff(Reconciliation.DUMMY_RECONCILIATION, getCurrentConfiguration(ces), getDesiredConfiguration(ces2), kafkaVersion, brokerId); assertThat(kcd.getDiffSize(), is(0)); assertThat(kcd.canBeUpdatedDynamically(), is(true)); @@ -138,7 +139,7 @@ public void testCustomPropertyChanged() { @Test public void testChangedPresentValue() { List ces = singletonList(new ConfigEntry("min.insync.replicas", "2", false, true, false)); - KafkaBrokerConfigurationDiff kcd = new KafkaBrokerConfigurationDiff(getCurrentConfiguration(emptyList()), + KafkaBrokerConfigurationDiff kcd = new 
KafkaBrokerConfigurationDiff(Reconciliation.DUMMY_RECONCILIATION, getCurrentConfiguration(emptyList()), getDesiredConfiguration(ces), kafkaVersion, brokerId); assertThat(kcd.getDiffSize(), is(1)); assertThat(kcd.canBeUpdatedDynamically(), is(true)); @@ -148,7 +149,7 @@ public void testChangedPresentValue() { @Test public void testChangedPresentValueToDefault() { List ces = singletonList(new ConfigEntry("min.insync.replicas", "1", false, true, false)); - KafkaBrokerConfigurationDiff kcd = new KafkaBrokerConfigurationDiff(getCurrentConfiguration(emptyList()), + KafkaBrokerConfigurationDiff kcd = new KafkaBrokerConfigurationDiff(Reconciliation.DUMMY_RECONCILIATION, getCurrentConfiguration(emptyList()), getDesiredConfiguration(ces), kafkaVersion, brokerId); assertThat(kcd.getDiffSize(), is(0)); assertThat(kcd.canBeUpdatedDynamically(), is(true)); @@ -157,7 +158,7 @@ public void testChangedPresentValueToDefault() { @Test public void testChangedAdvertisedListener() { List ces = singletonList(new ConfigEntry("advertised.listeners", "karel", false, true, false)); - KafkaBrokerConfigurationDiff kcd = new KafkaBrokerConfigurationDiff(getCurrentConfiguration(emptyList()), + KafkaBrokerConfigurationDiff kcd = new KafkaBrokerConfigurationDiff(Reconciliation.DUMMY_RECONCILIATION, getCurrentConfiguration(emptyList()), getDesiredConfiguration(ces), kafkaVersion, brokerId); assertThat(kcd.getDiffSize(), is(0)); assertThat(kcd.canBeUpdatedDynamically(), is(true)); @@ -166,7 +167,7 @@ public void testChangedAdvertisedListener() { @Test public void testChangedAdvertisedListenerFromNothingToDefault() { List ces = singletonList(new ConfigEntry("advertised.listeners", "null", false, true, false)); - KafkaBrokerConfigurationDiff kcd = new KafkaBrokerConfigurationDiff(getCurrentConfiguration(ces), + KafkaBrokerConfigurationDiff kcd = new KafkaBrokerConfigurationDiff(Reconciliation.DUMMY_RECONCILIATION, getCurrentConfiguration(ces), getDesiredConfiguration(ces), kafkaVersion, brokerId); assertThat(kcd.getDiffSize(), is(0)); assertThat(kcd.canBeUpdatedDynamically(), is(true)); @@ -176,7 +177,7 @@ public void testChangedAdvertisedListenerFromNothingToDefault() { public void testChangedAdvertisedListenerFromNonDefaultToDefault() { // advertised listeners are filled after the pod started List ces = singletonList(new ConfigEntry("advertised.listeners", "null", false, true, false)); - KafkaBrokerConfigurationDiff kcd = new KafkaBrokerConfigurationDiff(getCurrentConfiguration(emptyList()), + KafkaBrokerConfigurationDiff kcd = new KafkaBrokerConfigurationDiff(Reconciliation.DUMMY_RECONCILIATION, getCurrentConfiguration(emptyList()), getDesiredConfiguration(ces), kafkaVersion, brokerId); assertThat(kcd.getDiffSize(), is(0)); assertThat(kcd.canBeUpdatedDynamically(), is(true)); @@ -185,7 +186,7 @@ public void testChangedAdvertisedListenerFromNonDefaultToDefault() { @Test public void testChangedZookeeperConnect() { List ces = singletonList(new ConfigEntry("zookeeper.connect", "karel", false, true, false)); - KafkaBrokerConfigurationDiff kcd = new KafkaBrokerConfigurationDiff(getCurrentConfiguration(emptyList()), + KafkaBrokerConfigurationDiff kcd = new KafkaBrokerConfigurationDiff(Reconciliation.DUMMY_RECONCILIATION, getCurrentConfiguration(emptyList()), getDesiredConfiguration(ces), kafkaVersion, brokerId); assertThat(kcd.getDiffSize(), is(0)); assertThat(kcd.canBeUpdatedDynamically(), is(true)); @@ -194,7 +195,7 @@ public void testChangedZookeeperConnect() { @Test public void testChangedLogDirs() { List ces = 
singletonList(new ConfigEntry("log.dirs", "/var/lib/kafka/data/karel", false, true, false)); - KafkaBrokerConfigurationDiff kcd = new KafkaBrokerConfigurationDiff(getCurrentConfiguration(emptyList()), + KafkaBrokerConfigurationDiff kcd = new KafkaBrokerConfigurationDiff(Reconciliation.DUMMY_RECONCILIATION, getCurrentConfiguration(emptyList()), getDesiredConfiguration(ces), kafkaVersion, brokerId); assertThat(kcd.getDiffSize(), is(1)); assertThat(kcd.canBeUpdatedDynamically(), is(false)); @@ -204,7 +205,7 @@ public void testChangedLogDirs() { @Test public void testLogDirsNonDefaultToDefault() { List ces = singletonList(new ConfigEntry("log.dirs", "null", false, true, false)); - KafkaBrokerConfigurationDiff kcd = new KafkaBrokerConfigurationDiff(getCurrentConfiguration(emptyList()), + KafkaBrokerConfigurationDiff kcd = new KafkaBrokerConfigurationDiff(Reconciliation.DUMMY_RECONCILIATION, getCurrentConfiguration(emptyList()), getDesiredConfiguration(ces), kafkaVersion, brokerId); assertThat(kcd.getDiffSize(), is(1)); assertThat(kcd.canBeUpdatedDynamically(), is(false)); @@ -214,7 +215,7 @@ public void testLogDirsNonDefaultToDefault() { @Test public void testLogDirsDefaultToDefault() { List ces = singletonList(new ConfigEntry("log.dirs", "null", false, true, false)); - KafkaBrokerConfigurationDiff kcd = new KafkaBrokerConfigurationDiff(getCurrentConfiguration(ces), + KafkaBrokerConfigurationDiff kcd = new KafkaBrokerConfigurationDiff(Reconciliation.DUMMY_RECONCILIATION, getCurrentConfiguration(ces), getDesiredConfiguration(ces), kafkaVersion, brokerId); assertThat(kcd.getDiffSize(), is(0)); assertThat(kcd.canBeUpdatedDynamically(), is(true)); @@ -223,7 +224,7 @@ public void testLogDirsDefaultToDefault() { @Test public void testUnchangedLogDirs() { List ces = singletonList(new ConfigEntry("log.dirs", "null", false, true, false)); - KafkaBrokerConfigurationDiff kcd = new KafkaBrokerConfigurationDiff(getCurrentConfiguration(ces), + KafkaBrokerConfigurationDiff kcd = new KafkaBrokerConfigurationDiff(Reconciliation.DUMMY_RECONCILIATION, getCurrentConfiguration(ces), getDesiredConfiguration(ces), kafkaVersion, brokerId); assertThat(kcd.getDiffSize(), is(0)); assertThat(kcd.canBeUpdatedDynamically(), is(true)); @@ -232,7 +233,7 @@ public void testUnchangedLogDirs() { @Test public void testChangedInterBrokerListenerName() { List ces = singletonList(new ConfigEntry("inter.broker.listener.name", "david", false, true, false)); - KafkaBrokerConfigurationDiff kcd = new KafkaBrokerConfigurationDiff(getCurrentConfiguration(emptyList()), + KafkaBrokerConfigurationDiff kcd = new KafkaBrokerConfigurationDiff(Reconciliation.DUMMY_RECONCILIATION, getCurrentConfiguration(emptyList()), getDesiredConfiguration(ces), kafkaVersion, brokerId); assertThat(kcd.getDiffSize(), is(1)); assertThat(kcd.canBeUpdatedDynamically(), is(false)); @@ -241,7 +242,7 @@ public void testChangedInterBrokerListenerName() { @Test public void testChangedListenerSecurityProtocolMap() { List ces = singletonList(new ConfigEntry("listener.security.protocol.map", "david", false, true, false)); - KafkaBrokerConfigurationDiff kcd = new KafkaBrokerConfigurationDiff(getCurrentConfiguration(emptyList()), + KafkaBrokerConfigurationDiff kcd = new KafkaBrokerConfigurationDiff(Reconciliation.DUMMY_RECONCILIATION, getCurrentConfiguration(emptyList()), getDesiredConfiguration(ces), kafkaVersion, brokerId); assertThat(kcd.getDiffSize(), is(1)); assertThat(kcd.canBeUpdatedDynamically(), is(true)); @@ -251,7 +252,7 @@ public void 
testChangedListenerSecurityProtocolMap() { public void testChangedListenerSecurityProtocolMapFromNonDefault() { List ces = singletonList(new ConfigEntry("listener.security.protocol.map", "REPLICATION-9091:SSL,PLAIN-9092:SASL_PLAINTEXT,TLS-9093:SSL,EXTERNAL-9094:SSL", false, true, false)); List ces2 = singletonList(new ConfigEntry("listener.security.protocol.map", "REPLICATION-9091:SSL,PLAIN-9092:SASL_PLAINTEXT,TLS-9093:SSL", false, true, false)); - KafkaBrokerConfigurationDiff kcd = new KafkaBrokerConfigurationDiff(getCurrentConfiguration(ces), + KafkaBrokerConfigurationDiff kcd = new KafkaBrokerConfigurationDiff(Reconciliation.DUMMY_RECONCILIATION, getCurrentConfiguration(ces), getDesiredConfiguration(ces2), kafkaVersion, brokerId); assertThat(kcd.getDiffSize(), is(1)); assertThat(kcd.canBeUpdatedDynamically(), is(true)); @@ -264,7 +265,7 @@ public void testChangedMoreProperties() { ces.add(new ConfigEntry("inter.broker.listener.name", "david", false, true, false)); ces.add(new ConfigEntry("group.min.session.timeout.ms", "42", false, true, false)); ces.add(new ConfigEntry("host.name", "honza", false, true, false)); - KafkaBrokerConfigurationDiff kcd = new KafkaBrokerConfigurationDiff(getCurrentConfiguration(emptyList()), + KafkaBrokerConfigurationDiff kcd = new KafkaBrokerConfigurationDiff(Reconciliation.DUMMY_RECONCILIATION, getCurrentConfiguration(emptyList()), getDesiredConfiguration(ces), kafkaVersion, brokerId); assertThat(kcd.getDiffSize(), is(3)); assertThat(kcd.canBeUpdatedDynamically(), is(false)); @@ -274,7 +275,7 @@ public void testChangedMoreProperties() { public void testRemoveDefaultPropertyWhichIsNotDefault() { // it is not seen as default because the ConfigEntry.ConfigSource.DEFAULT_CONFIG is not set List ces = singletonList(new ConfigEntry("log.retention.hours", "168")); - KafkaBrokerConfigurationDiff kcd = new KafkaBrokerConfigurationDiff(getCurrentConfiguration(ces), + KafkaBrokerConfigurationDiff kcd = new KafkaBrokerConfigurationDiff(Reconciliation.DUMMY_RECONCILIATION, getCurrentConfiguration(ces), getDesiredConfiguration(emptyList()), kafkaVersion, brokerId); assertThat(kcd.getDiffSize(), is(1)); assertThat(kcd.canBeUpdatedDynamically(), is(false)); diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/resource/KafkaBrokerLoggingConfigurationDiffTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/resource/KafkaBrokerLoggingConfigurationDiffTest.java index 33f5d6a935..b68a739293 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/resource/KafkaBrokerLoggingConfigurationDiffTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/resource/KafkaBrokerLoggingConfigurationDiffTest.java @@ -5,6 +5,7 @@ package io.strimzi.operator.cluster.operator.resource; +import io.strimzi.operator.common.Reconciliation; import org.apache.kafka.clients.admin.AlterConfigOp; import org.apache.kafka.clients.admin.Config; import org.apache.kafka.clients.admin.ConfigEntry; @@ -52,7 +53,7 @@ private Config getCurrentConfiguration(List additional) { @Test public void testReplaceRootLogger() { - KafkaBrokerLoggingConfigurationDiff klcd = new KafkaBrokerLoggingConfigurationDiff(getCurrentConfiguration(emptyList()), getDesiredConfiguration(emptyList()), brokerId); + KafkaBrokerLoggingConfigurationDiff klcd = new KafkaBrokerLoggingConfigurationDiff(Reconciliation.DUMMY_RECONCILIATION, getCurrentConfiguration(emptyList()), getDesiredConfiguration(emptyList()), brokerId); 
assertThat(klcd.getDiffSize(), is(0)); } @@ -64,7 +65,7 @@ public void testDiffUsingLoggerInheritance() { // Prepare currentConfig Config currentConfig = getRealisticConfig(); - KafkaBrokerLoggingConfigurationDiff diff = new KafkaBrokerLoggingConfigurationDiff(currentConfig, desiredConfig, brokerId); + KafkaBrokerLoggingConfigurationDiff diff = new KafkaBrokerLoggingConfigurationDiff(Reconciliation.DUMMY_RECONCILIATION, currentConfig, desiredConfig, brokerId); assertThat(diff.getLoggingDiff(), is(getRealisticConfigDiff())); } @@ -166,7 +167,9 @@ public void testExpansion() { "log4j.logger.kafka.authorizer.logger=INFO\n" + "monitorInterval=30\n"; - Map res = KafkaBrokerLoggingConfigurationDiff.readLog4jConfig(input); + KafkaBrokerLoggingConfigurationDiff kdiff = new KafkaBrokerLoggingConfigurationDiff(Reconciliation.DUMMY_RECONCILIATION, null, null, 0); + + Map res = kdiff.readLog4jConfig(input); assertThat(res.get("root"), is("INFO")); assertThat(res.get("kafka.request.logger"), is("WARN")); } diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/resource/KafkaRollerTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/resource/KafkaRollerTest.java index b4bb399dac..185b1b69a3 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/resource/KafkaRollerTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/resource/KafkaRollerTest.java @@ -63,6 +63,8 @@ @ExtendWith(VertxExtension.class) public class KafkaRollerTest { + private static final Logger LOGGER = LogManager.getLogger(KafkaRollerTest.class); + private static Vertx vertx; private List restarted; @@ -287,8 +289,6 @@ public void testNonControllerNotInitiallyRollable(VertxTestContext testContext) asList(0, 3, 4, 1, 2)); } - private static final Logger log = LogManager.getLogger(KafkaRollerTest.class); - @Test public void testControllerNotInitiallyRollable(VertxTestContext testContext) { PodOperator podOps = mockPodOps(podId -> succeededFuture()); @@ -299,7 +299,7 @@ public void testControllerNotInitiallyRollable(VertxTestContext testContext) { brokerId -> { if (brokerId == 2) { boolean b = count.getAndDecrement() == 0; - log.info("Can broker {} be rolled now ? {}", brokerId, b); + LOGGER.info("Can broker {} be rolled now ? {}", brokerId, b); return succeededFuture(b); } else { return succeededFuture(true); @@ -529,8 +529,8 @@ private PodOperator mockPodOps(Function> readiness) { .endMetadata() .build() ); - when(podOps.readiness(any(), any(), anyLong(), anyLong())).thenAnswer(invocationOnMock -> { - String podName = invocationOnMock.getArgument(1); + when(podOps.readiness(any(), any(), any(), anyLong(), anyLong())).thenAnswer(invocationOnMock -> { + String podName = invocationOnMock.getArgument(2); return readiness.apply(podName2Number(podName)); }); when(podOps.isReady(anyString(), anyString())).thenAnswer(invocationOnMock -> { @@ -583,7 +583,7 @@ private TestingKafkaRoller(StatefulSet sts, Secret clusterCaCertSecret, Secret c Function getConfigsException, Function> canRollFn, int... 
controllers) { - super(KafkaRollerTest.vertx, new Reconciliation("test", "Kafka", stsNamespace(), clusterName()), podOps, 500, 1000, + super(new Reconciliation("test", "Kafka", stsNamespace(), clusterName()), KafkaRollerTest.vertx, podOps, 500, 1000, () -> new BackOff(10L, 2, 4), sts, clusterCaCertSecret, coKeySecret, "", "", KafkaVersionTestUtils.getLatestVersion(), true); this.controllers = controllers; diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/resource/KafkaSetOperatorTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/resource/KafkaSetOperatorTest.java index 8de049ba08..cc3c6fe660 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/resource/KafkaSetOperatorTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/resource/KafkaSetOperatorTest.java @@ -12,6 +12,7 @@ import io.strimzi.operator.cluster.ResourceUtils; import io.strimzi.operator.cluster.model.KafkaCluster; import io.strimzi.operator.cluster.model.KafkaVersion; +import io.strimzi.operator.common.Reconciliation; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -38,8 +39,8 @@ public class KafkaSetOperatorTest { @BeforeEach public void before() { KafkaVersion.Lookup versions = new KafkaVersion.Lookup(emptyMap(), emptyMap(), emptyMap(), emptyMap(), emptyMap()); - currectSts = KafkaCluster.fromCrd(getResource(), versions).generateStatefulSet(true, null, null); - desiredSts = KafkaCluster.fromCrd(getResource(), versions).generateStatefulSet(true, null, null); + currectSts = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, getResource(), versions).generateStatefulSet(true, null, null); + desiredSts = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, getResource(), versions).generateStatefulSet(true, null, null); } private Kafka getResource() { @@ -68,18 +69,18 @@ private Kafka getResource() { } private StatefulSetDiff createDiff() { - return new StatefulSetDiff(currectSts, desiredSts); + return new StatefulSetDiff(Reconciliation.DUMMY_RECONCILIATION, currectSts, desiredSts); } @Test public void testNotNeedsRollingUpdateWhenIdentical() { - assertThat(KafkaSetOperator.needsRollingUpdate(createDiff()), is(false)); + assertThat(KafkaSetOperator.needsRollingUpdate(Reconciliation.DUMMY_RECONCILIATION, createDiff()), is(false)); } @Test public void testNotNeedsRollingUpdateWhenReplicasDecrease() { currectSts.getSpec().setReplicas(desiredSts.getSpec().getReplicas() + 1); - assertThat(KafkaSetOperator.needsRollingUpdate(createDiff()), is(false)); + assertThat(KafkaSetOperator.needsRollingUpdate(Reconciliation.DUMMY_RECONCILIATION, createDiff()), is(false)); } @Test @@ -87,28 +88,28 @@ public void testNeedsRollingUpdateWhenLabelsRemoved() { Map labels = new HashMap(desiredSts.getMetadata().getLabels()); labels.put("foo", "bar"); currectSts.getMetadata().setLabels(labels); - assertThat(KafkaSetOperator.needsRollingUpdate(createDiff()), is(true)); + assertThat(KafkaSetOperator.needsRollingUpdate(Reconciliation.DUMMY_RECONCILIATION, createDiff()), is(true)); } @Test public void testNeedsRollingUpdateWhenImageChanges() { String newImage = currectSts.getSpec().getTemplate().getSpec().getContainers().get(0).getImage() + "-foo"; currectSts.getSpec().getTemplate().getSpec().getContainers().get(0).setImage(newImage); - assertThat(KafkaSetOperator.needsRollingUpdate(createDiff()), is(true)); + assertThat(KafkaSetOperator.needsRollingUpdate(Reconciliation.DUMMY_RECONCILIATION, 
createDiff()), is(true)); } @Test public void testNeedsRollingUpdateWhenReadinessDelayChanges() { Integer newDelay = currectSts.getSpec().getTemplate().getSpec().getContainers().get(0).getReadinessProbe().getInitialDelaySeconds() + 1; currectSts.getSpec().getTemplate().getSpec().getContainers().get(0).getReadinessProbe().setInitialDelaySeconds(newDelay); - assertThat(KafkaSetOperator.needsRollingUpdate(createDiff()), is(true)); + assertThat(KafkaSetOperator.needsRollingUpdate(Reconciliation.DUMMY_RECONCILIATION, createDiff()), is(true)); } @Test public void testNeedsRollingUpdateWhenReadinessTimeoutChanges() { Integer newTimeout = currectSts.getSpec().getTemplate().getSpec().getContainers().get(0).getReadinessProbe().getTimeoutSeconds() + 1; currectSts.getSpec().getTemplate().getSpec().getContainers().get(0).getReadinessProbe().setTimeoutSeconds(newTimeout); - assertThat(KafkaSetOperator.needsRollingUpdate(createDiff()), is(true)); + assertThat(KafkaSetOperator.needsRollingUpdate(Reconciliation.DUMMY_RECONCILIATION, createDiff()), is(true)); } @Test @@ -116,6 +117,6 @@ public void testNeedsRollingUpdateWhenNewEnvRemoved() { String envVar = "SOME_RANDOM_ENV"; currectSts.getSpec().getTemplate().getSpec().getContainers().get(0).getEnv().add(new EnvVar(envVar, "foo", null)); - assertThat(KafkaSetOperator.needsRollingUpdate(createDiff()), is(true)); + assertThat(KafkaSetOperator.needsRollingUpdate(Reconciliation.DUMMY_RECONCILIATION, createDiff()), is(true)); } } diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/resource/KafkaSpecCheckerTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/resource/KafkaSpecCheckerTest.java index fac17d8d79..8e1df09e4e 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/resource/KafkaSpecCheckerTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/resource/KafkaSpecCheckerTest.java @@ -16,6 +16,7 @@ import io.strimzi.operator.cluster.model.KafkaConfiguration; import io.strimzi.operator.cluster.model.KafkaVersion; import io.strimzi.operator.cluster.model.ZookeeperCluster; +import io.strimzi.operator.common.Reconciliation; import org.junit.jupiter.api.Test; import java.util.HashMap; @@ -38,8 +39,8 @@ public class KafkaSpecCheckerTest { private KafkaSpecChecker generateChecker(Kafka kafka) { KafkaVersion.Lookup versions = KafkaVersionTestUtils.getKafkaVersionLookup(); - KafkaCluster kafkaCluster = KafkaCluster.fromCrd(kafka, versions); - ZookeeperCluster zkCluster = ZookeeperCluster.fromCrd(kafka, versions); + KafkaCluster kafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, versions); + ZookeeperCluster zkCluster = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, versions); return new KafkaSpecChecker(kafka.getSpec(), versions, kafkaCluster, zkCluster); } diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/resource/StatefulSetDiffTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/resource/StatefulSetDiffTest.java index 096aba0441..7d9ddc36c8 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/resource/StatefulSetDiffTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/resource/StatefulSetDiffTest.java @@ -12,6 +12,7 @@ import io.fabric8.kubernetes.api.model.VolumeBuilder; import io.fabric8.kubernetes.api.model.apps.StatefulSet; import 
io.fabric8.kubernetes.api.model.apps.StatefulSetBuilder; +import io.strimzi.operator.common.Reconciliation; import org.junit.jupiter.api.Test; import static java.util.Collections.singletonMap; @@ -51,7 +52,7 @@ public void testSpecVolumesIgnored() { .endTemplate() .endSpec() .build(); - assertThat(new StatefulSetDiff(ss1, ss2).changesSpecTemplate(), is(false)); + assertThat(new StatefulSetDiff(Reconciliation.DUMMY_RECONCILIATION, ss1, ss2).changesSpecTemplate(), is(false)); } public StatefulSetDiff testCpuResources(ResourceRequirements requirements1, ResourceRequirements requirements2) { @@ -85,7 +86,7 @@ public StatefulSetDiff testCpuResources(ResourceRequirements requirements1, Reso .endTemplate() .endSpec() .build(); - return new StatefulSetDiff(ss1, ss2); + return new StatefulSetDiff(Reconciliation.DUMMY_RECONCILIATION, ss1, ss2); } @Test @@ -185,8 +186,8 @@ public void testPvcSizeChangeIgnored() { .build()) .endSpec() .build(); - assertThat(new StatefulSetDiff(ss1, ss2).changesVolumeClaimTemplates(), is(false)); - assertThat(new StatefulSetDiff(ss1, ss2).changesVolumeSize(), is(true)); + assertThat(new StatefulSetDiff(Reconciliation.DUMMY_RECONCILIATION, ss1, ss2).changesVolumeClaimTemplates(), is(false)); + assertThat(new StatefulSetDiff(Reconciliation.DUMMY_RECONCILIATION, ss1, ss2).changesVolumeSize(), is(true)); } @Test @@ -235,8 +236,8 @@ public void testPvcSizeUnitChangeIgnored() { .build()) .endSpec() .build(); - assertThat(new StatefulSetDiff(ss1, ss2).changesVolumeClaimTemplates(), is(false)); - assertThat(new StatefulSetDiff(ss1, ss2).changesVolumeSize(), is(false)); + assertThat(new StatefulSetDiff(Reconciliation.DUMMY_RECONCILIATION, ss1, ss2).changesVolumeClaimTemplates(), is(false)); + assertThat(new StatefulSetDiff(Reconciliation.DUMMY_RECONCILIATION, ss1, ss2).changesVolumeSize(), is(false)); } @Test @@ -292,7 +293,7 @@ public void testNewPvcNotIgnored() { .build()) .endSpec() .build(); - assertThat(new StatefulSetDiff(ss1, ss2).changesVolumeClaimTemplates(), is(true)); - assertThat(new StatefulSetDiff(ss1, ss2).changesVolumeSize(), is(false)); + assertThat(new StatefulSetDiff(Reconciliation.DUMMY_RECONCILIATION, ss1, ss2).changesVolumeClaimTemplates(), is(true)); + assertThat(new StatefulSetDiff(Reconciliation.DUMMY_RECONCILIATION, ss1, ss2).changesVolumeSize(), is(false)); } } diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/resource/StatefulSetOperatorTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/resource/StatefulSetOperatorTest.java index 887b7e8483..e410f16992 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/resource/StatefulSetOperatorTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/resource/StatefulSetOperatorTest.java @@ -29,6 +29,7 @@ import io.fabric8.kubernetes.client.dsl.NonNamespaceOperation; import io.fabric8.kubernetes.client.dsl.Resource; import io.fabric8.kubernetes.client.dsl.RollableScalableResource; +import io.strimzi.operator.common.Reconciliation; import io.strimzi.operator.common.operator.resource.AbstractResourceOperatorTest; import io.strimzi.operator.common.operator.resource.AbstractScalableResourceOperator; import io.strimzi.operator.common.operator.resource.PodOperator; @@ -116,12 +117,12 @@ protected void mocker(KubernetesClient mockClient, MixedOperation op) { protected StatefulSetOperator createResourceOperations(Vertx vertx, KubernetesClient mockClient) { return new StatefulSetOperator(vertx, mockClient, 
60_000L) { @Override - public Future maybeRollingUpdate(StatefulSet sts, Function> podNeedsRestart, Secret clusterCaSecret, Secret coKeySecret) { + public Future maybeRollingUpdate(Reconciliation reconciliation, StatefulSet sts, Function> podNeedsRestart, Secret clusterCaSecret, Secret coKeySecret) { return Future.succeededFuture(); } @Override - protected boolean shouldIncrementGeneration(StatefulSetDiff diff) { + protected boolean shouldIncrementGeneration(Reconciliation reconciliation, StatefulSetDiff diff) { return false; } }; @@ -131,22 +132,22 @@ protected boolean shouldIncrementGeneration(StatefulSetDiff diff) { protected StatefulSetOperator createResourceOperationsWithMockedReadiness(Vertx vertx, KubernetesClient mockClient) { return new StatefulSetOperator(vertx, mockClient, 60_000L) { @Override - public Future maybeRollingUpdate(StatefulSet sts, Function> podNeedsRestart, Secret clusterCaSecret, Secret coKeySecret) { + public Future maybeRollingUpdate(Reconciliation reconciliation, StatefulSet sts, Function> podNeedsRestart, Secret clusterCaSecret, Secret coKeySecret) { return Future.succeededFuture(); } @Override - public Future readiness(String namespace, String name, long pollIntervalMs, long timeoutMs) { + public Future readiness(Reconciliation reconciliation, String namespace, String name, long pollIntervalMs, long timeoutMs) { return Future.succeededFuture(); } @Override - protected boolean shouldIncrementGeneration(StatefulSetDiff diff) { + protected boolean shouldIncrementGeneration(Reconciliation reconciliation, StatefulSetDiff diff) { return true; } @Override - protected Future podReadiness(String namespace, StatefulSet desired, long pollInterval, long operationTimeoutMs) { + protected Future podReadiness(Reconciliation reconciliation, String namespace, StatefulSet desired, long pollInterval, long operationTimeoutMs) { return Future.succeededFuture(); } }; @@ -171,17 +172,17 @@ public void testRollingUpdateSuccess(VertxTestContext context) { when(mockResource.get()).thenReturn(resource); PodOperator podOperator = mock(PodOperator.class); - when(podOperator.waitFor(anyString(), anyString(), anyLong(), anyLong(), any(BiPredicate.class))).thenReturn(Future.succeededFuture()); - when(podOperator.readiness(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(podOperator.reconcile(anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); + when(podOperator.waitFor(any(), anyString(), anyString(), anyLong(), anyLong(), any(BiPredicate.class))).thenReturn(Future.succeededFuture()); + when(podOperator.readiness(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(podOperator.reconcile(any(), anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); when(podOperator.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture(new PodBuilder().withNewMetadata().withName("my-pod-0").endMetadata().build())); - when(podOperator.restart(anyString(), any(), anyLong())).thenReturn(Future.succeededFuture()); + when(podOperator.restart(any(), any(), anyLong())).thenReturn(Future.succeededFuture()); PvcOperator pvcOperator = mock(PvcOperator.class); - when(pvcOperator.reconcile(anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); + when(pvcOperator.reconcile(any(), anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); SecretOperator secretOperator = mock(SecretOperator.class); - when(secretOperator.reconcile(anyString(), 
anyString(), any())).thenReturn(Future.succeededFuture()); + when(secretOperator.reconcile(any(), anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); NonNamespaceOperation mockNameable = mock(NonNamespaceOperation.class); when(mockNameable.withName(matches(resource.getMetadata().getName()))).thenReturn(mockResource); @@ -194,18 +195,18 @@ public void testRollingUpdateSuccess(VertxTestContext context) { StatefulSetOperator op = new StatefulSetOperator(AbstractResourceOperatorTest.vertx, mockClient, 5_000L, podOperator, pvcOperator) { @Override - public Future maybeRollingUpdate(StatefulSet sts, Function> podNeedsRestart, Secret clusterCaSecret, Secret coKeySecret) { + public Future maybeRollingUpdate(Reconciliation reconciliation, StatefulSet sts, Function> podNeedsRestart, Secret clusterCaSecret, Secret coKeySecret) { return Future.succeededFuture(); } @Override - protected boolean shouldIncrementGeneration(StatefulSetDiff diff) { + protected boolean shouldIncrementGeneration(Reconciliation reconciliation, StatefulSetDiff diff) { return true; } }; Checkpoint a = context.checkpoint(); - op.maybeRestartPod(resource, "my-pod-0", pod -> singletonList("roll")) + op.maybeRestartPod(new Reconciliation("test", "kind", "namespace", "name"), resource, "my-pod-0", pod -> singletonList("roll")) .onComplete(context.succeeding(v -> a.flag())); } @@ -216,8 +217,8 @@ public void testRollingUpdateDeletionTimeout(VertxTestContext context) { when(mockResource.get()).thenReturn(resource); PodOperator podOperator = mock(PodOperator.class); - when(podOperator.readiness(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(podOperator.reconcile(anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); + when(podOperator.readiness(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(podOperator.reconcile(any(), anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); AtomicInteger call = new AtomicInteger(); when(podOperator.getAsync(anyString(), anyString())).thenAnswer(invocation -> { if (call.getAndIncrement() == 0) { @@ -226,13 +227,13 @@ public void testRollingUpdateDeletionTimeout(VertxTestContext context) { return null; } }); - when(podOperator.restart(anyString(), any(), anyLong())).thenReturn(Future.failedFuture(new TimeoutException())); + when(podOperator.restart(any(), any(), anyLong())).thenReturn(Future.failedFuture(new TimeoutException())); PvcOperator pvcOperator = mock(PvcOperator.class); - when(pvcOperator.reconcile(anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); + when(pvcOperator.reconcile(any(), anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); SecretOperator secretOperator = mock(SecretOperator.class); - when(secretOperator.reconcile(anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); + when(secretOperator.reconcile(any(), anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); NonNamespaceOperation mockNameable = mock(NonNamespaceOperation.class); when(mockNameable.withName(matches(resource.getMetadata().getName()))).thenReturn(mockResource); @@ -245,18 +246,18 @@ public void testRollingUpdateDeletionTimeout(VertxTestContext context) { StatefulSetOperator op = new StatefulSetOperator(AbstractResourceOperatorTest.vertx, mockClient, 5_000L, podOperator, pvcOperator) { @Override - public Future maybeRollingUpdate(StatefulSet sts, Function> podNeedsRestart, Secret clusterCaSecret, 
Secret coKeySecret) { + public Future maybeRollingUpdate(Reconciliation reconciliation, StatefulSet sts, Function> podNeedsRestart, Secret clusterCaSecret, Secret coKeySecret) { return Future.succeededFuture(); } @Override - protected boolean shouldIncrementGeneration(StatefulSetDiff diff) { + protected boolean shouldIncrementGeneration(Reconciliation reconciliation, StatefulSetDiff diff) { return true; } }; Checkpoint a = context.checkpoint(); - op.maybeRestartPod(resource, "my-pod-0", pod -> singletonList("roll")) + op.maybeRestartPod(new Reconciliation("test", "kind", "namespace", "name"), resource, "my-pod-0", pod -> singletonList("roll")) .onComplete(context.failing(e -> context.verify(() -> { assertThat(e, instanceOf(TimeoutException.class)); a.flag(); @@ -270,17 +271,17 @@ public void testRollingUpdateReadinessTimeout(VertxTestContext context) { when(mockResource.get()).thenReturn(resource); PodOperator podOperator = mock(PodOperator.class); - when(podOperator.waitFor(anyString(), anyString(), anyLong(), anyLong(), any(BiPredicate.class))).thenReturn(Future.succeededFuture()); - when(podOperator.readiness(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.failedFuture(new TimeoutException())); - when(podOperator.reconcile(anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); + when(podOperator.waitFor(any(), anyString(), anyString(), anyLong(), anyLong(), any(BiPredicate.class))).thenReturn(Future.succeededFuture()); + when(podOperator.readiness(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.failedFuture(new TimeoutException())); + when(podOperator.reconcile(any(), anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); when(podOperator.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture(new PodBuilder().withNewMetadata().withName("my-pod-0").endMetadata().build())); - when(podOperator.restart(anyString(), any(), anyLong())).thenReturn(Future.succeededFuture()); + when(podOperator.restart(any(), any(), anyLong())).thenReturn(Future.succeededFuture()); PvcOperator pvcOperator = mock(PvcOperator.class); - when(pvcOperator.reconcile(anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); + when(pvcOperator.reconcile(any(), anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); SecretOperator secretOperator = mock(SecretOperator.class); - when(secretOperator.reconcile(anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); + when(secretOperator.reconcile(any(), anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); NonNamespaceOperation mockNameable = mock(NonNamespaceOperation.class); when(mockNameable.withName(matches(resource.getMetadata().getName()))).thenReturn(mockResource); @@ -293,17 +294,17 @@ public void testRollingUpdateReadinessTimeout(VertxTestContext context) { StatefulSetOperator op = new StatefulSetOperator(AbstractResourceOperatorTest.vertx, mockClient, 5_000L, podOperator, pvcOperator) { @Override - public Future maybeRollingUpdate(StatefulSet sts, Function> podNeedsRestart, Secret clusterCaSecret, Secret coKeySecret) { + public Future maybeRollingUpdate(Reconciliation reconciliation, StatefulSet sts, Function> podNeedsRestart, Secret clusterCaSecret, Secret coKeySecret) { return Future.succeededFuture(); } @Override - protected boolean shouldIncrementGeneration(StatefulSetDiff diff) { + protected boolean shouldIncrementGeneration(Reconciliation reconciliation, StatefulSetDiff diff) { return true; } }; 
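Note on the Mockito changes in this file (not part of the patch): the same signature change ripples through the stubs, so every stubbed operator call (readiness, reconcile, waitFor, restart and so on) gains a leading any() matcher for the new Reconciliation parameter, and the overridden test doubles add the parameter to their signatures. Below is a small self-contained sketch of that stubbing pattern; the FooOperator interface is hypothetical and merely stands in for PodOperator, PvcOperator and SecretOperator.

import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import io.strimzi.operator.common.Reconciliation;
import io.vertx.core.Future;

public class ReconciliationAwareStubbing {
    // Hypothetical stand-in for the real operator classes stubbed in these tests.
    interface FooOperator {
        Future<Void> reconcile(Reconciliation reconciliation, String namespace, String name, Object desired);
    }

    static FooOperator stubbedOperator() {
        FooOperator op = mock(FooOperator.class);
        // Previously the stub matched (anyString(), anyString(), any());
        // the extra leading any() now absorbs the Reconciliation argument.
        when(op.reconcile(any(), anyString(), anyString(), any())).thenReturn(Future.succeededFuture());
        return op;
    }
}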
Checkpoint a = context.checkpoint(); - op.maybeRestartPod(resource, "my-pod-0", pod -> singletonList("roll")).onComplete(context.failing(e -> context.verify(() -> { + op.maybeRestartPod(new Reconciliation("test", "kind", "namespace", "name"), resource, "my-pod-0", pod -> singletonList("roll")).onComplete(context.failing(e -> context.verify(() -> { assertThat(e, instanceOf(TimeoutException.class)); a.flag(); }))); @@ -316,16 +317,16 @@ public void testRollingUpdateReconcileFailed(VertxTestContext context) { when(mockResource.get()).thenReturn(resource); PodOperator podOperator = mock(PodOperator.class); - when(podOperator.waitFor(anyString(), anyString(), anyLong(), anyLong(), any(BiPredicate.class))).thenReturn(Future.succeededFuture()); - when(podOperator.readiness(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(podOperator.waitFor(any(), anyString(), anyString(), anyLong(), anyLong(), any(BiPredicate.class))).thenReturn(Future.succeededFuture()); + when(podOperator.readiness(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); when(podOperator.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture(new PodBuilder().withNewMetadata().withName("my-pod-0").endMetadata().build())); - when(podOperator.restart(anyString(), any(), anyLong())).thenReturn(Future.failedFuture("reconcile failed")); + when(podOperator.restart(any(), any(), anyLong())).thenReturn(Future.failedFuture("reconcile failed")); PvcOperator pvcOperator = mock(PvcOperator.class); - when(pvcOperator.reconcile(anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); + when(pvcOperator.reconcile(any(), anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); SecretOperator secretOperator = mock(SecretOperator.class); - when(secretOperator.reconcile(anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); + when(secretOperator.reconcile(any(), anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); NonNamespaceOperation mockNameable = mock(NonNamespaceOperation.class); when(mockNameable.withName(matches(resource.getMetadata().getName()))).thenReturn(mockResource); @@ -338,17 +339,17 @@ public void testRollingUpdateReconcileFailed(VertxTestContext context) { StatefulSetOperator op = new StatefulSetOperator(AbstractResourceOperatorTest.vertx, mockClient, 5_000L, podOperator, pvcOperator) { @Override - public Future maybeRollingUpdate(StatefulSet sts, Function> podNeedsRestart, Secret clusterCaSecret, Secret coKeySecret) { + public Future maybeRollingUpdate(Reconciliation reconciliation, StatefulSet sts, Function> podNeedsRestart, Secret clusterCaSecret, Secret coKeySecret) { return Future.succeededFuture(); } @Override - protected boolean shouldIncrementGeneration(StatefulSetDiff diff) { + protected boolean shouldIncrementGeneration(Reconciliation reconciliation, StatefulSetDiff diff) { return true; } }; Checkpoint a = context.checkpoint(); - op.maybeRestartPod(resource, "my-pod-0", pod -> singletonList("roll")) + op.maybeRestartPod(new Reconciliation("test", "kind", "namespace", "name"), resource, "my-pod-0", pod -> singletonList("roll")) .onComplete(context.failing(e -> context.verify(() -> { assertThat(e.getMessage(), is("reconcile failed")); a.flag(); @@ -415,13 +416,13 @@ public void testInternalReplace(VertxTestContext context) { when(mockResource.create(any(StatefulSet.class))).thenReturn(sts1); PodOperator podOperator = mock(PodOperator.class); - 
when(podOperator.waitFor(anyString(), anyString(), anyLong(), anyLong(), any(BiPredicate.class))).thenReturn(Future.succeededFuture()); - when(podOperator.readiness(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); - when(podOperator.reconcile(anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); + when(podOperator.waitFor(any(), anyString(), anyString(), anyLong(), anyLong(), any(BiPredicate.class))).thenReturn(Future.succeededFuture()); + when(podOperator.readiness(any(), anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); + when(podOperator.reconcile(any(), anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); when(podOperator.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture(new PodBuilder().withNewMetadata().withName("my-pod-0").endMetadata().build())); PvcOperator pvcOperator = mock(PvcOperator.class); - when(pvcOperator.reconcile(anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); + when(pvcOperator.reconcile(any(), anyString(), anyString(), any())).thenReturn(Future.succeededFuture()); NonNamespaceOperation mockNameable = mock(NonNamespaceOperation.class); when(mockNameable.withName(matches(RESOURCE_NAME))).thenReturn(mockResource); @@ -434,23 +435,23 @@ public void testInternalReplace(VertxTestContext context) { StatefulSetOperator op = new StatefulSetOperator(AbstractResourceOperatorTest.vertx, mockClient, 5_000L, podOperator, pvcOperator) { @Override - public Future maybeRollingUpdate(StatefulSet sts, Function> podNeedsRestart, Secret clusterCaSecret, Secret coKeySecret) { + public Future maybeRollingUpdate(Reconciliation reconciliation, StatefulSet sts, Function> podNeedsRestart, Secret clusterCaSecret, Secret coKeySecret) { return Future.succeededFuture(); } @Override - protected boolean shouldIncrementGeneration(StatefulSetDiff diff) { + protected boolean shouldIncrementGeneration(Reconciliation reconciliation, StatefulSetDiff diff) { return true; } @Override - public Future waitFor(String namespace, String name, String logState, long pollIntervalMs, final long timeoutMs, BiPredicate predicate) { + public Future waitFor(Reconciliation reconciliation, String namespace, String name, String logState, long pollIntervalMs, final long timeoutMs, BiPredicate predicate) { return Future.succeededFuture(); } }; Checkpoint async = context.checkpoint(); - op.reconcile(sts1.getMetadata().getNamespace(), sts1.getMetadata().getName(), sts2) + op.reconcile(new Reconciliation("test", "kind", "namespace", "name"), sts1.getMetadata().getNamespace(), sts1.getMetadata().getName(), sts2) .onComplete(context.succeeding(rrState -> { verify(mockDeletable).delete(); async.flag(); @@ -484,18 +485,18 @@ public void testCascadingDeleteAsync(VertxTestContext context) { StatefulSetOperator op = new StatefulSetOperator(AbstractResourceOperatorTest.vertx, mockClient, 5_000L, podOperator, pvcOperator) { @Override - public Future maybeRollingUpdate(StatefulSet sts, Function> podNeedsRestart, Secret clusterCaSecret, Secret coKeySecret) { + public Future maybeRollingUpdate(Reconciliation reconciliation, StatefulSet sts, Function> podNeedsRestart, Secret clusterCaSecret, Secret coKeySecret) { return Future.succeededFuture(); } @Override - protected boolean shouldIncrementGeneration(StatefulSetDiff diff) { + protected boolean shouldIncrementGeneration(Reconciliation reconciliation, StatefulSetDiff diff) { return true; } }; Checkpoint async = context.checkpoint(); - 
op.deleteAsync(NAMESPACE, RESOURCE_NAME, true) + op.deleteAsync(new Reconciliation("test", "kind", "namespace", "name"), NAMESPACE, RESOURCE_NAME, true) .onComplete(context.succeeding(v -> context.verify(() -> { assertThat(cascadingCaptor.getValue(), is(DeletionPropagation.FOREGROUND)); async.flag(); @@ -529,18 +530,18 @@ public void testNonCascadingDeleteAsync(VertxTestContext context) { StatefulSetOperator op = new StatefulSetOperator(AbstractResourceOperatorTest.vertx, mockClient, 5_000L, podOperator, pvcOperator) { @Override - public Future maybeRollingUpdate(StatefulSet sts, Function> podNeedsRestart, Secret clusterCaSecret, Secret coKeySecret) { + public Future maybeRollingUpdate(Reconciliation reconciliation, StatefulSet sts, Function> podNeedsRestart, Secret clusterCaSecret, Secret coKeySecret) { return Future.succeededFuture(); } @Override - protected boolean shouldIncrementGeneration(StatefulSetDiff diff) { + protected boolean shouldIncrementGeneration(Reconciliation reconciliation, StatefulSetDiff diff) { return true; } }; Checkpoint a = context.checkpoint(); - op.deleteAsync(NAMESPACE, RESOURCE_NAME, false) + op.deleteAsync(new Reconciliation("test", "kind", "namespace", "name"), NAMESPACE, RESOURCE_NAME, false) .onComplete(context.succeeding(v -> context.verify(() -> { assertThat(cascadingCaptor.getValue(), is(DeletionPropagation.ORPHAN)); a.flag(); @@ -569,18 +570,18 @@ public void testDeleteAsyncNotDeleted(VertxTestContext context) { StatefulSetOperator op = new StatefulSetOperator(AbstractResourceOperatorTest.vertx, mockClient, 5_000L, podOperator, pvcOperator) { @Override - public Future maybeRollingUpdate(StatefulSet sts, Function> podNeedsRestart, Secret clusterCaSecret, Secret coKeySecret) { + public Future maybeRollingUpdate(Reconciliation reconciliation, StatefulSet sts, Function> podNeedsRestart, Secret clusterCaSecret, Secret coKeySecret) { return Future.succeededFuture(); } @Override - protected boolean shouldIncrementGeneration(StatefulSetDiff diff) { + protected boolean shouldIncrementGeneration(Reconciliation reconciliation, StatefulSetDiff diff) { return true; } }; Checkpoint a = context.checkpoint(); - op.deleteAsync(NAMESPACE, RESOURCE_NAME, false) + op.deleteAsync(new Reconciliation("test", "kind", "namespace", "name"), NAMESPACE, RESOURCE_NAME, false) .onComplete(context.failing(e -> a.flag())); } @@ -610,18 +611,18 @@ public void testDeleteAsyncFailing(VertxTestContext context) { StatefulSetOperator op = new StatefulSetOperator(AbstractResourceOperatorTest.vertx, mockClient, 5_000L, podOperator, pvcOperator) { @Override - public Future maybeRollingUpdate(StatefulSet sts, Function> podNeedsRestart, Secret clusterCaSecret, Secret coKeySecret) { + public Future maybeRollingUpdate(Reconciliation reconciliation, StatefulSet sts, Function> podNeedsRestart, Secret clusterCaSecret, Secret coKeySecret) { return Future.succeededFuture(); } @Override - protected boolean shouldIncrementGeneration(StatefulSetDiff diff) { + protected boolean shouldIncrementGeneration(Reconciliation reconciliation, StatefulSetDiff diff) { return true; } }; Checkpoint async = context.checkpoint(); - op.deleteAsync(NAMESPACE, RESOURCE_NAME, false) + op.deleteAsync(new Reconciliation("test", "kind", "namespace", "name"), NAMESPACE, RESOURCE_NAME, false) .onComplete(context.failing(e -> context.verify(() -> { assertThat(e, instanceOf(MockitoException.class)); assertThat(e.getMessage(), is("Something failed")); diff --git 
a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/resource/ZookeeperLeaderFinderTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/resource/ZookeeperLeaderFinderTest.java index a89847f3a6..c193aec227 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/resource/ZookeeperLeaderFinderTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/resource/ZookeeperLeaderFinderTest.java @@ -13,6 +13,7 @@ import io.strimzi.operator.cluster.model.Ca; import io.strimzi.operator.cluster.model.ZookeeperCluster; import io.strimzi.operator.common.BackOff; +import io.strimzi.operator.common.Reconciliation; import io.strimzi.operator.common.model.Labels; import io.strimzi.operator.common.operator.resource.SecretOperator; import io.vertx.core.Future; @@ -60,7 +61,7 @@ @ExtendWith(VertxExtension.class) public class ZookeeperLeaderFinderTest { - private static final Logger log = LogManager.getLogger(ZookeeperLeaderFinderTest.class); + private static final Logger LOGGER = LogManager.getLogger(ZookeeperLeaderFinderTest.class); public static final String NAMESPACE = "testns"; public static final String CLUSTER = "testcluster"; @@ -91,7 +92,7 @@ public TestingZookeeperLeaderFinder(Supplier backOffSupplier, int[] por } @Override - NetClientOptions clientOptions(Secret coCertKeySecret, Secret clusterCaCertificateSecret) { + NetClientOptions clientOptions(Reconciliation reconciliation, Secret coCertKeySecret, Secret clusterCaCertificateSecret) { return new NetClientOptions() .setKeyCertOptions(coCertificate.keyCertOptions()) .setTrustOptions(zkCertificate.trustOptions()) @@ -137,17 +138,17 @@ public void stop() { try { countDownLatch.await(10, TimeUnit.SECONDS); } catch (InterruptedException e) { - log.error("Failed to close zk instance {}", e); + LOGGER.error("Failed to close zk instance {}", e); } } public Future start() { Promise promise = Promise.promise(); - netServer.exceptionHandler(ex -> log.error(ex)) + netServer.exceptionHandler(ex -> LOGGER.error(ex)) .connectHandler(socket -> { - log.debug("ZK {}: client connection to {}, from {}", id, socket.localAddress(), socket.remoteAddress()); - socket.exceptionHandler(ex -> log.error(ex)); + LOGGER.debug("ZK {}: client connection to {}, from {}", id, socket.localAddress(), socket.remoteAddress()); + socket.exceptionHandler(ex -> LOGGER.error(ex)); StringBuffer sb = new StringBuffer(); socket.handler(buf -> { sb.append(buf.toString()); @@ -155,14 +156,14 @@ public Future start() { socket.write("vesvsebserb\n"); int attempt = attempts.getAndIncrement(); if (isLeader.apply(attempt)) { - log.debug("ZK {}: is leader on attempt {}", id, attempt); + LOGGER.debug("ZK {}: is leader on attempt {}", id, attempt); socket.write("Mode: "); socket.write("leader\n"); } else { - log.debug("ZK {}: is not leader on attempt {}", id, attempt); + LOGGER.debug("ZK {}: is not leader on attempt {}", id, attempt); } socket.write("vesvsebserb\n"); - log.debug("ZK {}: Sent response, closing", id); + LOGGER.debug("ZK {}: Sent response, closing", id); socket.close(); } }); @@ -186,7 +187,7 @@ private int[] startMockZks(VertxTestContext context, int num, BiFunction fn.apply(id, attempt)); zks.add(zk); zk.start().onComplete(context.succeeding(port -> { - log.debug("ZK {} listening on port {}", id, port); + LOGGER.debug("ZK {} listening on port {}", id, port); result[id] = port; async.countDown(); })); @@ -216,7 +217,7 @@ BackOff backoff() { public void 
test0PodsClusterReturnsUnknowLeader(VertxTestContext context) { ZookeeperLeaderFinder finder = new ZookeeperLeaderFinder(vertx, null, this::backoff); Checkpoint a = context.checkpoint(); - finder.findZookeeperLeader(CLUSTER, NAMESPACE, emptyList(), coKeySecret()) + finder.findZookeeperLeader(Reconciliation.DUMMY_RECONCILIATION, CLUSTER, NAMESPACE, emptyList(), coKeySecret()) .onComplete(context.succeeding(leader -> { context.verify(() -> assertThat(leader, is(Integer.valueOf(ZookeeperLeaderFinder.UNKNOWN_LEADER)))); a.flag(); @@ -228,7 +229,7 @@ public void test1PodClusterReturnsOnlyPodAsLeader(VertxTestContext context) { ZookeeperLeaderFinder finder = new ZookeeperLeaderFinder(vertx, null, this::backoff); Checkpoint a = context.checkpoint(); int firstPodIndex = 0; - finder.findZookeeperLeader(CLUSTER, NAMESPACE, asList(createPodWithId(firstPodIndex)), coKeySecret()) + finder.findZookeeperLeader(Reconciliation.DUMMY_RECONCILIATION, CLUSTER, NAMESPACE, asList(createPodWithId(firstPodIndex)), coKeySecret()) .onComplete(context.succeeding(leader -> { context.verify(() -> assertThat(leader, is(Integer.valueOf(firstPodIndex)))); a.flag(); @@ -261,7 +262,7 @@ public void testSecretWithMissingClusterOperatorKeyThrowsException(VertxTestCont Checkpoint a = context.checkpoint(); - finder.findZookeeperLeader(CLUSTER, NAMESPACE, asList(createPodWithId(0), createPodWithId(1)), secretWithMissingClusterOperatorKey) + finder.findZookeeperLeader(Reconciliation.DUMMY_RECONCILIATION, CLUSTER, NAMESPACE, asList(createPodWithId(0), createPodWithId(1)), secretWithMissingClusterOperatorKey) .onComplete(context.failing(e -> context.verify(() -> { assertThat(e, instanceOf(RuntimeException.class)); assertThat(e.getMessage(), @@ -299,7 +300,7 @@ public void testSecretsCorrupted(VertxTestContext context) { Checkpoint a = context.checkpoint(); - finder.findZookeeperLeader(CLUSTER, NAMESPACE, asList(createPodWithId(0), createPodWithId(1)), secretWithBadCertificate) + finder.findZookeeperLeader(Reconciliation.DUMMY_RECONCILIATION, CLUSTER, NAMESPACE, asList(createPodWithId(0), createPodWithId(1)), secretWithBadCertificate) .onComplete(context.failing(e -> context.verify(() -> { assertThat(e, instanceOf(RuntimeException.class)); assertThat(e.getMessage(), is("Bad/corrupt certificate found in data.cluster-operator\\.crt of Secret testcluster-cluster-operator-certs in namespace testns")); @@ -338,7 +339,7 @@ public void testReturnUnknownLeaderWhenMaxAttemptsExceeded(VertxTestContext cont ZookeeperLeaderFinder finder = new TestingZookeeperLeaderFinder(this::backoff, ports); Checkpoint a = context.checkpoint(); - finder.findZookeeperLeader(CLUSTER, NAMESPACE, + finder.findZookeeperLeader(Reconciliation.DUMMY_RECONCILIATION, CLUSTER, NAMESPACE, asList(createPodWithId(0), createPodWithId(1)), coKeySecret()) .onComplete(context.succeeding(leader -> context.verify(() -> { assertThat(leader, is(ZookeeperLeaderFinder.UNKNOWN_LEADER)); @@ -378,7 +379,7 @@ public void testReturnUnknownLeaderDuringNetworkExceptions(VertxTestContext cont ZookeeperLeaderFinder finder = new TestingZookeeperLeaderFinder(this::backoff, ports); Checkpoint a = context.checkpoint(); - finder.findZookeeperLeader(CLUSTER, NAMESPACE, + finder.findZookeeperLeader(Reconciliation.DUMMY_RECONCILIATION, CLUSTER, NAMESPACE, asList(createPodWithId(0), createPodWithId(1)), coKeySecret()) .onComplete(context.succeeding(leader -> context.verify(() -> { assertThat(leader, is(ZookeeperLeaderFinder.UNKNOWN_LEADER)); @@ -418,7 +419,7 @@ public void 
testFinderHandlesFailureByLeaderFoundOnThirdAttempt(VertxTestContext TestingZookeeperLeaderFinder finder = new TestingZookeeperLeaderFinder(this::backoff, ports); Checkpoint a = context.checkpoint(); - finder.findZookeeperLeader(CLUSTER, NAMESPACE, asList(createPodWithId(0), createPodWithId(1)), coKeySecret()) + finder.findZookeeperLeader(Reconciliation.DUMMY_RECONCILIATION, CLUSTER, NAMESPACE, asList(createPodWithId(0), createPodWithId(1)), coKeySecret()) .onComplete(context.succeeding(leader -> context.verify(() -> { assertThat(leader, is(desiredLeaderId)); for (FakeZk zk : zks) { @@ -456,7 +457,7 @@ public void testLeaderFoundFirstAttempt(VertxTestContext context) throws Interru ZookeeperLeaderFinder finder = new TestingZookeeperLeaderFinder(this::backoff, ports); Checkpoint a = context.checkpoint(); - finder.findZookeeperLeader(CLUSTER, NAMESPACE, asList(createPodWithId(0), createPodWithId(1)), coKeySecret()) + finder.findZookeeperLeader(Reconciliation.DUMMY_RECONCILIATION, CLUSTER, NAMESPACE, asList(createPodWithId(0), createPodWithId(1)), coKeySecret()) .onComplete(context.succeeding(l -> context.verify(() -> { assertThat(l, is(leader)); for (FakeZk zk : zks) { diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/resource/ZookeeperScalerTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/resource/ZookeeperScalerTest.java index d1156a1d68..791d5f94d3 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/resource/ZookeeperScalerTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/resource/ZookeeperScalerTest.java @@ -7,6 +7,7 @@ import io.fabric8.kubernetes.api.model.Secret; import io.fabric8.kubernetes.api.model.SecretBuilder; import io.strimzi.operator.cluster.model.Ca; +import io.strimzi.operator.common.Reconciliation; import io.strimzi.operator.common.operator.MockCertManager; import io.vertx.core.Vertx; import io.vertx.junit5.Checkpoint; @@ -186,7 +187,7 @@ public ZooKeeperAdmin createZookeeperAdmin(String connectString, int sessionTime } }; - ZookeeperScaler scaler = new ZookeeperScaler(vertx, zooKeeperAdminProvider, "zookeeper:2181", null, dummyCaSecret, dummyCoSecret, 1_000); + ZookeeperScaler scaler = new ZookeeperScaler(new Reconciliation("test", "TestResource", "my-namespace", "my-resource"), vertx, zooKeeperAdminProvider, "zookeeper:2181", null, dummyCaSecret, dummyCoSecret, 1_000); Checkpoint check = context.checkpoint(); scaler.scale(5).onComplete(context.failing(cause -> context.verify(() -> { @@ -213,7 +214,7 @@ public ZooKeeperAdmin createZookeeperAdmin(String connectString, int sessionTime } }; - ZookeeperScaler scaler = new ZookeeperScaler(vertx, zooKeeperAdminProvider, "zookeeper:2181", zkNodeAddress, dummyCaSecret, dummyCoSecret, 1_000); + ZookeeperScaler scaler = new ZookeeperScaler(new Reconciliation("test", "TestResource", "my-namespace", "my-resource"), vertx, zooKeeperAdminProvider, "zookeeper:2181", zkNodeAddress, dummyCaSecret, dummyCoSecret, 1_000); Checkpoint check = context.checkpoint(); scaler.scale(1).onComplete(context.succeeding(res -> context.verify(() -> { @@ -245,7 +246,7 @@ public ZooKeeperAdmin createZookeeperAdmin(String connectString, int sessionTime } }; - ZookeeperScaler scaler = new ZookeeperScaler(vertx, zooKeeperAdminProvider, "zookeeper:2181", zkNodeAddress, dummyCaSecret, dummyCoSecret, 1_000); + ZookeeperScaler scaler = new ZookeeperScaler(new Reconciliation("test", "TestResource", "my-namespace", "my-resource"), vertx, 
zooKeeperAdminProvider, "zookeeper:2181", zkNodeAddress, dummyCaSecret, dummyCoSecret, 1_000); Checkpoint check = context.checkpoint(); scaler.scale(1).onComplete(context.succeeding(res -> context.verify(() -> { @@ -274,7 +275,7 @@ public ZooKeeperAdmin createZookeeperAdmin(String connectString, int sessionTime } }; - ZookeeperScaler scaler = new ZookeeperScaler(vertx, zooKeeperAdminProvider, "zookeeper:2181", zkNodeAddress, dummyCaSecret, dummyCoSecret, 1_000); + ZookeeperScaler scaler = new ZookeeperScaler(new Reconciliation("test", "TestResource", "my-namespace", "my-resource"), vertx, zooKeeperAdminProvider, "zookeeper:2181", zkNodeAddress, dummyCaSecret, dummyCoSecret, 1_000); Checkpoint check = context.checkpoint(); scaler.scale(1).onComplete(context.failing(cause -> context.verify(() -> { @@ -286,7 +287,7 @@ public ZooKeeperAdmin createZookeeperAdmin(String connectString, int sessionTime @Test public void testConnectionToNonExistingHost(VertxTestContext context) { - ZookeeperScaler scaler = new ZookeeperScaler(vertx, new DefaultZooKeeperAdminProvider(), "i-do-not-exist.com:2181", null, dummyCaSecret, dummyCoSecret, 2_000); + ZookeeperScaler scaler = new ZookeeperScaler(new Reconciliation("test", "TestResource", "my-namespace", "my-resource"), vertx, new DefaultZooKeeperAdminProvider(), "i-do-not-exist.com:2181", null, dummyCaSecret, dummyCoSecret, 2_000); Checkpoint check = context.checkpoint(); scaler.scale(5).onComplete(context.failing(cause -> context.verify(() -> { @@ -309,7 +310,7 @@ public ZooKeeperAdmin createZookeeperAdmin(String connectString, int sessionTime } }; - ZookeeperScaler scaler = new ZookeeperScaler(vertx, zooKeeperAdminProvider, "zookeeper:2181", null, dummyCaSecret, dummyCoSecret, 1_000); + ZookeeperScaler scaler = new ZookeeperScaler(new Reconciliation("test", "TestResource", "my-namespace", "my-resource"), vertx, zooKeeperAdminProvider, "zookeeper:2181", null, dummyCaSecret, dummyCoSecret, 1_000); Checkpoint check = context.checkpoint(); scaler.scale(5).onComplete(context.failing(cause -> context.verify(() -> { diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/resource/ZookeeperSetOperatorTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/resource/ZookeeperSetOperatorTest.java index 8e2c6c4535..91f4e42b3a 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/resource/ZookeeperSetOperatorTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/resource/ZookeeperSetOperatorTest.java @@ -10,6 +10,7 @@ import io.strimzi.operator.cluster.ResourceUtils; import io.strimzi.operator.cluster.model.KafkaVersion; import io.strimzi.operator.cluster.model.ZookeeperCluster; +import io.strimzi.operator.common.Reconciliation; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -30,8 +31,8 @@ public class ZookeeperSetOperatorTest { @BeforeEach public void before() { KafkaVersion.Lookup versions = new KafkaVersion.Lookup(emptyMap(), emptyMap(), emptyMap(), emptyMap(), emptyMap()); - a = ZookeeperCluster.fromCrd(getResource(), versions).generateStatefulSet(true, null, null); - b = ZookeeperCluster.fromCrd(getResource(), versions).generateStatefulSet(true, null, null); + a = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, getResource(), versions).generateStatefulSet(true, null, null); + b = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, getResource(), versions).generateStatefulSet(true, null, null); } private 
Kafka getResource() { @@ -45,18 +46,18 @@ private Kafka getResource() { } private StatefulSetDiff diff() { - return new StatefulSetDiff(a, b); + return new StatefulSetDiff(Reconciliation.DUMMY_RECONCILIATION, a, b); } @Test public void testNotNeedsRollingUpdateIdentical() { - assertThat(ZookeeperSetOperator.needsRollingUpdate(diff()), is(false)); + assertThat(ZookeeperSetOperator.needsRollingUpdate(Reconciliation.DUMMY_RECONCILIATION, diff()), is(false)); } @Test public void testNotNeedsRollingUpdateReplicas() { a.getSpec().setReplicas(b.getSpec().getReplicas() + 1); - assertThat(ZookeeperSetOperator.needsRollingUpdate(diff()), is(false)); + assertThat(ZookeeperSetOperator.needsRollingUpdate(Reconciliation.DUMMY_RECONCILIATION, diff()), is(false)); } @Test @@ -64,28 +65,28 @@ public void testNeedsRollingUpdateLabels() { Map labels = new HashMap<>(b.getMetadata().getLabels()); labels.put("foo", "bar"); a.getMetadata().setLabels(labels); - assertThat(ZookeeperSetOperator.needsRollingUpdate(diff()), is(true)); + assertThat(ZookeeperSetOperator.needsRollingUpdate(Reconciliation.DUMMY_RECONCILIATION, diff()), is(true)); } @Test public void testNeedsRollingUpdateImage() { a.getSpec().getTemplate().getSpec().getContainers().get(0).setImage( a.getSpec().getTemplate().getSpec().getContainers().get(0).getImage() + "-foo"); - assertThat(ZookeeperSetOperator.needsRollingUpdate(diff()), is(true)); + assertThat(ZookeeperSetOperator.needsRollingUpdate(Reconciliation.DUMMY_RECONCILIATION, diff()), is(true)); } @Test public void testNeedsRollingUpdateReadinessDelay() { a.getSpec().getTemplate().getSpec().getContainers().get(0).getReadinessProbe().setInitialDelaySeconds( a.getSpec().getTemplate().getSpec().getContainers().get(0).getReadinessProbe().getInitialDelaySeconds() + 1); - assertThat(ZookeeperSetOperator.needsRollingUpdate(diff()), is(true)); + assertThat(ZookeeperSetOperator.needsRollingUpdate(Reconciliation.DUMMY_RECONCILIATION, diff()), is(true)); } @Test public void testNeedsRollingUpdateReadinessTimeout() { a.getSpec().getTemplate().getSpec().getContainers().get(0).getReadinessProbe().setTimeoutSeconds( a.getSpec().getTemplate().getSpec().getContainers().get(0).getReadinessProbe().getTimeoutSeconds() + 1); - assertThat(ZookeeperSetOperator.needsRollingUpdate(diff()), is(true)); + assertThat(ZookeeperSetOperator.needsRollingUpdate(Reconciliation.DUMMY_RECONCILIATION, diff()), is(true)); } @Test @@ -93,7 +94,7 @@ public void testNeedsRollingUpdateEnvZkMetricsEnabled() { String envVar = ENV_VAR_ZOOKEEPER_METRICS_ENABLED; a.getSpec().getTemplate().getSpec().getContainers().get(0).getEnv().add(new EnvVar(envVar, containerEnvVars(a.getSpec().getTemplate().getSpec().getContainers().get(0)).get(envVar) + "-foo", null)); - assertThat(ZookeeperSetOperator.needsRollingUpdate(diff()), is(true)); + assertThat(ZookeeperSetOperator.needsRollingUpdate(Reconciliation.DUMMY_RECONCILIATION, diff()), is(true)); } @Test @@ -101,6 +102,6 @@ public void testNeedsRollingUpdateEnvSomeOtherThing() { String envVar = "SOME_RANDOM_ENV"; a.getSpec().getTemplate().getSpec().getContainers().get(0).getEnv().add(new EnvVar(envVar, "foo", null)); - assertThat(ZookeeperSetOperator.needsRollingUpdate(diff()), is(true)); + assertThat(ZookeeperSetOperator.needsRollingUpdate(Reconciliation.DUMMY_RECONCILIATION, diff()), is(true)); } } diff --git a/kafka-init/src/main/java/io/strimzi/kafka/init/InitWriter.java b/kafka-init/src/main/java/io/strimzi/kafka/init/InitWriter.java index 83ca531e4b..8f172309ae 100644 --- 
a/kafka-init/src/main/java/io/strimzi/kafka/init/InitWriter.java +++ b/kafka-init/src/main/java/io/strimzi/kafka/init/InitWriter.java @@ -20,7 +20,7 @@ public class InitWriter { - private static final Logger log = LogManager.getLogger(InitWriter.class); + private static final Logger LOGGER = LogManager.getLogger(InitWriter.class); private KubernetesClient client; private InitWriterConfig config; @@ -41,12 +41,12 @@ public InitWriter(KubernetesClient client, InitWriterConfig config) { public boolean writeRack() { Map nodeLabels = client.nodes().withName(config.getNodeName()).get().getMetadata().getLabels(); - log.info("NodeLabels = {}", nodeLabels); + LOGGER.info("NodeLabels = {}", nodeLabels); String rackId = nodeLabels.get(config.getRackTopologyKey()); - log.info("Rack: {} = {}", config.getRackTopologyKey(), rackId); + LOGGER.info("Rack: {} = {}", config.getRackTopologyKey(), rackId); if (rackId == null) { - log.error("Node {} doesn't have the label {} for getting the rackid", + LOGGER.error("Node {} doesn't have the label {} for getting the rackid", config.getNodeName(), config.getRackTopologyKey()); return false; } @@ -66,16 +66,16 @@ public boolean writeExternalAddress() { String address = NodeUtils.findAddress(addresses, null); if (address == null) { - log.error("External address not found"); + LOGGER.error("External address not found"); return false; } else { - log.info("Default External address found {}", address); + LOGGER.info("Default External address found {}", address); externalAddresses.append(externalAddressExport(null, address)); } for (NodeAddressType type : NodeAddressType.values()) { address = NodeUtils.findAddress(addresses, type); - log.info("External {} address found {}", type.toValue(), address); + LOGGER.info("External {} address found {}", type.toValue(), address); externalAddresses.append(externalAddressExport(type, address)); } @@ -115,14 +115,14 @@ private boolean write(String file, String information) { writer.write(information); if (writer.checkError()) { - log.error("Failed to write the information {} to file {}", information, file); + LOGGER.error("Failed to write the information {} to file {}", information, file); isWritten = false; } else { - log.info("Information {} written successfully to file {}", information, file); + LOGGER.info("Information {} written successfully to file {}", information, file); isWritten = true; } } catch (IOException e) { - log.error("Error writing the information {} to file {}", information, file, e); + LOGGER.error("Error writing the information {} to file {}", information, file, e); isWritten = false; } diff --git a/kafka-init/src/main/java/io/strimzi/kafka/init/Main.java b/kafka-init/src/main/java/io/strimzi/kafka/init/Main.java index 1a52d07791..3181503f70 100644 --- a/kafka-init/src/main/java/io/strimzi/kafka/init/Main.java +++ b/kafka-init/src/main/java/io/strimzi/kafka/init/Main.java @@ -11,16 +11,16 @@ public class Main { - private static final Logger log = LogManager.getLogger(Main.class); + private static final Logger LOGGER = LogManager.getLogger(Main.class); public static void main(String[] args) { - log.info("Init-kafka {} is starting", Main.class.getPackage().getImplementationVersion()); + LOGGER.info("Init-kafka {} is starting", Main.class.getPackage().getImplementationVersion()); InitWriterConfig config = InitWriterConfig.fromMap(System.getenv()); KubernetesClient client = new DefaultKubernetesClient(); - log.info("Init-kafka started with config: {}", config); + LOGGER.info("Init-kafka started with config: {}", 
config); InitWriter writer = new InitWriter(client, config); diff --git a/operator-common/src/main/java/io/strimzi/operator/PlatformFeaturesAvailability.java b/operator-common/src/main/java/io/strimzi/operator/PlatformFeaturesAvailability.java index 095f361f17..da125a6447 100644 --- a/operator-common/src/main/java/io/strimzi/operator/PlatformFeaturesAvailability.java +++ b/operator-common/src/main/java/io/strimzi/operator/PlatformFeaturesAvailability.java @@ -23,7 +23,7 @@ * Gives a info about certain features availability regarding to kubernetes version */ public class PlatformFeaturesAvailability { - private static final Logger log = LogManager.getLogger(PlatformFeaturesAvailability.class.getName()); + private static final Logger LOGGER = LogManager.getLogger(PlatformFeaturesAvailability.class.getName()); private boolean routes = false; private boolean builds = false; @@ -67,7 +67,7 @@ private static OkHttpClient getOkHttpClient(KubernetesClient client) { if (client.isAdaptable(OkHttpClient.class)) { return client.adapt(OkHttpClient.class); } else { - log.error("Cannot adapt KubernetesClient to OkHttpClient"); + LOGGER.error("Cannot adapt KubernetesClient to OkHttpClient"); throw new RuntimeException("Cannot adapt KubernetesClient to OkHttpClient"); } } @@ -149,7 +149,7 @@ private static Future getVersionInfoFromKubernetes(Vertx vertx, Kub try { request.complete(client.getVersion()); } catch (Exception e) { - log.error("Detection of Kubernetes version failed.", e); + LOGGER.error("Detection of Kubernetes version failed.", e); request.fail(e); } }, promise); @@ -166,17 +166,17 @@ private static Future checkApiAvailability(Vertx vertx, OkHttpClient ht Response resp = httpClient.newCall(new Request.Builder().get().url(masterUrl + "apis/" + api + "/" + version).build()).execute(); if (resp.code() >= 200 && resp.code() < 300) { - log.debug("{} returned {}. This API is supported.", resp.request().url(), resp.code()); + LOGGER.debug("{} returned {}. This API is supported.", resp.request().url(), resp.code()); isSupported = true; } else { - log.debug("{} returned {}. This API is not supported.", resp.request().url(), resp.code()); + LOGGER.debug("{} returned {}. This API is not supported.", resp.request().url(), resp.code()); isSupported = false; } resp.close(); request.complete(isSupported); } catch (Exception e) { - log.error("Detection of {}/{} API failed. This API will be disabled.", api, version, e); + LOGGER.error("Detection of {}/{} API failed. 
This API will be disabled.", api, version, e); request.complete(false); } }, promise); diff --git a/operator-common/src/main/java/io/strimzi/operator/cluster/model/Ca.java b/operator-common/src/main/java/io/strimzi/operator/cluster/model/Ca.java index 78da0c9b68..1427e27558 100644 --- a/operator-common/src/main/java/io/strimzi/operator/cluster/model/Ca.java +++ b/operator-common/src/main/java/io/strimzi/operator/cluster/model/Ca.java @@ -14,10 +14,9 @@ import io.strimzi.certs.Subject; import io.strimzi.operator.common.Annotations; import io.strimzi.operator.common.PasswordGenerator; +import io.strimzi.operator.common.Reconciliation; +import io.strimzi.operator.common.ReconciliationLogger; import io.strimzi.operator.common.Util; -import org.apache.logging.log4j.Level; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import java.io.ByteArrayInputStream; import java.io.File; @@ -63,7 +62,7 @@ @SuppressWarnings("checkstyle:CyclomaticComplexity") public abstract class Ca { - protected static final Logger log = LogManager.getLogger(Ca.class); + protected static final ReconciliationLogger LOGGER = ReconciliationLogger.create(Ca.class); private static final DateTimeFormatter DATE_TIME_FORMATTER = new DateTimeFormatterBuilder() .appendValue(YEAR, 4, 10, SignStyle.EXCEEDS_PAD) @@ -98,6 +97,7 @@ public abstract class Ca { public static final int INIT_GENERATION = 0; private final PasswordGenerator passwordGenerator; + protected final Reconciliation reconciliation; /** * Set the {@code strimzi.io/force-renew} annotation on the given {@code caCert} if the given {@code caKey} has @@ -191,10 +191,11 @@ public String postDescription(String keySecretName, String certSecretName) { private boolean caCertsRemoved; private final CertificateExpirationPolicy policy; - public Ca(CertManager certManager, PasswordGenerator passwordGenerator, String commonName, + public Ca(Reconciliation reconciliation, CertManager certManager, PasswordGenerator passwordGenerator, String commonName, String caCertSecretName, Secret caCertSecret, String caKeySecretName, Secret caKeySecret, int validityDays, int renewalDays, boolean generateCa, CertificateExpirationPolicy policy) { + this.reconciliation = reconciliation; this.commonName = commonName; this.caCertSecret = caCertSecret; this.caCertSecretName = caCertSecretName; @@ -209,9 +210,9 @@ public Ca(CertManager certManager, PasswordGenerator passwordGenerator, String c this.renewalType = RenewalType.NOOP; } - private static void delete(File file) { + private static void delete(Reconciliation reconciliation, File file) { if (!file.delete()) { - log.warn("{} cannot be deleted", file.getName()); + LOGGER.warnCr(reconciliation, "{} cannot be deleted", file.getName()); } } @@ -220,6 +221,7 @@ private static void delete(File file) { * or null if the given {@code secret} is null. * An exception is thrown if the given {@code secret} is non-null, but does not contain the given * entries in its {@code data}. + * * @param secret The secret. * @param key The key. * @param cert The cert. 
@@ -268,16 +270,16 @@ public CertAndKey addKeyAndCertToKeyStore(String alias, byte[] key, byte[] cert) Files.readAllBytes(keyStoreFile.toPath()), keyStorePassword); - delete(keyFile); - delete(certFile); - delete(keyStoreFile); + delete(reconciliation, keyFile); + delete(reconciliation, certFile); + delete(reconciliation, keyStoreFile); return result; } /*test*/ CertAndKey generateSignedCert(Subject subject, - File csrFile, File keyFile, File certFile, File keyStoreFile) throws IOException { - log.debug("Generating certificate {} with SAN {}, signed by CA {}", subject, subject.subjectAltNames(), this); + File csrFile, File keyFile, File certFile, File keyStoreFile) throws IOException { + LOGGER.debugCr(reconciliation, "Generating certificate {} with SAN {}, signed by CA {}", subject, subject.subjectAltNames(), this); certManager.generateCsr(keyFile, csrFile, subject); certManager.generateCert(csrFile, currentCaKey(), currentCaCertBytes(), @@ -296,6 +298,7 @@ public CertAndKey addKeyAndCertToKeyStore(String alias, byte[] key, byte[] cert) /** * Generates a certificate signed by this CA + * * @param commonName The CN of the certificate to be generated. * @return The CertAndKey * @throws IOException If the cert could not be generated. @@ -306,6 +309,7 @@ public CertAndKey generateSignedCert(String commonName) throws IOException { /** * Generates a certificate signed by this CA + * * @param commonName The CN of the certificate to be generated. * @param organization The O of the certificate to be generated. May be null. * @return The CertAndKey @@ -328,10 +332,10 @@ public CertAndKey generateSignedCert(String commonName, String organization) thr CertAndKey result = generateSignedCert(subject, csrFile, keyFile, certFile, keyStoreFile); - delete(csrFile); - delete(keyFile); - delete(certFile); - delete(keyStoreFile); + delete(reconciliation, csrFile); + delete(reconciliation, keyFile); + delete(reconciliation, certFile); + delete(reconciliation, keyStoreFile); return result; } @@ -340,6 +344,7 @@ public CertAndKey generateSignedCert(String commonName, String organization) thr * and maybe generate new ones for new replicas (i.e. scale-up). 
*/ protected Map maybeCopyOrGenerateCerts( + Reconciliation reconciliation, int replicas, Function subjectFn, Secret secret, @@ -364,7 +369,7 @@ protected Map maybeCopyOrGenerateCerts( // scale down -> it will copy just the requested number of replicas for (int i = 0; i < replicasInNewSecret; i++) { String podName = podNameFn.apply(i); - log.debug("Certificate for {} already exists", podName); + LOGGER.debugCr(reconciliation, "Certificate for {} already exists", podName); Subject subject = subjectFn.apply(i); CertAndKey certAndKey; @@ -398,7 +403,7 @@ protected Map maybeCopyOrGenerateCerts( } if (!reasons.isEmpty()) { - log.debug("Certificate for pod {} need to be regenerated because: {}", podName, String.join(", ", reasons)); + LOGGER.debugCr(reconciliation, "Certificate for pod {} need to be regenerated because: {}", podName, String.join(", ", reasons)); CertAndKey newCertAndKey = generateSignedCert(subject, brokerCsrFile, brokerKeyFile, brokerCertFile, brokerKeyStoreFile); certs.put(podName, newCertAndKey); @@ -413,15 +418,15 @@ protected Map maybeCopyOrGenerateCerts( for (int i = replicasInSecret; i < replicas; i++) { String podName = podNameFn.apply(i); - log.debug("Certificate for {} to generate", podName); + LOGGER.debugCr(reconciliation, "Certificate for {} to generate", podName); CertAndKey k = generateSignedCert(subjectFn.apply(i), brokerCsrFile, brokerKeyFile, brokerCertFile, brokerKeyStoreFile); certs.put(podName, k); } - delete(brokerCsrFile); - delete(brokerKeyFile); - delete(brokerCertFile); - delete(brokerKeyStoreFile); + delete(reconciliation, brokerCsrFile); + delete(reconciliation, brokerKeyFile); + delete(reconciliation, brokerCertFile); + delete(reconciliation, brokerKeyStoreFile); return certs; } @@ -441,7 +446,7 @@ public boolean isExpiring(Secret secret, String certKey) { isExpiring = certNeedsRenewal(currentCert); } catch (RuntimeException e) { // TODO: We should mock the certificates properly so that this doesn't fail in tests (not now => long term :-o) - log.debug("Failed to parse existing certificate", e); + LOGGER.debugCr(reconciliation, "Failed to parse existing certificate", e); } return isExpiring; @@ -460,10 +465,10 @@ public boolean isExpiring(Secret secret, String certKey) { Collection currentAltNames = getSubjectAltNames(certAndKey.cert()); if (currentAltNames != null && desiredAltNames.containsAll(currentAltNames) && currentAltNames.containsAll(desiredAltNames)) { - log.trace("Alternate subjects match. No need to refresh cert for pod {}.", podName); + LOGGER.traceCr(reconciliation, "Alternate subjects match. 
No need to refresh cert for pod {}.", podName); return false; } else { - log.debug("Alternate subjects for pod {} differ - current: {}; desired: {}", podName, currentAltNames, desiredAltNames); + LOGGER.debugCr(reconciliation, "Alternate subjects for pod {} differ - current: {}; desired: {}", podName, currentAltNames, desiredAltNames); return true; } } @@ -486,7 +491,7 @@ protected List getSubjectAltNames(byte[] certificate) { .collect(Collectors.toList()); } catch (CertificateException | RuntimeException e) { // TODO: We should mock the certificates properly so that this doesn't fail in tests (not now => long term :-o) - log.debug("Failed to parse existing certificate", e); + LOGGER.debugCr(reconciliation, "Failed to parse existing certificate", e); } return subjectAltNames; @@ -517,7 +522,7 @@ public void createRenewOrReplace(String namespace, String clusterName, Map(1); @@ -554,10 +559,10 @@ public void createRenewOrReplace(String namespace, String clusterName, Map newData) { Instant expiryDate = cert.getNotAfter().toInstant(); remove = expiryDate.isBefore(Instant.now()); if (remove) { - log.debug("The certificate (data.{}) in Secret expired {}; removing it", + LOGGER.debugCr(reconciliation, "The certificate (data.{}) in Secret expired {}; removing it", certName.replace(".", "\\."), expiryDate); } } catch (CertificateException e) { @@ -776,12 +784,12 @@ private int removeExpiredCerts(Map newData) { // doesn't remove stores and related password if (!certName.endsWith(".p12") && !certName.endsWith(".password")) { remove = true; - log.debug("The certificate (data.{}) in Secret is not an X.509 certificate; removing it", + LOGGER.debugCr(reconciliation, "The certificate (data.{}) in Secret is not an X.509 certificate; removing it", certName.replace(".", "\\.")); } } if (remove) { - log.debug("Removing data.{} from Secret", + LOGGER.debugCr(reconciliation, "Removing data.{} from Secret", certName.replace(".", "\\.")); iter.remove(); removed.add(certName); @@ -798,7 +806,7 @@ private int removeExpiredCerts(Map newData) { certManager.deleteFromTrustStore(removed, trustStoreFile, trustStorePassword); newData.put(CA_STORE, Base64.getEncoder().encodeToString(Files.readAllBytes(trustStoreFile.toPath()))); } finally { - delete(trustStoreFile); + delete(reconciliation, trustStoreFile); } } catch (IOException | CertificateException | KeyStoreException | NoSuchAlgorithmException e) { throw new RuntimeException(e); @@ -810,7 +818,7 @@ private int removeExpiredCerts(Map newData) { public boolean certNeedsRenewal(X509Certificate cert) { Date notAfter = cert.getNotAfter(); - log.trace("Certificate {} expires on {}", cert.getSubjectDN(), notAfter); + LOGGER.traceCr(reconciliation, "Certificate {} expires on {}", cert.getSubjectDN(), notAfter); long msTillExpired = notAfter.getTime() - System.currentTimeMillis(); return msTillExpired < renewalDays * 24L * 60L * 60L * 1000L; } @@ -880,10 +888,10 @@ private void addCertCaToTrustStore(String alias, Map certData) { certData.put(CA_STORE, Base64.getEncoder().encodeToString(Files.readAllBytes(trustStoreFile.toPath()))); certData.put(CA_STORE_PASSWORD, Base64.getEncoder().encodeToString(trustStorePassword.getBytes(StandardCharsets.US_ASCII))); } finally { - delete(trustStoreFile); + delete(reconciliation, trustStoreFile); } } finally { - delete(certFile); + delete(reconciliation, certFile); } } catch (IOException | CertificateException | KeyStoreException | NoSuchAlgorithmException e) { @@ -893,7 +901,7 @@ private void addCertCaToTrustStore(String alias, Map 
certData) { private void generateCaKeyAndCert(Subject subject, Map keyData, Map certData) { try { - log.debug("Generating CA with subject={}", subject); + LOGGER.debugCr(reconciliation, "Generating CA with subject={}", subject); File keyFile = File.createTempFile("tls", subject.commonName() + "-key"); try { File certFile = File.createTempFile("tls", subject.commonName() + "-cert"); @@ -921,13 +929,13 @@ private void generateCaKeyAndCert(Subject subject, Map keyData, certData.put(CA_STORE, ca.trustStoreAsBase64String()); certData.put(CA_STORE_PASSWORD, ca.storePasswordAsBase64String()); } finally { - delete(trustStoreFile); + delete(reconciliation, trustStoreFile); } } finally { - delete(certFile); + delete(reconciliation, certFile); } } finally { - delete(keyFile); + delete(reconciliation, keyFile); } } catch (IOException | CertificateException | KeyStoreException | NoSuchAlgorithmException e) { throw new RuntimeException(e); @@ -936,7 +944,7 @@ private void generateCaKeyAndCert(Subject subject, Map keyData, private void renewCaCert(Subject subject, Map certData) { try { - log.debug("Renewing CA with subject={}, org={}", subject); + LOGGER.debugCr(reconciliation, "Renewing CA with subject={}, org={}", subject); Base64.Decoder decoder = Base64.getDecoder(); byte[] bytes = decoder.decode(caKeySecret.getData().get(CA_KEY)); @@ -960,13 +968,13 @@ private void renewCaCert(Subject subject, Map certData) { certData.put(CA_STORE, ca.trustStoreAsBase64String()); certData.put(CA_STORE_PASSWORD, ca.storePasswordAsBase64String()); } finally { - delete(trustStoreFile); + delete(reconciliation, trustStoreFile); } } finally { - delete(certFile); + delete(reconciliation, certFile); } } finally { - delete(keyFile); + delete(reconciliation, keyFile); } } catch (IOException | CertificateException | KeyStoreException | NoSuchAlgorithmException e) { throw new RuntimeException(e); diff --git a/operator-common/src/main/java/io/strimzi/operator/cluster/model/ClientsCa.java b/operator-common/src/main/java/io/strimzi/operator/cluster/model/ClientsCa.java index ac7d0ca2d1..aeeed85802 100644 --- a/operator-common/src/main/java/io/strimzi/operator/cluster/model/ClientsCa.java +++ b/operator-common/src/main/java/io/strimzi/operator/cluster/model/ClientsCa.java @@ -8,15 +8,16 @@ import io.strimzi.api.kafka.model.CertificateExpirationPolicy; import io.strimzi.certs.CertManager; import io.strimzi.operator.common.PasswordGenerator; +import io.strimzi.operator.common.Reconciliation; public class ClientsCa extends Ca { - public ClientsCa(CertManager certManager, PasswordGenerator passwordGenerator, String caCertSecretName, Secret clientsCaCert, + public ClientsCa(Reconciliation reconciliation, CertManager certManager, PasswordGenerator passwordGenerator, String caCertSecretName, Secret clientsCaCert, String caSecretKeyName, Secret clientsCaKey, int validityDays, int renewalDays, boolean generateCa, CertificateExpirationPolicy policy) { - super(certManager, passwordGenerator, "clients-ca", - caCertSecretName, forceRenewal(clientsCaCert, clientsCaKey, "clients-ca.key"), - caSecretKeyName, adapt060ClientsCaSecret(clientsCaKey), - validityDays, renewalDays, generateCa, policy); + super(reconciliation, certManager, passwordGenerator, + "clients-ca", caCertSecretName, + forceRenewal(clientsCaCert, clientsCaKey, "clients-ca.key"), caSecretKeyName, + adapt060ClientsCaSecret(clientsCaKey), validityDays, renewalDays, generateCa, policy); } /** diff --git a/operator-common/src/main/java/io/strimzi/operator/cluster/model/NodeUtils.java 
b/operator-common/src/main/java/io/strimzi/operator/cluster/model/NodeUtils.java index ae8d4ee7b2..96d42d96e2 100644 --- a/operator-common/src/main/java/io/strimzi/operator/cluster/model/NodeUtils.java +++ b/operator-common/src/main/java/io/strimzi/operator/cluster/model/NodeUtils.java @@ -6,15 +6,14 @@ import io.fabric8.kubernetes.api.model.NodeAddress; import io.strimzi.api.kafka.model.listener.NodeAddressType; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; +import io.strimzi.operator.common.ReconciliationLogger; import java.util.List; import java.util.Map; import java.util.stream.Collectors; public class NodeUtils { - private static final Logger log = LogManager.getLogger(NodeUtils.class); + private static final ReconciliationLogger LOGGER = ReconciliationLogger.create(NodeUtils.class); /** * Tries to find the right address of the node. The different addresses has different prioprities: @@ -36,7 +35,7 @@ public static String findAddress(List addresses, NodeAddressType pr Map addressMap = addresses.stream() .collect(Collectors.toMap(NodeAddress::getType, NodeAddress::getAddress, (address1, address2) -> { - log.warn("Found multiple addresses with the same type. Only the first address '{}' will be used.", address1); + LOGGER.warnOp("Found multiple addresses with the same type. Only the first address '{}' will be used.", address1); return address1; })); diff --git a/operator-common/src/main/java/io/strimzi/operator/cluster/model/StatusDiff.java b/operator-common/src/main/java/io/strimzi/operator/cluster/model/StatusDiff.java index 35f1d630dd..2af9f0d557 100644 --- a/operator-common/src/main/java/io/strimzi/operator/cluster/model/StatusDiff.java +++ b/operator-common/src/main/java/io/strimzi/operator/cluster/model/StatusDiff.java @@ -9,9 +9,8 @@ import com.fasterxml.jackson.databind.SerializationFeature; import io.fabric8.zjsonpatch.JsonDiff; import io.strimzi.api.kafka.model.status.Status; +import io.strimzi.operator.common.ReconciliationLogger; import io.strimzi.operator.common.operator.resource.AbstractJsonDiff; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import java.util.regex.Pattern; @@ -19,7 +18,7 @@ public class StatusDiff extends AbstractJsonDiff { - private static final Logger log = LogManager.getLogger(StatusDiff.class.getName()); + private static final ReconciliationLogger LOGGER = ReconciliationLogger.create(StatusDiff.class.getName()); // use SerializationFeature.ORDER_MAP_ENTRIES_BY_KEYS just for better human readability in the logs public static final ObjectMapper PATCH_MAPPER = patchMapper().copy().configure(SerializationFeature.ORDER_MAP_ENTRIES_BY_KEYS, true); @@ -40,15 +39,13 @@ public StatusDiff(Status current, Status desired) { String pathValue = d.get("path").asText(); if (IGNORABLE_PATHS.matcher(pathValue).matches()) { - log.debug("Ignoring Status diff {}", d); + LOGGER.debugOp("Ignoring Status diff {}", d); continue; } - if (log.isDebugEnabled()) { - log.debug("Status differs: {}", d); - log.debug("Current Status path {} has value {}", pathValue, lookupPath(source, pathValue)); - log.debug("Desired Status path {} has value {}", pathValue, lookupPath(target, pathValue)); - } + LOGGER.debugOp("Status differs: {}", d); + LOGGER.debugOp("Current Status path {} has value {}", pathValue, lookupPath(source, pathValue)); + LOGGER.debugOp("Desired Status path {} has value {}", pathValue, lookupPath(target, pathValue)); num++; } diff --git 
a/operator-common/src/main/java/io/strimzi/operator/common/AbstractOperator.java b/operator-common/src/main/java/io/strimzi/operator/common/AbstractOperator.java index b5b0f801fa..a183249709 100644 --- a/operator-common/src/main/java/io/strimzi/operator/common/AbstractOperator.java +++ b/operator-common/src/main/java/io/strimzi/operator/common/AbstractOperator.java @@ -33,8 +33,6 @@ import io.vertx.core.Promise; import io.vertx.core.Vertx; import io.vertx.core.shareddata.Lock; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import java.util.ArrayList; import java.util.LinkedHashSet; @@ -58,7 +56,7 @@ * {@link #reconcile(Reconciliation)} by delegating to abstract {@link #createOrUpdate(Reconciliation, CustomResource)} * and {@link #delete(Reconciliation)} methods for subclasses to implement. * - *
  • add support for operator-side {@linkplain #validate(CustomResource) validation}. + *
  • add support for operator-side {@linkplain #validate(Reconciliation, CustomResource) validation}. * This can be used to automatically log warnings about source resources which used deprecated part of the CR API. *Ä… * @@ -73,7 +71,7 @@ public abstract class AbstractOperator< O extends AbstractWatchableStatusedResourceOperator> implements Operator { - private static final Logger log = LogManager.getLogger(AbstractOperator.class); + private static final ReconciliationLogger LOGGER = ReconciliationLogger.create(AbstractOperator.class); private static final long PROGRESS_WARNING = 60_000L; protected static final int LOCK_TIMEOUT_MS = 10000; @@ -206,14 +204,14 @@ public final Future reconcile(Reconciliation reconciliation) { // and might not be really deleted. We have to filter these situations out and ignore the // reconciliation because such resource might be already operated by another instance (where the // same change triggered ADDED event). - log.debug("{}: {} {} in namespace {} does not match label selector {} and will be ignored", reconciliation, kind(), name, namespace, selector().get().getMatchLabels()); + LOGGER.debugCr(reconciliation, "{} {} in namespace {} does not match label selector {} and will be ignored", kind(), name, namespace, selector().get().getMatchLabels()); return Future.succeededFuture(); } Promise createOrUpdate = Promise.promise(); if (Annotations.isReconciliationPausedWithAnnotation(cr)) { S status = createStatus(); - Set conditions = validate(cr); + Set conditions = validate(reconciliation, cr); conditions.add(StatusUtils.getPausedCondition()); status.setConditions(new ArrayList<>(conditions)); status.setObservedGeneration(cr.getStatus() != null ? cr.getStatus().getObservedGeneration() : 0); @@ -226,7 +224,7 @@ public final Future reconcile(Reconciliation reconciliation) { } }); getPausedResourceCounter().getAndIncrement(); - log.debug("{}: Reconciliation of {} {} is paused", reconciliation, kind, name); + LOGGER.debugCr(reconciliation, "Reconciliation of {} {} is paused", kind, name); return createOrUpdate.future(); } else if (cr.getSpec() == null) { InvalidResourceException exception = new InvalidResourceException("Spec cannot be null"); @@ -242,7 +240,7 @@ public final Future reconcile(Reconciliation reconciliation) { status.setObservedGeneration(cr.getMetadata().getGeneration()); status.addCondition(errorCondition); - log.error("{}: {} spec cannot be null", reconciliation, cr.getMetadata().getName()); + LOGGER.errorCr(reconciliation, "{} spec cannot be null", cr.getMetadata().getName()); updateStatus(reconciliation, status).onComplete(notUsed -> { createOrUpdate.fail(exception); }); @@ -250,9 +248,9 @@ public final Future reconcile(Reconciliation reconciliation) { return createOrUpdate.future(); } - Set unknownAndDeprecatedConditions = validate(cr); + Set unknownAndDeprecatedConditions = validate(reconciliation, cr); - log.info("{}: {} {} will be checked for creation or modification", reconciliation, kind, name); + LOGGER.infoCr(reconciliation, "{} {} will be checked for creation or modification", kind, name); createOrUpdate(reconciliation, cr) .onComplete(res -> { @@ -273,13 +271,13 @@ public final Future reconcile(Reconciliation reconciliation) { Status status = e.getStatus(); addWarningsToStatus(status, unknownAndDeprecatedConditions); - log.error("{}: createOrUpdate failed", reconciliation, e.getCause()); + LOGGER.errorCr(reconciliation, "createOrUpdate failed", e.getCause()); updateStatus(reconciliation, (S) status).onComplete(statusResult -> { 
createOrUpdate.fail(e.getCause()); }); } else { - log.error("{}: createOrUpdate failed", reconciliation, res.cause()); + LOGGER.errorCr(reconciliation, "createOrUpdate failed", res.cause()); createOrUpdate.fail(res.cause()); } } @@ -287,16 +285,16 @@ public final Future reconcile(Reconciliation reconciliation) { return createOrUpdate.future(); } else { - log.info("{}: {} {} should be deleted", reconciliation, kind, name); + LOGGER.infoCr(reconciliation, "{} {} should be deleted", kind, name); return delete(reconciliation).map(deleteResult -> { if (deleteResult) { - log.info("{}: {} {} deleted", reconciliation, kind, name); + LOGGER.infoCr(reconciliation, "{} {} deleted", kind, name); } else { - log.info("{}: Assembly {} or some parts of it will be deleted by garbage collection", reconciliation, name); + LOGGER.infoCr(reconciliation, "Assembly {} or some parts of it will be deleted by garbage collection", name); } return (Void) null; }).recover(deleteResult -> { - log.error("{}: Deletion of {} {} failed", reconciliation, kind, name, deleteResult); + LOGGER.errorCr(reconciliation, "Deletion of {} {} failed", kind, name, deleteResult); return Future.failedFuture(deleteResult); }); } @@ -328,7 +326,7 @@ protected void addWarningsToStatus(Status status, Set unknownAndDepre */ Future updateStatus(Reconciliation reconciliation, S desiredStatus) { if (desiredStatus == null) { - log.debug("{}: Desired status is null - status will not be updated", reconciliation); + LOGGER.debugCr(reconciliation, "Desired status is null - status will not be updated"); return Future.succeededFuture(); } @@ -344,24 +342,24 @@ Future updateStatus(Reconciliation reconciliation, S desiredStatus) { if (!sDiff.isEmpty()) { res.setStatus(desiredStatus); - return resourceOperator.updateStatusAsync(res) + return resourceOperator.updateStatusAsync(reconciliation, res) .compose(notUsed -> { - log.debug("{}: Completed status update", reconciliation); + LOGGER.debugCr(reconciliation, "Completed status update"); return Future.succeededFuture(); }, error -> { - log.error("{}: Failed to update status", reconciliation, error); + LOGGER.errorCr(reconciliation, "Failed to update status", error); return Future.failedFuture(error); }); } else { - log.debug("{}: Status did not change", reconciliation); + LOGGER.debugCr(reconciliation, "Status did not change"); return Future.succeededFuture(); } } else { - log.error("{}: Current {} resource not found", reconciliation, reconciliation.kind()); + LOGGER.errorCr(reconciliation, "Current {} resource not found", reconciliation.kind()); return Future.failedFuture("Current " + reconciliation.kind() + " resource with name " + name + " not found"); } }, error -> { - log.error("{}: Failed to get the current {} resource and its status", reconciliation, reconciliation.kind(), error); + LOGGER.errorCr(reconciliation, "Failed to get the current {} resource and its status", reconciliation.kind(), error); return Future.failedFuture(error); }); } @@ -389,14 +387,14 @@ protected final Future withLock(Reconciliation reconciliation, long lockT String namespace = reconciliation.namespace(); String name = reconciliation.name(); final String lockName = getLockName(namespace, name); - log.debug("{}: Try to acquire lock {}", reconciliation, lockName); + LOGGER.debugCr(reconciliation, "Try to acquire lock {}", lockName); vertx.sharedData().getLockWithTimeout(lockName, lockTimeoutMs, res -> { if (res.succeeded()) { - log.debug("{}: Lock {} acquired", reconciliation, lockName); + LOGGER.debugCr(reconciliation, "Lock 
{} acquired", lockName); Lock lock = res.result(); long timerId = vertx.setPeriodic(PROGRESS_WARNING, timer -> { - log.info("{}: Reconciliation is in progress", reconciliation); + LOGGER.infoCr(reconciliation, "Reconciliation is in progress"); }); try { @@ -409,17 +407,17 @@ protected final Future withLock(Reconciliation reconciliation, long lockT vertx.cancelTimer(timerId); lock.release(); - log.debug("{}: Lock {} released", reconciliation, lockName); + LOGGER.debugCr(reconciliation, "Lock {} released", lockName); }); } catch (Throwable ex) { vertx.cancelTimer(timerId); lock.release(); - log.debug("{}: Lock {} released", reconciliation, lockName); - log.error("{}: Reconciliation failed", reconciliation, ex); + LOGGER.debugCr(reconciliation, "Lock {} released", lockName); + LOGGER.errorCr(reconciliation, "Reconciliation failed", ex); handler.fail(ex); } } else { - log.debug("{}: Failed to acquire lock {} within {}ms.", reconciliation, lockName, lockTimeoutMs); + LOGGER.debugCr(reconciliation, "Failed to acquire lock {} within {}ms.", lockName, lockTimeoutMs); handler.fail(new UnableToAcquireLockException()); } }); @@ -430,15 +428,16 @@ protected final Future withLock(Reconciliation reconciliation, long lockT * Validate the Custom Resource. * This should log at the WARN level (rather than throwing) * if the resource can safely be reconciled (e.g. it merely using deprecated API). + * @param reconciliation The reconciliation * @param resource The custom resource * @throws InvalidResourceException if the resource cannot be safely reconciled. * @return set of conditions */ - /*test*/ public Set validate(T resource) { + /*test*/ public Set validate(Reconciliation reconciliation, T resource) { if (resource != null) { Set warningConditions = new LinkedHashSet<>(0); - ResourceVisitor.visit(resource, new ValidationVisitor(resource, log, warningConditions)); + ResourceVisitor.visit(reconciliation, resource, new ValidationVisitor(resource, LOGGER, warningConditions)); return warningConditions; } @@ -480,10 +479,10 @@ public Consumer recreateWatch(String namespace) { @Override public void accept(WatcherException e) { if (e != null) { - log.error("Watcher closed with exception in namespace {}", namespace, e); + LOGGER.errorOp("Watcher closed with exception in namespace {}", namespace, e); createWatch(namespace, this); } else { - log.info("Watcher closed in namespace {}", namespace); + LOGGER.infoOp("Watcher closed in namespace {}", namespace); } } }; @@ -498,7 +497,7 @@ private void handleResult(Reconciliation reconciliation, AsyncResult resul updateResourceState(reconciliation, true, null); successfulReconciliationsCounter.increment(); reconciliationTimerSample.stop(reconciliationsTimer); - log.info("{}: reconciled", reconciliation); + LOGGER.infoCr(reconciliation, "reconciled"); } else { Throwable cause = result.cause(); @@ -506,14 +505,14 @@ private void handleResult(Reconciliation reconciliation, AsyncResult resul updateResourceState(reconciliation, false, cause); failedReconciliationsCounter.increment(); reconciliationTimerSample.stop(reconciliationsTimer); - log.warn("{}: Failed to reconcile {}", reconciliation, cause.getMessage()); + LOGGER.warnCr(reconciliation, "Failed to reconcile {}", cause.getMessage()); } else if (cause instanceof UnableToAcquireLockException) { lockedReconciliationsCounter.increment(); } else { updateResourceState(reconciliation, false, cause); failedReconciliationsCounter.increment(); reconciliationTimerSample.stop(reconciliationsTimer); - log.warn("{}: Failed to 
reconcile", reconciliation, cause); + LOGGER.warnCr(reconciliation, "Failed to reconcile", cause); } } } @@ -560,7 +559,7 @@ private void updateResourceState(Reconciliation reconciliation, boolean ready, T // remove metric so it can be re-added with new tags metrics.meterRegistry().remove(metric.get().getId()); resourcesStateCounter.remove(key); - log.debug("{}: Removed metric " + METRICS_PREFIX + "resource.state{}", reconciliation, key); + LOGGER.debugCr(reconciliation, "Removed metric " + METRICS_PREFIX + "resource.state{}", key); } if (cr != null) { @@ -568,7 +567,7 @@ private void updateResourceState(Reconciliation reconciliation, boolean ready, T metrics.gauge(METRICS_PREFIX + "resource.state", "Current state of the resource: 1 ready, 0 fail", metricTags) ); resourcesStateCounter.get(key).set(ready ? 1 : 0); - log.debug("{}: Updated metric " + METRICS_PREFIX + "resource.state{} = {}", reconciliation, metricTags, ready ? 1 : 0); + LOGGER.debugCr(reconciliation, "Updated metric " + METRICS_PREFIX + "resource.state{} = {}", metricTags, ready ? 1 : 0); } } @@ -577,18 +576,19 @@ private void updateResourceState(Reconciliation reconciliation, boolean ready, T * we want to ignore the error and return success. This is used to let Strimzi work without some Cluster-wide RBAC * rights when the features they are needed for are not used by the user. * + * @param reconciliation The reconciliation * @param reconcileFuture The original reconciliation future * @param desired The desired state of the resource. * @return A future which completes when the resource was reconciled. */ - public Future> withIgnoreRbacError(Future> reconcileFuture, ClusterRoleBinding desired) { + public Future> withIgnoreRbacError(Reconciliation reconciliation, Future> reconcileFuture, ClusterRoleBinding desired) { return reconcileFuture.compose( rr -> Future.succeededFuture(), e -> { if (desired == null && e.getMessage() != null && e.getMessage().contains("Message: Forbidden!")) { - log.debug("Ignoring forbidden access to ClusterRoleBindings resource which does not seem to be required."); + LOGGER.debugCr(reconciliation, "Ignoring forbidden access to ClusterRoleBindings resource which does not seem to be required."); return Future.succeededFuture(); } return Future.failedFuture(e.getMessage()); diff --git a/operator-common/src/main/java/io/strimzi/operator/common/OperatorWatcher.java b/operator-common/src/main/java/io/strimzi/operator/common/OperatorWatcher.java index bec7821af0..3a425a2519 100644 --- a/operator-common/src/main/java/io/strimzi/operator/common/OperatorWatcher.java +++ b/operator-common/src/main/java/io/strimzi/operator/common/OperatorWatcher.java @@ -7,8 +7,6 @@ import io.fabric8.kubernetes.api.model.HasMetadata; import io.fabric8.kubernetes.client.Watcher; import io.fabric8.kubernetes.client.WatcherException; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import java.util.function.Consumer; @@ -20,7 +18,7 @@ class OperatorWatcher implements Watcher { private final String namespace; private final Consumer onClose; private Operator operator; - private static final Logger log = LogManager.getLogger(OperatorWatcher.class); + private static final ReconciliationLogger LOGGER = ReconciliationLogger.create(OperatorWatcher.class); OperatorWatcher(Operator operator, String namespace, Consumer onClose) { this.namespace = namespace; @@ -37,15 +35,15 @@ public void eventReceived(Action action, T resource) { case DELETED: case MODIFIED: Reconciliation reconciliation = new 
Reconciliation("watch", operator.kind(), namespace, name); - log.info("{}: {} {} in namespace {} was {}", reconciliation, operator.kind(), name, namespace, action); + LOGGER.infoCr(reconciliation, "{} {} in namespace {} was {}", operator.kind(), name, namespace, action); operator.reconcile(reconciliation); break; case ERROR: - log.error("Failed {} {} in namespace{} ", operator.kind(), name, namespace); + LOGGER.errorCr(new Reconciliation("watch", operator.kind(), namespace, name), "Failed {} {} in namespace{} ", operator.kind(), name, namespace); operator.reconcileAll("watch error", namespace, ignored -> { }); break; default: - log.error("Unknown action: {} in namespace {}", name, namespace); + LOGGER.errorCr(new Reconciliation("watch", operator.kind(), namespace, name), "Unknown action: {} in namespace {}", name, namespace); operator.reconcileAll("watch unknown", namespace, ignored -> { }); } } diff --git a/operator-common/src/main/java/io/strimzi/operator/common/Reconciliation.java b/operator-common/src/main/java/io/strimzi/operator/common/Reconciliation.java index 2001983dd3..a29169d258 100644 --- a/operator-common/src/main/java/io/strimzi/operator/common/Reconciliation.java +++ b/operator-common/src/main/java/io/strimzi/operator/common/Reconciliation.java @@ -4,6 +4,9 @@ */ package io.strimzi.operator.common; +import org.apache.logging.log4j.Marker; +import org.apache.logging.log4j.MarkerManager; + import java.util.concurrent.atomic.AtomicInteger; /** @@ -17,11 +20,14 @@ public class Reconciliation { private static final AtomicInteger IDS = new AtomicInteger(); + /* test */ public static final Reconciliation DUMMY_RECONCILIATION = new Reconciliation("test", "kind", "namespace", "name"); + private final String trigger; private final String kind; private final String namespace; private final String name; private final int id; + private final Marker marker; public Reconciliation(String trigger, String kind, String namespace, String assemblyName) { this.trigger = trigger; @@ -29,6 +35,7 @@ public Reconciliation(String trigger, String kind, String namespace, String asse this.namespace = namespace; this.name = assemblyName; this.id = IDS.getAndIncrement(); + this.marker = MarkerManager.getMarker(this.kind + "(" + this.namespace + "/" + this.name + ")"); } public String kind() { @@ -43,6 +50,10 @@ public String name() { return name; } + public Marker getMarker() { + return marker; + } + public String toString() { return "Reconciliation #" + id + "(" + trigger + ") " + kind() + "(" + namespace() + "/" + name() + ")"; } diff --git a/operator-common/src/main/java/io/strimzi/operator/common/ReconciliationLogger.java b/operator-common/src/main/java/io/strimzi/operator/common/ReconciliationLogger.java new file mode 100644 index 0000000000..d5f290244f --- /dev/null +++ b/operator-common/src/main/java/io/strimzi/operator/common/ReconciliationLogger.java @@ -0,0 +1,8439 @@ +/* + * Copyright Strimzi authors. + * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 
+ */ +package io.strimzi.operator.common; + +import java.io.Serializable; +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.Marker; +import org.apache.logging.log4j.message.Message; +import org.apache.logging.log4j.message.MessageFactory; +import org.apache.logging.log4j.spi.AbstractLogger; +import org.apache.logging.log4j.spi.ExtendedLoggerWrapper; +import org.apache.logging.log4j.util.MessageSupplier; +import org.apache.logging.log4j.util.Supplier; + +/** + * Custom Logger interface with convenience methods for + * the OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE and ALL custom log levels. + *
<p>Compatible with Log4j 2.6 or higher.</p>
    + */ +public class ReconciliationLogger implements Serializable { + private static final long serialVersionUID = 258810740149174L; + private final ExtendedLoggerWrapper logger; + + private static final String FQCN = ReconciliationLogger.class.getName(); + private static final Level OFF = Level.forName("OFF", 0); + private static final Level FATAL = Level.forName("FATAL", 100); + private static final Level ERROR = Level.forName("ERROR", 200); + private static final Level WARN = Level.forName("WARN", 300); + private static final Level INFO = Level.forName("INFO", 400); + private static final Level DEBUG = Level.forName("DEBUG", 500); + private static final Level TRACE = Level.forName("TRACE", 600); + private static final Level ALL = Level.forName("ALL", 10000); + + protected ReconciliationLogger(final Logger logger) { + this.logger = new ExtendedLoggerWrapper((AbstractLogger) logger, logger.getName(), logger.getMessageFactory()); + } + + /** + * Returns a custom Logger with the name of the calling class. + * + * @return The custom Logger for the calling class. + */ + public static ReconciliationLogger create() { + final Logger wrapped = LogManager.getLogger(); + return new ReconciliationLogger(wrapped); + } + + /** + * Returns a custom Logger using the fully qualified name of the Class as + * the Logger name. + * + * @param loggerName The Class whose name should be used as the Logger name. + * If null it will default to the calling class. + * @return The custom Logger. + */ + public static ReconciliationLogger create(final Class loggerName) { + final Logger wrapped = LogManager.getLogger(loggerName); + return new ReconciliationLogger(wrapped); + } + + /** + * Returns a custom Logger using the fully qualified name of the Class as + * the Logger name. + * + * @param loggerName The Class whose name should be used as the Logger name. + * If null it will default to the calling class. + * @param messageFactory The message factory is used only when creating a + * logger, subsequent use does not change the logger but will log + * a warning if mismatched. + * @return The custom Logger. + */ + public static ReconciliationLogger create(final Class loggerName, final MessageFactory messageFactory) { + final Logger wrapped = LogManager.getLogger(loggerName, messageFactory); + return new ReconciliationLogger(wrapped); + } + + /** + * Returns a custom Logger using the fully qualified class name of the value + * as the Logger name. + * + * @param value The value whose class name should be used as the Logger + * name. If null the name of the calling class will be used as + * the logger name. + * @return The custom Logger. + */ + public static ReconciliationLogger create(final Object value) { + final Logger wrapped = LogManager.getLogger(value); + return new ReconciliationLogger(wrapped); + } + + /** + * Returns a custom Logger using the fully qualified class name of the value + * as the Logger name. + * + * @param value The value whose class name should be used as the Logger + * name. If null the name of the calling class will be used as + * the logger name. + * @param messageFactory The message factory is used only when creating a + * logger, subsequent use does not change the logger but will log + * a warning if mismatched. + * @return The custom Logger. 
+ */ + public static ReconciliationLogger create(final Object value, final MessageFactory messageFactory) { + final Logger wrapped = LogManager.getLogger(value, messageFactory); + return new ReconciliationLogger(wrapped); + } + + /** + * Returns a custom Logger with the specified name. + * + * @param name The logger name. If null the name of the calling class will + * be used. + * @return The custom Logger. + */ + public static ReconciliationLogger create(final String name) { + final Logger wrapped = LogManager.getLogger(name); + return new ReconciliationLogger(wrapped); + } + + /** + * Returns a custom Logger with the specified name. + * + * @param name The logger name. If null the name of the calling class will + * be used. + * @param messageFactory The message factory is used only when creating a + * logger, subsequent use does not change the logger but will log + * a warning if mismatched. + * @return The custom Logger. + */ + public static ReconciliationLogger create(final String name, final MessageFactory messageFactory) { + final Logger wrapped = LogManager.getLogger(name, messageFactory); + return new ReconciliationLogger(wrapped); + } + + ////// Operator Logging + + /** + * Logs a message with the specific Marker at the {@code OFF} level. + * + * @param marker the marker data specific to this log statement + * @param msg the message string to be logged + */ + public void offOp(final Marker marker, final Message msg) { + logger.logIfEnabled(FQCN, OFF, marker, msg, (Throwable) null); + } + + /** + * Logs a message with the specific Marker at the {@code OFF} level. + * + * @param marker the marker data specific to this log statement + * @param msg the message string to be logged + * @param t A Throwable or null. + */ + public void offOp(final Marker marker, final Message msg, final Throwable t) { + logger.logIfEnabled(FQCN, OFF, marker, msg, t); + } + + /** + * Logs a message object with the {@code OFF} level. + * + * @param marker the marker data specific to this log statement + * @param message the message object to log. + */ + public void offOp(final Marker marker, final Object message) { + logger.logIfEnabled(FQCN, OFF, marker, message, (Throwable) null); + } + + /** + * Logs a message CharSequence with the {@code OFF} level. + * + * @param marker the marker data specific to this log statement + * @param message the message CharSequence to log. + * @since Log4j-2.6 + */ + public void offOp(final Marker marker, final CharSequence message) { + logger.logIfEnabled(FQCN, OFF, marker, message, (Throwable) null); + } + + /** + * Logs a message at the {@code OFF} level including the stack trace of + * the {@link Throwable} {@code t} passed as parameter. + * + * @param marker the marker data specific to this log statement + * @param message the message to log. + * @param t the exception to log, including its stack trace. + */ + public void offOp(final Marker marker, final Object message, final Throwable t) { + logger.logIfEnabled(FQCN, OFF, marker, message, t); + } + + /** + * Logs a message at the {@code OFF} level including the stack trace of + * the {@link Throwable} {@code t} passed as parameter. + * + * @param marker the marker data specific to this log statement + * @param message the CharSequence to log. + * @param t the exception to log, including its stack trace. 
+ * @since Log4j-2.6 + */ + public void offOp(final Marker marker, final CharSequence message, final Throwable t) { + logger.logIfEnabled(FQCN, OFF, marker, message, t); + } + + /** + * Logs a message object with the {@code OFF} level. + * + * @param marker the marker data specific to this log statement + * @param message the message object to log. + */ + public void offOp(final Marker marker, final String message) { + logger.logIfEnabled(FQCN, OFF, marker, message, (Throwable) null); + } + + /** + * Logs a message with parameters at the {@code OFF} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param params parameters to the message. + + */ + public void offOp(final Marker marker, final String message, final Object... params) { + logger.logIfEnabled(FQCN, OFF, marker, message, params); + } + + /** + * Logs a message with parameters at the {@code OFF} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + + * @since Log4j-2.6 + */ + public void offOp(final Marker marker, final String message, final Object p0) { + logger.logIfEnabled(FQCN, OFF, marker, message, p0); + } + + /** + * Logs a message with parameters at the {@code OFF} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + + * @since Log4j-2.6 + */ + public void offOp(final Marker marker, final String message, final Object p0, final Object p1) { + logger.logIfEnabled(FQCN, OFF, marker, message, p0, p1); + } + + /** + * Logs a message with parameters at the {@code OFF} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + + * @since Log4j-2.6 + */ + public void offOp(final Marker marker, final String message, final Object p0, final Object p1, final Object p2) { + logger.logIfEnabled(FQCN, OFF, marker, message, p0, p1, p2); + } + + /** + * Logs a message with parameters at the {@code OFF} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + + * @since Log4j-2.6 + */ + public void offOp(final Marker marker, final String message, final Object p0, final Object p1, final Object p2, + final Object p3) { + logger.logIfEnabled(FQCN, OFF, marker, message, p0, p1, p2, p3); + } + + /** + * Logs a message with parameters at the {@code OFF} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. 
+ + * @since Log4j-2.6 + */ + public void offOp(final Marker marker, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4) { + logger.logIfEnabled(FQCN, OFF, marker, message, p0, p1, p2, p3, p4); + } + + /** + * Logs a message with parameters at the {@code OFF} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + + * @since Log4j-2.6 + */ + public void offOp(final Marker marker, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5) { + logger.logIfEnabled(FQCN, OFF, marker, message, p0, p1, p2, p3, p4, p5); + } + + /** + * Logs a message with parameters at the {@code OFF} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + + * @since Log4j-2.6 + */ + public void offOp(final Marker marker, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6) { + logger.logIfEnabled(FQCN, OFF, marker, message, p0, p1, p2, p3, p4, p5, p6); + } + + /** + * Logs a message with parameters at the {@code OFF} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + * @param p7 parameter to the message. + + * @since Log4j-2.6 + */ + public void offOp(final Marker marker, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6, + final Object p7) { + logger.logIfEnabled(FQCN, OFF, marker, message, p0, p1, p2, p3, p4, p5, p6, p7); + } + + /** + * Logs a message with parameters at the {@code OFF} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + * @param p7 parameter to the message. + * @param p8 parameter to the message. 
+ + * @since Log4j-2.6 + */ + public void offOp(final Marker marker, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6, + final Object p7, final Object p8) { + logger.logIfEnabled(FQCN, OFF, marker, message, p0, p1, p2, p3, p4, p5, p6, p7, p8); + } + + /** + * Logs a message with parameters at the {@code OFF} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + * @param p7 parameter to the message. + * @param p8 parameter to the message. + * @param p9 parameter to the message. + + * @since Log4j-2.6 + */ + public void offOp(final Marker marker, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6, + final Object p7, final Object p8, final Object p9) { + logger.logIfEnabled(FQCN, OFF, marker, message, p0, p1, p2, p3, p4, p5, p6, p7, p8, p9); + } + + /** + * Logs a message at the {@code OFF} level including the stack trace of + * the {@link Throwable} {@code t} passed as parameter. + * + * @param marker the marker data specific to this log statement + * @param message the message to log. + * @param t the exception to log, including its stack trace. + */ + public void offOp(final Marker marker, final String message, final Throwable t) { + logger.logIfEnabled(FQCN, OFF, marker, message, t); + } + + /** + * Logs the specified Message at the {@code OFF} level. + * + * @param msg the message string to be logged + */ + public void offOp(final Message msg) { + logger.logIfEnabled(FQCN, OFF, null, msg, (Throwable) null); + } + + /** + * Logs the specified Message at the {@code OFF} level. + * + * @param msg the message string to be logged + * @param t A Throwable or null. + */ + public void offOp(final Message msg, final Throwable t) { + logger.logIfEnabled(FQCN, OFF, null, msg, t); + } + + /** + * Logs a message object with the {@code OFF} level. + * + * @param message the message object to log. + */ + public void offOp(final Object message) { + logger.logIfEnabled(FQCN, OFF, null, message, (Throwable) null); + } + + /** + * Logs a message at the {@code OFF} level including the stack trace of + * the {@link Throwable} {@code t} passed as parameter. + * + * @param message the message to log. + * @param t the exception to log, including its stack trace. + */ + public void offOp(final Object message, final Throwable t) { + logger.logIfEnabled(FQCN, OFF, null, message, t); + } + + /** + * Logs a message CharSequence with the {@code OFF} level. + * + * @param message the message CharSequence to log. + * @since Log4j-2.6 + */ + public void offOp(final CharSequence message) { + logger.logIfEnabled(FQCN, OFF, null, message, (Throwable) null); + } + + /** + * Logs a CharSequence at the {@code OFF} level including the stack trace of + * the {@link Throwable} {@code t} passed as parameter. + * + * @param message the CharSequence to log. + * @param t the exception to log, including its stack trace. 
+ * @since Log4j-2.6 + */ + public void offOp(final CharSequence message, final Throwable t) { + logger.logIfEnabled(FQCN, OFF, null, message, t); + } + + /** + * Logs a message object with the {@code OFF} level. + * + * @param message the message object to log. + */ + public void offOp(final String message) { + logger.logIfEnabled(FQCN, OFF, null, message, (Throwable) null); + } + + /** + * Logs a message with parameters at the {@code OFF} level. + * + * @param message the message to log; the format depends on the message factory. + * @param params parameters to the message. + + */ + public void offOp(final String message, final Object... params) { + logger.logIfEnabled(FQCN, OFF, null, message, params); + } + + /** + * Logs a message with parameters at the {@code OFF} level. + * + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + + * @since Log4j-2.6 + */ + public void offOp(final String message, final Object p0) { + logger.logIfEnabled(FQCN, OFF, null, message, p0); + } + + /** + * Logs a message with parameters at the {@code OFF} level. + * + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + + * @since Log4j-2.6 + */ + public void offOp(final String message, final Object p0, final Object p1) { + logger.logIfEnabled(FQCN, OFF, null, message, p0, p1); + } + + /** + * Logs a message with parameters at the {@code OFF} level. + * + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + + * @since Log4j-2.6 + */ + public void offOp(final String message, final Object p0, final Object p1, final Object p2) { + logger.logIfEnabled(FQCN, OFF, null, message, p0, p1, p2); + } + + /** + * Logs a message with parameters at the {@code OFF} level. + * + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + + * @since Log4j-2.6 + */ + public void offOp(final String message, final Object p0, final Object p1, final Object p2, + final Object p3) { + logger.logIfEnabled(FQCN, OFF, null, message, p0, p1, p2, p3); + } + + /** + * Logs a message with parameters at the {@code OFF} level. + * + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + + * @since Log4j-2.6 + */ + public void offOp(final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4) { + logger.logIfEnabled(FQCN, OFF, null, message, p0, p1, p2, p3, p4); + } + + /** + * Logs a message with parameters at the {@code OFF} level. + * + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. 
+ + * @since Log4j-2.6 + */ + public void offOp(final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5) { + logger.logIfEnabled(FQCN, OFF, null, message, p0, p1, p2, p3, p4, p5); + } + + /** + * Logs a message with parameters at the {@code OFF} level. + * + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + + * @since Log4j-2.6 + */ + public void offOp(final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6) { + logger.logIfEnabled(FQCN, OFF, null, message, p0, p1, p2, p3, p4, p5, p6); + } + + /** + * Logs a message with parameters at the {@code OFF} level. + * + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + * @param p7 parameter to the message. + + * @since Log4j-2.6 + */ + public void offOp(final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6, + final Object p7) { + logger.logIfEnabled(FQCN, OFF, null, message, p0, p1, p2, p3, p4, p5, p6, p7); + } + + /** + * Logs a message with parameters at the {@code OFF} level. + * + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + * @param p7 parameter to the message. + * @param p8 parameter to the message. + + * @since Log4j-2.6 + */ + public void offOp(final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6, + final Object p7, final Object p8) { + logger.logIfEnabled(FQCN, OFF, null, message, p0, p1, p2, p3, p4, p5, p6, p7, p8); + } + + /** + * Logs a message with parameters at the {@code OFF} level. + * + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + * @param p7 parameter to the message. + * @param p8 parameter to the message. + * @param p9 parameter to the message. 
+ + * @since Log4j-2.6 + */ + public void offOp(final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6, + final Object p7, final Object p8, final Object p9) { + logger.logIfEnabled(FQCN, OFF, null, message, p0, p1, p2, p3, p4, p5, p6, p7, p8, p9); + } + + /** + * Logs a message at the {@code OFF} level including the stack trace of + * the {@link Throwable} {@code t} passed as parameter. + * + * @param message the message to log. + * @param t the exception to log, including its stack trace. + */ + public void offOp(final String message, final Throwable t) { + logger.logIfEnabled(FQCN, OFF, null, message, t); + } + + /** + * Logs a message which is only to be constructed if the logging level is the {@code OFF}level. + * + * @param msgSupplier A function, which when called, produces the desired log message; + * the format depends on the message factory. + * @since Log4j-2.4 + */ + public void offOp(final Supplier msgSupplier) { + logger.logIfEnabled(FQCN, OFF, null, msgSupplier, (Throwable) null); + } + + /** + * Logs a message (only to be constructed if the logging level is the {@code OFF} + * level) including the stack trace of the {@link Throwable} t passed as parameter. + * + * @param msgSupplier A function, which when called, produces the desired log message; + * the format depends on the message factory. + * @param t the exception to log, including its stack trace. + * @since Log4j-2.4 + */ + public void offOp(final Supplier msgSupplier, final Throwable t) { + logger.logIfEnabled(FQCN, OFF, null, msgSupplier, t); + } + + /** + * Logs a message which is only to be constructed if the logging level is the + * {@code OFF} level with the specified Marker. + * + * @param marker the marker data specific to this log statement + * @param msgSupplier A function, which when called, produces the desired log message; + * the format depends on the message factory. + * @since Log4j-2.4 + */ + public void offOp(final Marker marker, final Supplier msgSupplier) { + logger.logIfEnabled(FQCN, OFF, marker, msgSupplier, (Throwable) null); + } + + /** + * Logs a message with parameters which are only to be constructed if the logging level is the + * {@code OFF} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param paramSuppliers An array of functions, which when called, produce the desired log message parameters. + * @since Log4j-2.4 + */ + public void offOp(final Marker marker, final String message, final Supplier... paramSuppliers) { + logger.logIfEnabled(FQCN, OFF, marker, message, paramSuppliers); + } + + /** + * Logs a message (only to be constructed if the logging level is the {@code OFF} + * level) with the specified Marker and including the stack trace of the {@link Throwable} + * t passed as parameter. + * + * @param marker the marker data specific to this log statement + * @param msgSupplier A function, which when called, produces the desired log message; + * the format depends on the message factory. + * @param t A Throwable or null. + * @since Log4j-2.4 + */ + public void offOp(final Marker marker, final Supplier msgSupplier, final Throwable t) { + logger.logIfEnabled(FQCN, OFF, marker, msgSupplier, t); + } + + /** + * Logs a message with parameters which are only to be constructed if the logging level is + * the {@code OFF} level. 
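+ * <p>A purely illustrative sketch (assuming a {@code ReconciliationLogger} field named {@code LOGGER};
+ * {@code expensiveStateDump()} is a hypothetical helper): the suppliers are invoked only if the level is
+ * enabled, so costly values can be computed lazily:</p>
+ * <pre>{@code
+ * LOGGER.offOp("Cluster state: {}", () -> expensiveStateDump());
+ * }</pre>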
+ * + * @param message the message to log; the format depends on the message factory. + * @param paramSuppliers An array of functions, which when called, produce the desired log message parameters. + * @since Log4j-2.4 + */ + public void offOp(final String message, final Supplier... paramSuppliers) { + logger.logIfEnabled(FQCN, OFF, null, message, paramSuppliers); + } + + /** + * Logs a message which is only to be constructed if the logging level is the + * {@code OFF} level with the specified Marker. The {@code MessageSupplier} may or may + * not use the {@link MessageFactory} to construct the {@code Message}. + * + * @param marker the marker data specific to this log statement + * @param msgSupplier A function, which when called, produces the desired log message. + * @since Log4j-2.4 + */ + public void offOp(final Marker marker, final MessageSupplier msgSupplier) { + logger.logIfEnabled(FQCN, OFF, marker, msgSupplier, (Throwable) null); + } + + /** + * Logs a message (only to be constructed if the logging level is the {@code OFF} + * level) with the specified Marker and including the stack trace of the {@link Throwable} + * t passed as parameter. The {@code MessageSupplier} may or may not use the + * {@link MessageFactory} to construct the {@code Message}. + * + * @param marker the marker data specific to this log statement + * @param msgSupplier A function, which when called, produces the desired log message. + * @param t A Throwable or null. + * @since Log4j-2.4 + */ + public void offOp(final Marker marker, final MessageSupplier msgSupplier, final Throwable t) { + logger.logIfEnabled(FQCN, OFF, marker, msgSupplier, t); + } + + /** + * Logs a message which is only to be constructed if the logging level is the + * {@code OFF} level. The {@code MessageSupplier} may or may not use the + * {@link MessageFactory} to construct the {@code Message}. + * + * @param msgSupplier A function, which when called, produces the desired log message. + * @since Log4j-2.4 + */ + public void offOp(final MessageSupplier msgSupplier) { + logger.logIfEnabled(FQCN, OFF, null, msgSupplier, (Throwable) null); + } + + /** + * Logs a message (only to be constructed if the logging level is the {@code OFF} + * level) including the stack trace of the {@link Throwable} t passed as parameter. + * The {@code MessageSupplier} may or may not use the {@link MessageFactory} to construct the + * {@code Message}. + * + * @param msgSupplier A function, which when called, produces the desired log message. + * @param t the exception to log, including its stack trace. + * @since Log4j-2.4 + */ + public void offOp(final MessageSupplier msgSupplier, final Throwable t) { + logger.logIfEnabled(FQCN, OFF, null, msgSupplier, t); + } + + /** + * Logs a message with the specific Marker at the {@code FATAL} level. + * + * @param marker the marker data specific to this log statement + * @param msg the message string to be logged + */ + public void fatalOp(final Marker marker, final Message msg) { + logger.logIfEnabled(FQCN, FATAL, marker, msg, (Throwable) null); + } + + /** + * Logs a message with the specific Marker at the {@code FATAL} level. + * + * @param marker the marker data specific to this log statement + * @param msg the message string to be logged + * @param t A Throwable or null. + */ + public void fatalOp(final Marker marker, final Message msg, final Throwable t) { + logger.logIfEnabled(FQCN, FATAL, marker, msg, t); + } + + /** + * Logs a message object with the {@code FATAL} level. 
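+ * <p>Illustrative only (the marker name and the {@code failureCause} value are hypothetical); a marker
+ * lets individual statements be filtered or routed independently of the logger's configured level:</p>
+ * <pre>{@code
+ * Marker marker = MarkerManager.getMarker("my-namespace/my-cluster");
+ * LOGGER.fatalOp(marker, failureCause);
+ * }</pre>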
+ * + * @param marker the marker data specific to this log statement + * @param message the message object to log. + */ + public void fatalOp(final Marker marker, final Object message) { + logger.logIfEnabled(FQCN, FATAL, marker, message, (Throwable) null); + } + + /** + * Logs a message CharSequence with the {@code FATAL} level. + * + * @param marker the marker data specific to this log statement + * @param message the message CharSequence to log. + * @since Log4j-2.6 + */ + public void fatalOp(final Marker marker, final CharSequence message) { + logger.logIfEnabled(FQCN, FATAL, marker, message, (Throwable) null); + } + + /** + * Logs a message at the {@code FATAL} level including the stack trace of + * the {@link Throwable} {@code t} passed as parameter. + * + * @param marker the marker data specific to this log statement + * @param message the message to log. + * @param t the exception to log, including its stack trace. + */ + public void fatalOp(final Marker marker, final Object message, final Throwable t) { + logger.logIfEnabled(FQCN, FATAL, marker, message, t); + } + + /** + * Logs a message at the {@code FATAL} level including the stack trace of + * the {@link Throwable} {@code t} passed as parameter. + * + * @param marker the marker data specific to this log statement + * @param message the CharSequence to log. + * @param t the exception to log, including its stack trace. + * @since Log4j-2.6 + */ + public void fatalOp(final Marker marker, final CharSequence message, final Throwable t) { + logger.logIfEnabled(FQCN, FATAL, marker, message, t); + } + + /** + * Logs a message object with the {@code FATAL} level. + * + * @param marker the marker data specific to this log statement + * @param message the message object to log. + */ + public void fatalOp(final Marker marker, final String message) { + logger.logIfEnabled(FQCN, FATAL, marker, message, (Throwable) null); + } + + /** + * Logs a message with parameters at the {@code FATAL} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param params parameters to the message. + + */ + public void fatalOp(final Marker marker, final String message, final Object... params) { + logger.logIfEnabled(FQCN, FATAL, marker, message, params); + } + + /** + * Logs a message with parameters at the {@code FATAL} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + + * @since Log4j-2.6 + */ + public void fatalOp(final Marker marker, final String message, final Object p0) { + logger.logIfEnabled(FQCN, FATAL, marker, message, p0); + } + + /** + * Logs a message with parameters at the {@code FATAL} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + + * @since Log4j-2.6 + */ + public void fatalOp(final Marker marker, final String message, final Object p0, final Object p1) { + logger.logIfEnabled(FQCN, FATAL, marker, message, p0, p1); + } + + /** + * Logs a message with parameters at the {@code FATAL} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. 
+ * @param p1 parameter to the message. + * @param p2 parameter to the message. + + * @since Log4j-2.6 + */ + public void fatalOp(final Marker marker, final String message, final Object p0, final Object p1, final Object p2) { + logger.logIfEnabled(FQCN, FATAL, marker, message, p0, p1, p2); + } + + /** + * Logs a message with parameters at the {@code FATAL} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + + * @since Log4j-2.6 + */ + public void fatalOp(final Marker marker, final String message, final Object p0, final Object p1, final Object p2, + final Object p3) { + logger.logIfEnabled(FQCN, FATAL, marker, message, p0, p1, p2, p3); + } + + /** + * Logs a message with parameters at the {@code FATAL} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + + * @since Log4j-2.6 + */ + public void fatalOp(final Marker marker, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4) { + logger.logIfEnabled(FQCN, FATAL, marker, message, p0, p1, p2, p3, p4); + } + + /** + * Logs a message with parameters at the {@code FATAL} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + + * @since Log4j-2.6 + */ + public void fatalOp(final Marker marker, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5) { + logger.logIfEnabled(FQCN, FATAL, marker, message, p0, p1, p2, p3, p4, p5); + } + + /** + * Logs a message with parameters at the {@code FATAL} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + + * @since Log4j-2.6 + */ + public void fatalOp(final Marker marker, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6) { + logger.logIfEnabled(FQCN, FATAL, marker, message, p0, p1, p2, p3, p4, p5, p6); + } + + /** + * Logs a message with parameters at the {@code FATAL} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. 
+ * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + * @param p7 parameter to the message. + + * @since Log4j-2.6 + */ + public void fatalOp(final Marker marker, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6, + final Object p7) { + logger.logIfEnabled(FQCN, FATAL, marker, message, p0, p1, p2, p3, p4, p5, p6, p7); + } + + /** + * Logs a message with parameters at the {@code FATAL} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + * @param p7 parameter to the message. + * @param p8 parameter to the message. + + * @since Log4j-2.6 + */ + public void fatalOp(final Marker marker, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6, + final Object p7, final Object p8) { + logger.logIfEnabled(FQCN, FATAL, marker, message, p0, p1, p2, p3, p4, p5, p6, p7, p8); + } + + /** + * Logs a message with parameters at the {@code FATAL} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + * @param p7 parameter to the message. + * @param p8 parameter to the message. + * @param p9 parameter to the message. + + * @since Log4j-2.6 + */ + public void fatalOp(final Marker marker, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6, + final Object p7, final Object p8, final Object p9) { + logger.logIfEnabled(FQCN, FATAL, marker, message, p0, p1, p2, p3, p4, p5, p6, p7, p8, p9); + } + + /** + * Logs a message at the {@code FATAL} level including the stack trace of + * the {@link Throwable} {@code t} passed as parameter. + * + * @param marker the marker data specific to this log statement + * @param message the message to log. + * @param t the exception to log, including its stack trace. + */ + public void fatalOp(final Marker marker, final String message, final Throwable t) { + logger.logIfEnabled(FQCN, FATAL, marker, message, t); + } + + /** + * Logs the specified Message at the {@code FATAL} level. + * + * @param msg the message string to be logged + */ + public void fatalOp(final Message msg) { + logger.logIfEnabled(FQCN, FATAL, null, msg, (Throwable) null); + } + + /** + * Logs the specified Message at the {@code FATAL} level. + * + * @param msg the message string to be logged + * @param t A Throwable or null. + */ + public void fatalOp(final Message msg, final Throwable t) { + logger.logIfEnabled(FQCN, FATAL, null, msg, t); + } + + /** + * Logs a message object with the {@code FATAL} level. + * + * @param message the message object to log. 
+ */ + public void fatalOp(final Object message) { + logger.logIfEnabled(FQCN, FATAL, null, message, (Throwable) null); + } + + /** + * Logs a message at the {@code FATAL} level including the stack trace of + * the {@link Throwable} {@code t} passed as parameter. + * + * @param message the message to log. + * @param t the exception to log, including its stack trace. + */ + public void fatalOp(final Object message, final Throwable t) { + logger.logIfEnabled(FQCN, FATAL, null, message, t); + } + + /** + * Logs a message CharSequence with the {@code FATAL} level. + * + * @param message the message CharSequence to log. + * @since Log4j-2.6 + */ + public void fatalOp(final CharSequence message) { + logger.logIfEnabled(FQCN, FATAL, null, message, (Throwable) null); + } + + /** + * Logs a CharSequence at the {@code FATAL} level including the stack trace of + * the {@link Throwable} {@code t} passed as parameter. + * + * @param message the CharSequence to log. + * @param t the exception to log, including its stack trace. + * @since Log4j-2.6 + */ + public void fatalOp(final CharSequence message, final Throwable t) { + logger.logIfEnabled(FQCN, FATAL, null, message, t); + } + + /** + * Logs a message object with the {@code FATAL} level. + * + * @param message the message object to log. + */ + public void fatalOp(final String message) { + logger.logIfEnabled(FQCN, FATAL, null, message, (Throwable) null); + } + + /** + * Logs a message with parameters at the {@code FATAL} level. + * + * @param message the message to log; the format depends on the message factory. + * @param params parameters to the message. + + */ + public void fatalOp(final String message, final Object... params) { + logger.logIfEnabled(FQCN, FATAL, null, message, params); + } + + /** + * Logs a message with parameters at the {@code FATAL} level. + * + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + + * @since Log4j-2.6 + */ + public void fatalOp(final String message, final Object p0) { + logger.logIfEnabled(FQCN, FATAL, null, message, p0); + } + + /** + * Logs a message with parameters at the {@code FATAL} level. + * + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + + * @since Log4j-2.6 + */ + public void fatalOp(final String message, final Object p0, final Object p1) { + logger.logIfEnabled(FQCN, FATAL, null, message, p0, p1); + } + + /** + * Logs a message with parameters at the {@code FATAL} level. + * + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + + * @since Log4j-2.6 + */ + public void fatalOp(final String message, final Object p0, final Object p1, final Object p2) { + logger.logIfEnabled(FQCN, FATAL, null, message, p0, p1, p2); + } + + /** + * Logs a message with parameters at the {@code FATAL} level. + * + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. 
+ + * @since Log4j-2.6 + */ + public void fatalOp(final String message, final Object p0, final Object p1, final Object p2, + final Object p3) { + logger.logIfEnabled(FQCN, FATAL, null, message, p0, p1, p2, p3); + } + + /** + * Logs a message with parameters at the {@code FATAL} level. + * + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + + * @since Log4j-2.6 + */ + public void fatalOp(final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4) { + logger.logIfEnabled(FQCN, FATAL, null, message, p0, p1, p2, p3, p4); + } + + /** + * Logs a message with parameters at the {@code FATAL} level. + * + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + + * @since Log4j-2.6 + */ + public void fatalOp(final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5) { + logger.logIfEnabled(FQCN, FATAL, null, message, p0, p1, p2, p3, p4, p5); + } + + /** + * Logs a message with parameters at the {@code FATAL} level. + * + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + + * @since Log4j-2.6 + */ + public void fatalOp(final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6) { + logger.logIfEnabled(FQCN, FATAL, null, message, p0, p1, p2, p3, p4, p5, p6); + } + + /** + * Logs a message with parameters at the {@code FATAL} level. + * + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + * @param p7 parameter to the message. + + * @since Log4j-2.6 + */ + public void fatalOp(final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6, + final Object p7) { + logger.logIfEnabled(FQCN, FATAL, null, message, p0, p1, p2, p3, p4, p5, p6, p7); + } + + /** + * Logs a message with parameters at the {@code FATAL} level. + * + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + * @param p7 parameter to the message. + * @param p8 parameter to the message. 
+ + * @since Log4j-2.6 + */ + public void fatalOp(final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6, + final Object p7, final Object p8) { + logger.logIfEnabled(FQCN, FATAL, null, message, p0, p1, p2, p3, p4, p5, p6, p7, p8); + } + + /** + * Logs a message with parameters at the {@code FATAL} level. + * + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + * @param p7 parameter to the message. + * @param p8 parameter to the message. + * @param p9 parameter to the message. + + * @since Log4j-2.6 + */ + public void fatalOp(final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6, + final Object p7, final Object p8, final Object p9) { + logger.logIfEnabled(FQCN, FATAL, null, message, p0, p1, p2, p3, p4, p5, p6, p7, p8, p9); + } + + /** + * Logs a message at the {@code FATAL} level including the stack trace of + * the {@link Throwable} {@code t} passed as parameter. + * + * @param message the message to log. + * @param t the exception to log, including its stack trace. + */ + public void fatalOp(final String message, final Throwable t) { + logger.logIfEnabled(FQCN, FATAL, null, message, t); + } + + /** + * Logs a message which is only to be constructed if the logging level is the {@code FATAL}level. + * + * @param msgSupplier A function, which when called, produces the desired log message; + * the format depends on the message factory. + * @since Log4j-2.4 + */ + public void fatalOp(final Supplier msgSupplier) { + logger.logIfEnabled(FQCN, FATAL, null, msgSupplier, (Throwable) null); + } + + /** + * Logs a message (only to be constructed if the logging level is the {@code FATAL} + * level) including the stack trace of the {@link Throwable} t passed as parameter. + * + * @param msgSupplier A function, which when called, produces the desired log message; + * the format depends on the message factory. + * @param t the exception to log, including its stack trace. + * @since Log4j-2.4 + */ + public void fatalOp(final Supplier msgSupplier, final Throwable t) { + logger.logIfEnabled(FQCN, FATAL, null, msgSupplier, t); + } + + /** + * Logs a message which is only to be constructed if the logging level is the + * {@code FATAL} level with the specified Marker. + * + * @param marker the marker data specific to this log statement + * @param msgSupplier A function, which when called, produces the desired log message; + * the format depends on the message factory. + * @since Log4j-2.4 + */ + public void fatalOp(final Marker marker, final Supplier msgSupplier) { + logger.logIfEnabled(FQCN, FATAL, marker, msgSupplier, (Throwable) null); + } + + /** + * Logs a message with parameters which are only to be constructed if the logging level is the + * {@code FATAL} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param paramSuppliers An array of functions, which when called, produce the desired log message parameters. + * @since Log4j-2.4 + */ + public void fatalOp(final Marker marker, final String message, final Supplier... 
paramSuppliers) { + logger.logIfEnabled(FQCN, FATAL, marker, message, paramSuppliers); + } + + /** + * Logs a message (only to be constructed if the logging level is the {@code FATAL} + * level) with the specified Marker and including the stack trace of the {@link Throwable} + * t passed as parameter. + * + * @param marker the marker data specific to this log statement + * @param msgSupplier A function, which when called, produces the desired log message; + * the format depends on the message factory. + * @param t A Throwable or null. + * @since Log4j-2.4 + */ + public void fatalOp(final Marker marker, final Supplier msgSupplier, final Throwable t) { + logger.logIfEnabled(FQCN, FATAL, marker, msgSupplier, t); + } + + /** + * Logs a message with parameters which are only to be constructed if the logging level is + * the {@code FATAL} level. + * + * @param message the message to log; the format depends on the message factory. + * @param paramSuppliers An array of functions, which when called, produce the desired log message parameters. + * @since Log4j-2.4 + */ + public void fatalOp(final String message, final Supplier... paramSuppliers) { + logger.logIfEnabled(FQCN, FATAL, null, message, paramSuppliers); + } + + /** + * Logs a message which is only to be constructed if the logging level is the + * {@code FATAL} level with the specified Marker. The {@code MessageSupplier} may or may + * not use the {@link MessageFactory} to construct the {@code Message}. + * + * @param marker the marker data specific to this log statement + * @param msgSupplier A function, which when called, produces the desired log message. + * @since Log4j-2.4 + */ + public void fatalOp(final Marker marker, final MessageSupplier msgSupplier) { + logger.logIfEnabled(FQCN, FATAL, marker, msgSupplier, (Throwable) null); + } + + /** + * Logs a message (only to be constructed if the logging level is the {@code FATAL} + * level) with the specified Marker and including the stack trace of the {@link Throwable} + * t passed as parameter. The {@code MessageSupplier} may or may not use the + * {@link MessageFactory} to construct the {@code Message}. + * + * @param marker the marker data specific to this log statement + * @param msgSupplier A function, which when called, produces the desired log message. + * @param t A Throwable or null. + * @since Log4j-2.4 + */ + public void fatalOp(final Marker marker, final MessageSupplier msgSupplier, final Throwable t) { + logger.logIfEnabled(FQCN, FATAL, marker, msgSupplier, t); + } + + /** + * Logs a message which is only to be constructed if the logging level is the + * {@code FATAL} level. The {@code MessageSupplier} may or may not use the + * {@link MessageFactory} to construct the {@code Message}. + * + * @param msgSupplier A function, which when called, produces the desired log message. + * @since Log4j-2.4 + */ + public void fatalOp(final MessageSupplier msgSupplier) { + logger.logIfEnabled(FQCN, FATAL, null, msgSupplier, (Throwable) null); + } + + /** + * Logs a message (only to be constructed if the logging level is the {@code FATAL} + * level) including the stack trace of the {@link Throwable} t passed as parameter. + * The {@code MessageSupplier} may or may not use the {@link MessageFactory} to construct the + * {@code Message}. + * + * @param msgSupplier A function, which when called, produces the desired log message. + * @param t the exception to log, including its stack trace. 
+ * @since Log4j-2.4 + */ + public void fatalOp(final MessageSupplier msgSupplier, final Throwable t) { + logger.logIfEnabled(FQCN, FATAL, null, msgSupplier, t); + } + + /** + * Logs a message with the specific Marker at the {@code ERROR} level. + * + * @param marker the marker data specific to this log statement + * @param msg the message string to be logged + */ + public void errorOp(final Marker marker, final Message msg) { + logger.logIfEnabled(FQCN, ERROR, marker, msg, (Throwable) null); + } + + /** + * Logs a message with the specific Marker at the {@code ERROR} level. + * + * @param marker the marker data specific to this log statement + * @param msg the message string to be logged + * @param t A Throwable or null. + */ + public void errorOp(final Marker marker, final Message msg, final Throwable t) { + logger.logIfEnabled(FQCN, ERROR, marker, msg, t); + } + + /** + * Logs a message object with the {@code ERROR} level. + * + * @param marker the marker data specific to this log statement + * @param message the message object to log. + */ + public void errorOp(final Marker marker, final Object message) { + logger.logIfEnabled(FQCN, ERROR, marker, message, (Throwable) null); + } + + /** + * Logs a message CharSequence with the {@code ERROR} level. + * + * @param marker the marker data specific to this log statement + * @param message the message CharSequence to log. + * @since Log4j-2.6 + */ + public void errorOp(final Marker marker, final CharSequence message) { + logger.logIfEnabled(FQCN, ERROR, marker, message, (Throwable) null); + } + + /** + * Logs a message at the {@code ERROR} level including the stack trace of + * the {@link Throwable} {@code t} passed as parameter. + * + * @param marker the marker data specific to this log statement + * @param message the message to log. + * @param t the exception to log, including its stack trace. + */ + public void errorOp(final Marker marker, final Object message, final Throwable t) { + logger.logIfEnabled(FQCN, ERROR, marker, message, t); + } + + /** + * Logs a message at the {@code ERROR} level including the stack trace of + * the {@link Throwable} {@code t} passed as parameter. + * + * @param marker the marker data specific to this log statement + * @param message the CharSequence to log. + * @param t the exception to log, including its stack trace. + * @since Log4j-2.6 + */ + public void errorOp(final Marker marker, final CharSequence message, final Throwable t) { + logger.logIfEnabled(FQCN, ERROR, marker, message, t); + } + + /** + * Logs a message object with the {@code ERROR} level. + * + * @param marker the marker data specific to this log statement + * @param message the message object to log. + */ + public void errorOp(final Marker marker, final String message) { + logger.logIfEnabled(FQCN, ERROR, marker, message, (Throwable) null); + } + + /** + * Logs a message with parameters at the {@code ERROR} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param params parameters to the message. + + */ + public void errorOp(final Marker marker, final String message, final Object... params) { + logger.logIfEnabled(FQCN, ERROR, marker, message, params); + } + + /** + * Logs a message with parameters at the {@code ERROR} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. 
+ + * @since Log4j-2.6 + */ + public void errorOp(final Marker marker, final String message, final Object p0) { + logger.logIfEnabled(FQCN, ERROR, marker, message, p0); + } + + /** + * Logs a message with parameters at the {@code ERROR} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + + * @since Log4j-2.6 + */ + public void errorOp(final Marker marker, final String message, final Object p0, final Object p1) { + logger.logIfEnabled(FQCN, ERROR, marker, message, p0, p1); + } + + /** + * Logs a message with parameters at the {@code ERROR} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + + * @since Log4j-2.6 + */ + public void errorOp(final Marker marker, final String message, final Object p0, final Object p1, final Object p2) { + logger.logIfEnabled(FQCN, ERROR, marker, message, p0, p1, p2); + } + + /** + * Logs a message with parameters at the {@code ERROR} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + + * @since Log4j-2.6 + */ + public void errorOp(final Marker marker, final String message, final Object p0, final Object p1, final Object p2, + final Object p3) { + logger.logIfEnabled(FQCN, ERROR, marker, message, p0, p1, p2, p3); + } + + /** + * Logs a message with parameters at the {@code ERROR} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + + * @since Log4j-2.6 + */ + public void errorOp(final Marker marker, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4) { + logger.logIfEnabled(FQCN, ERROR, marker, message, p0, p1, p2, p3, p4); + } + + /** + * Logs a message with parameters at the {@code ERROR} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + + * @since Log4j-2.6 + */ + public void errorOp(final Marker marker, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5) { + logger.logIfEnabled(FQCN, ERROR, marker, message, p0, p1, p2, p3, p4, p5); + } + + /** + * Logs a message with parameters at the {@code ERROR} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. 
+ * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + + * @since Log4j-2.6 + */ + public void errorOp(final Marker marker, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6) { + logger.logIfEnabled(FQCN, ERROR, marker, message, p0, p1, p2, p3, p4, p5, p6); + } + + /** + * Logs a message with parameters at the {@code ERROR} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + * @param p7 parameter to the message. + + * @since Log4j-2.6 + */ + public void errorOp(final Marker marker, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6, + final Object p7) { + logger.logIfEnabled(FQCN, ERROR, marker, message, p0, p1, p2, p3, p4, p5, p6, p7); + } + + /** + * Logs a message with parameters at the {@code ERROR} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + * @param p7 parameter to the message. + * @param p8 parameter to the message. + + * @since Log4j-2.6 + */ + public void errorOp(final Marker marker, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6, + final Object p7, final Object p8) { + logger.logIfEnabled(FQCN, ERROR, marker, message, p0, p1, p2, p3, p4, p5, p6, p7, p8); + } + + /** + * Logs a message with parameters at the {@code ERROR} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + * @param p7 parameter to the message. + * @param p8 parameter to the message. + * @param p9 parameter to the message. + + * @since Log4j-2.6 + */ + public void errorOp(final Marker marker, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6, + final Object p7, final Object p8, final Object p9) { + logger.logIfEnabled(FQCN, ERROR, marker, message, p0, p1, p2, p3, p4, p5, p6, p7, p8, p9); + } + + /** + * Logs a message at the {@code ERROR} level including the stack trace of + * the {@link Throwable} {@code t} passed as parameter. 
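+ * <p>Illustrative only ({@code marker} and the caught exception {@code e} are hypothetical placeholders),
+ * for example from a catch block:</p>
+ * <pre>{@code
+ * LOGGER.errorOp(marker, "Reconciliation failed", e);
+ * }</pre>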
+ * + * @param marker the marker data specific to this log statement + * @param message the message to log. + * @param t the exception to log, including its stack trace. + */ + public void errorOp(final Marker marker, final String message, final Throwable t) { + logger.logIfEnabled(FQCN, ERROR, marker, message, t); + } + + /** + * Logs the specified Message at the {@code ERROR} level. + * + * @param msg the message string to be logged + */ + public void errorOp(final Message msg) { + logger.logIfEnabled(FQCN, ERROR, null, msg, (Throwable) null); + } + + /** + * Logs the specified Message at the {@code ERROR} level. + * + * @param msg the message string to be logged + * @param t A Throwable or null. + */ + public void errorOp(final Message msg, final Throwable t) { + logger.logIfEnabled(FQCN, ERROR, null, msg, t); + } + + /** + * Logs a message object with the {@code ERROR} level. + * + * @param message the message object to log. + */ + public void errorOp(final Object message) { + logger.logIfEnabled(FQCN, ERROR, null, message, (Throwable) null); + } + + /** + * Logs a message at the {@code ERROR} level including the stack trace of + * the {@link Throwable} {@code t} passed as parameter. + * + * @param message the message to log. + * @param t the exception to log, including its stack trace. + */ + public void errorOp(final Object message, final Throwable t) { + logger.logIfEnabled(FQCN, ERROR, null, message, t); + } + + /** + * Logs a message CharSequence with the {@code ERROR} level. + * + * @param message the message CharSequence to log. + * @since Log4j-2.6 + */ + public void errorOp(final CharSequence message) { + logger.logIfEnabled(FQCN, ERROR, null, message, (Throwable) null); + } + + /** + * Logs a CharSequence at the {@code ERROR} level including the stack trace of + * the {@link Throwable} {@code t} passed as parameter. + * + * @param message the CharSequence to log. + * @param t the exception to log, including its stack trace. + * @since Log4j-2.6 + */ + public void errorOp(final CharSequence message, final Throwable t) { + logger.logIfEnabled(FQCN, ERROR, null, message, t); + } + + /** + * Logs a message object with the {@code ERROR} level. + * + * @param message the message object to log. + */ + public void errorOp(final String message) { + logger.logIfEnabled(FQCN, ERROR, null, message, (Throwable) null); + } + + /** + * Logs a message with parameters at the {@code ERROR} level. + * + * @param message the message to log; the format depends on the message factory. + * @param params parameters to the message. + + */ + public void errorOp(final String message, final Object... params) { + logger.logIfEnabled(FQCN, ERROR, null, message, params); + } + + /** + * Logs a message with parameters at the {@code ERROR} level. + * + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + + * @since Log4j-2.6 + */ + public void errorOp(final String message, final Object p0) { + logger.logIfEnabled(FQCN, ERROR, null, message, p0); + } + + /** + * Logs a message with parameters at the {@code ERROR} level. + * + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + + * @since Log4j-2.6 + */ + public void errorOp(final String message, final Object p0, final Object p1) { + logger.logIfEnabled(FQCN, ERROR, null, message, p0, p1); + } + + /** + * Logs a message with parameters at the {@code ERROR} level. 
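+ * <p>Illustrative only (the placeholder values are hypothetical); each {@code {}} is replaced by the
+ * corresponding parameter:</p>
+ * <pre>{@code
+ * LOGGER.errorOp("Failed to scale {}/{} to {} replicas", namespace, name, replicas);
+ * }</pre>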
+ * + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + + * @since Log4j-2.6 + */ + public void errorOp(final String message, final Object p0, final Object p1, final Object p2) { + logger.logIfEnabled(FQCN, ERROR, null, message, p0, p1, p2); + } + + /** + * Logs a message with parameters at the {@code ERROR} level. + * + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + + * @since Log4j-2.6 + */ + public void errorOp(final String message, final Object p0, final Object p1, final Object p2, + final Object p3) { + logger.logIfEnabled(FQCN, ERROR, null, message, p0, p1, p2, p3); + } + + /** + * Logs a message with parameters at the {@code ERROR} level. + * + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + + * @since Log4j-2.6 + */ + public void errorOp(final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4) { + logger.logIfEnabled(FQCN, ERROR, null, message, p0, p1, p2, p3, p4); + } + + /** + * Logs a message with parameters at the {@code ERROR} level. + * + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + + * @since Log4j-2.6 + */ + public void errorOp(final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5) { + logger.logIfEnabled(FQCN, ERROR, null, message, p0, p1, p2, p3, p4, p5); + } + + /** + * Logs a message with parameters at the {@code ERROR} level. + * + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + + * @since Log4j-2.6 + */ + public void errorOp(final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6) { + logger.logIfEnabled(FQCN, ERROR, null, message, p0, p1, p2, p3, p4, p5, p6); + } + + /** + * Logs a message with parameters at the {@code ERROR} level. + * + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + * @param p7 parameter to the message. 
+ + * @since Log4j-2.6 + */ + public void errorOp(final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6, + final Object p7) { + logger.logIfEnabled(FQCN, ERROR, null, message, p0, p1, p2, p3, p4, p5, p6, p7); + } + + /** + * Logs a message with parameters at the {@code ERROR} level. + * + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + * @param p7 parameter to the message. + * @param p8 parameter to the message. + + * @since Log4j-2.6 + */ + public void errorOp(final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6, + final Object p7, final Object p8) { + logger.logIfEnabled(FQCN, ERROR, null, message, p0, p1, p2, p3, p4, p5, p6, p7, p8); + } + + /** + * Logs a message with parameters at the {@code ERROR} level. + * + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + * @param p7 parameter to the message. + * @param p8 parameter to the message. + * @param p9 parameter to the message. + + * @since Log4j-2.6 + */ + public void errorOp(final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6, + final Object p7, final Object p8, final Object p9) { + logger.logIfEnabled(FQCN, ERROR, null, message, p0, p1, p2, p3, p4, p5, p6, p7, p8, p9); + } + + /** + * Logs a message at the {@code ERROR} level including the stack trace of + * the {@link Throwable} {@code t} passed as parameter. + * + * @param message the message to log. + * @param t the exception to log, including its stack trace. + */ + public void errorOp(final String message, final Throwable t) { + logger.logIfEnabled(FQCN, ERROR, null, message, t); + } + + /** + * Logs a message which is only to be constructed if the logging level is the {@code ERROR}level. + * + * @param msgSupplier A function, which when called, produces the desired log message; + * the format depends on the message factory. + * @since Log4j-2.4 + */ + public void errorOp(final Supplier msgSupplier) { + logger.logIfEnabled(FQCN, ERROR, null, msgSupplier, (Throwable) null); + } + + /** + * Logs a message (only to be constructed if the logging level is the {@code ERROR} + * level) including the stack trace of the {@link Throwable} t passed as parameter. + * + * @param msgSupplier A function, which when called, produces the desired log message; + * the format depends on the message factory. + * @param t the exception to log, including its stack trace. + * @since Log4j-2.4 + */ + public void errorOp(final Supplier msgSupplier, final Throwable t) { + logger.logIfEnabled(FQCN, ERROR, null, msgSupplier, t); + } + + /** + * Logs a message which is only to be constructed if the logging level is the + * {@code ERROR} level with the specified Marker. 
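+     * <p>Sketch of lazy message construction: the lambda is only evaluated when the statement is
+     * enabled ({@code buildDiagnosticDump()} is a hypothetical, expensive helper):
+     * <pre>{@code
+     * LOGGER.errorOp(marker, () -> "Diagnostic dump: " + buildDiagnosticDump());
+     * }</pre>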
+ * + * @param marker the marker data specific to this log statement + * @param msgSupplier A function, which when called, produces the desired log message; + * the format depends on the message factory. + * @since Log4j-2.4 + */ + public void errorOp(final Marker marker, final Supplier msgSupplier) { + logger.logIfEnabled(FQCN, ERROR, marker, msgSupplier, (Throwable) null); + } + + /** + * Logs a message with parameters which are only to be constructed if the logging level is the + * {@code ERROR} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param paramSuppliers An array of functions, which when called, produce the desired log message parameters. + * @since Log4j-2.4 + */ + public void errorOp(final Marker marker, final String message, final Supplier... paramSuppliers) { + logger.logIfEnabled(FQCN, ERROR, marker, message, paramSuppliers); + } + + /** + * Logs a message (only to be constructed if the logging level is the {@code ERROR} + * level) with the specified Marker and including the stack trace of the {@link Throwable} + * t passed as parameter. + * + * @param marker the marker data specific to this log statement + * @param msgSupplier A function, which when called, produces the desired log message; + * the format depends on the message factory. + * @param t A Throwable or null. + * @since Log4j-2.4 + */ + public void errorOp(final Marker marker, final Supplier msgSupplier, final Throwable t) { + logger.logIfEnabled(FQCN, ERROR, marker, msgSupplier, t); + } + + /** + * Logs a message with parameters which are only to be constructed if the logging level is + * the {@code ERROR} level. + * + * @param message the message to log; the format depends on the message factory. + * @param paramSuppliers An array of functions, which when called, produce the desired log message parameters. + * @since Log4j-2.4 + */ + public void errorOp(final String message, final Supplier... paramSuppliers) { + logger.logIfEnabled(FQCN, ERROR, null, message, paramSuppliers); + } + + /** + * Logs a message which is only to be constructed if the logging level is the + * {@code ERROR} level with the specified Marker. The {@code MessageSupplier} may or may + * not use the {@link MessageFactory} to construct the {@code Message}. + * + * @param marker the marker data specific to this log statement + * @param msgSupplier A function, which when called, produces the desired log message. + * @since Log4j-2.4 + */ + public void errorOp(final Marker marker, final MessageSupplier msgSupplier) { + logger.logIfEnabled(FQCN, ERROR, marker, msgSupplier, (Throwable) null); + } + + /** + * Logs a message (only to be constructed if the logging level is the {@code ERROR} + * level) with the specified Marker and including the stack trace of the {@link Throwable} + * t passed as parameter. The {@code MessageSupplier} may or may not use the + * {@link MessageFactory} to construct the {@code Message}. + * + * @param marker the marker data specific to this log statement + * @param msgSupplier A function, which when called, produces the desired log message. + * @param t A Throwable or null. + * @since Log4j-2.4 + */ + public void errorOp(final Marker marker, final MessageSupplier msgSupplier, final Throwable t) { + logger.logIfEnabled(FQCN, ERROR, marker, msgSupplier, t); + } + + /** + * Logs a message which is only to be constructed if the logging level is the + * {@code ERROR} level. 
The {@code MessageSupplier} may or may not use the + * {@link MessageFactory} to construct the {@code Message}. + * + * @param msgSupplier A function, which when called, produces the desired log message. + * @since Log4j-2.4 + */ + public void errorOp(final MessageSupplier msgSupplier) { + logger.logIfEnabled(FQCN, ERROR, null, msgSupplier, (Throwable) null); + } + + /** + * Logs a message (only to be constructed if the logging level is the {@code ERROR} + * level) including the stack trace of the {@link Throwable} t passed as parameter. + * The {@code MessageSupplier} may or may not use the {@link MessageFactory} to construct the + * {@code Message}. + * + * @param msgSupplier A function, which when called, produces the desired log message. + * @param t the exception to log, including its stack trace. + * @since Log4j-2.4 + */ + public void errorOp(final MessageSupplier msgSupplier, final Throwable t) { + logger.logIfEnabled(FQCN, ERROR, null, msgSupplier, t); + } + + /** + * Logs a message with the specific Marker at the {@code WARN} level. + * + * @param marker the marker data specific to this log statement + * @param msg the message string to be logged + */ + public void warnOp(final Marker marker, final Message msg) { + logger.logIfEnabled(FQCN, WARN, marker, msg, (Throwable) null); + } + + /** + * Logs a message with the specific Marker at the {@code WARN} level. + * + * @param marker the marker data specific to this log statement + * @param msg the message string to be logged + * @param t A Throwable or null. + */ + public void warnOp(final Marker marker, final Message msg, final Throwable t) { + logger.logIfEnabled(FQCN, WARN, marker, msg, t); + } + + /** + * Logs a message object with the {@code WARN} level. + * + * @param marker the marker data specific to this log statement + * @param message the message object to log. + */ + public void warnOp(final Marker marker, final Object message) { + logger.logIfEnabled(FQCN, WARN, marker, message, (Throwable) null); + } + + /** + * Logs a message CharSequence with the {@code WARN} level. + * + * @param marker the marker data specific to this log statement + * @param message the message CharSequence to log. + * @since Log4j-2.6 + */ + public void warnOp(final Marker marker, final CharSequence message) { + logger.logIfEnabled(FQCN, WARN, marker, message, (Throwable) null); + } + + /** + * Logs a message at the {@code WARN} level including the stack trace of + * the {@link Throwable} {@code t} passed as parameter. + * + * @param marker the marker data specific to this log statement + * @param message the message to log. + * @param t the exception to log, including its stack trace. + */ + public void warnOp(final Marker marker, final Object message, final Throwable t) { + logger.logIfEnabled(FQCN, WARN, marker, message, t); + } + + /** + * Logs a message at the {@code WARN} level including the stack trace of + * the {@link Throwable} {@code t} passed as parameter. + * + * @param marker the marker data specific to this log statement + * @param message the CharSequence to log. + * @param t the exception to log, including its stack trace. + * @since Log4j-2.6 + */ + public void warnOp(final Marker marker, final CharSequence message, final Throwable t) { + logger.logIfEnabled(FQCN, WARN, marker, message, t); + } + + /** + * Logs a message object with the {@code WARN} level. + * + * @param marker the marker data specific to this log statement + * @param message the message object to log. 
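+     * <p>Illustrative call (the {@code LOGGER} instance and {@code marker} are hypothetical names):
+     * <pre>{@code
+     * LOGGER.warnOp(marker, "The requested change cannot be applied");
+     * }</pre>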
+ */ + public void warnOp(final Marker marker, final String message) { + logger.logIfEnabled(FQCN, WARN, marker, message, (Throwable) null); + } + + /** + * Logs a message with parameters at the {@code WARN} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param params parameters to the message. + + */ + public void warnOp(final Marker marker, final String message, final Object... params) { + logger.logIfEnabled(FQCN, WARN, marker, message, params); + } + + /** + * Logs a message with parameters at the {@code WARN} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + + * @since Log4j-2.6 + */ + public void warnOp(final Marker marker, final String message, final Object p0) { + logger.logIfEnabled(FQCN, WARN, marker, message, p0); + } + + /** + * Logs a message with parameters at the {@code WARN} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + + * @since Log4j-2.6 + */ + public void warnOp(final Marker marker, final String message, final Object p0, final Object p1) { + logger.logIfEnabled(FQCN, WARN, marker, message, p0, p1); + } + + /** + * Logs a message with parameters at the {@code WARN} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + + * @since Log4j-2.6 + */ + public void warnOp(final Marker marker, final String message, final Object p0, final Object p1, final Object p2) { + logger.logIfEnabled(FQCN, WARN, marker, message, p0, p1, p2); + } + + /** + * Logs a message with parameters at the {@code WARN} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + + * @since Log4j-2.6 + */ + public void warnOp(final Marker marker, final String message, final Object p0, final Object p1, final Object p2, + final Object p3) { + logger.logIfEnabled(FQCN, WARN, marker, message, p0, p1, p2, p3); + } + + /** + * Logs a message with parameters at the {@code WARN} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + + * @since Log4j-2.6 + */ + public void warnOp(final Marker marker, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4) { + logger.logIfEnabled(FQCN, WARN, marker, message, p0, p1, p2, p3, p4); + } + + /** + * Logs a message with parameters at the {@code WARN} level. 
+ * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + + * @since Log4j-2.6 + */ + public void warnOp(final Marker marker, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5) { + logger.logIfEnabled(FQCN, WARN, marker, message, p0, p1, p2, p3, p4, p5); + } + + /** + * Logs a message with parameters at the {@code WARN} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + + * @since Log4j-2.6 + */ + public void warnOp(final Marker marker, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6) { + logger.logIfEnabled(FQCN, WARN, marker, message, p0, p1, p2, p3, p4, p5, p6); + } + + /** + * Logs a message with parameters at the {@code WARN} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + * @param p7 parameter to the message. + + * @since Log4j-2.6 + */ + public void warnOp(final Marker marker, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6, + final Object p7) { + logger.logIfEnabled(FQCN, WARN, marker, message, p0, p1, p2, p3, p4, p5, p6, p7); + } + + /** + * Logs a message with parameters at the {@code WARN} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + * @param p7 parameter to the message. + * @param p8 parameter to the message. + + * @since Log4j-2.6 + */ + public void warnOp(final Marker marker, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6, + final Object p7, final Object p8) { + logger.logIfEnabled(FQCN, WARN, marker, message, p0, p1, p2, p3, p4, p5, p6, p7, p8); + } + + /** + * Logs a message with parameters at the {@code WARN} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. 
+ * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + * @param p7 parameter to the message. + * @param p8 parameter to the message. + * @param p9 parameter to the message. + + * @since Log4j-2.6 + */ + public void warnOp(final Marker marker, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6, + final Object p7, final Object p8, final Object p9) { + logger.logIfEnabled(FQCN, WARN, marker, message, p0, p1, p2, p3, p4, p5, p6, p7, p8, p9); + } + + /** + * Logs a message at the {@code WARN} level including the stack trace of + * the {@link Throwable} {@code t} passed as parameter. + * + * @param marker the marker data specific to this log statement + * @param message the message to log. + * @param t the exception to log, including its stack trace. + */ + public void warnOp(final Marker marker, final String message, final Throwable t) { + logger.logIfEnabled(FQCN, WARN, marker, message, t); + } + + /** + * Logs the specified Message at the {@code WARN} level. + * + * @param msg the message string to be logged + */ + public void warnOp(final Message msg) { + logger.logIfEnabled(FQCN, WARN, null, msg, (Throwable) null); + } + + /** + * Logs the specified Message at the {@code WARN} level. + * + * @param msg the message string to be logged + * @param t A Throwable or null. + */ + public void warnOp(final Message msg, final Throwable t) { + logger.logIfEnabled(FQCN, WARN, null, msg, t); + } + + /** + * Logs a message object with the {@code WARN} level. + * + * @param message the message object to log. + */ + public void warnOp(final Object message) { + logger.logIfEnabled(FQCN, WARN, null, message, (Throwable) null); + } + + /** + * Logs a message at the {@code WARN} level including the stack trace of + * the {@link Throwable} {@code t} passed as parameter. + * + * @param message the message to log. + * @param t the exception to log, including its stack trace. + */ + public void warnOp(final Object message, final Throwable t) { + logger.logIfEnabled(FQCN, WARN, null, message, t); + } + + /** + * Logs a message CharSequence with the {@code WARN} level. + * + * @param message the message CharSequence to log. + * @since Log4j-2.6 + */ + public void warnOp(final CharSequence message) { + logger.logIfEnabled(FQCN, WARN, null, message, (Throwable) null); + } + + /** + * Logs a CharSequence at the {@code WARN} level including the stack trace of + * the {@link Throwable} {@code t} passed as parameter. + * + * @param message the CharSequence to log. + * @param t the exception to log, including its stack trace. + * @since Log4j-2.6 + */ + public void warnOp(final CharSequence message, final Throwable t) { + logger.logIfEnabled(FQCN, WARN, null, message, t); + } + + /** + * Logs a message object with the {@code WARN} level. + * + * @param message the message object to log. + */ + public void warnOp(final String message) { + logger.logIfEnabled(FQCN, WARN, null, message, (Throwable) null); + } + + /** + * Logs a message with parameters at the {@code WARN} level. + * + * @param message the message to log; the format depends on the message factory. + * @param params parameters to the message. + + */ + public void warnOp(final String message, final Object... 
params) { + logger.logIfEnabled(FQCN, WARN, null, message, params); + } + + /** + * Logs a message with parameters at the {@code WARN} level. + * + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + + * @since Log4j-2.6 + */ + public void warnOp(final String message, final Object p0) { + logger.logIfEnabled(FQCN, WARN, null, message, p0); + } + + /** + * Logs a message with parameters at the {@code WARN} level. + * + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + + * @since Log4j-2.6 + */ + public void warnOp(final String message, final Object p0, final Object p1) { + logger.logIfEnabled(FQCN, WARN, null, message, p0, p1); + } + + /** + * Logs a message with parameters at the {@code WARN} level. + * + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + + * @since Log4j-2.6 + */ + public void warnOp(final String message, final Object p0, final Object p1, final Object p2) { + logger.logIfEnabled(FQCN, WARN, null, message, p0, p1, p2); + } + + /** + * Logs a message with parameters at the {@code WARN} level. + * + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + + * @since Log4j-2.6 + */ + public void warnOp(final String message, final Object p0, final Object p1, final Object p2, + final Object p3) { + logger.logIfEnabled(FQCN, WARN, null, message, p0, p1, p2, p3); + } + + /** + * Logs a message with parameters at the {@code WARN} level. + * + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + + * @since Log4j-2.6 + */ + public void warnOp(final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4) { + logger.logIfEnabled(FQCN, WARN, null, message, p0, p1, p2, p3, p4); + } + + /** + * Logs a message with parameters at the {@code WARN} level. + * + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + + * @since Log4j-2.6 + */ + public void warnOp(final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5) { + logger.logIfEnabled(FQCN, WARN, null, message, p0, p1, p2, p3, p4, p5); + } + + /** + * Logs a message with parameters at the {@code WARN} level. + * + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. 
+ + * @since Log4j-2.6 + */ + public void warnOp(final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6) { + logger.logIfEnabled(FQCN, WARN, null, message, p0, p1, p2, p3, p4, p5, p6); + } + + /** + * Logs a message with parameters at the {@code WARN} level. + * + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + * @param p7 parameter to the message. + + * @since Log4j-2.6 + */ + public void warnOp(final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6, + final Object p7) { + logger.logIfEnabled(FQCN, WARN, null, message, p0, p1, p2, p3, p4, p5, p6, p7); + } + + /** + * Logs a message with parameters at the {@code WARN} level. + * + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + * @param p7 parameter to the message. + * @param p8 parameter to the message. + + * @since Log4j-2.6 + */ + public void warnOp(final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6, + final Object p7, final Object p8) { + logger.logIfEnabled(FQCN, WARN, null, message, p0, p1, p2, p3, p4, p5, p6, p7, p8); + } + + /** + * Logs a message with parameters at the {@code WARN} level. + * + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + * @param p7 parameter to the message. + * @param p8 parameter to the message. + * @param p9 parameter to the message. + + * @since Log4j-2.6 + */ + public void warnOp(final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6, + final Object p7, final Object p8, final Object p9) { + logger.logIfEnabled(FQCN, WARN, null, message, p0, p1, p2, p3, p4, p5, p6, p7, p8, p9); + } + + /** + * Logs a message at the {@code WARN} level including the stack trace of + * the {@link Throwable} {@code t} passed as parameter. + * + * @param message the message to log. + * @param t the exception to log, including its stack trace. + */ + public void warnOp(final String message, final Throwable t) { + logger.logIfEnabled(FQCN, WARN, null, message, t); + } + + /** + * Logs a message which is only to be constructed if the logging level is the {@code WARN}level. + * + * @param msgSupplier A function, which when called, produces the desired log message; + * the format depends on the message factory. 
+ * @since Log4j-2.4 + */ + public void warnOp(final Supplier msgSupplier) { + logger.logIfEnabled(FQCN, WARN, null, msgSupplier, (Throwable) null); + } + + /** + * Logs a message (only to be constructed if the logging level is the {@code WARN} + * level) including the stack trace of the {@link Throwable} t passed as parameter. + * + * @param msgSupplier A function, which when called, produces the desired log message; + * the format depends on the message factory. + * @param t the exception to log, including its stack trace. + * @since Log4j-2.4 + */ + public void warnOp(final Supplier msgSupplier, final Throwable t) { + logger.logIfEnabled(FQCN, WARN, null, msgSupplier, t); + } + + /** + * Logs a message which is only to be constructed if the logging level is the + * {@code WARN} level with the specified Marker. + * + * @param marker the marker data specific to this log statement + * @param msgSupplier A function, which when called, produces the desired log message; + * the format depends on the message factory. + * @since Log4j-2.4 + */ + public void warnOp(final Marker marker, final Supplier msgSupplier) { + logger.logIfEnabled(FQCN, WARN, marker, msgSupplier, (Throwable) null); + } + + /** + * Logs a message with parameters which are only to be constructed if the logging level is the + * {@code WARN} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param paramSuppliers An array of functions, which when called, produce the desired log message parameters. + * @since Log4j-2.4 + */ + public void warnOp(final Marker marker, final String message, final Supplier... paramSuppliers) { + logger.logIfEnabled(FQCN, WARN, marker, message, paramSuppliers); + } + + /** + * Logs a message (only to be constructed if the logging level is the {@code WARN} + * level) with the specified Marker and including the stack trace of the {@link Throwable} + * t passed as parameter. + * + * @param marker the marker data specific to this log statement + * @param msgSupplier A function, which when called, produces the desired log message; + * the format depends on the message factory. + * @param t A Throwable or null. + * @since Log4j-2.4 + */ + public void warnOp(final Marker marker, final Supplier msgSupplier, final Throwable t) { + logger.logIfEnabled(FQCN, WARN, marker, msgSupplier, t); + } + + /** + * Logs a message with parameters which are only to be constructed if the logging level is + * the {@code WARN} level. + * + * @param message the message to log; the format depends on the message factory. + * @param paramSuppliers An array of functions, which when called, produce the desired log message parameters. + * @since Log4j-2.4 + */ + public void warnOp(final String message, final Supplier... paramSuppliers) { + logger.logIfEnabled(FQCN, WARN, null, message, paramSuppliers); + } + + /** + * Logs a message which is only to be constructed if the logging level is the + * {@code WARN} level with the specified Marker. The {@code MessageSupplier} may or may + * not use the {@link MessageFactory} to construct the {@code Message}. + * + * @param marker the marker data specific to this log statement + * @param msgSupplier A function, which when called, produces the desired log message. 
+ * @since Log4j-2.4 + */ + public void warnOp(final Marker marker, final MessageSupplier msgSupplier) { + logger.logIfEnabled(FQCN, WARN, marker, msgSupplier, (Throwable) null); + } + + /** + * Logs a message (only to be constructed if the logging level is the {@code WARN} + * level) with the specified Marker and including the stack trace of the {@link Throwable} + * t passed as parameter. The {@code MessageSupplier} may or may not use the + * {@link MessageFactory} to construct the {@code Message}. + * + * @param marker the marker data specific to this log statement + * @param msgSupplier A function, which when called, produces the desired log message. + * @param t A Throwable or null. + * @since Log4j-2.4 + */ + public void warnOp(final Marker marker, final MessageSupplier msgSupplier, final Throwable t) { + logger.logIfEnabled(FQCN, WARN, marker, msgSupplier, t); + } + + /** + * Logs a message which is only to be constructed if the logging level is the + * {@code WARN} level. The {@code MessageSupplier} may or may not use the + * {@link MessageFactory} to construct the {@code Message}. + * + * @param msgSupplier A function, which when called, produces the desired log message. + * @since Log4j-2.4 + */ + public void warnOp(final MessageSupplier msgSupplier) { + logger.logIfEnabled(FQCN, WARN, null, msgSupplier, (Throwable) null); + } + + /** + * Logs a message (only to be constructed if the logging level is the {@code WARN} + * level) including the stack trace of the {@link Throwable} t passed as parameter. + * The {@code MessageSupplier} may or may not use the {@link MessageFactory} to construct the + * {@code Message}. + * + * @param msgSupplier A function, which when called, produces the desired log message. + * @param t the exception to log, including its stack trace. + * @since Log4j-2.4 + */ + public void warnOp(final MessageSupplier msgSupplier, final Throwable t) { + logger.logIfEnabled(FQCN, WARN, null, msgSupplier, t); + } + + /** + * Logs a message with the specific Marker at the {@code INFO} level. + * + * @param marker the marker data specific to this log statement + * @param msg the message string to be logged + */ + public void infoOp(final Marker marker, final Message msg) { + logger.logIfEnabled(FQCN, INFO, marker, msg, (Throwable) null); + } + + /** + * Logs a message with the specific Marker at the {@code INFO} level. + * + * @param marker the marker data specific to this log statement + * @param msg the message string to be logged + * @param t A Throwable or null. + */ + public void infoOp(final Marker marker, final Message msg, final Throwable t) { + logger.logIfEnabled(FQCN, INFO, marker, msg, t); + } + + /** + * Logs a message object with the {@code INFO} level. + * + * @param marker the marker data specific to this log statement + * @param message the message object to log. + */ + public void infoOp(final Marker marker, final Object message) { + logger.logIfEnabled(FQCN, INFO, marker, message, (Throwable) null); + } + + /** + * Logs a message CharSequence with the {@code INFO} level. + * + * @param marker the marker data specific to this log statement + * @param message the message CharSequence to log. + * @since Log4j-2.6 + */ + public void infoOp(final Marker marker, final CharSequence message) { + logger.logIfEnabled(FQCN, INFO, marker, message, (Throwable) null); + } + + /** + * Logs a message at the {@code INFO} level including the stack trace of + * the {@link Throwable} {@code t} passed as parameter. 
+ * + * @param marker the marker data specific to this log statement + * @param message the message to log. + * @param t the exception to log, including its stack trace. + */ + public void infoOp(final Marker marker, final Object message, final Throwable t) { + logger.logIfEnabled(FQCN, INFO, marker, message, t); + } + + /** + * Logs a message at the {@code INFO} level including the stack trace of + * the {@link Throwable} {@code t} passed as parameter. + * + * @param marker the marker data specific to this log statement + * @param message the CharSequence to log. + * @param t the exception to log, including its stack trace. + * @since Log4j-2.6 + */ + public void infoOp(final Marker marker, final CharSequence message, final Throwable t) { + logger.logIfEnabled(FQCN, INFO, marker, message, t); + } + + /** + * Logs a message object with the {@code INFO} level. + * + * @param marker the marker data specific to this log statement + * @param message the message object to log. + */ + public void infoOp(final Marker marker, final String message) { + logger.logIfEnabled(FQCN, INFO, marker, message, (Throwable) null); + } + + /** + * Logs a message with parameters at the {@code INFO} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param params parameters to the message. + + */ + public void infoOp(final Marker marker, final String message, final Object... params) { + logger.logIfEnabled(FQCN, INFO, marker, message, params); + } + + /** + * Logs a message with parameters at the {@code INFO} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + + * @since Log4j-2.6 + */ + public void infoOp(final Marker marker, final String message, final Object p0) { + logger.logIfEnabled(FQCN, INFO, marker, message, p0); + } + + /** + * Logs a message with parameters at the {@code INFO} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + + * @since Log4j-2.6 + */ + public void infoOp(final Marker marker, final String message, final Object p0, final Object p1) { + logger.logIfEnabled(FQCN, INFO, marker, message, p0, p1); + } + + /** + * Logs a message with parameters at the {@code INFO} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + + * @since Log4j-2.6 + */ + public void infoOp(final Marker marker, final String message, final Object p0, final Object p1, final Object p2) { + logger.logIfEnabled(FQCN, INFO, marker, message, p0, p1, p2); + } + + /** + * Logs a message with parameters at the {@code INFO} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. 
+ + * @since Log4j-2.6 + */ + public void infoOp(final Marker marker, final String message, final Object p0, final Object p1, final Object p2, + final Object p3) { + logger.logIfEnabled(FQCN, INFO, marker, message, p0, p1, p2, p3); + } + + /** + * Logs a message with parameters at the {@code INFO} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + + * @since Log4j-2.6 + */ + public void infoOp(final Marker marker, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4) { + logger.logIfEnabled(FQCN, INFO, marker, message, p0, p1, p2, p3, p4); + } + + /** + * Logs a message with parameters at the {@code INFO} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + + * @since Log4j-2.6 + */ + public void infoOp(final Marker marker, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5) { + logger.logIfEnabled(FQCN, INFO, marker, message, p0, p1, p2, p3, p4, p5); + } + + /** + * Logs a message with parameters at the {@code INFO} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + + * @since Log4j-2.6 + */ + public void infoOp(final Marker marker, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6) { + logger.logIfEnabled(FQCN, INFO, marker, message, p0, p1, p2, p3, p4, p5, p6); + } + + /** + * Logs a message with parameters at the {@code INFO} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + * @param p7 parameter to the message. + + * @since Log4j-2.6 + */ + public void infoOp(final Marker marker, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6, + final Object p7) { + logger.logIfEnabled(FQCN, INFO, marker, message, p0, p1, p2, p3, p4, p5, p6, p7); + } + + /** + * Logs a message with parameters at the {@code INFO} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. 
+ * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + * @param p7 parameter to the message. + * @param p8 parameter to the message. + + * @since Log4j-2.6 + */ + public void infoOp(final Marker marker, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6, + final Object p7, final Object p8) { + logger.logIfEnabled(FQCN, INFO, marker, message, p0, p1, p2, p3, p4, p5, p6, p7, p8); + } + + /** + * Logs a message with parameters at the {@code INFO} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + * @param p7 parameter to the message. + * @param p8 parameter to the message. + * @param p9 parameter to the message. + + * @since Log4j-2.6 + */ + public void infoOp(final Marker marker, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6, + final Object p7, final Object p8, final Object p9) { + logger.logIfEnabled(FQCN, INFO, marker, message, p0, p1, p2, p3, p4, p5, p6, p7, p8, p9); + } + + /** + * Logs a message at the {@code INFO} level including the stack trace of + * the {@link Throwable} {@code t} passed as parameter. + * + * @param marker the marker data specific to this log statement + * @param message the message to log. + * @param t the exception to log, including its stack trace. + */ + public void infoOp(final Marker marker, final String message, final Throwable t) { + logger.logIfEnabled(FQCN, INFO, marker, message, t); + } + + /** + * Logs the specified Message at the {@code INFO} level. + * + * @param msg the message string to be logged + */ + public void infoOp(final Message msg) { + logger.logIfEnabled(FQCN, INFO, null, msg, (Throwable) null); + } + + /** + * Logs the specified Message at the {@code INFO} level. + * + * @param msg the message string to be logged + * @param t A Throwable or null. + */ + public void infoOp(final Message msg, final Throwable t) { + logger.logIfEnabled(FQCN, INFO, null, msg, t); + } + + /** + * Logs a message object with the {@code INFO} level. + * + * @param message the message object to log. + */ + public void infoOp(final Object message) { + logger.logIfEnabled(FQCN, INFO, null, message, (Throwable) null); + } + + /** + * Logs a message at the {@code INFO} level including the stack trace of + * the {@link Throwable} {@code t} passed as parameter. + * + * @param message the message to log. + * @param t the exception to log, including its stack trace. + */ + public void infoOp(final Object message, final Throwable t) { + logger.logIfEnabled(FQCN, INFO, null, message, t); + } + + /** + * Logs a message CharSequence with the {@code INFO} level. + * + * @param message the message CharSequence to log. 
+ * @since Log4j-2.6 + */ + public void infoOp(final CharSequence message) { + logger.logIfEnabled(FQCN, INFO, null, message, (Throwable) null); + } + + /** + * Logs a CharSequence at the {@code INFO} level including the stack trace of + * the {@link Throwable} {@code t} passed as parameter. + * + * @param message the CharSequence to log. + * @param t the exception to log, including its stack trace. + * @since Log4j-2.6 + */ + public void infoOp(final CharSequence message, final Throwable t) { + logger.logIfEnabled(FQCN, INFO, null, message, t); + } + + /** + * Logs a message object with the {@code INFO} level. + * + * @param message the message object to log. + */ + public void infoOp(final String message) { + logger.logIfEnabled(FQCN, INFO, null, message, (Throwable) null); + } + + /** + * Logs a message with parameters at the {@code INFO} level. + * + * @param message the message to log; the format depends on the message factory. + * @param params parameters to the message. + + */ + public void infoOp(final String message, final Object... params) { + logger.logIfEnabled(FQCN, INFO, null, message, params); + } + + /** + * Logs a message with parameters at the {@code INFO} level. + * + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + + * @since Log4j-2.6 + */ + public void infoOp(final String message, final Object p0) { + logger.logIfEnabled(FQCN, INFO, null, message, p0); + } + + /** + * Logs a message with parameters at the {@code INFO} level. + * + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + + * @since Log4j-2.6 + */ + public void infoOp(final String message, final Object p0, final Object p1) { + logger.logIfEnabled(FQCN, INFO, null, message, p0, p1); + } + + /** + * Logs a message with parameters at the {@code INFO} level. + * + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + + * @since Log4j-2.6 + */ + public void infoOp(final String message, final Object p0, final Object p1, final Object p2) { + logger.logIfEnabled(FQCN, INFO, null, message, p0, p1, p2); + } + + /** + * Logs a message with parameters at the {@code INFO} level. + * + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + + * @since Log4j-2.6 + */ + public void infoOp(final String message, final Object p0, final Object p1, final Object p2, + final Object p3) { + logger.logIfEnabled(FQCN, INFO, null, message, p0, p1, p2, p3); + } + + /** + * Logs a message with parameters at the {@code INFO} level. + * + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + + * @since Log4j-2.6 + */ + public void infoOp(final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4) { + logger.logIfEnabled(FQCN, INFO, null, message, p0, p1, p2, p3, p4); + } + + /** + * Logs a message with parameters at the {@code INFO} level. 
+ * + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + + * @since Log4j-2.6 + */ + public void infoOp(final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5) { + logger.logIfEnabled(FQCN, INFO, null, message, p0, p1, p2, p3, p4, p5); + } + + /** + * Logs a message with parameters at the {@code INFO} level. + * + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + + * @since Log4j-2.6 + */ + public void infoOp(final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6) { + logger.logIfEnabled(FQCN, INFO, null, message, p0, p1, p2, p3, p4, p5, p6); + } + + /** + * Logs a message with parameters at the {@code INFO} level. + * + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + * @param p7 parameter to the message. + + * @since Log4j-2.6 + */ + public void infoOp(final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6, + final Object p7) { + logger.logIfEnabled(FQCN, INFO, null, message, p0, p1, p2, p3, p4, p5, p6, p7); + } + + /** + * Logs a message with parameters at the {@code INFO} level. + * + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + * @param p7 parameter to the message. + * @param p8 parameter to the message. + + * @since Log4j-2.6 + */ + public void infoOp(final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6, + final Object p7, final Object p8) { + logger.logIfEnabled(FQCN, INFO, null, message, p0, p1, p2, p3, p4, p5, p6, p7, p8); + } + + /** + * Logs a message with parameters at the {@code INFO} level. + * + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + * @param p7 parameter to the message. + * @param p8 parameter to the message. + * @param p9 parameter to the message. 
+ + * @since Log4j-2.6 + */ + public void infoOp(final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6, + final Object p7, final Object p8, final Object p9) { + logger.logIfEnabled(FQCN, INFO, null, message, p0, p1, p2, p3, p4, p5, p6, p7, p8, p9); + } + + /** + * Logs a message at the {@code INFO} level including the stack trace of + * the {@link Throwable} {@code t} passed as parameter. + * + * @param message the message to log. + * @param t the exception to log, including its stack trace. + */ + public void infoOp(final String message, final Throwable t) { + logger.logIfEnabled(FQCN, INFO, null, message, t); + } + + /** + * Logs a message which is only to be constructed if the logging level is the {@code INFO}level. + * + * @param msgSupplier A function, which when called, produces the desired log message; + * the format depends on the message factory. + * @since Log4j-2.4 + */ + public void infoOp(final Supplier msgSupplier) { + logger.logIfEnabled(FQCN, INFO, null, msgSupplier, (Throwable) null); + } + + /** + * Logs a message (only to be constructed if the logging level is the {@code INFO} + * level) including the stack trace of the {@link Throwable} t passed as parameter. + * + * @param msgSupplier A function, which when called, produces the desired log message; + * the format depends on the message factory. + * @param t the exception to log, including its stack trace. + * @since Log4j-2.4 + */ + public void infoOp(final Supplier msgSupplier, final Throwable t) { + logger.logIfEnabled(FQCN, INFO, null, msgSupplier, t); + } + + /** + * Logs a message which is only to be constructed if the logging level is the + * {@code INFO} level with the specified Marker. + * + * @param marker the marker data specific to this log statement + * @param msgSupplier A function, which when called, produces the desired log message; + * the format depends on the message factory. + * @since Log4j-2.4 + */ + public void infoOp(final Marker marker, final Supplier msgSupplier) { + logger.logIfEnabled(FQCN, INFO, marker, msgSupplier, (Throwable) null); + } + + /** + * Logs a message with parameters which are only to be constructed if the logging level is the + * {@code INFO} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param paramSuppliers An array of functions, which when called, produce the desired log message parameters. + * @since Log4j-2.4 + */ + public void infoOp(final Marker marker, final String message, final Supplier... paramSuppliers) { + logger.logIfEnabled(FQCN, INFO, marker, message, paramSuppliers); + } + + /** + * Logs a message (only to be constructed if the logging level is the {@code INFO} + * level) with the specified Marker and including the stack trace of the {@link Throwable} + * t passed as parameter. + * + * @param marker the marker data specific to this log statement + * @param msgSupplier A function, which when called, produces the desired log message; + * the format depends on the message factory. + * @param t A Throwable or null. + * @since Log4j-2.4 + */ + public void infoOp(final Marker marker, final Supplier msgSupplier, final Throwable t) { + logger.logIfEnabled(FQCN, INFO, marker, msgSupplier, t); + } + + /** + * Logs a message with parameters which are only to be constructed if the logging level is + * the {@code INFO} level. 
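+     * <p>Sketch of lazily supplied parameters: the suppliers are only invoked when the statement is
+     * enabled ({@code describeBrokerState()} is a hypothetical helper; the {@code {}} placeholder
+     * assumes the default parameterized message factory):
+     * <pre>{@code
+     * LOGGER.infoOp("Current state: {}", () -> describeBrokerState());
+     * }</pre>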
+ * + * @param message the message to log; the format depends on the message factory. + * @param paramSuppliers An array of functions, which when called, produce the desired log message parameters. + * @since Log4j-2.4 + */ + public void infoOp(final String message, final Supplier... paramSuppliers) { + logger.logIfEnabled(FQCN, INFO, null, message, paramSuppliers); + } + + /** + * Logs a message which is only to be constructed if the logging level is the + * {@code INFO} level with the specified Marker. The {@code MessageSupplier} may or may + * not use the {@link MessageFactory} to construct the {@code Message}. + * + * @param marker the marker data specific to this log statement + * @param msgSupplier A function, which when called, produces the desired log message. + * @since Log4j-2.4 + */ + public void infoOp(final Marker marker, final MessageSupplier msgSupplier) { + logger.logIfEnabled(FQCN, INFO, marker, msgSupplier, (Throwable) null); + } + + /** + * Logs a message (only to be constructed if the logging level is the {@code INFO} + * level) with the specified Marker and including the stack trace of the {@link Throwable} + * t passed as parameter. The {@code MessageSupplier} may or may not use the + * {@link MessageFactory} to construct the {@code Message}. + * + * @param marker the marker data specific to this log statement + * @param msgSupplier A function, which when called, produces the desired log message. + * @param t A Throwable or null. + * @since Log4j-2.4 + */ + public void infoOp(final Marker marker, final MessageSupplier msgSupplier, final Throwable t) { + logger.logIfEnabled(FQCN, INFO, marker, msgSupplier, t); + } + + /** + * Logs a message which is only to be constructed if the logging level is the + * {@code INFO} level. The {@code MessageSupplier} may or may not use the + * {@link MessageFactory} to construct the {@code Message}. + * + * @param msgSupplier A function, which when called, produces the desired log message. + * @since Log4j-2.4 + */ + public void infoOp(final MessageSupplier msgSupplier) { + logger.logIfEnabled(FQCN, INFO, null, msgSupplier, (Throwable) null); + } + + /** + * Logs a message (only to be constructed if the logging level is the {@code INFO} + * level) including the stack trace of the {@link Throwable} t passed as parameter. + * The {@code MessageSupplier} may or may not use the {@link MessageFactory} to construct the + * {@code Message}. + * + * @param msgSupplier A function, which when called, produces the desired log message. + * @param t the exception to log, including its stack trace. + * @since Log4j-2.4 + */ + public void infoOp(final MessageSupplier msgSupplier, final Throwable t) { + logger.logIfEnabled(FQCN, INFO, null, msgSupplier, t); + } + + /** + * Logs a message with the specific Marker at the {@code DEBUG} level. + * + * @param marker the marker data specific to this log statement + * @param msg the message string to be logged + */ + public void debugOp(final Marker marker, final Message msg) { + logger.logIfEnabled(FQCN, DEBUG, marker, msg, (Throwable) null); + } + + /** + * Logs a message with the specific Marker at the {@code DEBUG} level. + * + * @param marker the marker data specific to this log statement + * @param msg the message string to be logged + * @param t A Throwable or null. + */ + public void debugOp(final Marker marker, final Message msg, final Throwable t) { + logger.logIfEnabled(FQCN, DEBUG, marker, msg, t); + } + + /** + * Logs a message object with the {@code DEBUG} level. 
+ * + * @param marker the marker data specific to this log statement + * @param message the message object to log. + */ + public void debugOp(final Marker marker, final Object message) { + logger.logIfEnabled(FQCN, DEBUG, marker, message, (Throwable) null); + } + + /** + * Logs a message CharSequence with the {@code DEBUG} level. + * + * @param marker the marker data specific to this log statement + * @param message the message CharSequence to log. + * @since Log4j-2.6 + */ + public void debugOp(final Marker marker, final CharSequence message) { + logger.logIfEnabled(FQCN, DEBUG, marker, message, (Throwable) null); + } + + /** + * Logs a message at the {@code DEBUG} level including the stack trace of + * the {@link Throwable} {@code t} passed as parameter. + * + * @param marker the marker data specific to this log statement + * @param message the message to log. + * @param t the exception to log, including its stack trace. + */ + public void debugOp(final Marker marker, final Object message, final Throwable t) { + logger.logIfEnabled(FQCN, DEBUG, marker, message, t); + } + + /** + * Logs a message at the {@code DEBUG} level including the stack trace of + * the {@link Throwable} {@code t} passed as parameter. + * + * @param marker the marker data specific to this log statement + * @param message the CharSequence to log. + * @param t the exception to log, including its stack trace. + * @since Log4j-2.6 + */ + public void debugOp(final Marker marker, final CharSequence message, final Throwable t) { + logger.logIfEnabled(FQCN, DEBUG, marker, message, t); + } + + /** + * Logs a message object with the {@code DEBUG} level. + * + * @param marker the marker data specific to this log statement + * @param message the message object to log. + */ + public void debugOp(final Marker marker, final String message) { + logger.logIfEnabled(FQCN, DEBUG, marker, message, (Throwable) null); + } + + /** + * Logs a message with parameters at the {@code DEBUG} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param params parameters to the message. + + */ + public void debugOp(final Marker marker, final String message, final Object... params) { + logger.logIfEnabled(FQCN, DEBUG, marker, message, params); + } + + /** + * Logs a message with parameters at the {@code DEBUG} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + + * @since Log4j-2.6 + */ + public void debugOp(final Marker marker, final String message, final Object p0) { + logger.logIfEnabled(FQCN, DEBUG, marker, message, p0); + } + + /** + * Logs a message with parameters at the {@code DEBUG} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + + * @since Log4j-2.6 + */ + public void debugOp(final Marker marker, final String message, final Object p0, final Object p1) { + logger.logIfEnabled(FQCN, DEBUG, marker, message, p0, p1); + } + + /** + * Logs a message with parameters at the {@code DEBUG} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. 
+ * @param p1 parameter to the message. + * @param p2 parameter to the message. + + * @since Log4j-2.6 + */ + public void debugOp(final Marker marker, final String message, final Object p0, final Object p1, final Object p2) { + logger.logIfEnabled(FQCN, DEBUG, marker, message, p0, p1, p2); + } + + /** + * Logs a message with parameters at the {@code DEBUG} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + + * @since Log4j-2.6 + */ + public void debugOp(final Marker marker, final String message, final Object p0, final Object p1, final Object p2, + final Object p3) { + logger.logIfEnabled(FQCN, DEBUG, marker, message, p0, p1, p2, p3); + } + + /** + * Logs a message with parameters at the {@code DEBUG} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + + * @since Log4j-2.6 + */ + public void debugOp(final Marker marker, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4) { + logger.logIfEnabled(FQCN, DEBUG, marker, message, p0, p1, p2, p3, p4); + } + + /** + * Logs a message with parameters at the {@code DEBUG} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + + * @since Log4j-2.6 + */ + public void debugOp(final Marker marker, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5) { + logger.logIfEnabled(FQCN, DEBUG, marker, message, p0, p1, p2, p3, p4, p5); + } + + /** + * Logs a message with parameters at the {@code DEBUG} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + + * @since Log4j-2.6 + */ + public void debugOp(final Marker marker, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6) { + logger.logIfEnabled(FQCN, DEBUG, marker, message, p0, p1, p2, p3, p4, p5, p6); + } + + /** + * Logs a message with parameters at the {@code DEBUG} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. 
+ * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + * @param p7 parameter to the message. + + * @since Log4j-2.6 + */ + public void debugOp(final Marker marker, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6, + final Object p7) { + logger.logIfEnabled(FQCN, DEBUG, marker, message, p0, p1, p2, p3, p4, p5, p6, p7); + } + + /** + * Logs a message with parameters at the {@code DEBUG} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + * @param p7 parameter to the message. + * @param p8 parameter to the message. + + * @since Log4j-2.6 + */ + public void debugOp(final Marker marker, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6, + final Object p7, final Object p8) { + logger.logIfEnabled(FQCN, DEBUG, marker, message, p0, p1, p2, p3, p4, p5, p6, p7, p8); + } + + /** + * Logs a message with parameters at the {@code DEBUG} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + * @param p7 parameter to the message. + * @param p8 parameter to the message. + * @param p9 parameter to the message. + + * @since Log4j-2.6 + */ + public void debugOp(final Marker marker, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6, + final Object p7, final Object p8, final Object p9) { + logger.logIfEnabled(FQCN, DEBUG, marker, message, p0, p1, p2, p3, p4, p5, p6, p7, p8, p9); + } + + /** + * Logs a message at the {@code DEBUG} level including the stack trace of + * the {@link Throwable} {@code t} passed as parameter. + * + * @param marker the marker data specific to this log statement + * @param message the message to log. + * @param t the exception to log, including its stack trace. + */ + public void debugOp(final Marker marker, final String message, final Throwable t) { + logger.logIfEnabled(FQCN, DEBUG, marker, message, t); + } + + /** + * Logs the specified Message at the {@code DEBUG} level. + * + * @param msg the message string to be logged + */ + public void debugOp(final Message msg) { + logger.logIfEnabled(FQCN, DEBUG, null, msg, (Throwable) null); + } + + /** + * Logs the specified Message at the {@code DEBUG} level. + * + * @param msg the message string to be logged + * @param t A Throwable or null. + */ + public void debugOp(final Message msg, final Throwable t) { + logger.logIfEnabled(FQCN, DEBUG, null, msg, t); + } + + /** + * Logs a message object with the {@code DEBUG} level. + * + * @param message the message object to log. 
+ */ + public void debugOp(final Object message) { + logger.logIfEnabled(FQCN, DEBUG, null, message, (Throwable) null); + } + + /** + * Logs a message at the {@code DEBUG} level including the stack trace of + * the {@link Throwable} {@code t} passed as parameter. + * + * @param message the message to log. + * @param t the exception to log, including its stack trace. + */ + public void debugOp(final Object message, final Throwable t) { + logger.logIfEnabled(FQCN, DEBUG, null, message, t); + } + + /** + * Logs a message CharSequence with the {@code DEBUG} level. + * + * @param message the message CharSequence to log. + * @since Log4j-2.6 + */ + public void debugOp(final CharSequence message) { + logger.logIfEnabled(FQCN, DEBUG, null, message, (Throwable) null); + } + + /** + * Logs a CharSequence at the {@code DEBUG} level including the stack trace of + * the {@link Throwable} {@code t} passed as parameter. + * + * @param message the CharSequence to log. + * @param t the exception to log, including its stack trace. + * @since Log4j-2.6 + */ + public void debugOp(final CharSequence message, final Throwable t) { + logger.logIfEnabled(FQCN, DEBUG, null, message, t); + } + + /** + * Logs a message object with the {@code DEBUG} level. + * + * @param message the message object to log. + */ + public void debugOp(final String message) { + logger.logIfEnabled(FQCN, DEBUG, null, message, (Throwable) null); + } + + /** + * Logs a message with parameters at the {@code DEBUG} level. + * + * @param message the message to log; the format depends on the message factory. + * @param params parameters to the message. + + */ + public void debugOp(final String message, final Object... params) { + logger.logIfEnabled(FQCN, DEBUG, null, message, params); + } + + /** + * Logs a message with parameters at the {@code DEBUG} level. + * + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + + * @since Log4j-2.6 + */ + public void debugOp(final String message, final Object p0) { + logger.logIfEnabled(FQCN, DEBUG, null, message, p0); + } + + /** + * Logs a message with parameters at the {@code DEBUG} level. + * + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + + * @since Log4j-2.6 + */ + public void debugOp(final String message, final Object p0, final Object p1) { + logger.logIfEnabled(FQCN, DEBUG, null, message, p0, p1); + } + + /** + * Logs a message with parameters at the {@code DEBUG} level. + * + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + + * @since Log4j-2.6 + */ + public void debugOp(final String message, final Object p0, final Object p1, final Object p2) { + logger.logIfEnabled(FQCN, DEBUG, null, message, p0, p1, p2); + } + + /** + * Logs a message with parameters at the {@code DEBUG} level. + * + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. 
+ + * @since Log4j-2.6 + */ + public void debugOp(final String message, final Object p0, final Object p1, final Object p2, + final Object p3) { + logger.logIfEnabled(FQCN, DEBUG, null, message, p0, p1, p2, p3); + } + + /** + * Logs a message with parameters at the {@code DEBUG} level. + * + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + + * @since Log4j-2.6 + */ + public void debugOp(final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4) { + logger.logIfEnabled(FQCN, DEBUG, null, message, p0, p1, p2, p3, p4); + } + + /** + * Logs a message with parameters at the {@code DEBUG} level. + * + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + + * @since Log4j-2.6 + */ + public void debugOp(final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5) { + logger.logIfEnabled(FQCN, DEBUG, null, message, p0, p1, p2, p3, p4, p5); + } + + /** + * Logs a message with parameters at the {@code DEBUG} level. + * + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + + * @since Log4j-2.6 + */ + public void debugOp(final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6) { + logger.logIfEnabled(FQCN, DEBUG, null, message, p0, p1, p2, p3, p4, p5, p6); + } + + /** + * Logs a message with parameters at the {@code DEBUG} level. + * + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + * @param p7 parameter to the message. + + * @since Log4j-2.6 + */ + public void debugOp(final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6, + final Object p7) { + logger.logIfEnabled(FQCN, DEBUG, null, message, p0, p1, p2, p3, p4, p5, p6, p7); + } + + /** + * Logs a message with parameters at the {@code DEBUG} level. + * + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + * @param p7 parameter to the message. + * @param p8 parameter to the message. 
+ + * @since Log4j-2.6 + */ + public void debugOp(final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6, + final Object p7, final Object p8) { + logger.logIfEnabled(FQCN, DEBUG, null, message, p0, p1, p2, p3, p4, p5, p6, p7, p8); + } + + /** + * Logs a message with parameters at the {@code DEBUG} level. + * + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + * @param p7 parameter to the message. + * @param p8 parameter to the message. + * @param p9 parameter to the message. + + * @since Log4j-2.6 + */ + public void debugOp(final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6, + final Object p7, final Object p8, final Object p9) { + logger.logIfEnabled(FQCN, DEBUG, null, message, p0, p1, p2, p3, p4, p5, p6, p7, p8, p9); + } + + /** + * Logs a message at the {@code DEBUG} level including the stack trace of + * the {@link Throwable} {@code t} passed as parameter. + * + * @param message the message to log. + * @param t the exception to log, including its stack trace. + */ + public void debugOp(final String message, final Throwable t) { + logger.logIfEnabled(FQCN, DEBUG, null, message, t); + } + + /** + * Logs a message which is only to be constructed if the logging level is the {@code DEBUG} level. + * + * @param msgSupplier A function, which when called, produces the desired log message; + * the format depends on the message factory. + * @since Log4j-2.4 + */ + public void debugOp(final Supplier msgSupplier) { + logger.logIfEnabled(FQCN, DEBUG, null, msgSupplier, (Throwable) null); + } + + /** + * Logs a message (only to be constructed if the logging level is the {@code DEBUG} + * level) including the stack trace of the {@link Throwable} t passed as parameter. + * + * @param msgSupplier A function, which when called, produces the desired log message; + * the format depends on the message factory. + * @param t the exception to log, including its stack trace. + * @since Log4j-2.4 + */ + public void debugOp(final Supplier msgSupplier, final Throwable t) { + logger.logIfEnabled(FQCN, DEBUG, null, msgSupplier, t); + } + + /** + * Logs a message which is only to be constructed if the logging level is the + * {@code DEBUG} level with the specified Marker. + * + * @param marker the marker data specific to this log statement + * @param msgSupplier A function, which when called, produces the desired log message; + * the format depends on the message factory. + * @since Log4j-2.4 + */ + public void debugOp(final Marker marker, final Supplier msgSupplier) { + logger.logIfEnabled(FQCN, DEBUG, marker, msgSupplier, (Throwable) null); + } + + /** + * Logs a message with parameters which are only to be constructed if the logging level is the + * {@code DEBUG} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param paramSuppliers An array of functions, which when called, produce the desired log message parameters. + * @since Log4j-2.4 + */ + public void debugOp(final Marker marker, final String message, final Supplier...
paramSuppliers) { + logger.logIfEnabled(FQCN, DEBUG, marker, message, paramSuppliers); + } + + /** + * Logs a message (only to be constructed if the logging level is the {@code DEBUG} + * level) with the specified Marker and including the stack trace of the {@link Throwable} + * t passed as parameter. + * + * @param marker the marker data specific to this log statement + * @param msgSupplier A function, which when called, produces the desired log message; + * the format depends on the message factory. + * @param t A Throwable or null. + * @since Log4j-2.4 + */ + public void debugOp(final Marker marker, final Supplier msgSupplier, final Throwable t) { + logger.logIfEnabled(FQCN, DEBUG, marker, msgSupplier, t); + } + + /** + * Logs a message with parameters which are only to be constructed if the logging level is + * the {@code DEBUG} level. + * + * @param message the message to log; the format depends on the message factory. + * @param paramSuppliers An array of functions, which when called, produce the desired log message parameters. + * @since Log4j-2.4 + */ + public void debugOp(final String message, final Supplier... paramSuppliers) { + logger.logIfEnabled(FQCN, DEBUG, null, message, paramSuppliers); + } + + /** + * Logs a message which is only to be constructed if the logging level is the + * {@code DEBUG} level with the specified Marker. The {@code MessageSupplier} may or may + * not use the {@link MessageFactory} to construct the {@code Message}. + * + * @param marker the marker data specific to this log statement + * @param msgSupplier A function, which when called, produces the desired log message. + * @since Log4j-2.4 + */ + public void debugOp(final Marker marker, final MessageSupplier msgSupplier) { + logger.logIfEnabled(FQCN, DEBUG, marker, msgSupplier, (Throwable) null); + } + + /** + * Logs a message (only to be constructed if the logging level is the {@code DEBUG} + * level) with the specified Marker and including the stack trace of the {@link Throwable} + * t passed as parameter. The {@code MessageSupplier} may or may not use the + * {@link MessageFactory} to construct the {@code Message}. + * + * @param marker the marker data specific to this log statement + * @param msgSupplier A function, which when called, produces the desired log message. + * @param t A Throwable or null. + * @since Log4j-2.4 + */ + public void debugOp(final Marker marker, final MessageSupplier msgSupplier, final Throwable t) { + logger.logIfEnabled(FQCN, DEBUG, marker, msgSupplier, t); + } + + /** + * Logs a message which is only to be constructed if the logging level is the + * {@code DEBUG} level. The {@code MessageSupplier} may or may not use the + * {@link MessageFactory} to construct the {@code Message}. + * + * @param msgSupplier A function, which when called, produces the desired log message. + * @since Log4j-2.4 + */ + public void debugOp(final MessageSupplier msgSupplier) { + logger.logIfEnabled(FQCN, DEBUG, null, msgSupplier, (Throwable) null); + } + + /** + * Logs a message (only to be constructed if the logging level is the {@code DEBUG} + * level) including the stack trace of the {@link Throwable} t passed as parameter. + * The {@code MessageSupplier} may or may not use the {@link MessageFactory} to construct the + * {@code Message}. + * + * @param msgSupplier A function, which when called, produces the desired log message. + * @param t the exception to log, including its stack trace. 
+ * @since Log4j-2.4 + */ + public void debugOp(final MessageSupplier msgSupplier, final Throwable t) { + logger.logIfEnabled(FQCN, DEBUG, null, msgSupplier, t); + } + + /** + * Logs a message with the specific Marker at the {@code TRACE} level. + * + * @param marker the marker data specific to this log statement + * @param msg the message string to be logged + */ + public void traceOp(final Marker marker, final Message msg) { + logger.logIfEnabled(FQCN, TRACE, marker, msg, (Throwable) null); + } + + /** + * Logs a message with the specific Marker at the {@code TRACE} level. + * + * @param marker the marker data specific to this log statement + * @param msg the message string to be logged + * @param t A Throwable or null. + */ + public void traceOp(final Marker marker, final Message msg, final Throwable t) { + logger.logIfEnabled(FQCN, TRACE, marker, msg, t); + } + + /** + * Logs a message object with the {@code TRACE} level. + * + * @param marker the marker data specific to this log statement + * @param message the message object to log. + */ + public void traceOp(final Marker marker, final Object message) { + logger.logIfEnabled(FQCN, TRACE, marker, message, (Throwable) null); + } + + /** + * Logs a message CharSequence with the {@code TRACE} level. + * + * @param marker the marker data specific to this log statement + * @param message the message CharSequence to log. + * @since Log4j-2.6 + */ + public void traceOp(final Marker marker, final CharSequence message) { + logger.logIfEnabled(FQCN, TRACE, marker, message, (Throwable) null); + } + + /** + * Logs a message at the {@code TRACE} level including the stack trace of + * the {@link Throwable} {@code t} passed as parameter. + * + * @param marker the marker data specific to this log statement + * @param message the message to log. + * @param t the exception to log, including its stack trace. + */ + public void traceOp(final Marker marker, final Object message, final Throwable t) { + logger.logIfEnabled(FQCN, TRACE, marker, message, t); + } + + /** + * Logs a message at the {@code TRACE} level including the stack trace of + * the {@link Throwable} {@code t} passed as parameter. + * + * @param marker the marker data specific to this log statement + * @param message the CharSequence to log. + * @param t the exception to log, including its stack trace. + * @since Log4j-2.6 + */ + public void traceOp(final Marker marker, final CharSequence message, final Throwable t) { + logger.logIfEnabled(FQCN, TRACE, marker, message, t); + } + + /** + * Logs a message object with the {@code TRACE} level. + * + * @param marker the marker data specific to this log statement + * @param message the message object to log. + */ + public void traceOp(final Marker marker, final String message) { + logger.logIfEnabled(FQCN, TRACE, marker, message, (Throwable) null); + } + + /** + * Logs a message with parameters at the {@code TRACE} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param params parameters to the message. + + */ + public void traceOp(final Marker marker, final String message, final Object... params) { + logger.logIfEnabled(FQCN, TRACE, marker, message, params); + } + + /** + * Logs a message with parameters at the {@code TRACE} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. 
+ + * @since Log4j-2.6 + */ + public void traceOp(final Marker marker, final String message, final Object p0) { + logger.logIfEnabled(FQCN, TRACE, marker, message, p0); + } + + /** + * Logs a message with parameters at the {@code TRACE} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + + * @since Log4j-2.6 + */ + public void traceOp(final Marker marker, final String message, final Object p0, final Object p1) { + logger.logIfEnabled(FQCN, TRACE, marker, message, p0, p1); + } + + /** + * Logs a message with parameters at the {@code TRACE} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + + * @since Log4j-2.6 + */ + public void traceOp(final Marker marker, final String message, final Object p0, final Object p1, final Object p2) { + logger.logIfEnabled(FQCN, TRACE, marker, message, p0, p1, p2); + } + + /** + * Logs a message with parameters at the {@code TRACE} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + + * @since Log4j-2.6 + */ + public void traceOp(final Marker marker, final String message, final Object p0, final Object p1, final Object p2, + final Object p3) { + logger.logIfEnabled(FQCN, TRACE, marker, message, p0, p1, p2, p3); + } + + /** + * Logs a message with parameters at the {@code TRACE} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + + * @since Log4j-2.6 + */ + public void traceOp(final Marker marker, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4) { + logger.logIfEnabled(FQCN, TRACE, marker, message, p0, p1, p2, p3, p4); + } + + /** + * Logs a message with parameters at the {@code TRACE} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + + * @since Log4j-2.6 + */ + public void traceOp(final Marker marker, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5) { + logger.logIfEnabled(FQCN, TRACE, marker, message, p0, p1, p2, p3, p4, p5); + } + + /** + * Logs a message with parameters at the {@code TRACE} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. 
+ * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + + * @since Log4j-2.6 + */ + public void traceOp(final Marker marker, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6) { + logger.logIfEnabled(FQCN, TRACE, marker, message, p0, p1, p2, p3, p4, p5, p6); + } + + /** + * Logs a message with parameters at the {@code TRACE} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + * @param p7 parameter to the message. + + * @since Log4j-2.6 + */ + public void traceOp(final Marker marker, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6, + final Object p7) { + logger.logIfEnabled(FQCN, TRACE, marker, message, p0, p1, p2, p3, p4, p5, p6, p7); + } + + /** + * Logs a message with parameters at the {@code TRACE} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + * @param p7 parameter to the message. + * @param p8 parameter to the message. + + * @since Log4j-2.6 + */ + public void traceOp(final Marker marker, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6, + final Object p7, final Object p8) { + logger.logIfEnabled(FQCN, TRACE, marker, message, p0, p1, p2, p3, p4, p5, p6, p7, p8); + } + + /** + * Logs a message with parameters at the {@code TRACE} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + * @param p7 parameter to the message. + * @param p8 parameter to the message. + * @param p9 parameter to the message. + + * @since Log4j-2.6 + */ + public void traceOp(final Marker marker, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6, + final Object p7, final Object p8, final Object p9) { + logger.logIfEnabled(FQCN, TRACE, marker, message, p0, p1, p2, p3, p4, p5, p6, p7, p8, p9); + } + + /** + * Logs a message at the {@code TRACE} level including the stack trace of + * the {@link Throwable} {@code t} passed as parameter. 
+ * + * @param marker the marker data specific to this log statement + * @param message the message to log. + * @param t the exception to log, including its stack trace. + */ + public void traceOp(final Marker marker, final String message, final Throwable t) { + logger.logIfEnabled(FQCN, TRACE, marker, message, t); + } + + /** + * Logs the specified Message at the {@code TRACE} level. + * + * @param msg the message string to be logged + */ + public void traceOp(final Message msg) { + logger.logIfEnabled(FQCN, TRACE, null, msg, (Throwable) null); + } + + /** + * Logs the specified Message at the {@code TRACE} level. + * + * @param msg the message string to be logged + * @param t A Throwable or null. + */ + public void traceOp(final Message msg, final Throwable t) { + logger.logIfEnabled(FQCN, TRACE, null, msg, t); + } + + /** + * Logs a message object with the {@code TRACE} level. + * + * @param message the message object to log. + */ + public void traceOp(final Object message) { + logger.logIfEnabled(FQCN, TRACE, null, message, (Throwable) null); + } + + /** + * Logs a message at the {@code TRACE} level including the stack trace of + * the {@link Throwable} {@code t} passed as parameter. + * + * @param message the message to log. + * @param t the exception to log, including its stack trace. + */ + public void traceOp(final Object message, final Throwable t) { + logger.logIfEnabled(FQCN, TRACE, null, message, t); + } + + /** + * Logs a message CharSequence with the {@code TRACE} level. + * + * @param message the message CharSequence to log. + * @since Log4j-2.6 + */ + public void traceOp(final CharSequence message) { + logger.logIfEnabled(FQCN, TRACE, null, message, (Throwable) null); + } + + /** + * Logs a CharSequence at the {@code TRACE} level including the stack trace of + * the {@link Throwable} {@code t} passed as parameter. + * + * @param message the CharSequence to log. + * @param t the exception to log, including its stack trace. + * @since Log4j-2.6 + */ + public void traceOp(final CharSequence message, final Throwable t) { + logger.logIfEnabled(FQCN, TRACE, null, message, t); + } + + /** + * Logs a message object with the {@code TRACE} level. + * + * @param message the message object to log. + */ + public void traceOp(final String message) { + logger.logIfEnabled(FQCN, TRACE, null, message, (Throwable) null); + } + + /** + * Logs a message with parameters at the {@code TRACE} level. + * + * @param message the message to log; the format depends on the message factory. + * @param params parameters to the message. + + */ + public void traceOp(final String message, final Object... params) { + logger.logIfEnabled(FQCN, TRACE, null, message, params); + } + + /** + * Logs a message with parameters at the {@code TRACE} level. + * + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + + * @since Log4j-2.6 + */ + public void traceOp(final String message, final Object p0) { + logger.logIfEnabled(FQCN, TRACE, null, message, p0); + } + + /** + * Logs a message with parameters at the {@code TRACE} level. + * + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + + * @since Log4j-2.6 + */ + public void traceOp(final String message, final Object p0, final Object p1) { + logger.logIfEnabled(FQCN, TRACE, null, message, p0, p1); + } + + /** + * Logs a message with parameters at the {@code TRACE} level. 
+ * + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + + * @since Log4j-2.6 + */ + public void traceOp(final String message, final Object p0, final Object p1, final Object p2) { + logger.logIfEnabled(FQCN, TRACE, null, message, p0, p1, p2); + } + + /** + * Logs a message with parameters at the {@code TRACE} level. + * + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + + * @since Log4j-2.6 + */ + public void traceOp(final String message, final Object p0, final Object p1, final Object p2, + final Object p3) { + logger.logIfEnabled(FQCN, TRACE, null, message, p0, p1, p2, p3); + } + + /** + * Logs a message with parameters at the {@code TRACE} level. + * + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + + * @since Log4j-2.6 + */ + public void traceOp(final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4) { + logger.logIfEnabled(FQCN, TRACE, null, message, p0, p1, p2, p3, p4); + } + + /** + * Logs a message with parameters at the {@code TRACE} level. + * + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + + * @since Log4j-2.6 + */ + public void traceOp(final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5) { + logger.logIfEnabled(FQCN, TRACE, null, message, p0, p1, p2, p3, p4, p5); + } + + /** + * Logs a message with parameters at the {@code TRACE} level. + * + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + + * @since Log4j-2.6 + */ + public void traceOp(final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6) { + logger.logIfEnabled(FQCN, TRACE, null, message, p0, p1, p2, p3, p4, p5, p6); + } + + /** + * Logs a message with parameters at the {@code TRACE} level. + * + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + * @param p7 parameter to the message. 
+ + * @since Log4j-2.6 + */ + public void traceOp(final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6, + final Object p7) { + logger.logIfEnabled(FQCN, TRACE, null, message, p0, p1, p2, p3, p4, p5, p6, p7); + } + + /** + * Logs a message with parameters at the {@code TRACE} level. + * + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + * @param p7 parameter to the message. + * @param p8 parameter to the message. + + * @since Log4j-2.6 + */ + public void traceOp(final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6, + final Object p7, final Object p8) { + logger.logIfEnabled(FQCN, TRACE, null, message, p0, p1, p2, p3, p4, p5, p6, p7, p8); + } + + /** + * Logs a message with parameters at the {@code TRACE} level. + * + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + * @param p7 parameter to the message. + * @param p8 parameter to the message. + * @param p9 parameter to the message. + + * @since Log4j-2.6 + */ + public void traceOp(final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6, + final Object p7, final Object p8, final Object p9) { + logger.logIfEnabled(FQCN, TRACE, null, message, p0, p1, p2, p3, p4, p5, p6, p7, p8, p9); + } + + /** + * Logs a message at the {@code TRACE} level including the stack trace of + * the {@link Throwable} {@code t} passed as parameter. + * + * @param message the message to log. + * @param t the exception to log, including its stack trace. + */ + public void traceOp(final String message, final Throwable t) { + logger.logIfEnabled(FQCN, TRACE, null, message, t); + } + + /** + * Logs a message which is only to be constructed if the logging level is the {@code TRACE} level. + * + * @param msgSupplier A function, which when called, produces the desired log message; + * the format depends on the message factory. + * @since Log4j-2.4 + */ + public void traceOp(final Supplier msgSupplier) { + logger.logIfEnabled(FQCN, TRACE, null, msgSupplier, (Throwable) null); + } + + /** + * Logs a message (only to be constructed if the logging level is the {@code TRACE} + * level) including the stack trace of the {@link Throwable} t passed as parameter. + * + * @param msgSupplier A function, which when called, produces the desired log message; + * the format depends on the message factory. + * @param t the exception to log, including its stack trace. + * @since Log4j-2.4 + */ + public void traceOp(final Supplier msgSupplier, final Throwable t) { + logger.logIfEnabled(FQCN, TRACE, null, msgSupplier, t); + } + + /** + * Logs a message which is only to be constructed if the logging level is the + * {@code TRACE} level with the specified Marker.
+ * + * @param marker the marker data specific to this log statement + * @param msgSupplier A function, which when called, produces the desired log message; + * the format depends on the message factory. + * @since Log4j-2.4 + */ + public void traceOp(final Marker marker, final Supplier msgSupplier) { + logger.logIfEnabled(FQCN, TRACE, marker, msgSupplier, (Throwable) null); + } + + /** + * Logs a message with parameters which are only to be constructed if the logging level is the + * {@code TRACE} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param paramSuppliers An array of functions, which when called, produce the desired log message parameters. + * @since Log4j-2.4 + */ + public void traceOp(final Marker marker, final String message, final Supplier... paramSuppliers) { + logger.logIfEnabled(FQCN, TRACE, marker, message, paramSuppliers); + } + + /** + * Logs a message (only to be constructed if the logging level is the {@code TRACE} + * level) with the specified Marker and including the stack trace of the {@link Throwable} + * t passed as parameter. + * + * @param marker the marker data specific to this log statement + * @param msgSupplier A function, which when called, produces the desired log message; + * the format depends on the message factory. + * @param t A Throwable or null. + * @since Log4j-2.4 + */ + public void traceOp(final Marker marker, final Supplier msgSupplier, final Throwable t) { + logger.logIfEnabled(FQCN, TRACE, marker, msgSupplier, t); + } + + /** + * Logs a message with parameters which are only to be constructed if the logging level is + * the {@code TRACE} level. + * + * @param message the message to log; the format depends on the message factory. + * @param paramSuppliers An array of functions, which when called, produce the desired log message parameters. + * @since Log4j-2.4 + */ + public void traceOp(final String message, final Supplier... paramSuppliers) { + logger.logIfEnabled(FQCN, TRACE, null, message, paramSuppliers); + } + + /** + * Logs a message which is only to be constructed if the logging level is the + * {@code TRACE} level with the specified Marker. The {@code MessageSupplier} may or may + * not use the {@link MessageFactory} to construct the {@code Message}. + * + * @param marker the marker data specific to this log statement + * @param msgSupplier A function, which when called, produces the desired log message. + * @since Log4j-2.4 + */ + public void traceOp(final Marker marker, final MessageSupplier msgSupplier) { + logger.logIfEnabled(FQCN, TRACE, marker, msgSupplier, (Throwable) null); + } + + /** + * Logs a message (only to be constructed if the logging level is the {@code TRACE} + * level) with the specified Marker and including the stack trace of the {@link Throwable} + * t passed as parameter. The {@code MessageSupplier} may or may not use the + * {@link MessageFactory} to construct the {@code Message}. + * + * @param marker the marker data specific to this log statement + * @param msgSupplier A function, which when called, produces the desired log message. + * @param t A Throwable or null. + * @since Log4j-2.4 + */ + public void traceOp(final Marker marker, final MessageSupplier msgSupplier, final Throwable t) { + logger.logIfEnabled(FQCN, TRACE, marker, msgSupplier, t); + } + + /** + * Logs a message which is only to be constructed if the logging level is the + * {@code TRACE} level. 
The {@code MessageSupplier} may or may not use the + * {@link MessageFactory} to construct the {@code Message}. + * + * @param msgSupplier A function, which when called, produces the desired log message. + * @since Log4j-2.4 + */ + public void traceOp(final MessageSupplier msgSupplier) { + logger.logIfEnabled(FQCN, TRACE, null, msgSupplier, (Throwable) null); + } + + /** + * Logs a message (only to be constructed if the logging level is the {@code TRACE} + * level) including the stack trace of the {@link Throwable} t passed as parameter. + * The {@code MessageSupplier} may or may not use the {@link MessageFactory} to construct the + * {@code Message}. + * + * @param msgSupplier A function, which when called, produces the desired log message. + * @param t the exception to log, including its stack trace. + * @since Log4j-2.4 + */ + public void traceOp(final MessageSupplier msgSupplier, final Throwable t) { + logger.logIfEnabled(FQCN, TRACE, null, msgSupplier, t); + } + + /** + * Logs a message with the specific Marker at the {@code ALL} level. + * + * @param marker the marker data specific to this log statement + * @param msg the message string to be logged + */ + public void allOp(final Marker marker, final Message msg) { + logger.logIfEnabled(FQCN, ALL, marker, msg, (Throwable) null); + } + + /** + * Logs a message with the specific Marker at the {@code ALL} level. + * + * @param marker the marker data specific to this log statement + * @param msg the message string to be logged + * @param t A Throwable or null. + */ + public void allOp(final Marker marker, final Message msg, final Throwable t) { + logger.logIfEnabled(FQCN, ALL, marker, msg, t); + } + + /** + * Logs a message object with the {@code ALL} level. + * + * @param marker the marker data specific to this log statement + * @param message the message object to log. + */ + public void allOp(final Marker marker, final Object message) { + logger.logIfEnabled(FQCN, ALL, marker, message, (Throwable) null); + } + + /** + * Logs a message CharSequence with the {@code ALL} level. + * + * @param marker the marker data specific to this log statement + * @param message the message CharSequence to log. + * @since Log4j-2.6 + */ + public void allOp(final Marker marker, final CharSequence message) { + logger.logIfEnabled(FQCN, ALL, marker, message, (Throwable) null); + } + + /** + * Logs a message at the {@code ALL} level including the stack trace of + * the {@link Throwable} {@code t} passed as parameter. + * + * @param marker the marker data specific to this log statement + * @param message the message to log. + * @param t the exception to log, including its stack trace. + */ + public void allOp(final Marker marker, final Object message, final Throwable t) { + logger.logIfEnabled(FQCN, ALL, marker, message, t); + } + + /** + * Logs a message at the {@code ALL} level including the stack trace of + * the {@link Throwable} {@code t} passed as parameter. + * + * @param marker the marker data specific to this log statement + * @param message the CharSequence to log. + * @param t the exception to log, including its stack trace. + * @since Log4j-2.6 + */ + public void allOp(final Marker marker, final CharSequence message, final Throwable t) { + logger.logIfEnabled(FQCN, ALL, marker, message, t); + } + + /** + * Logs a message object with the {@code ALL} level. + * + * @param marker the marker data specific to this log statement + * @param message the message object to log. 
+ */ + public void allOp(final Marker marker, final String message) { + logger.logIfEnabled(FQCN, ALL, marker, message, (Throwable) null); + } + + /** + * Logs a message with parameters at the {@code ALL} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param params parameters to the message. + + */ + public void allOp(final Marker marker, final String message, final Object... params) { + logger.logIfEnabled(FQCN, ALL, marker, message, params); + } + + /** + * Logs a message with parameters at the {@code ALL} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + + * @since Log4j-2.6 + */ + public void allOp(final Marker marker, final String message, final Object p0) { + logger.logIfEnabled(FQCN, ALL, marker, message, p0); + } + + /** + * Logs a message with parameters at the {@code ALL} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + + * @since Log4j-2.6 + */ + public void allOp(final Marker marker, final String message, final Object p0, final Object p1) { + logger.logIfEnabled(FQCN, ALL, marker, message, p0, p1); + } + + /** + * Logs a message with parameters at the {@code ALL} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + + * @since Log4j-2.6 + */ + public void allOp(final Marker marker, final String message, final Object p0, final Object p1, final Object p2) { + logger.logIfEnabled(FQCN, ALL, marker, message, p0, p1, p2); + } + + /** + * Logs a message with parameters at the {@code ALL} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + + * @since Log4j-2.6 + */ + public void allOp(final Marker marker, final String message, final Object p0, final Object p1, final Object p2, + final Object p3) { + logger.logIfEnabled(FQCN, ALL, marker, message, p0, p1, p2, p3); + } + + /** + * Logs a message with parameters at the {@code ALL} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + + * @since Log4j-2.6 + */ + public void allOp(final Marker marker, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4) { + logger.logIfEnabled(FQCN, ALL, marker, message, p0, p1, p2, p3, p4); + } + + /** + * Logs a message with parameters at the {@code ALL} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. 
+ * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + + * @since Log4j-2.6 + */ + public void allOp(final Marker marker, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5) { + logger.logIfEnabled(FQCN, ALL, marker, message, p0, p1, p2, p3, p4, p5); + } + + /** + * Logs a message with parameters at the {@code ALL} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + + * @since Log4j-2.6 + */ + public void allOp(final Marker marker, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6) { + logger.logIfEnabled(FQCN, ALL, marker, message, p0, p1, p2, p3, p4, p5, p6); + } + + /** + * Logs a message with parameters at the {@code ALL} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + * @param p7 parameter to the message. + + * @since Log4j-2.6 + */ + public void allOp(final Marker marker, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6, + final Object p7) { + logger.logIfEnabled(FQCN, ALL, marker, message, p0, p1, p2, p3, p4, p5, p6, p7); + } + + /** + * Logs a message with parameters at the {@code ALL} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + * @param p7 parameter to the message. + * @param p8 parameter to the message. + + * @since Log4j-2.6 + */ + public void allOp(final Marker marker, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6, + final Object p7, final Object p8) { + logger.logIfEnabled(FQCN, ALL, marker, message, p0, p1, p2, p3, p4, p5, p6, p7, p8); + } + + /** + * Logs a message with parameters at the {@code ALL} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. 
+ * @param p6 parameter to the message. + * @param p7 parameter to the message. + * @param p8 parameter to the message. + * @param p9 parameter to the message. + + * @since Log4j-2.6 + */ + public void allOp(final Marker marker, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6, + final Object p7, final Object p8, final Object p9) { + logger.logIfEnabled(FQCN, ALL, marker, message, p0, p1, p2, p3, p4, p5, p6, p7, p8, p9); + } + + /** + * Logs a message at the {@code ALL} level including the stack trace of + * the {@link Throwable} {@code t} passed as parameter. + * + * @param marker the marker data specific to this log statement + * @param message the message to log. + * @param t the exception to log, including its stack trace. + */ + public void allOp(final Marker marker, final String message, final Throwable t) { + logger.logIfEnabled(FQCN, ALL, marker, message, t); + } + + /** + * Logs the specified Message at the {@code ALL} level. + * + * @param msg the message string to be logged + */ + public void allOp(final Message msg) { + logger.logIfEnabled(FQCN, ALL, null, msg, (Throwable) null); + } + + /** + * Logs the specified Message at the {@code ALL} level. + * + * @param msg the message string to be logged + * @param t A Throwable or null. + */ + public void allOp(final Message msg, final Throwable t) { + logger.logIfEnabled(FQCN, ALL, null, msg, t); + } + + /** + * Logs a message object with the {@code ALL} level. + * + * @param message the message object to log. + */ + public void allOp(final Object message) { + logger.logIfEnabled(FQCN, ALL, null, message, (Throwable) null); + } + + /** + * Logs a message at the {@code ALL} level including the stack trace of + * the {@link Throwable} {@code t} passed as parameter. + * + * @param message the message to log. + * @param t the exception to log, including its stack trace. + */ + public void allOp(final Object message, final Throwable t) { + logger.logIfEnabled(FQCN, ALL, null, message, t); + } + + /** + * Logs a message CharSequence with the {@code ALL} level. + * + * @param message the message CharSequence to log. + * @since Log4j-2.6 + */ + public void allOp(final CharSequence message) { + logger.logIfEnabled(FQCN, ALL, null, message, (Throwable) null); + } + + /** + * Logs a CharSequence at the {@code ALL} level including the stack trace of + * the {@link Throwable} {@code t} passed as parameter. + * + * @param message the CharSequence to log. + * @param t the exception to log, including its stack trace. + * @since Log4j-2.6 + */ + public void allOp(final CharSequence message, final Throwable t) { + logger.logIfEnabled(FQCN, ALL, null, message, t); + } + + /** + * Logs a message object with the {@code ALL} level. + * + * @param message the message object to log. + */ + public void allOp(final String message) { + logger.logIfEnabled(FQCN, ALL, null, message, (Throwable) null); + } + + /** + * Logs a message with parameters at the {@code ALL} level. + * + * @param message the message to log; the format depends on the message factory. + * @param params parameters to the message. + + */ + public void allOp(final String message, final Object... params) { + logger.logIfEnabled(FQCN, ALL, null, message, params); + } + + /** + * Logs a message with parameters at the {@code ALL} level. + * + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. 
+ + * @since Log4j-2.6 + */ + public void allOp(final String message, final Object p0) { + logger.logIfEnabled(FQCN, ALL, null, message, p0); + } + + /** + * Logs a message with parameters at the {@code ALL} level. + * + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + + * @since Log4j-2.6 + */ + public void allOp(final String message, final Object p0, final Object p1) { + logger.logIfEnabled(FQCN, ALL, null, message, p0, p1); + } + + /** + * Logs a message with parameters at the {@code ALL} level. + * + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + + * @since Log4j-2.6 + */ + public void allOp(final String message, final Object p0, final Object p1, final Object p2) { + logger.logIfEnabled(FQCN, ALL, null, message, p0, p1, p2); + } + + /** + * Logs a message with parameters at the {@code ALL} level. + * + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + + * @since Log4j-2.6 + */ + public void allOp(final String message, final Object p0, final Object p1, final Object p2, + final Object p3) { + logger.logIfEnabled(FQCN, ALL, null, message, p0, p1, p2, p3); + } + + /** + * Logs a message with parameters at the {@code ALL} level. + * + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + + * @since Log4j-2.6 + */ + public void allOp(final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4) { + logger.logIfEnabled(FQCN, ALL, null, message, p0, p1, p2, p3, p4); + } + + /** + * Logs a message with parameters at the {@code ALL} level. + * + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + + * @since Log4j-2.6 + */ + public void allOp(final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5) { + logger.logIfEnabled(FQCN, ALL, null, message, p0, p1, p2, p3, p4, p5); + } + + /** + * Logs a message with parameters at the {@code ALL} level. + * + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + + * @since Log4j-2.6 + */ + public void allOp(final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6) { + logger.logIfEnabled(FQCN, ALL, null, message, p0, p1, p2, p3, p4, p5, p6); + } + + /** + * Logs a message with parameters at the {@code ALL} level. 
+ * + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + * @param p7 parameter to the message. + + * @since Log4j-2.6 + */ + public void allOp(final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6, + final Object p7) { + logger.logIfEnabled(FQCN, ALL, null, message, p0, p1, p2, p3, p4, p5, p6, p7); + } + + /** + * Logs a message with parameters at the {@code ALL} level. + * + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + * @param p7 parameter to the message. + * @param p8 parameter to the message. + + * @since Log4j-2.6 + */ + public void allOp(final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6, + final Object p7, final Object p8) { + logger.logIfEnabled(FQCN, ALL, null, message, p0, p1, p2, p3, p4, p5, p6, p7, p8); + } + + /** + * Logs a message with parameters at the {@code ALL} level. + * + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + * @param p7 parameter to the message. + * @param p8 parameter to the message. + * @param p9 parameter to the message. + + * @since Log4j-2.6 + */ + public void allOp(final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6, + final Object p7, final Object p8, final Object p9) { + logger.logIfEnabled(FQCN, ALL, null, message, p0, p1, p2, p3, p4, p5, p6, p7, p8, p9); + } + + /** + * Logs a message at the {@code ALL} level including the stack trace of + * the {@link Throwable} {@code t} passed as parameter. + * + * @param message the message to log. + * @param t the exception to log, including its stack trace. + */ + public void allOp(final String message, final Throwable t) { + logger.logIfEnabled(FQCN, ALL, null, message, t); + } + + /** + * Logs a message which is only to be constructed if the logging level is the {@code ALL} level. + * + * @param msgSupplier A function, which when called, produces the desired log message; + * the format depends on the message factory. + * @since Log4j-2.4 + */ + public void allOp(final Supplier msgSupplier) { + logger.logIfEnabled(FQCN, ALL, null, msgSupplier, (Throwable) null); + } + + /** + * Logs a message (only to be constructed if the logging level is the {@code ALL} + * level) including the stack trace of the {@link Throwable} t passed as parameter. + * + * @param msgSupplier A function, which when called, produces the desired log message; + * the format depends on the message factory.
+ * @param t the exception to log, including its stack trace. + * @since Log4j-2.4 + */ + public void allOp(final Supplier msgSupplier, final Throwable t) { + logger.logIfEnabled(FQCN, ALL, null, msgSupplier, t); + } + + /** + * Logs a message which is only to be constructed if the logging level is the + * {@code ALL} level with the specified Marker. + * + * @param marker the marker data specific to this log statement + * @param msgSupplier A function, which when called, produces the desired log message; + * the format depends on the message factory. + * @since Log4j-2.4 + */ + public void allOp(final Marker marker, final Supplier msgSupplier) { + logger.logIfEnabled(FQCN, ALL, marker, msgSupplier, (Throwable) null); + } + + /** + * Logs a message with parameters which are only to be constructed if the logging level is the + * {@code ALL} level. + * + * @param marker the marker data specific to this log statement + * @param message the message to log; the format depends on the message factory. + * @param paramSuppliers An array of functions, which when called, produce the desired log message parameters. + * @since Log4j-2.4 + */ + public void allOp(final Marker marker, final String message, final Supplier... paramSuppliers) { + logger.logIfEnabled(FQCN, ALL, marker, message, paramSuppliers); + } + + /** + * Logs a message (only to be constructed if the logging level is the {@code ALL} + * level) with the specified Marker and including the stack trace of the {@link Throwable} + * t passed as parameter. + * + * @param marker the marker data specific to this log statement + * @param msgSupplier A function, which when called, produces the desired log message; + * the format depends on the message factory. + * @param t A Throwable or null. + * @since Log4j-2.4 + */ + public void allOp(final Marker marker, final Supplier msgSupplier, final Throwable t) { + logger.logIfEnabled(FQCN, ALL, marker, msgSupplier, t); + } + + /** + * Logs a message with parameters which are only to be constructed if the logging level is + * the {@code ALL} level. + * + * @param message the message to log; the format depends on the message factory. + * @param paramSuppliers An array of functions, which when called, produce the desired log message parameters. + * @since Log4j-2.4 + */ + public void allOp(final String message, final Supplier... paramSuppliers) { + logger.logIfEnabled(FQCN, ALL, null, message, paramSuppliers); + } + + /** + * Logs a message which is only to be constructed if the logging level is the + * {@code ALL} level with the specified Marker. The {@code MessageSupplier} may or may + * not use the {@link MessageFactory} to construct the {@code Message}. + * + * @param marker the marker data specific to this log statement + * @param msgSupplier A function, which when called, produces the desired log message. + * @since Log4j-2.4 + */ + public void allOp(final Marker marker, final MessageSupplier msgSupplier) { + logger.logIfEnabled(FQCN, ALL, marker, msgSupplier, (Throwable) null); + } + + /** + * Logs a message (only to be constructed if the logging level is the {@code ALL} + * level) with the specified Marker and including the stack trace of the {@link Throwable} + * t passed as parameter. The {@code MessageSupplier} may or may not use the + * {@link MessageFactory} to construct the {@code Message}. + * + * @param marker the marker data specific to this log statement + * @param msgSupplier A function, which when called, produces the desired log message. + * @param t A Throwable or null. 
+ * @since Log4j-2.4 + */ + public void allOp(final Marker marker, final MessageSupplier msgSupplier, final Throwable t) { + logger.logIfEnabled(FQCN, ALL, marker, msgSupplier, t); + } + + /** + * Logs a message which is only to be constructed if the logging level is the + * {@code ALL} level. The {@code MessageSupplier} may or may not use the + * {@link MessageFactory} to construct the {@code Message}. + * + * @param msgSupplier A function, which when called, produces the desired log message. + * @since Log4j-2.4 + */ + public void allOp(final MessageSupplier msgSupplier) { + logger.logIfEnabled(FQCN, ALL, null, msgSupplier, (Throwable) null); + } + + /** + * Logs a message (only to be constructed if the logging level is the {@code ALL} + * level) including the stack trace of the {@link Throwable} t passed as parameter. + * The {@code MessageSupplier} may or may not use the {@link MessageFactory} to construct the + * {@code Message}. + * + * @param msgSupplier A function, which when called, produces the desired log message. + * @param t the exception to log, including its stack trace. + * @since Log4j-2.4 + */ + public void allOp(final MessageSupplier msgSupplier, final Throwable t) { + logger.logIfEnabled(FQCN, ALL, null, msgSupplier, t); + } + + ////// CR logging + + /** + * Logs a message with the specific Marker at the {@code OFF} level. + * + * @param reconciliation The reconciliation + * @param msg the message string to be logged + */ + public void offCr(final Reconciliation reconciliation, final Message msg) { + logger.logIfEnabled(FQCN, OFF, reconciliation.getMarker(), msg, (Throwable) null); + } + + /** + * Logs a message with the specific Marker at the {@code OFF} level. + * + * @param reconciliation The reconciliation + * @param msg the message string to be logged + * @param t A Throwable or null. + */ + public void offCr(final Reconciliation reconciliation, final Message msg, final Throwable t) { + logger.logIfEnabled(FQCN, OFF, reconciliation.getMarker(), msg, t); + } + + /** + * Logs a message object with the {@code OFF} level. + * + * @param reconciliation The reconciliation + * @param message the message object to log. + */ + public void offCr(final Reconciliation reconciliation, final Object message) { + logger.logIfEnabled(FQCN, OFF, reconciliation.getMarker(), reconciliation.toString() + ": " + message, (Throwable) null); + } + + /** + * Logs a message CharSequence with the {@code OFF} level. + * + * @param reconciliation The reconciliation + * @param message the message CharSequence to log. + * @since Log4j-2.6 + */ + public void offCr(final Reconciliation reconciliation, final CharSequence message) { + logger.logIfEnabled(FQCN, OFF, reconciliation.getMarker(), reconciliation.toString() + ": " + message, (Throwable) null); + } + + /** + * Logs a message at the {@code OFF} level including the stack trace of + * the {@link Throwable} {@code t} passed as parameter. + * + * @param reconciliation The reconciliation + * @param message the message to log. + * @param t the exception to log, including its stack trace. + */ + public void offCr(final Reconciliation reconciliation, final Object message, final Throwable t) { + logger.logIfEnabled(FQCN, OFF, reconciliation.getMarker(), reconciliation.toString() + ": " + message, t); + } + + /** + * Logs a message at the {@code OFF} level including the stack trace of + * the {@link Throwable} {@code t} passed as parameter. + * + * @param reconciliation The reconciliation + * @param message the CharSequence to log. 
+ * @param t the exception to log, including its stack trace. + * @since Log4j-2.6 + */ + public void offCr(final Reconciliation reconciliation, final CharSequence message, final Throwable t) { + logger.logIfEnabled(FQCN, OFF, reconciliation.getMarker(), reconciliation.toString() + ": " + message, t); + } + + /** + * Logs a message object with the {@code OFF} level. + * + * @param reconciliation The reconciliation + * @param message the message object to log. + */ + public void offCr(final Reconciliation reconciliation, final String message) { + logger.logIfEnabled(FQCN, OFF, reconciliation.getMarker(), reconciliation.toString() + ": " + message, (Throwable) null); + } + + /** + * Logs a message with parameters at the {@code OFF} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param params parameters to the message. + */ + public void offCr(final Reconciliation reconciliation, final String message, final Object... params) { + logger.logIfEnabled(FQCN, OFF, reconciliation.getMarker(), reconciliation.toString() + ": " + message, params); + } + + /** + * Logs a message with parameters at the {@code OFF} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @since Log4j-2.6 + */ + public void offCr(final Reconciliation reconciliation, final String message, final Object p0) { + logger.logIfEnabled(FQCN, OFF, reconciliation.getMarker(), reconciliation.toString() + ": " + message, p0); + } + + /** + * Logs a message with parameters at the {@code OFF} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @since Log4j-2.6 + */ + public void offCr(final Reconciliation reconciliation, final String message, final Object p0, final Object p1) { + logger.logIfEnabled(FQCN, OFF, reconciliation.getMarker(), reconciliation.toString() + ": " + message, p0, p1); + } + + /** + * Logs a message with parameters at the {@code OFF} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @since Log4j-2.6 + */ + public void offCr(final Reconciliation reconciliation, final String message, final Object p0, final Object p1, final Object p2) { + logger.logIfEnabled(FQCN, OFF, reconciliation.getMarker(), reconciliation.toString() + ": " + message, p0, p1, p2); + } + + /** + * Logs a message with parameters at the {@code OFF} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @since Log4j-2.6 + */ + public void offCr(final Reconciliation reconciliation, final String message, final Object p0, final Object p1, final Object p2, + final Object p3) { + logger.logIfEnabled(FQCN, OFF, reconciliation.getMarker(), reconciliation.toString() + ": " + message, p0, p1, p2, p3); + } + + /** + * Logs a message with parameters at the {@code OFF} level. 
+ * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @since Log4j-2.6 + */ + public void offCr(final Reconciliation reconciliation, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4) { + logger.logIfEnabled(FQCN, OFF, reconciliation.getMarker(), reconciliation.toString() + ": " + message, p0, p1, p2, p3, p4); + } + + /** + * Logs a message with parameters at the {@code OFF} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @since Log4j-2.6 + */ + public void offCr(final Reconciliation reconciliation, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5) { + logger.logIfEnabled(FQCN, OFF, reconciliation.getMarker(), reconciliation.toString() + ": " + message, p0, p1, p2, p3, p4, p5); + } + + /** + * Logs a message with parameters at the {@code OFF} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + * @since Log4j-2.6 + */ + public void offCr(final Reconciliation reconciliation, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6) { + logger.logIfEnabled(FQCN, OFF, reconciliation.getMarker(), reconciliation.toString() + ": " + message, p0, p1, p2, p3, p4, p5, p6); + } + + /** + * Logs a message with parameters at the {@code OFF} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + * @param p7 parameter to the message. + * @since Log4j-2.6 + */ + public void offCr(final Reconciliation reconciliation, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6, + final Object p7) { + logger.logIfEnabled(FQCN, OFF, reconciliation.getMarker(), reconciliation.toString() + ": " + message, p0, p1, p2, p3, p4, p5, p6, p7); + } + + /** + * Logs a message with parameters at the {@code OFF} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. 
+ * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + * @param p7 parameter to the message. + * @param p8 parameter to the message. + * @since Log4j-2.6 + */ + public void offCr(final Reconciliation reconciliation, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6, + final Object p7, final Object p8) { + logger.logIfEnabled(FQCN, OFF, reconciliation.getMarker(), reconciliation.toString() + ": " + message, p0, p1, p2, p3, p4, p5, p6, p7, p8); + } + + /** + * Logs a message with parameters at the {@code OFF} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + * @param p7 parameter to the message. + * @param p8 parameter to the message. + * @param p9 parameter to the message. + * @since Log4j-2.6 + */ + public void offCr(final Reconciliation reconciliation, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6, + final Object p7, final Object p8, final Object p9) { + logger.logIfEnabled(FQCN, OFF, reconciliation.getMarker(), reconciliation.toString() + ": " + message, p0, p1, p2, p3, p4, p5, p6, p7, p8, p9); + } + + /** + * Logs a message at the {@code OFF} level including the stack trace of + * the {@link Throwable} {@code t} passed as parameter. + * + * @param reconciliation The reconciliation + * @param message the message to log. + * @param t the exception to log, including its stack trace. + */ + public void offCr(final Reconciliation reconciliation, final String message, final Throwable t) { + logger.logIfEnabled(FQCN, OFF, reconciliation.getMarker(), reconciliation.toString() + ": " + message, t); + } + + /** + * Logs a message which is only to be constructed if the logging level is the + * {@code OFF} level with the specified Marker. + * + * @param reconciliation The reconciliation + * @param msgSupplier A function, which when called, produces the desired log message; + * the format depends on the message factory. + * @since Log4j-2.4 + */ + public void offCr(final Reconciliation reconciliation, final Supplier msgSupplier) { + logger.logIfEnabled(FQCN, OFF, reconciliation.getMarker(), msgSupplier, (Throwable) null); + } + + /** + * Logs a message with parameters which are only to be constructed if the logging level is the + * {@code OFF} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param paramSuppliers An array of functions, which when called, produce the desired log message parameters. + * @since Log4j-2.4 + */ + public void offCr(final Reconciliation reconciliation, final String message, final Supplier... paramSuppliers) { + logger.logIfEnabled(FQCN, OFF, reconciliation.getMarker(), reconciliation.toString() + ": " + message, paramSuppliers); + } + + /** + * Logs a message (only to be constructed if the logging level is the {@code OFF} + * level) with the specified Marker and including the stack trace of the {@link Throwable} + * t passed as parameter. 
+ * + * @param reconciliation The reconciliation + * @param msgSupplier A function, which when called, produces the desired log message; + * the format depends on the message factory. + * @param t A Throwable or null. + * @since Log4j-2.4 + */ + public void offCr(final Reconciliation reconciliation, final Supplier msgSupplier, final Throwable t) { + logger.logIfEnabled(FQCN, OFF, reconciliation.getMarker(), msgSupplier, t); + } + + /** + * Logs a message which is only to be constructed if the logging level is the + * {@code OFF} level with the specified Marker. The {@code MessageSupplier} may or may + * not use the {@link MessageFactory} to construct the {@code Message}. + * + * @param reconciliation The reconciliation + * @param msgSupplier A function, which when called, produces the desired log message. + * @since Log4j-2.4 + */ + public void offCr(final Reconciliation reconciliation, final MessageSupplier msgSupplier) { + logger.logIfEnabled(FQCN, OFF, reconciliation.getMarker(), msgSupplier, (Throwable) null); + } + + /** + * Logs a message (only to be constructed if the logging level is the {@code OFF} + * level) with the specified Marker and including the stack trace of the {@link Throwable} + * t passed as parameter. The {@code MessageSupplier} may or may not use the + * {@link MessageFactory} to construct the {@code Message}. + * + * @param reconciliation The reconciliation + * @param msgSupplier A function, which when called, produces the desired log message. + * @param t A Throwable or null. + * @since Log4j-2.4 + */ + public void offCr(final Reconciliation reconciliation, final MessageSupplier msgSupplier, final Throwable t) { + logger.logIfEnabled(FQCN, OFF, reconciliation.getMarker(), msgSupplier, t); + } + + /** + * Logs a message with the specific Marker at the {@code FATAL} level. + * + * @param reconciliation The reconciliation + * @param msg the message string to be logged + */ + public void fatalCr(final Reconciliation reconciliation, final Message msg) { + logger.logIfEnabled(FQCN, FATAL, reconciliation.getMarker(), msg, (Throwable) null); + } + + /** + * Logs a message with the specific Marker at the {@code FATAL} level. + * + * @param reconciliation The reconciliation + * @param msg the message string to be logged + * @param t A Throwable or null. + */ + public void fatalCr(final Reconciliation reconciliation, final Message msg, final Throwable t) { + logger.logIfEnabled(FQCN, FATAL, reconciliation.getMarker(), msg, t); + } + + /** + * Logs a message object with the {@code FATAL} level. + * + * @param reconciliation The reconciliation + * @param message the message object to log. + */ + public void fatalCr(final Reconciliation reconciliation, final Object message) { + logger.logIfEnabled(FQCN, FATAL, reconciliation.getMarker(), reconciliation.toString() + ": " + message, (Throwable) null); + } + + /** + * Logs a message CharSequence with the {@code FATAL} level. + * + * @param reconciliation The reconciliation + * @param message the message CharSequence to log. + * @since Log4j-2.6 + */ + public void fatalCr(final Reconciliation reconciliation, final CharSequence message) { + logger.logIfEnabled(FQCN, FATAL, reconciliation.getMarker(), reconciliation.toString() + ": " + message, (Throwable) null); + } + + /** + * Logs a message at the {@code FATAL} level including the stack trace of + * the {@link Throwable} {@code t} passed as parameter. + * + * @param reconciliation The reconciliation + * @param message the message to log. 
+ * @param t the exception to log, including its stack trace. + */ + public void fatalCr(final Reconciliation reconciliation, final Object message, final Throwable t) { + logger.logIfEnabled(FQCN, FATAL, reconciliation.getMarker(), reconciliation.toString() + ": " + message, t); + } + + /** + * Logs a message at the {@code FATAL} level including the stack trace of + * the {@link Throwable} {@code t} passed as parameter. + * + * @param reconciliation The reconciliation + * @param message the CharSequence to log. + * @param t the exception to log, including its stack trace. + * @since Log4j-2.6 + */ + public void fatalCr(final Reconciliation reconciliation, final CharSequence message, final Throwable t) { + logger.logIfEnabled(FQCN, FATAL, reconciliation.getMarker(), reconciliation.toString() + ": " + message, t); + } + + /** + * Logs a message object with the {@code FATAL} level. + * + * @param reconciliation The reconciliation + * @param message the message object to log. + */ + public void fatalCr(final Reconciliation reconciliation, final String message) { + logger.logIfEnabled(FQCN, FATAL, reconciliation.getMarker(), reconciliation.toString() + ": " + message, (Throwable) null); + } + + /** + * Logs a message with parameters at the {@code FATAL} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param params parameters to the message. + */ + public void fatalCr(final Reconciliation reconciliation, final String message, final Object... params) { + logger.logIfEnabled(FQCN, FATAL, reconciliation.getMarker(), reconciliation.toString() + ": " + message, params); + } + + /** + * Logs a message with parameters at the {@code FATAL} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @since Log4j-2.6 + */ + public void fatalCr(final Reconciliation reconciliation, final String message, final Object p0) { + logger.logIfEnabled(FQCN, FATAL, reconciliation.getMarker(), reconciliation.toString() + ": " + message, p0); + } + + /** + * Logs a message with parameters at the {@code FATAL} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @since Log4j-2.6 + */ + public void fatalCr(final Reconciliation reconciliation, final String message, final Object p0, final Object p1) { + logger.logIfEnabled(FQCN, FATAL, reconciliation.getMarker(), reconciliation.toString() + ": " + message, p0, p1); + } + + /** + * Logs a message with parameters at the {@code FATAL} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @since Log4j-2.6 + */ + public void fatalCr(final Reconciliation reconciliation, final String message, final Object p0, final Object p1, final Object p2) { + logger.logIfEnabled(FQCN, FATAL, reconciliation.getMarker(), reconciliation.toString() + ": " + message, p0, p1, p2); + } + + /** + * Logs a message with parameters at the {@code FATAL} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. 
+ * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @since Log4j-2.6 + */ + public void fatalCr(final Reconciliation reconciliation, final String message, final Object p0, final Object p1, final Object p2, + final Object p3) { + logger.logIfEnabled(FQCN, FATAL, reconciliation.getMarker(), reconciliation.toString() + ": " + message, p0, p1, p2, p3); + } + + /** + * Logs a message with parameters at the {@code FATAL} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @since Log4j-2.6 + */ + public void fatalCr(final Reconciliation reconciliation, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4) { + logger.logIfEnabled(FQCN, FATAL, reconciliation.getMarker(), reconciliation.toString() + ": " + message, p0, p1, p2, p3, p4); + } + + /** + * Logs a message with parameters at the {@code FATAL} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @since Log4j-2.6 + */ + public void fatalCr(final Reconciliation reconciliation, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5) { + logger.logIfEnabled(FQCN, FATAL, reconciliation.getMarker(), reconciliation.toString() + ": " + message, p0, p1, p2, p3, p4, p5); + } + + /** + * Logs a message with parameters at the {@code FATAL} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + * @since Log4j-2.6 + */ + public void fatalCr(final Reconciliation reconciliation, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6) { + logger.logIfEnabled(FQCN, FATAL, reconciliation.getMarker(), reconciliation.toString() + ": " + message, p0, p1, p2, p3, p4, p5, p6); + } + + /** + * Logs a message with parameters at the {@code FATAL} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + * @param p7 parameter to the message. 
+ * @since Log4j-2.6 + */ + public void fatalCr(final Reconciliation reconciliation, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6, + final Object p7) { + logger.logIfEnabled(FQCN, FATAL, reconciliation.getMarker(), reconciliation.toString() + ": " + message, p0, p1, p2, p3, p4, p5, p6, p7); + } + + /** + * Logs a message with parameters at the {@code FATAL} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + * @param p7 parameter to the message. + * @param p8 parameter to the message. + * @since Log4j-2.6 + */ + public void fatalCr(final Reconciliation reconciliation, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6, + final Object p7, final Object p8) { + logger.logIfEnabled(FQCN, FATAL, reconciliation.getMarker(), reconciliation.toString() + ": " + message, p0, p1, p2, p3, p4, p5, p6, p7, p8); + } + + /** + * Logs a message with parameters at the {@code FATAL} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + * @param p7 parameter to the message. + * @param p8 parameter to the message. + * @param p9 parameter to the message. + * @since Log4j-2.6 + */ + public void fatalCr(final Reconciliation reconciliation, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6, + final Object p7, final Object p8, final Object p9) { + logger.logIfEnabled(FQCN, FATAL, reconciliation.getMarker(), reconciliation.toString() + ": " + message, p0, p1, p2, p3, p4, p5, p6, p7, p8, p9); + } + + /** + * Logs a message at the {@code FATAL} level including the stack trace of + * the {@link Throwable} {@code t} passed as parameter. + * + * @param reconciliation The reconciliation + * @param message the message to log. + * @param t the exception to log, including its stack trace. + */ + public void fatalCr(final Reconciliation reconciliation, final String message, final Throwable t) { + logger.logIfEnabled(FQCN, FATAL, reconciliation.getMarker(), reconciliation.toString() + ": " + message, t); + } + + /** + * Logs a message which is only to be constructed if the logging level is the + * {@code FATAL} level with the specified Marker. + * + * @param reconciliation The reconciliation + * @param msgSupplier A function, which when called, produces the desired log message; + * the format depends on the message factory. 
+ * @since Log4j-2.4 + */ + public void fatalCr(final Reconciliation reconciliation, final Supplier msgSupplier) { + logger.logIfEnabled(FQCN, FATAL, reconciliation.getMarker(), msgSupplier, (Throwable) null); + } + + /** + * Logs a message with parameters which are only to be constructed if the logging level is the + * {@code FATAL} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param paramSuppliers An array of functions, which when called, produce the desired log message parameters. + * @since Log4j-2.4 + */ + public void fatalCr(final Reconciliation reconciliation, final String message, final Supplier... paramSuppliers) { + logger.logIfEnabled(FQCN, FATAL, reconciliation.getMarker(), reconciliation.toString() + ": " + message, paramSuppliers); + } + + /** + * Logs a message (only to be constructed if the logging level is the {@code FATAL} + * level) with the specified Marker and including the stack trace of the {@link Throwable} + * t passed as parameter. + * + * @param reconciliation The reconciliation + * @param msgSupplier A function, which when called, produces the desired log message; + * the format depends on the message factory. + * @param t A Throwable or null. + * @since Log4j-2.4 + */ + public void fatalCr(final Reconciliation reconciliation, final Supplier msgSupplier, final Throwable t) { + logger.logIfEnabled(FQCN, FATAL, reconciliation.getMarker(), msgSupplier, t); + } + + /** + * Logs a message which is only to be constructed if the logging level is the + * {@code FATAL} level with the specified Marker. The {@code MessageSupplier} may or may + * not use the {@link MessageFactory} to construct the {@code Message}. + * + * @param reconciliation The reconciliation + * @param msgSupplier A function, which when called, produces the desired log message. + * @since Log4j-2.4 + */ + public void fatalCr(final Reconciliation reconciliation, final MessageSupplier msgSupplier) { + logger.logIfEnabled(FQCN, FATAL, reconciliation.getMarker(), msgSupplier, (Throwable) null); + } + + /** + * Logs a message (only to be constructed if the logging level is the {@code FATAL} + * level) with the specified Marker and including the stack trace of the {@link Throwable} + * t passed as parameter. The {@code MessageSupplier} may or may not use the + * {@link MessageFactory} to construct the {@code Message}. + * + * @param reconciliation The reconciliation + * @param msgSupplier A function, which when called, produces the desired log message. + * @param t A Throwable or null. + * @since Log4j-2.4 + */ + public void fatalCr(final Reconciliation reconciliation, final MessageSupplier msgSupplier, final Throwable t) { + logger.logIfEnabled(FQCN, FATAL, reconciliation.getMarker(), msgSupplier, t); + } + + /** + * Logs a message with the specific Marker at the {@code ERROR} level. + * + * @param reconciliation The reconciliation + * @param msg the message string to be logged + */ + public void errorCr(final Reconciliation reconciliation, final Message msg) { + logger.logIfEnabled(FQCN, ERROR, reconciliation.getMarker(), msg, (Throwable) null); + } + + /** + * Logs a message with the specific Marker at the {@code ERROR} level. + * + * @param reconciliation The reconciliation + * @param msg the message string to be logged + * @param t A Throwable or null. 
+ */ + public void errorCr(final Reconciliation reconciliation, final Message msg, final Throwable t) { + logger.logIfEnabled(FQCN, ERROR, reconciliation.getMarker(), msg, t); + } + + /** + * Logs a message object with the {@code ERROR} level. + * + * @param reconciliation The reconciliation + * @param message the message object to log. + */ + public void errorCr(final Reconciliation reconciliation, final Object message) { + logger.logIfEnabled(FQCN, ERROR, reconciliation.getMarker(), reconciliation.toString() + ": " + message, (Throwable) null); + } + + /** + * Logs a message CharSequence with the {@code ERROR} level. + * + * @param reconciliation The reconciliation + * @param message the message CharSequence to log. + * @since Log4j-2.6 + */ + public void errorCr(final Reconciliation reconciliation, final CharSequence message) { + logger.logIfEnabled(FQCN, ERROR, reconciliation.getMarker(), reconciliation.toString() + ": " + message, (Throwable) null); + } + + /** + * Logs a message at the {@code ERROR} level including the stack trace of + * the {@link Throwable} {@code t} passed as parameter. + * + * @param reconciliation The reconciliation + * @param message the message to log. + * @param t the exception to log, including its stack trace. + */ + public void errorCr(final Reconciliation reconciliation, final Object message, final Throwable t) { + logger.logIfEnabled(FQCN, ERROR, reconciliation.getMarker(), reconciliation.toString() + ": " + message, t); + } + + /** + * Logs a message at the {@code ERROR} level including the stack trace of + * the {@link Throwable} {@code t} passed as parameter. + * + * @param reconciliation The reconciliation + * @param message the CharSequence to log. + * @param t the exception to log, including its stack trace. + * @since Log4j-2.6 + */ + public void errorCr(final Reconciliation reconciliation, final CharSequence message, final Throwable t) { + logger.logIfEnabled(FQCN, ERROR, reconciliation.getMarker(), reconciliation.toString() + ": " + message, t); + } + + /** + * Logs a message object with the {@code ERROR} level. + * + * @param reconciliation The reconciliation + * @param message the message object to log. + */ + public void errorCr(final Reconciliation reconciliation, final String message) { + logger.logIfEnabled(FQCN, ERROR, reconciliation.getMarker(), reconciliation.toString() + ": " + message, (Throwable) null); + } + + /** + * Logs a message with parameters at the {@code ERROR} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param params parameters to the message. + */ + public void errorCr(final Reconciliation reconciliation, final String message, final Object... params) { + logger.logIfEnabled(FQCN, ERROR, reconciliation.getMarker(), reconciliation.toString() + ": " + message, params); + } + + /** + * Logs a message with parameters at the {@code ERROR} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @since Log4j-2.6 + */ + public void errorCr(final Reconciliation reconciliation, final String message, final Object p0) { + logger.logIfEnabled(FQCN, ERROR, reconciliation.getMarker(), reconciliation.toString() + ": " + message, p0); + } + + /** + * Logs a message with parameters at the {@code ERROR} level. 
+ * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @since Log4j-2.6 + */ + public void errorCr(final Reconciliation reconciliation, final String message, final Object p0, final Object p1) { + logger.logIfEnabled(FQCN, ERROR, reconciliation.getMarker(), reconciliation.toString() + ": " + message, p0, p1); + } + + /** + * Logs a message with parameters at the {@code ERROR} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @since Log4j-2.6 + */ + public void errorCr(final Reconciliation reconciliation, final String message, final Object p0, final Object p1, final Object p2) { + logger.logIfEnabled(FQCN, ERROR, reconciliation.getMarker(), reconciliation.toString() + ": " + message, p0, p1, p2); + } + + /** + * Logs a message with parameters at the {@code ERROR} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @since Log4j-2.6 + */ + public void errorCr(final Reconciliation reconciliation, final String message, final Object p0, final Object p1, final Object p2, + final Object p3) { + logger.logIfEnabled(FQCN, ERROR, reconciliation.getMarker(), reconciliation.toString() + ": " + message, p0, p1, p2, p3); + } + + /** + * Logs a message with parameters at the {@code ERROR} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @since Log4j-2.6 + */ + public void errorCr(final Reconciliation reconciliation, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4) { + logger.logIfEnabled(FQCN, ERROR, reconciliation.getMarker(), reconciliation.toString() + ": " + message, p0, p1, p2, p3, p4); + } + + /** + * Logs a message with parameters at the {@code ERROR} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @since Log4j-2.6 + */ + public void errorCr(final Reconciliation reconciliation, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5) { + logger.logIfEnabled(FQCN, ERROR, reconciliation.getMarker(), reconciliation.toString() + ": " + message, p0, p1, p2, p3, p4, p5); + } + + /** + * Logs a message with parameters at the {@code ERROR} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. 
+ * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + * @since Log4j-2.6 + */ + public void errorCr(final Reconciliation reconciliation, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6) { + logger.logIfEnabled(FQCN, ERROR, reconciliation.getMarker(), reconciliation.toString() + ": " + message, p0, p1, p2, p3, p4, p5, p6); + } + + /** + * Logs a message with parameters at the {@code ERROR} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + * @param p7 parameter to the message. + * @since Log4j-2.6 + */ + public void errorCr(final Reconciliation reconciliation, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6, + final Object p7) { + logger.logIfEnabled(FQCN, ERROR, reconciliation.getMarker(), reconciliation.toString() + ": " + message, p0, p1, p2, p3, p4, p5, p6, p7); + } + + /** + * Logs a message with parameters at the {@code ERROR} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + * @param p7 parameter to the message. + * @param p8 parameter to the message. + * @since Log4j-2.6 + */ + public void errorCr(final Reconciliation reconciliation, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6, + final Object p7, final Object p8) { + logger.logIfEnabled(FQCN, ERROR, reconciliation.getMarker(), reconciliation.toString() + ": " + message, p0, p1, p2, p3, p4, p5, p6, p7, p8); + } + + /** + * Logs a message with parameters at the {@code ERROR} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + * @param p7 parameter to the message. + * @param p8 parameter to the message. + * @param p9 parameter to the message. 
+ * @since Log4j-2.6 + */ + public void errorCr(final Reconciliation reconciliation, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6, + final Object p7, final Object p8, final Object p9) { + logger.logIfEnabled(FQCN, ERROR, reconciliation.getMarker(), reconciliation.toString() + ": " + message, p0, p1, p2, p3, p4, p5, p6, p7, p8, p9); + } + + /** + * Logs a message at the {@code ERROR} level including the stack trace of + * the {@link Throwable} {@code t} passed as parameter. + * + * @param reconciliation The reconciliation + * @param message the message to log. + * @param t the exception to log, including its stack trace. + */ + public void errorCr(final Reconciliation reconciliation, final String message, final Throwable t) { + logger.logIfEnabled(FQCN, ERROR, reconciliation.getMarker(), reconciliation.toString() + ": " + message, t); + } + + /** + * Logs a message which is only to be constructed if the logging level is the + * {@code ERROR} level with the specified Marker. + * + * @param reconciliation The reconciliation + * @param msgSupplier A function, which when called, produces the desired log message; + * the format depends on the message factory. + * @since Log4j-2.4 + */ + public void errorCr(final Reconciliation reconciliation, final Supplier msgSupplier) { + logger.logIfEnabled(FQCN, ERROR, reconciliation.getMarker(), msgSupplier, (Throwable) null); + } + + /** + * Logs a message with parameters which are only to be constructed if the logging level is the + * {@code ERROR} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param paramSuppliers An array of functions, which when called, produce the desired log message parameters. + * @since Log4j-2.4 + */ + public void errorCr(final Reconciliation reconciliation, final String message, final Supplier... paramSuppliers) { + logger.logIfEnabled(FQCN, ERROR, reconciliation.getMarker(), reconciliation.toString() + ": " + message, paramSuppliers); + } + + /** + * Logs a message (only to be constructed if the logging level is the {@code ERROR} + * level) with the specified Marker and including the stack trace of the {@link Throwable} + * t passed as parameter. + * + * @param reconciliation The reconciliation + * @param msgSupplier A function, which when called, produces the desired log message; + * the format depends on the message factory. + * @param t A Throwable or null. + * @since Log4j-2.4 + */ + public void errorCr(final Reconciliation reconciliation, final Supplier msgSupplier, final Throwable t) { + logger.logIfEnabled(FQCN, ERROR, reconciliation.getMarker(), msgSupplier, t); + } + + /** + * Logs a message which is only to be constructed if the logging level is the + * {@code ERROR} level with the specified Marker. The {@code MessageSupplier} may or may + * not use the {@link MessageFactory} to construct the {@code Message}. + * + * @param reconciliation The reconciliation + * @param msgSupplier A function, which when called, produces the desired log message. 
+ * @since Log4j-2.4 + */ + public void errorCr(final Reconciliation reconciliation, final MessageSupplier msgSupplier) { + logger.logIfEnabled(FQCN, ERROR, reconciliation.getMarker(), msgSupplier, (Throwable) null); + } + + /** + * Logs a message (only to be constructed if the logging level is the {@code ERROR} + * level) with the specified Marker and including the stack trace of the {@link Throwable} + * t passed as parameter. The {@code MessageSupplier} may or may not use the + * {@link MessageFactory} to construct the {@code Message}. + * + * @param reconciliation The reconciliation + * @param msgSupplier A function, which when called, produces the desired log message. + * @param t A Throwable or null. + * @since Log4j-2.4 + */ + public void errorCr(final Reconciliation reconciliation, final MessageSupplier msgSupplier, final Throwable t) { + logger.logIfEnabled(FQCN, ERROR, reconciliation.getMarker(), msgSupplier, t); + } + + /** + * Logs a message with the specific Marker at the {@code WARN} level. + * + * @param reconciliation The reconciliation + * @param msg the message string to be logged + */ + public void warnCr(final Reconciliation reconciliation, final Message msg) { + logger.logIfEnabled(FQCN, WARN, reconciliation.getMarker(), msg, (Throwable) null); + } + + /** + * Logs a message with the specific Marker at the {@code WARN} level. + * + * @param reconciliation The reconciliation + * @param msg the message string to be logged + * @param t A Throwable or null. + */ + public void warnCr(final Reconciliation reconciliation, final Message msg, final Throwable t) { + logger.logIfEnabled(FQCN, WARN, reconciliation.getMarker(), msg, t); + } + + /** + * Logs a message object with the {@code WARN} level. + * + * @param reconciliation The reconciliation + * @param message the message object to log. + */ + public void warnCr(final Reconciliation reconciliation, final Object message) { + logger.logIfEnabled(FQCN, WARN, reconciliation.getMarker(), reconciliation.toString() + ": " + message, (Throwable) null); + } + + /** + * Logs a message CharSequence with the {@code WARN} level. + * + * @param reconciliation The reconciliation + * @param message the message CharSequence to log. + * @since Log4j-2.6 + */ + public void warnCr(final Reconciliation reconciliation, final CharSequence message) { + logger.logIfEnabled(FQCN, WARN, reconciliation.getMarker(), reconciliation.toString() + ": " + message, (Throwable) null); + } + + /** + * Logs a message at the {@code WARN} level including the stack trace of + * the {@link Throwable} {@code t} passed as parameter. + * + * @param reconciliation The reconciliation + * @param message the message to log. + * @param t the exception to log, including its stack trace. + */ + public void warnCr(final Reconciliation reconciliation, final Object message, final Throwable t) { + logger.logIfEnabled(FQCN, WARN, reconciliation.getMarker(), reconciliation.toString() + ": " + message, t); + } + + /** + * Logs a message at the {@code WARN} level including the stack trace of + * the {@link Throwable} {@code t} passed as parameter. + * + * @param reconciliation The reconciliation + * @param message the CharSequence to log. + * @param t the exception to log, including its stack trace. 
+ * @since Log4j-2.6 + */ + public void warnCr(final Reconciliation reconciliation, final CharSequence message, final Throwable t) { + logger.logIfEnabled(FQCN, WARN, reconciliation.getMarker(), reconciliation.toString() + ": " + message, t); + } + + /** + * Logs a message object with the {@code WARN} level. + * + * @param reconciliation The reconciliation + * @param message the message object to log. + */ + public void warnCr(final Reconciliation reconciliation, final String message) { + logger.logIfEnabled(FQCN, WARN, reconciliation.getMarker(), reconciliation.toString() + ": " + message, (Throwable) null); + } + + /** + * Logs a message with parameters at the {@code WARN} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param params parameters to the message. + */ + public void warnCr(final Reconciliation reconciliation, final String message, final Object... params) { + logger.logIfEnabled(FQCN, WARN, reconciliation.getMarker(), reconciliation.toString() + ": " + message, params); + } + + /** + * Logs a message with parameters at the {@code WARN} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @since Log4j-2.6 + */ + public void warnCr(final Reconciliation reconciliation, final String message, final Object p0) { + logger.logIfEnabled(FQCN, WARN, reconciliation.getMarker(), reconciliation.toString() + ": " + message, p0); + } + + /** + * Logs a message with parameters at the {@code WARN} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @since Log4j-2.6 + */ + public void warnCr(final Reconciliation reconciliation, final String message, final Object p0, final Object p1) { + logger.logIfEnabled(FQCN, WARN, reconciliation.getMarker(), reconciliation.toString() + ": " + message, p0, p1); + } + + /** + * Logs a message with parameters at the {@code WARN} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @since Log4j-2.6 + */ + public void warnCr(final Reconciliation reconciliation, final String message, final Object p0, final Object p1, final Object p2) { + logger.logIfEnabled(FQCN, WARN, reconciliation.getMarker(), reconciliation.toString() + ": " + message, p0, p1, p2); + } + + /** + * Logs a message with parameters at the {@code WARN} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @since Log4j-2.6 + */ + public void warnCr(final Reconciliation reconciliation, final String message, final Object p0, final Object p1, final Object p2, + final Object p3) { + logger.logIfEnabled(FQCN, WARN, reconciliation.getMarker(), reconciliation.toString() + ": " + message, p0, p1, p2, p3); + } + + /** + * Logs a message with parameters at the {@code WARN} level. 
+ * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @since Log4j-2.6 + */ + public void warnCr(final Reconciliation reconciliation, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4) { + logger.logIfEnabled(FQCN, WARN, reconciliation.getMarker(), reconciliation.toString() + ": " + message, p0, p1, p2, p3, p4); + } + + /** + * Logs a message with parameters at the {@code WARN} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @since Log4j-2.6 + */ + public void warnCr(final Reconciliation reconciliation, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5) { + logger.logIfEnabled(FQCN, WARN, reconciliation.getMarker(), reconciliation.toString() + ": " + message, p0, p1, p2, p3, p4, p5); + } + + /** + * Logs a message with parameters at the {@code WARN} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + * @since Log4j-2.6 + */ + public void warnCr(final Reconciliation reconciliation, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6) { + logger.logIfEnabled(FQCN, WARN, reconciliation.getMarker(), reconciliation.toString() + ": " + message, p0, p1, p2, p3, p4, p5, p6); + } + + /** + * Logs a message with parameters at the {@code WARN} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + * @param p7 parameter to the message. + * @since Log4j-2.6 + */ + public void warnCr(final Reconciliation reconciliation, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6, + final Object p7) { + logger.logIfEnabled(FQCN, WARN, reconciliation.getMarker(), reconciliation.toString() + ": " + message, p0, p1, p2, p3, p4, p5, p6, p7); + } + + /** + * Logs a message with parameters at the {@code WARN} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. 
+ * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + * @param p7 parameter to the message. + * @param p8 parameter to the message. + * @since Log4j-2.6 + */ + public void warnCr(final Reconciliation reconciliation, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6, + final Object p7, final Object p8) { + logger.logIfEnabled(FQCN, WARN, reconciliation.getMarker(), reconciliation.toString() + ": " + message, p0, p1, p2, p3, p4, p5, p6, p7, p8); + } + + /** + * Logs a message with parameters at the {@code WARN} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + * @param p7 parameter to the message. + * @param p8 parameter to the message. + * @param p9 parameter to the message. + * @since Log4j-2.6 + */ + public void warnCr(final Reconciliation reconciliation, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6, + final Object p7, final Object p8, final Object p9) { + logger.logIfEnabled(FQCN, WARN, reconciliation.getMarker(), reconciliation.toString() + ": " + message, p0, p1, p2, p3, p4, p5, p6, p7, p8, p9); + } + + /** + * Logs a message at the {@code WARN} level including the stack trace of + * the {@link Throwable} {@code t} passed as parameter. + * + * @param reconciliation The reconciliation + * @param message the message to log. + * @param t the exception to log, including its stack trace. + */ + public void warnCr(final Reconciliation reconciliation, final String message, final Throwable t) { + logger.logIfEnabled(FQCN, WARN, reconciliation.getMarker(), reconciliation.toString() + ": " + message, t); + } + + /** + * Logs a message which is only to be constructed if the logging level is the {@code WARN} level. + * + * @param msgSupplier A function, which when called, produces the desired log message; + * the format depends on the message factory. + * @since Log4j-2.4 + */ + public void warnCr(final Supplier msgSupplier) { + logger.logIfEnabled(FQCN, WARN, null, msgSupplier, (Throwable) null); + } + + /** + * Logs a message (only to be constructed if the logging level is the {@code WARN} + * level) including the stack trace of the {@link Throwable} t passed as parameter. + * + * @param msgSupplier A function, which when called, produces the desired log message; + * the format depends on the message factory. + * @param t the exception to log, including its stack trace. + * @since Log4j-2.4 + */ + public void warnCr(final Supplier msgSupplier, final Throwable t) { + logger.logIfEnabled(FQCN, WARN, null, msgSupplier, t); + } + + /** + * Logs a message which is only to be constructed if the logging level is the + * {@code WARN} level with the specified Marker. + * + * @param reconciliation The reconciliation + * @param msgSupplier A function, which when called, produces the desired log message; + * the format depends on the message factory.
+ * @since Log4j-2.4 + */ + public void warnCr(final Reconciliation reconciliation, final Supplier msgSupplier) { + logger.logIfEnabled(FQCN, WARN, reconciliation.getMarker(), msgSupplier, (Throwable) null); + } + + /** + * Logs a message with parameters which are only to be constructed if the logging level is the + * {@code WARN} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param paramSuppliers An array of functions, which when called, produce the desired log message parameters. + * @since Log4j-2.4 + */ + public void warnCr(final Reconciliation reconciliation, final String message, final Supplier... paramSuppliers) { + logger.logIfEnabled(FQCN, WARN, reconciliation.getMarker(), reconciliation.toString() + ": " + message, paramSuppliers); + } + + /** + * Logs a message (only to be constructed if the logging level is the {@code WARN} + * level) with the specified Marker and including the stack trace of the {@link Throwable} + * t passed as parameter. + * + * @param reconciliation The reconciliation + * @param msgSupplier A function, which when called, produces the desired log message; + * the format depends on the message factory. + * @param t A Throwable or null. + * @since Log4j-2.4 + */ + public void warnCr(final Reconciliation reconciliation, final Supplier msgSupplier, final Throwable t) { + logger.logIfEnabled(FQCN, WARN, reconciliation.getMarker(), msgSupplier, t); + } + + /** + * Logs a message which is only to be constructed if the logging level is the + * {@code WARN} level with the specified Marker. The {@code MessageSupplier} may or may + * not use the {@link MessageFactory} to construct the {@code Message}. + * + * @param reconciliation The reconciliation + * @param msgSupplier A function, which when called, produces the desired log message. + * @since Log4j-2.4 + */ + public void warnCr(final Reconciliation reconciliation, final MessageSupplier msgSupplier) { + logger.logIfEnabled(FQCN, WARN, reconciliation.getMarker(), msgSupplier, (Throwable) null); + } + + /** + * Logs a message (only to be constructed if the logging level is the {@code WARN} + * level) with the specified Marker and including the stack trace of the {@link Throwable} + * t passed as parameter. The {@code MessageSupplier} may or may not use the + * {@link MessageFactory} to construct the {@code Message}. + * + * @param reconciliation The reconciliation + * @param msgSupplier A function, which when called, produces the desired log message. + * @param t A Throwable or null. + * @since Log4j-2.4 + */ + public void warnCr(final Reconciliation reconciliation, final MessageSupplier msgSupplier, final Throwable t) { + logger.logIfEnabled(FQCN, WARN, reconciliation.getMarker(), msgSupplier, t); + } + + /** + * Logs a message with the specific Marker at the {@code INFO} level. + * + * @param reconciliation The reconciliation + * @param msg the message string to be logged + */ + public void infoCr(final Reconciliation reconciliation, final Message msg) { + logger.logIfEnabled(FQCN, INFO, reconciliation.getMarker(), msg, (Throwable) null); + } + + /** + * Logs a message with the specific Marker at the {@code INFO} level. + * + * @param reconciliation The reconciliation + * @param msg the message string to be logged + * @param t A Throwable or null. 
+ */ + public void infoCr(final Reconciliation reconciliation, final Message msg, final Throwable t) { + logger.logIfEnabled(FQCN, INFO, reconciliation.getMarker(), msg, t); + } + + /** + * Logs a message object with the {@code INFO} level. + * + * @param reconciliation The reconciliation + * @param message the message object to log. + */ + public void infoCr(final Reconciliation reconciliation, final Object message) { + logger.logIfEnabled(FQCN, INFO, reconciliation.getMarker(), reconciliation.toString() + ": " + message, (Throwable) null); + } + + /** + * Logs a message CharSequence with the {@code INFO} level. + * + * @param reconciliation The reconciliation + * @param message the message CharSequence to log. + * @since Log4j-2.6 + */ + public void infoCr(final Reconciliation reconciliation, final CharSequence message) { + logger.logIfEnabled(FQCN, INFO, reconciliation.getMarker(), reconciliation.toString() + ": " + message, (Throwable) null); + } + + /** + * Logs a message at the {@code INFO} level including the stack trace of + * the {@link Throwable} {@code t} passed as parameter. + * + * @param reconciliation The reconciliation + * @param message the message to log. + * @param t the exception to log, including its stack trace. + */ + public void infoCr(final Reconciliation reconciliation, final Object message, final Throwable t) { + logger.logIfEnabled(FQCN, INFO, reconciliation.getMarker(), reconciliation.toString() + ": " + message, t); + } + + /** + * Logs a message at the {@code INFO} level including the stack trace of + * the {@link Throwable} {@code t} passed as parameter. + * + * @param reconciliation The reconciliation + * @param message the CharSequence to log. + * @param t the exception to log, including its stack trace. + * @since Log4j-2.6 + */ + public void infoCr(final Reconciliation reconciliation, final CharSequence message, final Throwable t) { + logger.logIfEnabled(FQCN, INFO, reconciliation.getMarker(), reconciliation.toString() + ": " + message, t); + } + + /** + * Logs a message object with the {@code INFO} level. + * + * @param reconciliation The reconciliation + * @param message the message object to log. + */ + public void infoCr(final Reconciliation reconciliation, final String message) { + logger.logIfEnabled(FQCN, INFO, reconciliation.getMarker(), reconciliation.toString() + ": " + message, (Throwable) null); + } + + /** + * Logs a message with parameters at the {@code INFO} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param params parameters to the message. + */ + public void infoCr(final Reconciliation reconciliation, final String message, final Object... params) { + logger.logIfEnabled(FQCN, INFO, reconciliation.getMarker(), reconciliation.toString() + ": " + message, params); + } + + /** + * Logs a message with parameters at the {@code INFO} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @since Log4j-2.6 + */ + public void infoCr(final Reconciliation reconciliation, final String message, final Object p0) { + logger.logIfEnabled(FQCN, INFO, reconciliation.getMarker(), reconciliation.toString() + ": " + message, p0); + } + + /** + * Logs a message with parameters at the {@code INFO} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. 
+ * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @since Log4j-2.6 + */ + public void infoCr(final Reconciliation reconciliation, final String message, final Object p0, final Object p1) { + logger.logIfEnabled(FQCN, INFO, reconciliation.getMarker(), reconciliation.toString() + ": " + message, p0, p1); + } + + /** + * Logs a message with parameters at the {@code INFO} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @since Log4j-2.6 + */ + public void infoCr(final Reconciliation reconciliation, final String message, final Object p0, final Object p1, final Object p2) { + logger.logIfEnabled(FQCN, INFO, reconciliation.getMarker(), reconciliation.toString() + ": " + message, p0, p1, p2); + } + + /** + * Logs a message with parameters at the {@code INFO} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @since Log4j-2.6 + */ + public void infoCr(final Reconciliation reconciliation, final String message, final Object p0, final Object p1, final Object p2, + final Object p3) { + logger.logIfEnabled(FQCN, INFO, reconciliation.getMarker(), reconciliation.toString() + ": " + message, p0, p1, p2, p3); + } + + /** + * Logs a message with parameters at the {@code INFO} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @since Log4j-2.6 + */ + public void infoCr(final Reconciliation reconciliation, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4) { + logger.logIfEnabled(FQCN, INFO, reconciliation.getMarker(), reconciliation.toString() + ": " + message, p0, p1, p2, p3, p4); + } + + /** + * Logs a message with parameters at the {@code INFO} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @since Log4j-2.6 + */ + public void infoCr(final Reconciliation reconciliation, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5) { + logger.logIfEnabled(FQCN, INFO, reconciliation.getMarker(), reconciliation.toString() + ": " + message, p0, p1, p2, p3, p4, p5); + } + + /** + * Logs a message with parameters at the {@code INFO} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. 
+ * @param p5 parameter to the message. + * @param p6 parameter to the message. + * @since Log4j-2.6 + */ + public void infoCr(final Reconciliation reconciliation, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6) { + logger.logIfEnabled(FQCN, INFO, reconciliation.getMarker(), reconciliation.toString() + ": " + message, p0, p1, p2, p3, p4, p5, p6); + } + + /** + * Logs a message with parameters at the {@code INFO} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + * @param p7 parameter to the message. + * @since Log4j-2.6 + */ + public void infoCr(final Reconciliation reconciliation, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6, + final Object p7) { + logger.logIfEnabled(FQCN, INFO, reconciliation.getMarker(), reconciliation.toString() + ": " + message, p0, p1, p2, p3, p4, p5, p6, p7); + } + + /** + * Logs a message with parameters at the {@code INFO} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + * @param p7 parameter to the message. + * @param p8 parameter to the message. + * @since Log4j-2.6 + */ + public void infoCr(final Reconciliation reconciliation, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6, + final Object p7, final Object p8) { + logger.logIfEnabled(FQCN, INFO, reconciliation.getMarker(), reconciliation.toString() + ": " + message, p0, p1, p2, p3, p4, p5, p6, p7, p8); + } + + /** + * Logs a message with parameters at the {@code INFO} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + * @param p7 parameter to the message. + * @param p8 parameter to the message. + * @param p9 parameter to the message. + * @since Log4j-2.6 + */ + public void infoCr(final Reconciliation reconciliation, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6, + final Object p7, final Object p8, final Object p9) { + logger.logIfEnabled(FQCN, INFO, reconciliation.getMarker(), reconciliation.toString() + ": " + message, p0, p1, p2, p3, p4, p5, p6, p7, p8, p9); + } + + /** + * Logs a message at the {@code INFO} level including the stack trace of + * the {@link Throwable} {@code t} passed as parameter. 
+ * + * @param reconciliation The reconciliation + * @param message the message to log. + * @param t the exception to log, including its stack trace. + */ + public void infoCr(final Reconciliation reconciliation, final String message, final Throwable t) { + logger.logIfEnabled(FQCN, INFO, reconciliation.getMarker(), reconciliation.toString() + ": " + message, t); + } + + /** + * Logs a message which is only to be constructed if the logging level is the + * {@code INFO} level with the specified Marker. + * + * @param reconciliation The reconciliation + * @param msgSupplier A function, which when called, produces the desired log message; + * the format depends on the message factory. + * @since Log4j-2.4 + */ + public void infoCr(final Reconciliation reconciliation, final Supplier msgSupplier) { + logger.logIfEnabled(FQCN, INFO, reconciliation.getMarker(), msgSupplier, (Throwable) null); + } + + /** + * Logs a message with parameters which are only to be constructed if the logging level is the + * {@code INFO} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param paramSuppliers An array of functions, which when called, produce the desired log message parameters. + * @since Log4j-2.4 + */ + public void infoCr(final Reconciliation reconciliation, final String message, final Supplier... paramSuppliers) { + logger.logIfEnabled(FQCN, INFO, reconciliation.getMarker(), reconciliation.toString() + ": " + message, paramSuppliers); + } + + /** + * Logs a message (only to be constructed if the logging level is the {@code INFO} + * level) with the specified Marker and including the stack trace of the {@link Throwable} + * t passed as parameter. + * + * @param reconciliation The reconciliation + * @param msgSupplier A function, which when called, produces the desired log message; + * the format depends on the message factory. + * @param t A Throwable or null. + * @since Log4j-2.4 + */ + public void infoCr(final Reconciliation reconciliation, final Supplier msgSupplier, final Throwable t) { + logger.logIfEnabled(FQCN, INFO, reconciliation.getMarker(), msgSupplier, t); + } + + /** + * Logs a message which is only to be constructed if the logging level is the + * {@code INFO} level with the specified Marker. The {@code MessageSupplier} may or may + * not use the {@link MessageFactory} to construct the {@code Message}. + * + * @param reconciliation The reconciliation + * @param msgSupplier A function, which when called, produces the desired log message. + * @since Log4j-2.4 + */ + public void infoCr(final Reconciliation reconciliation, final MessageSupplier msgSupplier) { + logger.logIfEnabled(FQCN, INFO, reconciliation.getMarker(), msgSupplier, (Throwable) null); + } + + /** + * Logs a message (only to be constructed if the logging level is the {@code INFO} + * level) with the specified Marker and including the stack trace of the {@link Throwable} + * t passed as parameter. The {@code MessageSupplier} may or may not use the + * {@link MessageFactory} to construct the {@code Message}. + * + * @param reconciliation The reconciliation + * @param msgSupplier A function, which when called, produces the desired log message. + * @param t A Throwable or null. 
+ * @since Log4j-2.4 + */ + public void infoCr(final Reconciliation reconciliation, final MessageSupplier msgSupplier, final Throwable t) { + logger.logIfEnabled(FQCN, INFO, reconciliation.getMarker(), msgSupplier, t); + } + + /** + * Logs a message with the specific Marker at the {@code DEBUG} level. + * + * @param reconciliation The reconciliation + * @param msg the message string to be logged + */ + public void debugCr(final Reconciliation reconciliation, final Message msg) { + logger.logIfEnabled(FQCN, DEBUG, reconciliation.getMarker(), msg, (Throwable) null); + } + + /** + * Logs a message with the specific Marker at the {@code DEBUG} level. + * + * @param reconciliation The reconciliation + * @param msg the message string to be logged + * @param t A Throwable or null. + */ + public void debugCr(final Reconciliation reconciliation, final Message msg, final Throwable t) { + logger.logIfEnabled(FQCN, DEBUG, reconciliation.getMarker(), msg, t); + } + + /** + * Logs a message object with the {@code DEBUG} level. + * + * @param reconciliation The reconciliation + * @param message the message object to log. + */ + public void debugCr(final Reconciliation reconciliation, final Object message) { + logger.logIfEnabled(FQCN, DEBUG, reconciliation.getMarker(), reconciliation.toString() + ": " + message, (Throwable) null); + } + + /** + * Logs a message CharSequence with the {@code DEBUG} level. + * + * @param reconciliation The reconciliation + * @param message the message CharSequence to log. + * @since Log4j-2.6 + */ + public void debugCr(final Reconciliation reconciliation, final CharSequence message) { + logger.logIfEnabled(FQCN, DEBUG, reconciliation.getMarker(), reconciliation.toString() + ": " + message, (Throwable) null); + } + + /** + * Logs a message at the {@code DEBUG} level including the stack trace of + * the {@link Throwable} {@code t} passed as parameter. + * + * @param reconciliation The reconciliation + * @param message the message to log. + * @param t the exception to log, including its stack trace. + */ + public void debugCr(final Reconciliation reconciliation, final Object message, final Throwable t) { + logger.logIfEnabled(FQCN, DEBUG, reconciliation.getMarker(), reconciliation.toString() + ": " + message, t); + } + + /** + * Logs a message at the {@code DEBUG} level including the stack trace of + * the {@link Throwable} {@code t} passed as parameter. + * + * @param reconciliation The reconciliation + * @param message the CharSequence to log. + * @param t the exception to log, including its stack trace. + * @since Log4j-2.6 + */ + public void debugCr(final Reconciliation reconciliation, final CharSequence message, final Throwable t) { + logger.logIfEnabled(FQCN, DEBUG, reconciliation.getMarker(), reconciliation.toString() + ": " + message, t); + } + + /** + * Logs a message object with the {@code DEBUG} level. + * + * @param reconciliation The reconciliation + * @param message the message object to log. + */ + public void debugCr(final Reconciliation reconciliation, final String message) { + logger.logIfEnabled(FQCN, DEBUG, reconciliation.getMarker(), reconciliation.toString() + ": " + message, (Throwable) null); + } + + /** + * Logs a message with parameters at the {@code DEBUG} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param params parameters to the message. + */ + public void debugCr(final Reconciliation reconciliation, final String message, final Object... 
params) { + logger.logIfEnabled(FQCN, DEBUG, reconciliation.getMarker(), reconciliation.toString() + ": " + message, params); + } + + /** + * Logs a message with parameters at the {@code DEBUG} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @since Log4j-2.6 + */ + public void debugCr(final Reconciliation reconciliation, final String message, final Object p0) { + logger.logIfEnabled(FQCN, DEBUG, reconciliation.getMarker(), reconciliation.toString() + ": " + message, p0); + } + + /** + * Logs a message with parameters at the {@code DEBUG} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @since Log4j-2.6 + */ + public void debugCr(final Reconciliation reconciliation, final String message, final Object p0, final Object p1) { + logger.logIfEnabled(FQCN, DEBUG, reconciliation.getMarker(), reconciliation.toString() + ": " + message, p0, p1); + } + + /** + * Logs a message with parameters at the {@code DEBUG} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @since Log4j-2.6 + */ + public void debugCr(final Reconciliation reconciliation, final String message, final Object p0, final Object p1, final Object p2) { + logger.logIfEnabled(FQCN, DEBUG, reconciliation.getMarker(), reconciliation.toString() + ": " + message, p0, p1, p2); + } + + /** + * Logs a message with parameters at the {@code DEBUG} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @since Log4j-2.6 + */ + public void debugCr(final Reconciliation reconciliation, final String message, final Object p0, final Object p1, final Object p2, + final Object p3) { + logger.logIfEnabled(FQCN, DEBUG, reconciliation.getMarker(), reconciliation.toString() + ": " + message, p0, p1, p2, p3); + } + + /** + * Logs a message with parameters at the {@code DEBUG} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @since Log4j-2.6 + */ + public void debugCr(final Reconciliation reconciliation, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4) { + logger.logIfEnabled(FQCN, DEBUG, reconciliation.getMarker(), reconciliation.toString() + ": " + message, p0, p1, p2, p3, p4); + } + + /** + * Logs a message with parameters at the {@code DEBUG} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. 
+ * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @since Log4j-2.6 + */ + public void debugCr(final Reconciliation reconciliation, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5) { + logger.logIfEnabled(FQCN, DEBUG, reconciliation.getMarker(), reconciliation.toString() + ": " + message, p0, p1, p2, p3, p4, p5); + } + + /** + * Logs a message with parameters at the {@code DEBUG} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + * @since Log4j-2.6 + */ + public void debugCr(final Reconciliation reconciliation, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6) { + logger.logIfEnabled(FQCN, DEBUG, reconciliation.getMarker(), reconciliation.toString() + ": " + message, p0, p1, p2, p3, p4, p5, p6); + } + + /** + * Logs a message with parameters at the {@code DEBUG} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + * @param p7 parameter to the message. + * @since Log4j-2.6 + */ + public void debugCr(final Reconciliation reconciliation, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6, + final Object p7) { + logger.logIfEnabled(FQCN, DEBUG, reconciliation.getMarker(), reconciliation.toString() + ": " + message, p0, p1, p2, p3, p4, p5, p6, p7); + } + + /** + * Logs a message with parameters at the {@code DEBUG} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + * @param p7 parameter to the message. + * @param p8 parameter to the message. + * @since Log4j-2.6 + */ + public void debugCr(final Reconciliation reconciliation, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6, + final Object p7, final Object p8) { + logger.logIfEnabled(FQCN, DEBUG, reconciliation.getMarker(), reconciliation.toString() + ": " + message, p0, p1, p2, p3, p4, p5, p6, p7, p8); + } + + /** + * Logs a message with parameters at the {@code DEBUG} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. 
+ * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + * @param p7 parameter to the message. + * @param p8 parameter to the message. + * @param p9 parameter to the message. + * @since Log4j-2.6 + */ + public void debugCr(final Reconciliation reconciliation, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6, + final Object p7, final Object p8, final Object p9) { + logger.logIfEnabled(FQCN, DEBUG, reconciliation.getMarker(), reconciliation.toString() + ": " + message, p0, p1, p2, p3, p4, p5, p6, p7, p8, p9); + } + + /** + * Logs a message at the {@code DEBUG} level including the stack trace of + * the {@link Throwable} {@code t} passed as parameter. + * + * @param reconciliation The reconciliation + * @param message the message to log. + * @param t the exception to log, including its stack trace. + */ + public void debugCr(final Reconciliation reconciliation, final String message, final Throwable t) { + logger.logIfEnabled(FQCN, DEBUG, reconciliation.getMarker(), reconciliation.toString() + ": " + message, t); + } + + /** + * Logs a message which is only to be constructed if the logging level is the {@code DEBUG} level. + * + * @param msgSupplier A function, which when called, produces the desired log message; + * the format depends on the message factory. + * @since Log4j-2.4 + */ + public void debugCr(final Supplier msgSupplier) { + logger.logIfEnabled(FQCN, DEBUG, null, msgSupplier, (Throwable) null); + } + + /** + * Logs a message (only to be constructed if the logging level is the {@code DEBUG} + * level) including the stack trace of the {@link Throwable} t passed as parameter. + * + * @param msgSupplier A function, which when called, produces the desired log message; + * the format depends on the message factory. + * @param t the exception to log, including its stack trace. + * @since Log4j-2.4 + */ + public void debugCr(final Supplier msgSupplier, final Throwable t) { + logger.logIfEnabled(FQCN, DEBUG, null, msgSupplier, t); + } + + /** + * Logs a message which is only to be constructed if the logging level is the + * {@code DEBUG} level with the specified Marker. + * + * @param reconciliation The reconciliation + * @param msgSupplier A function, which when called, produces the desired log message; + * the format depends on the message factory. + * @since Log4j-2.4 + */ + public void debugCr(final Reconciliation reconciliation, final Supplier msgSupplier) { + logger.logIfEnabled(FQCN, DEBUG, reconciliation.getMarker(), msgSupplier, (Throwable) null); + } + + /** + * Logs a message with parameters which are only to be constructed if the logging level is the + * {@code DEBUG} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param paramSuppliers An array of functions, which when called, produce the desired log message parameters. + * @since Log4j-2.4 + */ + public void debugCr(final Reconciliation reconciliation, final String message, final Supplier... paramSuppliers) { + logger.logIfEnabled(FQCN, DEBUG, reconciliation.getMarker(), reconciliation.toString() + ": " + message, paramSuppliers); + } + + /** + * Logs a message (only to be constructed if the logging level is the {@code DEBUG} + * level) with the specified Marker and including the stack trace of the {@link Throwable} + * t passed as parameter.
+ * + * @param reconciliation The reconciliation + * @param msgSupplier A function, which when called, produces the desired log message; + * the format depends on the message factory. + * @param t A Throwable or null. + * @since Log4j-2.4 + */ + public void debugCr(final Reconciliation reconciliation, final Supplier msgSupplier, final Throwable t) { + logger.logIfEnabled(FQCN, DEBUG, reconciliation.getMarker(), msgSupplier, t); + } + + /** + * Logs a message which is only to be constructed if the logging level is the + * {@code DEBUG} level with the specified Marker. The {@code MessageSupplier} may or may + * not use the {@link MessageFactory} to construct the {@code Message}. + * + * @param reconciliation The reconciliation + * @param msgSupplier A function, which when called, produces the desired log message. + * @since Log4j-2.4 + */ + public void debugCr(final Reconciliation reconciliation, final MessageSupplier msgSupplier) { + logger.logIfEnabled(FQCN, DEBUG, reconciliation.getMarker(), msgSupplier, (Throwable) null); + } + + /** + * Logs a message (only to be constructed if the logging level is the {@code DEBUG} + * level) with the specified Marker and including the stack trace of the {@link Throwable} + * t passed as parameter. The {@code MessageSupplier} may or may not use the + * {@link MessageFactory} to construct the {@code Message}. + * + * @param reconciliation The reconciliation + * @param msgSupplier A function, which when called, produces the desired log message. + * @param t A Throwable or null. + * @since Log4j-2.4 + */ + public void debugCr(final Reconciliation reconciliation, final MessageSupplier msgSupplier, final Throwable t) { + logger.logIfEnabled(FQCN, DEBUG, reconciliation.getMarker(), msgSupplier, t); + } + + /** + * Logs a message with the specific Marker at the {@code TRACE} level. + * + * @param reconciliation The reconciliation + * @param msg the message string to be logged + */ + public void traceCr(final Reconciliation reconciliation, final Message msg) { + logger.logIfEnabled(FQCN, TRACE, reconciliation.getMarker(), msg, (Throwable) null); + } + + /** + * Logs a message with the specific Marker at the {@code TRACE} level. + * + * @param reconciliation The reconciliation + * @param msg the message string to be logged + * @param t A Throwable or null. + */ + public void traceCr(final Reconciliation reconciliation, final Message msg, final Throwable t) { + logger.logIfEnabled(FQCN, TRACE, reconciliation.getMarker(), msg, t); + } + + /** + * Logs a message object with the {@code TRACE} level. + * + * @param reconciliation The reconciliation + * @param message the message object to log. + */ + public void traceCr(final Reconciliation reconciliation, final Object message) { + logger.logIfEnabled(FQCN, TRACE, reconciliation.getMarker(), reconciliation.toString() + ": " + message, (Throwable) null); + } + + /** + * Logs a message CharSequence with the {@code TRACE} level. + * + * @param reconciliation The reconciliation + * @param message the message CharSequence to log. + * @since Log4j-2.6 + */ + public void traceCr(final Reconciliation reconciliation, final CharSequence message) { + logger.logIfEnabled(FQCN, TRACE, reconciliation.getMarker(), reconciliation.toString() + ": " + message, (Throwable) null); + } + + /** + * Logs a message at the {@code TRACE} level including the stack trace of + * the {@link Throwable} {@code t} passed as parameter. + * + * @param reconciliation The reconciliation + * @param message the message to log. 
+ * @param t the exception to log, including its stack trace. + */ + public void traceCr(final Reconciliation reconciliation, final Object message, final Throwable t) { + logger.logIfEnabled(FQCN, TRACE, reconciliation.getMarker(), reconciliation.toString() + ": " + message, t); + } + + /** + * Logs a message at the {@code TRACE} level including the stack trace of + * the {@link Throwable} {@code t} passed as parameter. + * + * @param reconciliation The reconciliation + * @param message the CharSequence to log. + * @param t the exception to log, including its stack trace. + * @since Log4j-2.6 + */ + public void traceCr(final Reconciliation reconciliation, final CharSequence message, final Throwable t) { + logger.logIfEnabled(FQCN, TRACE, reconciliation.getMarker(), reconciliation.toString() + ": " + message, t); + } + + /** + * Logs a message object with the {@code TRACE} level. + * + * @param reconciliation The reconciliation + * @param message the message object to log. + */ + public void traceCr(final Reconciliation reconciliation, final String message) { + logger.logIfEnabled(FQCN, TRACE, reconciliation.getMarker(), reconciliation.toString() + ": " + message, (Throwable) null); + } + + /** + * Logs a message with parameters at the {@code TRACE} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param params parameters to the message. + */ + public void traceCr(final Reconciliation reconciliation, final String message, final Object... params) { + logger.logIfEnabled(FQCN, TRACE, reconciliation.getMarker(), reconciliation.toString() + ": " + message, params); + } + + /** + * Logs a message with parameters at the {@code TRACE} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @since Log4j-2.6 + */ + public void traceCr(final Reconciliation reconciliation, final String message, final Object p0) { + logger.logIfEnabled(FQCN, TRACE, reconciliation.getMarker(), reconciliation.toString() + ": " + message, p0); + } + + /** + * Logs a message with parameters at the {@code TRACE} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @since Log4j-2.6 + */ + public void traceCr(final Reconciliation reconciliation, final String message, final Object p0, final Object p1) { + logger.logIfEnabled(FQCN, TRACE, reconciliation.getMarker(), reconciliation.toString() + ": " + message, p0, p1); + } + + /** + * Logs a message with parameters at the {@code TRACE} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @since Log4j-2.6 + */ + public void traceCr(final Reconciliation reconciliation, final String message, final Object p0, final Object p1, final Object p2) { + logger.logIfEnabled(FQCN, TRACE, reconciliation.getMarker(), reconciliation.toString() + ": " + message, p0, p1, p2); + } + + /** + * Logs a message with parameters at the {@code TRACE} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. 
+ * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @since Log4j-2.6 + */ + public void traceCr(final Reconciliation reconciliation, final String message, final Object p0, final Object p1, final Object p2, + final Object p3) { + logger.logIfEnabled(FQCN, TRACE, reconciliation.getMarker(), reconciliation.toString() + ": " + message, p0, p1, p2, p3); + } + + /** + * Logs a message with parameters at the {@code TRACE} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @since Log4j-2.6 + */ + public void traceCr(final Reconciliation reconciliation, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4) { + logger.logIfEnabled(FQCN, TRACE, reconciliation.getMarker(), reconciliation.toString() + ": " + message, p0, p1, p2, p3, p4); + } + + /** + * Logs a message with parameters at the {@code TRACE} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @since Log4j-2.6 + */ + public void traceCr(final Reconciliation reconciliation, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5) { + logger.logIfEnabled(FQCN, TRACE, reconciliation.getMarker(), reconciliation.toString() + ": " + message, p0, p1, p2, p3, p4, p5); + } + + /** + * Logs a message with parameters at the {@code TRACE} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + * @since Log4j-2.6 + */ + public void traceCr(final Reconciliation reconciliation, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6) { + logger.logIfEnabled(FQCN, TRACE, reconciliation.getMarker(), reconciliation.toString() + ": " + message, p0, p1, p2, p3, p4, p5, p6); + } + + /** + * Logs a message with parameters at the {@code TRACE} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + * @param p7 parameter to the message. 
+ * @since Log4j-2.6 + */ + public void traceCr(final Reconciliation reconciliation, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6, + final Object p7) { + logger.logIfEnabled(FQCN, TRACE, reconciliation.getMarker(), reconciliation.toString() + ": " + message, p0, p1, p2, p3, p4, p5, p6, p7); + } + + /** + * Logs a message with parameters at the {@code TRACE} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + * @param p7 parameter to the message. + * @param p8 parameter to the message. + * @since Log4j-2.6 + */ + public void traceCr(final Reconciliation reconciliation, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6, + final Object p7, final Object p8) { + logger.logIfEnabled(FQCN, TRACE, reconciliation.getMarker(), reconciliation.toString() + ": " + message, p0, p1, p2, p3, p4, p5, p6, p7, p8); + } + + /** + * Logs a message with parameters at the {@code TRACE} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + * @param p7 parameter to the message. + * @param p8 parameter to the message. + * @param p9 parameter to the message. + * @since Log4j-2.6 + */ + public void traceCr(final Reconciliation reconciliation, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6, + final Object p7, final Object p8, final Object p9) { + logger.logIfEnabled(FQCN, TRACE, reconciliation.getMarker(), reconciliation.toString() + ": " + message, p0, p1, p2, p3, p4, p5, p6, p7, p8, p9); + } + + /** + * Logs a message at the {@code TRACE} level including the stack trace of + * the {@link Throwable} {@code t} passed as parameter. + * + * @param reconciliation The reconciliation + * @param message the message to log. + * @param t the exception to log, including its stack trace. + */ + public void traceCr(final Reconciliation reconciliation, final String message, final Throwable t) { + logger.logIfEnabled(FQCN, TRACE, reconciliation.getMarker(), reconciliation.toString() + ": " + message, t); + } + + /** + * Logs a message which is only to be constructed if the logging level is the + * {@code TRACE} level with the specified Marker. + * + * @param reconciliation The reconciliation + * @param msgSupplier A function, which when called, produces the desired log message; + * the format depends on the message factory. 
+ * @since Log4j-2.4 + */ + public void traceCr(final Reconciliation reconciliation, final Supplier msgSupplier) { + logger.logIfEnabled(FQCN, TRACE, reconciliation.getMarker(), msgSupplier, (Throwable) null); + } + + /** + * Logs a message with parameters which are only to be constructed if the logging level is the + * {@code TRACE} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param paramSuppliers An array of functions, which when called, produce the desired log message parameters. + * @since Log4j-2.4 + */ + public void traceCr(final Reconciliation reconciliation, final String message, final Supplier... paramSuppliers) { + logger.logIfEnabled(FQCN, TRACE, reconciliation.getMarker(), reconciliation.toString() + ": " + message, paramSuppliers); + } + + /** + * Logs a message (only to be constructed if the logging level is the {@code TRACE} + * level) with the specified Marker and including the stack trace of the {@link Throwable} + * t passed as parameter. + * + * @param reconciliation The reconciliation + * @param msgSupplier A function, which when called, produces the desired log message; + * the format depends on the message factory. + * @param t A Throwable or null. + * @since Log4j-2.4 + */ + public void traceCr(final Reconciliation reconciliation, final Supplier msgSupplier, final Throwable t) { + logger.logIfEnabled(FQCN, TRACE, reconciliation.getMarker(), msgSupplier, t); + } + + /** + * Logs a message which is only to be constructed if the logging level is the + * {@code TRACE} level with the specified Marker. The {@code MessageSupplier} may or may + * not use the {@link MessageFactory} to construct the {@code Message}. + * + * @param reconciliation The reconciliation + * @param msgSupplier A function, which when called, produces the desired log message. + * @since Log4j-2.4 + */ + public void traceCr(final Reconciliation reconciliation, final MessageSupplier msgSupplier) { + logger.logIfEnabled(FQCN, TRACE, reconciliation.getMarker(), msgSupplier, (Throwable) null); + } + + /** + * Logs a message (only to be constructed if the logging level is the {@code TRACE} + * level) with the specified Marker and including the stack trace of the {@link Throwable} + * t passed as parameter. The {@code MessageSupplier} may or may not use the + * {@link MessageFactory} to construct the {@code Message}. + * + * @param reconciliation The reconciliation + * @param msgSupplier A function, which when called, produces the desired log message. + * @param t A Throwable or null. + * @since Log4j-2.4 + */ + public void traceCr(final Reconciliation reconciliation, final MessageSupplier msgSupplier, final Throwable t) { + logger.logIfEnabled(FQCN, TRACE, reconciliation.getMarker(), msgSupplier, t); + } + + /** + * Logs a message with the specific Marker at the {@code ALL} level. + * + * @param reconciliation The reconciliation + * @param msg the message string to be logged + */ + public void allCr(final Reconciliation reconciliation, final Message msg) { + logger.logIfEnabled(FQCN, ALL, reconciliation.getMarker(), msg, (Throwable) null); + } + + /** + * Logs a message with the specific Marker at the {@code ALL} level. + * + * @param reconciliation The reconciliation + * @param msg the message string to be logged + * @param t A Throwable or null. 
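The Supplier and MessageSupplier variants only invoke the supplier once the TRACE level (and any marker filter) passes, so expensive message construction is skipped in normal operation; note that, unlike the String overloads, they hand the supplier straight to logIfEnabled, so the CR identity travels only in the marker rather than in a message prefix. A short sketch, reusing LOGGER and reconciliation from the sketch above and a placeholder dump method:

// Message construction is deferred until TRACE is actually enabled for this marker.
LOGGER.traceCr(reconciliation, () -> "Current desired state: " + dumpDesiredState());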
+ */ + public void allCr(final Reconciliation reconciliation, final Message msg, final Throwable t) { + logger.logIfEnabled(FQCN, ALL, reconciliation.getMarker(), msg, t); + } + + /** + * Logs a message object with the {@code ALL} level. + * + * @param reconciliation The reconciliation + * @param message the message object to log. + */ + public void allCr(final Reconciliation reconciliation, final Object message) { + logger.logIfEnabled(FQCN, ALL, reconciliation.getMarker(), reconciliation.toString() + ": " + message, (Throwable) null); + } + + /** + * Logs a message CharSequence with the {@code ALL} level. + * + * @param reconciliation The reconciliation + * @param message the message CharSequence to log. + * @since Log4j-2.6 + */ + public void allCr(final Reconciliation reconciliation, final CharSequence message) { + logger.logIfEnabled(FQCN, ALL, reconciliation.getMarker(), reconciliation.toString() + ": " + message, (Throwable) null); + } + + /** + * Logs a message at the {@code ALL} level including the stack trace of + * the {@link Throwable} {@code t} passed as parameter. + * + * @param reconciliation The reconciliation + * @param message the message to log. + * @param t the exception to log, including its stack trace. + */ + public void allCr(final Reconciliation reconciliation, final Object message, final Throwable t) { + logger.logIfEnabled(FQCN, ALL, reconciliation.getMarker(), reconciliation.toString() + ": " + message, t); + } + + /** + * Logs a message at the {@code ALL} level including the stack trace of + * the {@link Throwable} {@code t} passed as parameter. + * + * @param reconciliation The reconciliation + * @param message the CharSequence to log. + * @param t the exception to log, including its stack trace. + * @since Log4j-2.6 + */ + public void allCr(final Reconciliation reconciliation, final CharSequence message, final Throwable t) { + logger.logIfEnabled(FQCN, ALL, reconciliation.getMarker(), reconciliation.toString() + ": " + message, t); + } + + /** + * Logs a message object with the {@code ALL} level. + * + * @param reconciliation The reconciliation + * @param message the message object to log. + */ + public void allCr(final Reconciliation reconciliation, final String message) { + logger.logIfEnabled(FQCN, ALL, reconciliation.getMarker(), reconciliation.toString() + ": " + message, (Throwable) null); + } + + /** + * Logs a message with parameters at the {@code ALL} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param params parameters to the message. + */ + public void allCr(final Reconciliation reconciliation, final String message, final Object... params) { + logger.logIfEnabled(FQCN, ALL, reconciliation.getMarker(), reconciliation.toString() + ": " + message, params); + } + + /** + * Logs a message with parameters at the {@code ALL} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @since Log4j-2.6 + */ + public void allCr(final Reconciliation reconciliation, final String message, final Object p0) { + logger.logIfEnabled(FQCN, ALL, reconciliation.getMarker(), reconciliation.toString() + ": " + message, p0); + } + + /** + * Logs a message with parameters at the {@code ALL} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. 
+ * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @since Log4j-2.6 + */ + public void allCr(final Reconciliation reconciliation, final String message, final Object p0, final Object p1) { + logger.logIfEnabled(FQCN, ALL, reconciliation.getMarker(), reconciliation.toString() + ": " + message, p0, p1); + } + + /** + * Logs a message with parameters at the {@code ALL} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @since Log4j-2.6 + */ + public void allCr(final Reconciliation reconciliation, final String message, final Object p0, final Object p1, final Object p2) { + logger.logIfEnabled(FQCN, ALL, reconciliation.getMarker(), reconciliation.toString() + ": " + message, p0, p1, p2); + } + + /** + * Logs a message with parameters at the {@code ALL} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @since Log4j-2.6 + */ + public void allCr(final Reconciliation reconciliation, final String message, final Object p0, final Object p1, final Object p2, + final Object p3) { + logger.logIfEnabled(FQCN, ALL, reconciliation.getMarker(), reconciliation.toString() + ": " + message, p0, p1, p2, p3); + } + + /** + * Logs a message with parameters at the {@code ALL} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @since Log4j-2.6 + */ + public void allCr(final Reconciliation reconciliation, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4) { + logger.logIfEnabled(FQCN, ALL, reconciliation.getMarker(), reconciliation.toString() + ": " + message, p0, p1, p2, p3, p4); + } + + /** + * Logs a message with parameters at the {@code ALL} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @since Log4j-2.6 + */ + public void allCr(final Reconciliation reconciliation, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5) { + logger.logIfEnabled(FQCN, ALL, reconciliation.getMarker(), reconciliation.toString() + ": " + message, p0, p1, p2, p3, p4, p5); + } + + /** + * Logs a message with parameters at the {@code ALL} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. 
+ * @param p6 parameter to the message. + * @since Log4j-2.6 + */ + public void allCr(final Reconciliation reconciliation, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6) { + logger.logIfEnabled(FQCN, ALL, reconciliation.getMarker(), reconciliation.toString() + ": " + message, p0, p1, p2, p3, p4, p5, p6); + } + + /** + * Logs a message with parameters at the {@code ALL} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + * @param p7 parameter to the message. + * @since Log4j-2.6 + */ + public void allCr(final Reconciliation reconciliation, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6, + final Object p7) { + logger.logIfEnabled(FQCN, ALL, reconciliation.getMarker(), reconciliation.toString() + ": " + message, p0, p1, p2, p3, p4, p5, p6, p7); + } + + /** + * Logs a message with parameters at the {@code ALL} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + * @param p7 parameter to the message. + * @param p8 parameter to the message. + * @since Log4j-2.6 + */ + public void allCr(final Reconciliation reconciliation, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6, + final Object p7, final Object p8) { + logger.logIfEnabled(FQCN, ALL, reconciliation.getMarker(), reconciliation.toString() + ": " + message, p0, p1, p2, p3, p4, p5, p6, p7, p8); + } + + /** + * Logs a message with parameters at the {@code ALL} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param p0 parameter to the message. + * @param p1 parameter to the message. + * @param p2 parameter to the message. + * @param p3 parameter to the message. + * @param p4 parameter to the message. + * @param p5 parameter to the message. + * @param p6 parameter to the message. + * @param p7 parameter to the message. + * @param p8 parameter to the message. + * @param p9 parameter to the message. + * @since Log4j-2.6 + */ + public void allCr(final Reconciliation reconciliation, final String message, final Object p0, final Object p1, final Object p2, + final Object p3, final Object p4, final Object p5, final Object p6, + final Object p7, final Object p8, final Object p9) { + logger.logIfEnabled(FQCN, ALL, reconciliation.getMarker(), reconciliation.toString() + ": " + message, p0, p1, p2, p3, p4, p5, p6, p7, p8, p9); + } + + /** + * Logs a message at the {@code ALL} level including the stack trace of + * the {@link Throwable} {@code t} passed as parameter. + * + * @param reconciliation The reconciliation + * @param message the message to log. 
+ * @param t the exception to log, including its stack trace. + */ + public void allCr(final Reconciliation reconciliation, final String message, final Throwable t) { + logger.logIfEnabled(FQCN, ALL, reconciliation.getMarker(), reconciliation.toString() + ": " + message, t); + } + + /** + * Logs a message which is only to be constructed if the logging level is the + * {@code ALL} level with the specified Marker. + * + * @param reconciliation The reconciliation + * @param msgSupplier A function, which when called, produces the desired log message; + * the format depends on the message factory. + * @since Log4j-2.4 + */ + public void allCr(final Reconciliation reconciliation, final Supplier msgSupplier) { + logger.logIfEnabled(FQCN, ALL, reconciliation.getMarker(), msgSupplier, (Throwable) null); + } + + /** + * Logs a message with parameters which are only to be constructed if the logging level is the + * {@code ALL} level. + * + * @param reconciliation The reconciliation + * @param message the message to log; the format depends on the message factory. + * @param paramSuppliers An array of functions, which when called, produce the desired log message parameters. + * @since Log4j-2.4 + */ + public void allCr(final Reconciliation reconciliation, final String message, final Supplier... paramSuppliers) { + logger.logIfEnabled(FQCN, ALL, reconciliation.getMarker(), reconciliation.toString() + ": " + message, paramSuppliers); + } + + /** + * Logs a message (only to be constructed if the logging level is the {@code ALL} + * level) with the specified Marker and including the stack trace of the {@link Throwable} + * t passed as parameter. + * + * @param reconciliation The reconciliation + * @param msgSupplier A function, which when called, produces the desired log message; + * the format depends on the message factory. + * @param t A Throwable or null. + * @since Log4j-2.4 + */ + public void allCr(final Reconciliation reconciliation, final Supplier msgSupplier, final Throwable t) { + logger.logIfEnabled(FQCN, ALL, reconciliation.getMarker(), msgSupplier, t); + } + + /** + * Logs a message which is only to be constructed if the logging level is the + * {@code ALL} level with the specified Marker. The {@code MessageSupplier} may or may + * not use the {@link MessageFactory} to construct the {@code Message}. + * + * @param reconciliation The reconciliation + * @param msgSupplier A function, which when called, produces the desired log message. + * @since Log4j-2.4 + */ + public void allCr(final Reconciliation reconciliation, final MessageSupplier msgSupplier) { + logger.logIfEnabled(FQCN, ALL, reconciliation.getMarker(), msgSupplier, (Throwable) null); + } + + /** + * Logs a message (only to be constructed if the logging level is the {@code ALL} + * level) with the specified Marker and including the stack trace of the {@link Throwable} + * t passed as parameter. The {@code MessageSupplier} may or may not use the + * {@link MessageFactory} to construct the {@code Message}. + * + * @param reconciliation The reconciliation + * @param msgSupplier A function, which when called, produces the desired log message. + * @param t A Throwable or null. 
+ * @since Log4j-2.4 + */ + public void allCr(final Reconciliation reconciliation, final MessageSupplier msgSupplier, final Throwable t) { + logger.logIfEnabled(FQCN, ALL, reconciliation.getMarker(), msgSupplier, t); + } + + public boolean isFatalEnabled() { + return logger.isFatalEnabled(); + } + + public boolean isErrorEnabled() { + return logger.isErrorEnabled(); + } + + public boolean isWarnEnabled() { + return logger.isWarnEnabled(); + } + + public boolean isInfoEnabled() { + return logger.isInfoEnabled(); + } + + public boolean isDebugEnabled() { + return logger.isDebugEnabled(); + } + + public boolean isTraceEnabled() { + return logger.isTraceEnabled(); + } +} + diff --git a/operator-common/src/main/java/io/strimzi/operator/common/Util.java b/operator-common/src/main/java/io/strimzi/operator/common/Util.java index f169b28442..337d8bdc0f 100644 --- a/operator-common/src/main/java/io/strimzi/operator/common/Util.java +++ b/operator-common/src/main/java/io/strimzi/operator/common/Util.java @@ -24,8 +24,6 @@ import io.vertx.core.Vertx; import org.apache.kafka.common.KafkaFuture; import org.apache.kafka.common.config.ConfigResource; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import java.io.BufferedOutputStream; import java.io.File; @@ -55,7 +53,7 @@ import java.util.stream.Collectors; public class Util { - private static final Logger LOGGER = LogManager.getLogger(Util.class); + private static final ReconciliationLogger LOGGER = ReconciliationLogger.create(Util.class); public static Future async(Vertx vertx, Supplier supplier) { Promise result = Promise.promise(); @@ -74,6 +72,7 @@ public static Future async(Vertx vertx, Supplier supplier) { /** * Invoke the given {@code completed} supplier on a pooled thread approximately every {@code pollIntervalMs} * milliseconds until it returns true or {@code timeoutMs} milliseconds have elapsed. + * @param reconciliation The reconciliation * @param vertx The vertx instance. * @param logContext A string used for context in logging. * @param logState The state we are waiting for use in log messages @@ -82,13 +81,14 @@ public static Future async(Vertx vertx, Supplier supplier) { * @param completed Determines when the wait is complete by returning true. * @return A future that completes when the given {@code completed} indicates readiness. */ - public static Future waitFor(Vertx vertx, String logContext, String logState, long pollIntervalMs, long timeoutMs, BooleanSupplier completed) { - return waitFor(vertx, logContext, logState, pollIntervalMs, timeoutMs, completed, error -> false); + public static Future waitFor(Reconciliation reconciliation, Vertx vertx, String logContext, String logState, long pollIntervalMs, long timeoutMs, BooleanSupplier completed) { + return waitFor(reconciliation, vertx, logContext, logState, pollIntervalMs, timeoutMs, completed, error -> false); } /** * Invoke the given {@code completed} supplier on a pooled thread approximately every {@code pollIntervalMs} * milliseconds until it returns true or {@code timeoutMs} milliseconds have elapsed. + * @param reconciliation The reconciliation * @param vertx The vertx instance. * @param logContext A string used for context in logging. * @param logState The state we are waiting for use in log messages @@ -99,10 +99,10 @@ public static Future waitFor(Vertx vertx, String logContext, String logSta * should result in the immediate completion of the returned Future. 
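Util.waitFor now threads the Reconciliation through to its polling and timeout log messages, so they can be attributed to a single CR. A caller sketch, assuming the existing Future<Void> return type and a placeholder readiness check:

import io.vertx.core.Future;
import io.vertx.core.Vertx;
import io.strimzi.operator.common.Reconciliation;
import io.strimzi.operator.common.Util;

    // Poll every second, give up after 5 minutes; isEndpointReady() is a placeholder BooleanSupplier.
    Future<Void> awaitBootstrapService(Reconciliation reconciliation, Vertx vertx) {
        return Util.waitFor(reconciliation, vertx,
                "service my-cluster-kafka-bootstrap", "ready",
                1_000L, 300_000L,
                this::isEndpointReady);
    }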
* @return A future that completes when the given {@code completed} indicates readiness. */ - public static Future waitFor(Vertx vertx, String logContext, String logState, long pollIntervalMs, long timeoutMs, BooleanSupplier completed, + public static Future waitFor(Reconciliation reconciliation, Vertx vertx, String logContext, String logState, long pollIntervalMs, long timeoutMs, BooleanSupplier completed, Predicate failOnError) { Promise promise = Promise.promise(); - LOGGER.debug("Waiting for {} to get {}", logContext, logState); + LOGGER.debugCr(reconciliation, "Waiting for {} to get {}", logContext, logState); long deadline = System.currentTimeMillis() + timeoutMs; Handler handler = new Handler() { @Override @@ -113,18 +113,18 @@ public void handle(Long timerId) { if (completed.getAsBoolean()) { future.complete(); } else { - LOGGER.trace("{} is not {}", logContext, logState); + LOGGER.traceCr(reconciliation, "{} is not {}", logContext, logState); future.fail("Not " + logState + " yet"); } } catch (Throwable e) { - LOGGER.warn("Caught exception while waiting for {} to get {}", logContext, logState, e); + LOGGER.warnCr(reconciliation, "Caught exception while waiting for {} to get {}", logContext, logState, e); future.fail(e); } }, true, res -> { if (res.succeeded()) { - LOGGER.debug("{} is {}", logContext, logState); + LOGGER.debugCr(reconciliation, "{} is {}", logContext, logState); promise.complete(); } else { if (failOnError.test(res.cause())) { @@ -133,7 +133,7 @@ public void handle(Long timerId) { long timeLeft = deadline - System.currentTimeMillis(); if (timeLeft <= 0) { String exceptionMessage = String.format("Exceeded timeout of %dms while waiting for %s to be %s", timeoutMs, logContext, logState); - LOGGER.error(exceptionMessage); + LOGGER.errorCr(reconciliation, exceptionMessage); promise.fail(new TimeoutException(exceptionMessage)); } else { // Schedule ourselves to run again @@ -214,7 +214,7 @@ public static File createFileStore(String prefix, String suffix, byte[] bytes) { return f; } catch (IOException e) { if (f != null && !f.delete()) { - LOGGER.warn("Failed to delete temporary file in exception handler"); + LOGGER.warnOp("Failed to delete temporary file in exception handler"); } throw new RuntimeException(e); } @@ -264,7 +264,7 @@ private static File store(String prefix, String suffix, KeyStore trustStore, cha return f; } catch (IOException | KeyStoreException | NoSuchAlgorithmException | CertificateException | RuntimeException e) { if (f != null && !f.delete()) { - LOGGER.warn("Failed to delete temporary file in exception handler"); + LOGGER.warnOp("Failed to delete temporary file in exception handler"); } throw e; } @@ -281,7 +281,7 @@ public static void printEnvInfo() { sb.append("\t").append(entry.getKey()).append(": ").append(maskPassword(entry.getKey(), entry.getValue())).append("\n"); } - LOGGER.info("Using config:\n" + sb.toString()); + LOGGER.infoOp("Using config:\n" + sb.toString()); } /** @@ -346,7 +346,7 @@ public static Map mergeLabelsOrAnnotations(Map b return merged; } - public static Future kafkaFutureToVertxFuture(Vertx vertx, KafkaFuture kf) { + public static Future kafkaFutureToVertxFuture(Reconciliation reconciliation, Vertx vertx, KafkaFuture kf) { Promise promise = Promise.promise(); if (kf != null) { kf.whenComplete((result, error) -> { @@ -360,7 +360,7 @@ public static Future kafkaFutureToVertxFuture(Vertx vertx, KafkaFuture }); return promise.future(); } else { - LOGGER.trace("KafkaFuture is null"); + LOGGER.traceCr(reconciliation, "KafkaFuture 
is null"); return Future.succeededFuture(); } } @@ -499,7 +499,8 @@ public static Future getExternalLoggingCm(ConfigMapOperator configMap return loggingCmFut; } - public static Future metricsAndLogging(ConfigMapOperator configMapOperations, + public static Future metricsAndLogging(Reconciliation reconciliation, + ConfigMapOperator configMapOperations, String namespace, Logging logging, MetricsConfig metricsConfigInCm) { List configMaps = new ArrayList<>(2); @@ -508,7 +509,7 @@ public static Future metricsAndLogging(ConfigMapOperator conf } else if (metricsConfigInCm == null) { configMaps.add(Future.succeededFuture(null)); } else { - LOGGER.warn("Unknown metrics type {}", metricsConfigInCm.getType()); + LOGGER.warnCr(reconciliation, "Unknown metrics type {}", metricsConfigInCm.getType()); throw new InvalidResourceException("Unknown metrics type " + metricsConfigInCm.getType()); } diff --git a/operator-common/src/main/java/io/strimzi/operator/common/model/ResourceVisitor.java b/operator-common/src/main/java/io/strimzi/operator/common/model/ResourceVisitor.java index 001a34658b..5aa6156c38 100644 --- a/operator-common/src/main/java/io/strimzi/operator/common/model/ResourceVisitor.java +++ b/operator-common/src/main/java/io/strimzi/operator/common/model/ResourceVisitor.java @@ -5,8 +5,8 @@ package io.strimzi.operator.common.model; import io.fabric8.kubernetes.api.model.HasMetadata; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; +import io.strimzi.operator.common.Reconciliation; +import io.strimzi.operator.common.ReconciliationLogger; import java.lang.reflect.AnnotatedElement; import java.lang.reflect.Field; @@ -20,36 +20,39 @@ public class ResourceVisitor { - private static final Logger LOGGER = LogManager.getLogger(ResourceVisitor.class); + private static final ReconciliationLogger LOGGER = ReconciliationLogger.create(ResourceVisitor.class); public interface Visitor { /** * Called when a property is visited. + * @param reconciliation The reconciliation * @param path The property path for reaching this property * @param owner The object with the property * @param method The getter method for the property. * @param property abstraction for using the method. * @param propertyValue The value of the property. */ - default void visitMethodProperty(List path, Object owner, + default void visitMethodProperty(Reconciliation reconciliation, List path, Object owner, Method method, Property property, Object propertyValue) { - visitProperty(path, owner, method, property, propertyValue); + visitProperty(reconciliation, path, owner, method, property, propertyValue); } /** * Called when a field property is visited. + * @param reconciliation The reconciliation * @param path The property path for reaching this property * @param owner The object with the property * @param field The field for the property. * @param property abstraction for using the method. * @param propertyValue The value of the property. */ - default void visitFieldProperty(List path, Object owner, + default void visitFieldProperty(Reconciliation reconciliation, List path, Object owner, Field field, Property property, Object propertyValue) { - visitProperty(path, owner, field, property, propertyValue); + visitProperty(reconciliation, path, owner, field, property, propertyValue); } /** * Called when a property is visited. 
+ * @param reconciliation The reconciliation * @param path The property path for reaching this property * @param owner The object with the property * @param member The getter method or field for the property. @@ -57,23 +60,24 @@ default void visitFieldProperty(List path, Object owner, * @param propertyValue The value of the property. * @param The type of member ({@code Field} or {@code Method}). */ - void visitProperty(List path, Object owner, + void visitProperty(Reconciliation reconciliation, List path, Object owner, M member, Property property, Object propertyValue); /** * Called when an object is visited. + * @param reconciliation The reconciliation * @param path The property path to this object. * @param object The object */ - void visitObject(List path, Object object); + void visitObject(Reconciliation reconciliation, List path, Object object); } - public static void visit(T resource, Visitor visitor) { + public static void visit(Reconciliation reconciliation, T resource, Visitor visitor) { ArrayList path = new ArrayList<>(); try { - visit(path, resource, visitor); + visit(reconciliation, path, resource, visitor); } catch (RuntimeException | ReflectiveOperationException | StackOverflowError e) { - LOGGER.error("Error while visiting {}", path, e); + LOGGER.errorCr(reconciliation, "Error while visiting {}", path, e); if (e instanceof RuntimeException) { throw (RuntimeException) e; } else { @@ -82,13 +86,13 @@ public static void visit(T resource, Visitor visitor) { } } - private static void visit(List path, Object resource, Visitor visitor) throws ReflectiveOperationException { + private static void visit(Reconciliation reconciliation, List path, Object resource, Visitor visitor) throws ReflectiveOperationException { Class cls = resource.getClass(); - visitor.visitObject(path, resource); + visitor.visitObject(reconciliation, path, resource); for (Field field : cls.getFields()) { Object propertyValue = field.get(resource); - visitor.visitFieldProperty(path, resource, field, FIELD_PROPERTY, propertyValue); - visitProperty(path, field, FIELD_PROPERTY, propertyValue, visitor); + visitor.visitFieldProperty(reconciliation, path, resource, field, FIELD_PROPERTY, propertyValue); + visitProperty(reconciliation, path, field, FIELD_PROPERTY, propertyValue, visitor); } for (Method method : cls.getMethods()) { String name = method.getName(); @@ -107,8 +111,8 @@ private static void visit(List path, Object resource, Visitor visitor) t } if (property != null) { Object propertyValue = method.invoke(resource); - visitor.visitMethodProperty(path, resource, method, property, propertyValue); - visitProperty(path, method, property, propertyValue, visitor); + visitor.visitMethodProperty(reconciliation, path, resource, method, property, propertyValue); + visitProperty(reconciliation, path, method, property, propertyValue, visitor); } } } @@ -128,7 +132,7 @@ private static boolean isScalar(Class returnType) { || isFloat; } - static void visitProperty(List path, M member, + static void visitProperty(Reconciliation reconciliation, List path, M member, Property property, Object propertyValue, Visitor visitor) throws ReflectiveOperationException { @@ -139,7 +143,7 @@ static void visitProperty(List pat path.add(propertyName); if (propertyValue instanceof Object[]) { for (Object element : (Object[]) propertyValue) { - visit(path, element, visitor); + visit(reconciliation, path, element, visitor); } } // otherwise it's an array of primitives, in which case there are not further objects to visit @@ -150,7 +154,7 @@ 
static void visitProperty(List pat if (element != null && !element.getClass().isEnum() && !isScalar(element.getClass())) { - visit(path, element, visitor); + visit(reconciliation, path, element, visitor); } } path.remove(path.size() - 1); @@ -158,7 +162,7 @@ static void visitProperty(List pat && !Map.class.isAssignableFrom(returnType) && !returnType.isEnum()) { path.add(propertyName); - visit(path, propertyValue, visitor); + visit(reconciliation, path, propertyValue, visitor); path.remove(path.size() - 1); } } diff --git a/operator-common/src/main/java/io/strimzi/operator/common/model/ValidationVisitor.java b/operator-common/src/main/java/io/strimzi/operator/common/model/ValidationVisitor.java index d26044182a..8adc1eeed6 100644 --- a/operator-common/src/main/java/io/strimzi/operator/common/model/ValidationVisitor.java +++ b/operator-common/src/main/java/io/strimzi/operator/common/model/ValidationVisitor.java @@ -10,8 +10,9 @@ import io.strimzi.api.annotations.DeprecatedType; import io.strimzi.api.kafka.model.UnknownPropertyPreserving; import io.strimzi.api.kafka.model.status.Condition; +import io.strimzi.operator.common.Reconciliation; +import io.strimzi.operator.common.ReconciliationLogger; import io.strimzi.operator.common.operator.resource.StatusUtils; -import org.apache.logging.log4j.Logger; import java.lang.reflect.AnnotatedElement; import java.lang.reflect.Member; @@ -21,22 +22,17 @@ import java.util.Set; public class ValidationVisitor implements ResourceVisitor.Visitor { - private final Logger logger; + private final ReconciliationLogger logger; private final HasMetadata resource; private final Set warningConditions; private final String transitionTime = StatusUtils.iso8601Now(); - public ValidationVisitor(HasMetadata resource, Logger logger, Set warningConditions) { + public ValidationVisitor(HasMetadata resource, ReconciliationLogger logger, Set warningConditions) { this.resource = resource; this.logger = logger; this.warningConditions = warningConditions; } - String context() { - return resource.getKind() + " resource " + resource.getMetadata().getName() - + " in namespace " + resource.getMetadata().getNamespace(); - } - boolean isPresent(M member, Object propertyValue) { JsonInclude annotation = member.getAnnotation(JsonInclude.class); @@ -70,7 +66,8 @@ boolean isPresent(M member, return propertyValue != null; } - private void checkForDeprecated(List path, + private void checkForDeprecated(Reconciliation reconciliation, + List path, M member, Object propertyValue, String propertyName) { @@ -94,7 +91,7 @@ && isPresent(member, propertyValue)) { } warningConditions.add(StatusUtils.buildWarningCondition("DeprecatedFields", msg, transitionTime)); - logger.warn("{}: {}", context(), msg); + logger.warnCr(reconciliation, msg); } // Look for deprecated objects. 
With OneOf, the field might not be deprecated, but the used value might be @@ -116,7 +113,7 @@ && isPresent(member, propertyValue)) { msg += "."; warningConditions.add(StatusUtils.buildWarningCondition("DeprecatedObjects", msg, transitionTime)); - logger.warn("{}: {}", context(), msg); + logger.warnCr(reconciliation, msg); } } } @@ -126,13 +123,13 @@ private String path(List path, String propertyName) { } @Override - public void visitProperty(List path, Object resource, + public void visitProperty(Reconciliation reconciliation, List path, Object resource, M method, ResourceVisitor.Property property, Object propertyValue) { - checkForDeprecated(path, method, propertyValue, property.propertyName(method)); + checkForDeprecated(reconciliation, path, method, propertyValue, property.propertyName(method)); } @Override - public void visitObject(List path, Object object) { + public void visitObject(Reconciliation reconciliation, List path, Object object) { if (object instanceof UnknownPropertyPreserving) { Map properties = ((UnknownPropertyPreserving) object).getAdditionalProperties(); if (properties != null && !properties.isEmpty()) { @@ -141,7 +138,7 @@ public void visitObject(List path, Object object) { properties.size() == 1 ? "an unknown property" : "unknown properties", String.join(", ", properties.keySet())); - logger.warn("{}: {}", context(), msg); + logger.warnCr(reconciliation, msg); warningConditions.add(StatusUtils.buildWarningCondition("UnknownFields", msg, transitionTime)); } } diff --git a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/AbstractNonNamespacedResourceOperator.java b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/AbstractNonNamespacedResourceOperator.java index c81d7226f2..cd5af6cf24 100644 --- a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/AbstractNonNamespacedResourceOperator.java +++ b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/AbstractNonNamespacedResourceOperator.java @@ -13,13 +13,13 @@ import io.fabric8.kubernetes.client.dsl.FilterWatchListMultiDeletable; import io.fabric8.kubernetes.client.dsl.NonNamespaceOperation; import io.fabric8.kubernetes.client.dsl.Resource; +import io.strimzi.operator.common.Reconciliation; +import io.strimzi.operator.common.ReconciliationLogger; import io.strimzi.operator.common.model.Labels; import io.vertx.core.CompositeFuture; import io.vertx.core.Future; import io.vertx.core.Promise; import io.vertx.core.Vertx; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import java.util.List; import java.util.Map; @@ -42,7 +42,7 @@ public abstract class AbstractNonNamespacedResourceOperator> createOrUpdate(T resource) { + public Future> createOrUpdate(Reconciliation reconciliation, T resource) { if (resource == null) { throw new NullPointerException(); } - return reconcile(resource.getMetadata().getName(), resource); + return reconcile(reconciliation, resource.getMetadata().getName(), resource); } /** * Asynchronously reconciles the resource with the given name to match the given * desired resource, returning a future for the result. + * @param reconciliation The reconciliation * @param name The name of the resource to reconcile. * @param desired The desired state of the resource. * @return A future which completes when the resource was reconciled. 
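With the ResourceVisitor and ValidationVisitor changes above, each deprecated or unknown property is now reported twice: once through warnCr against the reconciliation's marker and once as a warning Condition for the status. A wiring sketch under those assumptions (the resource and logger are supplied by the caller):

import io.fabric8.kubernetes.api.model.HasMetadata;
import io.strimzi.api.kafka.model.status.Condition;
import io.strimzi.operator.common.Reconciliation;
import io.strimzi.operator.common.ReconciliationLogger;
import io.strimzi.operator.common.model.ResourceVisitor;
import io.strimzi.operator.common.model.ValidationVisitor;

import java.util.HashSet;
import java.util.Set;

    static Set<Condition> validate(Reconciliation reconciliation, HasMetadata resource, ReconciliationLogger logger) {
        Set<Condition> warnings = new HashSet<>();
        // The visitor walks the CR, logs each finding via warnCr(reconciliation, ...) and collects it as a warning condition.
        ResourceVisitor.visit(reconciliation, resource, new ValidationVisitor(resource, logger, warnings));
        return warnings;
    }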
*/ - public Future> reconcile(String name, T desired) { + public Future> reconcile(Reconciliation reconciliation, String name, T desired) { if (desired != null && !name.equals(desired.getMetadata().getName())) { return Future.failedFuture("Given name " + name + " incompatible with desired name " + desired.getMetadata().getName()); @@ -96,19 +98,19 @@ public Future> reconcile(String name, T desired) { T current = operation().withName(name).get(); if (desired != null) { if (current == null) { - log.debug("{} {} does not exist, creating it", resourceKind, name); - internalCreate(name, desired).onComplete(future); + log.debugCr(reconciliation, "{} {} does not exist, creating it", resourceKind, name); + internalCreate(reconciliation, name, desired).onComplete(future); } else { - log.debug("{} {} already exists, patching it", resourceKind, name); - internalPatch(name, current, desired).onComplete(future); + log.debugCr(reconciliation, "{} {} already exists, patching it", resourceKind, name); + internalPatch(reconciliation, name, current, desired).onComplete(future); } } else { if (current != null) { // Deletion is desired - log.debug("{} {} exist, deleting it", resourceKind, name); - internalDelete(name).onComplete(future); + log.debugCr(reconciliation, "{} {} exist, deleting it", resourceKind, name); + internalDelete(reconciliation, name).onComplete(future); } else { - log.debug("{} {} does not exist, noop", resourceKind, name); + log.debugCr(reconciliation, "{} {} does not exist, noop", resourceKind, name); future.complete(ReconcileResult.noop(null)); } } @@ -128,18 +130,19 @@ protected long deleteTimeoutMs() { * Asynchronously deletes the resource with the given {@code name}, * returning a Future which completes once the resource * is observed to have been deleted. + * @param reconciliation The reconciliation * @param name The resource to be deleted. * @return A future which will be completed on the context thread * once the resource has been deleted. */ - private Future> internalDelete(String name) { + private Future> internalDelete(Reconciliation reconciliation, String name) { R resourceOp = operation().withName(name); Future> watchForDeleteFuture = resourceSupport.selfClosingWatch(resourceOp, deleteTimeoutMs(), "observe deletion of " + resourceKind + " " + name, (action, resource) -> { if (action == Watcher.Action.DELETED) { - log.debug("{} {} has been deleted", resourceKind, name); + log.debugCr(reconciliation, "{} {} has been deleted", resourceKind, name); return ReconcileResult.deleted(); } else { return null; @@ -159,50 +162,52 @@ protected Pattern ignorablePaths() { /** * Returns the diff of the current and desired resources * + * @param reconciliation The reconciliation * @param resourceName Name of the resource used for logging * @param current Current resource * @param desired Desired resource * * @return The ResourceDiff instance */ - protected ResourceDiff diff(String resourceName, T current, T desired) { - return new ResourceDiff<>(resourceKind, resourceName, current, desired, ignorablePaths()); + protected ResourceDiff diff(Reconciliation reconciliation, String resourceName, T current, T desired) { + return new ResourceDiff<>(reconciliation, resourceKind, resourceName, current, desired, ignorablePaths()); } /** * Checks whether the current and desired resources differ and need to be patched in the Kubernetes API server. 
     *
+     * @param reconciliation The reconciliation
     * @param name Name of the resource used for logging
     * @param current Current resource
     * @param desired desired resource
     *
     * @return True if the resources differ and need patching
     */
-    protected boolean needsPatching(String name, T current, T desired) {
-        return !diff(name, current, desired).isEmpty();
+    protected boolean needsPatching(Reconciliation reconciliation, String name, T current, T desired) {
+        return !diff(reconciliation, name, current, desired).isEmpty();
     }
 
     /**
      * Patches the resource with the given name to match the given desired resource
      * and completes the given future accordingly.
      */
-    protected Future<ReconcileResult<T>> internalPatch(String name, T current, T desired) {
-        return internalPatch(name, current, desired, true);
+    protected Future<ReconcileResult<T>> internalPatch(Reconciliation reconciliation, String name, T current, T desired) {
+        return internalPatch(reconciliation, name, current, desired, true);
     }
 
-    protected Future<ReconcileResult<T>> internalPatch(String name, T current, T desired, boolean cascading) {
-        if (needsPatching(name, current, desired)) {
+    protected Future<ReconcileResult<T>> internalPatch(Reconciliation reconciliation, String name, T current, T desired, boolean cascading) {
+        if (needsPatching(reconciliation, name, current, desired)) {
             try {
                 T result = operation().withName(name).withPropagationPolicy(cascading ? DeletionPropagation.FOREGROUND : DeletionPropagation.ORPHAN).patch(desired);
-                log.debug("{} {} has been patched", resourceKind, name);
+                log.debugCr(reconciliation, "{} {} has been patched", resourceKind, name);
                 return Future.succeededFuture(wasChanged(current, result) ? ReconcileResult.patched(result) : ReconcileResult.noop(result));
             } catch (Exception e) {
-                log.debug("Caught exception while patching {} {}", resourceKind, name, e);
+                log.debugCr(reconciliation, "Caught exception while patching {} {}", resourceKind, name, e);
                 return Future.failedFuture(e);
             }
         } else {
-            log.debug("{} {} did not changed and doesn't need patching", resourceKind, name);
+            log.debugCr(reconciliation, "{} {} did not change and doesn't need patching", resourceKind, name);
             return Future.succeededFuture(ReconcileResult.noop(current));
         }
     }
@@ -222,13 +227,13 @@ private boolean wasChanged(T oldVersion, T newVersion) {
     * Creates a resource with the name with the given desired state
     * and completes the given future accordingly.
*/ - protected Future> internalCreate(String name, T desired) { + protected Future> internalCreate(Reconciliation reconciliation, String name, T desired) { try { ReconcileResult result = ReconcileResult.created(operation().withName(name).create(desired)); - log.debug("{} {} has been created", resourceKind, name); + log.debugCr(reconciliation, "{} {} has been created", resourceKind, name); return Future.succeededFuture(result); } catch (Exception e) { - log.debug("Caught exception while creating {} {}", resourceKind, name, e); + log.debugCr(reconciliation, "Caught exception while creating {} {}", resourceKind, name, e); return Future.failedFuture(e); } } diff --git a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/AbstractReadyResourceOperator.java b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/AbstractReadyResourceOperator.java index 3ae28e7dd1..6ad8592a1d 100644 --- a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/AbstractReadyResourceOperator.java +++ b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/AbstractReadyResourceOperator.java @@ -8,12 +8,14 @@ import io.fabric8.kubernetes.api.model.KubernetesResourceList; import io.fabric8.kubernetes.client.KubernetesClient; import io.fabric8.kubernetes.client.dsl.Resource; +import io.strimzi.operator.common.Reconciliation; import io.vertx.core.Future; import io.vertx.core.Vertx; /** * Specializes {@link AbstractResourceOperator} for resources which also have a notion * of being "ready". + * * @param The type of client used to interact with kubernetes. * @param The Kubernetes resource type. * @param The list variant of the Kubernetes resource type. @@ -36,8 +38,8 @@ public AbstractReadyResourceOperator(Vertx vertx, C client, String resourceKind) super(vertx, client, resourceKind); } - public Future readiness(String namespace, String name, long pollIntervalMs, long timeoutMs) { - return waitFor(namespace, name, pollIntervalMs, timeoutMs, this::isReady); + public Future readiness(Reconciliation reconciliation, String namespace, String name, long pollIntervalMs, long timeoutMs) { + return waitFor(reconciliation, namespace, name, pollIntervalMs, timeoutMs, this::isReady); } /** diff --git a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/AbstractResourceOperator.java b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/AbstractResourceOperator.java index 8488ee3372..ef8dddbd72 100644 --- a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/AbstractResourceOperator.java +++ b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/AbstractResourceOperator.java @@ -15,14 +15,14 @@ import io.fabric8.kubernetes.client.dsl.MixedOperation; import io.fabric8.kubernetes.client.dsl.NonNamespaceOperation; import io.fabric8.kubernetes.client.dsl.Resource; +import io.strimzi.operator.common.Reconciliation; +import io.strimzi.operator.common.ReconciliationLogger; import io.strimzi.operator.common.Util; import io.strimzi.operator.common.model.Labels; import io.vertx.core.CompositeFuture; import io.vertx.core.Future; import io.vertx.core.Promise; import io.vertx.core.Vertx; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import java.util.List; import java.util.Map; @@ -49,7 +49,7 @@ public abstract class AbstractResourceOperator> createOrUpdate(T resource) { + public Future> createOrUpdate(Reconciliation reconciliation, T 
resource) { if (resource == null) { throw new NullPointerException(); } - return reconcile(resource.getMetadata().getNamespace(), resource.getMetadata().getName(), resource); + return reconcile(reconciliation, resource.getMetadata().getNamespace(), resource.getMetadata().getName(), resource); } /** * Asynchronously reconciles the resource with the given namespace and name to match the given * desired resource, returning a future for the result. + * @param reconciliation Reconciliation object * @param namespace The namespace of the resource to reconcile * @param name The name of the resource to reconcile * @param desired The desired state of the resource. * @return A future which completes when the resource has been updated. */ - public Future> reconcile(String namespace, String name, T desired) { + public Future> reconcile(Reconciliation reconciliation, String namespace, String name, T desired) { if (desired != null && !namespace.equals(desired.getMetadata().getNamespace())) { return Future.failedFuture("Given namespace " + namespace + " incompatible with desired namespace " + desired.getMetadata().getNamespace()); } else if (desired != null && !name.equals(desired.getMetadata().getName())) { @@ -105,19 +107,19 @@ public Future> reconcile(String namespace, String name, T des T current = operation().inNamespace(namespace).withName(name).get(); if (desired != null) { if (current == null) { - log.debug("{} {}/{} does not exist, creating it", resourceKind, namespace, name); - internalCreate(namespace, name, desired).onComplete(future); + LOGGER.debugCr(reconciliation, "{} {}/{} does not exist, creating it", resourceKind, namespace, name); + internalCreate(reconciliation, namespace, name, desired).onComplete(future); } else { - log.debug("{} {}/{} already exists, patching it", resourceKind, namespace, name); - internalPatch(namespace, name, current, desired).onComplete(future); + LOGGER.debugCr(reconciliation, "{} {}/{} already exists, patching it", resourceKind, namespace, name); + internalPatch(reconciliation, namespace, name, current, desired).onComplete(future); } } else { if (current != null) { // Deletion is desired - log.debug("{} {}/{} exist, deleting it", resourceKind, namespace, name); - internalDelete(namespace, name).onComplete(future); + LOGGER.debugCr(reconciliation, "{} {}/{} exist, deleting it", resourceKind, namespace, name); + internalDelete(reconciliation, namespace, name).onComplete(future); } else { - log.debug("{} {}/{} does not exist, noop", resourceKind, namespace, name); + LOGGER.debugCr(reconciliation, "{} {}/{} does not exist, noop", resourceKind, namespace, name); future.complete(ReconcileResult.noop(null)); } } @@ -133,14 +135,15 @@ public Future> reconcile(String namespace, String name, T des * Deletes the resource with the given namespace and name and completes the given future accordingly. * This method will do a cascading delete. * + * @param reconciliation The reconciliation * @param namespace Namespace of the resource which should be deleted * @param name Name of the resource which should be deleted * * @return A future which will be completed on the context thread * once the resource has been deleted. 
*/ - protected Future> internalDelete(String namespace, String name) { - return internalDelete(namespace, name, true); + protected Future> internalDelete(Reconciliation reconciliation, String namespace, String name) { + return internalDelete(reconciliation, namespace, name, true); } /** @@ -148,6 +151,7 @@ protected Future> internalDelete(String namespace, String nam * returning a Future which completes once the resource * is observed to have been deleted. * + * @param reconciliation The reconciliation * @param namespace Namespace of the resource which should be deleted * @param name Name of the resource which should be deleted * @param cascading Defines whether the delete should be cascading or not (e.g. whether a STS deletion should delete pods etc.) @@ -155,7 +159,7 @@ protected Future> internalDelete(String namespace, String nam * @return A future which will be completed on the context thread * once the resource has been deleted. */ - protected Future> internalDelete(String namespace, String name, boolean cascading) { + protected Future> internalDelete(Reconciliation reconciliation, String namespace, String name, boolean cascading) { R resourceOp = operation().inNamespace(namespace).withName(name); Future> watchForDeleteFuture = resourceSupport.selfClosingWatch(resourceOp, @@ -163,7 +167,7 @@ protected Future> internalDelete(String namespace, String nam "observe deletion of " + resourceKind + " " + namespace + "/" + name, (action, resource) -> { if (action == Watcher.Action.DELETED) { - log.debug("{} {}/{} has been deleted", resourceKind, namespace, name); + LOGGER.debugCr(reconciliation, "{} {}/{} has been deleted", resourceKind, namespace, name); return ReconcileResult.deleted(); } else { return null; @@ -189,49 +193,51 @@ protected Pattern ignorablePaths() { /** * Returns the diff of the current and desired resources * + * @param reconciliation The reconciliation * @param resourceName Name of the resource used for logging * @param current Current resource * @param desired Desired resource * * @return The ResourceDiff instance */ - protected ResourceDiff diff(String resourceName, T current, T desired) { - return new ResourceDiff<>(resourceKind, resourceName, current, desired, ignorablePaths()); + protected ResourceDiff diff(Reconciliation reconciliation, String resourceName, T current, T desired) { + return new ResourceDiff<>(reconciliation, resourceKind, resourceName, current, desired, ignorablePaths()); } /** * Checks whether the current and desired resources differ and need to be patched in the Kubernetes API server. * + * @param reconciliation The reconciliation * @param name Name of the resource used for logging * @param current Current resource * @param desired Desired resource * * @return True if the resources differ and need patching */ - protected boolean needsPatching(String name, T current, T desired) { - return !diff(name, current, desired).isEmpty(); + protected boolean needsPatching(Reconciliation reconciliation, String name, T current, T desired) { + return !diff(reconciliation, name, current, desired).isEmpty(); } /** * Patches the resource with the given namespace and name to match the given desired resource * and completes the given future accordingly. 
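Since reconcile, internalCreate, internalPatch and internalDelete now all take the Reconciliation first, a caller's create-or-patch-or-delete decision is logged against the right CR automatically. A hedged caller sketch (configMapOperator, desiredConfigMap and the names are placeholders; passing null as the desired state triggers deletion instead):

// Reconcile one ConfigMap; the returned Future carries a ReconcileResult describing what happened.
configMapOperator.reconcile(reconciliation, "myproject", "my-cluster-kafka-config", desiredConfigMap)
        .onComplete(res -> {
            if (res.succeeded()) {
                LOGGER.debugCr(reconciliation, "ConfigMap reconciled: {}", res.result());
            } else {
                LOGGER.errorCr(reconciliation, "ConfigMap reconciliation failed", res.cause());
            }
        });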
*/ - protected Future> internalPatch(String namespace, String name, T current, T desired) { - return internalPatch(namespace, name, current, desired, true); + protected Future> internalPatch(Reconciliation reconciliation, String namespace, String name, T current, T desired) { + return internalPatch(reconciliation, namespace, name, current, desired, true); } - protected Future> internalPatch(String namespace, String name, T current, T desired, boolean cascading) { - if (needsPatching(name, current, desired)) { + protected Future> internalPatch(Reconciliation reconciliation, String namespace, String name, T current, T desired, boolean cascading) { + if (needsPatching(reconciliation, name, current, desired)) { try { T result = operation().inNamespace(namespace).withName(name).withPropagationPolicy(cascading ? DeletionPropagation.FOREGROUND : DeletionPropagation.ORPHAN).patch(desired); - log.debug("{} {} in namespace {} has been patched", resourceKind, name, namespace); + LOGGER.debugCr(reconciliation, "{} {} in namespace {} has been patched", resourceKind, name, namespace); return Future.succeededFuture(wasChanged(current, result) ? ReconcileResult.patched(result) : ReconcileResult.noop(result)); } catch (Exception e) { - log.debug("Caught exception while patching {} {} in namespace {}", resourceKind, name, namespace, e); + LOGGER.debugCr(reconciliation, "Caught exception while patching {} {} in namespace {}", resourceKind, name, namespace, e); return Future.failedFuture(e); } } else { - log.debug("{} {} in namespace {} did not changed and doesn't need patching", resourceKind, name, namespace); + LOGGER.debugCr(reconciliation, "{} {} in namespace {} did not changed and doesn't need patching", resourceKind, name, namespace); return Future.succeededFuture(ReconcileResult.noop(current)); } } @@ -251,13 +257,13 @@ protected boolean wasChanged(T oldVersion, T newVersion) { * Creates a resource with the given namespace and name with the given desired state * and completes the given future accordingly. */ - protected Future> internalCreate(String namespace, String name, T desired) { + protected Future> internalCreate(Reconciliation reconciliation, String namespace, String name, T desired) { try { ReconcileResult result = ReconcileResult.created(operation().inNamespace(namespace).withName(name).create(desired)); - log.debug("{} {} in namespace {} has been created", resourceKind, name, namespace); + LOGGER.debugCr(reconciliation, "{} {} in namespace {} has been created", resourceKind, name, namespace); return Future.succeededFuture(result); } catch (Exception e) { - log.debug("Caught exception while creating {} {} in namespace {}", resourceKind, name, namespace, e); + LOGGER.debugCr(reconciliation, "Caught exception while creating {} {} in namespace {}", resourceKind, name, namespace, e); return Future.failedFuture(e); } } @@ -375,6 +381,7 @@ public Future> listAsync(String namespace, Optional selec * Returns a future that completes when the resource identified by the given {@code namespace} and {@code name} * is ready. * + * @param reconciliation The reconciliation * @param namespace The namespace. * @param name The resource name. * @param pollIntervalMs The poll interval in milliseconds. @@ -383,14 +390,15 @@ public Future> listAsync(String namespace, Optional selec * @return A future that completes when the resource identified by the given {@code namespace} and {@code name} * is ready. 
*/ - public Future waitFor(String namespace, String name, long pollIntervalMs, final long timeoutMs, BiPredicate predicate) { - return waitFor(namespace, name, "ready", pollIntervalMs, timeoutMs, predicate); + public Future waitFor(Reconciliation reconciliation, String namespace, String name, long pollIntervalMs, final long timeoutMs, BiPredicate predicate) { + return waitFor(reconciliation, namespace, name, "ready", pollIntervalMs, timeoutMs, predicate); } /** * Returns a future that completes when the resource identified by the given {@code namespace} and {@code name} * is ready. * + * @param reconciliation The reconciliation * @param namespace The namespace. * @param name The resource name. * @param logState The state we are waiting for use in log messages @@ -400,8 +408,8 @@ public Future waitFor(String namespace, String name, long pollIntervalMs, * @return A future that completes when the resource identified by the given {@code namespace} and {@code name} * is ready. */ - public Future waitFor(String namespace, String name, String logState, long pollIntervalMs, final long timeoutMs, BiPredicate predicate) { - return Util.waitFor(vertx, + public Future waitFor(Reconciliation reconciliation, String namespace, String name, String logState, long pollIntervalMs, final long timeoutMs, BiPredicate predicate) { + return Util.waitFor(reconciliation, vertx, String.format("%s resource %s in namespace %s", resourceKind, name, namespace), logState, pollIntervalMs, diff --git a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/AbstractScalableResourceOperator.java b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/AbstractScalableResourceOperator.java index 1d72348205..722708bbd8 100644 --- a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/AbstractScalableResourceOperator.java +++ b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/AbstractScalableResourceOperator.java @@ -9,12 +9,11 @@ import io.fabric8.kubernetes.client.KubernetesClient; import io.fabric8.kubernetes.client.dsl.ScalableResource; import io.strimzi.operator.common.Annotations; +import io.strimzi.operator.common.Reconciliation; +import io.strimzi.operator.common.ReconciliationLogger; import io.vertx.core.Future; import io.vertx.core.Promise; import io.vertx.core.Vertx; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; - /** * An {@link AbstractResourceOperator} that can be scaled up and down in addition to the usual operations. @@ -29,11 +28,11 @@ public abstract class AbstractScalableResourceOperator> extends AbstractReadyResourceOperator { + private static final ReconciliationLogger LOGGER = ReconciliationLogger.create(AbstractScalableResourceOperator.class); + public static final String ANNO_STRIMZI_IO_GENERATION = Annotations.STRIMZI_DOMAIN + "generation"; public static final String ANNO_STRIMZI_IO_DELETE_POD_AND_PVC = Annotations.STRIMZI_DOMAIN + "delete-pod-and-pvc"; - private final Logger log = LogManager.getLogger(getClass()); - /** * Constructor * @param vertx The Vertx instance @@ -52,6 +51,7 @@ private R resource(String namespace, String name) { * Asynchronously scale up the resource given by {@code namespace} and {@code name} to have the scale given by * {@code scaleTo}, returning a future for the outcome. * If the resource does not exist, or has a current scale >= the given {@code scaleTo}, then complete successfully. 
+ * @param reconciliation The reconciliation * @param namespace The namespace of the resource to scale. * @param name The name of the resource to scale. * @param scaleTo The desired scale. @@ -59,20 +59,20 @@ private R resource(String namespace, String name) { * If the scale was initially > the given {@code scaleTo} then this value will be the original scale, * The value will be null if the resource didn't exist (hence no scaling occurred). */ - public Future scaleUp(String namespace, String name, int scaleTo) { + public Future scaleUp(Reconciliation reconciliation, String namespace, String name, int scaleTo) { Promise promise = Promise.promise(); vertx.createSharedWorkerExecutor("kubernetes-ops-pool").executeBlocking( future -> { try { Integer currentScale = currentScale(namespace, name); if (currentScale != null && currentScale < scaleTo) { - log.info("Scaling up to {} replicas", scaleTo); + LOGGER.infoCr(reconciliation, "Scaling up to {} replicas", scaleTo); resource(namespace, name).scale(scaleTo, true); currentScale = scaleTo; } future.complete(currentScale); } catch (Exception e) { - log.error("Caught exception while scaling up", e); + LOGGER.errorCr(reconciliation, "Caught exception while scaling up", e); future.fail(e); } }, @@ -88,6 +88,7 @@ public Future scaleUp(String namespace, String name, int scaleTo) { * Asynchronously scale down the resource given by {@code namespace} and {@code name} to have the scale given by * {@code scaleTo}, returning a future for the outcome. * If the resource does not exist, or has a current scale <= the given {@code scaleTo}, then complete successfully. + * @param reconciliation The reconciliation * @param namespace The namespace of the resource to scale. * @param name The name of the resource to scale. * @param scaleTo The desired scale. @@ -95,7 +96,7 @@ public Future scaleUp(String namespace, String name, int scaleTo) { * If the scale was initially < the given {@code scaleTo} then this value will be the original scale, * The value will be null if the resource didn't exist (hence no scaling occurred).
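A usage sketch for the reworked scaling API (the operator instance, resource names and replica count are hypothetical); the point is that the "Scaling up to {} replicas" messages now land in the per-CR log context:

    stsOperator.scaleUp(reconciliation, "myproject", "my-cluster-kafka", 5)
            .onComplete(res -> {
                if (res.succeeded()) {
                    // res.result() is the scale before the operation, or null if the resource did not exist
                    LOGGER.infoCr(reconciliation, "Scale-up requested, previous scale was {}", res.result());
                } else {
                    LOGGER.errorCr(reconciliation, "Scale-up failed", res.cause());
                }
            });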
*/ - public Future scaleDown(String namespace, String name, int scaleTo) { + public Future scaleDown(Reconciliation reconciliation, String namespace, String name, int scaleTo) { Promise promise = Promise.promise(); vertx.createSharedWorkerExecutor("kubernetes-ops-pool").executeBlocking( future -> { @@ -104,13 +105,13 @@ public Future scaleDown(String namespace, String name, int scaleTo) { if (nextReplicas != null) { while (nextReplicas > scaleTo) { nextReplicas--; - log.info("Scaling down from {} to {}", nextReplicas + 1, nextReplicas); + LOGGER.infoCr(reconciliation, "Scaling down from {} to {}", nextReplicas + 1, nextReplicas); resource(namespace, name).scale(nextReplicas, true); } } future.complete(nextReplicas); } catch (Exception e) { - log.error("Caught exception while scaling down", e); + LOGGER.errorCr(reconciliation, "Caught exception while scaling down", e); future.fail(e); } }, diff --git a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/AbstractWatchableStatusedResourceOperator.java b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/AbstractWatchableStatusedResourceOperator.java index ae3747989d..59154d4fdf 100644 --- a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/AbstractWatchableStatusedResourceOperator.java +++ b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/AbstractWatchableStatusedResourceOperator.java @@ -8,6 +8,7 @@ import io.fabric8.kubernetes.api.model.KubernetesResourceList; import io.fabric8.kubernetes.client.KubernetesClient; import io.fabric8.kubernetes.client.dsl.Resource; +import io.strimzi.operator.common.Reconciliation; import io.vertx.core.Future; import io.vertx.core.Vertx; @@ -43,8 +44,9 @@ public AbstractWatchableStatusedResourceOperator(Vertx vertx, C client, String r /** * Updates status of the resource * + * @param reconciliation Reconciliation object * @param resource Resource with the status which should be updated in the Kube API server * @return Future with the updated resource */ - public abstract Future updateStatusAsync(T resource); + public abstract Future updateStatusAsync(Reconciliation reconciliation, T resource); } diff --git a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/BuildConfigOperator.java b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/BuildConfigOperator.java index c366d12da1..ac8c412d87 100644 --- a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/BuildConfigOperator.java +++ b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/BuildConfigOperator.java @@ -12,6 +12,7 @@ import io.fabric8.openshift.api.model.BuildRequest; import io.fabric8.openshift.client.OpenShiftClient; import io.fabric8.openshift.client.dsl.BuildConfigResource; +import io.strimzi.operator.common.Reconciliation; import io.vertx.core.Future; import io.vertx.core.Vertx; @@ -34,10 +35,10 @@ protected MixedOperation> internalPatch(String namespace, String name, BuildConfig current, BuildConfig desired) { + protected Future> internalPatch(Reconciliation reconciliation, String namespace, String name, BuildConfig current, BuildConfig desired) { desired.getSpec().setTriggers(current.getSpec().getTriggers()); // Cascading needs to be set to false to make sure the Builds are not deleted during reconciliation - return super.internalPatch(namespace, name, current, desired, false); + return super.internalPatch(reconciliation, namespace, name, current, desired, 
false); } /** @@ -46,6 +47,7 @@ protected Future> internalPatch(String namespace, S * * This is an override for BuildConfigs because the {@code selfClosingWatch} used by {@code AbstractResourceOperator} does not work for them. * + * @param reconciliation The reconciliation * @param namespace Namespace of the resource which should be deleted * @param name Name of the resource which should be deleted * @param cascading Defines whether the delete should be cascading or not (e.g. whether a STS deletion should delete pods etc.) @@ -53,7 +55,7 @@ protected Future> internalPatch(String namespace, S * @return A future which will be completed on the context thread once the resource has been deleted. */ @Override - protected Future> internalDelete(String namespace, String name, boolean cascading) { + protected Future> internalDelete(Reconciliation reconciliation, String namespace, String name, boolean cascading) { BuildConfigResource resourceOp = operation().inNamespace(namespace).withName(name); return resourceSupport.deleteAsync(resourceOp.withPropagationPolicy(cascading ? DeletionPropagation.FOREGROUND : DeletionPropagation.ORPHAN).withGracePeriod(-1L)) diff --git a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/ConfigMapOperator.java b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/ConfigMapOperator.java index f5b8372e32..1709a7e5d3 100644 --- a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/ConfigMapOperator.java +++ b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/ConfigMapOperator.java @@ -9,6 +9,8 @@ import io.fabric8.kubernetes.client.KubernetesClient; import io.fabric8.kubernetes.client.dsl.MixedOperation; import io.fabric8.kubernetes.client.dsl.Resource; +import io.strimzi.operator.common.Reconciliation; +import io.strimzi.operator.common.ReconciliationLogger; import io.vertx.core.Future; import io.vertx.core.Vertx; @@ -19,8 +21,12 @@ * Operations for {@code ConfigMap}s. */ public class ConfigMapOperator extends AbstractResourceOperator> { + + private static final ReconciliationLogger LOGGER = ReconciliationLogger.create(ConfigMapOperator.class); + /** * Constructor + * * @param vertx The Vertx instance * @param client The Kubernetes client */ @@ -34,7 +40,7 @@ protected MixedOperation> operatio } @Override - protected Future> internalPatch(String namespace, String name, ConfigMap current, ConfigMap desired) { + protected Future> internalPatch(Reconciliation reconciliation, String namespace, String name, ConfigMap current, ConfigMap desired) { try { if (compareObjects(current.getData(), desired.getData()) && compareObjects(current.getMetadata().getName(), desired.getMetadata().getName()) @@ -43,13 +49,13 @@ && compareObjects(current.getMetadata().getAnnotations(), desired.getMetadata(). && compareObjects(current.getMetadata().getLabels(), desired.getMetadata().getLabels())) { // Checking some metadata.
We cannot check entire metadata object because it contains // timestamps which would cause restarting loop - log.debug("{} {} in namespace {} has not been patched because resources are equal", resourceKind, name, namespace); + LOGGER.debugCr(reconciliation, "{} {} in namespace {} has not been patched because resources are equal", resourceKind, name, namespace); return Future.succeededFuture(ReconcileResult.noop(current)); } else { - return super.internalPatch(namespace, name, current, desired); + return super.internalPatch(reconciliation, namespace, name, current, desired); } } catch (Exception e) { - log.error("Caught exception while patching {} {} in namespace {}", resourceKind, name, namespace, e); + LOGGER.errorCr(reconciliation, "Caught exception while patching {} {} in namespace {}", resourceKind, name, namespace, e); return Future.failedFuture(e); } } diff --git a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/CrdOperator.java b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/CrdOperator.java index 3020d05745..6df390f4f1 100644 --- a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/CrdOperator.java +++ b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/CrdOperator.java @@ -11,6 +11,8 @@ import io.fabric8.kubernetes.client.KubernetesClient; import io.fabric8.kubernetes.client.dsl.MixedOperation; import io.fabric8.kubernetes.client.dsl.Resource; +import io.strimzi.operator.common.Reconciliation; +import io.strimzi.operator.common.ReconciliationLogger; import io.strimzi.operator.common.Util; import io.vertx.core.CompositeFuture; import io.vertx.core.Future; @@ -24,6 +26,8 @@ public class CrdOperator> extends AbstractWatchableStatusedResourceOperator> { + private static final ReconciliationLogger LOGGER = ReconciliationLogger.create(CrdOperator.class); + private final Class cls; private final Class listCls; @@ -41,6 +45,7 @@ public CrdOperator(Vertx vertx, C client, Class cls, Class listCls, String this.listCls = listCls; } + @Override protected MixedOperation> operation() { return client.customResources(cls, listCls); @@ -58,10 +63,10 @@ protected MixedOperation> operation() { * once the resource has been deleted. 
*/ @Override - protected Future> internalDelete(String namespace, String name, boolean cascading) { + protected Future> internalDelete(Reconciliation reconciliation, String namespace, String name, boolean cascading) { Resource resourceOp = operation().inNamespace(namespace).withName(name); - Future watchForDeleteFuture = Util.waitFor(vertx, + Future watchForDeleteFuture = Util.waitFor(reconciliation, vertx, String.format("%s resource %s", resourceKind, name), "deleted", 1_000, @@ -73,11 +78,11 @@ protected Future> internalDelete(String namespace, String nam return CompositeFuture.join(watchForDeleteFuture, deleteFuture).map(ReconcileResult.deleted()); } - public Future patchAsync(T resource) { - return patchAsync(resource, true); + public Future patchAsync(Reconciliation reconciliation, T resource) { + return patchAsync(reconciliation, resource, true); } - public Future patchAsync(T resource, boolean cascading) { + public Future patchAsync(Reconciliation reconciliation, T resource, boolean cascading) { Promise blockingPromise = Promise.promise(); vertx.createSharedWorkerExecutor("kubernetes-ops-pool").executeBlocking(future -> { @@ -85,10 +90,10 @@ public Future patchAsync(T resource, boolean cascading) { String name = resource.getMetadata().getName(); try { T result = operation().inNamespace(namespace).withName(name).withPropagationPolicy(cascading ? DeletionPropagation.FOREGROUND : DeletionPropagation.ORPHAN).patch(resource); - log.debug("{} {} in namespace {} has been patched", resourceKind, name, namespace); + LOGGER.debugCr(reconciliation, "{} {} in namespace {} has been patched", resourceKind, name, namespace); future.complete(result); } catch (Exception e) { - log.debug("Caught exception while patching {} {} in namespace {}", resourceKind, name, namespace, e); + LOGGER.debugCr(reconciliation, "Caught exception while patching {} {} in namespace {}", resourceKind, name, namespace, e); future.fail(e); } }, true, blockingPromise); @@ -96,7 +101,7 @@ public Future patchAsync(T resource, boolean cascading) { return blockingPromise.future(); } - public Future updateStatusAsync(T resource) { + public Future updateStatusAsync(Reconciliation reconciliation, T resource) { Promise blockingPromise = Promise.promise(); vertx.createSharedWorkerExecutor("kubernetes-ops-pool").executeBlocking(future -> { @@ -105,10 +110,10 @@ public Future updateStatusAsync(T resource) { try { T result = operation().inNamespace(namespace).withName(name).updateStatus(resource); - log.info("Status of {} {} in namespace {} has been updated", resourceKind, name, namespace); + LOGGER.infoCr(reconciliation, "Status of {} {} in namespace {} has been updated", resourceKind, name, namespace); future.complete(result); } catch (Exception e) { - log.debug("Caught exception while updating status of {} {} in namespace {}", resourceKind, name, namespace, e); + LOGGER.debugCr(reconciliation, "Caught exception while updating status of {} {} in namespace {}", resourceKind, name, namespace, e); future.fail(e); } }, true, blockingPromise); diff --git a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/DeploymentConfigOperator.java b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/DeploymentConfigOperator.java index ecc8fbe5ec..8f9ecb9c07 100644 --- a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/DeploymentConfigOperator.java +++ b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/DeploymentConfigOperator.java @@ -10,6 +10,7 @@ 
import io.fabric8.openshift.api.model.DeploymentConfigList; import io.fabric8.openshift.client.OpenShiftClient; import io.fabric8.openshift.client.dsl.DeployableScalableResource; +import io.strimzi.operator.common.Reconciliation; import io.vertx.core.Future; import io.vertx.core.Vertx; @@ -43,15 +44,16 @@ protected Integer currentScale(String namespace, String name) { } @Override - protected Future> internalPatch(String namespace, String name, DeploymentConfig current, DeploymentConfig desired) { + protected Future> internalPatch(Reconciliation reconciliation, String namespace, String name, DeploymentConfig current, DeploymentConfig desired) { desired.getSpec().getTemplate().getSpec().getContainers().get(0).setImage(current.getSpec().getTemplate().getSpec().getContainers().get(0).getImage()); - return super.internalPatch(namespace, name, current, desired); + return super.internalPatch(reconciliation, namespace, name, current, desired); } /** * Asynchronously polls the deployment configuration until either the observed generation matches the desired * generation sequence number or timeout. * + * @param reconciliation The reconciliation * @param namespace The namespace. * @param name The resource name. * @param pollIntervalMs The polling interval @@ -59,8 +61,8 @@ protected Future> internalPatch(String namespa * @return A future which completes when the observed generation of the deployment configuration matches the * generation sequence number of the desired state. */ - public Future waitForObserved(String namespace, String name, long pollIntervalMs, long timeoutMs) { - return waitFor(namespace, name, "observed", pollIntervalMs, timeoutMs, this::isObserved); + public Future waitForObserved(Reconciliation reconciliation, String namespace, String name, long pollIntervalMs, long timeoutMs) { + return waitFor(reconciliation, namespace, name, "observed", pollIntervalMs, timeoutMs, this::isObserved); } /** diff --git a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/DeploymentOperator.java b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/DeploymentOperator.java index 1e186b5186..8571f182d9 100644 --- a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/DeploymentOperator.java +++ b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/DeploymentOperator.java @@ -11,6 +11,7 @@ import io.fabric8.kubernetes.client.dsl.MixedOperation; import io.fabric8.kubernetes.client.dsl.RollableScalableResource; import io.strimzi.operator.common.Annotations; +import io.strimzi.operator.common.Reconciliation; import io.strimzi.operator.common.model.Labels; import io.vertx.core.Future; import io.vertx.core.Vertx; @@ -24,6 +25,7 @@ public class DeploymentOperator extends AbstractScalableResourceOperator rollingUpdate(String namespace, String name, long operationTimeoutMs) { + public Future rollingUpdate(Reconciliation reconciliation, String namespace, String name, long operationTimeoutMs) { return getAsync(namespace, name) - .compose(deployment -> deletePod(namespace, name)) - .compose(ignored -> readiness(namespace, name, 1_000, operationTimeoutMs)); + .compose(deployment -> deletePod(reconciliation, namespace, name)) + .compose(ignored -> readiness(reconciliation, namespace, name, 1_000, operationTimeoutMs)); } /** * Asynchronously delete the given pod. + * @param reconciliation The reconciliation * @param namespace The namespace of the pod. * @param name The name of the pod. 
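A sketch of the converted Deployment helpers from a caller's point of view (deployment name and timeout are hypothetical): rollingUpdate deletes the backing pod and then waits for readiness, with every step logged against the supplied Reconciliation:

    deploymentOperator.rollingUpdate(reconciliation, "myproject", "my-cluster-entity-operator", 300_000L)
            .onComplete(res -> LOGGER.debugCr(reconciliation, "Rolling update finished, succeeded: {}", res.succeeded()));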
* @return A Future which will complete once all the pods has been deleted. */ - public Future> deletePod(String namespace, String name) { + public Future> deletePod(Reconciliation reconciliation, String namespace, String name) { Labels labels = Labels.EMPTY.withStrimziName(name); String podName = podOperations.list(namespace, labels).get(0).getMetadata().getName(); - return podOperations.reconcile(namespace, podName, null); + return podOperations.reconcile(reconciliation, namespace, podName, null); } @Override - protected Future> internalPatch(String namespace, String name, Deployment current, Deployment desired, boolean cascading) { + protected Future> internalPatch(Reconciliation reconciliation, String namespace, String name, Deployment current, Deployment desired, boolean cascading) { String k8sRev = Annotations.annotations(current).get(Annotations.ANNO_DEP_KUBE_IO_REVISION); Annotations.annotations(desired).put(Annotations.ANNO_DEP_KUBE_IO_REVISION, k8sRev); - return super.internalPatch(namespace, name, current, desired, cascading); + return super.internalPatch(reconciliation, namespace, name, current, desired, cascading); } /** * Asynchronously polls the deployment until either the observed generation matches the desired * generation sequence number or timeout. * + * @param reconciliation The reconciliation * @param namespace The namespace. * @param name The resource name. * @param pollIntervalMs The polling interval @@ -95,8 +101,8 @@ protected Future> internalPatch(String namespace, St * @return A future which completes when the observed generation of the deployment matches the * generation sequence number of the desired state. */ - public Future waitForObserved(String namespace, String name, long pollIntervalMs, long timeoutMs) { - return waitFor(namespace, name, "observed", pollIntervalMs, timeoutMs, this::isObserved); + public Future waitForObserved(Reconciliation reconciliation, String namespace, String name, long pollIntervalMs, long timeoutMs) { + return waitFor(reconciliation, namespace, name, "observed", pollIntervalMs, timeoutMs, this::isObserved); } /** diff --git a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/IngressOperator.java b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/IngressOperator.java index 2a09593724..4d3c3ac8cf 100644 --- a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/IngressOperator.java +++ b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/IngressOperator.java @@ -9,6 +9,7 @@ import io.fabric8.kubernetes.client.KubernetesClient; import io.fabric8.kubernetes.client.dsl.MixedOperation; import io.fabric8.kubernetes.client.dsl.Resource; +import io.strimzi.operator.common.Reconciliation; import io.vertx.core.Future; import io.vertx.core.Vertx; @@ -34,14 +35,15 @@ protected MixedOperation> operation() { /** * Succeeds when the Service has an assigned address * + * @param reconciliation The reconciliation * @param namespace Namespace * @param name Name of the service * @param pollIntervalMs Interval in which we poll * @param timeoutMs Timeout * @return A future that succeeds when the Service has an assigned address. 
*/ - public Future hasIngressAddress(String namespace, String name, long pollIntervalMs, long timeoutMs) { - return waitFor(namespace, name, "addressable", pollIntervalMs, timeoutMs, this::isIngressAddressReady); + public Future hasIngressAddress(Reconciliation reconciliation, String namespace, String name, long pollIntervalMs, long timeoutMs) { + return waitFor(reconciliation, namespace, name, "addressable", pollIntervalMs, timeoutMs, this::isIngressAddressReady); } /** diff --git a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/IngressV1Beta1Operator.java b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/IngressV1Beta1Operator.java index 2b48a5e595..75c4ed41be 100644 --- a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/IngressV1Beta1Operator.java +++ b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/IngressV1Beta1Operator.java @@ -9,6 +9,7 @@ import io.fabric8.kubernetes.client.KubernetesClient; import io.fabric8.kubernetes.client.dsl.MixedOperation; import io.fabric8.kubernetes.client.dsl.Resource; +import io.strimzi.operator.common.Reconciliation; import io.vertx.core.Future; import io.vertx.core.Vertx; @@ -34,14 +35,15 @@ protected MixedOperation> operation() { /** * Succeeds when the Service has an assigned address * + * @param reconciliation The reconciliation * @param namespace Namespace * @param name Name of the service * @param pollIntervalMs Interval in which we poll * @param timeoutMs Timeout * @return A future that succeeds when the Service has an assigned address. */ - public Future hasIngressAddress(String namespace, String name, long pollIntervalMs, long timeoutMs) { - return waitFor(namespace, name, "addressable", pollIntervalMs, timeoutMs, this::isIngressAddressReady); + public Future hasIngressAddress(Reconciliation reconciliation, String namespace, String name, long pollIntervalMs, long timeoutMs) { + return waitFor(reconciliation, namespace, name, "addressable", pollIntervalMs, timeoutMs, this::isIngressAddressReady); } /** diff --git a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/NodeOperator.java b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/NodeOperator.java index 1fadae403f..7b20364c0b 100644 --- a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/NodeOperator.java +++ b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/NodeOperator.java @@ -16,6 +16,7 @@ public class NodeOperator extends AbstractNonNamespacedResourceOperator> { + private static final ReconciliationLogger LOGGER = ReconciliationLogger.create(PodOperator.class); private static final String NO_UID = "NULL"; /** @@ -52,31 +55,31 @@ public Watch watch(String namespace, String name, Watcher watcher) { /** * Asynchronously delete the given pod, return a Future which completes when the Pod has been recreated. * Note: The pod might not be "ready" when the returned Future completes. 
- * @param logContext Some context (for logging) + * @param reconciliation The reconciliation * @param pod The pod to be restarted * @param timeoutMs Timeout of the deletion * @return a Future which completes when the Pod has been recreated */ - public Future restart(String logContext, Pod pod, long timeoutMs) { + public Future restart(Reconciliation reconciliation, Pod pod, long timeoutMs) { long pollingIntervalMs = 1_000; String namespace = pod.getMetadata().getNamespace(); String podName = pod.getMetadata().getName(); Promise deleteFinished = Promise.promise(); - log.info("{}: Rolling pod {}", logContext, podName); + LOGGER.infoCr(reconciliation, "Rolling pod {}", podName); // Determine generation of deleted pod String deleted = getPodUid(pod); // Delete the pod - log.debug("{}: Waiting for pod {} to be deleted", logContext, podName); + LOGGER.debugCr(reconciliation, "Waiting for pod {} to be deleted", podName); Future podReconcileFuture = - reconcile(namespace, podName, null).compose(ignore -> { - Future del = waitFor(namespace, podName, "deleted", pollingIntervalMs, timeoutMs, (ignore1, ignore2) -> { + reconcile(reconciliation, namespace, podName, null).compose(ignore -> { + Future del = waitFor(reconciliation, namespace, podName, "deleted", pollingIntervalMs, timeoutMs, (ignore1, ignore2) -> { // predicate - changed generation means pod has been updated String newUid = getPodUid(get(namespace, podName)); boolean done = !deleted.equals(newUid); if (done) { - log.debug("Rolling pod {} finished", podName); + LOGGER.debugCr(reconciliation, "Rolling pod {} finished", podName); } return done; }); @@ -85,7 +88,7 @@ public Future restart(String logContext, Pod pod, long timeoutMs) { podReconcileFuture.onComplete(deleteResult -> { if (deleteResult.succeeded()) { - log.debug("{}: Pod {} was deleted", logContext, podName); + LOGGER.debugCr(reconciliation, "Pod {} was deleted", podName); } deleteFinished.handle(deleteResult); }); diff --git a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/PvcOperator.java b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/PvcOperator.java index b6d717e67b..64a2239162 100644 --- a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/PvcOperator.java +++ b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/PvcOperator.java @@ -9,6 +9,8 @@ import io.fabric8.kubernetes.client.KubernetesClient; import io.fabric8.kubernetes.client.dsl.MixedOperation; import io.fabric8.kubernetes.client.dsl.Resource; +import io.strimzi.operator.common.Reconciliation; +import io.strimzi.operator.common.ReconciliationLogger; import io.vertx.core.Future; import io.vertx.core.Vertx; @@ -18,12 +20,14 @@ * Operations for {@code PersistentVolumeClaim}s. */ public class PvcOperator extends AbstractResourceOperator> { + private static final ReconciliationLogger LOGGER = ReconciliationLogger.create(PvcOperator.class); protected static final Pattern IGNORABLE_PATHS = Pattern.compile( "^(/metadata/managedFields" + "|/metadata/annotations/pv.kubernetes.io~1bind-completed" + "|/metadata/finalizers" + "|/status)$"); + /** * Constructor * @param vertx The Vertx instance @@ -52,6 +56,7 @@ protected Pattern ignorablePaths() { * * PvcOperator needs to patch the volumeName field in spec which is immutable and which should contain the same value as the existing resource. 
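A caller sketch for the reworked PodOperator.restart (pod name and timeout are hypothetical): the old free-form logContext string is gone, and the Reconciliation now supplies that context to every log line inside the method:

    Pod pod = podOperator.get("myproject", "my-cluster-kafka-0");
    podOperator.restart(reconciliation, pod, 300_000L)
            .onComplete(res -> LOGGER.debugCr(reconciliation, "Pod restart finished, succeeded: {}", res.succeeded()));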
* + * @param reconciliation The reconciliation * @param namespace Namespace of the pvc * @param name Name of the pvc * @param current Current pvc @@ -60,15 +65,15 @@ protected Pattern ignorablePaths() { * @return Future with reconciliation result */ @Override - protected Future> internalPatch(String namespace, String name, PersistentVolumeClaim current, PersistentVolumeClaim desired) { + protected Future> internalPatch(Reconciliation reconciliation, String namespace, String name, PersistentVolumeClaim current, PersistentVolumeClaim desired) { try { if (current.getSpec() != null && desired.getSpec() != null) { revertImmutableChanges(current, desired); } - return super.internalPatch(namespace, name, current, desired); + return super.internalPatch(reconciliation, namespace, name, current, desired); } catch (Exception e) { - log.error("Caught exception while patching {} {} in namespace {}", resourceKind, name, namespace, e); + LOGGER.errorCr(reconciliation, "Caught exception while patching {} {} in namespace {}", resourceKind, name, namespace, e); return Future.failedFuture(e); } } diff --git a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/ResourceDiff.java b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/ResourceDiff.java index a163a0bcde..da4b3b4a7f 100644 --- a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/ResourceDiff.java +++ b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/ResourceDiff.java @@ -7,19 +7,19 @@ import com.fasterxml.jackson.databind.JsonNode; import io.fabric8.kubernetes.api.model.HasMetadata; import io.fabric8.zjsonpatch.JsonDiff; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; +import io.strimzi.operator.common.Reconciliation; +import io.strimzi.operator.common.ReconciliationLogger; import java.util.regex.Pattern; import static io.fabric8.kubernetes.client.internal.PatchUtils.patchMapper; class ResourceDiff extends AbstractJsonDiff { - private static final Logger log = LogManager.getLogger(ResourceDiff.class.getName()); + private static final ReconciliationLogger LOGGER = ReconciliationLogger.create(ResourceDiff.class.getName()); private final boolean isEmpty; - public ResourceDiff(String resourceKind, String resourceName, T current, T desired, Pattern ignorableFields) { + public ResourceDiff(Reconciliation reconciliation, String resourceKind, String resourceName, T current, T desired, Pattern ignorableFields) { JsonNode source = patchMapper().valueToTree(current == null ? "{}" : current); JsonNode target = patchMapper().valueToTree(desired == null ? 
"{}" : desired); JsonNode diff = JsonDiff.asJson(source, target); @@ -30,14 +30,14 @@ public ResourceDiff(String resourceKind, String resourceName, T current, T desir String pathValue = d.get("path").asText(); if (ignorableFields.matcher(pathValue).matches()) { - log.debug("Ignoring {} {} diff {}", resourceKind, resourceName, d); + LOGGER.debugCr(reconciliation, "Ignoring {} {} diff {}", resourceKind, resourceName, d); continue; } - if (log.isDebugEnabled()) { - log.debug("{} {} differs: {}", resourceKind, resourceName, d); - log.debug("Current {} {} path {} has value {}", resourceKind, resourceName, pathValue, lookupPath(source, pathValue)); - log.debug("Desired {} {} path {} has value {}", resourceKind, resourceName, pathValue, lookupPath(target, pathValue)); + if (LOGGER.isDebugEnabled()) { + LOGGER.debugCr(reconciliation, "{} {} differs: {}", resourceKind, resourceName, d); + LOGGER.debugCr(reconciliation, "Current {} {} path {} has value {}", resourceKind, resourceName, pathValue, lookupPath(source, pathValue)); + LOGGER.debugCr(reconciliation, "Desired {} {} path {} has value {}", resourceKind, resourceName, pathValue, lookupPath(target, pathValue)); } num++; diff --git a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/ResourceSupport.java b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/ResourceSupport.java index d7cbe84a8d..c48ded37b7 100644 --- a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/ResourceSupport.java +++ b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/ResourceSupport.java @@ -13,14 +13,13 @@ import io.fabric8.kubernetes.client.dsl.Gettable; import io.fabric8.kubernetes.client.dsl.Listable; import io.fabric8.kubernetes.client.dsl.Watchable; +import io.strimzi.operator.common.ReconciliationLogger; import io.vertx.core.AsyncResult; import io.vertx.core.CompositeFuture; import io.vertx.core.Future; import io.vertx.core.Promise; import io.vertx.core.Handler; import io.vertx.core.Vertx; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import java.io.Closeable; import java.util.List; @@ -28,7 +27,7 @@ public class ResourceSupport { public static final long DEFAULT_TIMEOUT_MS = 300_000; - protected static final Logger LOGGER = LogManager.getLogger(ResourceSupport.class); + protected static final ReconciliationLogger LOGGER = ReconciliationLogger.create(ResourceSupport.class); private final Vertx vertx; @@ -39,6 +38,7 @@ public class ResourceSupport { /** * Asynchronously close the given {@code closeable} on a worker thread, * returning a Future which completes with the outcome. + * * @param closeable The closeable * @return The Future */ @@ -46,7 +46,7 @@ public Future closeOnWorkerThread(Closeable closeable) { return executeBlocking( blockingFuture -> { try { - LOGGER.debug("Closing {}", closeable); + LOGGER.debugOp("Closing {}", closeable); closeable.close(); blockingFuture.complete(); } catch (Throwable t) { @@ -67,6 +67,7 @@ Future executeBlocking(Handler> blockingCodeHandler) { * a single cause, possibly with suppressed exception. * If both AsyncResults have failed {@code primary} will be the main cause of failure and * {@code secondary} will be a suppressed exception. + * * @param primary The primary failure. * @param secondary The secondary failure. * @return The cause. @@ -94,6 +95,7 @@ private Throwable collectCauses(AsyncResult primary, * Kubernetes resources changes, so may block. 
* When the {@code watchFn} returns non-null the watch will be closed and then * the future returned from this method will be completed on the context thread. + * * @param watchable The watchable. * @param operationTimeoutMs The timeout in ms. * @param watchFnDescription A description of what {@code watchFn} is watching for. @@ -133,7 +135,7 @@ Future selfClosingWatch(Watchable> watchable, closeFuture.onComplete(closeResult -> vertx.runOnContext(ignored2 -> { - LOGGER.debug("Completing watch future"); + LOGGER.debugOp("Completing watch future"); if (joinResult.succeeded() && closeResult.succeeded()) { resultPromise.complete(joinResult.result().resultAt(1)); } else { @@ -145,7 +147,7 @@ Future selfClosingWatch(Watchable> watchable, try { Watch watch = watchable.watch(this); - LOGGER.debug("Opened watch {} for evaluation of {}", watch, watchFnDescription); + LOGGER.debugOp("Opened watch {} for evaluation of {}", watch, watchFnDescription); watchPromise.complete(watch); } catch (Throwable t) { watchPromise.fail(t); @@ -162,11 +164,11 @@ public void eventReceived(Action action, T resource) { f.tryComplete(apply); vertx.cancelTimer(timerId); } else { - LOGGER.debug("Not yet satisfied: {}", watchFnDescription); + LOGGER.debugOp("Not yet satisfied: {}", watchFnDescription); } } catch (Throwable t) { if (!f.tryFail(t)) { - LOGGER.debug("Ignoring exception thrown while " + + LOGGER.debugOp("Ignoring exception thrown while " + "evaluating watch {} because the future was already completed", watchFnDescription, t); } } diff --git a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/RouteOperator.java b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/RouteOperator.java index d75cde3279..c3b3b32c6a 100644 --- a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/RouteOperator.java +++ b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/RouteOperator.java @@ -9,6 +9,7 @@ import io.fabric8.openshift.api.model.Route; import io.fabric8.openshift.api.model.RouteList; import io.fabric8.openshift.client.OpenShiftClient; +import io.strimzi.operator.common.Reconciliation; import io.vertx.core.Future; import io.vertx.core.Vertx; @@ -33,14 +34,15 @@ protected MixedOperation> operation() { /** * Succeeds when the Route has an assigned address. * + * @param reconciliation The reconciliation * @param namespace Namespace. * @param name Name of the route. * @param pollIntervalMs Interval in which we poll. * @param timeoutMs Timeout. * @return A future that succeeds when the Route has an assigned address. 
*/ - public Future hasAddress(String namespace, String name, long pollIntervalMs, long timeoutMs) { - return waitFor(namespace, name, "addressable", pollIntervalMs, timeoutMs, this::isAddressReady); + public Future hasAddress(Reconciliation reconciliation, String namespace, String name, long pollIntervalMs, long timeoutMs) { + return waitFor(reconciliation, namespace, name, "addressable", pollIntervalMs, timeoutMs, this::isAddressReady); } /** diff --git a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/ServiceAccountOperator.java b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/ServiceAccountOperator.java index 4f6cc52de6..3d5e74e6b8 100644 --- a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/ServiceAccountOperator.java +++ b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/ServiceAccountOperator.java @@ -9,10 +9,13 @@ import io.fabric8.kubernetes.client.KubernetesClient; import io.fabric8.kubernetes.client.dsl.MixedOperation; import io.fabric8.kubernetes.client.dsl.Resource; +import io.strimzi.operator.common.Reconciliation; +import io.strimzi.operator.common.ReconciliationLogger; import io.vertx.core.Future; import io.vertx.core.Vertx; public class ServiceAccountOperator extends AbstractResourceOperator> { + private static final ReconciliationLogger LOGGER = ReconciliationLogger.create(ServiceAccountOperator.class); private final boolean patching; /** @@ -41,16 +44,15 @@ protected MixedOperation> internalPatch(String namespace, String name, ServiceAccount current, ServiceAccount desired) { + protected Future> internalPatch(Reconciliation reconciliation, String namespace, String name, ServiceAccount current, ServiceAccount desired) { if (patching) { if (desired.getSecrets() == null || desired.getSecrets().isEmpty()) { desired.setSecrets(current.getSecrets()); } - - return super.internalPatch(namespace, name, current, desired); + return super.internalPatch(reconciliation, namespace, name, current, desired); } else { // Patching an SA causes new tokens to be created, which we should avoid - log.debug("{} {} in namespace {} has not been patched: patching service accounts generates new tokens which should be avoided.", resourceKind, name, namespace); + LOGGER.debugCr(reconciliation, "{} {} in namespace {} has not been patched: patching service accounts generates new tokens which should be avoided.", resourceKind, name, namespace); return Future.succeededFuture(ReconcileResult.noop(current)); } } diff --git a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/ServiceOperator.java b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/ServiceOperator.java index c0f6bd341e..9cc100d200 100644 --- a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/ServiceOperator.java +++ b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/ServiceOperator.java @@ -12,6 +12,8 @@ import io.fabric8.kubernetes.client.KubernetesClient; import io.fabric8.kubernetes.client.dsl.MixedOperation; import io.fabric8.kubernetes.client.dsl.ServiceResource; +import io.strimzi.operator.common.Reconciliation; +import io.strimzi.operator.common.ReconciliationLogger; import io.strimzi.operator.common.Util; import io.vertx.core.Future; import io.vertx.core.Vertx; @@ -26,6 +28,7 @@ */ public class ServiceOperator extends AbstractResourceOperator> { + private static final ReconciliationLogger LOGGER = 
ReconciliationLogger.create(ServiceOperator.class); protected static final Pattern IGNORABLE_PATHS = Pattern.compile( "^(/metadata/managedFields" + "|/spec/sessionAffinity" + @@ -38,6 +41,7 @@ public class ServiceOperator extends AbstractResourceOperator> internalPatch(String namespace, String name, Service current, Service desired) { + protected Future> internalPatch(Reconciliation reconciliation, String namespace, String name, Service current, Service desired) { try { if (current.getSpec() != null && desired.getSpec() != null) { if (("NodePort".equals(current.getSpec().getType()) && "NodePort".equals(desired.getSpec().getType())) @@ -87,9 +92,9 @@ protected Future> internalPatch(String namespace, Strin patchDualStackNetworking(current, desired); } - return super.internalPatch(namespace, name, current, desired); + return super.internalPatch(reconciliation, namespace, name, current, desired); } catch (Exception e) { - log.error("Caught exception while patching {} {} in namespace {}", resourceKind, name, namespace, e); + LOGGER.errorCr(reconciliation, "Caught exception while patching {} {} in namespace {}", resourceKind, name, namespace, e); return Future.failedFuture(e); } } @@ -177,30 +182,32 @@ protected void patchDualStackNetworking(Service current, Service desired) { * Deletes the resource with the given namespace and name and completes the given future accordingly. * This method will do a cascading delete. * + * @param reconciliation The reconciliation * @param namespace Namespace of the resource which should be deleted * @param name Name of the resource which should be deleted * * @return Future with result of the reconciliation */ - protected Future> internalDelete(String namespace, String name) { - return internalDelete(namespace, name, true); + protected Future> internalDelete(Reconciliation reconciliation, String namespace, String name) { + return internalDelete(reconciliation, namespace, name, true); } - public Future endpointReadiness(String namespace, String name, long pollInterval, long operationTimeoutMs) { - return endpointOperations.readiness(namespace, name, pollInterval, operationTimeoutMs); + public Future endpointReadiness(Reconciliation reconciliation, String namespace, String name, long pollInterval, long operationTimeoutMs) { + return endpointOperations.readiness(reconciliation, namespace, name, pollInterval, operationTimeoutMs); } /** * Succeeds when the Service has an assigned address * + * @param reconciliation The reconciliation * @param namespace Namespace * @param name Name of the service * @param pollIntervalMs Interval in which we poll * @param timeoutMs Timeout * @return A future that succeeds when the Service has an assigned address. 
*/ - public Future hasIngressAddress(String namespace, String name, long pollIntervalMs, long timeoutMs) { - return waitFor(namespace, name, "addressable", pollIntervalMs, timeoutMs, this::isIngressAddressReady); + public Future hasIngressAddress(Reconciliation reconciliation, String namespace, String name, long pollIntervalMs, long timeoutMs) { + return waitFor(reconciliation, namespace, name, "addressable", pollIntervalMs, timeoutMs, this::isIngressAddressReady); } /** @@ -226,14 +233,15 @@ public boolean isIngressAddressReady(String namespace, String name) { /** * Succeeds when the Service has an assigned node port * + * @param reconciliation The reconciliation * @param namespace Namespace * @param name Name of the service * @param pollIntervalMs Interval in which we poll * @param timeoutMs Timeout * @return A future that succeeds when the Service has an assigned node port */ - public Future hasNodePort(String namespace, String name, long pollIntervalMs, long timeoutMs) { - return waitFor(namespace, name, pollIntervalMs, timeoutMs, this::isNodePortReady); + public Future hasNodePort(Reconciliation reconciliation, String namespace, String name, long pollIntervalMs, long timeoutMs) { + return waitFor(reconciliation, namespace, name, pollIntervalMs, timeoutMs, this::isNodePortReady); } /** diff --git a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/StorageClassOperator.java b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/StorageClassOperator.java index 125bde345b..5cd9e050d3 100644 --- a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/StorageClassOperator.java +++ b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/StorageClassOperator.java @@ -16,6 +16,7 @@ public class StorageClassOperator extends AbstractNonNamespacedResourceOperator< /** * Constructor. + * * @param vertx The Vertx instance. * @param client The Kubernetes client. */ diff --git a/operator-common/src/main/java/io/strimzi/operator/common/process/ProcessHelper.java b/operator-common/src/main/java/io/strimzi/operator/common/process/ProcessHelper.java index b8ff0aadf3..cd733d9fcf 100644 --- a/operator-common/src/main/java/io/strimzi/operator/common/process/ProcessHelper.java +++ b/operator-common/src/main/java/io/strimzi/operator/common/process/ProcessHelper.java @@ -19,6 +19,7 @@ public class ProcessHelper { * Execute the command given in {@code args}. * Apply the given {@code sanitizer} function to the * {@code args} when logging, so that security-sensitive information is not logged. + * * @param args The executable and its arguments. 
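A sketch of how the converted address and readiness helpers chain together in a caller (service name, poll interval and timeout are hypothetical); each helper is a thin wrapper around waitFor(reconciliation, ...) with its own predicate and log-state label:

    serviceOperator.hasIngressAddress(reconciliation, "myproject", "my-cluster-kafka-external-bootstrap", 1_000, 300_000L)
            .compose(ignored -> serviceOperator.endpointReadiness(reconciliation, "myproject", "my-cluster-kafka-external-bootstrap", 1_000, 300_000L));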
* @return The result of the subprocess * @throws IOException Reading/writing to the subprocess @@ -39,9 +40,7 @@ public static ProcessResult executeSubprocess(List args) throws IOExcept pb.redirectError(stderr); pb.redirectOutput(stdout); Process p = pb.start(); - if (LOGGER.isInfoEnabled()) { - LOGGER.info("Started process {} with command line {}", p, args); - } + LOGGER.info("Started process {} with command line {}", p, args); p.getOutputStream().close(); int exitCode = p.waitFor(); // TODO timeout on wait @@ -51,9 +50,6 @@ public static ProcessResult executeSubprocess(List args) throws IOExcept public static File createTmpFile(String suffix) throws IOException { File tmpFile = File.createTempFile(ProcessHelper.class.getName(), suffix); - if (LOGGER.isTraceEnabled()) { - LOGGER.trace("Created temporary file {}", tmpFile); - } tmpFile.deleteOnExit(); return tmpFile; } diff --git a/operator-common/src/test/java/io/strimzi/operator/common/OperatorMetricsTest.java b/operator-common/src/test/java/io/strimzi/operator/common/OperatorMetricsTest.java index 2c954a0eda..9d70fa1eea 100644 --- a/operator-common/src/test/java/io/strimzi/operator/common/OperatorMetricsTest.java +++ b/operator-common/src/test/java/io/strimzi/operator/common/OperatorMetricsTest.java @@ -74,7 +74,8 @@ protected Future createOrUpdate(Reconciliation reconciliation, CustomResource re return Future.succeededFuture(); } - public Set validate(CustomResource resource) { + @Override + public Set validate(Reconciliation reconciliation, CustomResource resource) { return emptySet(); } @@ -125,7 +126,7 @@ protected Future createOrUpdate(Reconciliation reconciliation, CustomResource re } @Override - public Set validate(CustomResource resource) { + public Set validate(Reconciliation reconciliation, CustomResource resource) { // Do nothing return emptySet(); } @@ -178,7 +179,7 @@ protected Future createOrUpdate(Reconciliation reconciliation, CustomResource re } @Override - public Set validate(CustomResource resource) { + public Set validate(Reconciliation reconciliation, CustomResource resource) { return new HashSet<>(); } @@ -230,7 +231,7 @@ protected Future createOrUpdate(Reconciliation reconciliation, CustomResource re } @Override - public Set validate(CustomResource resource) { + public Set validate(Reconciliation reconciliation, CustomResource resource) { // Do nothing return emptySet(); } @@ -279,7 +280,7 @@ public HasMetadata get(String namespace, String name) { } @Override - public Future updateStatusAsync(HasMetadata resource) { + public Future updateStatusAsync(Reconciliation reconciliation, HasMetadata resource) { return null; } }; @@ -291,7 +292,7 @@ protected Future createOrUpdate(Reconciliation reconciliation, CustomResource re } @Override - public Set validate(CustomResource resource) { + public Set validate(Reconciliation reconciliation, CustomResource resource) { // Do nothing return emptySet(); } @@ -354,7 +355,7 @@ public Future> allResourceNames(String namespace) { } @Override - public Set validate(CustomResource resource) { + public Set validate(Reconciliation reconciliation, CustomResource resource) { // Do nothing return emptySet(); } @@ -425,7 +426,7 @@ protected abstract static class MyResource extends CustomResource { protected AbstractWatchableStatusedResourceOperator resourceOperatorWithExistingResource() { return new AbstractWatchableStatusedResourceOperator(vertx, null, "TestResource") { @Override - public Future updateStatusAsync(HasMetadata resource) { + public Future updateStatusAsync(Reconciliation 
reconciliation, HasMetadata resource) { return null; } @@ -491,7 +492,7 @@ public void setStatus(Object status) { private AbstractWatchableStatusedResourceOperator resourceOperatorWithExistingPausedResource() { return new AbstractWatchableStatusedResourceOperator(vertx, null, "TestResource") { @Override - public Future updateStatusAsync(HasMetadata resource) { + public Future updateStatusAsync(Reconciliation reconciliation, HasMetadata resource) { return Future.succeededFuture(); } diff --git a/operator-common/src/test/java/io/strimzi/operator/common/model/ResourceVisitorTest.java b/operator-common/src/test/java/io/strimzi/operator/common/model/ResourceVisitorTest.java index 263efedd96..dabe075248 100644 --- a/operator-common/src/test/java/io/strimzi/operator/common/model/ResourceVisitorTest.java +++ b/operator-common/src/test/java/io/strimzi/operator/common/model/ResourceVisitorTest.java @@ -5,6 +5,7 @@ package io.strimzi.operator.common.model; import io.strimzi.api.kafka.model.Kafka; +import io.strimzi.operator.common.Reconciliation; import io.strimzi.test.TestUtils; import org.junit.jupiter.api.Test; @@ -22,14 +23,14 @@ public class ResourceVisitorTest { public void testDoesNotThrow() { Kafka k = TestUtils.fromYaml("/example.yaml", Kafka.class, true); assertThat(k, is(notNullValue())); - ResourceVisitor.visit(k, new ResourceVisitor.Visitor() { + ResourceVisitor.visit(new Reconciliation("test", "kind", "namespace", "name"), k, new ResourceVisitor.Visitor() { @Override - public void visitProperty(List path, Object owner, M member, ResourceVisitor.Property property, Object propertyValue) { + public void visitProperty(Reconciliation reconciliation, List path, Object owner, M member, ResourceVisitor.Property property, Object propertyValue) { } @Override - public void visitObject(List path, Object object) { + public void visitObject(Reconciliation reconciliation, List path, Object object) { } }); @@ -39,14 +40,14 @@ public void visitObject(List path, Object object) { public void testDoesNotThrowWithListenerList() { Kafka k = TestUtils.fromYaml("/example2.yaml", Kafka.class, true); assertThat(k, is(notNullValue())); - ResourceVisitor.visit(k, new ResourceVisitor.Visitor() { + ResourceVisitor.visit(new Reconciliation("test", "kind", "namespace", "name"), k, new ResourceVisitor.Visitor() { @Override - public void visitProperty(List path, Object owner, M member, ResourceVisitor.Property property, Object propertyValue) { + public void visitProperty(Reconciliation reconciliation, List path, Object owner, M member, ResourceVisitor.Property property, Object propertyValue) { } @Override - public void visitObject(List path, Object object) { + public void visitObject(Reconciliation reconciliation, List path, Object object) { } }); diff --git a/operator-common/src/test/java/io/strimzi/operator/common/model/ValidationVisitorTest.java b/operator-common/src/test/java/io/strimzi/operator/common/model/ValidationVisitorTest.java index 23d2bf20f1..0b61108ec2 100644 --- a/operator-common/src/test/java/io/strimzi/operator/common/model/ValidationVisitorTest.java +++ b/operator-common/src/test/java/io/strimzi/operator/common/model/ValidationVisitorTest.java @@ -8,11 +8,10 @@ import io.strimzi.api.kafka.model.Kafka; import io.strimzi.api.kafka.model.KafkaBuilder; import io.strimzi.api.kafka.model.status.Condition; +import io.strimzi.operator.common.Reconciliation; import io.strimzi.test.TestUtils; import io.strimzi.test.logging.TestLogger; import org.apache.logging.log4j.Level; -import 
org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.core.Logger; import org.junit.jupiter.api.Test; import java.util.HashSet; @@ -31,7 +30,7 @@ public class ValidationVisitorTest { public void testValidationErrorsAreLogged() { Kafka k = TestUtils.fromYaml("/example.yaml", Kafka.class, true); assertThat(k, is(notNullValue())); - TestLogger logger = new TestLogger((Logger) LogManager.getLogger(ValidationVisitorTest.class)); + TestLogger logger = TestLogger.create(ValidationVisitorTest.class); HasMetadata resource = new KafkaBuilder() .withNewMetadata() .withName("testname") @@ -42,7 +41,7 @@ public void testValidationErrorsAreLogged() { Set warningConditions = new HashSet<>(); - ResourceVisitor.visit(k, new ValidationVisitor(resource, logger, warningConditions)); + ResourceVisitor.visit(Reconciliation.DUMMY_RECONCILIATION, k, new ValidationVisitor(resource, logger, warningConditions)); List warningMessages = warningConditions.stream().map(Condition::getMessage).collect(Collectors.toList()); @@ -54,32 +53,33 @@ public void testValidationErrorsAreLogged() { assertThat(warningMessages, hasItem("In API version v1alpha1 the object topicOperator at path spec.topicOperator has been deprecated. " + "This object has been replaced with EntityTopicOperatorSpec and is removed in API version v1beta2.")); + logger.assertLoggedAtLeastOnce(lm -> + lm.level() == Level.WARN + && lm.formattedMessage().matches("Reconciliation #[0-9]*\\(test\\) kind\\(namespace\\/name\\): " + + "Contains object at path spec.kafka with an unknown property: foo")); logger.assertLoggedAtLeastOnce(lm -> lm.level() == Level.WARN - && ("Kafka resource testname in namespace testnamespace: " + - "Contains object at path spec.kafka with an unknown property: foo").equals(lm.formattedMessage())); - logger.assertLoggedAtLeastOnce(lm -> lm.level() == Level.WARN - && ("Kafka resource testname in namespace testnamespace: " + + && lm.formattedMessage().matches("Reconciliation #[0-9]*\\(test\\) kind\\(namespace\\/name\\): " + "In API version v1alpha1 the topicOperator property at path spec.topicOperator has been deprecated, " + - "and should now be configured using spec.entityOperator.topicOperator. This property is removed in API version v1beta2.").equals(lm.formattedMessage())); + "and should now be configured using spec.entityOperator.topicOperator. This property is removed in API version v1beta2.")); logger.assertNotLogged(lm -> lm.level() == Level.WARN - && ("Kafka resource testname in namespace testnamespace: " + + && lm.formattedMessage().matches("Reconciliation #[0-9]*\\(test\\) kind\\(namespace\\/name\\): " + "In API version v1alpha1 the tolerations property at path spec.zookeeper.tolerations has been deprecated, " + - "and should now be configured using spec.zookeeper.template.pod.tolerations. This property is removed in API version v1beta2.").equals(lm.formattedMessage())); + "and should now be configured using spec.zookeeper.template.pod.tolerations. This property is removed in API version v1beta2.")); logger.assertLoggedAtLeastOnce(lm -> lm.level() == Level.WARN - && ("Kafka resource testname in namespace testnamespace: " + + && lm.formattedMessage().matches("Reconciliation #[0-9]*\\(test\\) kind\\(namespace\\/name\\): " + "In API version v1alpha1 the object kafkaListeners at path spec.kafka.listeners.kafkaListeners has been deprecated. 
" + - "This object has been replaced with GenericKafkaListener and is removed in API version v1beta2.").equals(lm.formattedMessage())); + "This object has been replaced with GenericKafkaListener and is removed in API version v1beta2.")); logger.assertLoggedAtLeastOnce(lm -> lm.level() == Level.WARN - && ("Kafka resource testname in namespace testnamespace: " + + && lm.formattedMessage().matches("Reconciliation #[0-9]*\\(test\\) kind\\(namespace\\/name\\): " + "In API version v1alpha1 the object topicOperator at path spec.topicOperator has been deprecated. " + - "This object has been replaced with EntityTopicOperatorSpec and is removed in API version v1beta2.").equals(lm.formattedMessage())); + "This object has been replaced with EntityTopicOperatorSpec and is removed in API version v1beta2.")); } @Test public void testV1Beta1Deprecations() { Kafka k = TestUtils.fromYaml("/v1beta1Deprecations.yaml", Kafka.class, true); assertThat(k, is(notNullValue())); - TestLogger logger = new TestLogger((Logger) LogManager.getLogger(ValidationVisitorTest.class)); + TestLogger logger = TestLogger.create(ValidationVisitorTest.class); HasMetadata resource = new KafkaBuilder() .withNewMetadata() .withName("testname") @@ -90,7 +90,7 @@ public void testV1Beta1Deprecations() { Set warningConditions = new HashSet<>(); - ResourceVisitor.visit(k, new ValidationVisitor(resource, logger, warningConditions)); + ResourceVisitor.visit(Reconciliation.DUMMY_RECONCILIATION, k, new ValidationVisitor(resource, logger, warningConditions)); List warningMessages = warningConditions.stream().map(Condition::getMessage).collect(Collectors.toList()); @@ -100,34 +100,34 @@ public void testV1Beta1Deprecations() { "This object has been replaced with EntityTopicOperatorSpec and is removed in API version v1beta2.")); logger.assertLoggedAtLeastOnce(lm -> lm.level() == Level.WARN - && ("Kafka resource testname in namespace testnamespace: " + + && lm.formattedMessage().matches("Reconciliation #[0-9]*\\(test\\) kind\\(namespace\\/name\\): " + "In API version v1alpha1 the affinity property at path spec.zookeeper.affinity has been deprecated, and " + "should now be configured using spec.zookeeper.template.pod.affinity. " + - "This property is removed in API version v1beta2.").equals(lm.formattedMessage())); + "This property is removed in API version v1beta2.")); logger.assertLoggedAtLeastOnce(lm -> lm.level() == Level.WARN - && ("Kafka resource testname in namespace testnamespace: " + + && lm.formattedMessage().matches("Reconciliation #[0-9]*\\(test\\) kind\\(namespace\\/name\\): " + "In API version v1alpha1 the tolerations property at path spec.zookeeper.tolerations has been deprecated, " + "and should now be configured using spec.zookeeper.template.pod.tolerations. " + - "This property is removed in API version v1beta2.").equals(lm.formattedMessage())); + "This property is removed in API version v1beta2.")); logger.assertLoggedAtLeastOnce(lm -> lm.level() == Level.WARN - && ("Kafka resource testname in namespace testnamespace: " + + && lm.formattedMessage().matches("Reconciliation #[0-9]*\\(test\\) kind\\(namespace\\/name\\): " + "In API version v1alpha1 the affinity property at path spec.kafka.affinity has been deprecated, " + "and should now be configured using spec.kafka.template.pod.affinity. 
" + - "This property is removed in API version v1beta2.").equals(lm.formattedMessage())); + "This property is removed in API version v1beta2.")); logger.assertLoggedAtLeastOnce(lm -> lm.level() == Level.WARN - && ("Kafka resource testname in namespace testnamespace: " + + && lm.formattedMessage().matches("Reconciliation #[0-9]*\\(test\\) kind\\(namespace\\/name\\): " + "In API version v1alpha1 the tolerations property at path spec.kafka.tolerations has been deprecated, " + "and should now be configured using spec.kafka.template.pod.tolerations. " + - "This property is removed in API version v1beta2.").equals(lm.formattedMessage())); + "This property is removed in API version v1beta2.")); logger.assertLoggedAtLeastOnce(lm -> lm.level() == Level.WARN - && ("Kafka resource testname in namespace testnamespace: " + + && lm.formattedMessage().matches("Reconciliation #[0-9]*\\(test\\) kind\\(namespace\\/name\\): " + "In API version v1alpha1 the topicOperator property at path spec.topicOperator has been deprecated, " + "and should now be configured using spec.entityOperator.topicOperator. " + - "This property is removed in API version v1beta2.").equals(lm.formattedMessage())); + "This property is removed in API version v1beta2.")); logger.assertLoggedAtLeastOnce(lm -> lm.level() == Level.WARN - && ("Kafka resource testname in namespace testnamespace: " + + && lm.formattedMessage().matches("Reconciliation #[0-9]*\\(test\\) kind\\(namespace\\/name\\): " + "In API version v1alpha1 the object topicOperator at path spec.topicOperator has been deprecated. " + - "This object has been replaced with EntityTopicOperatorSpec and is removed in API version v1beta2.").equals(lm.formattedMessage())); + "This object has been replaced with EntityTopicOperatorSpec and is removed in API version v1beta2.")); } diff --git a/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/AbstractCustomResourceOperatorIT.java b/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/AbstractCustomResourceOperatorIT.java index 0794b97a94..c61ec66a0b 100644 --- a/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/AbstractCustomResourceOperatorIT.java +++ b/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/AbstractCustomResourceOperatorIT.java @@ -13,6 +13,7 @@ import io.strimzi.api.kafka.model.status.ConditionBuilder; import io.strimzi.operator.KubernetesVersion; import io.strimzi.operator.PlatformFeaturesAvailability; +import io.strimzi.operator.common.Reconciliation; import io.strimzi.test.k8s.KubeClusterResource; import io.strimzi.test.k8s.cluster.KubeCluster; import io.vertx.core.Promise; @@ -52,7 +53,7 @@ // to correctly setup the test environment before the tests. 
@TestInstance(TestInstance.Lifecycle.PER_CLASS) public abstract class AbstractCustomResourceOperatorIT> { - protected static final Logger log = LogManager.getLogger(AbstractCustomResourceOperatorIT.class); + protected static final Logger LOGGER = LogManager.getLogger(AbstractCustomResourceOperatorIT.class); protected static final String RESOURCE_NAME = "my-test-resource"; protected static final Condition READY_CONDITION = new ConditionBuilder() .withType("Ready") @@ -83,19 +84,19 @@ public void before() { client = new DefaultKubernetesClient(); if (cluster.getTestNamespace() != null && System.getenv("SKIP_TEARDOWN") == null) { - log.warn("Namespace {} is already created, going to delete it", namespace); + LOGGER.warn("Namespace {} is already created, going to delete it", namespace); kubeClient().deleteNamespace(namespace); cmdKubeClient().waitForResourceDeletion("Namespace", namespace); } - log.info("Creating namespace: {}", namespace); + LOGGER.info("Creating namespace: {}", namespace); kubeClient().createNamespace(namespace); cmdKubeClient().waitForResourceCreation("Namespace", namespace); - log.info("Creating CRD"); + LOGGER.info("Creating CRD"); cluster.createCustomResources(getCrd()); cluster.waitForCustomResourceDefinition(getCrdName()); - log.info("Created CRD"); + LOGGER.info("Created CRD"); } @AfterAll @@ -104,7 +105,7 @@ public void after() { String namespace = getNamespace(); if (kubeClient().getNamespace(namespace) != null && System.getenv("SKIP_TEARDOWN") == null) { - log.warn("Deleting namespace {} after tests run", namespace); + LOGGER.warn("Deleting namespace {} after tests run", namespace); kubeClient().deleteNamespace(namespace); cmdKubeClient().waitForResourceDeletion("Namespace", namespace); } @@ -118,7 +119,7 @@ public void testUpdateStatus(VertxTestContext context) { CrdOperator op = operator(); - log.info("Getting Kubernetes version"); + LOGGER.info("Getting Kubernetes version"); PlatformFeaturesAvailability.create(vertx, client) .onComplete(context.succeeding(pfa -> context.verify(() -> { assertThat("Kubernetes version : " + pfa.getKubernetesVersion() + " is too old", @@ -126,15 +127,15 @@ public void testUpdateStatus(VertxTestContext context) { }))) .compose(pfa -> { - log.info("Creating resource"); - return op.reconcile(namespace, resourceName, getResource(resourceName)); + LOGGER.info("Creating resource"); + return op.reconcile(Reconciliation.DUMMY_RECONCILIATION, namespace, resourceName, getResource(resourceName)); }) .onComplete(context.succeeding()) .compose(rrCreated -> { T newStatus = getResourceWithNewReadyStatus(rrCreated.resource()); - log.info("Updating resource status"); - return op.updateStatusAsync(newStatus); + LOGGER.info("Updating resource status"); + return op.updateStatusAsync(Reconciliation.DUMMY_RECONCILIATION, newStatus); }) .onComplete(context.succeeding()) @@ -144,8 +145,8 @@ public void testUpdateStatus(VertxTestContext context) { }))) .compose(rrModified -> { - log.info("Deleting resource"); - return op.reconcile(namespace, resourceName, null); + LOGGER.info("Deleting resource"); + return op.reconcile(Reconciliation.DUMMY_RECONCILIATION, namespace, resourceName, null); }) .onComplete(context.succeeding(rrDeleted -> async.flag())); } @@ -166,29 +167,29 @@ public void testUpdateStatusAfterResourceDeletedThrowsKubernetesClientException( AtomicReference newStatus = new AtomicReference<>(); - log.info("Getting Kubernetes version"); + LOGGER.info("Getting Kubernetes version"); PlatformFeaturesAvailability.create(vertx, client) 
.onComplete(context.succeeding(pfa -> context.verify(() -> { assertThat("Kubernetes version : " + pfa.getKubernetesVersion() + " is too old", pfa.getKubernetesVersion().compareTo(KubernetesVersion.V1_16), CoreMatchers.is(not(lessThan(0)))); }))) .compose(pfa -> { - log.info("Creating resource"); - return op.reconcile(namespace, resourceName, getResource(resourceName)); + LOGGER.info("Creating resource"); + return op.reconcile(Reconciliation.DUMMY_RECONCILIATION, namespace, resourceName, getResource(resourceName)); }) .onComplete(context.succeeding()) .compose(rr -> { - log.info("Saving resource with status change prior to deletion"); + LOGGER.info("Saving resource with status change prior to deletion"); newStatus.set(getResourceWithNewReadyStatus(op.get(namespace, resourceName))); - log.info("Deleting resource"); - return op.reconcile(namespace, resourceName, null); + LOGGER.info("Deleting resource"); + return op.reconcile(Reconciliation.DUMMY_RECONCILIATION, namespace, resourceName, null); }) .onComplete(context.succeeding()) .compose(rrDeleted -> { - log.info("Updating resource with new status - should fail"); - return op.updateStatusAsync(newStatus.get()); + LOGGER.info("Updating resource with new status - should fail"); + return op.updateStatusAsync(Reconciliation.DUMMY_RECONCILIATION, newStatus.get()); }) .onComplete(context.failing(e -> context.verify(() -> { assertThat(e, instanceOf(KubernetesClientException.class)); @@ -211,26 +212,26 @@ public void testUpdateStatusAfterResourceUpdatedThrowsKubernetesClientException( Promise updateFailed = Promise.promise(); - log.info("Getting Kubernetes version"); + LOGGER.info("Getting Kubernetes version"); PlatformFeaturesAvailability.create(vertx, client) .onComplete(context.succeeding(pfa -> context.verify(() -> { assertThat("Kubernetes version : " + pfa.getKubernetesVersion() + " is too old", pfa.getKubernetesVersion().compareTo(KubernetesVersion.V1_16), CoreMatchers.is(not(lessThan(0)))); }))) .compose(pfa -> { - log.info("Creating resource"); - return op.reconcile(namespace, resourceName, getResource(resourceName)); + LOGGER.info("Creating resource"); + return op.reconcile(Reconciliation.DUMMY_RECONCILIATION, namespace, resourceName, getResource(resourceName)); }) .onComplete(context.succeeding()) .compose(rrCreated -> { T updated = getResourceWithModifications(rrCreated.resource()); T newStatus = getResourceWithNewReadyStatus(rrCreated.resource()); - log.info("Updating resource (mocking an update due to some other reason)"); + LOGGER.info("Updating resource (mocking an update due to some other reason)"); op.operation().inNamespace(namespace).withName(resourceName).patch(updated); - log.info("Updating resource status after underlying resource has changed"); - return op.updateStatusAsync(newStatus); + LOGGER.info("Updating resource status after underlying resource has changed"); + return op.updateStatusAsync(Reconciliation.DUMMY_RECONCILIATION, newStatus); }) .onComplete(context.failing(e -> context.verify(() -> { assertThat("Exception was not KubernetesClientException, it was : " + e.toString(), @@ -239,8 +240,8 @@ public void testUpdateStatusAfterResourceUpdatedThrowsKubernetesClientException( }))); updateFailed.future().compose(v -> { - log.info("Deleting resource"); - return op.reconcile(namespace, resourceName, null); + LOGGER.info("Deleting resource"); + return op.reconcile(Reconciliation.DUMMY_RECONCILIATION, namespace, resourceName, null); }) .onComplete(context.succeeding(v -> async.flag())); } diff --git 
a/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/AbstractNonNamespacedResourceOperatorIT.java b/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/AbstractNonNamespacedResourceOperatorIT.java index 1d7b5e0496..722dc85d67 100644 --- a/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/AbstractNonNamespacedResourceOperatorIT.java +++ b/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/AbstractNonNamespacedResourceOperatorIT.java @@ -9,6 +9,7 @@ import io.fabric8.kubernetes.client.DefaultKubernetesClient; import io.fabric8.kubernetes.client.KubernetesClient; import io.fabric8.kubernetes.client.dsl.Resource; +import io.strimzi.operator.common.Reconciliation; import io.strimzi.operator.common.Util; import io.strimzi.test.k8s.cluster.KubeCluster; import io.vertx.core.Vertx; @@ -75,25 +76,25 @@ public void testCreateModifyDelete(VertxTestContext context) { T newResource = getOriginal(); T modResource = getModified(); - op.reconcile(resourceName, newResource) + op.reconcile(Reconciliation.DUMMY_RECONCILIATION, resourceName, newResource) .onComplete(context.succeeding(rrCreate -> context.verify(() -> { T created = op.get(resourceName); assertThat("Failed to get created Resource", created, is(notNullValue())); assertResources(context, newResource, created); }))) - .compose(rr -> op.reconcile(resourceName, modResource)) + .compose(rr -> op.reconcile(Reconciliation.DUMMY_RECONCILIATION, resourceName, modResource)) .onComplete(context.succeeding(rrModified -> context.verify(() -> { T modified = (T) op.get(resourceName); assertThat("Failed to get modified Resource", modified, is(notNullValue())); assertResources(context, modResource, modified); }))) - .compose(rr -> op.reconcile(resourceName, null)) + .compose(rr -> op.reconcile(Reconciliation.DUMMY_RECONCILIATION, resourceName, null)) .onComplete(context.succeeding(rrDelete -> context.verify(() -> { // it seems the resource is cached for some time so we need wait for it to be null context.verify(() -> { - Util.waitFor(vertx, "resource deletion " + resourceName, "deleted", 1000, + Util.waitFor(Reconciliation.DUMMY_RECONCILIATION, vertx, "resource deletion " + resourceName, "deleted", 1000, 30_000, () -> op.get(resourceName) == null) .onComplete(del -> { assertThat(op.get(resourceName), is(nullValue())); diff --git a/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/AbstractNonNamespacedResourceOperatorTest.java b/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/AbstractNonNamespacedResourceOperatorTest.java index 258c1f173e..d2a9a20651 100644 --- a/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/AbstractNonNamespacedResourceOperatorTest.java +++ b/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/AbstractNonNamespacedResourceOperatorTest.java @@ -13,6 +13,7 @@ import io.fabric8.kubernetes.client.dsl.MixedOperation; import io.fabric8.kubernetes.client.dsl.NonNamespaceOperation; import io.fabric8.kubernetes.client.dsl.Resource; +import io.strimzi.operator.common.Reconciliation; import io.vertx.core.Vertx; import io.vertx.junit5.Checkpoint; import io.vertx.junit5.VertxExtension; @@ -110,7 +111,7 @@ public void testCreateWhenExistsWithChangeIsAPatch(VertxTestContext context) { AbstractNonNamespacedResourceOperator op = createResourceOperations(vertx, mockClient); Checkpoint async = context.checkpoint(); - op.createOrUpdate(modifiedResource()) + 
op.createOrUpdate(Reconciliation.DUMMY_RECONCILIATION, modifiedResource()) .onComplete(context.succeeding(ar -> { verify(mockResource).get(); verify(mockResource).patch(any()); @@ -142,7 +143,7 @@ public void testCreateWhenExistsWithoutChangeIsNotAPatch(VertxTestContext contex AbstractNonNamespacedResourceOperator op = createResourceOperations(vertx, mockClient); Checkpoint async = context.checkpoint(); - op.createOrUpdate(resource()) + op.createOrUpdate(Reconciliation.DUMMY_RECONCILIATION, resource()) .onComplete(context.succeeding(ar -> { verify(mockResource).get(); verify(mockResource, never()).patch(any()); @@ -174,7 +175,7 @@ public void testCreateOrUpdateThrowsWhenExistenceCheckThrows(VertxTestContext co AbstractNonNamespacedResourceOperator op = createResourceOperations(vertx, mockClient); Checkpoint async = context.checkpoint(); - op.createOrUpdate(resource).onComplete(context.failing(e -> { + op.createOrUpdate(Reconciliation.DUMMY_RECONCILIATION, resource).onComplete(context.failing(e -> { context.verify(() -> assertThat(e, is(ex))); async.flag(); })); @@ -200,7 +201,7 @@ public void testSuccessfulCreation(VertxTestContext context) { vertx, mockClient); Checkpoint async = context.checkpoint(); - op.createOrUpdate(resource).onComplete(context.succeeding(rr -> { + op.createOrUpdate(Reconciliation.DUMMY_RECONCILIATION, resource).onComplete(context.succeeding(rr -> { verify(mockResource).get(); verify(mockResource).create(eq(resource)); async.flag(); @@ -228,7 +229,7 @@ public void testCreateOrUpdateThrowsWhenCreateThrows(VertxTestContext context) { AbstractNonNamespacedResourceOperator op = createResourceOperations(vertx, mockClient); Checkpoint async = context.checkpoint(); - op.createOrUpdate(resource).onComplete(context.failing(e -> { + op.createOrUpdate(Reconciliation.DUMMY_RECONCILIATION, resource).onComplete(context.failing(e -> { context.verify(() -> assertThat(e, is(ex))); async.flag(); })); @@ -251,7 +252,7 @@ public void testDeletionWhenResourceDoesNotExistIsANop(VertxTestContext context) AbstractNonNamespacedResourceOperator op = createResourceOperations(vertx, mockClient); Checkpoint async = context.checkpoint(); - op.reconcile(resource.getMetadata().getName(), null).onComplete(context.succeeding(rrDeleted -> { + op.reconcile(Reconciliation.DUMMY_RECONCILIATION, resource.getMetadata().getName(), null).onComplete(context.succeeding(rrDeleted -> { verify(mockResource).get(); verify(mockResource, never()).delete(); async.flag(); @@ -287,7 +288,7 @@ public void testDeletionWhenResourceExistsStillDeletes(VertxTestContext context) AbstractNonNamespacedResourceOperator op = createResourceOperations(vertx, mockClient); Checkpoint async = context.checkpoint(); - op.reconcile(resource.getMetadata().getName(), null).onComplete(context.succeeding(rrDeleted -> { + op.reconcile(Reconciliation.DUMMY_RECONCILIATION, resource.getMetadata().getName(), null).onComplete(context.succeeding(rrDeleted -> { verify(mockResource).delete(); context.verify(() -> assertThat("Watch was not closed", watchWasClosed.get(), is(true))); async.flag(); @@ -321,7 +322,7 @@ public void testReconcileThrowsWhenDeletionTimesOut(VertxTestContext context) { AbstractNonNamespacedResourceOperator op = createResourceOperations(vertx, mockClient); Checkpoint async = context.checkpoint(); - op.reconcile(resource.getMetadata().getName(), null).onComplete(context.failing(e -> context.verify(() -> { + op.reconcile(Reconciliation.DUMMY_RECONCILIATION, resource.getMetadata().getName(), null).onComplete(context.failing(e -> 
context.verify(() -> { assertThat(e, instanceOf(TimeoutException.class)); verify(mockResource).delete(); assertThat("Watch was not closed", watchWasClosed.get(), is(true)); @@ -358,7 +359,7 @@ public void testReconcileDeletionSuccessful(VertxTestContext context) { AbstractNonNamespacedResourceOperator op = createResourceOperations(vertx, mockClient); Checkpoint async = context.checkpoint(); - op.reconcile(resource.getMetadata().getName(), null).onComplete(context.succeeding(rrDeleted -> { + op.reconcile(Reconciliation.DUMMY_RECONCILIATION, resource.getMetadata().getName(), null).onComplete(context.succeeding(rrDeleted -> { verify(mockResource).delete(); context.verify(() -> assertThat("Watch was not closed", watchWasClosed.get(), is(true))); async.flag(); @@ -395,7 +396,7 @@ public void testReconcileDeletionThrowsWhenDeleteMethodThrows(VertxTestContext c AbstractNonNamespacedResourceOperator op = createResourceOperations(vertx, mockClient); Checkpoint async = context.checkpoint(); - op.reconcile(resource.getMetadata().getName(), null).onComplete(context.failing(e -> context.verify(() -> { + op.reconcile(Reconciliation.DUMMY_RECONCILIATION, resource.getMetadata().getName(), null).onComplete(context.failing(e -> context.verify(() -> { assertThat(e, is(ex)); assertThat("Watch was not closed", watchWasClosed.get(), is(true)); async.flag(); @@ -424,7 +425,7 @@ public void testReconcileDeletionThrowsWhenWatchMethodThrows(VertxTestContext co AbstractNonNamespacedResourceOperator op = createResourceOperations(vertx, mockClient); Checkpoint async = context.checkpoint(); - op.reconcile(resource.getMetadata().getName(), null).onComplete(context.failing(e -> { + op.reconcile(Reconciliation.DUMMY_RECONCILIATION, resource.getMetadata().getName(), null).onComplete(context.failing(e -> { context.verify(() -> assertThat(e, is(ex))); async.flag(); })); @@ -460,7 +461,7 @@ public void testReconcileDeletionThrowsWhenDeleteMethodReturnsFalse(VertxTestCon AbstractNonNamespacedResourceOperator op = createResourceOperations(vertx, mockClient); Checkpoint async = context.checkpoint(); - op.reconcile(resource.getMetadata().getName(), null).onComplete(context.failing(e -> { + op.reconcile(Reconciliation.DUMMY_RECONCILIATION, resource.getMetadata().getName(), null).onComplete(context.failing(e -> { verify(mockResource).delete(); context.verify(() -> assertThat("Watch was not closed", watchWasClosed.get(), is(true))); async.flag(); diff --git a/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/AbstractReadyResourceOperatorTest.java b/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/AbstractReadyResourceOperatorTest.java index b86e2292ab..9749dc0f6e 100644 --- a/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/AbstractReadyResourceOperatorTest.java +++ b/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/AbstractReadyResourceOperatorTest.java @@ -10,6 +10,7 @@ import io.fabric8.kubernetes.client.dsl.MixedOperation; import io.fabric8.kubernetes.client.dsl.NonNamespaceOperation; import io.fabric8.kubernetes.client.dsl.Resource; +import io.strimzi.operator.common.Reconciliation; import io.vertx.core.Vertx; import io.vertx.junit5.Checkpoint; import io.vertx.junit5.VertxTestContext; @@ -50,7 +51,7 @@ public void testReadinessThrowsWhenResourceDoesNotExist(VertxTestContext context AbstractReadyResourceOperator op = createResourceOperations(vertx, mockClient); Checkpoint async = context.checkpoint(); - op.readiness(NAMESPACE, 
RESOURCE_NAME, 20, 100) + op.readiness(Reconciliation.DUMMY_RECONCILIATION, NAMESPACE, RESOURCE_NAME, 20, 100) .onComplete(context.failing(e -> context.verify(() -> { assertThat(e, instanceOf(TimeoutException.class)); verify(mockResource, atLeastOnce()).get(); @@ -80,7 +81,7 @@ public void testReadinessThrowsWhenExistenceCheckThrows(VertxTestContext context AbstractReadyResourceOperator op = createResourceOperations(vertx, mockClient); Checkpoint async = context.checkpoint(); - op.readiness(NAMESPACE, RESOURCE_NAME, 20, 100) + op.readiness(Reconciliation.DUMMY_RECONCILIATION, NAMESPACE, RESOURCE_NAME, 20, 100) .onComplete(context.failing(e -> context.verify(() -> { assertThat(e, instanceOf(TimeoutException.class)); verify(mockResource, never()).isReady(); @@ -132,7 +133,7 @@ public void waitUntilReadySuccessful(VertxTestContext context, int unreadyCount) AbstractReadyResourceOperator op = createResourceOperations(vertx, mockClient); Checkpoint async = context.checkpoint(); - op.readiness(NAMESPACE, RESOURCE_NAME, 20, 5_000) + op.readiness(Reconciliation.DUMMY_RECONCILIATION, NAMESPACE, RESOURCE_NAME, 20, 5_000) .onComplete(context.succeeding(v -> { verify(mockResource, times(unreadyCount + 1)).isReady(); async.flag(); @@ -159,7 +160,7 @@ public void testWaitUntilReadyUnsuccessful(VertxTestContext context) { AbstractReadyResourceOperator op = createResourceOperations(vertx, mockClient); Checkpoint async = context.checkpoint(); - op.readiness(NAMESPACE, RESOURCE_NAME, 20, 100) + op.readiness(Reconciliation.DUMMY_RECONCILIATION, NAMESPACE, RESOURCE_NAME, 20, 100) .onComplete(context.failing(e -> context.verify(() -> { assertThat(e, instanceOf(TimeoutException.class)); verify(mockResource, atLeastOnce()).get(); @@ -190,7 +191,7 @@ public void testWaitUntilReadyThrows(VertxTestContext context) { AbstractReadyResourceOperator op = createResourceOperations(vertx, mockClient); Checkpoint async = context.checkpoint(); - op.readiness(NAMESPACE, RESOURCE_NAME, 20, 100) + op.readiness(Reconciliation.DUMMY_RECONCILIATION, NAMESPACE, RESOURCE_NAME, 20, 100) .onComplete(context.failing(e -> context.verify(() -> { assertThat(e, instanceOf(TimeoutException.class)); async.flag(); diff --git a/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/AbstractResourceOperatorIT.java b/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/AbstractResourceOperatorIT.java index 0dd4154610..ae7ef71d9d 100644 --- a/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/AbstractResourceOperatorIT.java +++ b/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/AbstractResourceOperatorIT.java @@ -9,6 +9,7 @@ import io.fabric8.kubernetes.client.DefaultKubernetesClient; import io.fabric8.kubernetes.client.KubernetesClient; import io.fabric8.kubernetes.client.dsl.Resource; +import io.strimzi.operator.common.Reconciliation; import io.strimzi.operator.common.Util; import io.strimzi.test.k8s.KubeClusterResource; import io.strimzi.test.k8s.cluster.KubeCluster; @@ -45,7 +46,7 @@ public abstract class AbstractResourceOperatorIT, R extends Resource> { - protected static final Logger log = LogManager.getLogger(AbstractResourceOperatorIT.class); + protected static final Logger LOGGER = LogManager.getLogger(AbstractResourceOperatorIT.class); public static final String RESOURCE_NAME = "my-test-resource"; protected String resourceName; protected static Vertx vertx; @@ -69,12 +70,12 @@ public static void before() { client = new DefaultKubernetesClient(); 
if (cluster.getTestNamespace() != null && System.getenv("SKIP_TEARDOWN") == null) { - log.warn("Namespace {} is already created, going to delete it", namespace); + LOGGER.warn("Namespace {} is already created, going to delete it", namespace); kubeClient().deleteNamespace(namespace); cmdKubeClient().waitForResourceDeletion("Namespace", namespace); } - log.info("Creating namespace: {}", namespace); + LOGGER.info("Creating namespace: {}", namespace); kubeClient().createNamespace(namespace); cmdKubeClient().waitForResourceCreation("Namespace", namespace); } @@ -83,7 +84,7 @@ public static void before() { public static void after() { vertx.close(); if (kubeClient().getNamespace(namespace) != null && System.getenv("SKIP_TEARDOWN") == null) { - log.warn("Deleting namespace {} after tests run", namespace); + LOGGER.warn("Deleting namespace {} after tests run", namespace); kubeClient().deleteNamespace(namespace); cmdKubeClient().waitForResourceDeletion("Namespace", namespace); } @@ -102,25 +103,25 @@ public void testCreateModifyDelete(VertxTestContext context) { T newResource = getOriginal(); T modResource = getModified(); - op.reconcile(namespace, resourceName, newResource) + op.reconcile(Reconciliation.DUMMY_RECONCILIATION, namespace, resourceName, newResource) .onComplete(context.succeeding(rrCreated -> { T created = op.get(namespace, resourceName); context.verify(() -> assertThat(created, is(notNullValue()))); assertResources(context, newResource, created); })) - .compose(rr -> op.reconcile(namespace, resourceName, modResource)) + .compose(rr -> op.reconcile(Reconciliation.DUMMY_RECONCILIATION, namespace, resourceName, modResource)) .onComplete(context.succeeding(rrModified -> { T modified = op.get(namespace, resourceName); context.verify(() -> assertThat(modified, is(notNullValue()))); assertResources(context, modResource, modified); })) - .compose(rr -> op.reconcile(namespace, resourceName, null)) + .compose(rr -> op.reconcile(Reconciliation.DUMMY_RECONCILIATION, namespace, resourceName, null)) .onComplete(context.succeeding(rrDeleted -> { // it seems the resource is cached for some time so we need wait for it to be null context.verify(() -> { - Util.waitFor(vertx, "resource deletion " + resourceName, "deleted", 1000, + Util.waitFor(Reconciliation.DUMMY_RECONCILIATION, vertx, "resource deletion " + resourceName, "deleted", 1000, 30_000, () -> op.get(namespace, resourceName) == null) .onComplete(del -> { assertThat(op.get(namespace, resourceName), is(nullValue())); diff --git a/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/AbstractResourceOperatorTest.java b/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/AbstractResourceOperatorTest.java index 4d4540e699..cad90ae90c 100644 --- a/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/AbstractResourceOperatorTest.java +++ b/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/AbstractResourceOperatorTest.java @@ -14,6 +14,7 @@ import io.fabric8.kubernetes.client.dsl.MixedOperation; import io.fabric8.kubernetes.client.dsl.NonNamespaceOperation; import io.fabric8.kubernetes.client.dsl.Resource; +import io.strimzi.operator.common.Reconciliation; import io.vertx.core.Vertx; import io.vertx.junit5.Checkpoint; import io.vertx.junit5.VertxExtension; @@ -112,7 +113,7 @@ public void testCreateWhenExistsWithChangeIsAPatch(VertxTestContext context, boo AbstractResourceOperator op = createResourceOperations(vertx, mockClient); Checkpoint async = 
context.checkpoint(); - op.createOrUpdate(modifiedResource()).onComplete(context.succeeding(rr -> context.verify(() -> { + op.createOrUpdate(Reconciliation.DUMMY_RECONCILIATION, modifiedResource()).onComplete(context.succeeding(rr -> context.verify(() -> { verify(mockResource).get(); verify(mockResource).patch(any()); verify(mockResource, never()).create(any()); @@ -147,7 +148,7 @@ public void testCreateWhenExistsWithoutChangeIsNotAPatch(VertxTestContext contex AbstractResourceOperator op = createResourceOperations(vertx, mockClient); Checkpoint async = context.checkpoint(); - op.createOrUpdate(resource()).onComplete(context.succeeding(rr -> context.verify(() -> { + op.createOrUpdate(Reconciliation.DUMMY_RECONCILIATION, resource()).onComplete(context.succeeding(rr -> context.verify(() -> { verify(mockResource).get(); verify(mockResource, never()).patch(any()); verify(mockResource, never()).create(any()); @@ -178,7 +179,7 @@ public void testExistenceCheckThrows(VertxTestContext context) { AbstractResourceOperator op = createResourceOperations(vertx, mockClient); Checkpoint async = context.checkpoint(); - op.createOrUpdate(resource).onComplete(context.failing(e -> context.verify(() -> { + op.createOrUpdate(Reconciliation.DUMMY_RECONCILIATION, resource).onComplete(context.failing(e -> context.verify(() -> { assertThat(e, is(ex)); async.flag(); }))); @@ -203,7 +204,7 @@ public void testSuccessfulCreation(VertxTestContext context) { AbstractResourceOperator op = createResourceOperationsWithMockedReadiness(vertx, mockClient); Checkpoint async = context.checkpoint(); - op.createOrUpdate(resource).onComplete(context.succeeding(rr -> context.verify(() -> { + op.createOrUpdate(Reconciliation.DUMMY_RECONCILIATION, resource).onComplete(context.succeeding(rr -> context.verify(() -> { verify(mockResource).get(); verify(mockResource).create(eq(resource)); async.flag(); @@ -231,7 +232,7 @@ public void testCreateOrUpdateThrowsWhenCreateThrows(VertxTestContext context) { AbstractResourceOperator op = createResourceOperations(vertx, mockClient); Checkpoint async = context.checkpoint(); - op.createOrUpdate(resource).onComplete(context.failing(e -> { + op.createOrUpdate(Reconciliation.DUMMY_RECONCILIATION, resource).onComplete(context.failing(e -> { context.verify(() -> assertThat(e, is(ex))); async.flag(); })); @@ -254,7 +255,7 @@ public void testDeleteWhenResourceDoesNotExistIsANop(VertxTestContext context) { AbstractResourceOperator op = createResourceOperations(vertx, mockClient); Checkpoint async = context.checkpoint(); - op.reconcile(resource.getMetadata().getNamespace(), resource.getMetadata().getName(), null) + op.reconcile(Reconciliation.DUMMY_RECONCILIATION, resource.getMetadata().getNamespace(), resource.getMetadata().getName(), null) .onComplete(context.succeeding(rr -> context.verify(() -> { verify(mockResource).get(); verify(mockResource, never()).delete(); @@ -295,7 +296,7 @@ public void testReconcileDeleteWhenResourceExistsStillDeletes(VertxTestContext c AbstractResourceOperator op = createResourceOperations(vertx, mockClient); Checkpoint async = context.checkpoint(); - op.reconcile(resource.getMetadata().getNamespace(), resource.getMetadata().getName(), null) + op.reconcile(Reconciliation.DUMMY_RECONCILIATION, resource.getMetadata().getNamespace(), resource.getMetadata().getName(), null) .onComplete(context.succeeding(rr -> context.verify(() -> { verify(mockDeletable).delete(); async.flag(); @@ -335,7 +336,7 @@ public void testReconcileDeletionSuccessfullyDeletes(VertxTestContext context) { 
AbstractResourceOperator op = createResourceOperations(vertx, mockClient); Checkpoint async = context.checkpoint(); - op.reconcile(resource.getMetadata().getNamespace(), resource.getMetadata().getName(), null) + op.reconcile(Reconciliation.DUMMY_RECONCILIATION, resource.getMetadata().getNamespace(), resource.getMetadata().getName(), null) .onComplete(context.succeeding(rr -> context.verify(() -> { verify(mockDeletable).delete(); async.flag(); @@ -378,7 +379,7 @@ public void testReconcileDeleteThrowsWhenDeletionThrows(VertxTestContext context AbstractResourceOperator op = createResourceOperations(vertx, mockClient); Checkpoint async = context.checkpoint(); - op.reconcile(resource.getMetadata().getNamespace(), resource.getMetadata().getName(), null) + op.reconcile(Reconciliation.DUMMY_RECONCILIATION, resource.getMetadata().getNamespace(), resource.getMetadata().getName(), null) .onComplete(context.failing(e -> context.verify(() -> { assertThat(e, is(ex)); async.flag(); @@ -419,7 +420,7 @@ public void testReconcileDeleteThrowsWhenDeletionReturnsFalse(VertxTestContext c AbstractResourceOperator op = createResourceOperations(vertx, mockClient); Checkpoint async = context.checkpoint(); - op.reconcile(resource.getMetadata().getNamespace(), resource.getMetadata().getName(), null) + op.reconcile(Reconciliation.DUMMY_RECONCILIATION, resource.getMetadata().getNamespace(), resource.getMetadata().getName(), null) .onComplete(context.failing(e -> context.verify(() -> { assertThat(e.getMessage(), endsWith("could not be deleted (returned false)")); async.flag(); diff --git a/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/KafkaBridgeCrdOperatorIT.java b/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/KafkaBridgeCrdOperatorIT.java index e0c19fcb0a..6d23fada67 100644 --- a/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/KafkaBridgeCrdOperatorIT.java +++ b/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/KafkaBridgeCrdOperatorIT.java @@ -28,7 +28,7 @@ */ @ExtendWith(VertxExtension.class) public class KafkaBridgeCrdOperatorIT extends AbstractCustomResourceOperatorIT { - protected static final Logger log = LogManager.getLogger(KafkaBridgeCrdOperatorIT.class); + protected static final Logger LOGGER = LogManager.getLogger(KafkaBridgeCrdOperatorIT.class); @Override protected CrdOperator operator() { diff --git a/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/KafkaConnectCrdOperatorIT.java b/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/KafkaConnectCrdOperatorIT.java index ecc9fd7227..21d0f18d9d 100644 --- a/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/KafkaConnectCrdOperatorIT.java +++ b/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/KafkaConnectCrdOperatorIT.java @@ -26,7 +26,7 @@ */ @ExtendWith(VertxExtension.class) public class KafkaConnectCrdOperatorIT extends AbstractCustomResourceOperatorIT { - protected static final Logger log = LogManager.getLogger(KafkaConnectCrdOperatorIT.class); + protected static final Logger LOGGER = LogManager.getLogger(KafkaConnectCrdOperatorIT.class); @Override protected CrdOperator operator() { diff --git a/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/KafkaConnectS2ICrdOperatorIT.java b/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/KafkaConnectS2ICrdOperatorIT.java index cad0fa8c52..3acab04d5b 100644 --- 
a/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/KafkaConnectS2ICrdOperatorIT.java +++ b/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/KafkaConnectS2ICrdOperatorIT.java @@ -26,7 +26,7 @@ */ @ExtendWith(VertxExtension.class) public class KafkaConnectS2ICrdOperatorIT extends AbstractCustomResourceOperatorIT { - protected static final Logger log = LogManager.getLogger(KafkaConnectS2ICrdOperatorIT.class); + protected static final Logger LOGGER = LogManager.getLogger(KafkaConnectS2ICrdOperatorIT.class); @Override protected CrdOperator operator() { diff --git a/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/KafkaConnectorCrdOperatorIT.java b/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/KafkaConnectorCrdOperatorIT.java index bea513f597..f66178895f 100644 --- a/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/KafkaConnectorCrdOperatorIT.java +++ b/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/KafkaConnectorCrdOperatorIT.java @@ -26,7 +26,7 @@ */ @ExtendWith(VertxExtension.class) public class KafkaConnectorCrdOperatorIT extends AbstractCustomResourceOperatorIT { - protected static final Logger log = LogManager.getLogger(KafkaConnectorCrdOperatorIT.class); + protected static final Logger LOGGER = LogManager.getLogger(KafkaConnectorCrdOperatorIT.class); @Override protected CrdOperator operator() { diff --git a/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/KafkaCrdOperatorIT.java b/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/KafkaCrdOperatorIT.java index db0bd24199..262b785f47 100644 --- a/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/KafkaCrdOperatorIT.java +++ b/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/KafkaCrdOperatorIT.java @@ -28,7 +28,7 @@ */ @ExtendWith(VertxExtension.class) public class KafkaCrdOperatorIT extends AbstractCustomResourceOperatorIT { - protected static final Logger log = LogManager.getLogger(KafkaCrdOperatorIT.class); + protected static final Logger LOGGER = LogManager.getLogger(KafkaCrdOperatorIT.class); @Override protected CrdOperator operator() { diff --git a/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/KafkaCrdOperatorTest.java b/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/KafkaCrdOperatorTest.java index 91b5f8f7f2..b6bbb8af3a 100644 --- a/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/KafkaCrdOperatorTest.java +++ b/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/KafkaCrdOperatorTest.java @@ -14,6 +14,7 @@ import io.strimzi.api.kafka.model.listener.KafkaListenersBuilder; import io.strimzi.api.kafka.model.listener.arraylistener.ArrayOrObjectKafkaListeners; import io.strimzi.api.kafka.model.status.ConditionBuilder; +import io.strimzi.operator.common.Reconciliation; import io.vertx.core.Vertx; import io.vertx.junit5.Checkpoint; import io.vertx.junit5.VertxTestContext; @@ -110,7 +111,7 @@ public void testUpdateStatusAsync(VertxTestContext context) throws IOException { Checkpoint async = context.checkpoint(); createResourceOperations(vertx, mockClient) - .updateStatusAsync(resource()) + .updateStatusAsync(Reconciliation.DUMMY_RECONCILIATION, resource()) .onComplete(context.succeeding(kafka -> async.flag())); } } diff --git 
a/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/KafkaMirrorMaker2CrdOperatorIT.java b/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/KafkaMirrorMaker2CrdOperatorIT.java index 85746e5fc3..0577ddfd70 100644 --- a/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/KafkaMirrorMaker2CrdOperatorIT.java +++ b/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/KafkaMirrorMaker2CrdOperatorIT.java @@ -26,7 +26,7 @@ */ @ExtendWith(VertxExtension.class) public class KafkaMirrorMaker2CrdOperatorIT extends AbstractCustomResourceOperatorIT { - protected static final Logger log = LogManager.getLogger(KafkaMirrorMaker2CrdOperatorIT.class); + protected static final Logger LOGGER = LogManager.getLogger(KafkaMirrorMaker2CrdOperatorIT.class); @Override protected CrdOperator operator() { diff --git a/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/KafkaMirrorMakerCrdOperatorIT.java b/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/KafkaMirrorMakerCrdOperatorIT.java index 20c76e8517..c2a8655868 100644 --- a/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/KafkaMirrorMakerCrdOperatorIT.java +++ b/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/KafkaMirrorMakerCrdOperatorIT.java @@ -27,7 +27,7 @@ */ @ExtendWith(VertxExtension.class) public class KafkaMirrorMakerCrdOperatorIT extends AbstractCustomResourceOperatorIT { - protected static final Logger log = LogManager.getLogger(KafkaMirrorMakerCrdOperatorIT.class); + protected static final Logger LOGGER = LogManager.getLogger(KafkaMirrorMakerCrdOperatorIT.class); @Override protected CrdOperator operator() { diff --git a/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/KafkaUserCrdOperatorIT.java b/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/KafkaUserCrdOperatorIT.java index d2d5ceac24..d16c2bb71f 100644 --- a/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/KafkaUserCrdOperatorIT.java +++ b/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/KafkaUserCrdOperatorIT.java @@ -26,7 +26,7 @@ */ @ExtendWith(VertxExtension.class) public class KafkaUserCrdOperatorIT extends AbstractCustomResourceOperatorIT { - protected static final Logger log = LogManager.getLogger(KafkaUserCrdOperatorIT.class); + protected static final Logger LOGGER = LogManager.getLogger(KafkaUserCrdOperatorIT.class); @Override protected CrdOperator operator() { diff --git a/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/PodOperatorTest.java b/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/PodOperatorTest.java index febe39b345..aaffb95b4a 100644 --- a/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/PodOperatorTest.java +++ b/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/PodOperatorTest.java @@ -11,6 +11,7 @@ import io.fabric8.kubernetes.client.dsl.MixedOperation; import io.fabric8.kubernetes.client.dsl.PodResource; import io.fabric8.kubernetes.client.dsl.Resource; +import io.strimzi.operator.common.Reconciliation; import io.strimzi.operator.common.model.Labels; import io.strimzi.test.mockkube.MockKube; import io.vertx.core.Vertx; @@ -38,13 +39,13 @@ public void testCreateReadUpdate(VertxTestContext context) { context.verify(() -> assertThat(pr.list(NAMESPACE, Labels.EMPTY), 
is(emptyList()))); Checkpoint async = context.checkpoint(1); - pr.createOrUpdate(resource()).onComplete(createResult -> { + pr.createOrUpdate(Reconciliation.DUMMY_RECONCILIATION, resource()).onComplete(createResult -> { context.verify(() -> assertThat(createResult.succeeded(), is(true))); context.verify(() -> assertThat(pr.list(NAMESPACE, Labels.EMPTY).stream() .map(p -> p.getMetadata().getName()) .collect(Collectors.toList()), is(singletonList(RESOURCE_NAME)))); - pr.reconcile(NAMESPACE, RESOURCE_NAME, null).onComplete(deleteResult -> { + pr.reconcile(Reconciliation.DUMMY_RECONCILIATION, NAMESPACE, RESOURCE_NAME, null).onComplete(deleteResult -> { context.verify(() -> assertThat(deleteResult.succeeded(), is(true))); async.flag(); }); diff --git a/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/ServiceAccountOperatorIT.java b/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/ServiceAccountOperatorIT.java index 48f20a7867..69bda11016 100644 --- a/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/ServiceAccountOperatorIT.java +++ b/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/ServiceAccountOperatorIT.java @@ -10,6 +10,7 @@ import io.fabric8.kubernetes.api.model.ServiceAccountList; import io.fabric8.kubernetes.client.KubernetesClient; import io.fabric8.kubernetes.client.dsl.Resource; +import io.strimzi.operator.common.Reconciliation; import io.strimzi.operator.common.Util; import io.vertx.junit5.Checkpoint; import io.vertx.junit5.VertxExtension; @@ -67,10 +68,10 @@ public void testCreateModifyDelete(VertxTestContext context) { List secrets = new ArrayList<>(); - op.reconcile(namespace, resourceName, newResource) + op.reconcile(Reconciliation.DUMMY_RECONCILIATION, namespace, resourceName, newResource) .compose(rr -> { // Wait for the service account to be created and secrets added - return Util.waitFor(vertx, "token secrets created for service account " + resourceName, "has tokens", 1000, + return Util.waitFor(Reconciliation.DUMMY_RECONCILIATION, vertx, "token secrets created for service account " + resourceName, "has tokens", 1000, 30_000, () -> !op.get(namespace, resourceName).getSecrets().isEmpty()); }) .onComplete(context.succeeding(rrCreated -> { @@ -80,7 +81,7 @@ public void testCreateModifyDelete(VertxTestContext context) { assertResources(context, newResource, created); secrets.addAll(created.getSecrets()); })) - .compose(rr -> op.reconcile(namespace, resourceName, modResource)) + .compose(rr -> op.reconcile(Reconciliation.DUMMY_RECONCILIATION, namespace, resourceName, modResource)) .onComplete(context.succeeding(rrModified -> { ServiceAccount modified = op.get(namespace, resourceName); @@ -88,10 +89,10 @@ public void testCreateModifyDelete(VertxTestContext context) { assertResources(context, modResource, modified); context.verify(() -> assertThat(modified.getSecrets(), is(secrets))); })) - .compose(rr -> op.reconcile(namespace, resourceName, null)) + .compose(rr -> op.reconcile(Reconciliation.DUMMY_RECONCILIATION, namespace, resourceName, null)) .onComplete(context.succeeding(rrDeleted -> { // it seems the resource is cached for some time so we need wait for it to be null - context.verify(() -> Util.waitFor(vertx, "resource deletion " + resourceName, "deleted", 1000, + context.verify(() -> Util.waitFor(Reconciliation.DUMMY_RECONCILIATION, vertx, "resource deletion " + resourceName, "deleted", 1000, 30_000, () -> op.get(namespace, resourceName) == null) .onComplete(del -> { 
assertThat(op.get(namespace, resourceName), Matchers.is(nullValue())); diff --git a/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/ServiceAccountOperatorTest.java b/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/ServiceAccountOperatorTest.java index 1cb51e21e1..abda88e2ab 100644 --- a/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/ServiceAccountOperatorTest.java +++ b/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/ServiceAccountOperatorTest.java @@ -14,6 +14,7 @@ import io.fabric8.kubernetes.client.dsl.MixedOperation; import io.fabric8.kubernetes.client.dsl.NonNamespaceOperation; import io.fabric8.kubernetes.client.dsl.Resource; +import io.strimzi.operator.common.Reconciliation; import io.vertx.core.Vertx; import io.vertx.junit5.Checkpoint; import io.vertx.junit5.VertxTestContext; @@ -107,7 +108,7 @@ public void testCreateWhenExistsWithChangeIsAPatch(VertxTestContext context, boo ServiceAccountOperator op = new ServiceAccountOperator(vertx, mockClient); Checkpoint async = context.checkpoint(); - op.createOrUpdate(resource) + op.createOrUpdate(Reconciliation.DUMMY_RECONCILIATION, resource) .onComplete(context.succeeding(rr -> { context.verify(() -> assertThat(rr, instanceOf(ReconcileResult.Noop.class))); verify(mockResource).get(); @@ -162,7 +163,7 @@ public void testSecretsPatching(VertxTestContext context) { ServiceAccountOperator op = new ServiceAccountOperator(vertx, mockClient, true); Checkpoint async = context.checkpoint(); - op.reconcile(NAMESPACE, RESOURCE_NAME, desired) + op.reconcile(Reconciliation.DUMMY_RECONCILIATION, NAMESPACE, RESOURCE_NAME, desired) .onComplete(context.succeeding(rr -> { verify(mockResource, times(1)).patch(any()); diff --git a/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/ServiceOperatorTest.java b/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/ServiceOperatorTest.java index 8ac3abe125..e0a937d2ec 100644 --- a/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/ServiceOperatorTest.java +++ b/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/ServiceOperatorTest.java @@ -12,6 +12,7 @@ import io.fabric8.kubernetes.client.KubernetesClient; import io.fabric8.kubernetes.client.dsl.MixedOperation; import io.fabric8.kubernetes.client.dsl.ServiceResource; +import io.strimzi.operator.common.Reconciliation; import io.vertx.core.Vertx; import org.junit.jupiter.api.Test; @@ -157,7 +158,7 @@ void testCattleAnnotationPatching() { .build(); ServiceOperator op = new ServiceOperator(vertx, client); - op.internalPatch(NAMESPACE, RESOURCE_NAME, current, desired); + op.internalPatch(Reconciliation.DUMMY_RECONCILIATION, NAMESPACE, RESOURCE_NAME, current, desired); assertThat(desired.getMetadata().getAnnotations().get("field.cattle.io~1publicEndpoints"), equalTo("foo")); assertThat(desired.getMetadata().getAnnotations().get("cattle.io/test"), equalTo("bar")); diff --git a/operator-common/src/test/java/io/strimzi/test/logging/TestLogger.java b/operator-common/src/test/java/io/strimzi/test/logging/TestLogger.java index 9ad87c7fa2..18da4dc6aa 100644 --- a/operator-common/src/test/java/io/strimzi/test/logging/TestLogger.java +++ b/operator-common/src/test/java/io/strimzi/test/logging/TestLogger.java @@ -4,12 +4,14 @@ */ package io.strimzi.test.logging; +import io.strimzi.operator.common.Reconciliation; +import io.strimzi.operator.common.ReconciliationLogger; import 
org.apache.logging.log4j.Level; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.Marker; -import org.apache.logging.log4j.core.Logger; -import org.apache.logging.log4j.message.FormattedMessageFactory; -import org.apache.logging.log4j.message.Message; import org.apache.logging.log4j.spi.AbstractLogger; +import org.apache.logging.log4j.spi.ExtendedLoggerWrapper; import java.util.ArrayList; import java.util.List; @@ -18,17 +20,17 @@ import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.MatcherAssert.assertThat; -public class TestLogger extends AbstractLogger { +public class TestLogger extends ReconciliationLogger { public static class LoggedMessage { private final Level level; private final Marker marker; private final String formattedMessage; private final Throwable throwable; - LoggedMessage(Level level, Marker marker, Message message, Throwable throwable) { + public LoggedMessage(Level level, Marker marker, String msg, Throwable throwable) { this.level = level; this.marker = marker; - this.formattedMessage = message.getFormattedMessage(); + this.formattedMessage = msg; this.throwable = throwable; } @@ -49,14 +51,17 @@ public Throwable throwable() { } } - private final Logger delegate; - private List loggedMessages = new ArrayList<>(); + private TestLogger(final Logger logger) { + super(new ExtendedLoggerWrapper((AbstractLogger) logger, logger.getName(), logger.getMessageFactory())); + } - public TestLogger(Logger delegate) { - super("test", new FormattedMessageFactory()); - this.delegate = delegate; + public static TestLogger create(final Class loggerName) { + final Logger wrapped = LogManager.getLogger(loggerName); + return new TestLogger(wrapped); } + private List loggedMessages = new ArrayList<>(); + public List getLoggedMessages() { return loggedMessages; } @@ -70,95 +75,7 @@ public void assertNotLogged(Predicate test) { } @Override - public Level getLevel() { - return Level.TRACE; - } - - @Override - public boolean isEnabled(Level level, Marker marker, Message message, Throwable throwable) { - return true; - } - - @Override - public boolean isEnabled(Level level, Marker marker, CharSequence charSequence, Throwable throwable) { - return true; - } - - @Override - public boolean isEnabled(Level level, Marker marker, Object o, Throwable throwable) { - return true; - } - - @Override - public boolean isEnabled(Level level, Marker marker, String s, Throwable throwable) { - return true; - } - - @Override - public boolean isEnabled(Level level, Marker marker, String s) { - return true; - } - - @Override - public boolean isEnabled(Level level, Marker marker, String s, Object... 
objects) { - return true; - } - - @Override - public boolean isEnabled(Level level, Marker marker, String s, Object o) { - return true; - } - - @Override - public boolean isEnabled(Level level, Marker marker, String s, Object o, Object o1) { - return true; - } - - @Override - public boolean isEnabled(Level level, Marker marker, String s, Object o, Object o1, Object o2) { - return true; - } - - @Override - public boolean isEnabled(Level level, Marker marker, String s, Object o, Object o1, Object o2, Object o3) { - return true; - } - - @Override - public boolean isEnabled(Level level, Marker marker, String s, Object o, Object o1, Object o2, Object o3, Object o4) { - return true; - } - - @Override - public boolean isEnabled(Level level, Marker marker, String s, Object o, Object o1, Object o2, Object o3, Object o4, Object o5) { - return true; - } - - @Override - public boolean isEnabled(Level level, Marker marker, String s, Object o, Object o1, Object o2, Object o3, Object o4, Object o5, Object o6) { - return true; - } - - @Override - public boolean isEnabled(Level level, Marker marker, String s, Object o, Object o1, Object o2, Object o3, Object o4, Object o5, Object o6, Object o7) { - return true; - } - - @Override - public boolean isEnabled(Level level, Marker marker, String s, Object o, Object o1, Object o2, Object o3, Object o4, Object o5, Object o6, Object o7, Object o8) { - return true; - } - - @Override - public boolean isEnabled(Level level, Marker marker, String s, Object o, Object o1, Object o2, Object o3, Object o4, Object o5, Object o6, Object o7, Object o8, Object o9) { - return true; - } - - @Override - public void logMessage(String s, Level level, Marker marker, Message message, Throwable throwable) { - if (delegate != null) { - delegate.logMessage(s, level, marker, message, throwable); - } - loggedMessages.add(new LoggedMessage(level, marker, message, throwable)); + public void warnCr(Reconciliation reconciliation, String msg) { + loggedMessages.add(new LoggedMessage(Level.WARN, null, reconciliation.toString() + ": " + msg, null)); } } diff --git a/systemtest/src/main/java/io/strimzi/systemtest/utils/kafkaUtils/KafkaRebalanceUtils.java b/systemtest/src/main/java/io/strimzi/systemtest/utils/kafkaUtils/KafkaRebalanceUtils.java index 7279c5bf5e..f2905ee91b 100644 --- a/systemtest/src/main/java/io/strimzi/systemtest/utils/kafkaUtils/KafkaRebalanceUtils.java +++ b/systemtest/src/main/java/io/strimzi/systemtest/utils/kafkaUtils/KafkaRebalanceUtils.java @@ -10,13 +10,13 @@ import io.strimzi.api.kafka.model.balancing.KafkaRebalanceState; import io.strimzi.api.kafka.model.status.KafkaRebalanceStatus; import io.strimzi.operator.common.Annotations; +import io.strimzi.operator.common.Reconciliation; +import io.strimzi.operator.common.ReconciliationLogger; import io.strimzi.systemtest.Constants; import io.strimzi.systemtest.resources.ResourceManager; import io.strimzi.systemtest.resources.ResourceOperation; import io.strimzi.systemtest.resources.crd.KafkaRebalanceResource; import io.strimzi.test.TestUtils; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import java.util.Arrays; import java.util.List; @@ -26,11 +26,11 @@ public class KafkaRebalanceUtils { - private static final Logger LOGGER = LogManager.getLogger(KafkaRebalanceUtils.class); + private static final ReconciliationLogger LOGGER = ReconciliationLogger.create(KafkaRebalanceUtils.class); private KafkaRebalanceUtils() {} - private static Condition rebalanceStateCondition(String namespaceName, 
String resourceName) { + private static Condition rebalanceStateCondition(Reconciliation reconciliation, String namespaceName, String resourceName) { List statusConditions = KafkaRebalanceResource.kafkaRebalanceClient().inNamespace(namespaceName) .withName(resourceName).get().getStatus().getConditions().stream() @@ -41,10 +41,10 @@ private static Condition rebalanceStateCondition(String namespaceName, String re if (statusConditions.size() == 1) { return statusConditions.get(0); } else if (statusConditions.size() > 1) { - LOGGER.warn("Multiple KafkaRebalance State Conditions were present in the KafkaRebalance status"); + LOGGER.warnCr(reconciliation, "Multiple KafkaRebalance State Conditions were present in the KafkaRebalance status"); throw new RuntimeException("Multiple KafkaRebalance State Conditions were present in the KafkaRebalance status"); } else { - LOGGER.warn("No KafkaRebalance State Conditions were present in the KafkaRebalance status"); + LOGGER.warnCr(reconciliation, "No KafkaRebalance State Conditions were present in the KafkaRebalance status"); throw new RuntimeException("No KafkaRebalance State Conditions were present in the KafkaRebalance status"); } } @@ -58,46 +58,46 @@ public static boolean waitForKafkaRebalanceCustomResourceState(String resourceNa return waitForKafkaRebalanceCustomResourceState(kubeClient().getNamespace(), resourceName, state); } - public static String annotateKafkaRebalanceResource(String namespaceName, String resourceName, KafkaRebalanceAnnotation annotation) { - LOGGER.info("Annotating KafkaRebalance:{} with annotation {}", resourceName, annotation.toString()); + public static String annotateKafkaRebalanceResource(Reconciliation reconciliation, String namespaceName, String resourceName, KafkaRebalanceAnnotation annotation) { + LOGGER.infoCr(reconciliation, "Annotating KafkaRebalance:{} with annotation {}", resourceName, annotation.toString()); return ResourceManager.cmdKubeClient().namespace(namespaceName) .execInCurrentNamespace("annotate", "kafkarebalance", resourceName, Annotations.ANNO_STRIMZI_IO_REBALANCE + "=" + annotation.toString()) .out() .trim(); } - public static String annotateKafkaRebalanceResource(String resourceName, KafkaRebalanceAnnotation annotation) { - return annotateKafkaRebalanceResource(kubeClient().getNamespace(), resourceName, annotation); + public static String annotateKafkaRebalanceResource(Reconciliation reconciliation, String resourceName, KafkaRebalanceAnnotation annotation) { + return annotateKafkaRebalanceResource(reconciliation, kubeClient().getNamespace(), resourceName, annotation); } - public static void doRebalancingProcess(String namespaceName, String rebalanceName) { + public static void doRebalancingProcess(Reconciliation reconciliation, String namespaceName, String rebalanceName) { // it can sometimes happen that KafkaRebalance is already in the ProposalReady state -> race condition prevention - if (!rebalanceStateCondition(namespaceName, rebalanceName).getType().equals(KafkaRebalanceState.ProposalReady.name())) { - LOGGER.info("Verifying that KafkaRebalance resource is in {} state", KafkaRebalanceState.PendingProposal); + if (!rebalanceStateCondition(reconciliation, namespaceName, rebalanceName).getType().equals(KafkaRebalanceState.ProposalReady.name())) { + LOGGER.infoCr(reconciliation, "Verifying that KafkaRebalance resource is in {} state", KafkaRebalanceState.PendingProposal); waitForKafkaRebalanceCustomResourceState(namespaceName, rebalanceName, KafkaRebalanceState.PendingProposal); - 
LOGGER.info("Verifying that KafkaRebalance resource is in {} state", KafkaRebalanceState.ProposalReady); + LOGGER.infoCr(reconciliation, "Verifying that KafkaRebalance resource is in {} state", KafkaRebalanceState.ProposalReady); waitForKafkaRebalanceCustomResourceState(namespaceName, rebalanceName, KafkaRebalanceState.ProposalReady); } - LOGGER.info("Triggering the rebalance with annotation {} of KafkaRebalance resource", "strimzi.io/rebalance=approve"); + LOGGER.infoCr(reconciliation, "Triggering the rebalance with annotation {} of KafkaRebalance resource", "strimzi.io/rebalance=approve"); - String response = annotateKafkaRebalanceResource(namespaceName, rebalanceName, KafkaRebalanceAnnotation.approve); + String response = annotateKafkaRebalanceResource(reconciliation, namespaceName, rebalanceName, KafkaRebalanceAnnotation.approve); - LOGGER.info("Response from the annotation process {}", response); + LOGGER.infoCr(reconciliation, "Response from the annotation process {}", response); - LOGGER.info("Verifying that annotation triggers the {} state", KafkaRebalanceState.Rebalancing); + LOGGER.infoCr(reconciliation, "Verifying that annotation triggers the {} state", KafkaRebalanceState.Rebalancing); waitForKafkaRebalanceCustomResourceState(namespaceName, rebalanceName, KafkaRebalanceState.Rebalancing); - LOGGER.info("Verifying that KafkaRebalance is in the {} state", KafkaRebalanceState.Ready); + LOGGER.infoCr(reconciliation, "Verifying that KafkaRebalance is in the {} state", KafkaRebalanceState.Ready); waitForKafkaRebalanceCustomResourceState(namespaceName, rebalanceName, KafkaRebalanceState.Ready); } - public static void waitForRebalanceStatusStability(String namespaceName, String resourceName) { + public static void waitForRebalanceStatusStability(Reconciliation reconciliation, String namespaceName, String resourceName) { int[] stableCounter = {0}; KafkaRebalanceStatus oldStatus = KafkaRebalanceResource.kafkaRebalanceClient().inNamespace(namespaceName).withName(resourceName).get().getStatus(); @@ -106,20 +106,20 @@ public static void waitForRebalanceStatusStability(String namespaceName, String if (KafkaRebalanceResource.kafkaRebalanceClient().inNamespace(namespaceName).withName(resourceName).get().getStatus().equals(oldStatus)) { stableCounter[0]++; if (stableCounter[0] == Constants.GLOBAL_STABILITY_OFFSET_COUNT) { - LOGGER.info("KafkaRebalance status is stable for {} polls intervals", stableCounter[0]); + LOGGER.infoCr(reconciliation, "KafkaRebalance status is stable for {} polls intervals", stableCounter[0]); return true; } } else { - LOGGER.info("KafkaRebalance status is not stable. Going to set the counter to zero."); + LOGGER.infoCr(reconciliation, "KafkaRebalance status is not stable. 
Going to set the counter to zero."); stableCounter[0] = 0; return false; } - LOGGER.info("KafkaRebalance status gonna be stable in {} polls", Constants.GLOBAL_STABILITY_OFFSET_COUNT - stableCounter[0]); + LOGGER.infoCr(reconciliation, "KafkaRebalance status gonna be stable in {} polls", Constants.GLOBAL_STABILITY_OFFSET_COUNT - stableCounter[0]); return false; }); } - public static void waitForRebalanceStatusStability(String resourceName) { - waitForRebalanceStatusStability(kubeClient().getNamespace(), resourceName); + public static void waitForRebalanceStatusStability(Reconciliation reconciliation, String resourceName) { + waitForRebalanceStatusStability(reconciliation, kubeClient().getNamespace(), resourceName); } } diff --git a/systemtest/src/test/java/io/strimzi/systemtest/cruisecontrol/CruiseControlST.java b/systemtest/src/test/java/io/strimzi/systemtest/cruisecontrol/CruiseControlST.java index bd7aea4e7f..f4004e13ec 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/cruisecontrol/CruiseControlST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/cruisecontrol/CruiseControlST.java @@ -5,12 +5,14 @@ package io.strimzi.systemtest.cruisecontrol; import io.strimzi.api.kafka.model.CruiseControlResources; +import io.strimzi.api.kafka.model.KafkaRebalance; import io.strimzi.api.kafka.model.KafkaTopicSpec; import io.strimzi.api.kafka.model.status.KafkaRebalanceStatus; import io.strimzi.api.kafka.model.status.KafkaStatus; +import io.strimzi.operator.common.Reconciliation; +import io.strimzi.systemtest.AbstractST; import io.strimzi.api.kafka.model.balancing.KafkaRebalanceAnnotation; import io.strimzi.api.kafka.model.balancing.KafkaRebalanceState; -import io.strimzi.systemtest.AbstractST; import io.strimzi.systemtest.Constants; import io.strimzi.systemtest.annotations.IsolatedTest; import io.strimzi.systemtest.annotations.ParallelNamespaceTest; @@ -105,14 +107,14 @@ void testCruiseControlWithRebalanceResourceAndRefreshAnnotation(ExtensionContext resourceManager.createResource(extensionContext, KafkaTemplates.kafkaWithCruiseControl(clusterName, 3, 3).build()); resourceManager.createResource(extensionContext, KafkaRebalanceTemplates.kafkaRebalance(clusterName).build()); - KafkaRebalanceUtils.doRebalancingProcess(NAMESPACE, clusterName); + KafkaRebalanceUtils.doRebalancingProcess(new Reconciliation("test", KafkaRebalance.RESOURCE_KIND, NAMESPACE, clusterName), NAMESPACE, clusterName); LOGGER.info("Annotating KafkaRebalance: {} with 'refresh' anno", clusterName); - KafkaRebalanceUtils.annotateKafkaRebalanceResource(clusterName, KafkaRebalanceAnnotation.refresh); + KafkaRebalanceUtils.annotateKafkaRebalanceResource(new Reconciliation("test", KafkaRebalance.RESOURCE_KIND, NAMESPACE, clusterName), clusterName, KafkaRebalanceAnnotation.refresh); KafkaRebalanceUtils.waitForKafkaRebalanceCustomResourceState(clusterName, KafkaRebalanceState.ProposalReady); LOGGER.info("Trying rebalancing process again"); - KafkaRebalanceUtils.doRebalancingProcess(NAMESPACE, clusterName); + KafkaRebalanceUtils.doRebalancingProcess(new Reconciliation("test", KafkaRebalance.RESOURCE_KIND, NAMESPACE, clusterName), NAMESPACE, clusterName); } @ParallelNamespaceTest @@ -169,7 +171,7 @@ void testCruiseControlTopicExclusion(ExtensionContext extensionContext) { assertThat(kafkaRebalanceStatus.getOptimizationResult().get("excludedTopics").toString(), containsString(excludedTopic2)); assertThat(kafkaRebalanceStatus.getOptimizationResult().get("excludedTopics").toString(), not(containsString(includedTopic))); - 
KafkaRebalanceUtils.annotateKafkaRebalanceResource(namespaceName, clusterName, KafkaRebalanceAnnotation.approve); + KafkaRebalanceUtils.annotateKafkaRebalanceResource(new Reconciliation("test", KafkaRebalance.RESOURCE_KIND, NAMESPACE, clusterName), namespaceName, clusterName, KafkaRebalanceAnnotation.approve); KafkaRebalanceUtils.waitForKafkaRebalanceCustomResourceState(namespaceName, clusterName, KafkaRebalanceState.Ready); } diff --git a/systemtest/src/test/java/io/strimzi/systemtest/operators/MultipleClusterOperatorsST.java b/systemtest/src/test/java/io/strimzi/systemtest/operators/MultipleClusterOperatorsST.java index f66a0aba17..a0776848eb 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/operators/MultipleClusterOperatorsST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/operators/MultipleClusterOperatorsST.java @@ -7,8 +7,10 @@ import io.fabric8.kubernetes.api.model.EnvVar; import io.fabric8.kubernetes.api.model.rbac.ClusterRoleBinding; import io.strimzi.api.kafka.model.KafkaConnect; +import io.strimzi.api.kafka.model.KafkaRebalance; import io.strimzi.api.kafka.model.KafkaResources; import io.strimzi.operator.common.Annotations; +import io.strimzi.operator.common.Reconciliation; import io.strimzi.operator.common.model.Labels; import io.strimzi.systemtest.AbstractST; import io.strimzi.systemtest.Constants; @@ -175,7 +177,7 @@ void testKafkaCCAndRebalanceWithMultipleCOs(ExtensionContext extensionContext) { assertThat(StatefulSetUtils.ssSnapshot(KafkaResources.kafkaStatefulSetName(clusterName)).size(), is(scaleTo)); - KafkaRebalanceUtils.doRebalancingProcess(DEFAULT_NAMESPACE, clusterName); + KafkaRebalanceUtils.doRebalancingProcess(new Reconciliation("test", KafkaRebalance.RESOURCE_KIND, SECOND_NAMESPACE, clusterName), DEFAULT_NAMESPACE, clusterName); } void deployCOInNamespace(ExtensionContext extensionContext, String coName, String coNamespace, EnvVar selectorEnv, boolean multipleNamespaces) { diff --git a/systemtest/src/test/java/io/strimzi/systemtest/operators/ReconciliationST.java b/systemtest/src/test/java/io/strimzi/systemtest/operators/ReconciliationST.java index d9b4a2ac51..dc68eac41d 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/operators/ReconciliationST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/operators/ReconciliationST.java @@ -6,10 +6,12 @@ import io.strimzi.api.kafka.model.KafkaConnect; import io.strimzi.api.kafka.model.KafkaConnectResources; +import io.strimzi.api.kafka.model.KafkaRebalance; import io.strimzi.api.kafka.model.KafkaResources; import io.strimzi.api.kafka.model.balancing.KafkaRebalanceAnnotation; import io.strimzi.api.kafka.model.balancing.KafkaRebalanceState; import io.strimzi.operator.common.Annotations; +import io.strimzi.operator.common.Reconciliation; import io.strimzi.operator.common.model.Labels; import io.strimzi.systemtest.AbstractST; import io.strimzi.systemtest.Constants; @@ -159,11 +161,11 @@ void testPauseReconciliationInKafkaRebalanceAndTopic(ExtensionContext extensionC KafkaRebalanceUtils.waitForKafkaRebalanceCustomResourceState(namespaceName, clusterName, KafkaRebalanceState.ReconciliationPaused); - KafkaRebalanceUtils.annotateKafkaRebalanceResource(namespaceName, clusterName, KafkaRebalanceAnnotation.approve); + KafkaRebalanceUtils.annotateKafkaRebalanceResource(new Reconciliation("test", KafkaRebalance.RESOURCE_KIND, namespaceName, clusterName), namespaceName, clusterName, KafkaRebalanceAnnotation.approve); // unfortunately we don't have any option to check, if something is changed when 
reconciliations are paused // so we will check stability of status - KafkaRebalanceUtils.waitForRebalanceStatusStability(namespaceName, clusterName); + KafkaRebalanceUtils.waitForRebalanceStatusStability(new Reconciliation("test", KafkaRebalance.RESOURCE_KIND, namespaceName, clusterName), namespaceName, clusterName); LOGGER.info("Setting annotation to \"false\" and waiting for KafkaRebalance to be in {} state", KafkaRebalanceState.Ready); KafkaRebalanceResource.replaceKafkaRebalanceResourceInSpecificNamespace(clusterName, @@ -172,7 +174,7 @@ void testPauseReconciliationInKafkaRebalanceAndTopic(ExtensionContext extensionC KafkaRebalanceUtils.waitForKafkaRebalanceCustomResourceState(namespaceName, clusterName, KafkaRebalanceState.ProposalReady); // because approve annotation wasn't reflected, approving again - KafkaRebalanceUtils.annotateKafkaRebalanceResource(namespaceName, clusterName, KafkaRebalanceAnnotation.approve); + KafkaRebalanceUtils.annotateKafkaRebalanceResource(new Reconciliation("test", KafkaRebalance.RESOURCE_KIND, namespaceName, clusterName), namespaceName, clusterName, KafkaRebalanceAnnotation.approve); KafkaRebalanceUtils.waitForKafkaRebalanceCustomResourceState(namespaceName, clusterName, KafkaRebalanceState.Ready); } diff --git a/systemtest/src/test/java/io/strimzi/systemtest/security/oauth/OauthAuthorizationST.java b/systemtest/src/test/java/io/strimzi/systemtest/security/oauth/OauthAuthorizationST.java index 050ea92193..2ae53d37b5 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/security/oauth/OauthAuthorizationST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/security/oauth/OauthAuthorizationST.java @@ -417,7 +417,6 @@ void testSuperUserWithOauthAuthorization(ExtensionContext extensionContext) { * 6) Try to send messages to topic with `a-` -> we should still be able to sent messages, because we didn't changed the permissions * 6) Change the permissions back and check that the messages are correctly sent * - * * The re-authentication can be seen in the log of team-a-producer pod. */ @IsolatedTest("Modification of shared Kafka cluster") diff --git a/topic-operator/src/main/java/io/strimzi/operator/topic/K8s.java b/topic-operator/src/main/java/io/strimzi/operator/topic/K8s.java index ce690700c4..7784f735e7 100644 --- a/topic-operator/src/main/java/io/strimzi/operator/topic/K8s.java +++ b/topic-operator/src/main/java/io/strimzi/operator/topic/K8s.java @@ -6,6 +6,7 @@ import io.fabric8.kubernetes.api.model.Event; import io.strimzi.api.kafka.model.KafkaTopic; +import io.strimzi.operator.common.Reconciliation; import io.vertx.core.AsyncResult; import io.vertx.core.Future; @@ -15,6 +16,7 @@ public interface K8s { /** * Asynchronously create the given resource. + * * @param topicResource The resource to be created. * @return A future which completes when the topic has been created. */ @@ -22,6 +24,7 @@ public interface K8s { /** * Asynchronously update the given resource. + * * @param topicResource The topic. * @return A future which completes when the topic has been updated. */ @@ -29,17 +32,21 @@ public interface K8s { /** * Asynchronously update the given resource's status. + * + * @param reconciliation The reconciliation * @param topicResource The topic. * @return A future which completes when the topic's status has been updated. */ - Future updateResourceStatus(KafkaTopic topicResource); + Future updateResourceStatus(Reconciliation reconciliation, KafkaTopic topicResource); /** * Asynchronously delete the given resource. 
+ * + * @param reconciliation The reconciliation * @param resourceName The name of the resource to be deleted. * @return A future which completes when the topic has been deleted. */ - Future deleteResource(ResourceName resourceName); + Future deleteResource(Reconciliation reconciliation, ResourceName resourceName); /** * Asynchronously list the resources. @@ -51,6 +58,7 @@ public interface K8s { * Get the resource with the given name, invoking the given handler with the result. * If a resource with the given name does not exist, the handler will be called with * a null {@link AsyncResult#result() result()}. + * * @param resourceName The name of the resource to get. * @return A future which completes with the topic */ @@ -58,6 +66,7 @@ public interface K8s { /** * Create an event. + * * @param event The event. * @return A future which completes when the event has been created. */ diff --git a/topic-operator/src/main/java/io/strimzi/operator/topic/K8sImpl.java b/topic-operator/src/main/java/io/strimzi/operator/topic/K8sImpl.java index 3b3d11af77..2183bbbe5e 100644 --- a/topic-operator/src/main/java/io/strimzi/operator/topic/K8sImpl.java +++ b/topic-operator/src/main/java/io/strimzi/operator/topic/K8sImpl.java @@ -12,6 +12,7 @@ import io.fabric8.kubernetes.client.dsl.Resource; import io.strimzi.api.kafka.KafkaTopicList; import io.strimzi.api.kafka.model.KafkaTopic; +import io.strimzi.operator.common.Reconciliation; import io.strimzi.operator.common.Util; import io.strimzi.operator.common.operator.resource.CrdOperator; import io.vertx.core.Future; @@ -79,12 +80,12 @@ public Future updateResource(KafkaTopic topicResource) { } @Override - public Future updateResourceStatus(KafkaTopic topicResource) { - return crdOperator.updateStatusAsync(topicResource); + public Future updateResourceStatus(Reconciliation ctx, KafkaTopic topicResource) { + return crdOperator.updateStatusAsync(ctx, topicResource); } @Override - public Future deleteResource(ResourceName resourceName) { + public Future deleteResource(Reconciliation reconciliation, ResourceName resourceName) { Promise handler = Promise.promise(); vertx.executeBlocking(future -> { try { @@ -93,7 +94,7 @@ public Future deleteResource(ResourceName resourceName) { LOGGER.warn("KafkaTopic {} could not be deleted, since it doesn't seem to exist", resourceName.toString()); future.complete(); } else { - Util.waitFor(vertx, "sync resource deletion " + resourceName, "deleted", 1000, Long.MAX_VALUE, () -> { + Util.waitFor(reconciliation, vertx, "sync resource deletion " + resourceName, "deleted", 1000, Long.MAX_VALUE, () -> { KafkaTopic kafkaTopic = operation().inNamespace(namespace).withName(resourceName.toString()).get(); boolean notExists = kafkaTopic == null; LOGGER.debug("KafkaTopic {} deleted {}", resourceName.toString(), notExists); diff --git a/topic-operator/src/main/java/io/strimzi/operator/topic/K8sTopicWatcher.java b/topic-operator/src/main/java/io/strimzi/operator/topic/K8sTopicWatcher.java index 04666e45ad..59be86b046 100644 --- a/topic-operator/src/main/java/io/strimzi/operator/topic/K8sTopicWatcher.java +++ b/topic-operator/src/main/java/io/strimzi/operator/topic/K8sTopicWatcher.java @@ -8,19 +8,18 @@ import io.fabric8.kubernetes.client.Watcher; import io.fabric8.kubernetes.client.WatcherException; import io.strimzi.api.kafka.model.KafkaTopic; +import io.strimzi.operator.common.ReconciliationLogger; import io.strimzi.operator.common.Annotations; import io.vertx.core.AsyncResult; import io.vertx.core.Future; import io.vertx.core.Handler; -import 
org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import java.util.Map; import java.util.Objects; class K8sTopicWatcher implements Watcher { - private final static Logger LOGGER = LogManager.getLogger(K8sTopicWatcher.class); + private final static ReconciliationLogger LOGGER = ReconciliationLogger.create(K8sTopicWatcher.class); private final Future initReconcileFuture; private final Runnable onHttpGoneTask; @@ -41,11 +40,11 @@ public void eventReceived(Action action, KafkaTopic kafkaTopic) { String name = metadata.getName(); String kind = kafkaTopic.getKind(); if (!initReconcileFuture.isComplete()) { - LOGGER.debug("Ignoring initial event for {} {} during initial reconcile", kind, name); + LOGGER.debugCr(logContext.toReconciliation(), "Ignoring initial event for {} {} during initial reconcile", kind, name); return; } if (action.equals(Action.ERROR)) { - LOGGER.error("{}: Watch received action=ERROR for {} {} {}", logContext, kind, name, kafkaTopic); + LOGGER.errorCr(logContext.toReconciliation(), "Watch received action=ERROR for {} {} {}", kind, name, kafkaTopic); } else { PauseAnnotationChanges pauseAnnotationChanges = pausedAnnotationChanged(kafkaTopic); if (action.equals(Action.DELETED) || shouldReconcile(kafkaTopic, metadata, pauseAnnotationChanges.isChanged())) { @@ -54,28 +53,28 @@ public void eventReceived(Action action, KafkaTopic kafkaTopic) { } else if (pauseAnnotationChanges.isResourceUnpausedByAnno()) { topicOperator.pausedTopicCounter.getAndDecrement(); } - LOGGER.info("{}: event {} on resource {} generation={}, labels={}", logContext, action, name, + LOGGER.infoCr(logContext.toReconciliation(), "event {} on resource {} generation={}, labels={}", action, name, metadata.getGeneration(), labels); Handler> resultHandler = ar -> { if (ar.succeeded()) { - LOGGER.info("{}: Success processing event {} on resource {} with labels {}", logContext, action, name, labels); + LOGGER.infoCr(logContext.toReconciliation(), "Success processing event {} on resource {} with labels {}", action, name, labels); } else { String message; if (ar.cause() instanceof InvalidTopicException) { message = kind + " " + name + " has an invalid spec section: " + ar.cause().getMessage(); - LOGGER.error("{}", message); + LOGGER.errorCr(logContext.toReconciliation(), message); } else { message = "Failure processing " + kind + " watch event " + action + " on resource " + name + " with labels " + labels + ": " + ar.cause().getMessage(); - LOGGER.error("{}: {}", logContext, message, ar.cause()); + LOGGER.errorCr(logContext.toReconciliation(), message, ar.cause()); } - topicOperator.enqueue(topicOperator.new Event(kafkaTopic, message, TopicOperator.EventType.WARNING, errorResult -> { + topicOperator.enqueue(logContext, topicOperator.new Event(logContext, kafkaTopic, message, TopicOperator.EventType.WARNING, errorResult -> { })); } }; topicOperator.onResourceEvent(logContext, kafkaTopic, action).onComplete(resultHandler); } else { - LOGGER.debug("{}: Ignoring {} to {} {} because metadata.generation==status.observedGeneration", logContext, action, kind, name); + LOGGER.debugCr(logContext.toReconciliation(), "Ignoring {} to {} {} because metadata.generation==status.observedGeneration", action, kind, name); } } } @@ -101,9 +100,9 @@ private PauseAnnotationChanges pausedAnnotationChanged(KafkaTopic kafkaTopic) { @Override public void onClose(WatcherException exception) { - LOGGER.debug("Closing {}", this); + LOGGER.debugOp("Closing {}", this); if (exception != null) { - LOGGER.debug("Restarting 
topic watcher due to ", exception); + LOGGER.debugOp("Restarting topic watcher due to ", exception); onHttpGoneTask.run(); } } diff --git a/topic-operator/src/main/java/io/strimzi/operator/topic/Kafka.java b/topic-operator/src/main/java/io/strimzi/operator/topic/Kafka.java index f5a852890a..f96997b63b 100644 --- a/topic-operator/src/main/java/io/strimzi/operator/topic/Kafka.java +++ b/topic-operator/src/main/java/io/strimzi/operator/topic/Kafka.java @@ -4,6 +4,7 @@ */ package io.strimzi.operator.topic; +import io.strimzi.operator.common.Reconciliation; import io.vertx.core.Future; import java.util.Set; @@ -19,50 +20,60 @@ public interface Kafka { * completing the returned Future when the topic has been created. * If the operation fails the returned Future will be failed with the * KafkaException (not an ExecutionException). + * + * @param reconciliation The reconciliation. * @param newTopic The topic to create. * @return A future which is completed once the topic has been created. */ - Future createTopic(Topic newTopic); + Future createTopic(Reconciliation reconciliation, Topic newTopic); /** * Asynchronously delete the given topic in Kafka, * completing the returned Future when the topic has been deleted. * If the operation fails the returned Future will be failed with the * KafkaException (not an ExecutionException). + * + * @param reconciliation The reconciliation. * @param topicName The name of the topic to delete. * @return A future which is completed once the topic has been deleted. */ - Future deleteTopic(TopicName topicName); + Future deleteTopic(Reconciliation reconciliation, TopicName topicName); /** * Wait for the given topic to not existing Kafka , * completing the returned Future when the topic does not exists. * If the operation fails the returned Future will be failed with the * KafkaException (not an ExecutionException). + * + * @param reconciliation The reconciliation. * @param topicName The name of the topic to delete. * @return A future which is completed once the topic has been deleted. */ - Future topicExists(TopicName topicName); + Future topicExists(Reconciliation reconciliation, TopicName topicName); /** * Asynchronously update the topic config in Kafka, * completing the returned Future when the topic has been updated. * If the operation fails the returned Future will be failed with the * KafkaException (not an ExecutionException). + * + * @param reconciliation The reconciliation. * @param topic The topic config to update. * @return A future which is completed once the topic has been updated. */ - Future updateTopicConfig(Topic topic); + Future updateTopicConfig(Reconciliation reconciliation, Topic topic); /** * Asynchronously increase the topic's partitions in Kafka, * completing the returned Future when the topic has been updated. * If the operation fails the returned Future will be failed with the * KafkaException (not an ExecutionException). + * + * @param reconciliation The reconciliation. * @param topic The topic. * @return A future which is completed once the topic has been updated. */ - Future increasePartitions(Topic topic); + Future increasePartitions(Reconciliation reconciliation, Topic topic); /** * Asynchronously fetch the topic metadata in Kafka, @@ -70,16 +81,19 @@ public interface Kafka { * If the topic does not exist the returned Future will be completed with null result. * If the operation fails the returned Future will be failed with the * KafkaException (not an ExecutionException). + * + * @param reconciliation The reconciliation. 
* @param topicName The name of the topic to get the metadata of. * @return A future which is completed with the requested metadata. */ - Future topicMetadata(TopicName topicName); + Future topicMetadata(Reconciliation reconciliation, TopicName topicName); /** * Asynchronously list the names of the topics available in Kafka, * completing the returned Future with the topic names. * If the operation fails the returned Future will be failed with the * KafkaException (not an ExecutionException). + * * @return A future which is completed with the list of topics. */ Future> listTopics(); diff --git a/topic-operator/src/main/java/io/strimzi/operator/topic/KafkaImpl.java b/topic-operator/src/main/java/io/strimzi/operator/topic/KafkaImpl.java index 20ed1d5bbb..900fb1854a 100644 --- a/topic-operator/src/main/java/io/strimzi/operator/topic/KafkaImpl.java +++ b/topic-operator/src/main/java/io/strimzi/operator/topic/KafkaImpl.java @@ -9,6 +9,8 @@ import java.util.Set; import java.util.concurrent.ExecutionException; +import io.strimzi.operator.common.Reconciliation; +import io.strimzi.operator.common.ReconciliationLogger; import io.vertx.core.CompositeFuture; import io.vertx.core.Future; import io.vertx.core.Promise; @@ -23,8 +25,6 @@ import org.apache.kafka.common.KafkaFuture; import org.apache.kafka.common.config.ConfigResource; import org.apache.kafka.common.errors.TopicExistsException; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import static java.util.Collections.singleton; @@ -36,7 +36,7 @@ */ public class KafkaImpl implements Kafka { - private final static Logger LOGGER = LogManager.getLogger(KafkaImpl.class); + private final static ReconciliationLogger LOGGER = ReconciliationLogger.create(KafkaImpl.class); protected final Admin adminClient; @@ -52,9 +52,9 @@ public KafkaImpl(Admin adminClient, Vertx vertx) { * (in a different thread) with the result. */ @Override - public Future deleteTopic(TopicName topicName) { + public Future deleteTopic(Reconciliation reconciliation, TopicName topicName) { Promise handler = Promise.promise(); - LOGGER.debug("Deleting topic {}", topicName); + LOGGER.debugCr(reconciliation, "Deleting topic {}", topicName); KafkaFuture future = adminClient.deleteTopics( singleton(topicName.toString())).values().get(topicName.toString()); mapFuture(future).onComplete(ar -> { @@ -67,7 +67,7 @@ public Future deleteTopic(TopicName topicName) { } @Override - public Future topicExists(TopicName topicName) { + public Future topicExists(Reconciliation reconciliation, TopicName topicName) { // Test existence by doing a validate-only creation and checking for topic exists exception. // This request goes to the controller, so is less susceptible to races // where we happen to query a broker which hasn't processed an UPDATE_METADATA @@ -92,7 +92,7 @@ public Future topicExists(TopicName topicName) { @SuppressWarnings("deprecation") @Override - public Future updateTopicConfig(Topic topic) { + public Future updateTopicConfig(Reconciliation reconciliation, Topic topic) { Map configs = TopicSerialization.toTopicConfig(topic); KafkaFuture future = adminClient.alterConfigs(configs).values().get(configs.keySet().iterator().next()); return mapFuture(future); @@ -104,10 +104,10 @@ public Future updateTopicConfig(Topic topic) { * The Future completes with a null result a topic with the given {@code topicName} does not exist. 
*/ @Override - public Future topicMetadata(TopicName topicName) { - LOGGER.debug("Getting metadata for topic {}", topicName); + public Future topicMetadata(Reconciliation reconciliation, TopicName topicName) { + LOGGER.debugCr(reconciliation, "Getting metadata for topic {}", topicName); ConfigResource resource = new ConfigResource(ConfigResource.Type.TOPIC, topicName.toString()); - return topicExists(topicName).compose(exists -> { + return topicExists(reconciliation, topicName).compose(exists -> { if (exists) { Future topicDescriptionFuture = mapFuture(adminClient.describeTopics( singleton(topicName.toString())).values().get(topicName.toString())); @@ -124,7 +124,7 @@ public Future topicMetadata(TopicName topicName) { @Override public Future> listTopics() { try { - LOGGER.debug("Listing topics"); + LOGGER.debugOp("Listing topics"); ListTopicsOptions listOptions = new ListTopicsOptions().listInternal(true); return mapFuture(adminClient.listTopics(listOptions).names()); } catch (Exception e) { @@ -134,11 +134,11 @@ public Future> listTopics() { @Override - public Future increasePartitions(Topic topic) { + public Future increasePartitions(Reconciliation reconciliation, Topic topic) { try { String topicName = topic.getTopicName().toString(); final NewPartitions newPartitions = NewPartitions.increaseTo(topic.getNumPartitions()); - LOGGER.debug("Increasing partitions {}", newPartitions); + LOGGER.debugCr(reconciliation, "Increasing partitions {}", newPartitions); final Map request = Collections.singletonMap(topicName, newPartitions); return mapFuture(adminClient.createPartitions(request).values().get(topicName)); } catch (Exception e) { @@ -151,10 +151,10 @@ public Future increasePartitions(Topic topic) { * (in a different thread) with the result. */ @Override - public Future createTopic(Topic topic) { + public Future createTopic(Reconciliation reconciliation, Topic topic) { try { NewTopic newTopic = TopicSerialization.toNewTopic(topic, null); - LOGGER.debug("Creating topic {}", newTopic); + LOGGER.debugCr(reconciliation, "Creating topic {}", newTopic); KafkaFuture future = adminClient.createTopics( singleton(newTopic)).values().get(newTopic.name()); return mapFuture(future); diff --git a/topic-operator/src/main/java/io/strimzi/operator/topic/KafkaStreamsTopicStore.java b/topic-operator/src/main/java/io/strimzi/operator/topic/KafkaStreamsTopicStore.java index 5edcb72bab..7e1866f45e 100644 --- a/topic-operator/src/main/java/io/strimzi/operator/topic/KafkaStreamsTopicStore.java +++ b/topic-operator/src/main/java/io/strimzi/operator/topic/KafkaStreamsTopicStore.java @@ -8,8 +8,8 @@ import io.vertx.core.Future; import org.apache.kafka.clients.producer.ProducerRecord; import org.apache.kafka.streams.state.ReadOnlyKeyValueStore; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import java.util.concurrent.CompletionStage; import java.util.function.BiFunction; @@ -19,7 +19,7 @@ * Apicurio Registry's gRPC based Kafka Streams ReadOnlyKeyValueStore */ public class KafkaStreamsTopicStore implements TopicStore { - private static final Logger log = LoggerFactory.getLogger(KafkaStreamsTopicStore.class); + private static final Logger LOGGER = LogManager.getLogger(KafkaStreamsTopicStore.class); private final ReadOnlyKeyValueStore topicStore; @@ -79,7 +79,7 @@ public Future read(TopicName name) { } private Future handleTopicCommand(TopicCommand cmd) { - log.debug("Handling topic command [{}]: {}", cmd.getType(), 
cmd.getKey()); + LOGGER.debug("Handling topic command [{}]: {}", cmd.getType(), cmd.getKey()); String key = cmd.getKey(); CompletionStage result = resultService.apply(key, cmd.getUuid()) .thenApply(KafkaStreamsTopicStore::toThrowable); @@ -87,7 +87,7 @@ private Future handleTopicCommand(TopicCommand cmd) { producer.apply(new ProducerRecord<>(storeTopic, key, cmd)) .whenComplete((r, t) -> { if (t != null) { - log.error("Error sending topic command", t); + LOGGER.error("Error sending topic command", t); } }); return Future.fromCompletionStage(result).compose( diff --git a/topic-operator/src/main/java/io/strimzi/operator/topic/KafkaStreamsTopicStoreService.java b/topic-operator/src/main/java/io/strimzi/operator/topic/KafkaStreamsTopicStoreService.java index 35c799d4f9..73abcd9ab4 100644 --- a/topic-operator/src/main/java/io/strimzi/operator/topic/KafkaStreamsTopicStoreService.java +++ b/topic-operator/src/main/java/io/strimzi/operator/topic/KafkaStreamsTopicStoreService.java @@ -36,7 +36,7 @@ * A service to configure and start/stop KafkaStreamsTopicStore. */ public class KafkaStreamsTopicStoreService { - private static final Logger log = LoggerFactory.getLogger(KafkaStreamsTopicStoreService.class); + private static final Logger LOGGER = LoggerFactory.getLogger(KafkaStreamsTopicStoreService.class); private final List closeables = new ArrayList<>(); @@ -49,7 +49,7 @@ public CompletionStage start(Config config, Properties kafkaProperti // check if entry topic has the right configuration Admin admin = Admin.create(kafkaProperties); - log.info("Starting ..."); + LOGGER.info("Starting ..."); return toCS(admin.describeCluster().nodes()) .thenApply(nodes -> new Context(nodes.size())) .thenCompose(c -> toCS(admin.listTopics().names()).thenApply(c::setTopics)) @@ -66,10 +66,10 @@ public CompletionStage start(Config config, Properties kafkaProperti // use another thread to stop, if needed try { if (t != null) { - log.warn("Failed to start.", t); + LOGGER.warn("Failed to start.", t); stop(); } else { - log.info("Started."); + LOGGER.info("Started."); } } finally { close(admin); @@ -78,7 +78,7 @@ public CompletionStage start(Config config, Properties kafkaProperti } private TopicStore createKafkaTopicStore(Config config, Properties kafkaProperties, String storeTopic, AsyncBiFunctionService.WithSerdes serviceImpl) { - log.info("Creating topic store ..."); + LOGGER.info("Creating topic store ..."); ProducerActions producer = new AsyncProducer<>( kafkaProperties, Serdes.String().serializer(), @@ -94,7 +94,7 @@ private TopicStore createKafkaTopicStore(Config config, Properties kafkaProperti } private CompletableFuture> createKafkaStreams(Config config, Properties kafkaProperties, String storeTopic, String storeName) { - log.info("Creating Kafka Streams, store name: {}", storeName); + LOGGER.info("Creating Kafka Streams, store name: {}", storeName); long timeoutMillis = config.get(Config.STALE_RESULT_TIMEOUT_MS); ForeachActionDispatcher dispatcher = new ForeachActionDispatcher<>(); WaitForResultService serviceImpl = new WaitForResultService(timeoutMillis, dispatcher); @@ -132,7 +132,7 @@ private CompletableFuture createNewStoreTopic(String storeTopic, Admin admin, Context c) { - log.info("Creating new store topic: {}", storeTopic); + LOGGER.info("Creating new store topic: {}", storeTopic); int rf = Math.min(3, c.clusterSize); int minISR = Math.max(rf - 1, 1); NewTopic newTopic = new NewTopic(storeTopic, 1, (short) rf) @@ -141,7 +141,7 @@ private CompletionStage createNewStoreTopic(String storeTopic, Admin 
admin } private CompletionStage validateExistingStoreTopic(String storeTopic, Admin admin, Context c) { - log.info("Validating existing store topic: {}", storeTopic); + LOGGER.info("Validating existing store topic: {}", storeTopic); ConfigResource storeTopicConfigResource = new ConfigResource(ConfigResource.Type.TOPIC, storeTopic); return toCS(admin.describeTopics(Collections.singleton(storeTopic)).values().get(storeTopic)) .thenApply(td -> c.setRf(td.partitions().stream().map(tp -> tp.replicas().size()).min(Integer::compare).orElseThrow())) @@ -149,7 +149,7 @@ private CompletionStage validateExistingStoreTopic(String storeTopic, Admi .thenApply(cr -> c2.setMinISR(parseInt(cr.get(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG).value())))) .thenApply(c3 -> { if (c3.rf != Math.min(3, c3.clusterSize) || c3.minISR != c3.rf - 1) { - log.warn("Durability of the topic [{}] is not sufficient for production use - replicationFactor: {}, {}: {}. " + + LOGGER.warn("Durability of the topic [{}] is not sufficient for production use - replicationFactor: {}, {}: {}. " + "Increase the replication factor to at least 3 and configure the {} to {}.", storeTopic, c3.rf, TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, c3.minISR, TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, c3.minISR); } @@ -158,7 +158,7 @@ private CompletionStage validateExistingStoreTopic(String storeTopic, Admi } public void stop() { - log.info("Stopping services ..."); + LOGGER.info("Stopping services ..."); Collections.reverse(closeables); closeables.forEach(KafkaStreamsTopicStoreService::close); } @@ -167,7 +167,7 @@ private static void close(AutoCloseable service) { try { service.close(); } catch (Exception e) { - log.warn("Exception while closing service: {}", service, e); + LOGGER.warn("Exception while closing service: {}", service, e); } } diff --git a/topic-operator/src/main/java/io/strimzi/operator/topic/LogContext.java b/topic-operator/src/main/java/io/strimzi/operator/topic/LogContext.java index ee19d6aad1..26ac1aa817 100644 --- a/topic-operator/src/main/java/io/strimzi/operator/topic/LogContext.java +++ b/topic-operator/src/main/java/io/strimzi/operator/topic/LogContext.java @@ -6,8 +6,11 @@ import io.fabric8.kubernetes.client.Watcher; import io.strimzi.api.kafka.model.KafkaTopic; +import io.strimzi.operator.common.Reconciliation; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.Marker; +import org.apache.logging.log4j.MarkerManager; import java.util.Objects; import java.util.concurrent.atomic.AtomicInteger; @@ -19,20 +22,24 @@ public class LogContext { private static AtomicInteger ctx = new AtomicInteger(); private final String base; private final String trigger; + private final String namespace; + private final String topicName; private String resourceVersion; - private LogContext(String trigger) { + private LogContext(String trigger, String namespace, String topicName) { base = ctx.getAndIncrement() + "|" + trigger; + this.namespace = namespace; + this.topicName = topicName; this.trigger = trigger; } - static LogContext zkWatch(String znode, String childAction) { - return new LogContext(znode + " " + childAction); + static LogContext zkWatch(String znode, String childAction, String namespace, String topicName) { + return new LogContext(znode + " " + childAction, namespace, topicName); } static LogContext kubeWatch(Watcher.Action action, KafkaTopic kafkaTopic) { - LogContext logContext = new LogContext("kube " + action(action) + kafkaTopic.getMetadata().getName()); + LogContext 
logContext = new LogContext("kube " + action(action) + kafkaTopic.getMetadata().getName(), kafkaTopic.getMetadata().getNamespace(), kafkaTopic.getMetadata().getName()); logContext.resourceVersion = kafkaTopic.getMetadata().getResourceVersion(); return logContext; } @@ -49,8 +56,8 @@ private static String action(Watcher.Action action) { return "!"; } - static LogContext periodic(String periodicType) { - return new LogContext(periodicType); + static LogContext periodic(String periodicType, String namespace, String topicName) { + return new LogContext(periodicType, namespace, topicName); } public String trigger() { @@ -66,6 +73,12 @@ public String toString() { } } + public Marker getMarker() { + String marker = "KafkaTopic(" + namespace + "/" + topicName + ")"; + LOGGER.trace("marker is {}", marker); + return MarkerManager.getMarker(marker); + } + public LogContext withKubeTopic(KafkaTopic kafkaTopic) { String newResourceVersion = kafkaTopic == null ? null : kafkaTopic.getMetadata().getResourceVersion(); if (!Objects.equals(resourceVersion, newResourceVersion)) { @@ -74,4 +87,8 @@ public LogContext withKubeTopic(KafkaTopic kafkaTopic) { this.resourceVersion = newResourceVersion; return this; } + + public Reconciliation toReconciliation() { + return new Reconciliation(trigger, "KafkaTopic", namespace, topicName); + } } diff --git a/topic-operator/src/main/java/io/strimzi/operator/topic/Main.java b/topic-operator/src/main/java/io/strimzi/operator/topic/Main.java index 65019c7e05..39941c89c2 100644 --- a/topic-operator/src/main/java/io/strimzi/operator/topic/Main.java +++ b/topic-operator/src/main/java/io/strimzi/operator/topic/Main.java @@ -7,15 +7,14 @@ import io.fabric8.kubernetes.client.DefaultKubernetesClient; import io.strimzi.api.kafka.Crds; import io.vertx.core.Vertx; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; - import java.util.HashMap; import java.util.Map; import io.vertx.core.VertxOptions; import io.vertx.micrometer.MicrometerMetricsOptions; import io.vertx.micrometer.VertxPrometheusOptions; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; /** * The entry-point to the topic operator. 
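Note on the LogContext change above: getMarker() builds a per-resource Log4j2 marker of the form KafkaTopic(namespace/name). The following sketch is illustrative only and is not part of this patch; the class name and the namespace/topic values are made up. It shows, using only standard Log4j2 API calls, how such a marker is attached to a log statement, which is what lets a MarkerFilter in the Log4j2 configuration raise or lower verbosity for one custom resource without affecting the rest of the operator's logging.

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.Marker;
import org.apache.logging.log4j.MarkerManager;

// Illustrative sketch: mirrors the "KafkaTopic(namespace/name)" marker built by LogContext.getMarker()
public class MarkerSketch {
    private static final Logger LOGGER = LogManager.getLogger(MarkerSketch.class);

    public static void main(String[] args) {
        // Hypothetical namespace and topic name; in the operator the marker comes from LogContext.getMarker()
        Marker marker = MarkerManager.getMarker("KafkaTopic(myproject/my-topic)");
        // Messages carrying this marker can be matched by a marker-based filter in the Log4j2 configuration,
        // so the verbosity for this one KafkaTopic can be changed independently of other resources
        LOGGER.debug(marker, "Reconciling topic {}", "my-topic");
    }
}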
diff --git a/topic-operator/src/main/java/io/strimzi/operator/topic/ResourceName.java b/topic-operator/src/main/java/io/strimzi/operator/topic/ResourceName.java index 8df75931f0..cf7f3389de 100644 --- a/topic-operator/src/main/java/io/strimzi/operator/topic/ResourceName.java +++ b/topic-operator/src/main/java/io/strimzi/operator/topic/ResourceName.java @@ -31,6 +31,7 @@ public ResourceName(String name) { /** * Create a MapName from the name of the given resource + * * @param resource */ public ResourceName(HasMetadata resource) { diff --git a/topic-operator/src/main/java/io/strimzi/operator/topic/TopicConfigsWatcher.java b/topic-operator/src/main/java/io/strimzi/operator/topic/TopicConfigsWatcher.java index c0f9d44d9b..83d8cef829 100644 --- a/topic-operator/src/main/java/io/strimzi/operator/topic/TopicConfigsWatcher.java +++ b/topic-operator/src/main/java/io/strimzi/operator/topic/TopicConfigsWatcher.java @@ -11,18 +11,16 @@ */ class TopicConfigsWatcher extends ZkWatcher { - private static final String CONFIGS_ZNODE = "/config/topics"; - TopicConfigsWatcher(TopicOperator topicOperator) { super(topicOperator, CONFIGS_ZNODE); } @Override protected void notifyOperator(String child) { - LogContext logContext = LogContext.zkWatch(CONFIGS_ZNODE, "=" + child); - log.info("{}: Topic config change", logContext); + LogContext logContext = LogContext.zkWatch(CONFIGS_ZNODE, "=" + child, topicOperator.getNamespace(), child); + logger.infoCr(logContext.toReconciliation(), "Topic config change"); topicOperator.onTopicConfigChanged(logContext, new TopicName(child)).onComplete(ar2 -> { - log.info("{}: Reconciliation result due to topic config change on topic {}: {}", logContext, child, ar2); + logger.infoCr(logContext.toReconciliation(), "Reconciliation result due to topic config change on topic {}: {}", child, ar2); }); } } diff --git a/topic-operator/src/main/java/io/strimzi/operator/topic/TopicDiff.java b/topic-operator/src/main/java/io/strimzi/operator/topic/TopicDiff.java index c4c18193f7..da8c610ab3 100644 --- a/topic-operator/src/main/java/io/strimzi/operator/topic/TopicDiff.java +++ b/topic-operator/src/main/java/io/strimzi/operator/topic/TopicDiff.java @@ -215,6 +215,7 @@ private TopicDiff(Map differences, ObjectMeta objectMeta) { /** * Return the TopicDiff that will transform the given source topic into the given target topic. + * * @param source * @param target * @return The difference between the source and target. @@ -318,6 +319,7 @@ public boolean changesReplicationFactor() { /** * Apply this diff to this given topic, returning a new topic. + * * @param topic * @return */ @@ -361,6 +363,7 @@ public boolean conflicts(TopicDiff other) { /** * Merge this TopicDiff with the given other TopicDiff, returning a * single diff which combines the two. + * * @param other The diff to merge with. * @return * @throws IllegalArgumentException if the topics conflict. 
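The operator call sites in the hunks above move from plain Logger calls to the ReconciliationLogger *Cr/*Op variants. The following is a minimal usage sketch assembled only from calls that appear in this diff (ReconciliationLogger.create, the four-argument Reconciliation constructor, infoCr, debugOp); the class name and the literal values are hypothetical and used only for illustration.

import io.strimzi.operator.common.Reconciliation;
import io.strimzi.operator.common.ReconciliationLogger;

// Illustrative sketch of the logging pattern introduced by this patch
public class ReconciliationLoggingSketch {
    private static final ReconciliationLogger LOGGER = ReconciliationLogger.create(ReconciliationLoggingSketch.class);

    public static void main(String[] args) {
        // Trigger, resource kind, namespace and name identify the custom resource being reconciled
        Reconciliation reconciliation = new Reconciliation("periodic", "KafkaTopic", "myproject", "my-topic");
        // *Cr methods log in the context of a single CR (tagged with its marker); *Op methods log operator-wide messages
        LOGGER.infoCr(reconciliation, "Reconciling topic {}", "my-topic");
        LOGGER.debugOp("Listing topics");
    }
}

In tests, the TestLogger class earlier in this diff overrides warnCr to record such messages in its loggedMessages list instead of writing them to an underlying logger.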
diff --git a/topic-operator/src/main/java/io/strimzi/operator/topic/TopicMetadataHandler.java b/topic-operator/src/main/java/io/strimzi/operator/topic/TopicMetadataHandler.java index f51c9eecfe..e6c68a8c56 100644 --- a/topic-operator/src/main/java/io/strimzi/operator/topic/TopicMetadataHandler.java +++ b/topic-operator/src/main/java/io/strimzi/operator/topic/TopicMetadataHandler.java @@ -6,6 +6,7 @@ import io.strimzi.operator.common.BackOff; import io.strimzi.operator.common.MaxAttemptsExceededException; +import io.strimzi.operator.common.Reconciliation; import io.vertx.core.AsyncResult; import io.vertx.core.Handler; import io.vertx.core.Vertx; @@ -20,7 +21,7 @@ */ public abstract class TopicMetadataHandler implements Handler> { - private static final Logger log = LogManager.getLogger(TopicMetadataHandler.class); + private static final Logger LOGGER = LogManager.getLogger(TopicMetadataHandler.class); private final BackOff backOff; @@ -58,24 +59,24 @@ public abstract class TopicMetadataHandler implements Handler kafka.topicMetadata(topicName).onComplete(this)); + vertx.runOnContext(timerId -> kafka.topicMetadata(reconciliation, topicName).onComplete(this)); } else { vertx.setTimer(TimeUnit.MILLISECONDS.convert(delay, TimeUnit.MILLISECONDS), - timerId -> kafka.topicMetadata(topicName).onComplete(this)); + timerId -> kafka.topicMetadata(reconciliation, topicName).onComplete(this)); } } diff --git a/topic-operator/src/main/java/io/strimzi/operator/topic/TopicOperator.java b/topic-operator/src/main/java/io/strimzi/operator/topic/TopicOperator.java index a64c7239df..980d84db05 100644 --- a/topic-operator/src/main/java/io/strimzi/operator/topic/TopicOperator.java +++ b/topic-operator/src/main/java/io/strimzi/operator/topic/TopicOperator.java @@ -21,6 +21,7 @@ import io.strimzi.operator.common.BackOff; import io.strimzi.operator.common.MaxAttemptsExceededException; import io.strimzi.operator.common.MetricsProvider; +import io.strimzi.operator.common.ReconciliationLogger; import io.strimzi.operator.common.Util; import io.strimzi.operator.common.operator.resource.StatusUtils; import io.vertx.core.AsyncResult; @@ -56,7 +57,8 @@ @SuppressWarnings({"checkstyle:ClassDataAbstractionCoupling", "checkstyle:ClassFanOutComplexity"}) class TopicOperator { - private final static Logger LOGGER = LogManager.getLogger(TopicOperator.class); + private final static ReconciliationLogger LOGGER = ReconciliationLogger.create(TopicOperator.class); + private final static Logger EVENT_LOGGER = LogManager.getLogger("Event"); public static final String METRICS_PREFIX = "strimzi."; private final Kafka kafka; @@ -88,12 +90,14 @@ enum EventType { } class Event implements Handler { + private final LogContext logContext; private final EventType eventType; private final String message; private final HasMetadata involvedObject; private final Handler> handler; - public Event(HasMetadata involvedObject, String message, EventType eventType, Handler> handler) { + public Event(LogContext logContext, HasMetadata involvedObject, String message, EventType eventType, Handler> handler) { + this.logContext = logContext; this.involvedObject = involvedObject; this.message = message; this.handler = handler; @@ -124,10 +128,10 @@ public void handle(Void v) { io.fabric8.kubernetes.api.model.Event event = evtb.build(); switch (eventType) { case INFO: - LOGGER.info("{}", message); + LOGGER.infoCr(logContext.toReconciliation(), message); break; case WARNING: - LOGGER.warn("{}", message); + LOGGER.warnCr(logContext.toReconciliation(), message); 
break; } k8s.createEvent(event).onComplete(handler); @@ -140,7 +144,7 @@ public String toString() { private Future createResource(LogContext logContext, Topic topic) { Promise result = Promise.promise(); - enqueue(new CreateResource(logContext, topic, result)); + enqueue(logContext, new CreateResource(logContext, topic, result)); return result.future(); } @@ -170,7 +174,7 @@ public String toString() { private Future deleteResource(LogContext logContext, ResourceName resourceName) { Promise result = Promise.promise(); - enqueue(new DeleteResource(logContext, resourceName, result)); + enqueue(logContext, new DeleteResource(logContext, resourceName, result)); return result.future(); } @@ -189,7 +193,7 @@ public DeleteResource(LogContext logContext, ResourceName resourceName, Handler< @Override public void handle(Void v) { - k8s.deleteResource(resourceName).onComplete(handler); + k8s.deleteResource(logContext.toReconciliation(), resourceName).onComplete(handler); } @Override @@ -200,7 +204,7 @@ public String toString() { private Future updateResource(LogContext logContext, Topic topic) { Promise result = Promise.promise(); - enqueue(new UpdateResource(logContext, topic, result)); + enqueue(logContext, new UpdateResource(logContext, topic, result)); return result.future(); } @@ -232,7 +236,7 @@ public String toString() { private Future createKafkaTopic(LogContext logContext, Topic topic, HasMetadata involvedObject) { Promise result = Promise.promise(); - enqueue(new CreateKafkaTopic(logContext, topic, involvedObject, result)); + enqueue(logContext, new CreateKafkaTopic(logContext, topic, involvedObject, result)); return result.future(); } @@ -254,10 +258,10 @@ public CreateKafkaTopic(LogContext logContext, Topic topic, @Override public void handle(Void v) throws OperatorException { - kafka.createTopic(topic).onComplete(ar -> { + kafka.createTopic(logContext.toReconciliation(), topic).onComplete(ar -> { if (ar.succeeded()) { - LOGGER.debug("{}: Created topic '{}' for KafkaTopic '{}'", - logContext, topic.getTopicName(), topic.getResourceName()); + LOGGER.debugCr(logContext.toReconciliation(), "Created topic '{}' for KafkaTopic '{}'", + topic.getTopicName(), topic.getResourceName()); handler.handle(ar); } else { handler.handle(ar); @@ -296,9 +300,9 @@ public UpdateKafkaConfig(LogContext logContext, Topic topic, HasMetadata involve @Override public void handle(Void v) throws OperatorException { - kafka.updateTopicConfig(topic).onComplete(ar -> { + kafka.updateTopicConfig(logContext.toReconciliation(), topic).onComplete(ar -> { if (ar.failed()) { - enqueue(new Event(involvedObject, ar.cause().toString(), EventType.WARNING, eventResult -> { })); + enqueue(logContext, new Event(logContext, involvedObject, ar.cause().toString(), EventType.WARNING, eventResult -> { })); } handler.handle(ar); }); @@ -329,9 +333,9 @@ public IncreaseKafkaPartitions(LogContext logContext, Topic topic, HasMetadata i @Override public void handle(Void v) throws OperatorException { - kafka.increasePartitions(topic).onComplete(ar -> { + kafka.increasePartitions(logContext.toReconciliation(), topic).onComplete(ar -> { if (ar.failed()) { - enqueue(new Event(involvedObject, ar.cause().toString(), EventType.WARNING, eventResult -> { })); + enqueue(logContext, new Event(logContext, involvedObject, ar.cause().toString(), EventType.WARNING, eventResult -> { })); } handler.handle(ar); }); @@ -346,7 +350,7 @@ public String toString() { private Future deleteKafkaTopic(LogContext logContext, TopicName topicName) { Promise result = 
Promise.promise(); - enqueue(new DeleteKafkaTopic(logContext, topicName, result)); + enqueue(logContext, new DeleteKafkaTopic(logContext, topicName, result)); return result.future(); } @@ -365,8 +369,8 @@ public DeleteKafkaTopic(LogContext logContext, TopicName topicName, Handler executeWithTopicLockHeld(LogContext logContext, TopicName ke BiFunction decrement = (topicName, waiters) -> { if (waiters != null) { if (waiters == 1) { - LOGGER.debug("{}: Removing last waiter {}", logContext, action); + LOGGER.debugCr(logContext.toReconciliation(), "Removing last waiter {}", action); return null; } else { - LOGGER.debug("{}: Removing waiter {}, {} waiters left", logContext, action, waiters - 1); + LOGGER.debugCr(logContext.toReconciliation(), "Removing waiter {}, {} waiters left", action, waiters - 1); return waiters - 1; } } else { - LOGGER.error("{}: Assertion failure. topic {}, action {}", logContext, lockName, action); + LOGGER.errorCr(logContext.toReconciliation(), "Assertion failure. topic {}, action {}", lockName, action); return null; } }; - LOGGER.debug("{}: Queuing action {} on topic {}", logContext, action, lockName); + LOGGER.debugCr(logContext.toReconciliation(), "Queuing action {} on topic {}", action, lockName); inflight.compute(key, (topicName, waiters) -> { if (waiters == null) { - LOGGER.debug("{}: Adding first waiter {}", logContext, action); + LOGGER.debugCr(logContext.toReconciliation(), "Adding first waiter {}", action); return 1; } else { - LOGGER.debug("{}: Adding waiter {}: {}", logContext, action, waiters + 1); + LOGGER.debugCr(logContext.toReconciliation(), "Adding waiter {}: {}", action, waiters + 1); return waiters + 1; } }); vertx.sharedData().getLockWithTimeout(lockName, timeoutMs, lockResult -> { if (lockResult.succeeded()) { - LOGGER.debug("{}: Lock acquired", logContext); - LOGGER.debug("{}: Executing action {} on topic {}", logContext, action, lockName); + LOGGER.debugCr(logContext.toReconciliation(), "Lock acquired"); + LOGGER.debugCr(logContext.toReconciliation(), "Executing action {} on topic {}", action, lockName); action.execute().onComplete(actionResult -> { - LOGGER.debug("{}: Executing handler for action {} on topic {}", logContext, action, lockName); + LOGGER.debugCr(logContext.toReconciliation(), "Executing handler for action {} on topic {}", action, lockName); action.result = actionResult; String keytag = namespace + ":" + "KafkaTopic" + "/" + key.asKubeName().toString(); Optional metric = metrics.meterRegistry().getMeters() @@ -493,7 +497,7 @@ public Future executeWithTopicLockHeld(LogContext logContext, TopicName ke if (metric.isPresent()) { // remove metric so it can be re-added with new tags metrics.meterRegistry().remove(metric.get().getId()); - LOGGER.debug("Removed metric {}.resource.state{{}}", METRICS_PREFIX, keytag); + LOGGER.debugCr(logContext.toReconciliation(), "Removed metric {}.resource.state{{}}", METRICS_PREFIX, keytag); } if (action.topic != null) { @@ -506,12 +510,12 @@ public Future executeWithTopicLockHeld(LogContext logContext, TopicName ke Tag.of("reason", succeeded ? "none" : actionResult.cause().getMessage() == null ? "unknown error" : actionResult.cause().getMessage())); metrics.gauge(METRICS_PREFIX + "resource.state", "Current state of the resource: 1 ready, 0 fail", metricTags).set(actionResult.succeeded() ? 1 : 0); - LOGGER.debug("Updated metric " + METRICS_PREFIX + "resource.state{} = {}", metricTags, succeeded ? 
1 : 0); + LOGGER.debugCr(logContext.toReconciliation(), "Updated metric " + METRICS_PREFIX + "resource.state{} = {}", metricTags, succeeded ? 1 : 0); } // Update status with lock held so that event is ignored via statusUpdateGeneration action.updateStatus(logContext).onComplete(statusResult -> { if (statusResult.failed()) { - LOGGER.error("{}: Error updating KafkaTopic.status for action {}", logContext, action, + LOGGER.errorCr(logContext.toReconciliation(), "Error updating KafkaTopic.status for action {}", action, statusResult.cause()); } try { @@ -523,14 +527,14 @@ public Future executeWithTopicLockHeld(LogContext logContext, TopicName ke result.fail(t); } finally { lockResult.result().release(); - LOGGER.debug("{}: Lock released", logContext); + LOGGER.debugCr(logContext.toReconciliation(), "Lock released"); inflight.compute(key, decrement); } }); }); } else { lockedReconciliationsCounter.increment(); - LOGGER.warn("{}: Lock not acquired within {}ms: action {} will not be run", logContext, timeoutMs, action); + LOGGER.warnCr(logContext.toReconciliation(), "Lock not acquired within {}ms: action {} will not be run", timeoutMs, action); try { result.handle(Future.failedFuture("Failed to acquire lock for topic " + lockName + " after " + timeoutMs + "ms. Not executing action " + action)); } finally { @@ -569,20 +573,20 @@ Future reconcile(Reconciliation reconciliation, final LogContext logContex final Future reconciliationResultHandler; { TopicName topicName = k8sTopic != null ? k8sTopic.getTopicName() : kafkaTopic != null ? kafkaTopic.getTopicName() : privateTopic != null ? privateTopic.getTopicName() : null; - LOGGER.info("{}: Reconciling topic {}, k8sTopic:{}, kafkaTopic:{}, privateTopic:{}", logContext, topicName, k8sTopic == null ? "null" : "nonnull", kafkaTopic == null ? "null" : "nonnull", privateTopic == null ? "null" : "nonnull"); + LOGGER.infoCr(logContext.toReconciliation(), "Reconciling topic {}, k8sTopic:{}, kafkaTopic:{}, privateTopic:{}", topicName, k8sTopic == null ? "null" : "nonnull", kafkaTopic == null ? "null" : "nonnull", privateTopic == null ? 
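After each action the operator removes any previously registered resource.state meter and registers it again with the current tags and a 1/0 value. A sketch of that remove-and-re-register pattern with plain Micrometer follows; the metric name prefix and the exact tag keys here are illustrative guesses, only the general shape is taken from the hunk above:

import io.micrometer.core.instrument.Meter;
import io.micrometer.core.instrument.MeterRegistry;
import io.micrometer.core.instrument.Tags;
import io.micrometer.core.instrument.simple.SimpleMeterRegistry;
import java.util.Optional;
import java.util.concurrent.atomic.AtomicInteger;

public class ResourceStateMetric {
    // Remove any previously registered gauge for this resource, then register
    // it again with the current tag set and value (1 = ready, 0 = failed).
    static void setResourceState(MeterRegistry registry, String namespace, String name, boolean ready, String reason) {
        Optional<Meter> existing = registry.getMeters().stream()
                .filter(m -> "strimzi.resource.state".equals(m.getId().getName())
                        && name.equals(m.getId().getTag("name"))
                        && namespace.equals(m.getId().getTag("resource-namespace")))
                .findFirst();
        existing.ifPresent(m -> registry.remove(m.getId()));

        Tags tags = Tags.of(
                "kind", "KafkaTopic",
                "name", name,
                "resource-namespace", namespace,
                "reason", ready ? "none" : reason);
        AtomicInteger state = registry.gauge("strimzi.resource.state", tags, new AtomicInteger(0));
        state.set(ready ? 1 : 0);
    }

    public static void main(String[] args) {
        MeterRegistry registry = new SimpleMeterRegistry();
        setResourceState(registry, "myproject", "my-topic", true, null);
    }
}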
"null" : "nonnull"); } if (k8sTopic != null && Annotations.isReconciliationPausedWithAnnotation(k8sTopic.getMetadata())) { - LOGGER.debug("{}: Reconciliation paused, not applying changes.", logContext); + LOGGER.debugCr(logContext.toReconciliation(), "Reconciliation paused, not applying changes."); reconciliationResultHandler = Future.succeededFuture(); } else if (privateTopic == null) { if (k8sTopic == null) { if (kafkaTopic == null) { // All three null: This happens reentrantly when a topic or KafkaTopic is deleted - LOGGER.debug("{}: All three topics null during reconciliation.", logContext); + LOGGER.debugCr(logContext.toReconciliation(), "All three topics null during reconciliation."); reconciliationResultHandler = Future.succeededFuture(); } else { // it's been created in Kafka => create in k8s and privateState - LOGGER.debug("{}: topic created in kafka, will create KafkaTopic in k8s and topicStore", logContext); + LOGGER.debugCr(logContext.toReconciliation(), "topic created in kafka, will create KafkaTopic in k8s and topicStore"); reconciliationResultHandler = createResource(logContext, kafkaTopic) .compose(createdKt -> { reconciliation.observedTopicFuture(createdKt); @@ -591,16 +595,16 @@ Future reconcile(Reconciliation reconciliation, final LogContext logContex } } else if (kafkaTopic == null) { // it's been created in k8s => create in Kafka and privateState - LOGGER.debug("{}: KafkaTopic created in k8s, will create topic in kafka and topicStore", logContext); + LOGGER.debugCr(logContext.toReconciliation(), "KafkaTopic created in k8s, will create topic in kafka and topicStore"); reconciliationResultHandler = createKafkaTopic(logContext, k8sTopic, involvedObject) .compose(ignore -> createInTopicStore(logContext, k8sTopic, involvedObject)) // Kafka will set the message.format.version, so we need to update the KafkaTopic to reflect // that to avoid triggering another reconciliation - .compose(ignored -> getFromKafka(k8sTopic.getTopicName())) + .compose(ignored -> getFromKafka(logContext.toReconciliation(), k8sTopic.getTopicName())) .compose(kafkaTopic2 -> { - LOGGER.debug("Post-create kafka {}", kafkaTopic2); + LOGGER.debugCr(logContext.toReconciliation(), "Post-create kafka {}", kafkaTopic2); if (kafkaTopic2 == null) { - LOGGER.error("Post-create kafka unexpectedly null"); + LOGGER.errorCr(logContext.toReconciliation(), "Post-create kafka unexpectedly null"); return Future.succeededFuture(); } return update3Way(reconciliation, logContext, involvedObject, k8sTopic, kafkaTopic2, k8sTopic); @@ -613,18 +617,18 @@ Future reconcile(Reconciliation reconciliation, final LogContext logContex if (k8sTopic == null) { if (kafkaTopic == null) { // delete privateState - LOGGER.debug("{}: KafkaTopic deleted in k8s and topic deleted in kafka => delete from topicStore", logContext); + LOGGER.debugCr(logContext.toReconciliation(), "KafkaTopic deleted in k8s and topic deleted in kafka => delete from topicStore"); reconciliationResultHandler = deleteFromTopicStore(logContext, involvedObject, privateTopic.getTopicName()); } else { // it was deleted in k8s so delete in kafka and privateState // If delete.topic.enable=false then the resulting exception will be ignored and only the privateState topic will be deleted - LOGGER.debug("{}: KafkaTopic deleted in k8s => delete topic from kafka and from topicStore", logContext); - reconciliationResultHandler = deleteKafkaTopic(logContext, kafkaTopic.getTopicName()).recover(this::handleTopicDeletionDisabled) + LOGGER.debugCr(logContext.toReconciliation(), 
"KafkaTopic deleted in k8s => delete topic from kafka and from topicStore"); + reconciliationResultHandler = deleteKafkaTopic(logContext, kafkaTopic.getTopicName()).recover(thrown -> handleTopicDeletionDisabled(thrown, logContext)) .compose(ignored -> deleteFromTopicStore(logContext, involvedObject, privateTopic.getTopicName())); } } else if (kafkaTopic == null) { // it was deleted in kafka so delete in k8s and privateState - LOGGER.debug("{}: topic deleted in kafkas => delete KafkaTopic from k8s and from topicStore", logContext); + LOGGER.debugCr(logContext.toReconciliation(), "topic deleted in kafkas => delete KafkaTopic from k8s and from topicStore"); reconciliationResultHandler = deleteResource(logContext, privateTopic.getOrAsKubeName()) .compose(ignore -> { reconciliation.observedTopicFuture(null); @@ -632,7 +636,7 @@ Future reconcile(Reconciliation reconciliation, final LogContext logContex }); } else { // all three exist - LOGGER.debug("{}: 3 way diff", logContext); + LOGGER.debugCr(logContext.toReconciliation(), "3 way diff"); reconciliationResultHandler = update3Way(reconciliation, logContext, involvedObject, k8sTopic, kafkaTopic, privateTopic); } @@ -655,12 +659,12 @@ Future reconcile(Reconciliation reconciliation, final LogContext logContex * @param thrown The exception encountered when attempting to delete the kafka topic. * @return Either an succeeded future in the case that topic deletion is disabled or a failed future in all other cases. */ - private Future handleTopicDeletionDisabled(Throwable thrown) { + private Future handleTopicDeletionDisabled(Throwable thrown, LogContext logContext) { if (thrown instanceof org.apache.kafka.common.errors.TopicDeletionDisabledException) { - LOGGER.warn("Topic deletion is disabled. Kafka topic will persist and KafkaTopic resource will be recreated in the next reconciliation."); + LOGGER.warnCr(logContext.toReconciliation(), "Topic deletion is disabled. 
Kafka topic will persist and KafkaTopic resource will be recreated in the next reconciliation."); } else { - LOGGER.error("Topic deletion failed with ({}) error: {}", thrown.getClass(), thrown.getMessage()); + LOGGER.errorCr(logContext.toReconciliation(), "Topic deletion failed with ({}) error: {}", thrown.getClass(), thrown.getMessage()); return Future.failedFuture(thrown); } @@ -672,8 +676,8 @@ private Future update2Way(Reconciliation reconciliation, LogContext logCon TopicDiff diff = TopicDiff.diff(kafkaTopic, k8sTopic); if (diff.isEmpty()) { // they're the same => do nothing, but still create the private copy - LOGGER.debug("{}: KafkaTopic created in k8s and topic created in kafka, but they're identical => just creating in topicStore", logContext); - LOGGER.debug("{}: k8s and kafka versions of topic '{}' are the same", logContext, kafkaTopic.getTopicName()); + LOGGER.debugCr(logContext.toReconciliation(), "KafkaTopic created in k8s and topic created in kafka, but they're identical => just creating in topicStore"); + LOGGER.debugCr(logContext.toReconciliation(), "k8s and kafka versions of topic '{}' are the same", kafkaTopic.getTopicName()); Topic privateTopic = new Topic.Builder(kafkaTopic) .withMapName(k8sTopic.getResourceName().toString()) .build(); @@ -682,7 +686,7 @@ private Future update2Way(Reconciliation reconciliation, LogContext logCon && !diff.changesNumPartitions() && diff.changesConfig() && disjoint(kafkaTopic.getConfig().keySet(), k8sTopic.getConfig().keySet())) { - LOGGER.debug("{}: KafkaTopic created in k8s and topic created in kafka, they differ only in topic config, and those configs are disjoint: Updating k8s and kafka, and creating in topic store", logContext); + LOGGER.debugCr(logContext.toReconciliation(), "KafkaTopic created in k8s and topic created in kafka, they differ only in topic config, and those configs are disjoint: Updating k8s and kafka, and creating in topic store"); Map mergedConfigs = new HashMap<>(kafkaTopic.getConfig()); mergedConfigs.putAll(k8sTopic.getConfig()); Topic mergedTopic = new Topic.Builder(kafkaTopic) @@ -693,14 +697,14 @@ && disjoint(kafkaTopic.getConfig().keySet(), k8sTopic.getConfig().keySet())) { .compose(updatedResource -> { reconciliation.observedTopicFuture(updatedResource); Promise x = Promise.promise(); - enqueue(new UpdateKafkaConfig(logContext, mergedTopic, involvedObject, x)); + enqueue(logContext, new UpdateKafkaConfig(logContext, mergedTopic, involvedObject, x)); return x.future().compose(ignore -> createInTopicStore(logContext, mergedTopic, involvedObject)); }); } else { // Just use kafka version, but also create a warning event - LOGGER.debug("{}: KafkaTopic created in k8s and topic created in kafka, and they are irreconcilably different => kafka version wins", logContext); + LOGGER.debugCr(logContext.toReconciliation(), "KafkaTopic created in k8s and topic created in kafka, and they are irreconcilably different => kafka version wins"); Promise eventPromise = Promise.promise(); - enqueue(new Event(involvedObject, "KafkaTopic is incompatible with the topic metadata. " + + enqueue(logContext, new Event(logContext, involvedObject, "KafkaTopic is incompatible with the topic metadata. 
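In update2Way, when both sides are newly seen and differ only in topic configs that touch disjoint keys, the two configs are merged rather than picking a winner. A minimal sketch of that disjointness check and merge, assuming flat string-valued config maps:

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

public class ConfigMerge {
    // If the two config maps touch different keys, they can be merged without
    // either side overwriting the other; otherwise the caller has to pick a winner.
    static Map<String, String> mergeIfDisjoint(Map<String, String> kafkaConfig, Map<String, String> k8sConfig) {
        if (!Collections.disjoint(kafkaConfig.keySet(), k8sConfig.keySet())) {
            return null;  // overlapping keys: not safely mergeable
        }
        Map<String, String> merged = new HashMap<>(kafkaConfig);
        merged.putAll(k8sConfig);
        return merged;
    }

    public static void main(String[] args) {
        Map<String, String> kafka = Map.of("retention.ms", "604800000");
        Map<String, String> k8s = Map.of("cleanup.policy", "compact");
        System.out.println(mergeIfDisjoint(kafka, k8s));  // both keys present in the result
    }
}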
" + "The topic metadata will be treated as canonical.", EventType.INFO, eventPromise)); reconciliationResultHandler = eventPromise.future() .compose(ignored -> @@ -723,28 +727,28 @@ private Future update3Way(Reconciliation reconciliation, LogContext logCon "Topic '" + kafkaTopic.getTopicName() + "' is already managed via KafkaTopic '" + privateTopic.getResourceName() + "' it cannot also be managed via the KafkaTopic '" + k8sTopic.getResourceName() + "'")); } TopicDiff oursKafka = TopicDiff.diff(privateTopic, kafkaTopic); - LOGGER.debug("{}: topicStore->kafkaTopic: {}", logContext, oursKafka); + LOGGER.debugCr(logContext.toReconciliation(), "topicStore->kafkaTopic: {}", oursKafka); TopicDiff oursK8s = TopicDiff.diff(privateTopic, k8sTopic); - LOGGER.debug("{}: topicStore->k8sTopic: {}", logContext, oursK8s); + LOGGER.debugCr(logContext.toReconciliation(), "topicStore->k8sTopic: {}", oursK8s); String conflict = oursKafka.conflict(oursK8s); if (conflict != null) { final String message = "KafkaTopic resource and Kafka topic both changed in a conflicting way: " + conflict; - LOGGER.error("{}: {}", logContext, message); - enqueue(new Event(involvedObject, message, EventType.INFO, eventResult -> { })); + LOGGER.errorCr(logContext.toReconciliation(), "{}", message); + enqueue(logContext, new Event(logContext, involvedObject, message, EventType.INFO, eventResult -> { })); reconciliationResultHandler = Future.failedFuture(new ConflictingChangesException(involvedObject, message)); } else { TopicDiff merged = oursKafka.merge(oursK8s); - LOGGER.debug("{}: Diffs do not conflict, merged diff: {}", logContext, merged); + LOGGER.debugCr(logContext.toReconciliation(), "Diffs do not conflict, merged diff: {}", merged); if (merged.isEmpty()) { - LOGGER.info("{}: All three topics are identical", logContext); + LOGGER.infoCr(logContext.toReconciliation(), "All three topics are identical"); reconciliationResultHandler = Future.succeededFuture(); } else { Topic result = merged.apply(privateTopic); int partitionsDelta = merged.numPartitionsDelta(); if (partitionsDelta < 0) { final String message = "Number of partitions cannot be decreased"; - LOGGER.error("{}: {}", logContext, message); - enqueue(new Event(involvedObject, message, EventType.INFO, eventResult -> { + LOGGER.errorCr(logContext.toReconciliation(), "{}", message); + enqueue(logContext, new Event(logContext, involvedObject, message, EventType.INFO, eventResult -> { })); reconciliationResultHandler = Future.failedFuture(new PartitionDecreaseException(involvedObject, message)); } else if (oursK8s.changesReplicationFactor() @@ -759,7 +763,7 @@ private Future update3Way(Reconciliation reconciliation, LogContext logCon // such that the old number of replicas < the new min isr? But likewise // we could decrease, so order of tasks in the queue will need to change // depending on what the diffs are. 
- LOGGER.debug("{}: Updating KafkaTopic, kafka topic and topicStore", logContext); + LOGGER.debugCr(logContext.toReconciliation(), "Updating KafkaTopic, kafka topic and topicStore"); TopicDiff kubeDiff = TopicDiff.diff(k8sTopic, result); reconciliationResultHandler = Future.succeededFuture() .compose(updatedKafkaTopic -> { @@ -769,23 +773,23 @@ private Future update3Way(Reconciliation reconciliation, LogContext logCon && !kafkaDiff.isEmpty()) { Promise promise = Promise.promise(); configFuture = promise.future(); - LOGGER.debug("{}: Updating kafka config with {}", logContext, kafkaDiff); - enqueue(new UpdateKafkaConfig(logContext, result, involvedObject, promise)); + LOGGER.debugCr(logContext.toReconciliation(), "Updating kafka config with {}", kafkaDiff); + enqueue(logContext, new UpdateKafkaConfig(logContext, result, involvedObject, promise)); } else { - LOGGER.debug("{}: No need to update kafka topic with {}", logContext, kafkaDiff); + LOGGER.debugCr(logContext.toReconciliation(), "No need to update kafka topic with {}", kafkaDiff); configFuture = Future.succeededFuture(); } return configFuture; }).compose(ignored -> { Future resourceFuture; if (!kubeDiff.isEmpty()) { - LOGGER.debug("{}: Updating KafkaTopic with {}", logContext, kubeDiff); + LOGGER.debugCr(logContext.toReconciliation(), "Updating KafkaTopic with {}", kubeDiff); resourceFuture = updateResource(logContext, result).map(updatedKafkaTopic -> { reconciliation.observedTopicFuture(updatedKafkaTopic); return updatedKafkaTopic; }); } else { - LOGGER.debug("{}: No need to update KafkaTopic {}", logContext, kubeDiff); + LOGGER.debugCr(logContext.toReconciliation(), "No need to update KafkaTopic {}", kubeDiff); resourceFuture = Future.succeededFuture(); } return resourceFuture; @@ -795,14 +799,14 @@ private Future update3Way(Reconciliation reconciliation, LogContext logCon // Kafka throws an error if we attempt a noop change #partitions && result.getNumPartitions() > kafkaTopic.getNumPartitions()) { Promise partitionsPromise = Promise.promise(); - enqueue(new IncreaseKafkaPartitions(logContext, result, involvedObject, partitionsPromise)); + enqueue(logContext, new IncreaseKafkaPartitions(logContext, result, involvedObject, partitionsPromise)); return partitionsPromise.future(); } else { return Future.succeededFuture(); } }).compose(ignored -> { Promise topicStorePromise = Promise.promise(); - enqueue(new UpdateInTopicStore(logContext, result, involvedObject, topicStorePromise)); + enqueue(logContext, new UpdateInTopicStore(logContext, result, involvedObject, topicStorePromise)); return topicStorePromise.future(); }); } @@ -811,8 +815,8 @@ private Future update3Way(Reconciliation reconciliation, LogContext logCon return reconciliationResultHandler; } - void enqueue(Handler event) { - LOGGER.debug("Enqueuing event {}", event); + void enqueue(LogContext logContext, Handler event) { + LOGGER.debugCr(logContext.toReconciliation(), "Enqueuing event {}", event); vertx.runOnContext(event); } @@ -823,7 +827,7 @@ Future onTopicDeleted(LogContext logContext, TopicName topicName) { .compose( ignored -> executeWithTopicLockHeld(logContext, topicName, - new Reconciliation("onTopicDeleted", true) { + new Reconciliation(logContext, "onTopicDeleted", true) { @Override public Future execute() { return reconcileOnTopicChange(logContext, topicName, null, this); @@ -835,8 +839,8 @@ public Future execute() { private Future awaitExistential(LogContext logContext, TopicName topicName, boolean checkExists) { String logState = "confirmed " + (checkExists ? 
"" : "non-") + "existence"; - AtomicReference> ref = new AtomicReference<>(kafka.topicExists(topicName)); - Future voidFuture = Util.waitFor(vertx, logContext.toString(), logState, 1_000, 60_000, + AtomicReference> ref = new AtomicReference<>(kafka.topicExists(logContext.toReconciliation(), topicName)); + Future voidFuture = Util.waitFor(logContext.toReconciliation(), vertx, logContext.toString(), logState, 1_000, 60_000, () -> { Future existsFuture = ref.get(); if (existsFuture.isComplete()) { @@ -845,7 +849,7 @@ private Future awaitExistential(LogContext logContext, TopicName topicName return true; } else { // It still exists (or still doesn't exist), so ask again, until we timeout - ref.set(kafka.topicExists(topicName)); + ref.set(kafka.topicExists(logContext.toReconciliation(), topicName)); return false; } } @@ -859,10 +863,10 @@ private Future awaitExistential(LogContext logContext, TopicName topicName */ Future onTopicConfigChanged(LogContext logContext, TopicName topicName) { return executeWithTopicLockHeld(logContext, topicName, - new Reconciliation("onTopicConfigChanged", true) { + new Reconciliation(logContext, "onTopicConfigChanged", true) { @Override public Future execute() { - return kafka.topicMetadata(topicName) + return kafka.topicMetadata(logContext.toReconciliation(), topicName) .compose(metadata -> { Topic topic = TopicSerialization.fromTopicMetadata(metadata); return reconcileOnTopicChange(logContext, topicName, topic, this); @@ -875,7 +879,7 @@ public Future execute() { * Called when ZK watch notifies of a change to the topic's partitions */ Future onTopicPartitionsChanged(LogContext logContext, TopicName topicName) { - Reconciliation action = new Reconciliation("onTopicPartitionsChanged", true) { + Reconciliation action = new Reconciliation(logContext, "onTopicPartitionsChanged", true) { @Override public Future execute() { Reconciliation self = this; @@ -893,9 +897,9 @@ public void handle(AsyncResult metadataResult) { // if partitions aren't changed on Kafka yet, we retry with exponential backoff if (topicResult.result().getNumPartitions() == kafkaTopic.getNumPartitions()) { - retry(); + retry(logContext.toReconciliation()); } else { - LOGGER.info("Topic {} partitions changed to {}", topicName, kafkaTopic.getNumPartitions()); + LOGGER.infoCr(logContext.toReconciliation(), "Topic {} partitions changed to {}", topicName, kafkaTopic.getNumPartitions()); reconcileOnTopicChange(logContext, topicName, kafkaTopic, self) .onComplete(promise); } @@ -916,7 +920,7 @@ public void onMaxAttemptsExceeded(MaxAttemptsExceededException e) { promise.complete(); } }; - kafka.topicMetadata(topicName).onComplete(handler); + kafka.topicMetadata(logContext.toReconciliation(), topicName).onComplete(handler); }); return promise.future(); } @@ -945,7 +949,7 @@ private Future reconcileOnTopicChange(LogContext logContext, TopicName top Future onTopicCreated(LogContext logContext, TopicName topicName) { // XXX currently runs on the ZK thread, requiring a synchronized inFlight // is it better to put this check in the topic deleted event? - Reconciliation action = new Reconciliation("onTopicCreated", true) { + Reconciliation action = new Reconciliation(logContext, "onTopicCreated", true) { @Override public Future execute() { Reconciliation self = this; @@ -960,7 +964,7 @@ public void handle(AsyncResult metadataResult) { // In this case it is most likely that we've been notified by ZK // before Kafka has finished creating the topic, so we retry // with exponential backoff. 
- retry(); + retry(logContext.toReconciliation()); } else { // We now have the metadata we need to create the // resource... @@ -979,7 +983,7 @@ public void onMaxAttemptsExceeded(MaxAttemptsExceededException e) { } }; return awaitExistential(logContext, topicName, true).compose(exists -> { - kafka.topicMetadata(topicName).onComplete(handler); + kafka.topicMetadata(logContext.toReconciliation(), topicName).onComplete(handler); return promise.future(); }); } @@ -988,17 +992,19 @@ public void onMaxAttemptsExceeded(MaxAttemptsExceededException e) { } abstract class Reconciliation { + private final LogContext logContext; private final String name; private final boolean watchedForMetrics; public AsyncResult result; public volatile KafkaTopic topic; Timer.Sample reconciliationTimerSample; - public Reconciliation(String name, boolean watchedForMetrics) { + public Reconciliation(LogContext logContext, String name, boolean watchedForMetrics) { + this.logContext = logContext; this.watchedForMetrics = watchedForMetrics; this.name = name; if (isEventWatched()) { - LOGGER.debug("Metric {} triggered", this.name); + LOGGER.debugCr(logContext.toReconciliation(), "Metric {} triggered", this.name); this.reconciliationTimerSample = Timer.start(metrics.meterRegistry()); reconciliationsCounter.increment(); } @@ -1006,7 +1012,7 @@ public Reconciliation(String name, boolean watchedForMetrics) { public void failed() { if (isEventWatched()) { - LOGGER.debug("failed reconciliation {}", name); + LOGGER.debugCr(logContext.toReconciliation(), "failed reconciliation {}", name); reconciliationTimerSample.stop(reconciliationsTimer); failedReconciliationsCounter.increment(); } @@ -1014,7 +1020,7 @@ public void failed() { public void succeeded() { if (isEventWatched()) { - LOGGER.debug("succeeded reconciliation {}", name); + LOGGER.debugCr(logContext.toReconciliation(), "succeeded reconciliation {}", name); reconciliationTimerSample.stop(reconciliationsTimer); successfulReconciliationsCounter.increment(); } @@ -1041,8 +1047,7 @@ private Future updateStatus(LogContext logContext) { Future statusFuture; if (topic != null) { // Get the existing status and if it's og == g and is has same status then don't update - LOGGER.debug("{}: There is a KafkaTopic to set status on, rv={}, generation={}", - logContext, + LOGGER.debugCr(logContext.toReconciliation(), "There is a KafkaTopic to set status on, rv={}, generation={}", topic.getMetadata().getResourceVersion(), topic.getMetadata().getGeneration()); KafkaTopicStatus kts = new KafkaTopicStatus(); @@ -1063,16 +1068,15 @@ private Future updateStatus(LogContext logContext) { if (!ksDiff.isEmpty()) { Promise promise = Promise.promise(); statusFuture = promise.future(); - k8s.updateResourceStatus(new KafkaTopicBuilder(topic).withStatus(kts).build()).onComplete(ar -> { + k8s.updateResourceStatus(logContext.toReconciliation(), new KafkaTopicBuilder(topic).withStatus(kts).build()).onComplete(ar -> { if (ar.succeeded() && ar.result() != null) { ObjectMeta metadata = ar.result().getMetadata(); - LOGGER.debug("{}: status was set rv={}, generation={}, observedGeneration={}", - logContext, + LOGGER.debugCr(logContext.toReconciliation(), "status was set rv={}, generation={}, observedGeneration={}", metadata.getResourceVersion(), metadata.getGeneration(), ar.result().getStatus().getObservedGeneration()); } else { - LOGGER.error("{}: Error setting resource status", logContext, ar.cause()); + LOGGER.errorCr(logContext.toReconciliation(), "Error setting resource status", ar.cause()); } 
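The abstract Reconciliation class above starts a Micrometer timer sample when a watched event fires and, on completion, stops it against the reconciliations timer while incrementing the successful or failed counter. Condensed into a standalone sketch (the metric names here are illustrative, not the operator's exact ones):

import io.micrometer.core.instrument.Counter;
import io.micrometer.core.instrument.MeterRegistry;
import io.micrometer.core.instrument.Timer;
import io.micrometer.core.instrument.simple.SimpleMeterRegistry;

public class ReconciliationMetrics {
    private final MeterRegistry registry;
    private final Counter successful;
    private final Counter failed;
    private final Timer duration;

    ReconciliationMetrics(MeterRegistry registry) {
        this.registry = registry;
        this.successful = registry.counter("strimzi.reconciliations.successful");
        this.failed = registry.counter("strimzi.reconciliations.failed");
        this.duration = registry.timer("strimzi.reconciliations.duration");
    }

    // Start timing when the event is observed...
    Timer.Sample start() {
        return Timer.start(registry);
    }

    // ...and stop it when the reconciliation finishes, recording the outcome.
    void finish(Timer.Sample sample, boolean succeededOutcome) {
        sample.stop(duration);
        (succeededOutcome ? successful : failed).increment();
    }

    public static void main(String[] args) {
        ReconciliationMetrics metrics = new ReconciliationMetrics(new SimpleMeterRegistry());
        Timer.Sample sample = metrics.start();
        metrics.finish(sample, true);
    }
}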
promise.handle(ar.map((Void) null)); }); @@ -1080,12 +1084,12 @@ private Future updateStatus(LogContext logContext) { statusFuture = Future.succeededFuture(); } } else { - LOGGER.debug("{}: No KafkaTopic to set status", logContext); + LOGGER.debugCr(logContext.toReconciliation(), "No KafkaTopic to set status"); statusFuture = Future.succeededFuture(); } return statusFuture; } catch (Throwable t) { - LOGGER.error("{}", logContext, t); + LOGGER.errorCr(logContext.toReconciliation(), "{}", t); return Future.failedFuture(t); } } @@ -1094,7 +1098,7 @@ private Future updateStatus(LogContext logContext) { /** Called when a resource is isModify in k8s */ Future onResourceEvent(LogContext logContext, KafkaTopic modifiedTopic, Watcher.Action action) { return executeWithTopicLockHeld(logContext, new TopicName(modifiedTopic), - new Reconciliation("onResourceEvent", false) { + new Reconciliation(logContext, "onResourceEvent", false) { @Override public Future execute() { return k8s.getFromName(new ResourceName(modifiedTopic)) @@ -1135,12 +1139,12 @@ private Future reconcileOnResourceChange(Reconciliation reconciliation, Lo return checkForNameChange(topicName, topicResource) .onComplete(nameChanged -> { if (nameChanged.failed()) { - enqueue(new Event(topicResource, + enqueue(logContext, new Event(logContext, topicResource, "Kafka topics cannot be renamed, but KafkaTopic's spec.topicName has changed.", EventType.WARNING, eventResult -> { })); } }) - .compose(i -> CompositeFuture.all(getFromKafka(topicName), getFromTopicStore(topicName)) + .compose(i -> CompositeFuture.all(getFromKafka(logContext.toReconciliation(), topicName), getFromTopicStore(topicName)) .compose(compositeResult -> { Topic kafkaTopic = compositeResult.resultAt(0); Topic privateTopic = compositeResult.resultAt(1); @@ -1151,7 +1155,7 @@ private Future reconcileOnResourceChange(Reconciliation reconciliation, Lo // When processing a Kafka-side deletion then when we delete the KT // We first receive a modify event (setting deletionTimestamp etc) // then the deleted event. We need to ignore the modify event. 
- LOGGER.debug("Ignoring pre-delete modify event"); + LOGGER.debugCr(logContext.toReconciliation(), "Ignoring pre-delete modify event"); reconciliation.observedTopicFuture(null); return Future.succeededFuture(); } else { @@ -1177,7 +1181,7 @@ public UpdateInTopicStore(LogContext logContext, Topic topic, HasMetadata involv public void handle(Void v) throws OperatorException { topicStore.update(topic).onComplete(ar -> { if (ar.failed()) { - enqueue(new Event(involvedObject, ar.cause().toString(), EventType.WARNING, eventResult -> { })); + enqueue(logContext, new Event(logContext, involvedObject, ar.cause().toString(), EventType.WARNING, eventResult -> { })); } handler.handle(ar); }); @@ -1191,7 +1195,7 @@ public String toString() { private Future createInTopicStore(LogContext logContext, Topic topic, HasMetadata involvedObject) { Promise result = Promise.promise(); - enqueue(new CreateInTopicStore(logContext, topic, involvedObject, result)); + enqueue(logContext, new CreateInTopicStore(logContext, topic, involvedObject, result)); return result.future(); } @@ -1211,14 +1215,14 @@ private CreateInTopicStore(LogContext logContext, Topic topic, HasMetadata invol @Override public void handle(Void v) throws OperatorException { - LOGGER.debug("Executing {}", this); + LOGGER.debugCr(logContext.toReconciliation(), "Executing {}", this); topicStore.create(topic).onComplete(ar -> { - LOGGER.debug("Completing {}", this); + LOGGER.debugCr(logContext.toReconciliation(), "Completing {}", this); if (ar.failed()) { - LOGGER.debug("{} failed", this); - enqueue(new Event(involvedObject, ar.cause().toString(), EventType.WARNING, eventResult -> { })); + LOGGER.debugCr(logContext.toReconciliation(), "{} failed", this); + enqueue(logContext, new Event(logContext, involvedObject, ar.cause().toString(), EventType.WARNING, eventResult -> { })); } else { - LOGGER.debug("{} succeeded", this); + LOGGER.debugCr(logContext.toReconciliation(), "{} succeeded", this); } handler.handle(ar); }); @@ -1233,7 +1237,7 @@ public String toString() { private Future deleteFromTopicStore(LogContext logContext, HasMetadata involvedObject, TopicName topicName) { Promise reconciliationResultHandler = Promise.promise(); - enqueue(new DeleteFromTopicStore(logContext, topicName, involvedObject, reconciliationResultHandler)); + enqueue(logContext, new DeleteFromTopicStore(logContext, topicName, involvedObject, reconciliationResultHandler)); return reconciliationResultHandler.future(); } @@ -1255,7 +1259,7 @@ private DeleteFromTopicStore(LogContext logContext, TopicName topicName, HasMeta public void handle(Void v) throws OperatorException { topicStore.delete(topicName).onComplete(ar -> { if (ar.failed()) { - enqueue(new Event(involvedObject, ar.cause().toString(), EventType.WARNING, eventResult -> { })); + enqueue(logContext, new Event(logContext, involvedObject, ar.cause().toString(), EventType.WARNING, eventResult -> { })); } handler.handle(ar); }); @@ -1268,7 +1272,7 @@ public String toString() { } public boolean isWorkInflight() { - LOGGER.debug("Outstanding: {}", inflight); + LOGGER.debugOp("Outstanding: {}", inflight); return inflight.size() > 0; } @@ -1307,7 +1311,7 @@ public void setKafkaTopics(List ktList) { } Future reconcileAllTopics(String reconciliationType) { - LOGGER.info("Starting {} reconciliation", reconciliationType); + LOGGER.infoOp("Starting {} reconciliation", reconciliationType); return kafka.listTopics().recover(ex -> Future.failedFuture( new OperatorException("Error listing existing topics during " + 
reconciliationType + " reconciliation", ex) )).compose(topicNamesFromKafka -> @@ -1329,18 +1333,18 @@ Future reconcileAllTopics(String reconciliationType) { if (Annotations.isReconciliationPausedWithAnnotation(kt)) { pausedTopicCounter.getAndIncrement(); } - LogContext logContext = LogContext.periodic(reconciliationType + "kube " + kt.getMetadata().getName()).withKubeTopic(kt); + LogContext logContext = LogContext.periodic(reconciliationType + "kube " + kt.getMetadata().getName(), kt.getMetadata().getNamespace(), kt.getMetadata().getName()).withKubeTopic(kt); Topic topic = TopicSerialization.fromTopicResource(kt); TopicName topicName = topic.getTopicName(); if (reconcileState.failed.containsKey(topicName)) { // we already failed to reconcile this topic in reconcileFromKafka(), / // don't bother trying again - LOGGER.trace("{}: Already failed to reconcile {}", logContext, topicName); + LOGGER.traceCr(logContext.toReconciliation(), "Already failed to reconcile {}", topicName); reconciliationsCounter.increment(); failedReconciliationsCounter.increment(); } else if (reconcileState.succeeded.contains(topicName)) { // we already succeeded in reconciling this topic in reconcileFromKafka() - LOGGER.trace("{}: Already successfully reconciled {}", logContext, topicName); + LOGGER.traceCr(logContext.toReconciliation(), "Already successfully reconciled {}", topicName); reconciliationsCounter.increment(); successfulReconciliationsCounter.increment(); } else if (reconcileState.undetermined.contains(topicName)) { @@ -1353,7 +1357,7 @@ Future reconcileAllTopics(String reconciliationType) { })); } else { // Topic exists in kube, but not in Kafka - LOGGER.debug("{}: Topic {} exists in Kubernetes, but not Kafka", logContext, topicName, logTopic(kt)); + LOGGER.debugCr(logContext.toReconciliation(), "Topic {} exists in Kubernetes, but not Kafka", topicName, logTopic(kt)); futs.add(reconcileWithKubeTopic(logContext, kt, reconciliationType, new ResourceName(kt), topic.getTopicName()).compose(r -> { // if success then add to success reconcileState.succeeded.add(topicName); @@ -1368,8 +1372,8 @@ Future reconcileAllTopics(String reconciliationType) { } // anything left in undetermined doesn't exist in topic store nor kube for (TopicName tn : reconcileState.undetermined) { - LogContext logContext = LogContext.periodic(reconciliationType + "-" + tn); - futs2.add(executeWithTopicLockHeld(logContext, tn, new Reconciliation("delete-remaining", true) { + LogContext logContext = LogContext.periodic(reconciliationType + "-" + tn, namespace, tn.asKubeName().toString()); + futs2.add(executeWithTopicLockHeld(logContext, tn, new Reconciliation(logContext, "delete-remaining", true) { @Override public Future execute() { observedTopicFuture(null); @@ -1391,14 +1395,14 @@ private Future reconcileFromKafka(String reconciliationType, Lis Set undetermined = new HashSet<>(); Map failed = new HashMap<>(); - LOGGER.debug("Reconciling kafka topics {}", topicsFromKafka); + LOGGER.debugOp("Reconciling kafka topics {}", topicsFromKafka); final ReconcileState state = new ReconcileState(succeeded, undetermined, failed); if (topicsFromKafka.size() > 0) { List> futures = new ArrayList<>(); for (TopicName topicName : topicsFromKafka) { - LogContext logContext = LogContext.periodic(reconciliationType + "kafka " + topicName); - futures.add(executeWithTopicLockHeld(logContext, topicName, new Reconciliation("reconcile-from-kafka", false) { + LogContext logContext = LogContext.periodic(reconciliationType + "kafka " + topicName, namespace, 
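The periodic-reconciliation hunks now construct the LogContext with the resource's namespace and name in addition to the trigger string, so the reconciliation derived from it identifies the exact CR. A hypothetical, much-reduced holder showing the idea (the real LogContext also tracks a per-trigger counter and the observed KafkaTopic):

// Hypothetical, simplified log context: it remembers what triggered the
// reconciliation and which resource it concerns, and renders a marker string
// such as "periodic|myproject/my-topic" for per-CR log filtering.
public class SimpleLogContext {
    private final String trigger;
    private final String namespace;
    private final String name;

    private SimpleLogContext(String trigger, String namespace, String name) {
        this.trigger = trigger;
        this.namespace = namespace;
        this.name = name;
    }

    public static SimpleLogContext periodic(String trigger, String namespace, String name) {
        return new SimpleLogContext(trigger, namespace, name);
    }

    public String marker() {
        return trigger + "|" + namespace + "/" + name;
    }

    public static void main(String[] args) {
        SimpleLogContext ctx = SimpleLogContext.periodic("periodic", "myproject", "my-topic");
        System.out.println(ctx.marker());
    }
}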
topicName.asKubeName().toString()); + futures.add(executeWithTopicLockHeld(logContext, topicName, new Reconciliation(logContext, "reconcile-from-kafka", false) { @Override public Future execute() { return getFromTopicStore(topicName).recover(error -> { @@ -1408,18 +1412,18 @@ public Future execute() { return Future.succeededFuture(); }).compose(topic -> { if (topic == null) { - LOGGER.debug("{}: No private topic for topic {} in Kafka -> undetermined", logContext, topicName); + LOGGER.debugCr(logContext.toReconciliation(), "No private topic for topic {} in Kafka -> undetermined", topicName); undetermined.add(topicName); return Future.succeededFuture(); } else { - LOGGER.debug("{}: Have private topic for topic {} in Kafka", logContext, topicName); + LOGGER.debugCr(logContext.toReconciliation(), "Have private topic for topic {} in Kafka", topicName); return reconcileWithPrivateTopic(logContext, topicName, topic, this) .map(ignored -> { - LOGGER.debug("{} reconcile success -> succeeded", topicName); + LOGGER.debugCr(logContext.toReconciliation(), "{} reconcile success -> succeeded", topicName); succeeded.add(topicName); return null; }).recover(error -> { - LOGGER.debug("{} reconcile error -> failed", topicName); + LOGGER.debugCr(logContext.toReconciliation(), "{} reconcile error -> failed", topicName); failed.put(topicName, error); return Future.failedFuture(error); }); @@ -1451,8 +1455,7 @@ private Future reconcileWithPrivateTopic(LogContext logContext, TopicName Reconciliation reconciliation) { return k8s.getFromName(privateTopic.getResourceName()) .recover(error -> { - LOGGER.error("{}: Error getting KafkaTopic {} for topic {}", - logContext, + LOGGER.errorCr(logContext.toReconciliation(), "Error getting KafkaTopic {} for topic {}", topicName.asKubeName(), topicName, error); return Future.failedFuture(new OperatorException("Error getting KafkaTopic " + topicName.asKubeName() + " during " + logContext.trigger() + " reconciliation", error)); }) @@ -1471,12 +1474,12 @@ private Future getKafkaAndReconcile(Reconciliation reconciliation, LogCont checkForNameChange(topicName, kafkaTopicResource) .onComplete(nameChanged -> { if (nameChanged.failed()) { - enqueue(new Event(kafkaTopicResource, + enqueue(logContext, new Event(logContext, kafkaTopicResource, "Kafka topics cannot be renamed, but KafkaTopic's spec.topicName has changed.", EventType.WARNING, eventResult -> { })); } }) - .compose(i -> kafka.topicMetadata(topicName)) + .compose(i -> kafka.topicMetadata(logContext.toReconciliation(), topicName)) .compose(kafkaTopicMeta -> { Topic topicFromKafka = TopicSerialization.fromTopicMetadata(kafkaTopicMeta); return reconcile(reconciliation, logContext, kafkaTopicResource, k8sTopic, topicFromKafka, privateTopic); @@ -1484,27 +1487,27 @@ private Future getKafkaAndReconcile(Reconciliation reconciliation, LogCont .onComplete(ar -> { if (ar.failed()) { reconciliation.failed(); - LOGGER.error("Error reconciling KafkaTopic {}", logTopic(kafkaTopicResource), ar.cause()); + LOGGER.errorCr(logContext.toReconciliation(), "Error reconciling KafkaTopic {}", logTopic(kafkaTopicResource), ar.cause()); } else { reconciliation.succeeded(); - LOGGER.info("Success reconciling KafkaTopic {}", logTopic(kafkaTopicResource)); + LOGGER.infoCr(logContext.toReconciliation(), "Success reconciling KafkaTopic {}", logTopic(kafkaTopicResource)); } topicPromise.handle(ar); }); } catch (InvalidTopicException e) { reconciliation.failed(); - LOGGER.error("Error reconciling KafkaTopic {}: Invalid resource: ", 
logTopic(kafkaTopicResource), e.getMessage()); + LOGGER.errorCr(logContext.toReconciliation(), "Error reconciling KafkaTopic {}: Invalid resource: ", logTopic(kafkaTopicResource), e.getMessage()); topicPromise.fail(e); } catch (OperatorException e) { reconciliation.failed(); - LOGGER.error("Error reconciling KafkaTopic {}", logTopic(kafkaTopicResource), e); + LOGGER.errorCr(logContext.toReconciliation(), "Error reconciling KafkaTopic {}", logTopic(kafkaTopicResource), e); topicPromise.fail(e); } return topicPromise.future(); } - Future getFromKafka(TopicName topicName) { - return kafka.topicMetadata(topicName).map(TopicSerialization::fromTopicMetadata); + Future getFromKafka(io.strimzi.operator.common.Reconciliation reconciliation, TopicName topicName) { + return kafka.topicMetadata(reconciliation, topicName).map(TopicSerialization::fromTopicMetadata); } Future getFromTopicStore(TopicName topicName) { @@ -1513,7 +1516,7 @@ Future getFromTopicStore(TopicName topicName) { private Future reconcileWithKubeTopic(LogContext logContext, HasMetadata involvedObject, String reconciliationType, ResourceName kubeName, TopicName topicName) { - return executeWithTopicLockHeld(logContext, topicName, new Reconciliation("reconcile-with-kube", true) { + return executeWithTopicLockHeld(logContext, topicName, new Reconciliation(logContext, "reconcile-with-kube", true) { @Override public Future execute() { Reconciliation self = this; @@ -1522,7 +1525,7 @@ public Future execute() { observedTopicFuture(kt); return kt; }), - getFromKafka(topicName), + getFromKafka(logContext.toReconciliation(), topicName), getFromTopicStore(topicName)) .compose(compositeResult -> { KafkaTopic ktr = compositeResult.resultAt(0); @@ -1536,5 +1539,9 @@ public Future execute() { }); } + public String getNamespace() { + return this.namespace; + } + } diff --git a/topic-operator/src/main/java/io/strimzi/operator/topic/TopicStore.java b/topic-operator/src/main/java/io/strimzi/operator/topic/TopicStore.java index d8477c6454..69efbee30e 100644 --- a/topic-operator/src/main/java/io/strimzi/operator/topic/TopicStore.java +++ b/topic-operator/src/main/java/io/strimzi/operator/topic/TopicStore.java @@ -34,6 +34,7 @@ public static class InvalidStateException extends Exception { * completing the returned future when done. * If no topic with the given name exists, the future will complete with * a null result. + * * @param name The name of the topic. * @return A future which completes with the given topic. */ @@ -44,6 +45,7 @@ public static class InvalidStateException extends Exception { * completing the returned future when done. * If a topic with the given name already exists, the future will complete with an * {@link EntityExistsException}. + * * @param topic The topic. * @return A future which completes when the given topic has been created. */ @@ -54,6 +56,7 @@ public static class InvalidStateException extends Exception { * completing the returned future when done. * If no topic with the given name exists, the future will complete with a * {@link NoSuchEntityExistsException}. + * * @param topic The topic. * @return A future which completes when the given topic has been updated. */ @@ -64,6 +67,7 @@ public static class InvalidStateException extends Exception { * completing the returned future when done. * If no topic with the given name exists, the future will complete with a * {@link NoSuchEntityExistsException}. + * * @param topic The topic. * @return A future which completes when the given topic has been deleted. 
*/ diff --git a/topic-operator/src/main/java/io/strimzi/operator/topic/Zk2KafkaStreams.java b/topic-operator/src/main/java/io/strimzi/operator/topic/Zk2KafkaStreams.java index 79ca77343b..7b85667acd 100644 --- a/topic-operator/src/main/java/io/strimzi/operator/topic/Zk2KafkaStreams.java +++ b/topic-operator/src/main/java/io/strimzi/operator/topic/Zk2KafkaStreams.java @@ -21,7 +21,7 @@ * Migration tool to move ZkTopicStore to KafkaStreamsTopicStore. */ public class Zk2KafkaStreams { - private static final Logger log = LoggerFactory.getLogger(Zk2KafkaStreams.class); + private static final Logger LOGGER = LoggerFactory.getLogger(Zk2KafkaStreams.class); public static CompletionStage upgrade( Zk zk, @@ -31,17 +31,17 @@ public static CompletionStage upgrade( ) { String topicsPath = config.get(Config.TOPICS_PATH); - log.info("Upgrading topic store [{}]: {}", doStop, topicsPath); + LOGGER.info("Upgrading topic store [{}]: {}", doStop, topicsPath); TopicStore zkTopicStore = new TempZkTopicStore(zk, topicsPath); KafkaStreamsTopicStoreService service = new KafkaStreamsTopicStoreService(); return service.start(config, kafkaProperties) .thenCompose(ksTopicStore -> { - log.info("Starting upgrade ..."); + LOGGER.info("Starting upgrade ..."); @SuppressWarnings("rawtypes") List results = new ArrayList<>(); List list = zk.getChildren(topicsPath); - log.info("Topics to upgrade: {}", list); + LOGGER.info("Topics to upgrade: {}", list); list.forEach(topicName -> { TopicName tn = new TopicName(topicName); Future ft = zkTopicStore.read(tn); @@ -65,7 +65,7 @@ public static CompletionStage upgrade( return result; }) .thenRun(() -> { - log.info("Deleting ZK topics path: {}", topicsPath); + LOGGER.info("Deleting ZK topics path: {}", topicsPath); zk.delete(topicsPath, -1); }) .whenCompleteAsync((v, t) -> { @@ -73,7 +73,7 @@ public static CompletionStage upgrade( if (doStop || t != null) { service.stop(); } - log.info("Upgrade complete", t); + LOGGER.info("Upgrade complete", t); }) .thenApply(v -> service); } diff --git a/topic-operator/src/main/java/io/strimzi/operator/topic/ZkTopicWatcher.java b/topic-operator/src/main/java/io/strimzi/operator/topic/ZkTopicWatcher.java index 5b94b290fe..0728f7568d 100644 --- a/topic-operator/src/main/java/io/strimzi/operator/topic/ZkTopicWatcher.java +++ b/topic-operator/src/main/java/io/strimzi/operator/topic/ZkTopicWatcher.java @@ -19,11 +19,11 @@ public class ZkTopicWatcher extends ZkWatcher { @Override protected void notifyOperator(String child) { - LogContext logContext = LogContext.zkWatch(TOPICS_ZNODE, "=" + child); - log.info("{}: Partitions change", logContext); + LogContext logContext = LogContext.zkWatch(TOPICS_ZNODE, "=" + child, topicOperator.getNamespace(), child); + logger.infoCr(logContext.toReconciliation(), "Partitions change"); topicOperator.onTopicPartitionsChanged(logContext, new TopicName(child)).onComplete(ar -> { - log.info("{}: Reconciliation result due to topic partitions change on topic {}: {}", logContext, child, ar); + logger.infoCr(logContext.toReconciliation(), "Reconciliation result due to topic partitions change on topic {}: {}", child, ar); }); } } diff --git a/topic-operator/src/main/java/io/strimzi/operator/topic/ZkTopicsWatcher.java b/topic-operator/src/main/java/io/strimzi/operator/topic/ZkTopicsWatcher.java index ad926ad598..e8061ea6bb 100644 --- a/topic-operator/src/main/java/io/strimzi/operator/topic/ZkTopicsWatcher.java +++ b/topic-operator/src/main/java/io/strimzi/operator/topic/ZkTopicsWatcher.java @@ -4,12 +4,11 @@ */ package 
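Zk2KafkaStreams is the one-shot migration that reads every topic from the old ZooKeeper-backed store, writes it into the new Kafka Streams-backed store, and only then deletes the old ZooKeeper path. A generic sketch of that copy-then-delete shape, using in-memory maps as stand-ins for the two stores:

import java.util.List;
import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.stream.Collectors;

public class StoreMigrationSketch {
    // Copy every entry from the old store to the new one; clear the old store
    // only after all copies have completed.
    static CompletableFuture<Void> migrate(Map<String, String> oldStore, Map<String, String> newStore) {
        List<CompletableFuture<Void>> copies = oldStore.entrySet().stream()
                .map(e -> CompletableFuture.runAsync(() -> newStore.put(e.getKey(), e.getValue())))
                .collect(Collectors.toList());
        return CompletableFuture.allOf(copies.toArray(new CompletableFuture[0]))
                .thenRun(oldStore::clear);
    }

    public static void main(String[] args) throws Exception {
        Map<String, String> zkStore = new ConcurrentHashMap<>(Map.of("my-topic", "serialized-topic-data"));
        Map<String, String> streamsStore = new ConcurrentHashMap<>();
        migrate(zkStore, streamsStore).get();
        System.out.println(streamsStore.keySet());  // [my-topic]
    }
}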
io.strimzi.operator.topic; +import io.strimzi.operator.common.ReconciliationLogger; import io.strimzi.operator.topic.zk.Zk; import io.vertx.core.AsyncResult; import io.vertx.core.Future; import io.vertx.core.Handler; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import java.util.HashSet; import java.util.List; @@ -22,7 +21,7 @@ */ class ZkTopicsWatcher { - private final static Logger LOGGER = LogManager.getLogger(ZkTopicsWatcher.class); + private final static ReconciliationLogger LOGGER = ReconciliationLogger.create(ZkTopicsWatcher.class); private static final String TOPICS_ZNODE = "/brokers/topics"; @@ -66,11 +65,11 @@ void start(Zk zk) { zk.watchChildren(TOPICS_ZNODE, new ChildrenWatchHandler(zk)).compose(zk2 -> { zk.children(TOPICS_ZNODE, childResult -> { if (childResult.failed()) { - LOGGER.error("Error on znode {} children", TOPICS_ZNODE, childResult.cause()); + LOGGER.errorOp("Error on znode {} children", TOPICS_ZNODE, childResult.cause()); return; } List result = childResult.result(); - LOGGER.debug("Setting initial children {}", result); + LOGGER.debugOp("Setting initial children {}", result); synchronized (this) { this.children = result; } @@ -104,7 +103,7 @@ public void handle(AsyncResult> childResult) { return; } if (childResult.failed()) { - LOGGER.error("Error on znode {} children", TOPICS_ZNODE, childResult.cause()); + LOGGER.errorOp("Error on znode {} children", TOPICS_ZNODE, childResult.cause()); return; } ++watchCount; @@ -112,7 +111,7 @@ public void handle(AsyncResult> childResult) { Set deleted; Set created; synchronized (ZkTopicsWatcher.this) { - LOGGER.debug("{}: znode {} now has children {}, previous children {}", watchCount, TOPICS_ZNODE, result, ZkTopicsWatcher.this.children); + LOGGER.debugOp("{}: znode {} now has children {}, previous children {}", watchCount, TOPICS_ZNODE, result, ZkTopicsWatcher.this.children); List oldChildren = ZkTopicsWatcher.this.children; if (oldChildren == null) { return; @@ -124,33 +123,33 @@ public void handle(AsyncResult> childResult) { ZkTopicsWatcher.this.children = result; } - LOGGER.info("Topics deleted from ZK for watch {}: {}", watchCount, deleted); + LOGGER.infoOp("Topics deleted from ZK for watch {}: {}", watchCount, deleted); if (!deleted.isEmpty()) { for (String topicName : deleted) { tcw.removeChild(topicName); tw.removeChild(topicName); - LogContext logContext = LogContext.zkWatch(TOPICS_ZNODE, watchCount + ":-" + topicName); + LogContext logContext = LogContext.zkWatch(TOPICS_ZNODE, watchCount + ":-" + topicName, topicOperator.getNamespace(), topicName); topicOperator.onTopicDeleted(logContext, new TopicName(topicName)).onComplete(ar -> { if (ar.succeeded()) { - LOGGER.debug("{}: Success responding to deletion of topic {}", logContext, topicName); + LOGGER.debugCr(logContext.toReconciliation(), "Success responding to deletion of topic {}", topicName); } else { - LOGGER.warn("{}: Error responding to deletion of topic {}", logContext, topicName, ar.cause()); + LOGGER.warnCr(logContext.toReconciliation(), "Error responding to deletion of topic {}", topicName, ar.cause()); } }); } } - LOGGER.info("Topics created in ZK for watch {}: {}", watchCount, created); + LOGGER.infoOp("Topics created in ZK for watch {}: {}", watchCount, created); if (!created.isEmpty()) { for (String topicName : created) { tcw.addChild(topicName); tw.addChild(topicName); - LogContext logContext = LogContext.zkWatch(TOPICS_ZNODE, watchCount + ":+" + topicName); + LogContext logContext = 
LogContext.zkWatch(TOPICS_ZNODE, watchCount + ":+" + topicName, topicOperator.getNamespace(), topicName); topicOperator.onTopicCreated(logContext, new TopicName(topicName)).onComplete(ar -> { if (ar.succeeded()) { - LOGGER.debug("{}: Success responding to creation of topic {}", logContext, topicName); + LOGGER.debugCr(logContext.toReconciliation(), "Success responding to creation of topic {}", topicName); } else { - LOGGER.warn("{}: Error responding to creation of topic {}", logContext, topicName, ar.cause()); + LOGGER.warnCr(logContext.toReconciliation(), "Error responding to creation of topic {}", topicName, ar.cause()); } }); } diff --git a/topic-operator/src/main/java/io/strimzi/operator/topic/ZkWatcher.java b/topic-operator/src/main/java/io/strimzi/operator/topic/ZkWatcher.java index a3ccd89598..b83d297f91 100644 --- a/topic-operator/src/main/java/io/strimzi/operator/topic/ZkWatcher.java +++ b/topic-operator/src/main/java/io/strimzi/operator/topic/ZkWatcher.java @@ -4,12 +4,11 @@ */ package io.strimzi.operator.topic; +import io.strimzi.operator.common.ReconciliationLogger; import io.strimzi.operator.topic.zk.Zk; import io.vertx.core.AsyncResult; import io.vertx.core.Future; import io.vertx.core.Handler; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import java.util.concurrent.ConcurrentHashMap; @@ -18,7 +17,7 @@ */ public abstract class ZkWatcher { - protected Logger log = LogManager.getLogger(getClass()); + protected ReconciliationLogger logger = ReconciliationLogger.create(getClass()); protected final TopicOperator topicOperator; private volatile ZkWatcherState state = ZkWatcherState.NOT_STARTED; @@ -27,6 +26,8 @@ public abstract class ZkWatcher { private final ConcurrentHashMap children = new ConcurrentHashMap<>(); private final String rootZNode; + protected static final String CONFIGS_ZNODE = "/config/topics"; + /** * Constructor * @@ -69,8 +70,9 @@ protected boolean started() { */ protected void addChild(String child) { this.children.put(child, false); + LogContext logContext = LogContext.zkWatch(CONFIGS_ZNODE, "=" + child, topicOperator.getNamespace(), child); String path = getPath(child); - log.debug("Watching znode {} for changes", path); + logger.debugCr(logContext.toReconciliation(), "Watching znode {} for changes", path); Handler> handler = dataResult -> { if (dataResult.succeeded()) { this.children.compute(child, (k, v) -> { @@ -80,7 +82,7 @@ protected void addChild(String child) { return true; }); } else { - log.error("While getting or watching znode {}", path, dataResult.cause()); + logger.errorCr(logContext.toReconciliation(), "While getting or watching znode {}", path, dataResult.cause()); } }; zk.watchData(path, handler).compose(zk2 -> { @@ -95,7 +97,8 @@ protected void addChild(String child) { * @param child child to unwatch */ protected void removeChild(String child) { - log.debug("Unwatching znode {} for changes", child); + LogContext logContext = LogContext.zkWatch(CONFIGS_ZNODE, "=" + child, topicOperator.getNamespace(), child); + logger.debugCr(logContext.toReconciliation(), "Unwatching znode {} for changes", child); this.children.remove(child); zk.unwatchData(getPath(child)); } diff --git a/topic-operator/src/main/java/io/strimzi/operator/topic/zk/AclBuilder.java b/topic-operator/src/main/java/io/strimzi/operator/topic/zk/AclBuilder.java index 6c3a115633..08531771c0 100644 --- a/topic-operator/src/main/java/io/strimzi/operator/topic/zk/AclBuilder.java +++ 
b/topic-operator/src/main/java/io/strimzi/operator/topic/zk/AclBuilder.java @@ -85,6 +85,7 @@ private Map getDigests() { /** * Set the given permissions for all users (including unauthenticated users). + * * @param permissions The permissions. * @return This instance. */ @@ -107,6 +108,7 @@ private Map getHosts() { /** * Set the given permissions for users connecting from the most * significant {@code bits} given IP {@code address}. + * * @param address The IP address to add. * @param bits The number of bits in the IP address. * @param permissions The permissions for users connecting from matching IP addresses. diff --git a/topic-operator/src/main/java/io/strimzi/operator/topic/zk/Zk.java b/topic-operator/src/main/java/io/strimzi/operator/topic/zk/Zk.java index 1b8c504fd1..a4e21e8c68 100644 --- a/topic-operator/src/main/java/io/strimzi/operator/topic/zk/Zk.java +++ b/topic-operator/src/main/java/io/strimzi/operator/topic/zk/Zk.java @@ -40,6 +40,7 @@ static Zk createSync(Vertx vertx, String zkConnectionString, int sessionTimeout, /** * Disconnect from the ZooKeeper server, asynchronously. + * * @param handler The result handler. * @return This instance. */ @@ -48,6 +49,7 @@ static Zk createSync(Vertx vertx, String zkConnectionString, int sessionTimeout, /** * Asynchronously create the znode at the given path and with the given data and ACL, using the * given createMode, then invoke the given handler with the result. + * * @param path The path. * @param data The data. * @param acls The ACLs. @@ -60,6 +62,7 @@ static Zk createSync(Vertx vertx, String zkConnectionString, int sessionTimeout, /** * Asynchronously delete the znode at the given path, iff the given version is -1 or matches the version of the znode, * then invoke the given handler with the result. + * * @param path The path. * @param version The version. * @param handler The result handler. @@ -71,6 +74,7 @@ static Zk createSync(Vertx vertx, String zkConnectionString, int sessionTimeout, * Asynchronously set the data in the znode at the given path to the * given data iff the given version is -1, or matches the version of the znode, * then invoke the given handler with the result. + * * @param path The path. * @param data The data. * @param version The version. @@ -82,6 +86,7 @@ static Zk createSync(Vertx vertx, String zkConnectionString, int sessionTimeout, /** * Asynchronously fetch the children of the znode at the given {@code path}, calling the given * handler with the result. + * * @param path The path. * @param handler The result handler. * @return This instance. @@ -94,6 +99,7 @@ static Zk createSync(Vertx vertx, String zkConnectionString, int sessionTimeout, * A subsequent call to {@link #children(String, Handler)} with the same path will register the child {@code watcher} * for the given {@code path} current at that time with zookeeper so * that that {@code watcher} is called when the children of the given {@code path} change. + * * @param path The path. * @param watcher The watcher. * @return This instance. @@ -102,6 +108,7 @@ static Zk createSync(Vertx vertx, String zkConnectionString, int sessionTimeout, /** * Remove the children watcher, if any, for the given {@code path}. + * * @param path The path. * @return This instance. */ @@ -110,6 +117,7 @@ static Zk createSync(Vertx vertx, String zkConnectionString, int sessionTimeout, /** * Asynchronously fetch the data of the given znode at the given path, calling the given handler * with the result. + * * @param path The path. * @param handler The result handler. 
* @return This instance. @@ -122,6 +130,7 @@ static Zk createSync(Vertx vertx, String zkConnectionString, int sessionTimeout, * A subsequent call to {@link #getData(String, Handler)} with the same path will register the data {@code watcher} * for the given {@code path} current at that time with zookeeper so * that that {@code watcher} is called when the data of the given {@code path} changes. + * * @param path The path. * @param watcher The result handler. * @return This instance @@ -130,6 +139,7 @@ static Zk createSync(Vertx vertx, String zkConnectionString, int sessionTimeout, /** * Remove the data watcher, if any, for the given {@code path}. + * * @param path The path. * @return This instance. */ diff --git a/topic-operator/src/test/java/io/strimzi/operator/topic/KafkaImplTest.java b/topic-operator/src/test/java/io/strimzi/operator/topic/KafkaImplTest.java index d12b444e41..d2c226d41c 100644 --- a/topic-operator/src/test/java/io/strimzi/operator/topic/KafkaImplTest.java +++ b/topic-operator/src/test/java/io/strimzi/operator/topic/KafkaImplTest.java @@ -7,6 +7,7 @@ import java.util.Map; import java.util.Optional; +import io.strimzi.operator.common.Reconciliation; import io.vertx.core.Vertx; import io.vertx.junit5.VertxExtension; import io.vertx.junit5.VertxTestContext; @@ -175,7 +176,7 @@ public void testTopicMetadataBothNotFound(VertxTestContext testContext) { Either.ofRight(new UnknownTopicOrPartitionException()))); KafkaImpl impl = new KafkaImpl(admin, vertx); - impl.topicMetadata(new TopicName("test")).onComplete(testContext.succeeding(topicMetadata -> testContext.verify(() -> { + impl.topicMetadata(Reconciliation.DUMMY_RECONCILIATION, new TopicName("test")).onComplete(testContext.succeeding(topicMetadata -> testContext.verify(() -> { assertNull(topicMetadata); testContext.completeNow(); }))); @@ -191,7 +192,7 @@ public void testTopicMetadataDescribeTopicNotFound(VertxTestContext testContext) Either.ofLeft(mock(Config.class)))); KafkaImpl impl = new KafkaImpl(admin, vertx); - impl.topicMetadata(new TopicName("test")).onComplete(testContext.succeeding(topicMetadata -> testContext.verify(() -> { + impl.topicMetadata(Reconciliation.DUMMY_RECONCILIATION, new TopicName("test")).onComplete(testContext.succeeding(topicMetadata -> testContext.verify(() -> { assertNull(topicMetadata); testContext.completeNow(); }))); @@ -207,7 +208,7 @@ public void testTopicMetadataDescribeConfigsNotFound(VertxTestContext testContex Either.ofRight(new UnknownTopicOrPartitionException()))); KafkaImpl impl = new KafkaImpl(admin, vertx); - impl.topicMetadata(new TopicName("test")).onComplete(testContext.succeeding(topicMetadata -> testContext.verify(() -> { + impl.topicMetadata(Reconciliation.DUMMY_RECONCILIATION, new TopicName("test")).onComplete(testContext.succeeding(topicMetadata -> testContext.verify(() -> { assertNull(topicMetadata); testContext.completeNow(); }))); @@ -222,7 +223,7 @@ public void testTopicMetadataBothFound(VertxTestContext testContext) { Either.ofLeft(mock(Config.class)))); KafkaImpl impl = new KafkaImpl(admin, vertx); - impl.topicMetadata(new TopicName("test")).onComplete(testContext.succeeding(topicMetadata -> testContext.verify(() -> { + impl.topicMetadata(Reconciliation.DUMMY_RECONCILIATION, new TopicName("test")).onComplete(testContext.succeeding(topicMetadata -> testContext.verify(() -> { assertNotNull(topicMetadata); assertNotNull(topicMetadata.getDescription()); assertNotNull(topicMetadata.getConfig()); @@ -239,7 +240,7 @@ public void 
testTopicMetadataDescribeTimeout(VertxTestContext testContext) { Either.ofRight(new TimeoutException()))); KafkaImpl impl = new KafkaImpl(admin, vertx); - impl.topicMetadata(new TopicName("test")).onComplete(testContext.failing(error -> testContext.verify(() -> { + impl.topicMetadata(Reconciliation.DUMMY_RECONCILIATION, new TopicName("test")).onComplete(testContext.failing(error -> testContext.verify(() -> { assertTrue(error instanceof TimeoutException); testContext.completeNow(); }))); @@ -252,7 +253,7 @@ public void testDelete(VertxTestContext testContext) { mockDeleteTopics(admin, singletonMap("test", Either.ofLeft(null))); KafkaImpl impl = new KafkaImpl(admin, vertx); - impl.deleteTopic(new TopicName("test")) + impl.deleteTopic(Reconciliation.DUMMY_RECONCILIATION, new TopicName("test")) .onComplete(testContext.succeeding(error -> testContext.verify(testContext::completeNow))); } @@ -264,7 +265,7 @@ public void testDeleteDeleteTimeout(VertxTestContext testContext) { mockDeleteTopics(admin, singletonMap("test", Either.ofRight(new TimeoutException()))); KafkaImpl impl = new KafkaImpl(admin, vertx); - impl.deleteTopic(new TopicName("test")).onComplete(testContext.failing(error -> testContext.verify(() -> { + impl.deleteTopic(Reconciliation.DUMMY_RECONCILIATION, new TopicName("test")).onComplete(testContext.failing(error -> testContext.verify(() -> { assertTrue(error instanceof TimeoutException); testContext.completeNow(); }))); diff --git a/topic-operator/src/test/java/io/strimzi/operator/topic/MockK8s.java b/topic-operator/src/test/java/io/strimzi/operator/topic/MockK8s.java index 4bfe8af5c5..45286a6934 100644 --- a/topic-operator/src/test/java/io/strimzi/operator/topic/MockK8s.java +++ b/topic-operator/src/test/java/io/strimzi/operator/topic/MockK8s.java @@ -9,6 +9,7 @@ import io.strimzi.api.kafka.model.KafkaTopicBuilder; import io.strimzi.api.kafka.model.status.KafkaTopicStatus; import io.strimzi.api.kafka.model.status.KafkaTopicStatusBuilder; +import io.strimzi.operator.common.Reconciliation; import io.vertx.core.AsyncResult; import io.vertx.core.Future; import io.vertx.core.Promise; @@ -131,7 +132,7 @@ public List getStatuses() { } @Override - public Future updateResourceStatus(KafkaTopic topicResource) { + public Future updateResourceStatus(Reconciliation reconciliation, KafkaTopic topicResource) { statuses.add(new KafkaTopicStatusBuilder(topicResource.getStatus()).build()); Long generation = topicResource.getMetadata().getGeneration(); return Future.succeededFuture(new KafkaTopicBuilder(topicResource) @@ -142,7 +143,7 @@ public Future updateResourceStatus(KafkaTopic topicResource) { } @Override - public Future deleteResource(ResourceName resourceName) { + public Future deleteResource(Reconciliation reconciliation, ResourceName resourceName) { Promise handler = Promise.promise(); AsyncResult response = deleteResponse.apply(resourceName); if (response.succeeded()) { diff --git a/topic-operator/src/test/java/io/strimzi/operator/topic/MockKafka.java b/topic-operator/src/test/java/io/strimzi/operator/topic/MockKafka.java index e0aa9ddf4b..297b7f4757 100644 --- a/topic-operator/src/test/java/io/strimzi/operator/topic/MockKafka.java +++ b/topic-operator/src/test/java/io/strimzi/operator/topic/MockKafka.java @@ -4,6 +4,7 @@ */ package io.strimzi.operator.topic; +import io.strimzi.operator.common.Reconciliation; import io.vertx.core.Future; import io.vertx.junit5.VertxTestContext; import org.apache.kafka.clients.admin.NewTopic; @@ -129,7 +130,7 @@ public MockKafka 
setTopicExistsResult(Function> topic } @Override - public Future createTopic(Topic t) { + public Future createTopic(Reconciliation reconciliation, Topic t) { NewTopic newTopic = TopicSerialization.toNewTopic(t, null); Future event = createTopicResponse.apply(newTopic.name()); if (event.succeeded()) { @@ -152,7 +153,7 @@ public Future createTopic(Topic t) { } @Override - public Future deleteTopic(TopicName topicName) { + public Future deleteTopic(Reconciliation reconciliation, TopicName topicName) { Future event = deleteTopicResponse.apply(topicName); if (event.succeeded()) { topics.remove(topicName); @@ -161,7 +162,7 @@ public Future deleteTopic(TopicName topicName) { } @Override - public Future topicExists(TopicName topicName) { + public Future topicExists(Reconciliation reconciliation, TopicName topicName) { Future event = topicExistsResult.apply(topicName); if (event == null) { throw new IllegalStateException(); @@ -176,7 +177,7 @@ public MockKafka setUpdateTopicResponse(Function> update } @Override - public Future updateTopicConfig(Topic topic) { + public Future updateTopicConfig(Reconciliation reconciliation, Topic topic) { Future event = updateTopicResponse.apply(topic.getTopicName()); if (event.succeeded()) { Topic t = topics.get(topic.getTopicName()); @@ -190,7 +191,7 @@ public Future updateTopicConfig(Topic topic) { } @Override - public Future increasePartitions(Topic topic) { + public Future increasePartitions(Reconciliation reconciliation, Topic topic) { Future event = updateTopicResponse.apply(topic.getTopicName()); if (event.succeeded()) { Topic t = topics.get(topic.getTopicName()); @@ -204,7 +205,7 @@ public Future increasePartitions(Topic topic) { } @Override - public Future topicMetadata(TopicName topicName) { + public Future topicMetadata(Reconciliation reconciliation, TopicName topicName) { return getTopicNameFutureFunction().apply(topicName); } diff --git a/topic-operator/src/test/java/io/strimzi/operator/topic/TopicOperatorBaseIT.java b/topic-operator/src/test/java/io/strimzi/operator/topic/TopicOperatorBaseIT.java index ea77d2005a..39e18c1740 100644 --- a/topic-operator/src/test/java/io/strimzi/operator/topic/TopicOperatorBaseIT.java +++ b/topic-operator/src/test/java/io/strimzi/operator/topic/TopicOperatorBaseIT.java @@ -332,6 +332,7 @@ protected KafkaTopic createKafkaTopicResource(String topicName) throws Interrupt /** * Create a topic in Kafka with default partitions and replicas from broker settings. + * * @param topicName The name of the topic. * @return The name of the KafkaTopic resource that was created in Kube. * @throws InterruptedException @@ -343,6 +344,7 @@ protected String createTopic(String topicName) throws InterruptedException, Exec /** * Create a topic in Kafka with a single partition and RF=1. + * * @param topicName The name of the topic. * @return The name of the KafkaTopic resource that was created in Kube. * @throws InterruptedException @@ -354,6 +356,7 @@ protected String createTopic(String topicName, int numPartitions, short numRepli /** * Create a topic in Kafka with a single partition and the given replica assignments + * * @param topicName The name of the topic. * @param replicaAssignments The replica assignments. * @return The name of the KafkaTopic resource that was created in Kube. 
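All of the test changes above follow a single calling convention: the Kafka and Kubernetes operations now take a Reconciliation as their first argument, and tests with no real reconciliation in flight pass the shared Reconciliation.DUMMY_RECONCILIATION placeholder. A minimal sketch of that call shape, reusing the KafkaImpl calls from the hunks above (the surrounding scaffolding, including where the Admin client and Vertx instance come from, is illustrative only):

    import io.strimzi.operator.common.Reconciliation;
    import io.vertx.core.Vertx;
    import org.apache.kafka.clients.admin.Admin;

    // Hypothetical helper showing the updated call shape; the Admin client and Vertx
    // instance would come from the test setup (mocked in the tests above).
    class TopicMetadataCallShape {
        static void example(Admin admin, Vertx vertx) {
            KafkaImpl impl = new KafkaImpl(admin, vertx);
            impl.topicMetadata(Reconciliation.DUMMY_RECONCILIATION, new TopicName("test"))
                .onComplete(ar -> { /* assertions elided */ });
            impl.deleteTopic(Reconciliation.DUMMY_RECONCILIATION, new TopicName("test"))
                .onComplete(ar -> { /* assertions elided */ });
        }
    }
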
diff --git a/topic-operator/src/test/java/io/strimzi/operator/topic/TopicOperatorMockTest.java b/topic-operator/src/test/java/io/strimzi/operator/topic/TopicOperatorMockTest.java index cf870c4236..25c4946185 100644 --- a/topic-operator/src/test/java/io/strimzi/operator/topic/TopicOperatorMockTest.java +++ b/topic-operator/src/test/java/io/strimzi/operator/topic/TopicOperatorMockTest.java @@ -10,6 +10,7 @@ import io.strimzi.api.kafka.KafkaTopicList; import io.strimzi.api.kafka.model.KafkaTopic; import io.strimzi.api.kafka.model.KafkaTopicBuilder; +import io.strimzi.operator.common.Reconciliation; import io.strimzi.operator.common.model.Labels; import io.strimzi.test.mockkube.MockKube; import io.vertx.core.Future; @@ -243,7 +244,7 @@ void testCreatedInKube(VertxTestContext context, KafkaTopic kt) throws Interrupt Topic getFromKafka(VertxTestContext context, String topicName) throws InterruptedException { AtomicReference ref = new AtomicReference<>(); Checkpoint async = context.checkpoint(); - Future kafkaMetadata = session.kafka.topicMetadata(new TopicName(topicName)); + Future kafkaMetadata = session.kafka.topicMetadata(Reconciliation.DUMMY_RECONCILIATION, new TopicName(topicName)); kafkaMetadata.map(metadata -> TopicSerialization.fromTopicMetadata(metadata)).onComplete(fromKafka -> { if (fromKafka.succeeded()) { ref.set(fromKafka.result()); diff --git a/topic-operator/src/test/java/io/strimzi/operator/topic/TopicOperatorTest.java b/topic-operator/src/test/java/io/strimzi/operator/topic/TopicOperatorTest.java index 1d929a4187..774b97036d 100644 --- a/topic-operator/src/test/java/io/strimzi/operator/topic/TopicOperatorTest.java +++ b/topic-operator/src/test/java/io/strimzi/operator/topic/TopicOperatorTest.java @@ -15,6 +15,7 @@ import io.strimzi.operator.common.MaxAttemptsExceededException; import io.strimzi.operator.common.MetricsProvider; import io.strimzi.operator.common.MicrometerMetricsProvider; +import io.strimzi.operator.common.Reconciliation; import io.vertx.core.AsyncResult; import io.vertx.core.Future; import io.vertx.core.Vertx; @@ -336,7 +337,7 @@ public void testOnTopicCreated(VertxTestContext context) { mockKafka.setTopicExistsResult(t -> Future.succeededFuture(true)); mockKafka.setTopicMetadataResponse(topicName, topicMetadata, null); mockK8s.setCreateResponse(resourceName, null); - LogContext logContext = LogContext.zkWatch("///", topicName.toString()); + LogContext logContext = LogContext.zkWatch("///", topicName.toString(), topicOperator.getNamespace(), topicName.toString()); Checkpoint async = context.checkpoint(); topicOperator.onTopicCreated(logContext, topicName).onComplete(ar -> { assertSucceeded(context, ar); @@ -392,7 +393,7 @@ public void testOnTopicCreated_retry(VertxTestContext context) { return Future.failedFuture("This should never happen"); }); mockK8s.setCreateResponse(resourceName, null); - LogContext logContext = LogContext.zkWatch("///", topicName.toString()); + LogContext logContext = LogContext.zkWatch("///", topicName.toString(), topicOperator.getNamespace(), topicName.toString()); Checkpoint async = context.checkpoint(); topicOperator.onTopicCreated(logContext, topicName).onComplete(ar -> { assertSucceeded(context, ar); @@ -440,7 +441,7 @@ public void testOnTopicCreated_retryTimeout(VertxTestContext context) { mockKafka.setTopicExistsResult(t -> Future.succeededFuture(true)); mockKafka.setTopicMetadataResponse(topicName, null, null); - LogContext logContext = LogContext.zkWatch("///", topicName.toString()); + LogContext logContext = 
LogContext.zkWatch("///", topicName.toString(), topicOperator.getNamespace(), topicName.toString()); Checkpoint async = context.checkpoint(); topicOperator.onTopicCreated(logContext, topicName).onComplete(ar -> { assertFailed(context, ar); @@ -474,7 +475,7 @@ public void testOnTopicChanged(VertxTestContext context) { KafkaTopic resource = TopicSerialization.toTopicResource(kubeTopic, labels); mockKafka.setCreateTopicResponse(topicName.toString(), null) - .createTopic(kafkaTopic); + .createTopic(Reconciliation.DUMMY_RECONCILIATION, kafkaTopic); mockKafka.setTopicMetadataResponse(topicName, Utils.getTopicMetadata(kafkaTopic), null); //mockKafka.setUpdateTopicResponse(topicName -> Future.succeededFuture()); @@ -485,7 +486,7 @@ public void testOnTopicChanged(VertxTestContext context) { mockK8s.setCreateResponse(resourceName, null) .createResource(resource); mockK8s.setModifyResponse(resourceName, null); - LogContext logContext = LogContext.zkWatch("///", topicName.toString()); + LogContext logContext = LogContext.zkWatch("///", topicName.toString(), topicOperator.getNamespace(), topicName.toString()); Checkpoint async = context.checkpoint(3); topicOperator.onTopicConfigChanged(logContext, topicName).onComplete(ar -> { assertSucceeded(context, ar); @@ -548,7 +549,7 @@ public void testReconcile_withResource_noKafka_noPrivate(VertxTestContext contex CountDownLatch latch = new CountDownLatch(1); Checkpoint async = context.checkpoint(1); - topicOperator.reconcile(reconciliation(), logContext, null, kubeTopic, kafkaTopic, privateTopic).onComplete(reconcileResult -> { + topicOperator.reconcile(reconciliation(logContext), logContext, null, kubeTopic, kafkaTopic, privateTopic).onComplete(reconcileResult -> { assertSucceeded(context, reconcileResult); mockKafka.assertExists(context, kubeTopic.getTopicName()); mockTopicStore.assertExists(context, kubeTopic.getTopicName()); @@ -587,7 +588,7 @@ public void testReconcile_withResource_noKafka_withPrivate(VertxTestContext cont Checkpoint async = context.checkpoint(); - topicOperator.reconcile(reconciliation(), logContext, null, kubeTopic, kafkaTopic, privateTopic).onComplete(reconcileResult -> { + topicOperator.reconcile(reconciliation(logContext), logContext, null, kubeTopic, kafkaTopic, privateTopic).onComplete(reconcileResult -> { assertSucceeded(context, reconcileResult); mockKafka.assertNotExists(context, kubeTopic.getTopicName()); mockTopicStore.assertNotExists(context, kubeTopic.getTopicName()); @@ -611,11 +612,11 @@ public void testReconcile_noResource_withKafka_noPrivate(VertxTestContext contex mockTopicStore.setCreateTopicResponse(topicName, null); mockK8s.setCreateResponse(topicName.asKubeName(), null); mockKafka.setCreateTopicResponse(topicName -> Future.succeededFuture()); - mockKafka.createTopic(kafkaTopic).onComplete(ar -> async0.countDown()); + mockKafka.createTopic(Reconciliation.DUMMY_RECONCILIATION, kafkaTopic).onComplete(ar -> async0.countDown()); async0.await(); - LogContext logContext = LogContext.periodic(topicName.toString()); + LogContext logContext = LogContext.periodic(topicName.toString(), topicOperator.getNamespace(), topicName.toString()); CountDownLatch async = new CountDownLatch(2); - topicOperator.reconcile(reconciliation(), logContext, null, kubeTopic, kafkaTopic, privateTopic).onComplete(reconcileResult -> { + topicOperator.reconcile(reconciliation(logContext), logContext, null, kubeTopic, kafkaTopic, privateTopic).onComplete(reconcileResult -> { assertSucceeded(context, reconcileResult); mockTopicStore.assertExists(context, 
topicName); mockK8s.assertExists(context, topicName.asKubeName()); @@ -652,8 +653,8 @@ public void testReconcile_noResource_withKafka_noPrivate(VertxTestContext contex }); } - TopicOperator.Reconciliation reconciliation() { - return topicOperator.new Reconciliation("test", true) { + TopicOperator.Reconciliation reconciliation(LogContext logContext) { + return topicOperator.new Reconciliation(logContext, "test", true) { @Override public Future execute() { return Future.succeededFuture(); @@ -672,15 +673,15 @@ public void testReconcile_noResource_withKafka_withPrivate(VertxTestContext cont Topic privateTopic = kafkaTopic; CountDownLatch async0 = new CountDownLatch(2); - mockKafka.createTopic(kafkaTopic).onComplete(ar -> async0.countDown()); + mockKafka.createTopic(Reconciliation.DUMMY_RECONCILIATION, kafkaTopic).onComplete(ar -> async0.countDown()); mockKafka.setDeleteTopicResponse(topicName, null); mockTopicStore.setCreateTopicResponse(topicName, null); mockTopicStore.create(kafkaTopic).onComplete(ar -> async0.countDown()); mockTopicStore.setDeleteTopicResponse(topicName, null); async0.await(); - LogContext logContext = LogContext.periodic(topicName.toString()); + LogContext logContext = LogContext.periodic(topicName.toString(), topicOperator.getNamespace(), topicName.toString()); Checkpoint async = context.checkpoint(); - topicOperator.reconcile(reconciliation(), logContext, null, kubeTopic, kafkaTopic, privateTopic).onComplete(reconcileResult -> { + topicOperator.reconcile(reconciliation(logContext), logContext, null, kubeTopic, kafkaTopic, privateTopic).onComplete(reconcileResult -> { assertSucceeded(context, reconcileResult); mockTopicStore.assertNotExists(context, topicName); mockK8s.assertNotExists(context, topicName.asKubeName()); @@ -702,16 +703,16 @@ public void testReconcile_withResource_withKafka_noPrivate_matching(VertxTestCon CountDownLatch async0 = new CountDownLatch(2); mockKafka.setCreateTopicResponse(topicName -> Future.succeededFuture()); - mockKafka.createTopic(kafkaTopic).onComplete(ar -> async0.countDown()); + mockKafka.createTopic(Reconciliation.DUMMY_RECONCILIATION, kafkaTopic).onComplete(ar -> async0.countDown()); mockK8s.setCreateResponse(topicName.asKubeName(), null); KafkaTopic topicResource = TopicSerialization.toTopicResource(kubeTopic, labels); - LogContext logContext = LogContext.periodic(topicName.toString()); + LogContext logContext = LogContext.periodic(topicName.toString(), topicOperator.getNamespace(), topicName.toString()); mockK8s.createResource(topicResource).onComplete(ar -> async0.countDown()); mockTopicStore.setCreateTopicResponse(topicName, null); async0.await(); Checkpoint async = context.checkpoint(); - topicOperator.reconcile(reconciliation(), logContext, null, kubeTopic, kafkaTopic, privateTopic).onComplete(reconcileResult -> { + topicOperator.reconcile(reconciliation(logContext), logContext, null, kubeTopic, kafkaTopic, privateTopic).onComplete(reconcileResult -> { assertSucceeded(context, reconcileResult); mockTopicStore.assertExists(context, topicName); mockK8s.assertExists(context, topicName.asKubeName()); @@ -757,16 +758,16 @@ public void testReconcile_withResource_withKafka_noPrivate_overriddenName(VertxT CountDownLatch async0 = new CountDownLatch(2); mockKafka.setCreateTopicResponse(topicName_ -> Future.succeededFuture()); - mockKafka.createTopic(kafkaTopic).onComplete(ar -> async0.countDown()); + mockKafka.createTopic(Reconciliation.DUMMY_RECONCILIATION, kafkaTopic).onComplete(ar -> async0.countDown()); 
mockK8s.setCreateResponse(kubeName, null); KafkaTopic topicResource = TopicSerialization.toTopicResource(kubeTopic, labels); - LogContext logContext = LogContext.periodic(topicName.toString()); + LogContext logContext = LogContext.periodic(topicName.toString(), topicOperator.getNamespace(), topicName.toString()); mockK8s.createResource(topicResource).onComplete(ar -> async0.countDown()); mockTopicStore.setCreateTopicResponse(topicName, null); async0.await(); Checkpoint async = context.checkpoint(); - topicOperator.reconcile(reconciliation(), logContext, null, kubeTopic, kafkaTopic, privateTopic).onComplete(reconcileResult -> { + topicOperator.reconcile(reconciliation(logContext), logContext, null, kubeTopic, kafkaTopic, privateTopic).onComplete(reconcileResult -> { assertSucceeded(context, reconcileResult); mockTopicStore.assertExists(context, topicName); mockK8s.assertExists(context, kubeName); @@ -806,11 +807,11 @@ public void testReconcile_withResource_withKafka_noPrivate_configsReconcilable(V CountDownLatch async0 = new CountDownLatch(2); mockKafka.setCreateTopicResponse(topicName -> Future.succeededFuture()); - mockKafka.createTopic(kafkaTopic).onComplete(ar -> async0.countDown()); + mockKafka.createTopic(Reconciliation.DUMMY_RECONCILIATION, kafkaTopic).onComplete(ar -> async0.countDown()); mockKafka.setUpdateTopicResponse(topicName -> Future.succeededFuture()); KafkaTopic topic = TopicSerialization.toTopicResource(kubeTopic, labels); - LogContext logContext = LogContext.periodic(topicName.toString()); + LogContext logContext = LogContext.periodic(topicName.toString(), topicOperator.getNamespace(), topicName.toString()); mockK8s.setCreateResponse(topicName.asKubeName(), null); mockK8s.createResource(topic).onComplete(ar -> async0.countDown()); mockK8s.setModifyResponse(topicName.asKubeName(), null); @@ -818,7 +819,7 @@ public void testReconcile_withResource_withKafka_noPrivate_configsReconcilable(V async0.await(); CountDownLatch async = new CountDownLatch(2); - topicOperator.reconcile(reconciliation(), logContext, topic, kubeTopic, kafkaTopic, privateTopic).onComplete(reconcileResult -> { + topicOperator.reconcile(reconciliation(logContext), logContext, topic, kubeTopic, kafkaTopic, privateTopic).onComplete(reconcileResult -> { assertSucceeded(context, reconcileResult); mockTopicStore.assertExists(context, topicName); mockK8s.assertExists(context, topicName.asKubeName()); @@ -865,10 +866,10 @@ public void testReconcile_withResource_withKafka_noPrivate_irreconcilable(VertxT CountDownLatch async0 = new CountDownLatch(2); mockKafka.setCreateTopicResponse(topicName -> Future.succeededFuture()); - mockKafka.createTopic(kafkaTopic).onComplete(ar -> async0.countDown()); + mockKafka.createTopic(Reconciliation.DUMMY_RECONCILIATION, kafkaTopic).onComplete(ar -> async0.countDown()); KafkaTopic topic = TopicSerialization.toTopicResource(kubeTopic, labels); - LogContext logContext = LogContext.periodic(topicName.toString()); + LogContext logContext = LogContext.periodic(topicName.toString(), topicOperator.getNamespace(), topicName.toString()); mockK8s.setCreateResponse(topicName.asKubeName(), null); mockK8s.createResource(topic).onComplete(ar -> async0.countDown()); mockK8s.setModifyResponse(topicName.asKubeName(), null); @@ -876,7 +877,7 @@ public void testReconcile_withResource_withKafka_noPrivate_irreconcilable(VertxT async0.await(); CountDownLatch async = new CountDownLatch(2); - topicOperator.reconcile(reconciliation(), logContext, topic, kubeTopic, kafkaTopic, 
privateTopic).onComplete(reconcileResult -> { + topicOperator.reconcile(reconciliation(logContext), logContext, topic, kubeTopic, kafkaTopic, privateTopic).onComplete(reconcileResult -> { assertSucceeded(context, reconcileResult); mockK8s.assertContainsEvent(context, e -> e.getMessage().contains("KafkaTopic is incompatible with the topic metadata. " + @@ -917,11 +918,11 @@ public void testReconcile_withResource_withKafka_withPrivate_3WayMerge(VertxTest CountDownLatch async0 = new CountDownLatch(3); mockKafka.setCreateTopicResponse(topicName -> Future.succeededFuture()); - mockKafka.createTopic(kafkaTopic).onComplete(ar -> async0.countDown()); + mockKafka.createTopic(Reconciliation.DUMMY_RECONCILIATION, kafkaTopic).onComplete(ar -> async0.countDown()); mockKafka.setUpdateTopicResponse(topicName -> Future.succeededFuture()); KafkaTopic resource = TopicSerialization.toTopicResource(kubeTopic, labels); - LogContext logContext = LogContext.periodic(topicName.toString()); + LogContext logContext = LogContext.periodic(topicName.toString(), topicOperator.getNamespace(), topicName.toString()); mockK8s.setCreateResponse(topicName.asKubeName(), null); mockK8s.createResource(resource).onComplete(ar -> async0.countDown()); mockK8s.setModifyResponse(topicName.asKubeName(), null); @@ -930,7 +931,7 @@ public void testReconcile_withResource_withKafka_withPrivate_3WayMerge(VertxTest async0.await(); CountDownLatch async = new CountDownLatch(3); - topicOperator.reconcile(reconciliation(), logContext, resource, kubeTopic, kafkaTopic, privateTopic).onComplete(reconcileResult -> { + topicOperator.reconcile(reconciliation(logContext), logContext, resource, kubeTopic, kafkaTopic, privateTopic).onComplete(reconcileResult -> { assertSucceeded(context, reconcileResult); mockK8s.assertNoEvents(context); mockTopicStore.read(topicName).onComplete(readResult -> { @@ -969,7 +970,7 @@ private void resourceRemoved(VertxTestContext context, CountDownLatch latch, Exc Topic privateTopic = kubeTopic; mockKafka.setCreateTopicResponse(topicName.toString(), null) - .createTopic(kafkaTopic); + .createTopic(Reconciliation.DUMMY_RECONCILIATION, kafkaTopic); mockKafka.setTopicMetadataResponse(topicName, Utils.getTopicMetadata(kubeTopic), null); mockKafka.setDeleteTopicResponse(topicName, deleteTopicException); @@ -1025,10 +1026,10 @@ public void testOnKafkaTopicChanged(VertxTestContext context) { Topic kafkaTopic = new Topic.Builder(topicName, resourceName, 10, (short) 2, map("cleanup.policy", "bar"), null).build(); Topic privateTopic = kafkaTopic; KafkaTopic resource = TopicSerialization.toTopicResource(kubeTopic, labels); - LogContext logContext = LogContext.zkWatch("///", topicName.toString()); + LogContext logContext = LogContext.zkWatch("///", topicName.toString(), topicOperator.getNamespace(), topicName.toString()); mockKafka.setCreateTopicResponse(topicName.toString(), null) - .createTopic(kafkaTopic); + .createTopic(Reconciliation.DUMMY_RECONCILIATION, kafkaTopic); mockKafka.setTopicMetadataResponse(topicName, Utils.getTopicMetadata(kafkaTopic), null); mockKafka.setUpdateTopicResponse(topicName -> Future.succeededFuture()); @@ -1126,7 +1127,7 @@ private void topicDeleted(VertxTestContext context, Exception storeException, Ex mockKafka.setTopicExistsResult(t -> Future.succeededFuture(topicExists)); - LogContext logContext = LogContext.zkWatch("///", topicName.toString()); + LogContext logContext = LogContext.zkWatch("///", topicName.toString(), topicOperator.getNamespace(), topicName.toString()); Checkpoint async = 
context.checkpoint(); topicOperator.onTopicDeleted(logContext, topicName).onComplete(ar -> { if (k8sException != null diff --git a/user-operator/pom.xml b/user-operator/pom.xml index 121b3d79f3..b9137cd5d5 100644 --- a/user-operator/pom.xml +++ b/user-operator/pom.xml @@ -43,11 +43,11 @@ org.apache.logging.log4j - log4j-api + log4j-core org.apache.logging.log4j - log4j-core + log4j-api org.apache.logging.log4j diff --git a/user-operator/src/main/java/io/strimzi/operator/user/Main.java b/user-operator/src/main/java/io/strimzi/operator/user/Main.java index 57fc411d2a..0fd5d510f0 100644 --- a/user-operator/src/main/java/io/strimzi/operator/user/Main.java +++ b/user-operator/src/main/java/io/strimzi/operator/user/Main.java @@ -38,19 +38,19 @@ @SuppressFBWarnings("DM_EXIT") @SuppressWarnings("deprecation") public class Main { - private static final Logger log = LogManager.getLogger(Main.class.getName()); + private final static Logger LOGGER = LogManager.getLogger(Main.class); static { try { Crds.registerCustomKinds(); } catch (Error | RuntimeException t) { - log.error("Failed to register CRDs", t); + LOGGER.error("Failed to register CRDs", t); throw t; } } public static void main(String[] args) { - log.info("UserOperator {} is starting", Main.class.getPackage().getImplementationVersion()); + LOGGER.info("UserOperator {} is starting", Main.class.getPackage().getImplementationVersion()); UserOperatorConfig config = UserOperatorConfig.fromMap(System.getenv()); //Setup Micrometer metrics options VertxOptions options = new VertxOptions().setMetricsOptions( @@ -65,7 +65,7 @@ public static void main(String[] args) { run(vertx, client, adminClientProvider, config).onComplete(ar -> { if (ar.failed()) { - log.error("Unable to start operator", ar.cause()); + LOGGER.error("Unable to start operator", ar.cause()); System.exit(1); } }); @@ -100,9 +100,9 @@ static Future run(Vertx vertx, KubernetesClient client, AdminClientProvi vertx.deployVerticle(operator, res -> { if (res.succeeded()) { - log.info("User Operator verticle started in namespace {}", config.getNamespace()); + LOGGER.info("User Operator verticle started in namespace {}", config.getNamespace()); } else { - log.error("User Operator verticle in namespace {} failed to start", config.getNamespace(), res.cause()); + LOGGER.error("User Operator verticle in namespace {} failed to start", config.getNamespace(), res.cause()); System.exit(1); } promise.handle(res); diff --git a/user-operator/src/main/java/io/strimzi/operator/user/UserOperator.java b/user-operator/src/main/java/io/strimzi/operator/user/UserOperator.java index eac9d1c065..cf1f718c4a 100644 --- a/user-operator/src/main/java/io/strimzi/operator/user/UserOperator.java +++ b/user-operator/src/main/java/io/strimzi/operator/user/UserOperator.java @@ -11,19 +11,19 @@ import io.vertx.core.Future; import io.vertx.core.Promise; import io.vertx.core.http.HttpServer; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import java.util.concurrent.TimeUnit; import io.micrometer.prometheus.PrometheusMeterRegistry; import io.vertx.micrometer.backends.BackendRegistries; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; /** * An "operator" for managing assemblies of various types in a particular namespace. 
*/ public class UserOperator extends AbstractVerticle { - private static final Logger log = LogManager.getLogger(UserOperator.class.getName()); + private static final Logger LOGGER = LogManager.getLogger(UserOperator.class.getName()); private static final int HEALTH_SERVER_PORT = 8081; @@ -41,7 +41,7 @@ public UserOperator(String namespace, UserOperatorConfig config, KubernetesClient client, KafkaUserOperator kafkaUserOperator) { - log.info("Creating UserOperator for namespace {}", namespace); + LOGGER.info("Creating UserOperator for namespace {}", namespace); this.namespace = namespace; this.reconciliationInterval = config.getReconciliationIntervalMs(); this.client = client; @@ -51,19 +51,19 @@ public UserOperator(String namespace, @Override public void start(Promise start) { - log.info("Starting UserOperator for namespace {}", namespace); + LOGGER.info("Starting UserOperator for namespace {}", namespace); // Configure the executor here, but it is used only in other places getVertx().createSharedWorkerExecutor("kubernetes-ops-pool", 10, TimeUnit.SECONDS.toNanos(120)); kafkaUserOperator.createWatch(namespace, kafkaUserOperator.recreateWatch(namespace)) .compose(w -> { - log.info("Started operator for {} kind", "KafkaUser"); + LOGGER.info("Started operator for {} kind", "KafkaUser"); watch = w; - log.info("Setting up periodic reconciliation for namespace {}", namespace); + LOGGER.info("Setting up periodic reconciliation for namespace {}", namespace); this.reconcileTimer = vertx.setPeriodic(this.reconciliationInterval, res2 -> { - log.info("Triggering periodic reconciliation for namespace {}...", namespace); + LOGGER.info("Triggering periodic reconciliation for namespace {}...", namespace); reconcileAll("timer"); }); @@ -74,7 +74,7 @@ public void start(Promise start) { @Override public void stop(Promise stop) { - log.info("Stopping UserOperator for namespace {}", namespace); + LOGGER.info("Stopping UserOperator for namespace {}", namespace); vertx.cancelTimer(reconcileTimer); if (watch != null) { @@ -109,9 +109,9 @@ private Future startHealthServer() { }) .listen(HEALTH_SERVER_PORT, ar -> { if (ar.succeeded()) { - log.info("UserOperator is now ready (health server listening on {})", HEALTH_SERVER_PORT); + LOGGER.info("UserOperator is now ready (health server listening on {})", HEALTH_SERVER_PORT); } else { - log.error("Unable to bind health server on {}", HEALTH_SERVER_PORT, ar.cause()); + LOGGER.error("Unable to bind health server on {}", HEALTH_SERVER_PORT, ar.cause()); } result.handle(ar); }); diff --git a/user-operator/src/main/java/io/strimzi/operator/user/model/KafkaUserModel.java b/user-operator/src/main/java/io/strimzi/operator/user/model/KafkaUserModel.java index ff7ddc2f61..26086a0fe9 100644 --- a/user-operator/src/main/java/io/strimzi/operator/user/model/KafkaUserModel.java +++ b/user-operator/src/main/java/io/strimzi/operator/user/model/KafkaUserModel.java @@ -21,13 +21,13 @@ import io.strimzi.certs.OpenSslCertManager; import io.strimzi.operator.cluster.model.ClientsCa; import io.strimzi.operator.cluster.model.InvalidResourceException; +import io.strimzi.operator.common.Reconciliation; +import io.strimzi.operator.common.ReconciliationLogger; import io.strimzi.operator.common.Util; import io.strimzi.operator.common.model.Labels; import io.strimzi.operator.user.UserOperatorConfig; import io.strimzi.operator.user.model.acl.SimpleAclRule; import io.strimzi.operator.common.PasswordGenerator; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import 
javax.naming.InvalidNameException; import javax.naming.ldap.LdapName; @@ -42,7 +42,7 @@ import java.util.stream.Collectors; public class KafkaUserModel { - private static final Logger log = LogManager.getLogger(KafkaUserModel.class.getName()); + private static final ReconciliationLogger LOGGER = ReconciliationLogger.create(KafkaUserModel.class.getName()); public static final String KEY_PASSWORD = "password"; public static final String KEY_SASL_JAAS_CONFIG = "sasl.jaas.config"; @@ -93,6 +93,7 @@ protected KafkaUserModel(String namespace, String name, Labels labels, String se /** * Creates instance of KafkaUserModel from CRD definition. * + * @param reconciliation The reconciliation * @param certManager CertManager instance for work with certificates. * @param passwordGenerator A password generator. * @param kafkaUser The Custom Resource based on which the model should be created. @@ -102,7 +103,8 @@ protected KafkaUserModel(String namespace, String name, Labels labels, String se * @param secretPrefix The prefix used to add to the name of the Secret generated from the KafkaUser resource. * @return The user model. */ - public static KafkaUserModel fromCrd(CertManager certManager, + public static KafkaUserModel fromCrd(Reconciliation reconciliation, + CertManager certManager, PasswordGenerator passwordGenerator, KafkaUser kafkaUser, Secret clientsCaCert, @@ -120,10 +122,10 @@ public static KafkaUserModel fromCrd(CertManager certManager, throw new InvalidResourceException("Users with TLS client authentication can have a username (name of the KafkaUser custom resource) only up to 64 characters long."); } - result.maybeGenerateCertificates(certManager, passwordGenerator, clientsCaCert, clientsCaKey, userSecret, + result.maybeGenerateCertificates(reconciliation, certManager, passwordGenerator, clientsCaCert, clientsCaKey, userSecret, UserOperatorConfig.getClientsCaValidityDays(), UserOperatorConfig.getClientsCaRenewalDays()); } else if (kafkaUser.getSpec().getAuthentication() instanceof KafkaUserScramSha512ClientAuthentication) { - result.maybeGeneratePassword(passwordGenerator, userSecret); + result.maybeGeneratePassword(reconciliation, passwordGenerator, userSecret); } if (kafkaUser.getSpec().getAuthorization() != null && kafkaUser.getSpec().getAuthorization().getType().equals(KafkaUserAuthorizationSimple.TYPE_SIMPLE)) { @@ -170,6 +172,7 @@ public Secret generateSecret() { /** * Manage certificates generation based on those already present in the Secrets * + * @param reconciliation The reconciliation * @param certManager CertManager instance for handling certificates creation * @param passwordGenerator PasswordGenerator instance for generating passwords * @param clientsCaCertSecret The clients CA certificate Secret. @@ -179,7 +182,7 @@ public Secret generateSecret() { * @param renewalDays The renewal days. 
*/ @SuppressWarnings("checkstyle:BooleanExpressionComplexity") - public void maybeGenerateCertificates(CertManager certManager, PasswordGenerator passwordGenerator, + public void maybeGenerateCertificates(Reconciliation reconciliation, CertManager certManager, PasswordGenerator passwordGenerator, Secret clientsCaCertSecret, Secret clientsCaKeySecret, Secret userSecret, int validityDays, int renewalDays) { if (clientsCaCertSecret == null) { @@ -187,15 +190,15 @@ public void maybeGenerateCertificates(CertManager certManager, PasswordGenerator } else if (clientsCaKeySecret == null) { throw new NoCertificateSecretException("The Clients CA Key Secret is missing"); } else { - ClientsCa clientsCa = new ClientsCa(certManager, passwordGenerator, + ClientsCa clientsCa = new ClientsCa(reconciliation, certManager, + passwordGenerator, clientsCaCertSecret.getMetadata().getName(), clientsCaCertSecret, clientsCaCertSecret.getMetadata().getName(), clientsCaKeySecret, validityDays, renewalDays, - false, - null); + false, null); this.caCert = clientsCa.currentCaCertBase64(); if (userSecret != null) { // Secret already exists -> lets verify if it has keys from the same CA @@ -231,7 +234,7 @@ public void maybeGenerateCertificates(CertManager certManager, PasswordGenerator decodeFromSecret(userSecret, "user.key"), decodeFromSecret(userSecret, "user.crt")); } catch (IOException e) { - log.error("Error generating the keystore for user {}", name, e); + LOGGER.errorCr(reconciliation, "Error generating the keystore for user {}", name, e); } } return; @@ -241,17 +244,18 @@ public void maybeGenerateCertificates(CertManager certManager, PasswordGenerator try { this.userCertAndKey = clientsCa.generateSignedCert(name); } catch (IOException e) { - log.error("Error generating signed certificate for user {}", name, e); + LOGGER.errorCr(reconciliation, "Error generating signed certificate for user {}", name, e); } } } /** + * @param reconciliation The reconciliation. * @param generator The password generator. * @param userSecret The Secret containing any existing password. */ - public void maybeGeneratePassword(PasswordGenerator generator, Secret userSecret) { + public void maybeGeneratePassword(Reconciliation reconciliation, PasswordGenerator generator, Secret userSecret) { if (userSecret != null) { // Secret already exists -> lets verify if it has a password String password = userSecret.getData().get(KEY_PASSWORD); @@ -260,7 +264,7 @@ public void maybeGeneratePassword(PasswordGenerator generator, Secret userSecret return; } } - log.debug("Generating user password"); + LOGGER.debugCr(reconciliation, "Generating user password"); this.scramSha512Password = generator.generate(); } @@ -278,6 +282,7 @@ protected byte[] decodeFromSecret(Secret secret, String key) { /** * Creates secret with the data + * * @param data Map with the Secret content * @return The secret. 
*/ diff --git a/user-operator/src/main/java/io/strimzi/operator/user/operator/KafkaUserOperator.java b/user-operator/src/main/java/io/strimzi/operator/user/operator/KafkaUserOperator.java index 13d5370e31..0f81627e6c 100644 --- a/user-operator/src/main/java/io/strimzi/operator/user/operator/KafkaUserOperator.java +++ b/user-operator/src/main/java/io/strimzi/operator/user/operator/KafkaUserOperator.java @@ -13,6 +13,7 @@ import io.strimzi.api.kafka.model.status.KafkaUserStatus; import io.strimzi.certs.CertManager; import io.strimzi.operator.common.AbstractOperator; +import io.strimzi.operator.common.ReconciliationLogger; import io.strimzi.operator.common.MicrometerMetricsProvider; import io.strimzi.operator.common.PasswordGenerator; import io.strimzi.operator.common.Reconciliation; @@ -29,8 +30,6 @@ import io.vertx.core.Future; import io.vertx.core.Promise; import io.vertx.core.Vertx; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import java.nio.charset.StandardCharsets; import java.util.Base64; @@ -45,7 +44,7 @@ */ public class KafkaUserOperator extends AbstractOperator> { - private static final Logger log = LogManager.getLogger(KafkaUserOperator.class.getName()); + private static final ReconciliationLogger LOGGER = ReconciliationLogger.create(KafkaUserOperator.class.getName()); private final SecretOperator secretOperations; private final SimpleAclOperator aclOperations; @@ -96,7 +95,7 @@ public KafkaUserOperator(Vertx vertx, public Future> allResourceNames(String namespace) { return CompositeFuture.join(super.allResourceNames(namespace), invokeAsync(aclOperations::getUsersWithAcls), - invokeAsync(scramShaCredentialOperator::list)).map(compositeFuture -> { + invokeAsync(() -> scramShaCredentialOperator.list())).map(compositeFuture -> { Set names = compositeFuture.resultAt(0); names.addAll(toResourceRef(namespace, compositeFuture.resultAt(1))); names.addAll(toResourceRef(namespace, compositeFuture.resultAt(2))); @@ -128,6 +127,7 @@ private Future invokeAsync(Supplier getter) { * Creates or updates the user. The implementation * should not assume that any resources are in any particular state (e.g. that the absence on * one resource means that all resources need to be created). + * * @param reconciliation Unique identification for the reconciliation * @param resource KafkaUser resources with the desired user configuration. * @return a Future @@ -144,13 +144,14 @@ protected Future createOrUpdate(Reconciliation reconciliation, KafkaUserModel user; try { - user = KafkaUserModel.fromCrd(certManager, passwordGenerator, resource, clientsCaCert, clientsCaKey, userSecret, secretPrefix); + user = KafkaUserModel.fromCrd(reconciliation, certManager, passwordGenerator, resource, clientsCaCert, clientsCaKey, userSecret, secretPrefix); } catch (Exception e) { + LOGGER.warnCr(reconciliation, e); StatusUtils.setStatusConditionAndObservedGeneration(resource, userStatus, Future.failedFuture(e)); return Future.failedFuture(new ReconciliationException(userStatus, e)); } - log.debug("{}: Updating User {} in namespace {}", reconciliation, userName, namespace); + LOGGER.debugCr(reconciliation, "Updating User {} in namespace {}", userName, namespace); Secret desired = user.generateSecret(); String password = null; @@ -180,12 +181,12 @@ protected Future createOrUpdate(Reconciliation reconciliation, // Reconciliation of Quotas and of SCRAM-SHA credentials changes the same fields and cannot be done in parallel // because they would overwrite each other's data! 
CompositeFuture.join( - scramShaCredentialOperator.reconcile(user.getName(), password) - .compose(ignore -> CompositeFuture.join(kafkaUserQuotasOperator.reconcile(KafkaUserModel.getTlsUserName(userName), finalTlsQuotas), - kafkaUserQuotasOperator.reconcile(KafkaUserModel.getScramUserName(userName), finalScramOrNoneQuotas))), - reconcileSecretAndSetStatus(namespace, user, desired, userStatus), - aclOperations.reconcile(KafkaUserModel.getTlsUserName(userName), tlsAcls), - aclOperations.reconcile(KafkaUserModel.getScramUserName(userName), scramOrNoneAcls)) + scramShaCredentialOperator.reconcile(reconciliation, user.getName(), password) + .compose(ignore -> CompositeFuture.join(kafkaUserQuotasOperator.reconcile(reconciliation, KafkaUserModel.getTlsUserName(userName), finalTlsQuotas), + kafkaUserQuotasOperator.reconcile(reconciliation, KafkaUserModel.getScramUserName(userName), finalScramOrNoneQuotas))), + reconcileSecretAndSetStatus(reconciliation, namespace, user, desired, userStatus), + aclOperations.reconcile(reconciliation, KafkaUserModel.getTlsUserName(userName), tlsAcls), + aclOperations.reconcile(reconciliation, KafkaUserModel.getScramUserName(userName), scramOrNoneAcls)) .onComplete(reconciliationResult -> { StatusUtils.setStatusConditionAndObservedGeneration(resource, userStatus, reconciliationResult.mapEmpty()); userStatus.setUsername(user.getUserName()); @@ -200,8 +201,8 @@ protected Future createOrUpdate(Reconciliation reconciliation, return handler.future(); } - protected Future> reconcileSecretAndSetStatus(String namespace, KafkaUserModel user, Secret desired, KafkaUserStatus userStatus) { - return secretOperations.reconcile(namespace, user.getSecretName(), desired).compose(ar -> { + protected Future> reconcileSecretAndSetStatus(Reconciliation reconciliation, String namespace, KafkaUserModel user, Secret desired, KafkaUserStatus userStatus) { + return secretOperations.reconcile(reconciliation, namespace, user.getSecretName(), desired).compose(ar -> { if (desired != null) { userStatus.setSecret(desired.getMetadata().getName()); } @@ -218,13 +219,13 @@ protected Future> reconcileSecretAndSetStatus(String nam protected Future delete(Reconciliation reconciliation) { String namespace = reconciliation.namespace(); String user = reconciliation.name(); - log.debug("{}: Deleting User {} from namespace {}", reconciliation, user, namespace); - return CompositeFuture.join(secretOperations.reconcile(namespace, KafkaUserModel.getSecretName(secretPrefix, user), null), - aclOperations.reconcile(KafkaUserModel.getTlsUserName(user), null), - aclOperations.reconcile(KafkaUserModel.getScramUserName(user), null), - scramShaCredentialOperator.reconcile(KafkaUserModel.getScramUserName(user), null) - .compose(ignore -> kafkaUserQuotasOperator.reconcile(KafkaUserModel.getTlsUserName(user), null)) - .compose(ignore -> kafkaUserQuotasOperator.reconcile(KafkaUserModel.getScramUserName(user), null))) + LOGGER.debugCr(reconciliation, "Deleting User {} from namespace {}", user, namespace); + return CompositeFuture.join(secretOperations.reconcile(reconciliation, namespace, KafkaUserModel.getSecretName(secretPrefix, user), null), + aclOperations.reconcile(reconciliation, KafkaUserModel.getTlsUserName(user), null), + aclOperations.reconcile(reconciliation, KafkaUserModel.getScramUserName(user), null), + scramShaCredentialOperator.reconcile(reconciliation, KafkaUserModel.getScramUserName(user), null) + .compose(ignore -> kafkaUserQuotasOperator.reconcile(reconciliation, KafkaUserModel.getTlsUserName(user), null)) 
+ .compose(ignore -> kafkaUserQuotasOperator.reconcile(reconciliation, KafkaUserModel.getScramUserName(user), null))) .map(Boolean.TRUE); } diff --git a/user-operator/src/main/java/io/strimzi/operator/user/operator/KafkaUserQuotasOperator.java b/user-operator/src/main/java/io/strimzi/operator/user/operator/KafkaUserQuotasOperator.java index e43120c329..495b1dcafd 100644 --- a/user-operator/src/main/java/io/strimzi/operator/user/operator/KafkaUserQuotasOperator.java +++ b/user-operator/src/main/java/io/strimzi/operator/user/operator/KafkaUserQuotasOperator.java @@ -5,6 +5,8 @@ package io.strimzi.operator.user.operator; import io.strimzi.api.kafka.model.KafkaUserQuotas; +import io.strimzi.operator.common.Reconciliation; +import io.strimzi.operator.common.ReconciliationLogger; import io.strimzi.operator.common.operator.resource.ReconcileResult; import io.vertx.core.Future; import io.vertx.core.Promise; @@ -14,8 +16,6 @@ import org.apache.kafka.common.quota.ClientQuotaFilter; import org.apache.kafka.common.quota.ClientQuotaFilterComponent; import org.apache.kafka.common.quota.ClientQuotaEntity; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import java.util.Collections; import java.util.HashSet; @@ -24,7 +24,7 @@ import java.util.Set; public class KafkaUserQuotasOperator { - private static final Logger log = LogManager.getLogger(KafkaUserQuotasOperator.class.getName()); + private static final ReconciliationLogger LOGGER = ReconciliationLogger.create(KafkaUserQuotasOperator.class.getName()); private final Vertx vertx; private final Admin adminClient; @@ -34,19 +34,19 @@ public KafkaUserQuotasOperator(Vertx vertx, Admin adminClient) { this.adminClient = adminClient; } - Future> reconcile(String username, KafkaUserQuotas quotas) { + Future> reconcile(Reconciliation reconciliation, String username, KafkaUserQuotas quotas) { Promise> prom = Promise.promise(); vertx.createSharedWorkerExecutor("kubernetes-ops-pool").executeBlocking( future -> { try { - boolean exists = exists(username); + boolean exists = exists(reconciliation, username); if (quotas != null) { - createOrUpdate(username, quotas); + createOrUpdate(reconciliation, username, quotas); future.complete(exists ? ReconcileResult.created(quotas) : ReconcileResult.patched(quotas)); } else { if (exists) { - delete(username); + delete(reconciliation, username); future.complete(ReconcileResult.deleted()); } else { future.complete(ReconcileResult.noop(null)); @@ -65,69 +65,72 @@ Future> reconcile(String username, KafkaUserQuo /** * Create or update the quotas for the given user. 
* + * @param reconciliation The reconciliation * @param username The name of the user which should be created or updated * @param quotas The desired user quotas * @throws Exception when altering quotas fails */ - public void createOrUpdate(String username, KafkaUserQuotas quotas) throws Exception { - KafkaUserQuotas current = describeUserQuotas(username); + public void createOrUpdate(Reconciliation reconciliation, String username, KafkaUserQuotas quotas) throws Exception { + KafkaUserQuotas current = describeUserQuotas(reconciliation, username); if (current != null) { - log.debug("Checking quota updates for user {}", username); + LOGGER.debugCr(reconciliation, "Checking quota updates for user {}", username); if (!quotasEquals(current, quotas)) { - log.debug("Updating quotas for user {}", username); - alterUserQuotas(username, toClientQuotaAlterationOps(quotas)); + LOGGER.debugCr(reconciliation, "Updating quotas for user {}", username); + alterUserQuotas(reconciliation, username, toClientQuotaAlterationOps(quotas)); } else { - log.debug("Nothing to update in quotas for user {}", username); + LOGGER.debugCr(reconciliation, "Nothing to update in quotas for user {}", username); } } else { - log.debug("Creating quotas for user {}", username); - alterUserQuotas(username, toClientQuotaAlterationOps(quotas)); + LOGGER.debugCr(reconciliation, "Creating quotas for user {}", username); + alterUserQuotas(reconciliation, username, toClientQuotaAlterationOps(quotas)); } } /** * Determine whether the given user has quotas. * + * @param reconciliation The reconciliation * @param username Name of the user * * @return True if the user exists */ - boolean exists(String username) throws Exception { - return describeUserQuotas(username) != null; + boolean exists(Reconciliation reconciliation, String username) throws Exception { + return describeUserQuotas(reconciliation, username) != null; } /** * Delete the quotas for the given user. * It is not an error if the user doesn't exist, or doesn't currently have any quotas. 
* + * @param reconciliation The reconciliation * @param username Name of the user * @throws Exception when altering quotas fails */ - public void delete(String username) throws Exception { - KafkaUserQuotas current = describeUserQuotas(username); + public void delete(Reconciliation reconciliation, String username) throws Exception { + KafkaUserQuotas current = describeUserQuotas(reconciliation, username); if (current != null) { - log.debug("Deleting quotas for user {}", username); + LOGGER.debugCr(reconciliation, "Deleting quotas for user {}", username); current.setProducerByteRate(null); current.setConsumerByteRate(null); current.setRequestPercentage(null); - alterUserQuotas(username, toClientQuotaAlterationOps(current)); + alterUserQuotas(reconciliation, username, toClientQuotaAlterationOps(current)); } else { - log.warn("Quotas for user {} already don't exist", username); + LOGGER.warnCr(reconciliation, "Quotas for user {} already don't exist", username); } } - protected void alterUserQuotas(String username, Set ops) throws Exception { + protected void alterUserQuotas(Reconciliation reconciliation, String username, Set ops) throws Exception { ClientQuotaEntity cqe = new ClientQuotaEntity(Collections.singletonMap(ClientQuotaEntity.USER, username)); ClientQuotaAlteration cqa = new ClientQuotaAlteration(cqe, ops); try { adminClient.alterClientQuotas(Collections.singleton(cqa)).all().get(); } catch (Exception e) { - log.error("Creating/Altering quotas for user {} failed", username, e); + LOGGER.errorCr(reconciliation, "Creating/Altering quotas for user {} failed", username, e); throw e; } } - protected KafkaUserQuotas describeUserQuotas(String username) throws Exception { + protected KafkaUserQuotas describeUserQuotas(Reconciliation reconciliation, String username) throws Exception { ClientQuotaFilterComponent c = ClientQuotaFilterComponent.ofEntity(ClientQuotaEntity.USER, username); ClientQuotaFilter f = ClientQuotaFilter.contains(Collections.singleton(c)); KafkaUserQuotas current = null; @@ -138,7 +141,7 @@ protected KafkaUserQuotas describeUserQuotas(String username) throws Exception { current = fromClientQuota(map.get(cqe)); } } catch (Exception e) { - log.error("Getting quotas for user {} failed", username, e); + LOGGER.errorCr(reconciliation, "Getting quotas for user {} failed", username, e); throw e; } return current; diff --git a/user-operator/src/main/java/io/strimzi/operator/user/operator/ScramShaCredentials.java b/user-operator/src/main/java/io/strimzi/operator/user/operator/ScramShaCredentials.java index b019f6bf17..228bc7d2a9 100644 --- a/user-operator/src/main/java/io/strimzi/operator/user/operator/ScramShaCredentials.java +++ b/user-operator/src/main/java/io/strimzi/operator/user/operator/ScramShaCredentials.java @@ -4,6 +4,8 @@ */ package io.strimzi.operator.user.operator; +import io.strimzi.operator.common.Reconciliation; +import io.strimzi.operator.common.ReconciliationLogger; import io.vertx.core.json.JsonObject; import org.I0Itec.zkclient.ZkClient; import org.I0Itec.zkclient.serialize.BytesPushThroughSerializer; @@ -11,8 +13,6 @@ import org.apache.kafka.common.security.scram.internals.ScramCredentialUtils; import org.apache.kafka.common.security.scram.internals.ScramFormatter; import org.apache.kafka.common.security.scram.internals.ScramMechanism; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import java.nio.charset.Charset; import java.security.NoSuchAlgorithmException; @@ -23,7 +23,7 @@ * Utility class for managing Scram 
credentials */ public class ScramShaCredentials { - private static final Logger log = LogManager.getLogger(ScramShaCredentials.class.getName()); + private static final ReconciliationLogger LOGGER = ReconciliationLogger.create(ScramShaCredentials.class.getName()); private final static int ITERATIONS = 4096; private final static int CONNECTION_TIMEOUT = 30_000; @@ -38,22 +38,23 @@ public ScramShaCredentials(String zookeeperUrl, int zookeeperSessionTimeout) { /** * Create or update the SCRAM-SHA credentials for the given user. * + * @param reconciliation The reconciliation * @param username The name of the user which should be created or updated * @param password The desired user password */ - public void createOrUpdate(String username, String password) { + public void createOrUpdate(Reconciliation reconciliation, String username, String password) { byte[] data = zkClient.readData("/config/users/" + username, true); if (data != null) { - log.debug("Updating {} credentials for user {}", mechanism.mechanismName(), username); + LOGGER.debugCr(reconciliation, "Updating {} credentials for user {}", mechanism.mechanismName(), username); zkClient.writeData("/config/users/" + username, updateUserJson(data, password)); } else { - log.debug("Creating {} credentials for user {}", mechanism.mechanismName(), username); + LOGGER.debugCr(reconciliation, "Creating {} credentials for user {}", mechanism.mechanismName(), username); ensurePath("/config/users"); zkClient.createPersistent("/config/users/" + username, createUserJson(password)); } - notifyChanges(username); + notifyChanges(reconciliation, username); } private boolean configJsonIsEmpty(JsonObject json) { @@ -66,22 +67,23 @@ private boolean configJsonIsEmpty(JsonObject json) { * Delete the SCRAM-SHA credentials for the given user. * It is not an error if the user doesn't exist, or doesn't currently have any SCRAM-SHA credentials. 
* + * @param reconciliation The reconciliation * @param username Name of the user */ - public void delete(String username) { + public void delete(Reconciliation reconciliation, String username) { byte[] data = zkClient.readData("/config/users/" + username, true); if (data != null) { - log.debug("Deleting {} credentials for user {}", mechanism.mechanismName(), username); + LOGGER.debugCr(reconciliation, "Deleting {} credentials for user {}", mechanism.mechanismName(), username); JsonObject deletedJson = removeScramCredentialsFromUserJson(data); if (configJsonIsEmpty(deletedJson)) { zkClient.deleteRecursive("/config/users/" + username); } else { zkClient.writeData("/config/users/" + username, deletedJson.toBuffer().getBytes()); } - notifyChanges(username); + notifyChanges(reconciliation, username); } else { - log.warn("Credentials for user {} already don't exist", username); + LOGGER.warnCr(reconciliation, "Credentials for user {} already don't exist", username); } } @@ -109,7 +111,7 @@ public boolean exists(String username) { ScramCredentialUtils.credentialFromString(scramCredentials); return true; } catch (IllegalArgumentException e) { - log.warn("Invalid {} credentials for user {}", mechanism.mechanismName(), username); + LOGGER.warnOp("Invalid {} credentials for user {}", mechanism.mechanismName(), username); } } } @@ -142,10 +144,11 @@ public List list() { /** * This notifies Kafka about the changes we have made * + * @param reconciliation The reconciliation * @param username Name of the user whose configuration changed */ - private void notifyChanges(String username) { - log.debug("Notifying changes for user {}", username); + private void notifyChanges(Reconciliation reconciliation, String username) { + LOGGER.debugCr(reconciliation, "Notifying changes for user {}", username); ensurePath("/config/changes"); diff --git a/user-operator/src/main/java/io/strimzi/operator/user/operator/ScramShaCredentialsOperator.java b/user-operator/src/main/java/io/strimzi/operator/user/operator/ScramShaCredentialsOperator.java index 4f0ca256a3..81355c71dc 100644 --- a/user-operator/src/main/java/io/strimzi/operator/user/operator/ScramShaCredentialsOperator.java +++ b/user-operator/src/main/java/io/strimzi/operator/user/operator/ScramShaCredentialsOperator.java @@ -4,6 +4,7 @@ */ package io.strimzi.operator.user.operator; +import io.strimzi.operator.common.Reconciliation; import io.vertx.core.Future; import io.vertx.core.Promise; import io.vertx.core.Vertx; @@ -21,17 +22,17 @@ public ScramShaCredentialsOperator(Vertx vertx, ScramShaCredentials credsManager this.vertx = vertx; } - Future reconcile(String username, String password) { + Future reconcile(Reconciliation reconciliation, String username, String password) { Promise promise = Promise.promise(); vertx.createSharedWorkerExecutor("kubernetes-ops-pool").executeBlocking( future -> { boolean exists = credsManager.exists(username); if (password != null) { - credsManager.createOrUpdate(username, password); + credsManager.createOrUpdate(reconciliation, username, password); future.complete(null); } else { if (exists) { - credsManager.delete(username); + credsManager.delete(reconciliation, username); future.complete(null); } else { future.complete(null); diff --git a/user-operator/src/main/java/io/strimzi/operator/user/operator/SimpleAclOperator.java b/user-operator/src/main/java/io/strimzi/operator/user/operator/SimpleAclOperator.java index c69e63dc01..4ae071613a 100644 --- 
a/user-operator/src/main/java/io/strimzi/operator/user/operator/SimpleAclOperator.java +++ b/user-operator/src/main/java/io/strimzi/operator/user/operator/SimpleAclOperator.java @@ -5,6 +5,8 @@ package io.strimzi.operator.user.operator; import io.strimzi.operator.cluster.model.InvalidResourceException; +import io.strimzi.operator.common.Reconciliation; +import io.strimzi.operator.common.ReconciliationLogger; import io.strimzi.operator.common.operator.resource.ReconcileResult; import io.strimzi.operator.user.model.KafkaUserModel; import io.strimzi.operator.user.model.acl.SimpleAclRule; @@ -23,8 +25,6 @@ import org.apache.kafka.common.resource.ResourcePatternFilter; import org.apache.kafka.common.security.auth.KafkaPrincipal; import org.apache.kafka.common.utils.SecurityUtils; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import java.util.ArrayList; import java.util.Arrays; @@ -38,7 +38,7 @@ * SimlpeAclOperator is responsible for managing the authorization rules in Apache Kafka / Apache Zookeeper. */ public class SimpleAclOperator { - private static final Logger log = LogManager.getLogger(SimpleAclOperator.class.getName()); + private static final ReconciliationLogger LOGGER = ReconciliationLogger.create(SimpleAclOperator.class.getName()); private static final List IGNORED_USERS = Arrays.asList("*", "ANONYMOUS"); @@ -59,18 +59,19 @@ public SimpleAclOperator(Vertx vertx, Admin adminClient) { /** * Reconciles Acl rules for given user * + * @param reconciliation The reconciliation * @param username User name of the reconciled user. When using TLS client auth, the username should be already in the Kafka format, e.g. CN=my-user * @param desired The list of desired Acl rules * @return the Future with reconcile result */ - public Future>> reconcile(String username, Set desired) { + public Future>> reconcile(Reconciliation reconciliation, String username, Set desired) { Promise>> promise = Promise.promise(); vertx.createSharedWorkerExecutor("kubernetes-ops-pool").executeBlocking( future -> { Set current; try { - current = getAcls(username); + current = getAcls(reconciliation, username); } catch (Exception e) { // if authorization is not enabled in the Kafka resource, but the KafkaUser resource doesn't // have ACLs, the UO can just ignore the corresponding exception @@ -78,7 +79,7 @@ public Future>> reconcile(String username, Se future.complete(); return; } else { - log.error("Reconciliation failed for user {}", username, e); + LOGGER.errorCr(reconciliation, "Reconciliation failed for user {}", username, e); future.fail(e); return; } @@ -86,19 +87,19 @@ public Future>> reconcile(String username, Se if (desired == null || desired.isEmpty()) { if (current.size() == 0) { - log.debug("User {}: No expected Acl rules and no existing Acl rules -> NoOp", username); + LOGGER.debugCr(reconciliation, "User {}: No expected Acl rules and no existing Acl rules -> NoOp", username); future.complete(ReconcileResult.noop(desired)); } else { - log.debug("User {}: No expected Acl rules, but {} existing Acl rules -> Deleting rules", username, current.size()); - internalDelete(username, current).onComplete(future); + LOGGER.debugCr(reconciliation, "User {}: No expected Acl rules, but {} existing Acl rules -> Deleting rules", username, current.size()); + internalDelete(reconciliation, username, current).onComplete(future); } } else { if (current.isEmpty()) { - log.debug("User {}: {} expected Acl rules, but no existing Acl rules -> Adding rules", username, desired.size()); - 
internalCreate(username, desired).onComplete(future); + LOGGER.debugCr(reconciliation, "User {}: {} expected Acl rules, but no existing Acl rules -> Adding rules", username, desired.size()); + internalCreate(reconciliation, username, desired).onComplete(future); } else { - log.debug("User {}: {} expected Acl rules and {} existing Acl rules -> Reconciling rules", username, desired.size(), current.size()); - internalUpdate(username, desired, current).onComplete(future); + LOGGER.debugCr(reconciliation, "User {}: {} expected Acl rules and {} existing Acl rules -> Reconciling rules", username, desired.size(), current.size()); + internalUpdate(reconciliation, username, desired, current).onComplete(future); } } }, @@ -111,12 +112,12 @@ public Future>> reconcile(String username, Se /** * Create all ACLs for given user */ - protected Future>> internalCreate(String username, Set desired) { + protected Future>> internalCreate(Reconciliation reconciliation, String username, Set desired) { try { Collection aclBindings = getAclBindings(username, desired); adminClient.createAcls(aclBindings).all().get(); } catch (Exception e) { - log.error("Adding Acl rules for user {} failed", username, e); + LOGGER.errorCr(reconciliation, "Adding Acl rules for user {} failed", username, e); return Future.failedFuture(e); } @@ -126,9 +127,9 @@ protected Future>> internalCreate(String user /** * Update all ACLs for given user. * This method is using Sets to decide which rules need to be added and which need to be deleted. - * It delagates to {@link #internalCreate internalCreate} and {@link #internalDelete internalDelete} methods for the actual addition or deletion. + * It delegates to {@link #internalCreate internalCreate} and {@link #internalDelete internalDelete} methods for the actual addition or deletion. 
*/ - protected Future>> internalUpdate(String username, Set desired, Set current) { + protected Future>> internalUpdate(Reconciliation reconciliation, String username, Set desired, Set current) { Set toBeDeleted = new HashSet<>(current); toBeDeleted.removeAll(desired); @@ -136,8 +137,8 @@ protected Future>> internalUpdate(String user toBeAdded.removeAll(current); List updates = new ArrayList<>(2); - updates.add(internalDelete(username, toBeDeleted)); - updates.add(internalCreate(username, toBeAdded)); + updates.add(internalDelete(reconciliation, username, toBeDeleted)); + updates.add(internalCreate(reconciliation, username, toBeAdded)); Promise>> promise = Promise.promise(); @@ -145,7 +146,7 @@ protected Future>> internalUpdate(String user if (res.succeeded()) { promise.complete(ReconcileResult.patched(desired)); } else { - log.error("Updating Acl rules for user {} failed", username, res.cause()); + LOGGER.errorCr(reconciliation, "Updating Acl rules for user {} failed", username, res.cause()); promise.fail(res.cause()); } }); @@ -174,13 +175,13 @@ private Collection getAclBindings(String username, Set>> internalDelete(String username, Set current) { + protected Future>> internalDelete(Reconciliation reconciliation, String username, Set current) { try { Collection aclBindingFilters = getAclBindingFilters(username, current); adminClient.deleteAcls(aclBindingFilters).all().get(); } catch (Exception e) { - log.error("Deleting Acl rules for user {} failed", username, e); + LOGGER.errorCr(reconciliation, "Deleting Acl rules for user {} failed", username, e); return Future.failedFuture(e); } return Future.succeededFuture(ReconcileResult.deleted()); @@ -189,11 +190,12 @@ protected Future>> internalDelete(String user /** * Returns Set of ACLs applying to single user. * + * @param reconciliation The reconciliation * @param username Name of the user. * @return The Set of ACLs applying to single user. 
*/ - public Set getAcls(String username) { - log.debug("Searching for ACL rules of user {}", username); + public Set getAcls(Reconciliation reconciliation, String username) { + LOGGER.debugCr(reconciliation, "Searching for ACL rules of user {}", username); Set result = new HashSet<>(); KafkaPrincipal principal = new KafkaPrincipal("User", username); @@ -213,9 +215,9 @@ public Set getAcls(String username) { } if (aclBindings != null) { - log.debug("ACL rules for user {}", username); + LOGGER.debugOp("ACL rules for user {}", username); for (AclBinding aclBinding : aclBindings) { - log.debug("{}", aclBinding); + LOGGER.debugOp("{}", aclBinding); result.add(SimpleAclRule.fromAclBinding(aclBinding)); } } @@ -232,7 +234,7 @@ public Set getUsersWithAcls() { Set result = new HashSet<>(); Set ignored = new HashSet<>(IGNORED_USERS.size()); - log.debug("Searching for Users with any ACL rules"); + LOGGER.debugOp("Searching for Users with any ACL rules"); Collection aclBindings; try { @@ -250,13 +252,13 @@ public Set getUsersWithAcls() { if (IGNORED_USERS.contains(username)) { if (!ignored.contains(username)) { - // This info message is loged only once per reocnciliation even if there are multiple rules - log.info("Existing ACLs for user '{}' will be ignored.", username); + // This info message is logged only once per reconciliation + LOGGER.infoOp("Existing ACLs for user '{}' will be ignored.", username); ignored.add(username); } } else { - if (log.isTraceEnabled()) { - log.trace("Adding user {} to Set of users with ACLs", username); + if (LOGGER.isTraceEnabled()) { + LOGGER.traceOp("Adding user {} to Set of users with ACLs", username); } result.add(username); diff --git a/user-operator/src/test/java/io/strimzi/operator/user/model/KafkaUserModelTest.java b/user-operator/src/test/java/io/strimzi/operator/user/model/KafkaUserModelTest.java index a737ac52eb..82c0a1b78c 100644 --- a/user-operator/src/test/java/io/strimzi/operator/user/model/KafkaUserModelTest.java +++ b/user-operator/src/test/java/io/strimzi/operator/user/model/KafkaUserModelTest.java @@ -15,6 +15,7 @@ import io.strimzi.certs.CertManager; import io.strimzi.operator.cluster.model.InvalidResourceException; import io.strimzi.operator.common.PasswordGenerator; +import io.strimzi.operator.common.Reconciliation; import io.strimzi.operator.common.model.Labels; import io.strimzi.operator.common.operator.MockCertManager; import io.strimzi.operator.user.ResourceUtils; @@ -52,7 +53,7 @@ public void checkOwnerReference(OwnerReference ownerRef, HasMetadata resource) @Test public void testFromCrdTlsUser() { - KafkaUserModel model = KafkaUserModel.fromCrd(mockCertManager, passwordGenerator, tlsUser, clientsCaCert, clientsCaKey, null, UserOperatorConfig.DEFAULT_SECRET_PREFIX); + KafkaUserModel model = KafkaUserModel.fromCrd(Reconciliation.DUMMY_RECONCILIATION, mockCertManager, passwordGenerator, tlsUser, clientsCaCert, clientsCaKey, null, UserOperatorConfig.DEFAULT_SECRET_PREFIX); assertThat(model.namespace, is(ResourceUtils.NAMESPACE)); assertThat(model.name, is(ResourceUtils.NAME)); @@ -70,7 +71,7 @@ public void testFromCrdTlsUser() { @Test public void testFromCrdQuotaUser() { - KafkaUserModel model = KafkaUserModel.fromCrd(mockCertManager, passwordGenerator, quotasUser, clientsCaCert, clientsCaKey, null, UserOperatorConfig.DEFAULT_SECRET_PREFIX); + KafkaUserModel model = KafkaUserModel.fromCrd(Reconciliation.DUMMY_RECONCILIATION, mockCertManager, passwordGenerator, quotasUser, clientsCaCert, clientsCaKey, null,
UserOperatorConfig.DEFAULT_SECRET_PREFIX); assertThat(model.namespace, is(ResourceUtils.NAMESPACE)); assertThat(model.name, is(ResourceUtils.NAME)); @@ -89,7 +90,7 @@ public void testFromCrdQuotaUser() { @Test public void testFromCrdQuotaUserWithNullValues() { KafkaUser quotasUserWithNulls = ResourceUtils.createKafkaUserQuotas(null, 2000, null); - KafkaUserModel model = KafkaUserModel.fromCrd(mockCertManager, passwordGenerator, quotasUserWithNulls, clientsCaCert, clientsCaKey, null, UserOperatorConfig.DEFAULT_SECRET_PREFIX); + KafkaUserModel model = KafkaUserModel.fromCrd(Reconciliation.DUMMY_RECONCILIATION, mockCertManager, passwordGenerator, quotasUserWithNulls, clientsCaCert, clientsCaKey, null, UserOperatorConfig.DEFAULT_SECRET_PREFIX); assertThat(model.namespace, is(ResourceUtils.NAMESPACE)); assertThat(model.name, is(ResourceUtils.NAME)); @@ -106,7 +107,7 @@ public void testFromCrdQuotaUserWithNullValues() { @Test public void testGenerateSecret() { - KafkaUserModel model = KafkaUserModel.fromCrd(mockCertManager, passwordGenerator, tlsUser, clientsCaCert, clientsCaKey, null, UserOperatorConfig.DEFAULT_SECRET_PREFIX); + KafkaUserModel model = KafkaUserModel.fromCrd(Reconciliation.DUMMY_RECONCILIATION, mockCertManager, passwordGenerator, tlsUser, clientsCaCert, clientsCaKey, null, UserOperatorConfig.DEFAULT_SECRET_PREFIX); Secret generatedSecret = model.generateSecret(); assertThat(generatedSecret.getData().keySet(), is(set("ca.crt", "user.crt", "user.key", "user.p12", "user.password"))); @@ -129,7 +130,7 @@ public void testGenerateSecret() { @Test public void testGenerateSecretWithPrefix() { String secretPrefix = "strimzi-"; - KafkaUserModel model = KafkaUserModel.fromCrd(mockCertManager, passwordGenerator, tlsUser, clientsCaCert, clientsCaKey, null, secretPrefix); + KafkaUserModel model = KafkaUserModel.fromCrd(Reconciliation.DUMMY_RECONCILIATION, mockCertManager, passwordGenerator, tlsUser, clientsCaCert, clientsCaKey, null, secretPrefix); Secret generatedSecret = model.generateSecret(); assertThat(generatedSecret.getData().keySet(), is(set("ca.crt", "user.crt", "user.key", "user.p12", "user.password"))); @@ -165,7 +166,7 @@ public void testGenerateSecretWithMetadataOverrides() { .endSpec() .build(); - KafkaUserModel model = KafkaUserModel.fromCrd(mockCertManager, passwordGenerator, userWithTemplate, clientsCaCert, clientsCaKey, null, UserOperatorConfig.DEFAULT_SECRET_PREFIX); + KafkaUserModel model = KafkaUserModel.fromCrd(Reconciliation.DUMMY_RECONCILIATION, mockCertManager, passwordGenerator, userWithTemplate, clientsCaCert, clientsCaKey, null, UserOperatorConfig.DEFAULT_SECRET_PREFIX); Secret generatedSecret = model.generateSecret(); assertThat(generatedSecret.getData().keySet(), is(set("ca.crt", "user.crt", "user.key", "user.p12", "user.password"))); @@ -188,7 +189,7 @@ public void testGenerateSecretWithMetadataOverrides() { @Test public void testGenerateSecretGeneratesCertificateWhenNoSecretExistsProvidedByUser() { - KafkaUserModel model = KafkaUserModel.fromCrd(mockCertManager, passwordGenerator, tlsUser, clientsCaCert, clientsCaKey, null, UserOperatorConfig.DEFAULT_SECRET_PREFIX); + KafkaUserModel model = KafkaUserModel.fromCrd(Reconciliation.DUMMY_RECONCILIATION, mockCertManager, passwordGenerator, tlsUser, clientsCaCert, clientsCaKey, null, UserOperatorConfig.DEFAULT_SECRET_PREFIX); Secret generated = model.generateSecret(); assertThat(new String(model.decodeFromSecret(generated, "ca.crt")), is("clients-ca-crt")); @@ -210,7 +211,7 @@ public void 
testGenerateSecretGeneratesCertificateAtCaChange() { Secret clientsCaKeySecret = ResourceUtils.createClientsCaKeySecret(); clientsCaKeySecret.getData().put("ca.key", Base64.getEncoder().encodeToString("different-clients-ca-key".getBytes())); - KafkaUserModel model = KafkaUserModel.fromCrd(mockCertManager, passwordGenerator, tlsUser, clientsCaCertSecret, clientsCaKeySecret, userCert, UserOperatorConfig.DEFAULT_SECRET_PREFIX); + KafkaUserModel model = KafkaUserModel.fromCrd(Reconciliation.DUMMY_RECONCILIATION, mockCertManager, passwordGenerator, tlsUser, clientsCaCertSecret, clientsCaKeySecret, userCert, UserOperatorConfig.DEFAULT_SECRET_PREFIX); Secret generatedSecret = model.generateSecret(); assertThat(new String(model.decodeFromSecret(generatedSecret, "ca.crt")), is("different-clients-ca-crt")); @@ -227,7 +228,7 @@ public void testGenerateSecretGeneratesCertificateAtCaChange() { public void testGenerateSecretGeneratedCertificateDoesNotChangeFromUserProvided() { Secret userCert = ResourceUtils.createUserSecretTls(); - KafkaUserModel model = KafkaUserModel.fromCrd(mockCertManager, passwordGenerator, tlsUser, clientsCaCert, clientsCaKey, userCert, UserOperatorConfig.DEFAULT_SECRET_PREFIX); + KafkaUserModel model = KafkaUserModel.fromCrd(Reconciliation.DUMMY_RECONCILIATION, mockCertManager, passwordGenerator, tlsUser, clientsCaCert, clientsCaKey, userCert, UserOperatorConfig.DEFAULT_SECRET_PREFIX); Secret generatedSecret = model.generateSecret(); // These values match those in ResourceUtils.createUserSecretTls() @@ -244,7 +245,7 @@ public void testGenerateSecretGeneratedCertificateDoesNotChangeFromUserProvided( @Test public void testGenerateSecretGeneratesCertificateWithExistingScramSha() { Secret userCert = ResourceUtils.createUserSecretScramSha(); - KafkaUserModel model = KafkaUserModel.fromCrd(mockCertManager, passwordGenerator, tlsUser, clientsCaCert, clientsCaKey, userCert, UserOperatorConfig.DEFAULT_SECRET_PREFIX); + KafkaUserModel model = KafkaUserModel.fromCrd(Reconciliation.DUMMY_RECONCILIATION, mockCertManager, passwordGenerator, tlsUser, clientsCaCert, clientsCaKey, userCert, UserOperatorConfig.DEFAULT_SECRET_PREFIX); Secret generated = model.generateSecret(); assertThat(new String(model.decodeFromSecret(generated, "ca.crt")), is("clients-ca-crt")); @@ -259,14 +260,14 @@ public void testGenerateSecretGeneratesCertificateWithExistingScramSha() { @Test public void testGenerateSecretGeneratesKeyStoreWhenOldVersionSecretExists() { - KafkaUserModel model = KafkaUserModel.fromCrd(mockCertManager, passwordGenerator, tlsUser, clientsCaCert, clientsCaKey, null, UserOperatorConfig.DEFAULT_SECRET_PREFIX); + KafkaUserModel model = KafkaUserModel.fromCrd(Reconciliation.DUMMY_RECONCILIATION, mockCertManager, passwordGenerator, tlsUser, clientsCaCert, clientsCaKey, null, UserOperatorConfig.DEFAULT_SECRET_PREFIX); Secret oldSecret = model.generateSecret(); // remove keystore and password to simulate a Secret from a previous version oldSecret.getData().remove("user.p12"); oldSecret.getData().remove("user.password"); - model = KafkaUserModel.fromCrd(mockCertManager, passwordGenerator, tlsUser, clientsCaCert, clientsCaKey, oldSecret, UserOperatorConfig.DEFAULT_SECRET_PREFIX); + model = KafkaUserModel.fromCrd(Reconciliation.DUMMY_RECONCILIATION, mockCertManager, passwordGenerator, tlsUser, clientsCaCert, clientsCaKey, oldSecret, UserOperatorConfig.DEFAULT_SECRET_PREFIX); Secret generatedSecret = model.generateSecret(); assertThat(generatedSecret.getData().keySet(), is(set("ca.crt", "user.crt", 
"user.key", "user.p12", "user.password"))); @@ -284,7 +285,7 @@ public void testGenerateSecretGeneratesKeyStoreWhenOldVersionSecretExists() { @Test public void testGenerateSecretGeneratesPasswordWhenNoUserSecretExists() { - KafkaUserModel model = KafkaUserModel.fromCrd(mockCertManager, passwordGenerator, scramShaUser, clientsCaCert, clientsCaKey, null, UserOperatorConfig.DEFAULT_SECRET_PREFIX); + KafkaUserModel model = KafkaUserModel.fromCrd(Reconciliation.DUMMY_RECONCILIATION, mockCertManager, passwordGenerator, scramShaUser, clientsCaCert, clientsCaKey, null, UserOperatorConfig.DEFAULT_SECRET_PREFIX); Secret generatedSecret = model.generateSecret(); assertThat(generatedSecret.getMetadata().getName(), is(ResourceUtils.NAME)); @@ -312,7 +313,7 @@ public void testGenerateSecretGeneratesPasswordWhenNoUserSecretExists() { @Test public void testGenerateSecretGeneratesPasswordKeepingExistingScramShaPassword() { Secret scramShaSecret = ResourceUtils.createUserSecretScramSha(); - KafkaUserModel model = KafkaUserModel.fromCrd(mockCertManager, passwordGenerator, scramShaUser, clientsCaCert, clientsCaKey, scramShaSecret, UserOperatorConfig.DEFAULT_SECRET_PREFIX); + KafkaUserModel model = KafkaUserModel.fromCrd(Reconciliation.DUMMY_RECONCILIATION, mockCertManager, passwordGenerator, scramShaUser, clientsCaCert, clientsCaKey, scramShaSecret, UserOperatorConfig.DEFAULT_SECRET_PREFIX); Secret generated = model.generateSecret(); assertThat(generated.getMetadata().getName(), is(ResourceUtils.NAME)); @@ -336,7 +337,7 @@ public void testGenerateSecretGeneratesPasswordKeepingExistingScramShaPassword() @Test public void testGenerateSecretGeneratesPasswordFromExistingTlsSecret() { Secret userCert = ResourceUtils.createUserSecretTls(); - KafkaUserModel model = KafkaUserModel.fromCrd(mockCertManager, passwordGenerator, scramShaUser, clientsCaCert, clientsCaKey, userCert, UserOperatorConfig.DEFAULT_SECRET_PREFIX); + KafkaUserModel model = KafkaUserModel.fromCrd(Reconciliation.DUMMY_RECONCILIATION, mockCertManager, passwordGenerator, scramShaUser, clientsCaCert, clientsCaKey, userCert, UserOperatorConfig.DEFAULT_SECRET_PREFIX); Secret generated = model.generateSecret(); assertThat(generated.getMetadata().getName(), is(ResourceUtils.NAME)); @@ -368,7 +369,7 @@ public void testGenerateSecretWithNoTlsAuthenticationKafkaUserReturnsNull() { KafkaUser user = ResourceUtils.createKafkaUserTls(); user.setSpec(new KafkaUserSpec()); - KafkaUserModel model = KafkaUserModel.fromCrd(mockCertManager, passwordGenerator, user, clientsCaCert, clientsCaKey, userCert, UserOperatorConfig.DEFAULT_SECRET_PREFIX); + KafkaUserModel model = KafkaUserModel.fromCrd(Reconciliation.DUMMY_RECONCILIATION, mockCertManager, passwordGenerator, user, clientsCaCert, clientsCaKey, userCert, UserOperatorConfig.DEFAULT_SECRET_PREFIX); assertThat(model.generateSecret(), is(nullValue())); } @@ -379,7 +380,7 @@ public void testGetSimpleAclRulesWithNoSimpleAuthorizationReturnsNull() { KafkaUser user = ResourceUtils.createKafkaUserTls(); user.setSpec(new KafkaUserSpec()); - KafkaUserModel model = KafkaUserModel.fromCrd(mockCertManager, passwordGenerator, user, clientsCaCert, clientsCaKey, userCert, UserOperatorConfig.DEFAULT_SECRET_PREFIX); + KafkaUserModel model = KafkaUserModel.fromCrd(Reconciliation.DUMMY_RECONCILIATION, mockCertManager, passwordGenerator, user, clientsCaCert, clientsCaKey, userCert, UserOperatorConfig.DEFAULT_SECRET_PREFIX); assertThat(model.getSimpleAclRules(), is(nullValue())); } @@ -407,7 +408,7 @@ public void 
testFromCrdTlsUserWith65CharTlsUsernameThrows() { assertThrows(InvalidResourceException.class, () -> { // 65 characters => Should throw exception with TLS - KafkaUserModel.fromCrd(mockCertManager, passwordGenerator, tooLong, clientsCaCert, clientsCaKey, null, UserOperatorConfig.DEFAULT_SECRET_PREFIX); + KafkaUserModel.fromCrd(Reconciliation.DUMMY_RECONCILIATION, mockCertManager, passwordGenerator, tooLong, clientsCaCert, clientsCaKey, null, UserOperatorConfig.DEFAULT_SECRET_PREFIX); }); } @@ -420,7 +421,7 @@ public void testFromCrdTlsUserWith64CharTlsUsernameValid() { .endMetadata() .build(); - KafkaUserModel.fromCrd(mockCertManager, passwordGenerator, notTooLong, clientsCaCert, clientsCaKey, null, UserOperatorConfig.DEFAULT_SECRET_PREFIX); + KafkaUserModel.fromCrd(Reconciliation.DUMMY_RECONCILIATION, mockCertManager, passwordGenerator, notTooLong, clientsCaCert, clientsCaKey, null, UserOperatorConfig.DEFAULT_SECRET_PREFIX); } @Test @@ -432,6 +433,6 @@ public void testFromCrdScramShaUserWith65CharSaslUsernameValid() { .endMetadata() .build(); - KafkaUserModel.fromCrd(mockCertManager, passwordGenerator, tooLong, clientsCaCert, clientsCaKey, null, UserOperatorConfig.DEFAULT_SECRET_PREFIX); + KafkaUserModel.fromCrd(Reconciliation.DUMMY_RECONCILIATION, mockCertManager, passwordGenerator, tooLong, clientsCaCert, clientsCaKey, null, UserOperatorConfig.DEFAULT_SECRET_PREFIX); } } diff --git a/user-operator/src/test/java/io/strimzi/operator/user/operator/KafkaUserOperatorTest.java b/user-operator/src/test/java/io/strimzi/operator/user/operator/KafkaUserOperatorTest.java index 37418e1abc..f02df7ccbc 100644 --- a/user-operator/src/test/java/io/strimzi/operator/user/operator/KafkaUserOperatorTest.java +++ b/user-operator/src/test/java/io/strimzi/operator/user/operator/KafkaUserOperatorTest.java @@ -85,13 +85,13 @@ public void testCreateTlsUser(VertxTestContext context) { ArgumentCaptor secretNamespaceCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor secretNameCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor secretCaptor = ArgumentCaptor.forClass(Secret.class); - when(mockSecretOps.reconcile(secretNamespaceCaptor.capture(), secretNameCaptor.capture(), secretCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockSecretOps.reconcile(any(), secretNamespaceCaptor.capture(), secretNameCaptor.capture(), secretCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor aclNameCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor> aclRulesCaptor = ArgumentCaptor.forClass(Set.class); - when(aclOps.reconcile(aclNameCaptor.capture(), aclRulesCaptor.capture())).thenReturn(Future.succeededFuture()); + when(aclOps.reconcile(any(), aclNameCaptor.capture(), aclRulesCaptor.capture())).thenReturn(Future.succeededFuture()); - when(scramOps.reconcile(any(), any())).thenReturn(Future.succeededFuture()); - when(quotasOps.reconcile(any(), any())).thenReturn(Future.succeededFuture()); + when(scramOps.reconcile(any(), any(), any())).thenReturn(Future.succeededFuture()); + when(quotasOps.reconcile(any(), any(), any())).thenReturn(Future.succeededFuture()); KafkaUserOperator op = new KafkaUserOperator(vertx, mockCertManager, mockCrdOps, Labels.EMPTY, mockSecretOps, scramOps, quotasOps, aclOps, ResourceUtils.CA_CERT_NAME, ResourceUtils.CA_KEY_NAME, ResourceUtils.NAMESPACE, UserOperatorConfig.DEFAULT_SECRET_PREFIX); KafkaUser user = ResourceUtils.createKafkaUserTls(); @@ -101,7 +101,7 @@ public void testCreateTlsUser(VertxTestContext context) { 
when(mockSecretOps.get(anyString(), eq("user-key"))).thenReturn(clientsCaKey); when(mockCrdOps.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture(user)); - when(mockCrdOps.updateStatusAsync(any(KafkaUser.class))).thenReturn(Future.succeededFuture()); + when(mockCrdOps.updateStatusAsync(any(), any(KafkaUser.class))).thenReturn(Future.succeededFuture()); Checkpoint async = context.checkpoint(); op.createOrUpdate(new Reconciliation("test-trigger", KafkaUser.RESOURCE_KIND, ResourceUtils.NAMESPACE, ResourceUtils.NAME), user) @@ -161,18 +161,18 @@ public void testUpdateUserNoChange(VertxTestContext context) { ArgumentCaptor secretNamespaceCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor secretNameCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor secretCaptor = ArgumentCaptor.forClass(Secret.class); - when(mockSecretOps.reconcile(secretNamespaceCaptor.capture(), secretNameCaptor.capture(), secretCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockSecretOps.reconcile(any(), secretNamespaceCaptor.capture(), secretNameCaptor.capture(), secretCaptor.capture())).thenReturn(Future.succeededFuture()); - when(scramOps.reconcile(any(), any())).thenReturn(Future.succeededFuture()); + when(scramOps.reconcile(any(), any(), any())).thenReturn(Future.succeededFuture()); ArgumentCaptor aclNameCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor> aclRulesCaptor = ArgumentCaptor.forClass(Set.class); - when(aclOps.reconcile(aclNameCaptor.capture(), aclRulesCaptor.capture())).thenReturn(Future.succeededFuture()); + when(aclOps.reconcile(any(), aclNameCaptor.capture(), aclRulesCaptor.capture())).thenReturn(Future.succeededFuture()); KafkaUser user = ResourceUtils.createKafkaUserTls(); when(mockCrdOps.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture(user)); - when(mockCrdOps.updateStatusAsync(any(KafkaUser.class))).thenReturn(Future.succeededFuture()); + when(mockCrdOps.updateStatusAsync(any(), any(KafkaUser.class))).thenReturn(Future.succeededFuture()); KafkaUserOperator op = new KafkaUserOperator(vertx, mockCertManager, mockCrdOps, Labels.EMPTY, mockSecretOps, scramOps, quotasOps, aclOps, ResourceUtils.CA_CERT_NAME, ResourceUtils.CA_KEY_NAME, ResourceUtils.NAMESPACE, UserOperatorConfig.DEFAULT_SECRET_PREFIX); Secret clientsCa = ResourceUtils.createClientsCaCertSecret(); @@ -182,7 +182,7 @@ public void testUpdateUserNoChange(VertxTestContext context) { when(mockSecretOps.get(anyString(), eq("user-key"))).thenReturn(clientsCaKey); when(mockSecretOps.get(anyString(), eq(KafkaUserModel.getSecretName(UserOperatorConfig.DEFAULT_SECRET_PREFIX, user.getMetadata().getName())))).thenReturn(userCert); - when(quotasOps.reconcile(any(), any())).thenReturn(Future.succeededFuture()); + when(quotasOps.reconcile(any(), any(), any())).thenReturn(Future.succeededFuture()); Checkpoint async = context.checkpoint(); op.createOrUpdate(new Reconciliation("test-trigger", KafkaUser.RESOURCE_KIND, ResourceUtils.NAMESPACE, ResourceUtils.NAME), user) @@ -240,24 +240,24 @@ public void testUpdateUserNoAuthenticationAndNoAuthorization(VertxTestContext co ArgumentCaptor secretNamespaceCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor secretNameCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor secretCaptor = ArgumentCaptor.forClass(Secret.class); - when(mockSecretOps.reconcile(secretNamespaceCaptor.capture(), secretNameCaptor.capture(), secretCaptor.capture())).thenReturn(Future.succeededFuture()); + 
when(mockSecretOps.reconcile(any(), secretNamespaceCaptor.capture(), secretNameCaptor.capture(), secretCaptor.capture())).thenReturn(Future.succeededFuture()); - when(scramOps.reconcile(any(), any())).thenReturn(Future.succeededFuture()); + when(scramOps.reconcile(any(), any(), any())).thenReturn(Future.succeededFuture()); ArgumentCaptor aclNameCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor> aclRulesCaptor = ArgumentCaptor.forClass(Set.class); - when(aclOps.reconcile(aclNameCaptor.capture(), aclRulesCaptor.capture())).thenReturn(Future.succeededFuture()); + when(aclOps.reconcile(any(), aclNameCaptor.capture(), aclRulesCaptor.capture())).thenReturn(Future.succeededFuture()); KafkaUser user = ResourceUtils.createKafkaUserTls(); user.getSpec().setAuthorization(null); user.getSpec().setAuthentication(null); when(mockCrdOps.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture(user)); - when(mockCrdOps.updateStatusAsync(any(KafkaUser.class))).thenReturn(Future.succeededFuture()); + when(mockCrdOps.updateStatusAsync(any(), any(KafkaUser.class))).thenReturn(Future.succeededFuture()); KafkaUserOperator op = new KafkaUserOperator(vertx, mockCertManager, mockCrdOps, Labels.EMPTY, mockSecretOps, scramOps, quotasOps, aclOps, ResourceUtils.CA_CERT_NAME, ResourceUtils.CA_KEY_NAME, ResourceUtils.NAMESPACE, UserOperatorConfig.DEFAULT_SECRET_PREFIX); - when(quotasOps.reconcile(any(), any())).thenReturn(Future.succeededFuture()); + when(quotasOps.reconcile(any(), any(), any())).thenReturn(Future.succeededFuture()); Checkpoint async = context.checkpoint(); op.createOrUpdate(new Reconciliation("test-trigger", KafkaUser.RESOURCE_KIND, ResourceUtils.NAMESPACE, ResourceUtils.NAME), user) @@ -303,14 +303,14 @@ public void testUpdateUserNewCert(VertxTestContext context) { ArgumentCaptor secretNamespaceCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor secretNameCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor secretCaptor = ArgumentCaptor.forClass(Secret.class); - when(mockSecretOps.reconcile(secretNamespaceCaptor.capture(), secretNameCaptor.capture(), secretCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockSecretOps.reconcile(any(), secretNamespaceCaptor.capture(), secretNameCaptor.capture(), secretCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor aclNameCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor> aclRulesCaptor = ArgumentCaptor.forClass(Set.class); - when(aclOps.reconcile(aclNameCaptor.capture(), aclRulesCaptor.capture())).thenReturn(Future.succeededFuture()); + when(aclOps.reconcile(any(), aclNameCaptor.capture(), aclRulesCaptor.capture())).thenReturn(Future.succeededFuture()); - when(scramOps.reconcile(any(), any())).thenReturn(Future.succeededFuture()); - when(quotasOps.reconcile(any(), any())).thenReturn(Future.succeededFuture()); + when(scramOps.reconcile(any(), any(), any())).thenReturn(Future.succeededFuture()); + when(quotasOps.reconcile(any(), any(), any())).thenReturn(Future.succeededFuture()); when(mockCrdOps.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture()); KafkaUserOperator op = new KafkaUserOperator(vertx, mockCertManager, mockCrdOps, Labels.EMPTY, mockSecretOps, scramOps, quotasOps, aclOps, ResourceUtils.CA_CERT_NAME, ResourceUtils.CA_KEY_NAME, ResourceUtils.NAMESPACE, UserOperatorConfig.DEFAULT_SECRET_PREFIX); @@ -327,7 +327,7 @@ public void testUpdateUserNewCert(VertxTestContext context) { when(mockSecretOps.get(anyString(), 
eq(KafkaUserModel.getSecretName(UserOperatorConfig.DEFAULT_SECRET_PREFIX, user.getMetadata().getName())))).thenReturn(userCert); when(mockCrdOps.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture(user)); - when(mockCrdOps.updateStatusAsync(any(KafkaUser.class))).thenReturn(Future.succeededFuture()); + when(mockCrdOps.updateStatusAsync(any(), any(KafkaUser.class))).thenReturn(Future.succeededFuture()); Checkpoint async = context.checkpoint(); op.createOrUpdate(new Reconciliation("test-trigger", KafkaUser.RESOURCE_KIND, ResourceUtils.NAMESPACE, ResourceUtils.NAME), user) @@ -367,14 +367,14 @@ public void testDeleteTlsUser(VertxTestContext context) { ArgumentCaptor secretNamespaceCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor secretNameCaptor = ArgumentCaptor.forClass(String.class); - when(mockSecretOps.reconcile(secretNamespaceCaptor.capture(), secretNameCaptor.capture(), isNull())).thenReturn(Future.succeededFuture()); + when(mockSecretOps.reconcile(any(), secretNamespaceCaptor.capture(), secretNameCaptor.capture(), isNull())).thenReturn(Future.succeededFuture()); - when(scramOps.reconcile(any(), any())).thenReturn(Future.succeededFuture()); + when(scramOps.reconcile(any(), any(), any())).thenReturn(Future.succeededFuture()); ArgumentCaptor aclNameCaptor = ArgumentCaptor.forClass(String.class); - when(aclOps.reconcile(aclNameCaptor.capture(), isNull())).thenReturn(Future.succeededFuture()); + when(aclOps.reconcile(any(), aclNameCaptor.capture(), isNull())).thenReturn(Future.succeededFuture()); - when(quotasOps.reconcile(anyString(), eq(null))).thenReturn(Future.succeededFuture()); + when(quotasOps.reconcile(any(), anyString(), eq(null))).thenReturn(Future.succeededFuture()); KafkaUserOperator op = new KafkaUserOperator(vertx, mockCertManager, mockCrdOps, Labels.EMPTY, mockSecretOps, scramOps, quotasOps, aclOps, ResourceUtils.CA_CERT_NAME, ResourceUtils.CA_KEY_NAME, ResourceUtils.NAMESPACE, UserOperatorConfig.DEFAULT_SECRET_PREFIX); @@ -416,13 +416,13 @@ public void testReconcileNewTlsUser(VertxTestContext context) { ArgumentCaptor secretNamespaceCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor secretNameCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor secretCaptor = ArgumentCaptor.forClass(Secret.class); - when(mockSecretOps.reconcile(secretNamespaceCaptor.capture(), secretNameCaptor.capture(), secretCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockSecretOps.reconcile(any(), secretNamespaceCaptor.capture(), secretNameCaptor.capture(), secretCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor aclNameCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor> aclRulesCaptor = ArgumentCaptor.forClass(Set.class); - when(aclOps.reconcile(aclNameCaptor.capture(), aclRulesCaptor.capture())).thenReturn(Future.succeededFuture()); + when(aclOps.reconcile(any(), aclNameCaptor.capture(), aclRulesCaptor.capture())).thenReturn(Future.succeededFuture()); - when(scramOps.reconcile(any(), any())).thenReturn(Future.succeededFuture()); + when(scramOps.reconcile(any(), any(), any())).thenReturn(Future.succeededFuture()); when(mockSecretOps.get(eq(clientsCa.getMetadata().getNamespace()), eq(ResourceUtils.CA_CERT_NAME))).thenReturn(clientsCa); when(mockSecretOps.get(eq(clientsCa.getMetadata().getNamespace()), eq(ResourceUtils.CA_KEY_NAME))).thenReturn(clientsCaKey); @@ -430,8 +430,8 @@ public void testReconcileNewTlsUser(VertxTestContext context) { when(mockCrdOps.get(eq(user.getMetadata().getNamespace()), 
eq(user.getMetadata().getName()))).thenReturn(user); when(mockCrdOps.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture(user)); - when(mockCrdOps.updateStatusAsync(any(KafkaUser.class))).thenReturn(Future.succeededFuture()); - when(quotasOps.reconcile(any(), any())).thenReturn(Future.succeededFuture()); + when(mockCrdOps.updateStatusAsync(any(), any(KafkaUser.class))).thenReturn(Future.succeededFuture()); + when(quotasOps.reconcile(any(), any(), any())).thenReturn(Future.succeededFuture()); Checkpoint async = context.checkpoint(); op.reconcile(new Reconciliation("test-trigger", KafkaUser.RESOURCE_KIND, ResourceUtils.NAMESPACE, ResourceUtils.NAME)) @@ -500,13 +500,13 @@ public void testReconcileExistingTlsUser(VertxTestContext context) { ArgumentCaptor secretNamespaceCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor secretNameCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor secretCaptor = ArgumentCaptor.forClass(Secret.class); - when(mockSecretOps.reconcile(secretNamespaceCaptor.capture(), secretNameCaptor.capture(), secretCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockSecretOps.reconcile(any(), secretNamespaceCaptor.capture(), secretNameCaptor.capture(), secretCaptor.capture())).thenReturn(Future.succeededFuture()); - when(scramOps.reconcile(any(), any())).thenReturn(Future.succeededFuture()); + when(scramOps.reconcile(any(), any(), any())).thenReturn(Future.succeededFuture()); ArgumentCaptor aclNameCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor> aclRulesCaptor = ArgumentCaptor.forClass(Set.class); - when(aclOps.reconcile(aclNameCaptor.capture(), aclRulesCaptor.capture())).thenReturn(Future.succeededFuture()); + when(aclOps.reconcile(any(), aclNameCaptor.capture(), aclRulesCaptor.capture())).thenReturn(Future.succeededFuture()); when(mockSecretOps.get(eq(clientsCa.getMetadata().getNamespace()), eq(clientsCa.getMetadata().getName()))).thenReturn(clientsCa); when(mockSecretOps.get(eq(clientsCa.getMetadata().getNamespace()), eq(clientsCaKey.getMetadata().getName()))).thenReturn(clientsCaKey); @@ -514,8 +514,8 @@ public void testReconcileExistingTlsUser(VertxTestContext context) { when(mockCrdOps.get(eq(user.getMetadata().getNamespace()), eq(user.getMetadata().getName()))).thenReturn(user); when(mockCrdOps.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture(user)); - when(mockCrdOps.updateStatusAsync(any(KafkaUser.class))).thenReturn(Future.succeededFuture()); - when(quotasOps.reconcile(any(), any())).thenReturn(Future.succeededFuture()); + when(mockCrdOps.updateStatusAsync(any(), any(KafkaUser.class))).thenReturn(Future.succeededFuture()); + when(quotasOps.reconcile(any(), any(), any())).thenReturn(Future.succeededFuture()); Checkpoint async = context.checkpoint(); op.reconcile(new Reconciliation("test-trigger", KafkaUser.RESOURCE_KIND, ResourceUtils.NAMESPACE, ResourceUtils.NAME)) @@ -582,19 +582,19 @@ public void testReconcileDeleteTlsUser(VertxTestContext context) { ArgumentCaptor secretNamespaceCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor secretNameCaptor = ArgumentCaptor.forClass(String.class); - when(mockSecretOps.reconcile(secretNamespaceCaptor.capture(), secretNameCaptor.capture(), isNull())).thenReturn(Future.succeededFuture()); + when(mockSecretOps.reconcile(any(), secretNamespaceCaptor.capture(), secretNameCaptor.capture(), isNull())).thenReturn(Future.succeededFuture()); - when(scramOps.reconcile(any(), any())).thenReturn(Future.succeededFuture()); + 
when(scramOps.reconcile(any(), any(), any())).thenReturn(Future.succeededFuture()); ArgumentCaptor aclNameCaptor = ArgumentCaptor.forClass(String.class); - when(aclOps.reconcile(aclNameCaptor.capture(), isNull())).thenReturn(Future.succeededFuture()); + when(aclOps.reconcile(any(), aclNameCaptor.capture(), isNull())).thenReturn(Future.succeededFuture()); when(mockSecretOps.get(eq(clientsCa.getMetadata().getNamespace()), eq(clientsCa.getMetadata().getName()))).thenReturn(clientsCa); when(mockSecretOps.get(eq(user.getMetadata().getNamespace()), eq(user.getMetadata().getName()))).thenReturn(userCert); when(mockCrdOps.get(eq(user.getMetadata().getNamespace()), eq(user.getMetadata().getName()))).thenReturn(null); - when(quotasOps.reconcile(anyString(), eq(null))).thenReturn(Future.succeededFuture()); + when(quotasOps.reconcile(any(), anyString(), eq(null))).thenReturn(Future.succeededFuture()); Checkpoint async = context.checkpoint(); op.reconcile(new Reconciliation("test-trigger", KafkaUser.RESOURCE_KIND, ResourceUtils.NAMESPACE, ResourceUtils.NAME)) @@ -706,22 +706,22 @@ public void testReconcileNewScramShaUser(VertxTestContext context) { ArgumentCaptor secretNamespaceCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor secretNameCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor secretCaptor = ArgumentCaptor.forClass(Secret.class); - when(mockSecretOps.reconcile(secretNamespaceCaptor.capture(), secretNameCaptor.capture(), secretCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockSecretOps.reconcile(any(), secretNamespaceCaptor.capture(), secretNameCaptor.capture(), secretCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor aclNameCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor> aclRulesCaptor = ArgumentCaptor.forClass(Set.class); - when(aclOps.reconcile(aclNameCaptor.capture(), aclRulesCaptor.capture())).thenReturn(Future.succeededFuture()); + when(aclOps.reconcile(any(), aclNameCaptor.capture(), aclRulesCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor scramUserCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor scramPasswordCaptor = ArgumentCaptor.forClass(String.class); - when(scramOps.reconcile(scramUserCaptor.capture(), scramPasswordCaptor.capture())).thenReturn(Future.succeededFuture()); + when(scramOps.reconcile(any(), scramUserCaptor.capture(), scramPasswordCaptor.capture())).thenReturn(Future.succeededFuture()); when(mockSecretOps.get(eq(user.getMetadata().getNamespace()), eq(user.getMetadata().getName()))).thenReturn(null); when(mockCrdOps.get(eq(user.getMetadata().getNamespace()), eq(user.getMetadata().getName()))).thenReturn(user); when(mockCrdOps.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture(user)); - when(mockCrdOps.updateStatusAsync(any(KafkaUser.class))).thenReturn(Future.succeededFuture()); - when(quotasOps.reconcile(any(), any())).thenReturn(Future.succeededFuture()); + when(mockCrdOps.updateStatusAsync(any(), any(KafkaUser.class))).thenReturn(Future.succeededFuture()); + when(quotasOps.reconcile(any(), any(), any())).thenReturn(Future.succeededFuture()); Checkpoint async = context.checkpoint(); op.reconcile(new Reconciliation("test-trigger", KafkaUser.RESOURCE_KIND, ResourceUtils.NAMESPACE, ResourceUtils.NAME)) @@ -790,22 +790,22 @@ public void testReconcileExistingScramShaUser(VertxTestContext context) { ArgumentCaptor secretNamespaceCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor secretNameCaptor = 
ArgumentCaptor.forClass(String.class); ArgumentCaptor secretCaptor = ArgumentCaptor.forClass(Secret.class); - when(mockSecretOps.reconcile(secretNamespaceCaptor.capture(), secretNameCaptor.capture(), secretCaptor.capture())).thenReturn(Future.succeededFuture()); + when(mockSecretOps.reconcile(any(), secretNamespaceCaptor.capture(), secretNameCaptor.capture(), secretCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor scramUserCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor scramPasswordCaptor = ArgumentCaptor.forClass(String.class); - when(scramOps.reconcile(scramUserCaptor.capture(), scramPasswordCaptor.capture())).thenReturn(Future.succeededFuture()); + when(scramOps.reconcile(any(), scramUserCaptor.capture(), scramPasswordCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor aclNameCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor> aclRulesCaptor = ArgumentCaptor.forClass(Set.class); - when(aclOps.reconcile(aclNameCaptor.capture(), aclRulesCaptor.capture())).thenReturn(Future.succeededFuture()); + when(aclOps.reconcile(any(), aclNameCaptor.capture(), aclRulesCaptor.capture())).thenReturn(Future.succeededFuture()); when(mockSecretOps.get(eq(user.getMetadata().getNamespace()), eq(user.getMetadata().getName()))).thenReturn(userCert); when(mockCrdOps.get(eq(user.getMetadata().getNamespace()), eq(user.getMetadata().getName()))).thenReturn(user); when(mockCrdOps.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture(user)); - when(mockCrdOps.updateStatusAsync(any(KafkaUser.class))).thenReturn(Future.succeededFuture()); - when(quotasOps.reconcile(any(), any())).thenReturn(Future.succeededFuture()); + when(mockCrdOps.updateStatusAsync(any(), any(KafkaUser.class))).thenReturn(Future.succeededFuture()); + when(quotasOps.reconcile(any(), any(), any())).thenReturn(Future.succeededFuture()); Checkpoint async = context.checkpoint(); op.reconcile(new Reconciliation("test-trigger", KafkaUser.RESOURCE_KIND, ResourceUtils.NAMESPACE, ResourceUtils.NAME)) @@ -872,20 +872,20 @@ public void testReconcileDeleteScramShaUser(VertxTestContext context) { ArgumentCaptor secretNamespaceCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor secretNameCaptor = ArgumentCaptor.forClass(String.class); - when(mockSecretOps.reconcile(secretNamespaceCaptor.capture(), secretNameCaptor.capture(), isNull())).thenReturn(Future.succeededFuture()); + when(mockSecretOps.reconcile(any(), secretNamespaceCaptor.capture(), secretNameCaptor.capture(), isNull())).thenReturn(Future.succeededFuture()); ArgumentCaptor scramUserCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor scramPasswordCaptor = ArgumentCaptor.forClass(String.class); - when(scramOps.reconcile(scramUserCaptor.capture(), scramPasswordCaptor.capture())).thenReturn(Future.succeededFuture()); + when(scramOps.reconcile(any(), scramUserCaptor.capture(), scramPasswordCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor aclNameCaptor = ArgumentCaptor.forClass(String.class); - when(aclOps.reconcile(aclNameCaptor.capture(), isNull())).thenReturn(Future.succeededFuture()); + when(aclOps.reconcile(any(), aclNameCaptor.capture(), isNull())).thenReturn(Future.succeededFuture()); when(mockSecretOps.get(eq(user.getMetadata().getNamespace()), eq(user.getMetadata().getName()))).thenReturn(userCert); when(mockCrdOps.get(eq(user.getMetadata().getNamespace()), eq(user.getMetadata().getName()))).thenReturn(null); - when(quotasOps.reconcile(anyString(), 
eq(null))).thenReturn(Future.succeededFuture()); + when(quotasOps.reconcile(any(), anyString(), eq(null))).thenReturn(Future.succeededFuture()); Checkpoint async = context.checkpoint(); op.reconcile(new Reconciliation("test-trigger", KafkaUser.RESOURCE_KIND, ResourceUtils.NAMESPACE, ResourceUtils.NAME)) @@ -928,12 +928,12 @@ public void testUserStatusNotReadyIfSecretFailedReconcile(VertxTestContext conte when(mockCrdOps.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture(user)); when(mockCrdOps.get(anyString(), anyString())).thenReturn(user); - when(mockSecretOps.reconcile(anyString(), anyString(), any(Secret.class))).thenReturn(Future.failedFuture(failureMsg)); - when(aclOps.reconcile(anyString(), any())).thenReturn(Future.succeededFuture()); - when(scramOps.reconcile(any(), any())).thenReturn(Future.succeededFuture()); + when(mockSecretOps.reconcile(any(), anyString(), anyString(), any(Secret.class))).thenReturn(Future.failedFuture(failureMsg)); + when(aclOps.reconcile(any(), anyString(), any())).thenReturn(Future.succeededFuture()); + when(scramOps.reconcile(any(), any(), any())).thenReturn(Future.succeededFuture()); ArgumentCaptor userCaptor = ArgumentCaptor.forClass(KafkaUser.class); - when(mockCrdOps.updateStatusAsync(userCaptor.capture())).thenReturn(Future.succeededFuture()); - when(quotasOps.reconcile(any(), any())).thenReturn(Future.succeededFuture()); + when(mockCrdOps.updateStatusAsync(any(), userCaptor.capture())).thenReturn(Future.succeededFuture()); + when(quotasOps.reconcile(any(), any(), any())).thenReturn(Future.succeededFuture()); KafkaUserOperator op = new KafkaUserOperator(vertx, mockCertManager, mockCrdOps, Labels.fromMap(ResourceUtils.LABELS), @@ -967,12 +967,12 @@ public void testUserStatusReady(VertxTestContext context) { when(mockCrdOps.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture(user)); when(mockCrdOps.get(anyString(), anyString())).thenReturn(user); - when(mockSecretOps.reconcile(anyString(), anyString(), any(Secret.class))).thenReturn(Future.succeededFuture()); - when(aclOps.reconcile(anyString(), any())).thenReturn(Future.succeededFuture()); - when(scramOps.reconcile(any(), any())).thenReturn(Future.succeededFuture()); + when(mockSecretOps.reconcile(any(), anyString(), anyString(), any(Secret.class))).thenReturn(Future.succeededFuture()); + when(aclOps.reconcile(any(), anyString(), any())).thenReturn(Future.succeededFuture()); + when(scramOps.reconcile(any(), any(), any())).thenReturn(Future.succeededFuture()); ArgumentCaptor userCaptor = ArgumentCaptor.forClass(KafkaUser.class); - when(mockCrdOps.updateStatusAsync(userCaptor.capture())).thenReturn(Future.succeededFuture()); - when(quotasOps.reconcile(any(), any())).thenReturn(Future.succeededFuture()); + when(mockCrdOps.updateStatusAsync(any(), userCaptor.capture())).thenReturn(Future.succeededFuture()); + when(quotasOps.reconcile(any(), any(), any())).thenReturn(Future.succeededFuture()); KafkaUserOperator op = new KafkaUserOperator(vertx, mockCertManager, mockCrdOps, Labels.fromMap(ResourceUtils.LABELS), diff --git a/user-operator/src/test/java/io/strimzi/operator/user/operator/KafkaUserQuotasIT.java b/user-operator/src/test/java/io/strimzi/operator/user/operator/KafkaUserQuotasIT.java index cf07c93e44..214b23a9f0 100644 --- a/user-operator/src/test/java/io/strimzi/operator/user/operator/KafkaUserQuotasIT.java +++ b/user-operator/src/test/java/io/strimzi/operator/user/operator/KafkaUserQuotasIT.java @@ -6,6 +6,7 @@ import 
io.strimzi.api.kafka.model.KafkaUserQuotas; import io.strimzi.operator.common.DefaultAdminClientProvider; +import io.strimzi.operator.common.Reconciliation; import io.vertx.core.Vertx; import io.vertx.junit5.Checkpoint; import io.vertx.junit5.VertxExtension; @@ -88,9 +89,9 @@ public void testRegularUserExistsAfterCreate() throws Exception { } public void testUserExistsAfterCreate(String username) throws Exception { - assertThat(kuq.exists(username), is(false)); - kuq.createOrUpdate(username, defaultQuotas); - assertThat(kuq.exists(username), is(true)); + assertThat(kuq.exists(Reconciliation.DUMMY_RECONCILIATION, username), is(false)); + kuq.createOrUpdate(Reconciliation.DUMMY_RECONCILIATION, username, defaultQuotas); + assertThat(kuq.exists(Reconciliation.DUMMY_RECONCILIATION, username), is(true)); } @Test @@ -104,7 +105,7 @@ public void testRegularUserDoesNotExistPriorToCreate() throws Exception { } public void testUserDoesNotExistPriorToCreate(String username) throws Exception { - assertThat(kuq.exists(username), is(false)); + assertThat(kuq.exists(Reconciliation.DUMMY_RECONCILIATION, username), is(false)); } @Test @@ -129,7 +130,7 @@ public void testCreateOrUpdate(String username) throws Exception { KafkaUserQuotas newQuotas = new KafkaUserQuotas(); newQuotas.setConsumerByteRate(1000); newQuotas.setProducerByteRate(2000); - kuq.createOrUpdate(username, newQuotas); + kuq.createOrUpdate(Reconciliation.DUMMY_RECONCILIATION, username, newQuotas); assertThat(isPathExist("/config/users/" + encodeUsername(username)), is(true)); testDescribeUserQuotas(username, newQuotas); } @@ -146,10 +147,10 @@ public void testCreateOrUpdateTwiceRegularUSer() throws Exception { public void testCreateOrUpdateTwice(String username) throws Exception { assertThat(isPathExist("/config/users/" + encodeUsername(username)), is(false)); - assertThat(kuq.describeUserQuotas(username), is(nullValue())); + assertThat(kuq.describeUserQuotas(Reconciliation.DUMMY_RECONCILIATION, username), is(nullValue())); - kuq.createOrUpdate(username, defaultQuotas); - kuq.createOrUpdate(username, defaultQuotas); + kuq.createOrUpdate(Reconciliation.DUMMY_RECONCILIATION, username, defaultQuotas); + kuq.createOrUpdate(Reconciliation.DUMMY_RECONCILIATION, username, defaultQuotas); assertThat(isPathExist("/config/users/" + encodeUsername(username)), is(true)); testDescribeUserQuotas(username, defaultQuotas); } @@ -165,12 +166,12 @@ public void testDeleteRegularUser() throws Exception { } public void testDelete(String username) throws Exception { - kuq.createOrUpdate(username, defaultQuotas); + kuq.createOrUpdate(Reconciliation.DUMMY_RECONCILIATION, username, defaultQuotas); assertThat(isPathExist("/config/users/" + encodeUsername(username)), is(true)); - assertThat(kuq.exists(username), is(true)); + assertThat(kuq.exists(Reconciliation.DUMMY_RECONCILIATION, username), is(true)); - kuq.delete(username); - assertThat(kuq.exists(username), is(false)); + kuq.delete(Reconciliation.DUMMY_RECONCILIATION, username); + assertThat(kuq.exists(Reconciliation.DUMMY_RECONCILIATION, username), is(false)); } @Test @@ -184,29 +185,31 @@ public void testDeleteTwiceRegularUser() throws Exception { } public void testDeleteTwice(String username) throws Exception { - kuq.createOrUpdate(username, defaultQuotas); + kuq.createOrUpdate(Reconciliation.DUMMY_RECONCILIATION, username, defaultQuotas); assertThat(isPathExist("/config/users/" + encodeUsername(username)), is(true)); - assertThat(kuq.exists(username), is(true)); + 
+        assertThat(kuq.exists(Reconciliation.DUMMY_RECONCILIATION, username), is(true));
-        kuq.delete(username);
-        kuq.delete(username);
-        assertThat(kuq.exists(username), is(false));
+        kuq.delete(Reconciliation.DUMMY_RECONCILIATION, username);
+        kuq.delete(Reconciliation.DUMMY_RECONCILIATION, username);
+        assertThat(kuq.exists(Reconciliation.DUMMY_RECONCILIATION, username), is(false));
     }
 
     @Test
     public void testUpdateConsumerByteRate() throws Exception {
-        kuq.createOrUpdate("changeProducerByteRate", defaultQuotas);
+        String username = "changeConsumerByteRate";
+        kuq.createOrUpdate(Reconciliation.DUMMY_RECONCILIATION, username, defaultQuotas);
 
         defaultQuotas.setConsumerByteRate(4000);
 
-        kuq.createOrUpdate("changeProducerByteRate", defaultQuotas);
-        assertThat(kuq.describeUserQuotas("changeProducerByteRate").getConsumerByteRate(), is(4000));
+        kuq.createOrUpdate(Reconciliation.DUMMY_RECONCILIATION, username, defaultQuotas);
+        assertThat(kuq.describeUserQuotas(Reconciliation.DUMMY_RECONCILIATION, username).getConsumerByteRate(), is(4000));
     }
 
     @Test
     public void testUpdateProducerByteRate() throws Exception {
-        kuq.createOrUpdate("changeProducerByteRate", defaultQuotas);
+        String username = "changeProducerByteRate";
+        kuq.createOrUpdate(Reconciliation.DUMMY_RECONCILIATION, username, defaultQuotas);
 
         defaultQuotas.setProducerByteRate(8000);
 
-        kuq.createOrUpdate("changeProducerByteRate", defaultQuotas);
-        assertThat(kuq.describeUserQuotas("changeProducerByteRate").getProducerByteRate(), is(8000));
+        kuq.createOrUpdate(Reconciliation.DUMMY_RECONCILIATION, username, defaultQuotas);
+        assertThat(kuq.describeUserQuotas(Reconciliation.DUMMY_RECONCILIATION, username).getProducerByteRate(), is(8000));
     }
 
     @Test
@@ -267,12 +270,12 @@ public void testReconcileCreatesUserWithQuotas(String username, VertxTestContext
         quotas.setProducerByteRate(1_000_000);
         quotas.setRequestPercentage(50);
 
-        assertThat(kuq.exists(username), is(false));
+        assertThat(kuq.exists(Reconciliation.DUMMY_RECONCILIATION, username), is(false));
 
         Checkpoint async = testContext.checkpoint();
-        kuq.reconcile(username, quotas)
+        kuq.reconcile(Reconciliation.DUMMY_RECONCILIATION, username, quotas)
             .onComplete(testContext.succeeding(rr -> testContext.verify(() -> {
-                assertThat(kuq.exists(username), is(true));
+                assertThat(kuq.exists(Reconciliation.DUMMY_RECONCILIATION, username), is(true));
                 assertThat(isPathExist("/config/users/" + encodeUsername(username)), is(true));
                 testDescribeUserQuotas(username, quotas);
                 async.flag();
@@ -295,8 +298,8 @@ public void testReconcileUpdatesUserQuotaValues(String username, VertxTestContex
         initialQuotas.setProducerByteRate(1_000_000);
         initialQuotas.setRequestPercentage(50);
 
-        kuq.createOrUpdate(username, initialQuotas);
-        assertThat(kuq.exists(username), is(true));
+        kuq.createOrUpdate(Reconciliation.DUMMY_RECONCILIATION, username, initialQuotas);
+        assertThat(kuq.exists(Reconciliation.DUMMY_RECONCILIATION, username), is(true));
         testDescribeUserQuotas(username, initialQuotas);
 
         KafkaUserQuotas updatedQuotas = new KafkaUserQuotas();
@@ -305,9 +308,9 @@ public void testReconcileUpdatesUserQuotaValues(String username, VertxTestContex
         updatedQuotas.setRequestPercentage(75);
 
         Checkpoint async = testContext.checkpoint();
-        kuq.reconcile(username, updatedQuotas)
+        kuq.reconcile(Reconciliation.DUMMY_RECONCILIATION, username, updatedQuotas)
             .onComplete(testContext.succeeding(rr -> testContext.verify(() -> {
-                assertThat(kuq.exists(username), is(true));
+                assertThat(kuq.exists(Reconciliation.DUMMY_RECONCILIATION, username), is(true));
                 assertThat(isPathExist("/config/users/" + encodeUsername(username)), is(true));
                 testDescribeUserQuotas(username, updatedQuotas);
                 async.flag();
@@ -330,8 +333,8 @@ public void testReconcileUpdatesUserQuotasWithFieldRemovals(String username, Ver
         initialQuotas.setProducerByteRate(1_000_000);
         initialQuotas.setRequestPercentage(50);
 
-        kuq.createOrUpdate(username, initialQuotas);
-        assertThat(kuq.exists(username), is(true));
+        kuq.createOrUpdate(Reconciliation.DUMMY_RECONCILIATION, username, initialQuotas);
+        assertThat(kuq.exists(Reconciliation.DUMMY_RECONCILIATION, username), is(true));
         testDescribeUserQuotas(username, initialQuotas);
 
         KafkaUserQuotas updatedQuotas = new KafkaUserQuotas();
@@ -339,9 +342,9 @@ public void testReconcileUpdatesUserQuotasWithFieldRemovals(String username, Ver
         updatedQuotas.setProducerByteRate(3_000_000);
 
         Checkpoint async = testContext.checkpoint();
-        kuq.reconcile(username, updatedQuotas)
+        kuq.reconcile(Reconciliation.DUMMY_RECONCILIATION, username, updatedQuotas)
             .onComplete(testContext.succeeding(rr -> testContext.verify(() -> {
-                assertThat(kuq.exists(username), is(true));
+                assertThat(kuq.exists(Reconciliation.DUMMY_RECONCILIATION, username), is(true));
                 assertThat(isPathExist("/config/users/" + encodeUsername(username)), is(true));
                 testDescribeUserQuotas(username, updatedQuotas);
                 async.flag();
@@ -365,14 +368,14 @@ public void testReconcileDeletesUserForNullQuota(String username, VertxTestConte
         initialQuotas.setProducerByteRate(1_000_000);
         initialQuotas.setRequestPercentage(50);
 
-        kuq.createOrUpdate(username, initialQuotas);
-        assertThat(kuq.exists(username), is(true));
+        kuq.createOrUpdate(Reconciliation.DUMMY_RECONCILIATION, username, initialQuotas);
+        assertThat(kuq.exists(Reconciliation.DUMMY_RECONCILIATION, username), is(true));
         testDescribeUserQuotas(username, initialQuotas);
 
         Checkpoint async = testContext.checkpoint();
-        kuq.reconcile(username, null)
+        kuq.reconcile(Reconciliation.DUMMY_RECONCILIATION, username, null)
             .onComplete(testContext.succeeding(rr -> testContext.verify(() -> {
-                assertThat(kuq.exists(username), is(false));
+                assertThat(kuq.exists(Reconciliation.DUMMY_RECONCILIATION, username), is(false));
                 async.flag();
             })));
     }
@@ -392,20 +395,20 @@ private String encodeUsername(String username) {
 
     private void createScramShaUser(String username, String password) {
         // creating SCRAM-SHA user upfront to check it works because it shares same path in ZK as quotas
        ScramShaCredentials scramShaCred = new ScramShaCredentials(kafkaCluster.zKConnectString(), 6_000);
-        scramShaCred.createOrUpdate(username, password);
+        scramShaCred.createOrUpdate(Reconciliation.DUMMY_RECONCILIATION, username, password);
         assertThat(scramShaCred.exists(username), is(true));
         assertThat(scramShaCred.isPathExist("/config/users/" + username), is(true));
     }
 
     private void testUserQuotasNotExist(String username) throws Exception {
-        assertThat(kuq.describeUserQuotas(username), is(nullValue()));
+        assertThat(kuq.describeUserQuotas(Reconciliation.DUMMY_RECONCILIATION, username), is(nullValue()));
         assertThat(isPathExist("/config/users/" + encodeUsername(username)), is(false));
     }
 
     private void testDescribeUserQuotas(String username, KafkaUserQuotas quotas) throws Exception {
-        assertThat(kuq.describeUserQuotas(username), is(notNullValue()));
-        assertThat(kuq.describeUserQuotas(username).getConsumerByteRate(), is(quotas.getConsumerByteRate()));
-        assertThat(kuq.describeUserQuotas(username).getProducerByteRate(), is(quotas.getProducerByteRate()));
-        assertThat(kuq.describeUserQuotas(username).getRequestPercentage(), is(quotas.getRequestPercentage()));
+        assertThat(kuq.describeUserQuotas(Reconciliation.DUMMY_RECONCILIATION, username), is(notNullValue()));
+        assertThat(kuq.describeUserQuotas(Reconciliation.DUMMY_RECONCILIATION, username).getConsumerByteRate(), is(quotas.getConsumerByteRate()));
+        assertThat(kuq.describeUserQuotas(Reconciliation.DUMMY_RECONCILIATION, username).getProducerByteRate(), is(quotas.getProducerByteRate()));
+        assertThat(kuq.describeUserQuotas(Reconciliation.DUMMY_RECONCILIATION, username).getRequestPercentage(), is(quotas.getRequestPercentage()));
     }
 }
diff --git a/user-operator/src/test/java/io/strimzi/operator/user/operator/ScramShaCredentialsIT.java b/user-operator/src/test/java/io/strimzi/operator/user/operator/ScramShaCredentialsIT.java
index e21e0407d2..15613c95b9 100644
--- a/user-operator/src/test/java/io/strimzi/operator/user/operator/ScramShaCredentialsIT.java
+++ b/user-operator/src/test/java/io/strimzi/operator/user/operator/ScramShaCredentialsIT.java
@@ -4,6 +4,7 @@
  */
 package io.strimzi.operator.user.operator;
 
+import io.strimzi.operator.common.Reconciliation;
 import io.strimzi.test.EmbeddedZooKeeper;
 import io.vertx.core.json.JsonObject;
 import org.junit.jupiter.api.AfterAll;
@@ -47,7 +48,7 @@ public void createSTS() {
 
     @Test
     public void testUserExistsAfterCreate() {
         assertThat(scramShaCred.exists("userExists"), is(false));
-        scramShaCred.createOrUpdate("userExists", "foo-password");
+        scramShaCred.createOrUpdate(Reconciliation.DUMMY_RECONCILIATION, "userExists", "foo-password");
         assertThat(scramShaCred.exists("userExists"), is(true));
     }
@@ -58,52 +59,52 @@ public void testUserDoeNotExistPriorToCreate() {
 
     @Test
     public void testCreateOrUpdate() {
-        scramShaCred.createOrUpdate("normalCreate", "foo-password");
+        scramShaCred.createOrUpdate(Reconciliation.DUMMY_RECONCILIATION, "normalCreate", "foo-password");
         assertThat(scramShaCred.exists("normalCreate"), is(true));
         assertThat(scramShaCred.isPathExist("/config/users/normalCreate"), is(true));
     }
 
     @Test
     public void testCreateOrUpdateTwice() {
-        scramShaCred.createOrUpdate("doubleCreate", "foo-password");
-        scramShaCred.createOrUpdate("doubleCreate", "foo-password");
+        scramShaCred.createOrUpdate(Reconciliation.DUMMY_RECONCILIATION, "doubleCreate", "foo-password");
+        scramShaCred.createOrUpdate(Reconciliation.DUMMY_RECONCILIATION, "doubleCreate", "foo-password");
         assertThat(scramShaCred.exists("doubleCreate"), is(true));
         assertThat(scramShaCred.isPathExist("/config/users/doubleCreate"), is(true));
     }
 
     @Test
     public void testDelete() {
-        scramShaCred.createOrUpdate("normalDelete", "foo-password");
+        scramShaCred.createOrUpdate(Reconciliation.DUMMY_RECONCILIATION, "normalDelete", "foo-password");
         assertThat(scramShaCred.exists("normalDelete"), is(true));
         assertThat(scramShaCred.isPathExist("/config/users/normalDelete"), is(true));
-        scramShaCred.delete("normalDelete");
+        scramShaCred.delete(Reconciliation.DUMMY_RECONCILIATION, "normalDelete");
         assertThat(scramShaCred.exists("normalDelete"), is(false));
         assertThat(scramShaCred.isPathExist("/config/users/normalDelete"), is(false));
     }
 
     @Test
     public void testDeleteTwice() {
-        scramShaCred.createOrUpdate("doubleDelete", "foo-password");
+        scramShaCred.createOrUpdate(Reconciliation.DUMMY_RECONCILIATION, "doubleDelete", "foo-password");
         assertThat(scramShaCred.exists("doubleDelete"), is(true));
         assertThat(scramShaCred.isPathExist("/config/users/doubleDelete"), is(true));
-        scramShaCred.delete("doubleDelete");
-        scramShaCred.delete("doubleDelete");
+        scramShaCred.delete(Reconciliation.DUMMY_RECONCILIATION, "doubleDelete");
+        scramShaCred.delete(Reconciliation.DUMMY_RECONCILIATION, "doubleDelete");
         assertThat(scramShaCred.exists("doubleDelete"), is(false));
         assertThat(scramShaCred.isPathExist("/config/users/doubleDelete"), is(false));
     }
 
     @Test
     public void testCreateOrUpdatePasswordUpdate() {
-        scramShaCred.createOrUpdate("changePassword", "changePassword-password");
-        scramShaCred.createOrUpdate("changePassword", "changePassword-password2");
+        scramShaCred.createOrUpdate(Reconciliation.DUMMY_RECONCILIATION, "changePassword", "changePassword-password");
+        scramShaCred.createOrUpdate(Reconciliation.DUMMY_RECONCILIATION, "changePassword", "changePassword-password2");
         assertThat(scramShaCred.exists("changePassword"), is(true));
         assertThat(scramShaCred.isPathExist("/config/users/changePassword"), is(true));
     }
 
     @Test
     public void testListListsCreatedUsers() {
-        scramShaCred.createOrUpdate("listSome", "foo-password");
+        scramShaCred.createOrUpdate(Reconciliation.DUMMY_RECONCILIATION, "listSome", "foo-password");
         assertThat(scramShaCred.list(), hasItem("listSome"));
     }
@@ -111,7 +112,7 @@ public void testListListsCreatedUsers() {
     public void testListWithNoUsersReturnsEmptyList() {
         // Ensure all users deleted from other tests
         for (String user : scramShaCred.list()) {
-            scramShaCred.delete(user);
+            scramShaCred.delete(Reconciliation.DUMMY_RECONCILIATION, user);
         }
         assertThat(scramShaCred.list(), is(empty()));
     }
diff --git a/user-operator/src/test/java/io/strimzi/operator/user/operator/SimpleAclOperatorIT.java b/user-operator/src/test/java/io/strimzi/operator/user/operator/SimpleAclOperatorIT.java
index 693f39a8fa..cc3ff0aa38 100644
--- a/user-operator/src/test/java/io/strimzi/operator/user/operator/SimpleAclOperatorIT.java
+++ b/user-operator/src/test/java/io/strimzi/operator/user/operator/SimpleAclOperatorIT.java
@@ -8,6 +8,7 @@
 import io.strimzi.api.kafka.model.AclResourcePatternType;
 import io.strimzi.api.kafka.model.AclRuleType;
 import io.strimzi.operator.common.DefaultAdminClientProvider;
+import io.strimzi.operator.common.Reconciliation;
 import io.strimzi.operator.user.model.acl.SimpleAclRule;
 import io.strimzi.operator.user.model.acl.SimpleAclRuleResource;
 import io.strimzi.operator.user.model.acl.SimpleAclRuleResourceType;
@@ -38,6 +39,7 @@
 @ExtendWith(VertxExtension.class)
 public class SimpleAclOperatorIT {
+    private static final int TEST_TIMEOUT = 60;
 
     private static Vertx vertx;
@@ -70,7 +72,7 @@ public static void beforeAll() {
 
     @Test
     public void testNoAclRules(VertxTestContext context) {
-        Set acls = simpleAclOperator.getAcls("no-acls-user");
+        Set acls = simpleAclOperator.getAcls(Reconciliation.DUMMY_RECONCILIATION, "no-acls-user");
         context.verify(() -> {
             assertThat(acls, IsEmptyCollection.empty());
         });
@@ -86,9 +88,9 @@ public void testCreateAclRule(VertxTestContext context) throws InterruptedExcept
                 AclOperation.READ);
 
         CountDownLatch async = new CountDownLatch(1);
-        simpleAclOperator.reconcile("my-user", Collections.singleton(rule))
+        simpleAclOperator.reconcile(Reconciliation.DUMMY_RECONCILIATION, "my-user", Collections.singleton(rule))
             .onComplete(ignore -> context.verify(() -> {
-                Set acls = simpleAclOperator.getAcls("my-user");
+                Set acls = simpleAclOperator.getAcls(Reconciliation.DUMMY_RECONCILIATION, "my-user");
                 assertThat(acls, hasSize(1));
                 assertThat(acls, hasItem(rule));
                 async.countDown();
@@ -107,9 +109,9 @@ public void testCreateAndUpdateAclRule(VertxTestContext context) throws Interrup
                 AclOperation.READ);
 
         CountDownLatch async1 = new CountDownLatch(1);
-        simpleAclOperator.reconcile("my-user", Collections.singleton(rule1))
+        simpleAclOperator.reconcile(Reconciliation.DUMMY_RECONCILIATION, "my-user", Collections.singleton(rule1))
             .onComplete(ignore -> context.verify(() -> {
-                Set acls = simpleAclOperator.getAcls("my-user");
+                Set acls = simpleAclOperator.getAcls(Reconciliation.DUMMY_RECONCILIATION, "my-user");
                 assertThat(acls, hasSize(1));
                 assertThat(acls, hasItem(rule1));
                 async1.countDown();
@@ -124,9 +126,9 @@ public void testCreateAndUpdateAclRule(VertxTestContext context) throws Interrup
                 AclOperation.WRITE);
 
         CountDownLatch async2 = new CountDownLatch(1);
-        simpleAclOperator.reconcile("my-user", new HashSet<>(asList(rule1, rule2)))
+        simpleAclOperator.reconcile(Reconciliation.DUMMY_RECONCILIATION, "my-user", new HashSet<>(asList(rule1, rule2)))
             .onComplete(ignore -> context.verify(() -> {
-                Set acls = simpleAclOperator.getAcls("my-user");
+                Set acls = simpleAclOperator.getAcls(Reconciliation.DUMMY_RECONCILIATION, "my-user");
                 assertThat(acls, hasSize(2));
                 assertThat(acls, hasItems(rule1, rule2));
                 async2.countDown();
@@ -145,9 +147,9 @@ public void testCreateAndDeleteAclRule(VertxTestContext context) throws Interrup
                 AclOperation.READ);
 
         CountDownLatch async1 = new CountDownLatch(1);
-        simpleAclOperator.reconcile("my-user", Collections.singleton(rule1))
+        simpleAclOperator.reconcile(Reconciliation.DUMMY_RECONCILIATION, "my-user", Collections.singleton(rule1))
            .onComplete(ignore -> context.verify(() -> {
-                Set acls = simpleAclOperator.getAcls("my-user");
+                Set acls = simpleAclOperator.getAcls(Reconciliation.DUMMY_RECONCILIATION, "my-user");
                 assertThat(acls, hasSize(1));
                 assertThat(acls, hasItem(rule1));
                 async1.countDown();
@@ -156,9 +158,9 @@ public void testCreateAndDeleteAclRule(VertxTestContext context) throws Interrup
         async1.await(TEST_TIMEOUT, TimeUnit.SECONDS);
 
         CountDownLatch async2 = new CountDownLatch(1);
-        simpleAclOperator.reconcile("my-user", null)
+        simpleAclOperator.reconcile(Reconciliation.DUMMY_RECONCILIATION, "my-user", null)
             .onComplete(ignore -> context.verify(() -> {
-                Set acls = simpleAclOperator.getAcls("my-user");
+                Set acls = simpleAclOperator.getAcls(Reconciliation.DUMMY_RECONCILIATION, "my-user");
                 assertThat(acls, IsEmptyCollection.empty());
                 async2.countDown();
             }));
@@ -176,9 +178,9 @@ public void testUsersWithAcls(VertxTestContext context) throws InterruptedExcept
                 AclOperation.READ);
 
         CountDownLatch async1 = new CountDownLatch(1);
-        simpleAclOperator.reconcile("my-user", Collections.singleton(rule1))
+        simpleAclOperator.reconcile(Reconciliation.DUMMY_RECONCILIATION, "my-user", Collections.singleton(rule1))
            .onComplete(ignore -> context.verify(() -> {
-                Set acls = simpleAclOperator.getAcls("my-user");
+                Set acls = simpleAclOperator.getAcls(Reconciliation.DUMMY_RECONCILIATION, "my-user");
                 assertThat(acls, hasSize(1));
                 assertThat(acls, hasItem(rule1));
                 async1.countDown();
@@ -193,9 +195,9 @@ public void testUsersWithAcls(VertxTestContext context) throws InterruptedExcept
                 AclOperation.WRITE);
 
         CountDownLatch async2 = new CountDownLatch(1);
-        simpleAclOperator.reconcile("my-user-2", Collections.singleton(rule2))
+        simpleAclOperator.reconcile(Reconciliation.DUMMY_RECONCILIATION, "my-user-2", Collections.singleton(rule2))
            .onComplete(ignore -> context.verify(() -> {
-                Set acls = simpleAclOperator.getAcls("my-user-2");
+                Set acls = simpleAclOperator.getAcls(Reconciliation.DUMMY_RECONCILIATION, "my-user-2");
                 assertThat(acls, hasSize(1));
                 assertThat(acls, hasItem(rule2));
                 async2.countDown();
diff --git a/user-operator/src/test/java/io/strimzi/operator/user/operator/SimpleAclOperatorTest.java b/user-operator/src/test/java/io/strimzi/operator/user/operator/SimpleAclOperatorTest.java
index df2c4f9be5..37685282c6 100644
--- a/user-operator/src/test/java/io/strimzi/operator/user/operator/SimpleAclOperatorTest.java
+++ b/user-operator/src/test/java/io/strimzi/operator/user/operator/SimpleAclOperatorTest.java
@@ -7,6 +7,7 @@
 import io.strimzi.api.kafka.model.AclOperation;
 import io.strimzi.api.kafka.model.AclResourcePatternType;
 import io.strimzi.api.kafka.model.AclRuleType;
+import io.strimzi.operator.common.Reconciliation;
 import io.strimzi.operator.user.model.acl.SimpleAclRule;
 import io.strimzi.operator.user.model.acl.SimpleAclRuleResource;
 import io.strimzi.operator.user.model.acl.SimpleAclRuleResourceType;
@@ -130,7 +131,7 @@ public void testReconcileInternalCreateAddsAclsToAuthorizer(VertxTestContext con
         });
 
         Checkpoint async = context.checkpoint();
-        aclOp.reconcile("CN=foo", new LinkedHashSet<>(asList(resource2ReadRule, resource2WriteRule, resource1DescribeRule)))
+        aclOp.reconcile(Reconciliation.DUMMY_RECONCILIATION, "CN=foo", new LinkedHashSet<>(asList(resource2ReadRule, resource2WriteRule, resource1DescribeRule)))
             .onComplete(context.succeeding(rr -> context.verify(() -> {
                 Collection capturedAclBindings = aclBindingsCaptor.getValue();
                 assertThat(capturedAclBindings, hasSize(3));
@@ -169,7 +170,7 @@ public void testReconcileInternalUpdateCreatesNewAclsAndDeletesOldAcls(VertxTest
         });
 
         Checkpoint async = context.checkpoint();
-        aclOp.reconcile("CN=foo", new LinkedHashSet(asList(rule1)))
+        aclOp.reconcile(Reconciliation.DUMMY_RECONCILIATION, "CN=foo", new LinkedHashSet(asList(rule1)))
             .onComplete(context.succeeding(rr -> context.verify(() -> {
 
                 // Create Write rule for resource 2
@@ -212,7 +213,7 @@ public void testReconcileInternalDelete(VertxTestContext context) {
         });
 
         Checkpoint async = context.checkpoint();
-        aclOp.reconcile("CN=foo", null)
+        aclOp.reconcile(Reconciliation.DUMMY_RECONCILIATION, "CN=foo", null)
             .onComplete(context.succeeding(rr -> context.verify(() -> {
                 Collection capturedAclBindingFilters = aclBindingFiltersCaptor.getValue();