diff --git a/.checkstyle/checkstyle.xml b/.checkstyle/checkstyle.xml
index a3715ec24c..2e5fe3af90 100644
--- a/.checkstyle/checkstyle.xml
+++ b/.checkstyle/checkstyle.xml
@@ -44,7 +44,7 @@
-
+
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 03bdde7591..673ec5d692 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -11,6 +11,7 @@
* Support pausing reconciliation of KafkaTopic CR with annotation `strimzi.io/pause-reconciliation`
* Update cruise control to 2.5.55
* Support for broker load information added to the rebalance optimization proposal. Information on the load difference, before and after a rebalance is stored in a ConfigMap
+* Add support for selectively changing the verbosity of logging for individual CRs, using markers.
### Changes, deprecations and removals
diff --git a/api-conversion/src/main/java/io/strimzi/kafka/api/conversion/converter/Conversion.java b/api-conversion/src/main/java/io/strimzi/kafka/api/conversion/converter/Conversion.java
index 713c36d129..0c9c034d5b 100644
--- a/api-conversion/src/main/java/io/strimzi/kafka/api/conversion/converter/Conversion.java
+++ b/api-conversion/src/main/java/io/strimzi/kafka/api/conversion/converter/Conversion.java
@@ -32,7 +32,7 @@
* @param The converted type
*/
public interface Conversion {
- Logger log = LogManager.getLogger(Conversion.class);
+ Logger LOGGER = LogManager.getLogger(Conversion.class);
Conversion NOOP = new Conversion<>() {
@Override
diff --git a/certificate-manager/src/main/java/io/strimzi/certs/OpenSslCertManager.java b/certificate-manager/src/main/java/io/strimzi/certs/OpenSslCertManager.java
index b9044a1dbf..4a232a1b68 100644
--- a/certificate-manager/src/main/java/io/strimzi/certs/OpenSslCertManager.java
+++ b/certificate-manager/src/main/java/io/strimzi/certs/OpenSslCertManager.java
@@ -57,7 +57,7 @@ public class OpenSslCertManager implements CertManager {
.appendOffsetId().toFormatter();
public static final int MAXIMUM_CN_LENGTH = 64;
- private static final Logger log = LogManager.getLogger(OpenSslCertManager.class);
+ private static final Logger LOGGER = LogManager.getLogger(OpenSslCertManager.class);
public static final ZoneId UTC = ZoneId.of("UTC");
public OpenSslCertManager() {}
@@ -79,12 +79,12 @@ static void delete(Path fileOrDir) throws IOException {
try {
Files.delete(path);
} catch (IOException e) {
- log.debug("File could not be deleted: {}", fileOrDir);
+ LOGGER.debug("File could not be deleted: {}", fileOrDir);
}
});
} else {
if (!Files.deleteIfExists(fileOrDir)) {
- log.debug("File not deleted, because it did not exist: {}", fileOrDir);
+ LOGGER.debug("File not deleted, because it did not exist: {}", fileOrDir);
}
}
}
@@ -534,8 +534,8 @@ public OpensslArgs optArg(String opt, File file) throws IOException {
return optArg(opt, file, false);
}
public OpensslArgs optArg(String opt, File file, boolean mayLog) throws IOException {
- if (mayLog && log.isTraceEnabled()) {
- log.trace("Contents of {} for option {} is:\n{}", file, opt, Files.readString(file.toPath()));
+ if (mayLog && LOGGER.isTraceEnabled()) {
+ LOGGER.trace("Contents of {} for option {} is:\n{}", file, opt, Files.readString(file.toPath()));
}
opt(opt);
pb.command().add(file.getAbsolutePath());
@@ -614,7 +614,7 @@ public void exec(boolean failOnNonZero) throws IOException {
pb.redirectErrorStream(true)
.redirectOutput(out.toFile());
- log.debug("Running command {}", pb.command());
+ LOGGER.debug("Running command {}", pb.command());
Process proc = pb.start();
@@ -626,18 +626,18 @@ public void exec(boolean failOnNonZero) throws IOException {
if (failOnNonZero && result != 0) {
String output = Files.readString(out, Charset.defaultCharset());
- if (!log.isDebugEnabled()) {
+ if (!LOGGER.isDebugEnabled()) {
// Include the command if we've not logged it already
- log.error("Got result {} from command {} with output\n{}", result, pb.command(), output);
+ LOGGER.error("Got result {} from command {} with output\n{}", result, pb.command(), output);
} else {
- log.error("Got result {} with output\n{}", result, output);
+ LOGGER.error("Got result {} with output\n{}", result, output);
}
throw new RuntimeException("openssl status code " + result);
} else {
- if (log.isTraceEnabled()) {
- log.trace("Got output\n{}", Files.readString(out, Charset.defaultCharset()));
+ if (LOGGER.isTraceEnabled()) {
+ LOGGER.trace("Got output\n{}", Files.readString(out, Charset.defaultCharset()));
}
- log.debug("Got result {}", result);
+ LOGGER.debug("Got result {}", result);
}
} catch (InterruptedException ignored) {
diff --git a/cluster-operator/pom.xml b/cluster-operator/pom.xml
index 8c466f24b6..c89e57a542 100644
--- a/cluster-operator/pom.xml
+++ b/cluster-operator/pom.xml
@@ -122,11 +122,11 @@
org.apache.logging.log4j
- log4j-api
+ log4j-core
org.apache.logging.log4j
- log4j-core
+ log4j-api
org.apache.logging.log4j
diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/ClusterOperator.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/ClusterOperator.java
index a93debd710..89f04d878a 100644
--- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/ClusterOperator.java
+++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/ClusterOperator.java
@@ -23,8 +23,6 @@
import io.vertx.core.Handler;
import io.vertx.core.Promise;
import io.vertx.core.http.HttpServer;
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
import java.util.ArrayList;
import java.util.List;
@@ -34,6 +32,8 @@
import static java.util.Arrays.asList;
import io.micrometer.prometheus.PrometheusMeterRegistry;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
/**
 * An "operator" for managing assemblies of various types in a particular namespace.
@@ -42,7 +42,7 @@
*/
public class ClusterOperator extends AbstractVerticle {
- private static final Logger log = LogManager.getLogger(ClusterOperator.class.getName());
+ private static final Logger LOGGER = LogManager.getLogger(ClusterOperator.class.getName());
public static final String STRIMZI_CLUSTER_OPERATOR_DOMAIN = "cluster.operator.strimzi.io";
private static final String NAME_SUFFIX = "-cluster-operator";
@@ -78,7 +78,7 @@ public ClusterOperator(String namespace,
KafkaBridgeAssemblyOperator kafkaBridgeAssemblyOperator,
KafkaRebalanceAssemblyOperator kafkaRebalanceAssemblyOperator,
MetricsProvider metricsProvider) {
- log.info("Creating ClusterOperator for namespace {}", namespace);
+ LOGGER.info("Creating ClusterOperator for namespace {}", namespace);
this.namespace = namespace;
this.config = config;
this.client = client;
@@ -95,7 +95,7 @@ public ClusterOperator(String namespace,
@Override
public void start(Promise start) {
- log.info("Starting ClusterOperator for namespace {}", namespace);
+ LOGGER.info("Starting ClusterOperator for namespace {}", namespace);
// Configure the executor here, but it is used only in other places
getVertx().createSharedWorkerExecutor("kubernetes-ops-pool", config.getOperationsThreadPoolSize(), TimeUnit.SECONDS.toNanos(120));
@@ -109,7 +109,7 @@ public void start(Promise start) {
}
for (AbstractOperator, ?, ?, ?> operator : operators) {
watchFutures.add(operator.createWatch(namespace, operator.recreateWatch(namespace)).compose(w -> {
- log.info("Opened watch for {} operator", operator.kind());
+ LOGGER.info("Opened watch for {} operator", operator.kind());
watchByKind.put(operator.kind(), w);
return Future.succeededFuture();
}));
@@ -120,9 +120,9 @@ public void start(Promise start) {
CompositeFuture.join(watchFutures)
.compose(f -> {
- log.info("Setting up periodic reconciliation for namespace {}", namespace);
+ LOGGER.info("Setting up periodic reconciliation for namespace {}", namespace);
this.reconcileTimer = vertx.setPeriodic(this.config.getReconciliationIntervalMs(), res2 -> {
- log.info("Triggering periodic reconciliation for namespace {}...", namespace);
+ LOGGER.info("Triggering periodic reconciliation for namespace {}...", namespace);
reconcileAll("timer");
});
return startHealthServer().map((Void) null);
@@ -133,7 +133,7 @@ public void start(Promise start) {
@Override
public void stop(Promise stop) {
- log.info("Stopping ClusterOperator for namespace {}", namespace);
+ LOGGER.info("Stopping ClusterOperator for namespace {}", namespace);
vertx.cancelTimer(reconcileTimer);
for (Watch watch : watchByKind.values()) {
if (watch != null) {
@@ -182,9 +182,9 @@ private Future startHealthServer() {
})
.listen(HEALTH_SERVER_PORT, ar -> {
if (ar.succeeded()) {
- log.info("ClusterOperator is now ready (health server listening on {})", HEALTH_SERVER_PORT);
+ LOGGER.info("ClusterOperator is now ready (health server listening on {})", HEALTH_SERVER_PORT);
} else {
- log.error("Unable to bind health server on {}", HEALTH_SERVER_PORT, ar.cause());
+ LOGGER.error("Unable to bind health server on {}", HEALTH_SERVER_PORT, ar.cause());
}
result.handle(ar);
});
diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/ClusterOperatorConfig.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/ClusterOperatorConfig.java
index 3894130c6d..81c9986f8c 100644
--- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/ClusterOperatorConfig.java
+++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/ClusterOperatorConfig.java
@@ -33,7 +33,8 @@
* Cluster Operator configuration
*/
public class ClusterOperatorConfig {
- private static final Logger log = LogManager.getLogger(ClusterOperatorConfig.class.getName());
+ private static final Logger LOGGER = LogManager.getLogger(ClusterOperatorConfig.class.getName());
+
public static final String STRIMZI_NAMESPACE = "STRIMZI_NAMESPACE";
public static final String STRIMZI_FULL_RECONCILIATION_INTERVAL_MS = "STRIMZI_FULL_RECONCILIATION_INTERVAL_MS";
@@ -159,7 +160,7 @@ public static ClusterOperatorConfig fromMap(Map map) {
*/
private static void warningsForRemovedEndVars(Map map) {
if (map.containsKey(STRIMZI_DEFAULT_TLS_SIDECAR_KAFKA_IMAGE)) {
- log.warn("Kafka TLS sidecar container has been removed and the environment variable {} is not used anymore. " +
+ LOGGER.warn("Kafka TLS sidecar container has been removed and the environment variable {} is not used anymore. " +
"You can remove it from the Strimzi Cluster Operator deployment.", STRIMZI_DEFAULT_TLS_SIDECAR_KAFKA_IMAGE);
}
}
diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/Main.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/Main.java
index d1a6108597..d5b8ba829e 100644
--- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/Main.java
+++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/Main.java
@@ -20,14 +20,13 @@
import io.strimzi.operator.cluster.operator.assembly.KafkaRebalanceAssemblyOperator;
import io.strimzi.operator.cluster.operator.resource.ResourceOperatorSupplier;
import io.strimzi.operator.common.PasswordGenerator;
+import io.strimzi.operator.common.Reconciliation;
import io.strimzi.operator.common.Util;
import io.strimzi.operator.common.operator.resource.ClusterRoleOperator;
import io.vertx.core.CompositeFuture;
import io.vertx.core.Future;
import io.vertx.core.Promise;
import io.vertx.core.Vertx;
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
import java.io.BufferedReader;
import java.io.IOException;
@@ -43,10 +42,12 @@
import io.vertx.core.VertxOptions;
import io.vertx.micrometer.MicrometerMetricsOptions;
import io.vertx.micrometer.VertxPrometheusOptions;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
@SuppressFBWarnings("DM_EXIT")
public class Main {
- private static final Logger log = LogManager.getLogger(Main.class.getName());
+ private static final Logger LOGGER = LogManager.getLogger(Main.class.getName());
static {
try {
@@ -57,9 +58,9 @@ public class Main {
}
public static void main(String[] args) {
- log.info("ClusterOperator {} is starting", Main.class.getPackage().getImplementationVersion());
+ LOGGER.info("ClusterOperator {} is starting", Main.class.getPackage().getImplementationVersion());
ClusterOperatorConfig config = ClusterOperatorConfig.fromMap(System.getenv());
- log.info("Cluster Operator configuration is {}", config);
+ LOGGER.info("Cluster Operator configuration is {}", config);
String dnsCacheTtl = System.getenv("STRIMZI_DNS_CACHE_TTL") == null ? "30" : System.getenv("STRIMZI_DNS_CACHE_TTL");
Security.setProperty("networkaddress.cache.ttl", dnsCacheTtl);
@@ -78,21 +79,21 @@ public static void main(String[] args) {
if (crs.succeeded()) {
PlatformFeaturesAvailability.create(vertx, client).onComplete(pfa -> {
if (pfa.succeeded()) {
- log.info("Environment facts gathered: {}", pfa.result());
+ LOGGER.info("Environment facts gathered: {}", pfa.result());
run(vertx, client, pfa.result(), config).onComplete(ar -> {
if (ar.failed()) {
- log.error("Unable to start operator for 1 or more namespace", ar.cause());
+ LOGGER.error("Unable to start operator for 1 or more namespace", ar.cause());
System.exit(1);
}
});
} else {
- log.error("Failed to gather environment facts", pfa.cause());
+ LOGGER.error("Failed to gather environment facts", pfa.cause());
System.exit(1);
}
});
} else {
- log.error("Failed to create Cluster Roles", crs.cause());
+ LOGGER.error("Failed to create Cluster Roles", crs.cause());
System.exit(1);
}
});
@@ -119,7 +120,7 @@ static CompositeFuture run(Vertx vertx, KubernetesClient client, PlatformFeature
if (pfa.supportsS2I()) {
kafkaConnectS2IClusterOperations = new KafkaConnectS2IAssemblyOperator(vertx, pfa, resourceOperatorSupplier, config);
} else {
- log.info("The KafkaConnectS2I custom resource definition can only be used in environment which supports OpenShift build, image and apps APIs. These APIs do not seem to be supported in this environment.");
+ LOGGER.info("The KafkaConnectS2I custom resource definition can only be used in environment which supports OpenShift build, image and apps APIs. These APIs do not seem to be supported in this environment.");
}
KafkaMirrorMaker2AssemblyOperator kafkaMirrorMaker2AssemblyOperator =
@@ -152,9 +153,9 @@ static CompositeFuture run(Vertx vertx, KubernetesClient client, PlatformFeature
vertx.deployVerticle(operator,
res -> {
if (res.succeeded()) {
- log.info("Cluster Operator verticle started in namespace {} with label selector {}", namespace, config.getCustomResourceSelector());
+ LOGGER.info("Cluster Operator verticle started in namespace {} with label selector {}", namespace, config.getCustomResourceSelector());
} else {
- log.error("Cluster Operator verticle in namespace {} failed to start", namespace, res.cause());
+ LOGGER.error("Cluster Operator verticle in namespace {} failed to start", namespace, res.cause());
System.exit(1);
}
prom.handle(res);
@@ -176,17 +177,17 @@ static CompositeFuture run(Vertx vertx, KubernetesClient client, PlatformFeature
clusterRoles.put("strimzi-kafka-client", "033-ClusterRole-strimzi-kafka-client.yaml");
for (Map.Entry clusterRole : clusterRoles.entrySet()) {
- log.info("Creating cluster role {}", clusterRole.getKey());
+ LOGGER.info("Creating cluster role {}", clusterRole.getKey());
try (BufferedReader br = new BufferedReader(
new InputStreamReader(Main.class.getResourceAsStream("/cluster-roles/" + clusterRole.getValue()),
StandardCharsets.UTF_8))) {
String yaml = br.lines().collect(Collectors.joining(System.lineSeparator()));
ClusterRole role = ClusterRoleOperator.convertYamlToClusterRole(yaml);
- Future fut = cro.reconcile(role.getMetadata().getName(), role);
+ Future fut = cro.reconcile(new Reconciliation("start-cluster-operator", "Deployment", config.getOperatorNamespace(), "cluster-operator"), role.getMetadata().getName(), role);
futures.add(fut);
} catch (IOException e) {
- log.error("Failed to create Cluster Roles.", e);
+ LOGGER.error("Failed to create Cluster Roles.", e);
throw new RuntimeException(e);
}
diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/AbstractConfiguration.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/AbstractConfiguration.java
index 91c250aaa5..6315dc0069 100644
--- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/AbstractConfiguration.java
+++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/AbstractConfiguration.java
@@ -5,9 +5,9 @@
package io.strimzi.operator.cluster.model;
+import io.strimzi.operator.common.Reconciliation;
+import io.strimzi.operator.common.ReconciliationLogger;
import io.strimzi.operator.common.model.OrderedProperties;
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
import java.util.Collections;
import java.util.List;
@@ -20,7 +20,7 @@
* Abstract class for processing and generating configuration passed by the user.
*/
public abstract class AbstractConfiguration {
- private static final Logger log = LogManager.getLogger(AbstractConfiguration.class.getName());
+ private static final ReconciliationLogger LOGGER = ReconciliationLogger.create(AbstractConfiguration.class.getName());
private final OrderedProperties options = new OrderedProperties();
@@ -28,98 +28,105 @@ public abstract class AbstractConfiguration {
* Constructor used to instantiate this class from String configuration. Should be used to create configuration
* from the Assembly.
*
+ * @param reconciliation The reconciliation
 * @param configuration Configuration in String format. Should contain zero or more lines with key=value
* pairs.
* @param forbiddenPrefixes List with configuration key prefixes which are not allowed. All keys which start with one of
* these prefixes will be ignored.
*/
- public AbstractConfiguration(String configuration, List forbiddenPrefixes) {
+ public AbstractConfiguration(Reconciliation reconciliation, String configuration, List forbiddenPrefixes) {
options.addStringPairs(configuration);
- filterForbidden(forbiddenPrefixes);
+ filterForbidden(reconciliation, forbiddenPrefixes);
}
/**
* Constructor used to instantiate this class from String configuration. Should be used to create configuration
* from the Assembly.
*
+ * @param reconciliation The reconciliation
 * @param configuration Configuration in String format. Should contain zero or more lines with key=value
* pairs.
* @param forbiddenPrefixes List with configuration key prefixes which are not allowed. All keys which start with one of
* these prefixes will be ignored.
* @param defaults Properties object with default options
*/
- public AbstractConfiguration(String configuration, List forbiddenPrefixes, Map defaults) {
+ public AbstractConfiguration(Reconciliation reconciliation, String configuration, List forbiddenPrefixes, Map defaults) {
options.addMapPairs(defaults);
options.addStringPairs(configuration);
- filterForbidden(forbiddenPrefixes);
+ filterForbidden(reconciliation, forbiddenPrefixes);
}
/**
* Constructor used to instantiate this class from JsonObject. Should be used to create configuration from
* ConfigMap / CRD.
*
+ * @param reconciliation The reconciliation
 * @param jsonOptions Json object with configuration options as key and value pairs.
* @param forbiddenPrefixes List with configuration key prefixes which are not allowed. All keys which start with one of
* these prefixes will be ignored.
*/
- public AbstractConfiguration(Iterable> jsonOptions, List forbiddenPrefixes) {
+ public AbstractConfiguration(Reconciliation reconciliation, Iterable> jsonOptions, List forbiddenPrefixes) {
options.addIterablePairs(jsonOptions);
- filterForbidden(forbiddenPrefixes);
+ filterForbidden(reconciliation, forbiddenPrefixes);
}
/**
* Constructor used to instantiate this class from JsonObject. Should be used to create configuration from
* ConfigMap / CRD.
*
+ * @param reconciliation The reconciliation
 * @param jsonOptions Json object with configuration options as key and value pairs.
* @param forbiddenPrefixes List with configuration key prefixes which are not allowed. All keys which start with one of
* these prefixes will be ignored.
* @param forbiddenPrefixExceptions Exceptions excluded from forbidden prefix options checking
*/
- public AbstractConfiguration(Iterable> jsonOptions, List forbiddenPrefixes, List forbiddenPrefixExceptions) {
+ public AbstractConfiguration(Reconciliation reconciliation, Iterable> jsonOptions, List forbiddenPrefixes, List forbiddenPrefixExceptions) {
options.addIterablePairs(jsonOptions);
- filterForbidden(forbiddenPrefixes, forbiddenPrefixExceptions);
+ filterForbidden(reconciliation, forbiddenPrefixes, forbiddenPrefixExceptions);
}
/**
* Constructor used to instantiate this class from JsonObject. Should be used to create configuration from
* ConfigMap / CRD.
*
+ * @param reconciliation The reconciliation
 * @param jsonOptions Json object with configuration options as key and value pairs.
* @param forbiddenPrefixes List with configuration key prefixes which are not allowed. All keys which start with one of
* these prefixes will be ignored.
* @param defaults Properties object with default options
*/
- public AbstractConfiguration(Iterable> jsonOptions, List forbiddenPrefixes, Map defaults) {
+ public AbstractConfiguration(Reconciliation reconciliation, Iterable> jsonOptions, List forbiddenPrefixes, Map defaults) {
options.addMapPairs(defaults);
options.addIterablePairs(jsonOptions);
- filterForbidden(forbiddenPrefixes);
+ filterForbidden(reconciliation, forbiddenPrefixes);
}
/**
* Constructor used to instantiate this class from JsonObject. Should be used to create configuration from
* ConfigMap / CRD.
*
+ * @param reconciliation The reconciliation
 * @param jsonOptions Json object with configuration options as key and value pairs.
* @param forbiddenPrefixes List with configuration key prefixes which are not allowed. All keys which start with one of
* these prefixes will be ignored.
* @param forbiddenPrefixExceptions Exceptions excluded from forbidden prefix options checking
* @param defaults Properties object with default options
*/
- public AbstractConfiguration(Iterable> jsonOptions, List forbiddenPrefixes, List forbiddenPrefixExceptions, Map defaults) {
+ public AbstractConfiguration(Reconciliation reconciliation, Iterable> jsonOptions, List forbiddenPrefixes, List forbiddenPrefixExceptions, Map defaults) {
options.addMapPairs(defaults);
options.addIterablePairs(jsonOptions);
- filterForbidden(forbiddenPrefixes, forbiddenPrefixExceptions);
+ filterForbidden(reconciliation, forbiddenPrefixes, forbiddenPrefixExceptions);
}
/**
* Filters forbidden values from the configuration.
*
+ * @param reconciliation The reconciliation
* @param forbiddenPrefixes List with configuration key prefixes which are not allowed. All keys which start with one of
* these prefixes will be ignored.
* @param forbiddenPrefixExceptions Exceptions excluded from forbidden prefix options checking
*/
- private void filterForbidden(List forbiddenPrefixes, List forbiddenPrefixExceptions) {
+ private void filterForbidden(Reconciliation reconciliation, List forbiddenPrefixes, List forbiddenPrefixExceptions) {
options.filter(k -> forbiddenPrefixes.stream().anyMatch(s -> {
boolean forbidden = k.toLowerCase(Locale.ENGLISH).startsWith(s);
if (forbidden) {
@@ -127,16 +134,16 @@ private void filterForbidden(List forbiddenPrefixes, List forbid
forbidden = false;
}
if (forbidden) {
- log.warn("Configuration option \"{}\" is forbidden and will be ignored", k);
+ LOGGER.warnCr(reconciliation, "Configuration option \"{}\" is forbidden and will be ignored", k);
} else {
- log.trace("Configuration option \"{}\" is allowed and will be passed to the assembly", k);
+ LOGGER.traceCr(reconciliation, "Configuration option \"{}\" is allowed and will be passed to the assembly", k);
}
return forbidden;
}));
}
- private void filterForbidden(List forbiddenPrefixes) {
- this.filterForbidden(forbiddenPrefixes, Collections.emptyList());
+ private void filterForbidden(Reconciliation reconciliation, List forbiddenPrefixes) {
+ this.filterForbidden(reconciliation, forbiddenPrefixes, Collections.emptyList());
}
public String getConfigOption(String configOption) {
diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/AbstractModel.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/AbstractModel.java
index e4f86134e4..6cee20c84f 100644
--- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/AbstractModel.java
+++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/AbstractModel.java
@@ -83,11 +83,11 @@
import io.strimzi.api.kafka.model.template.PodManagementPolicy;
import io.strimzi.operator.common.MetricsAndLogging;
import io.strimzi.operator.common.Annotations;
+import io.strimzi.operator.common.Reconciliation;
+import io.strimzi.operator.common.ReconciliationLogger;
import io.strimzi.operator.common.Util;
import io.strimzi.operator.common.model.Labels;
import io.strimzi.operator.common.model.OrderedProperties;
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
import java.io.IOException;
import java.io.InputStream;
@@ -109,7 +109,7 @@ public abstract class AbstractModel {
public static final String STRIMZI_CLUSTER_OPERATOR_NAME = "strimzi-cluster-operator";
- protected static final Logger log = LogManager.getLogger(AbstractModel.class.getName());
+ protected static final ReconciliationLogger LOGGER = ReconciliationLogger.create(AbstractModel.class.getName());
protected static final String LOG4J2_MONITOR_INTERVAL = "30";
protected static final String DEFAULT_JVM_XMS = "128M";
@@ -183,6 +183,7 @@ public abstract class AbstractModel {
}
}
+ protected final Reconciliation reconciliation;
protected final String cluster;
protected final String namespace;
@@ -291,10 +292,12 @@ public abstract class AbstractModel {
/**
* Constructor
*
+ * @param reconciliation The reconciliation
* @param resource Kubernetes resource with metadata containing the namespace and cluster name
* @param applicationName Name of the application that the extending class is deploying
*/
- protected AbstractModel(HasMetadata resource, String applicationName) {
+ protected AbstractModel(Reconciliation reconciliation, HasMetadata resource, String applicationName) {
+ this.reconciliation = reconciliation;
this.cluster = resource.getMetadata().getName();
this.namespace = resource.getMetadata().getNamespace();
this.labels = Labels.generateDefaultLabels(resource, applicationName, STRIMZI_CLUSTER_OPERATOR_NAME);
@@ -402,33 +405,34 @@ public OrderedProperties getDefaultLogConfig() {
if (logConfigFileName == null || logConfigFileName.isEmpty()) {
return new OrderedProperties();
}
- return getOrderedProperties(getDefaultLogConfigFileName());
+ return getOrderedProperties(reconciliation, getDefaultLogConfigFileName());
}
/**
* Read a config file and returns the properties in a deterministic order.
*
+ * @param reconciliation The reconciliation
* @param configFileName The filename.
* @return The OrderedProperties of the inputted file.
*/
- public static OrderedProperties getOrderedProperties(String configFileName) {
+ public static OrderedProperties getOrderedProperties(Reconciliation reconciliation, String configFileName) {
if (configFileName == null || configFileName.isEmpty()) {
throw new IllegalArgumentException("configFileName must be non-empty string");
}
OrderedProperties properties = new OrderedProperties();
InputStream is = AbstractModel.class.getResourceAsStream("/" + configFileName);
if (is == null) {
- log.warn("Cannot find resource '{}'", configFileName);
+ LOGGER.warnCr(reconciliation, "Cannot find resource '{}'", configFileName);
} else {
try {
properties.addStringPairs(is);
} catch (IOException e) {
- log.warn("Unable to read default log config from '{}'", configFileName);
+ LOGGER.warnCr(reconciliation, "Unable to read default log config from '{}'", configFileName);
} finally {
try {
is.close();
} catch (IOException e) {
- log.error("Failed to close stream. Reason: " + e.getMessage());
+ LOGGER.errorCr(reconciliation, "Failed to close stream. Reason: " + e.getMessage());
}
}
}
@@ -437,6 +441,7 @@ public static OrderedProperties getOrderedProperties(String configFileName) {
/**
* Transforms map to log4j properties file format.
+ *
* @param properties map of log4j properties.
* @return log4j properties as a String.
*/
@@ -484,7 +489,7 @@ public String parseLogging(Logging logging, ConfigMap externalCm) {
if (newRootLogger != null && !rootAppenderName.isEmpty() && !newRootLogger.contains(",")) {
// this should never happen as appender name is added in default configuration
- log.debug("Newly set rootLogger does not contain appender. Setting appender to {}.", rootAppenderName);
+ LOGGER.debugCr(reconciliation, "Newly set rootLogger does not contain appender. Setting appender to {}.", rootAppenderName);
String level = newSettings.asMap().get("log4j.rootLogger");
newSettings.addPair("log4j.rootLogger", level + ", " + rootAppenderName);
}
@@ -510,7 +515,7 @@ public String parseLogging(Logging logging, ConfigMap externalCm) {
throw new InvalidResourceException("Property logging.valueFrom has to be specified when using external logging.");
}
} else {
- log.debug("logging is not set, using default loggers");
+ LOGGER.debugCr(reconciliation, "logging is not set, using default loggers");
return createLog4jProperties(getDefaultLogConfig());
}
}
@@ -523,10 +528,10 @@ private String getRootAppenderNamesFromDefaultLoggingConfig(OrderedProperties ne
if (tmp.length == 2) {
appenderName = tmp[1].trim();
} else {
- log.warn("Logging configuration for root logger does not contain appender.");
+ LOGGER.warnCr(reconciliation, "Logging configuration for root logger does not contain appender.");
}
} else {
- log.warn("Logger log4j.rootLogger not set.");
+ LOGGER.warnCr(reconciliation, "Logger log4j.rootLogger not set.");
}
return appenderName;
}
@@ -574,13 +579,13 @@ protected String parseMetrics(ConfigMap externalCm) {
if (getMetricsConfigInCm() != null) {
if (getMetricsConfigInCm() instanceof JmxPrometheusExporterMetrics) {
if (externalCm == null) {
- log.warn("ConfigMap {} does not exist. Metrics disabled.",
+ LOGGER.warnCr(reconciliation, "ConfigMap {} does not exist. Metrics disabled.",
((JmxPrometheusExporterMetrics) getMetricsConfigInCm()).getValueFrom().getConfigMapKeyRef().getName());
throw new InvalidResourceException("ConfigMap " + ((JmxPrometheusExporterMetrics) getMetricsConfigInCm()).getValueFrom().getConfigMapKeyRef().getName() + " does not exist.");
} else {
String data = externalCm.getData().get(((JmxPrometheusExporterMetrics) getMetricsConfigInCm()).getValueFrom().getConfigMapKeyRef().getKey());
if (data == null) {
- log.warn("ConfigMap {} does not contain specified key {}. Metrics disabled.", ((JmxPrometheusExporterMetrics) getMetricsConfigInCm()).getValueFrom().getConfigMapKeyRef().getName(),
+ LOGGER.warnCr(reconciliation, "ConfigMap {} does not contain specified key {}. Metrics disabled.", ((JmxPrometheusExporterMetrics) getMetricsConfigInCm()).getValueFrom().getConfigMapKeyRef().getName(),
((JmxPrometheusExporterMetrics) getMetricsConfigInCm()).getValueFrom().getConfigMapKeyRef().getKey());
throw new InvalidResourceException("ConfigMap " + ((JmxPrometheusExporterMetrics) getMetricsConfigInCm()).getValueFrom().getConfigMapKeyRef().getName()
+ " does not contain specified key " + ((JmxPrometheusExporterMetrics) getMetricsConfigInCm()).getValueFrom().getConfigMapKeyRef().getKey() + ".");
@@ -599,7 +604,7 @@ protected String parseMetrics(ConfigMap externalCm) {
}
}
} else {
- log.warn("Unknown type of metrics {}.", getMetricsConfigInCm().getClass());
+ LOGGER.warnCr(reconciliation, "Unknown type of metrics {}.", getMetricsConfigInCm().getClass());
throw new InvalidResourceException("Unknown type of metrics " + getMetricsConfigInCm().getClass() + ".");
}
}
@@ -692,6 +697,7 @@ protected static void validatePersistentStorage(Storage storage) {
/**
* Checks if the supplied PersistentClaimStorage has a valid size
+ *
* @param storage
*
* @throws InvalidResourceException if the persistent storage size is not valid
@@ -822,13 +828,13 @@ protected ContainerPort createContainerPort(String name, int port, String protoc
.withProtocol(protocol)
.withContainerPort(port)
.build();
- log.trace("Created container port {}", containerPort);
+ LOGGER.traceCr(reconciliation, "Created container port {}", containerPort);
return containerPort;
}
protected ServicePort createServicePort(String name, int port, int targetPort, String protocol) {
ServicePort servicePort = createServicePort(name, port, targetPort, null, protocol);
- log.trace("Created service port {}", servicePort);
+ LOGGER.traceCr(reconciliation, "Created service port {}", servicePort);
return servicePort;
}
@@ -842,7 +848,7 @@ protected ServicePort createServicePort(String name, int port, int targetPort, I
builder.withNodePort(nodePort);
}
ServicePort servicePort = builder.build();
- log.trace("Created service port {}", servicePort);
+ LOGGER.traceCr(reconciliation, "Created service port {}", servicePort);
return servicePort;
}
@@ -954,7 +960,7 @@ protected Service createService(String name, String type, List port
service.getSpec().setIpFamilies(ipFamilies.stream().map(IpFamily::toValue).collect(Collectors.toList()));
}
- log.trace("Created service {}", service);
+ LOGGER.traceCr(reconciliation, "Created service {}", service);
return service;
}
@@ -991,7 +997,7 @@ protected Service createHeadlessService(List ports) {
service.getSpec().setIpFamilies(templateHeadlessServiceIpFamilies.stream().map(IpFamily::toValue).collect(Collectors.toList()));
}
- log.trace("Created headless service {}", service);
+ LOGGER.traceCr(reconciliation, "Created headless service {}", service);
return service;
}
@@ -1516,7 +1522,7 @@ protected void addContainerEnvsToExistingEnvs(List existingEnvs, List volumeList, String volumeNamePrefi
/**
* Creates the VolumeMounts used for authentication of Kafka client based components
- *
* @param authentication Authentication object from CRD
* @param volumeMountList List where the volumes will be added
* @param tlsVolumeMount Path where the TLS certs should be mounted
@@ -164,7 +158,6 @@ public static void configureClientAuthenticationVolumeMounts(KafkaClientAuthenti
/**
* Creates the VolumeMounts used for authentication of Kafka client based components
- *
* @param authentication Authentication object from CRD
* @param volumeMountList List where the volume mounts will be added
* @param tlsVolumeMount Path where the TLS certs should be mounted
diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/ClusterCa.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/ClusterCa.java
index de2d81d318..20db8a6799 100644
--- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/ClusterCa.java
+++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/ClusterCa.java
@@ -12,6 +12,7 @@
import io.strimzi.certs.Subject;
import io.strimzi.operator.cluster.ClusterOperator;
import io.strimzi.operator.common.PasswordGenerator;
+import io.strimzi.operator.common.Reconciliation;
import java.io.IOException;
import java.util.HashMap;
@@ -34,11 +35,11 @@ public class ClusterCa extends Ca {
private final Pattern ipv4Address = Pattern.compile("[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}");
- public ClusterCa(CertManager certManager, PasswordGenerator passwordGenerator, String clusterName, Secret caCertSecret, Secret caKeySecret) {
- this(certManager, passwordGenerator, clusterName, caCertSecret, caKeySecret, 365, 30, true, null);
+ public ClusterCa(Reconciliation reconciliation, CertManager certManager, PasswordGenerator passwordGenerator, String clusterName, Secret caCertSecret, Secret caKeySecret) {
+ this(reconciliation, certManager, passwordGenerator, clusterName, caCertSecret, caKeySecret, 365, 30, true, null);
}
- public ClusterCa(CertManager certManager,
+ public ClusterCa(Reconciliation reconciliation, CertManager certManager,
PasswordGenerator passwordGenerator,
String clusterName,
Secret clusterCaCert,
@@ -47,12 +48,12 @@ public ClusterCa(CertManager certManager,
int renewalDays,
boolean generateCa,
CertificateExpirationPolicy policy) {
- super(certManager, passwordGenerator, "cluster-ca",
+ super(reconciliation, certManager, passwordGenerator,
+ "cluster-ca",
AbstractModel.clusterCaCertSecretName(clusterName),
forceRenewal(clusterCaCert, clusterCaKey, "cluster-ca.key"),
AbstractModel.clusterCaKeySecretName(clusterName),
- adapt060ClusterCaSecret(clusterCaKey),
- validityDays, renewalDays, generateCa, policy);
+ adapt060ClusterCaSecret(clusterCaKey), validityDays, renewalDays, generateCa, policy);
this.clusterName = clusterName;
}
@@ -143,8 +144,9 @@ public Map generateZkCerts(Kafka kafka, boolean isMaintenanc
return subject;
};
- log.debug("{}: Reconciling zookeeper certificates", this);
+ LOGGER.debugCr(reconciliation, "{}: Reconciling zookeeper certificates", this);
return maybeCopyOrGenerateCerts(
+ reconciliation,
kafka.getSpec().getZookeeper().getReplicas(),
subjectFn,
zkNodesSecret,
@@ -153,7 +155,7 @@ public Map generateZkCerts(Kafka kafka, boolean isMaintenanc
}
public Map generateBrokerCerts(Kafka kafka, Set externalBootstrapAddresses,
- Map> externalAddresses, boolean isMaintenanceTimeWindowsSatisfied) throws IOException {
+ Map> externalAddresses, boolean isMaintenanceTimeWindowsSatisfied) throws IOException {
String cluster = kafka.getMetadata().getName();
String namespace = kafka.getMetadata().getNamespace();
@@ -202,8 +204,9 @@ public Map generateBrokerCerts(Kafka kafka, Set exte
return subject;
};
- log.debug("{}: Reconciling kafka broker certificates", this);
+ LOGGER.debugCr(reconciliation, "{}: Reconciling kafka broker certificates", this);
return maybeCopyOrGenerateCerts(
+ reconciliation,
kafka.getSpec().getKafka().getReplicas(),
subjectFn,
brokersSecret,
diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/CruiseControl.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/CruiseControl.java
index ee128ab551..b537bcaf9a 100644
--- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/CruiseControl.java
+++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/CruiseControl.java
@@ -44,6 +44,7 @@
import io.strimzi.operator.cluster.model.cruisecontrol.Capacity;
import io.strimzi.operator.cluster.operator.resource.cruisecontrol.CruiseControlConfigurationParameters;
import io.strimzi.operator.common.Annotations;
+import io.strimzi.operator.common.Reconciliation;
import io.strimzi.operator.common.model.Labels;
import io.strimzi.operator.common.model.OrderedProperties;
@@ -134,10 +135,11 @@ public class CruiseControl extends AbstractModel {
/**
* Constructor
*
+ * @param reconciliation The reconciliation
* @param resource Kubernetes resource with metadata containing the namespace and cluster name
*/
- protected CruiseControl(HasMetadata resource) {
- super(resource, APPLICATION_NAME);
+ protected CruiseControl(Reconciliation reconciliation, HasMetadata resource) {
+ super(reconciliation, resource, APPLICATION_NAME);
this.name = CruiseControlResources.deploymentName(cluster);
this.serviceName = CruiseControlResources.serviceName(cluster);
this.ancillaryConfigMapName = metricAndLogConfigsName(cluster);
@@ -171,13 +173,13 @@ protected static String defaultBootstrapServers(String cluster) {
}
@SuppressWarnings("deprecation")
- public static CruiseControl fromCrd(Kafka kafkaAssembly, KafkaVersion.Lookup versions) {
+ public static CruiseControl fromCrd(Reconciliation reconciliation, Kafka kafkaAssembly, KafkaVersion.Lookup versions) {
CruiseControl cruiseControl = null;
CruiseControlSpec spec = kafkaAssembly.getSpec().getCruiseControl();
KafkaClusterSpec kafkaClusterSpec = kafkaAssembly.getSpec().getKafka();
if (spec != null) {
- cruiseControl = new CruiseControl(kafkaAssembly);
+ cruiseControl = new CruiseControl(reconciliation, kafkaAssembly);
cruiseControl.isDeployed = true;
cruiseControl.setReplicas(DEFAULT_REPLICAS);
@@ -201,9 +203,9 @@ public static CruiseControl fromCrd(Kafka kafkaAssembly, KafkaVersion.Lookup ver
cruiseControl.tlsSidecarImage = tlsSideCarImage;
cruiseControl.setTlsSidecar(tlsSidecar);
- cruiseControl = updateConfiguration(spec, cruiseControl);
+ cruiseControl = cruiseControl.updateConfiguration(spec);
- KafkaConfiguration configuration = new KafkaConfiguration(kafkaClusterSpec.getConfig().entrySet());
+ KafkaConfiguration configuration = new KafkaConfiguration(reconciliation, kafkaClusterSpec.getConfig().entrySet());
if (configuration.getConfigOption(MIN_INSYNC_REPLICAS) != null) {
cruiseControl.minInsyncReplicas = configuration.getConfigOption(MIN_INSYNC_REPLICAS);
}
@@ -239,8 +241,8 @@ public static CruiseControl fromCrd(Kafka kafkaAssembly, KafkaVersion.Lookup ver
return cruiseControl;
}
- public static CruiseControl updateConfiguration(CruiseControlSpec spec, CruiseControl cruiseControl) {
- CruiseControlConfiguration userConfiguration = new CruiseControlConfiguration(spec.getConfig().entrySet());
+ public CruiseControl updateConfiguration(CruiseControlSpec spec) {
+ CruiseControlConfiguration userConfiguration = new CruiseControlConfiguration(reconciliation, spec.getConfig().entrySet());
for (Map.Entry defaultEntry : CruiseControlConfiguration.getCruiseControlDefaultPropertiesMap().entrySet()) {
if (userConfiguration.getConfigOption(defaultEntry.getKey()) == null) {
userConfiguration.setConfigOption(defaultEntry.getKey(), defaultEntry.getValue());
@@ -248,8 +250,8 @@ public static CruiseControl updateConfiguration(CruiseControlSpec spec, CruiseCo
}
// Ensure that the configured anomaly.detection.goals are a sub-set of the default goals
checkGoals(userConfiguration);
- cruiseControl.setConfiguration(userConfiguration);
- return cruiseControl;
+ this.setConfiguration(userConfiguration);
+ return this;
}
/**
@@ -259,7 +261,7 @@ public static CruiseControl updateConfiguration(CruiseControlSpec spec, CruiseCo
* @param configuration The configuration instance to be checked.
* @throws UnsupportedOperationException If the configuration contains self.healing.goals configurations.
*/
- public static void checkGoals(CruiseControlConfiguration configuration) {
+ public void checkGoals(CruiseControlConfiguration configuration) {
// If self healing goals are defined then these take precedence.
// Right now, self.healing.goals must either be null or an empty list
if (configuration.getConfigOption(CruiseControlConfigurationParameters.CRUISE_CONTROL_SELF_HEALING_CONFIG_KEY.toString()) != null) {
@@ -285,7 +287,7 @@ public static void checkGoals(CruiseControlConfiguration configuration) {
// If the anomaly detection goals contain goals which are not in the default goals then the CC startup
// checks will fail, so we make the anomaly goals match the default goals
configuration.setConfigOption(CruiseControlConfigurationParameters.CRUISE_CONTROL_ANOMALY_DETECTION_CONFIG_KEY.toString(), defaultGoalsString);
- log.warn("Anomaly goals contained goals which are not in the configured default goals. Anomaly goals have " +
+ LOGGER.warnCr(reconciliation, "Anomaly goals contained goals which are not in the configured default goals. Anomaly goals have " +
"been changed to match the specified default goals.");
}
}
@@ -547,7 +549,7 @@ public Secret generateSecret(ClusterCa clusterCa, boolean isMaintenanceTimeWindo
return null;
}
Secret secret = clusterCa.cruiseControlSecret();
- return ModelUtils.buildSecret(clusterCa, secret, namespace, CruiseControl.secretName(cluster), name, "cruise-control", labels, createOwnerReference(), isMaintenanceTimeWindowsSatisfied);
+ return ModelUtils.buildSecret(reconciliation, clusterCa, secret, namespace, CruiseControl.secretName(cluster), name, "cruise-control", labels, createOwnerReference(), isMaintenanceTimeWindowsSatisfied);
}
/**
@@ -615,7 +617,7 @@ public NetworkPolicy generateNetworkPolicy(String operatorNamespace, Labels oper
.endSpec()
.build();
- log.trace("Created network policy {}", networkPolicy);
+ LOGGER.traceCr(reconciliation, "Created network policy {}", networkPolicy);
return networkPolicy;
}
diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/CruiseControlConfiguration.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/CruiseControlConfiguration.java
index 3bb6b50d82..d54a26284e 100644
--- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/CruiseControlConfiguration.java
+++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/CruiseControlConfiguration.java
@@ -8,6 +8,7 @@
import io.strimzi.api.kafka.model.CruiseControlSpec;
import io.strimzi.operator.cluster.operator.resource.cruisecontrol.CruiseControlGoals;
import io.strimzi.operator.cluster.operator.resource.cruisecontrol.CruiseControlConfigurationParameters;
+import io.strimzi.operator.common.Reconciliation;
import java.util.Arrays;
import java.util.Collections;
@@ -106,14 +107,15 @@ public class CruiseControlConfiguration extends AbstractConfiguration {
* Constructor used to instantiate this class from JsonObject. Should be used to create configuration from
* ConfigMap / CRD.
*
+ * @param reconciliation The reconciliation
* @param jsonOptions Json object with configuration options as key ad value pairs.
*/
- public CruiseControlConfiguration(Iterable> jsonOptions) {
- super(jsonOptions, FORBIDDEN_PREFIXES, FORBIDDEN_PREFIX_EXCEPTIONS);
+ public CruiseControlConfiguration(Reconciliation reconciliation, Iterable> jsonOptions) {
+ super(reconciliation, jsonOptions, FORBIDDEN_PREFIXES, FORBIDDEN_PREFIX_EXCEPTIONS);
}
- private CruiseControlConfiguration(String configuration, List forbiddenPrefixes) {
- super(configuration, forbiddenPrefixes);
+ private CruiseControlConfiguration(Reconciliation reconciliation, String configuration, List forbiddenPrefixes) {
+ super(reconciliation, configuration, forbiddenPrefixes);
}
public static Map getCruiseControlDefaultPropertiesMap() {
diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/EntityOperator.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/EntityOperator.java
index 36f82ebca5..90b241fca1 100644
--- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/EntityOperator.java
+++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/EntityOperator.java
@@ -33,6 +33,7 @@
import io.strimzi.api.kafka.model.template.EntityOperatorTemplate;
import io.strimzi.operator.cluster.ClusterOperatorConfig;
import io.strimzi.operator.cluster.Main;
+import io.strimzi.operator.common.Reconciliation;
import java.io.BufferedReader;
import java.io.IOException;
@@ -79,8 +80,8 @@ public class EntityOperator extends AbstractModel {
/**
*/
- protected EntityOperator(HasMetadata resource) {
- super(resource, APPLICATION_NAME);
+ protected EntityOperator(Reconciliation reconciliation, HasMetadata resource) {
+ super(reconciliation, resource, APPLICATION_NAME);
this.name = entityOperatorName(cluster);
this.replicas = EntityOperatorSpec.DEFAULT_REPLICAS;
this.zookeeperConnect = defaultZookeeperConnect(cluster);
@@ -137,21 +138,22 @@ public boolean isDeployed() {
/**
* Create a Entity Operator from given desired resource
*
+ * @param reconciliation The reconciliation
* @param kafkaAssembly desired resource with cluster configuration containing the Entity Operator one
* @param versions The versions.
* @return Entity Operator instance, null if not configured in the ConfigMap
*/
- public static EntityOperator fromCrd(Kafka kafkaAssembly, KafkaVersion.Lookup versions) {
+ public static EntityOperator fromCrd(Reconciliation reconciliation, Kafka kafkaAssembly, KafkaVersion.Lookup versions) {
EntityOperator result = null;
EntityOperatorSpec entityOperatorSpec = kafkaAssembly.getSpec().getEntityOperator();
if (entityOperatorSpec != null) {
- result = new EntityOperator(kafkaAssembly);
+ result = new EntityOperator(reconciliation, kafkaAssembly);
result.setOwnerReference(kafkaAssembly);
- EntityTopicOperator topicOperator = EntityTopicOperator.fromCrd(kafkaAssembly);
- EntityUserOperator userOperator = EntityUserOperator.fromCrd(kafkaAssembly);
+ EntityTopicOperator topicOperator = EntityTopicOperator.fromCrd(reconciliation, kafkaAssembly);
+ EntityUserOperator userOperator = EntityUserOperator.fromCrd(reconciliation, kafkaAssembly);
TlsSidecar tlsSidecar = entityOperatorSpec.getTlsSidecar();
if (entityOperatorSpec.getTemplate() != null) {
@@ -217,7 +219,7 @@ protected String getDefaultLogConfigFileName() {
public Deployment generateDeployment(boolean isOpenShift, Map annotations, ImagePullPolicy imagePullPolicy, List imagePullSecrets) {
if (!isDeployed()) {
- log.warn("Topic and/or User Operators not declared: Entity Operator will not be deployed");
+ LOGGER.warnCr(reconciliation, "Topic and/or User Operators not declared: Entity Operator will not be deployed");
return null;
}
@@ -321,7 +323,7 @@ public Secret generateSecret(ClusterCa clusterCa, boolean isMaintenanceTimeWindo
return null;
}
Secret secret = clusterCa.entityOperatorSecret();
- return ModelUtils.buildSecret(clusterCa, secret, namespace, EntityOperator.secretName(cluster), name,
+ return ModelUtils.buildSecret(reconciliation, clusterCa, secret, namespace, EntityOperator.secretName(cluster), name,
"entity-operator", labels, createOwnerReference(), isMaintenanceTimeWindowsSatisfied);
}
@@ -386,7 +388,7 @@ public Role generateRole(String ownerNamespace, String namespace) {
ClusterRole cr = yamlReader.readValue(yaml, ClusterRole.class);
rules = cr.getRules();
} catch (IOException e) {
- log.error("Failed to read entity-operator ClusterRole.", e);
+ LOGGER.errorCr(reconciliation, "Failed to read entity-operator ClusterRole.", e);
throw new RuntimeException(e);
}
diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/EntityTopicOperator.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/EntityTopicOperator.java
index 7f0c98dd0c..a3f6cda195 100644
--- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/EntityTopicOperator.java
+++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/EntityTopicOperator.java
@@ -23,6 +23,7 @@
import io.strimzi.api.kafka.model.Probe;
import io.strimzi.api.kafka.model.ProbeBuilder;
import io.strimzi.operator.cluster.ClusterOperatorConfig;
+import io.strimzi.operator.common.Reconciliation;
import io.strimzi.operator.common.model.OrderedProperties;
import java.util.ArrayList;
@@ -76,10 +77,11 @@ public class EntityTopicOperator extends AbstractModel {
protected SecurityContext templateContainerSecurityContext;
/**
+ * @param reconciliation The reconciliation
* @param resource Kubernetes resource with metadata containing the namespace and cluster name
*/
- protected EntityTopicOperator(HasMetadata resource) {
- super(resource, APPLICATION_NAME);
+ protected EntityTopicOperator(Reconciliation reconciliation, HasMetadata resource) {
+ super(reconciliation, resource, APPLICATION_NAME);
this.name = topicOperatorName(cluster);
this.readinessPath = "/";
this.readinessProbeOptions = DEFAULT_HEALTHCHECK_OPTIONS;
@@ -200,10 +202,11 @@ public String getAncillaryConfigMapKeyLogConfig() {
/**
* Create an Entity Topic Operator from given desired resource
*
+ * @param reconciliation The reconciliation
* @param kafkaAssembly desired resource with cluster configuration containing the Entity Topic Operator one
* @return Entity Topic Operator instance, null if not configured in the ConfigMap
*/
- public static EntityTopicOperator fromCrd(Kafka kafkaAssembly) {
+ public static EntityTopicOperator fromCrd(Reconciliation reconciliation, Kafka kafkaAssembly) {
EntityTopicOperator result = null;
EntityOperatorSpec entityOperatorSpec = kafkaAssembly.getSpec().getEntityOperator();
if (entityOperatorSpec != null) {
@@ -212,7 +215,7 @@ public static EntityTopicOperator fromCrd(Kafka kafkaAssembly) {
if (topicOperatorSpec != null) {
String namespace = kafkaAssembly.getMetadata().getNamespace();
- result = new EntityTopicOperator(kafkaAssembly);
+ result = new EntityTopicOperator(reconciliation, kafkaAssembly);
result.setOwnerReference(kafkaAssembly);
String image = topicOperatorSpec.getImage();
diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/EntityUserOperator.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/EntityUserOperator.java
index 79f08ff31c..aaa5c99f21 100644
--- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/EntityUserOperator.java
+++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/EntityUserOperator.java
@@ -24,6 +24,7 @@
import io.strimzi.api.kafka.model.Probe;
import io.strimzi.api.kafka.model.ProbeBuilder;
import io.strimzi.operator.cluster.ClusterOperatorConfig;
+import io.strimzi.operator.common.Reconciliation;
import io.strimzi.operator.common.model.OrderedProperties;
import java.util.ArrayList;
@@ -82,10 +83,11 @@ public class EntityUserOperator extends AbstractModel {
protected SecurityContext templateContainerSecurityContext;
/**
+ * @param reconciliation The reconciliation
* @param resource Kubernetes resource with metadata containing the namespace and cluster name
*/
- protected EntityUserOperator(HasMetadata resource) {
- super(resource, APPLICATION_NAME);
+ protected EntityUserOperator(Reconciliation reconciliation, HasMetadata resource) {
+ super(reconciliation, resource, APPLICATION_NAME);
this.name = userOperatorName(cluster);
this.readinessPath = "/";
this.livenessProbeOptions = DEFAULT_HEALTHCHECK_OPTIONS;
@@ -210,10 +212,11 @@ public String getAncillaryConfigMapKeyLogConfig() {
/**
* Create an Entity User Operator from given desired resource
*
+ * @param reconciliation The reconciliation
* @param kafkaAssembly desired resource with cluster configuration containing the Entity User Operator one
* @return Entity User Operator instance, null if not configured in the ConfigMap
*/
- public static EntityUserOperator fromCrd(Kafka kafkaAssembly) {
+ public static EntityUserOperator fromCrd(Reconciliation reconciliation, Kafka kafkaAssembly) {
EntityUserOperator result = null;
EntityOperatorSpec entityOperatorSpec = kafkaAssembly.getSpec().getEntityOperator();
if (entityOperatorSpec != null) {
@@ -222,7 +225,7 @@ public static EntityUserOperator fromCrd(Kafka kafkaAssembly) {
if (userOperatorSpec != null) {
String namespace = kafkaAssembly.getMetadata().getNamespace();
- result = new EntityUserOperator(kafkaAssembly);
+ result = new EntityUserOperator(reconciliation, kafkaAssembly);
result.setOwnerReference(kafkaAssembly);
String image = userOperatorSpec.getImage();
diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/JmxTrans.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/JmxTrans.java
index faec248918..c2c92b56b9 100644
--- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/JmxTrans.java
+++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/JmxTrans.java
@@ -34,6 +34,7 @@
import io.strimzi.operator.cluster.model.components.JmxTransQueries;
import io.strimzi.operator.cluster.model.components.JmxTransServer;
import io.strimzi.operator.cluster.model.components.JmxTransServers;
+import io.strimzi.operator.common.Reconciliation;
import java.util.ArrayList;
import java.util.Arrays;
@@ -79,10 +80,11 @@ public class JmxTrans extends AbstractModel {
/**
* Constructor
*
+ * @param reconciliation The reconciliation
* @param resource Kubernetes resource with metadata containing the namespace and cluster name
*/
- protected JmxTrans(HasMetadata resource) {
- super(resource, APPLICATION_NAME);
+ protected JmxTrans(Reconciliation reconciliation, HasMetadata resource) {
+ super(reconciliation, resource, APPLICATION_NAME);
this.name = JmxTransResources.deploymentName(cluster);
this.clusterName = cluster;
this.replicas = 1;
@@ -99,7 +101,7 @@ protected JmxTrans(HasMetadata resource) {
this.isMetricsEnabled = true;
}
- public static JmxTrans fromCrd(Kafka kafkaAssembly, KafkaVersion.Lookup versions) {
+ public static JmxTrans fromCrd(Reconciliation reconciliation, Kafka kafkaAssembly, KafkaVersion.Lookup versions) {
JmxTrans result = null;
JmxTransSpec spec = kafkaAssembly.getSpec().getJmxTrans();
if (spec != null) {
@@ -107,10 +109,10 @@ public static JmxTrans fromCrd(Kafka kafkaAssembly, KafkaVersion.Lookup versions
String error = String.format("Can't start up JmxTrans '%s' in '%s' as Kafka spec.kafka.jmxOptions is not specified",
JmxTransResources.deploymentName(kafkaAssembly.getMetadata().getName()),
kafkaAssembly.getMetadata().getNamespace());
- log.warn(error);
+ LOGGER.warnCr(reconciliation, error);
throw new InvalidResourceException(error);
}
- result = new JmxTrans(kafkaAssembly);
+ result = new JmxTrans(reconciliation, kafkaAssembly);
result.isDeployed = true;
if (kafkaAssembly.getSpec().getKafka().getJmxOptions().getAuthentication() instanceof KafkaJmxAuthenticationPassword) {
@@ -202,7 +204,7 @@ private String generateJMXConfig(JmxTransSpec spec, int numOfBrokers) throws Jso
try {
return mapper.writeValueAsString(servers);
} catch (JsonProcessingException e) {
- log.error("Could not create JmxTrans config json because: " + e.getMessage());
+ LOGGER.errorCr(reconciliation, "Could not create JmxTrans config json because: " + e.getMessage());
throw e;
}
}
diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaBridgeCluster.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaBridgeCluster.java
index af728ac28d..55890842f9 100644
--- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaBridgeCluster.java
+++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaBridgeCluster.java
@@ -32,6 +32,7 @@
import io.strimzi.api.kafka.model.template.KafkaBridgeTemplate;
import io.strimzi.api.kafka.model.tracing.Tracing;
import io.strimzi.operator.cluster.ClusterOperatorConfig;
+import io.strimzi.operator.common.Reconciliation;
import io.strimzi.operator.common.Util;
import io.strimzi.operator.common.model.Labels;
import io.strimzi.operator.common.model.OrderedProperties;
@@ -120,10 +121,11 @@ public class KafkaBridgeCluster extends AbstractModel {
/**
* Constructor
*
+ * @param reconciliation The reconciliation
* @param resource Kubernetes resource with metadata containing the namespace and cluster name
*/
- protected KafkaBridgeCluster(HasMetadata resource) {
- super(resource, APPLICATION_NAME);
+ protected KafkaBridgeCluster(Reconciliation reconciliation, HasMetadata resource) {
+ super(reconciliation, resource, APPLICATION_NAME);
this.name = KafkaBridgeResources.deploymentName(cluster);
this.serviceName = KafkaBridgeResources.serviceName(cluster);
this.ancillaryConfigMapName = KafkaBridgeResources.metricsAndLogConfigMapName(cluster);
@@ -139,9 +141,9 @@ protected KafkaBridgeCluster(HasMetadata resource) {
this.logAndMetricsConfigMountPath = "/opt/strimzi/custom-config/";
}
- public static KafkaBridgeCluster fromCrd(KafkaBridge kafkaBridge, KafkaVersion.Lookup versions) {
+ public static KafkaBridgeCluster fromCrd(Reconciliation reconciliation, KafkaBridge kafkaBridge, KafkaVersion.Lookup versions) {
- KafkaBridgeCluster kafkaBridgeCluster = new KafkaBridgeCluster(kafkaBridge);
+ KafkaBridgeCluster kafkaBridgeCluster = new KafkaBridgeCluster(reconciliation, kafkaBridge);
KafkaBridgeSpec spec = kafkaBridge.getSpec();
kafkaBridgeCluster.tracing = spec.getTracing();
@@ -172,7 +174,10 @@ public static KafkaBridgeCluster fromCrd(KafkaBridge kafkaBridge, KafkaVersion.L
kafkaBridgeCluster.setTls(spec.getTls() != null ? spec.getTls() : null);
- AuthenticationUtils.validateClientAuthentication(spec.getAuthentication(), spec.getTls() != null);
+ String warnMsg = AuthenticationUtils.validateClientAuthentication(spec.getAuthentication(), spec.getTls() != null);
+ if (!warnMsg.isEmpty()) {
+ LOGGER.warnCr(reconciliation, warnMsg);
+ }
kafkaBridgeCluster.setAuthentication(spec.getAuthentication());
if (spec.getTemplate() != null) {
@@ -207,7 +212,7 @@ public static KafkaBridgeCluster fromCrd(KafkaBridge kafkaBridge, KafkaVersion.L
kafkaBridgeCluster.setHttpEnabled(true);
kafkaBridgeCluster.setKafkaBridgeHttpConfig(spec.getHttp());
} else {
- log.warn("No protocol specified.");
+ LOGGER.warnCr(reconciliation, "No protocol specified.");
throw new InvalidResourceException("No protocol for communication with Bridge specified. Use HTTP.");
}
kafkaBridgeCluster.setOwnerReference(kafkaBridge);
@@ -355,8 +360,8 @@ protected List getEnvVars() {
}
varList.add(buildEnvVar(ENV_VAR_KAFKA_BRIDGE_BOOTSTRAP_SERVERS, bootstrapServers));
- varList.add(buildEnvVar(ENV_VAR_KAFKA_BRIDGE_CONSUMER_CONFIG, kafkaBridgeConsumer == null ? "" : new KafkaBridgeConsumerConfiguration(kafkaBridgeConsumer.getConfig().entrySet()).getConfiguration()));
- varList.add(buildEnvVar(ENV_VAR_KAFKA_BRIDGE_PRODUCER_CONFIG, kafkaBridgeProducer == null ? "" : new KafkaBridgeProducerConfiguration(kafkaBridgeProducer.getConfig().entrySet()).getConfiguration()));
+ varList.add(buildEnvVar(ENV_VAR_KAFKA_BRIDGE_CONSUMER_CONFIG, kafkaBridgeConsumer == null ? "" : new KafkaBridgeConsumerConfiguration(reconciliation, kafkaBridgeConsumer.getConfig().entrySet()).getConfiguration()));
+ varList.add(buildEnvVar(ENV_VAR_KAFKA_BRIDGE_PRODUCER_CONFIG, kafkaBridgeProducer == null ? "" : new KafkaBridgeProducerConfiguration(reconciliation, kafkaBridgeProducer.getConfig().entrySet()).getConfiguration()));
varList.add(buildEnvVar(ENV_VAR_KAFKA_BRIDGE_ID, cluster));
varList.add(buildEnvVar(ENV_VAR_KAFKA_BRIDGE_HTTP_ENABLED, String.valueOf(httpEnabled)));
diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaBridgeConsumerConfiguration.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaBridgeConsumerConfiguration.java
index 89f8f5423d..8127407123 100644
--- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaBridgeConsumerConfiguration.java
+++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaBridgeConsumerConfiguration.java
@@ -6,6 +6,7 @@
package io.strimzi.operator.cluster.model;
import io.strimzi.api.kafka.model.KafkaBridgeConsumerSpec;
+import io.strimzi.operator.common.Reconciliation;
import java.util.HashMap;
import java.util.List;
@@ -30,9 +31,10 @@ public class KafkaBridgeConsumerConfiguration extends AbstractConfiguration {
* Constructor used to instantiate this class from JsonObject. Should be used to create configuration from
* ConfigMap / CRD.
*
+ * @param reconciliation The reconciliation
* @param jsonOptions Json object with configuration options as key ad value pairs.
*/
- public KafkaBridgeConsumerConfiguration(Iterable> jsonOptions) {
- super(jsonOptions, FORBIDDEN_PREFIXES, FORBIDDEN_PREFIX_EXCEPTIONS, DEFAULTS);
+ public KafkaBridgeConsumerConfiguration(Reconciliation reconciliation, Iterable> jsonOptions) {
+ super(reconciliation, jsonOptions, FORBIDDEN_PREFIXES, FORBIDDEN_PREFIX_EXCEPTIONS, DEFAULTS);
}
}
diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaBridgeProducerConfiguration.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaBridgeProducerConfiguration.java
index 58cd8486df..e146c4debb 100644
--- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaBridgeProducerConfiguration.java
+++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaBridgeProducerConfiguration.java
@@ -6,6 +6,7 @@
package io.strimzi.operator.cluster.model;
import io.strimzi.api.kafka.model.KafkaBridgeProducerSpec;
+import io.strimzi.operator.common.Reconciliation;
import java.util.HashMap;
import java.util.List;
@@ -30,9 +31,10 @@ public class KafkaBridgeProducerConfiguration extends AbstractConfiguration {
* Constructor used to instantiate this class from JsonObject. Should be used to create configuration from
* ConfigMap / CRD.
*
+ * @param reconciliation The reconciliation
* @param jsonOptions Json object with configuration options as key ad value pairs.
*/
- public KafkaBridgeProducerConfiguration(Iterable> jsonOptions) {
- super(jsonOptions, FORBIDDEN_PREFIXES, FORBIDDEN_PREFIX_EXCEPTIONS, DEFAULTS);
+ public KafkaBridgeProducerConfiguration(Reconciliation reconciliation, Iterable> jsonOptions) {
+ super(reconciliation, jsonOptions, FORBIDDEN_PREFIXES, FORBIDDEN_PREFIX_EXCEPTIONS, DEFAULTS);
}
}
diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaCluster.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaCluster.java
index 293911e1e2..5ac6e31ad4 100644
--- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaCluster.java
+++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaCluster.java
@@ -78,6 +78,7 @@
import io.strimzi.operator.cluster.operator.resource.cruisecontrol.CruiseControlConfigurationParameters;
import io.strimzi.operator.common.Annotations;
import io.strimzi.operator.common.PasswordGenerator;
+import io.strimzi.operator.common.Reconciliation;
import io.strimzi.operator.common.Util;
import io.strimzi.operator.common.model.Labels;
import io.strimzi.operator.common.operator.resource.StatusUtils;
@@ -237,10 +238,11 @@ public class KafkaCluster extends AbstractModel {
/**
* Constructor
*
+ * @param reconciliation The reconciliation
* @param resource Kubernetes resource with metadata containing the namespace and cluster name
*/
- private KafkaCluster(HasMetadata resource) {
- super(resource, APPLICATION_NAME);
+ private KafkaCluster(Reconciliation reconciliation, HasMetadata resource) {
+ super(reconciliation, resource, APPLICATION_NAME);
this.name = kafkaClusterName(cluster);
this.serviceName = serviceName(cluster);
this.headlessServiceName = headlessServiceName(cluster);
@@ -358,13 +360,13 @@ public static String clientsCaCertSecretName(String cluster) {
return KafkaResources.clientsCaCertificateSecretName(cluster);
}
- public static KafkaCluster fromCrd(Kafka kafkaAssembly, KafkaVersion.Lookup versions) {
- return fromCrd(kafkaAssembly, versions, null, 0);
+ public static KafkaCluster fromCrd(Reconciliation reconciliation, Kafka kafkaAssembly, KafkaVersion.Lookup versions) {
+ return fromCrd(reconciliation, kafkaAssembly, versions, null, 0);
}
@SuppressWarnings({"checkstyle:MethodLength", "checkstyle:JavaNCSS", "deprecation"})
- public static KafkaCluster fromCrd(Kafka kafkaAssembly, KafkaVersion.Lookup versions, Storage oldStorage, int oldReplicas) {
- KafkaCluster result = new KafkaCluster(kafkaAssembly);
+ public static KafkaCluster fromCrd(Reconciliation reconciliation, Kafka kafkaAssembly, KafkaVersion.Lookup versions, Storage oldStorage, int oldReplicas) {
+ KafkaCluster result = new KafkaCluster(reconciliation, kafkaAssembly);
result.setOwnerReference(kafkaAssembly);
@@ -413,9 +415,9 @@ public static KafkaCluster fromCrd(Kafka kafkaAssembly, KafkaVersion.Lookup vers
// Handle Kafka broker configuration
KafkaVersion desiredVersion = versions.version(kafkaClusterSpec.getVersion());
- KafkaConfiguration configuration = new KafkaConfiguration(kafkaClusterSpec.getConfig().entrySet());
+ KafkaConfiguration configuration = new KafkaConfiguration(reconciliation, kafkaClusterSpec.getConfig().entrySet());
configureCruiseControlMetrics(kafkaAssembly, result, configuration);
- validateConfiguration(kafkaAssembly, desiredVersion, configuration);
+ validateConfiguration(reconciliation, kafkaAssembly, desiredVersion, configuration);
result.setConfiguration(configuration);
// Parse different types of metrics configurations
@@ -425,15 +427,15 @@ public static KafkaCluster fromCrd(Kafka kafkaAssembly, KafkaVersion.Lookup vers
Storage newStorage = kafkaClusterSpec.getStorage();
AbstractModel.validatePersistentStorage(newStorage);
- StorageDiff diff = new StorageDiff(oldStorage, newStorage, oldReplicas, kafkaClusterSpec.getReplicas());
+ StorageDiff diff = new StorageDiff(reconciliation, oldStorage, newStorage, oldReplicas, kafkaClusterSpec.getReplicas());
if (!diff.isEmpty()) {
- log.warn("Only the following changes to Kafka storage are allowed: " +
+ LOGGER.warnCr(reconciliation, "Only the following changes to Kafka storage are allowed: " +
"changing the deleteClaim flag, " +
"adding volumes to Jbod storage or removing volumes from Jbod storage, " +
"changing overrides to nodes which do not exist yet" +
"and increasing size of persistent claim volumes (depending on the volume type and used storage class).");
- log.warn("The desired Kafka storage configuration in the custom resource {}/{} contains changes which are not allowed. As a " +
+ LOGGER.warnCr(reconciliation, "The desired Kafka storage configuration in the custom resource {}/{} contains changes which are not allowed. As a " +
"result, all storage changes will be ignored. Use DEBUG level logging for more information " +
"about the detected changes.", kafkaAssembly.getMetadata().getNamespace(), kafkaAssembly.getMetadata().getName());
@@ -457,11 +459,11 @@ public static KafkaCluster fromCrd(Kafka kafkaAssembly, KafkaVersion.Lookup vers
// Configure listeners
if (kafkaClusterSpec.getListeners() == null || kafkaClusterSpec.getListeners().getGenericKafkaListeners() == null) {
- log.error("The required field .spec.kafka.listeners is missing");
+ LOGGER.errorCr(reconciliation, "The required field .spec.kafka.listeners is missing");
throw new InvalidResourceException("The required field .spec.kafka.listeners is missing");
}
List listeners = kafkaClusterSpec.getListeners().getGenericKafkaListeners();
- ListenersValidator.validate(kafkaClusterSpec.getReplicas(), listeners);
+ ListenersValidator.validate(reconciliation, kafkaClusterSpec.getReplicas(), listeners);
result.setListeners(listeners);
// Set authorization
@@ -471,7 +473,7 @@ public static KafkaCluster fromCrd(Kafka kafkaAssembly, KafkaVersion.Lookup vers
} else {
KafkaAuthorizationKeycloak authorizationKeycloak = (KafkaAuthorizationKeycloak) kafkaClusterSpec.getAuthorization();
if (authorizationKeycloak.getClientId() == null || authorizationKeycloak.getTokenEndpointUri() == null) {
- log.error("Keycloak Authorization: Token Endpoint URI and clientId are both required");
+ LOGGER.errorCr(reconciliation, "Keycloak Authorization: Token Endpoint URI and clientId are both required");
throw new InvalidResourceException("Keycloak Authorization: Token Endpoint URI and clientId are both required");
}
}
@@ -626,16 +628,17 @@ private static void configureCruiseControlMetrics(Kafka kafkaAssembly, KafkaClus
/**
* Validates the Kafka broker configuration against the configuration options of the desired Kafka version.
*
+ * @param reconciliation The reconciliation
* @param kafkaAssembly Kafka custom resource
* @param desiredVersion Desired Kafka version
* @param configuration Kafka broker configuration
*/
- private static void validateConfiguration(Kafka kafkaAssembly, KafkaVersion desiredVersion, KafkaConfiguration configuration) {
+ private static void validateConfiguration(Reconciliation reconciliation, Kafka kafkaAssembly, KafkaVersion desiredVersion, KafkaConfiguration configuration) {
List errorsInConfig = configuration.validate(desiredVersion);
if (!errorsInConfig.isEmpty()) {
for (String error : errorsInConfig) {
- log.warn("Kafka {}/{} has invalid spec.kafka.config: {}",
+ LOGGER.warnCr(reconciliation, "Kafka {}/{} has invalid spec.kafka.config: {}",
kafkaAssembly.getMetadata().getNamespace(),
kafkaAssembly.getMetadata().getName(),
error);
@@ -670,19 +673,18 @@ protected static void validateIntConfigProperty(String propertyName, KafkaCluste
* @param externalBootstrapDnsName The set of DNS names for bootstrap service (should be appended to every broker certificate)
* @param externalDnsNames The list of DNS names for broker pods (should be appended only to specific certificates for given broker)
* @param isMaintenanceTimeWindowsSatisfied Indicates whether we are in the maintenance window or not.
- * This is used for certificate renewals
*/
public void generateCertificates(Kafka kafka, ClusterCa clusterCa, Set externalBootstrapDnsName,
- Map> externalDnsNames, boolean isMaintenanceTimeWindowsSatisfied) {
- log.debug("Generating certificates");
+ Map> externalDnsNames, boolean isMaintenanceTimeWindowsSatisfied) {
+ LOGGER.debugCr(reconciliation, "Generating certificates");
try {
brokerCerts = clusterCa.generateBrokerCerts(kafka, externalBootstrapDnsName, externalDnsNames, isMaintenanceTimeWindowsSatisfied);
} catch (IOException e) {
- log.warn("Error while generating certificates", e);
+ LOGGER.warnCr(reconciliation, "Error while generating certificates", e);
}
- log.debug("End generating certificates");
+ LOGGER.debugCr(reconciliation, "End generating certificates");
}
/**
@@ -845,7 +847,8 @@ public List generateExternalServices(int pod) {
String serviceName = ListenersUtils.backwardsCompatibleBrokerServiceName(cluster, pod, listener);
List ports = Collections.singletonList(
- createServicePort(ListenersUtils.backwardsCompatiblePortName(listener),
+ createServicePort(
+ ListenersUtils.backwardsCompatiblePortName(listener),
listener.getPort(),
listener.getPort(),
ListenersUtils.brokerNodePort(listener, pod),
@@ -1801,7 +1804,7 @@ public NetworkPolicy generateNetworkPolicy(String operatorNamespace, Labels oper
.endSpec()
.build();
- log.trace("Created network policy {}", networkPolicy);
+ LOGGER.traceCr(reconciliation, "Created network policy {}", networkPolicy);
return networkPolicy;
}
diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaConfiguration.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaConfiguration.java
index ba231e189e..cfc430526a 100644
--- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaConfiguration.java
+++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaConfiguration.java
@@ -10,6 +10,7 @@
import io.strimzi.kafka.config.model.ConfigModel;
import io.strimzi.kafka.config.model.ConfigModels;
import io.strimzi.kafka.config.model.Scope;
+import io.strimzi.operator.common.Reconciliation;
import java.io.IOException;
import java.io.InputStream;
@@ -43,35 +44,40 @@ public class KafkaConfiguration extends AbstractConfiguration {
* Constructor used to instantiate this class from JsonObject. Should be used to create configuration from
* ConfigMap / CRD.
*
+ * @param reconciliation The reconciliation
* @param jsonOptions Json object with configuration options as key ad value pairs.
*/
- public KafkaConfiguration(Iterable> jsonOptions) {
- super(jsonOptions, FORBIDDEN_PREFIXES, FORBIDDEN_PREFIX_EXCEPTIONS);
+ public KafkaConfiguration(Reconciliation reconciliation, Iterable> jsonOptions) {
+ super(reconciliation, jsonOptions, FORBIDDEN_PREFIXES, FORBIDDEN_PREFIX_EXCEPTIONS);
}
- private KafkaConfiguration(String configuration, List forbiddenPrefixes) {
- super(configuration, forbiddenPrefixes);
+ private KafkaConfiguration(Reconciliation reconciliation, String configuration, List forbiddenPrefixes) {
+ super(reconciliation, configuration, forbiddenPrefixes);
}
/**
* Returns a KafkaConfiguration created without forbidden option filtering.
+ *
+ * @param reconciliation The reconciliation
* @param string A string representation of the Properties
* @return The KafkaConfiguration
*/
- public static KafkaConfiguration unvalidated(String string) {
- return new KafkaConfiguration(string, emptyList());
+ public static KafkaConfiguration unvalidated(Reconciliation reconciliation, String string) {
+ return new KafkaConfiguration(reconciliation, string, emptyList());
}
/**
* Returns a KafkaConfiguration created without forbidden option filtering.
+ *
+ * @param reconciliation The reconciliation
* @param map A map representation of the Properties
* @return The KafkaConfiguration
*/
- public static KafkaConfiguration unvalidated(Map map) {
+ public static KafkaConfiguration unvalidated(Reconciliation reconciliation, Map map) {
StringBuilder string = new StringBuilder();
map.entrySet().forEach(entry -> string.append(entry.getKey() + "=" + entry.getValue() + "\n"));
- return new KafkaConfiguration(string.toString(), emptyList());
+ return new KafkaConfiguration(reconciliation, string.toString(), emptyList());
}
/**
diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaConnectBuild.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaConnectBuild.java
index 26df397573..fb6115f0a4 100644
--- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaConnectBuild.java
+++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaConnectBuild.java
@@ -33,6 +33,7 @@
import io.strimzi.api.kafka.model.template.KafkaConnectTemplate;
import io.strimzi.operator.cluster.ClusterOperatorConfig;
import io.strimzi.operator.common.Annotations;
+import io.strimzi.operator.common.Reconciliation;
import java.util.ArrayList;
import java.util.Arrays;
@@ -57,10 +58,11 @@ public class KafkaConnectBuild extends AbstractModel {
/**
* Constructor
*
+ * @param reconciliation The reconciliation
* @param resource Kubernetes resource with metadata containing the namespace and cluster name
*/
- protected KafkaConnectBuild(HasMetadata resource) {
- super(resource, APPLICATION_NAME);
+ protected KafkaConnectBuild(Reconciliation reconciliation, HasMetadata resource) {
+ super(reconciliation, resource, APPLICATION_NAME);
this.name = KafkaConnectResources.buildPodName(cluster);
this.image = System.getenv().getOrDefault(ClusterOperatorConfig.STRIMZI_DEFAULT_KANIKO_EXECUTOR_IMAGE, DEFAULT_KANIKO_EXECUTOR_IMAGE);
}
@@ -68,12 +70,13 @@ protected KafkaConnectBuild(HasMetadata resource) {
/**
* Created the KafkaConnectBuild instance from the Kafka Connect Custom Resource
*
+ * @param reconciliation The reconciliation
* @param kafkaConnect Kafka Connect CR with the build configuration
* @param versions Kafka versions configuration
* @return Instance of KafkaConnectBuild class
*/
- public static KafkaConnectBuild fromCrd(KafkaConnect kafkaConnect, KafkaVersion.Lookup versions) {
- KafkaConnectBuild build = new KafkaConnectBuild(kafkaConnect);
+ public static KafkaConnectBuild fromCrd(Reconciliation reconciliation, KafkaConnect kafkaConnect, KafkaVersion.Lookup versions) {
+ KafkaConnectBuild build = new KafkaConnectBuild(reconciliation, kafkaConnect);
KafkaConnectSpec spec = kafkaConnect.getSpec();
if (spec == null) {
diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaConnectCluster.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaConnectCluster.java
index d3e7648955..4c9e7d091e 100644
--- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaConnectCluster.java
+++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaConnectCluster.java
@@ -60,6 +60,7 @@
import io.strimzi.api.kafka.model.tracing.Tracing;
import io.strimzi.operator.cluster.ClusterOperatorConfig;
import io.strimzi.operator.common.PasswordGenerator;
+import io.strimzi.operator.common.Reconciliation;
import io.strimzi.operator.common.Util;
import io.strimzi.operator.common.model.Labels;
@@ -143,20 +144,22 @@ public class KafkaConnectCluster extends AbstractModel {
/**
* Constructor
*
+ * @param reconciliation The reconciliation
* @param resource Kubernetes resource with metadata containing the namespace and cluster name
*/
- protected KafkaConnectCluster(HasMetadata resource) {
- this(resource, APPLICATION_NAME);
+ protected KafkaConnectCluster(Reconciliation reconciliation, HasMetadata resource) {
+ this(reconciliation, resource, APPLICATION_NAME);
}
/**
* Constructor
*
+ * @param reconciliation The reconciliation
* @param resource Kubernetes resource with metadata containing the namespace and cluster name
* @param applicationName configurable allow other classes to extend this class
*/
- protected KafkaConnectCluster(HasMetadata resource, String applicationName) {
- super(resource, applicationName);
+ protected KafkaConnectCluster(Reconciliation reconciliation, HasMetadata resource, String applicationName) {
+ super(reconciliation, resource, applicationName);
this.name = KafkaConnectResources.deploymentName(cluster);
this.serviceName = KafkaConnectResources.serviceName(cluster);
this.ancillaryConfigMapName = KafkaConnectResources.metricsAndLogConfigMapName(cluster);
@@ -172,10 +175,10 @@ protected KafkaConnectCluster(HasMetadata resource, String applicationName) {
this.logAndMetricsConfigMountPath = "/opt/kafka/custom-config/";
}
- public static KafkaConnectCluster fromCrd(KafkaConnect kafkaConnect, KafkaVersion.Lookup versions) {
+ public static KafkaConnectCluster fromCrd(Reconciliation reconciliation, KafkaConnect kafkaConnect, KafkaVersion.Lookup versions) {
- KafkaConnectCluster cluster = fromSpec(kafkaConnect.getSpec(), versions,
- new KafkaConnectCluster(kafkaConnect));
+ KafkaConnectCluster cluster = fromSpec(reconciliation, kafkaConnect.getSpec(), versions,
+ new KafkaConnectCluster(reconciliation, kafkaConnect));
cluster.setOwnerReference(kafkaConnect);
@@ -188,7 +191,8 @@ public static KafkaConnectCluster fromCrd(KafkaConnect kafkaConnect, KafkaVersio
* thus permitting reuse of the setter-calling code for subclasses.
*/
@SuppressWarnings("deprecation")
- protected static C fromSpec(KafkaConnectSpec spec,
+ protected static C fromSpec(Reconciliation reconciliation,
+ KafkaConnectSpec spec,
KafkaVersion.Lookup versions,
C kafkaConnect) {
kafkaConnect.setReplicas(spec.getReplicas() != null && spec.getReplicas() >= 0 ? spec.getReplicas() : DEFAULT_REPLICAS);
@@ -196,7 +200,7 @@ protected static C fromSpec(KafkaConnectSpec spe
AbstractConfiguration config = kafkaConnect.getConfiguration();
if (config == null) {
- config = new KafkaConnectConfiguration(spec.getConfig().entrySet());
+ config = new KafkaConnectConfiguration(reconciliation, spec.getConfig().entrySet());
kafkaConnect.setConfiguration(config);
}
if (kafkaConnect.tracing != null) {
@@ -246,7 +250,10 @@ protected static C fromSpec(KafkaConnectSpec spe
kafkaConnect.setBootstrapServers(spec.getBootstrapServers());
kafkaConnect.setTls(spec.getTls());
- AuthenticationUtils.validateClientAuthentication(spec.getAuthentication(), spec.getTls() != null);
+ String warnMsg = AuthenticationUtils.validateClientAuthentication(spec.getAuthentication(), spec.getTls() != null);
+ if (!warnMsg.isEmpty()) {
+ LOGGER.warnCr(reconciliation, warnMsg);
+ }
kafkaConnect.setAuthentication(spec.getAuthentication());
if (spec.getTemplate() != null) {
@@ -371,7 +378,7 @@ private List getExternalConfigurationVolumes(boolean isOpenShift) {
if (name != null) {
if (volume.getConfigMap() != null && volume.getSecret() != null) {
- log.warn("Volume {} with external Kafka Connect configuration has to contain exactly one volume source reference to either ConfigMap or Secret", name);
+ LOGGER.warnCr(reconciliation, "Volume {} with external Kafka Connect configuration has to contain exactly one volume source reference to either ConfigMap or Secret", name);
} else {
if (volume.getConfigMap() != null) {
ConfigMapVolumeSource source = volume.getConfigMap();
@@ -443,7 +450,7 @@ private List getExternalConfigurationVolumeMounts() {
if (name != null) {
if (volume.getConfigMap() != null && volume.getSecret() != null) {
- log.warn("Volume {} with external Kafka Connect configuration has to contain exactly one volume source reference to either ConfigMap or Secret", name);
+ LOGGER.warnCr(reconciliation, "Volume {} with external Kafka Connect configuration has to contain exactly one volume source reference to either ConfigMap or Secret", name);
} else if (volume.getConfigMap() != null || volume.getSecret() != null) {
VolumeMount volumeMount = new VolumeMountBuilder()
.withName(VolumeUtils.getValidVolumeName(EXTERNAL_CONFIGURATION_VOLUME_NAME_PREFIX + name))
@@ -628,7 +635,7 @@ private List getExternalConfigurationEnvVars() {
if (valueFrom != null) {
if (valueFrom.getConfigMapKeyRef() != null && valueFrom.getSecretKeyRef() != null) {
- log.warn("Environment variable {} with external Kafka Connect configuration has to contain exactly one reference to either ConfigMap or Secret", name);
+ LOGGER.warnCr(reconciliation, "Environment variable {} with external Kafka Connect configuration has to contain exactly one reference to either ConfigMap or Secret", name);
} else {
if (valueFrom.getConfigMapKeyRef() != null) {
EnvVarSource envVarSource = new EnvVarSourceBuilder()
@@ -646,7 +653,7 @@ private List getExternalConfigurationEnvVars() {
}
}
} else {
- log.warn("Name of an environment variable with external Kafka Connect configuration cannot start with `KAFKA_` or `STRIMZI`.");
+ LOGGER.warnCr(reconciliation, "Name of an environment variable with external Kafka Connect configuration cannot start with `KAFKA_` or `STRIMZI`.");
}
}
@@ -668,6 +675,7 @@ protected String getDefaultLogConfigFileName() {
/**
* Set the bootstrap servers to connect to
+ *
* @param bootstrapServers bootstrap servers comma separated list
*/
protected void setBootstrapServers(String bootstrapServers) {
@@ -834,7 +842,7 @@ public NetworkPolicy generateNetworkPolicy(boolean connectorOperatorEnabled,
.endSpec()
.build();
- log.trace("Created network policy {}", networkPolicy);
+ LOGGER.traceCr(reconciliation, "Created network policy {}", networkPolicy);
return networkPolicy;
} else {
return null;
diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaConnectConfiguration.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaConnectConfiguration.java
index 6b820d97d2..eb43d43885 100644
--- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaConnectConfiguration.java
+++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaConnectConfiguration.java
@@ -6,6 +6,7 @@
package io.strimzi.operator.cluster.model;
import io.strimzi.api.kafka.model.KafkaConnectSpec;
+import io.strimzi.operator.common.Reconciliation;
import java.util.HashMap;
import java.util.List;
@@ -36,9 +37,10 @@ public class KafkaConnectConfiguration extends AbstractConfiguration {
* Constructor used to instantiate this class from JsonObject. Should be used to create configuration from
* ConfigMap / CRD.
*
+ * @param reconciliation The reconciliation
* @param jsonOptions Json object with configuration options as key ad value pairs.
*/
- public KafkaConnectConfiguration(Iterable> jsonOptions) {
- super(jsonOptions, FORBIDDEN_PREFIXES, FORBIDDEN_PREFIX_EXCEPTIONS, DEFAULTS);
+ public KafkaConnectConfiguration(Reconciliation reconciliation, Iterable> jsonOptions) {
+ super(reconciliation, jsonOptions, FORBIDDEN_PREFIXES, FORBIDDEN_PREFIX_EXCEPTIONS, DEFAULTS);
}
}
diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaConnectS2ICluster.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaConnectS2ICluster.java
index f7c5c53869..4e3ed8da80 100644
--- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaConnectS2ICluster.java
+++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaConnectS2ICluster.java
@@ -31,6 +31,7 @@
import io.strimzi.api.kafka.model.KafkaConnectS2I;
import io.strimzi.api.kafka.model.KafkaConnectS2IResources;
import io.strimzi.api.kafka.model.KafkaConnectS2ISpec;
+import io.strimzi.operator.common.Reconciliation;
import io.strimzi.operator.common.Util;
import java.util.List;
@@ -50,15 +51,15 @@ public class KafkaConnectS2ICluster extends KafkaConnectCluster {
*
* @param resource Kubernetes resource with metadata containing the namespace and cluster name
*/
- private KafkaConnectS2ICluster(HasMetadata resource) {
- super(resource, APPLICATION_NAME);
+ private KafkaConnectS2ICluster(Reconciliation reconciliation, HasMetadata resource) {
+ super(reconciliation, resource, APPLICATION_NAME);
}
// Deprecation is suppressed because of KafkaConnectS2I
@SuppressWarnings("deprecation")
- public static KafkaConnectS2ICluster fromCrd(KafkaConnectS2I kafkaConnectS2I, KafkaVersion.Lookup versions) {
+ public static KafkaConnectS2ICluster fromCrd(Reconciliation reconciliation, KafkaConnectS2I kafkaConnectS2I, KafkaVersion.Lookup versions) {
KafkaConnectS2ISpec spec = kafkaConnectS2I.getSpec();
- KafkaConnectS2ICluster cluster = fromSpec(spec, versions, new KafkaConnectS2ICluster(kafkaConnectS2I));
+ KafkaConnectS2ICluster cluster = fromSpec(reconciliation, spec, versions, new KafkaConnectS2ICluster(reconciliation, kafkaConnectS2I));
if (spec.getBuild() != null) {
throw new InvalidResourceException(".spec.build can be used only with KafkaConnect and is not supported with KafkaConnectS2I.");
diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaExporter.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaExporter.java
index dd271e821b..4d8b961756 100644
--- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaExporter.java
+++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaExporter.java
@@ -27,6 +27,7 @@
import io.strimzi.api.kafka.model.ProbeBuilder;
import io.strimzi.api.kafka.model.template.KafkaExporterTemplate;
import io.strimzi.operator.cluster.ClusterOperatorConfig;
+import io.strimzi.operator.common.Reconciliation;
import java.util.ArrayList;
import java.util.Collections;
@@ -68,10 +69,11 @@ public class KafkaExporter extends AbstractModel {
/**
* Constructor
*
+ * @param reconciliation The reconciliation
* @param resource Kubernetes resource with metadata containing the namespace and cluster name
*/
- protected KafkaExporter(HasMetadata resource) {
- super(resource, APPLICATION_NAME);
+ protected KafkaExporter(Reconciliation reconciliation, HasMetadata resource) {
+ super(reconciliation, resource, APPLICATION_NAME);
this.name = KafkaExporterResources.deploymentName(cluster);
this.replicas = 1;
this.readinessPath = "/metrics";
@@ -87,8 +89,8 @@ protected KafkaExporter(HasMetadata resource) {
}
- public static KafkaExporter fromCrd(Kafka kafkaAssembly, KafkaVersion.Lookup versions) {
- KafkaExporter kafkaExporter = new KafkaExporter(kafkaAssembly);
+ public static KafkaExporter fromCrd(Reconciliation reconciliation, Kafka kafkaAssembly, KafkaVersion.Lookup versions) {
+ KafkaExporter kafkaExporter = new KafkaExporter(reconciliation, kafkaAssembly);
KafkaExporterSpec spec = kafkaAssembly.getSpec().getKafkaExporter();
if (spec != null) {
@@ -318,7 +320,7 @@ public Secret generateSecret(ClusterCa clusterCa, boolean isMaintenanceTimeWindo
return null;
}
Secret secret = clusterCa.kafkaExporterSecret();
- return ModelUtils.buildSecret(clusterCa, secret, namespace, KafkaExporter.secretName(cluster), name,
+ return ModelUtils.buildSecret(reconciliation, clusterCa, secret, namespace, KafkaExporter.secretName(cluster), name,
"kafka-exporter", labels, createOwnerReference(), isMaintenanceTimeWindowsSatisfied);
}
}
diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaMirrorMaker2Cluster.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaMirrorMaker2Cluster.java
index e7efe36063..2072432ef9 100644
--- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaMirrorMaker2Cluster.java
+++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaMirrorMaker2Cluster.java
@@ -25,6 +25,7 @@
import io.strimzi.api.kafka.model.authentication.KafkaClientAuthenticationPlain;
import io.strimzi.api.kafka.model.authentication.KafkaClientAuthenticationScramSha512;
import io.strimzi.api.kafka.model.authentication.KafkaClientAuthenticationTls;
+import io.strimzi.operator.common.Reconciliation;
import java.util.List;
import java.util.Map.Entry;
@@ -56,10 +57,11 @@ public class KafkaMirrorMaker2Cluster extends KafkaConnectCluster {
/**
* Constructor
*
+ * @param reconciliation The reconciliation
* @param resource Kubernetes resource with metadata containing the namespace and cluster name
*/
- private KafkaMirrorMaker2Cluster(HasMetadata resource) {
- super(resource, APPLICATION_NAME);
+ private KafkaMirrorMaker2Cluster(Reconciliation reconciliation, HasMetadata resource) {
+ super(reconciliation, resource, APPLICATION_NAME);
this.name = KafkaMirrorMaker2Resources.deploymentName(cluster);
this.serviceName = KafkaMirrorMaker2Resources.serviceName(cluster);
this.ancillaryConfigMapName = KafkaMirrorMaker2Resources.metricsAndLogConfigMapName(cluster);
@@ -68,13 +70,15 @@ private KafkaMirrorMaker2Cluster(HasMetadata resource) {
/**
* Creates instance of KafkaMirrorMaker2Cluster from CRD definition.
*
+ * @param reconciliation The reconciliation
* @param kafkaMirrorMaker2 The Custom Resource based on which the cluster model should be created.
* @param versions The image versions for MirrorMaker 2.0 clusters.
* @return The MirrorMaker 2.0 cluster model.
*/
- public static KafkaMirrorMaker2Cluster fromCrd(KafkaMirrorMaker2 kafkaMirrorMaker2,
+ public static KafkaMirrorMaker2Cluster fromCrd(Reconciliation reconciliation,
+ KafkaMirrorMaker2 kafkaMirrorMaker2,
KafkaVersion.Lookup versions) {
- KafkaMirrorMaker2Cluster cluster = new KafkaMirrorMaker2Cluster(kafkaMirrorMaker2);
+ KafkaMirrorMaker2Cluster cluster = new KafkaMirrorMaker2Cluster(reconciliation, kafkaMirrorMaker2);
KafkaMirrorMaker2Spec spec = kafkaMirrorMaker2.getSpec();
cluster.setOwnerReference(kafkaMirrorMaker2);
@@ -95,8 +99,8 @@ public static KafkaMirrorMaker2Cluster fromCrd(KafkaMirrorMaker2 kafkaMirrorMake
.findFirst()
.orElseThrow(() -> new InvalidResourceException("connectCluster with alias " + connectClusterAlias + " cannot be found in the list of clusters at spec.clusters"));
}
- cluster.setConfiguration(new KafkaMirrorMaker2Configuration(connectCluster.getConfig().entrySet()));
- return fromSpec(buildKafkaConnectSpec(spec, connectCluster), versions, cluster);
+ cluster.setConfiguration(new KafkaMirrorMaker2Configuration(reconciliation, connectCluster.getConfig().entrySet()));
+ return fromSpec(reconciliation, buildKafkaConnectSpec(spec, connectCluster), versions, cluster);
}
@SuppressWarnings("deprecation")
@@ -211,7 +215,7 @@ private String buildClusterVolumeMountPath(final String baseVolumeMount, final
@SuppressWarnings({"checkstyle:CyclomaticComplexity", "checkstyle:NPathComplexity"})
@Override
protected List getEnvVars() {
- List varList = super.getEnvVars();
+ List varList = super.getEnvVars();
final StringBuilder clusterAliases = new StringBuilder();
final StringBuilder clustersTrustedCerts = new StringBuilder();
diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaMirrorMaker2Configuration.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaMirrorMaker2Configuration.java
index b71be67563..f5c1f8aeec 100644
--- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaMirrorMaker2Configuration.java
+++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaMirrorMaker2Configuration.java
@@ -6,6 +6,7 @@
package io.strimzi.operator.cluster.model;
import io.strimzi.api.kafka.model.KafkaMirrorMaker2ClusterSpec;
+import io.strimzi.operator.common.Reconciliation;
import java.util.HashMap;
import java.util.List;
@@ -39,10 +40,11 @@ public class KafkaMirrorMaker2Configuration extends AbstractConfiguration {
* Constructor used to instantiate this class from JsonObject. Should be used to
* create configuration from ConfigMap / CRD.
*
+ * @param reconciliation The reconciliation
* @param jsonOptions Json object with configuration options as key ad value
* pairs.
*/
- public KafkaMirrorMaker2Configuration(Iterable> jsonOptions) {
- super(jsonOptions, FORBIDDEN_PREFIXES, FORBIDDEN_PREFIX_EXCEPTIONS, DEFAULTS);
+ public KafkaMirrorMaker2Configuration(Reconciliation reconciliation, Iterable> jsonOptions) {
+ super(reconciliation, jsonOptions, FORBIDDEN_PREFIXES, FORBIDDEN_PREFIX_EXCEPTIONS, DEFAULTS);
}
}
diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaMirrorMakerCluster.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaMirrorMakerCluster.java
index bee26ab4d4..57d0b0da45 100644
--- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaMirrorMakerCluster.java
+++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaMirrorMakerCluster.java
@@ -27,12 +27,14 @@
import io.strimzi.api.kafka.model.ProbeBuilder;
import io.strimzi.api.kafka.model.template.KafkaMirrorMakerTemplate;
import io.strimzi.api.kafka.model.tracing.Tracing;
+import io.strimzi.operator.common.Reconciliation;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
+@SuppressWarnings({"checkstyle:CyclomaticComplexity", "checkstyle:NPathComplexity"})
public class KafkaMirrorMakerCluster extends AbstractModel {
protected static final String APPLICATION_NAME = "kafka-mirror-maker";
@@ -103,10 +105,11 @@ public class KafkaMirrorMakerCluster extends AbstractModel {
/**
* Constructor
*
+ * @param reconciliation The reconciliation
* @param resource Kubernetes resource with metadata containing the namespace and cluster name
*/
- protected KafkaMirrorMakerCluster(HasMetadata resource) {
- super(resource, APPLICATION_NAME);
+ protected KafkaMirrorMakerCluster(Reconciliation reconciliation, HasMetadata resource) {
+ super(reconciliation, resource, APPLICATION_NAME);
this.name = KafkaMirrorMakerResources.deploymentName(cluster);
this.serviceName = KafkaMirrorMakerResources.serviceName(cluster);
this.ancillaryConfigMapName = KafkaMirrorMakerResources.metricsAndLogConfigMapName(cluster);
@@ -122,8 +125,8 @@ protected KafkaMirrorMakerCluster(HasMetadata resource) {
}
@SuppressWarnings("deprecation")
- public static KafkaMirrorMakerCluster fromCrd(KafkaMirrorMaker kafkaMirrorMaker, KafkaVersion.Lookup versions) {
- KafkaMirrorMakerCluster kafkaMirrorMakerCluster = new KafkaMirrorMakerCluster(kafkaMirrorMaker);
+ public static KafkaMirrorMakerCluster fromCrd(Reconciliation reconciliation, KafkaMirrorMaker kafkaMirrorMaker, KafkaVersion.Lookup versions) {
+ KafkaMirrorMakerCluster kafkaMirrorMakerCluster = new KafkaMirrorMakerCluster(reconciliation, kafkaMirrorMaker);
KafkaMirrorMakerSpec spec = kafkaMirrorMaker.getSpec();
if (spec != null) {
@@ -144,14 +147,20 @@ public static KafkaMirrorMakerCluster fromCrd(KafkaMirrorMaker kafkaMirrorMaker,
if (include == null && whitelist == null) {
throw new InvalidResourceException("One of the fields include or whitelist needs to be specified.");
} else if (whitelist != null && include != null) {
- log.warn("Both include and whitelist fields are present. Whitelist is deprecated and will be ignored.");
+ LOGGER.warnCr(reconciliation, "Both include and whitelist fields are present. Whitelist is deprecated and will be ignored.");
}
kafkaMirrorMakerCluster.setInclude(include != null ? include : whitelist);
- AuthenticationUtils.validateClientAuthentication(spec.getProducer().getAuthentication(), spec.getProducer().getTls() != null);
+ String warnMsg = AuthenticationUtils.validateClientAuthentication(spec.getProducer().getAuthentication(), spec.getProducer().getTls() != null);
+ if (!warnMsg.isEmpty()) {
+ LOGGER.warnCr(reconciliation, warnMsg);
+ }
kafkaMirrorMakerCluster.setProducer(spec.getProducer());
- AuthenticationUtils.validateClientAuthentication(spec.getConsumer().getAuthentication(), spec.getConsumer().getTls() != null);
+ warnMsg = AuthenticationUtils.validateClientAuthentication(spec.getConsumer().getAuthentication(), spec.getConsumer().getTls() != null);
+ if (!warnMsg.isEmpty()) {
+ LOGGER.warnCr(reconciliation, warnMsg);
+ }
kafkaMirrorMakerCluster.setConsumer(spec.getConsumer());
kafkaMirrorMakerCluster.setImage(versions.kafkaMirrorMakerImage(spec.getImage(), spec.getVersion()));
@@ -307,8 +316,8 @@ protected List getContainers(ImagePullPolicy imagePullPolicy) {
return containers;
}
- private KafkaMirrorMakerConsumerConfiguration getConsumerConfiguration() {
- KafkaMirrorMakerConsumerConfiguration config = new KafkaMirrorMakerConsumerConfiguration(consumer.getConfig().entrySet());
+ private KafkaMirrorMakerConsumerConfiguration getConsumerConfiguration() {
+ KafkaMirrorMakerConsumerConfiguration config = new KafkaMirrorMakerConsumerConfiguration(reconciliation, consumer.getConfig().entrySet());
if (tracing != null) {
config.setConfigOption("interceptor.classes", "io.opentracing.contrib.kafka.TracingConsumerInterceptor");
@@ -318,7 +327,7 @@ private KafkaMirrorMakerConsumerConfiguration getConsumerConfiguration() {
}
private KafkaMirrorMakerProducerConfiguration getProducerConfiguration() {
- KafkaMirrorMakerProducerConfiguration config = new KafkaMirrorMakerProducerConfiguration(producer.getConfig().entrySet());
+ KafkaMirrorMakerProducerConfiguration config = new KafkaMirrorMakerProducerConfiguration(reconciliation, producer.getConfig().entrySet());
if (tracing != null) {
config.setConfigOption("interceptor.classes", "io.opentracing.contrib.kafka.TracingProducerInterceptor");
diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaMirrorMakerConsumerConfiguration.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaMirrorMakerConsumerConfiguration.java
index ff6e331ef7..8d54acb614 100644
--- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaMirrorMakerConsumerConfiguration.java
+++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaMirrorMakerConsumerConfiguration.java
@@ -6,6 +6,7 @@
package io.strimzi.operator.cluster.model;
import io.strimzi.api.kafka.model.KafkaMirrorMakerConsumerSpec;
+import io.strimzi.operator.common.Reconciliation;
import java.util.HashMap;
import java.util.List;
@@ -29,9 +30,10 @@ public class KafkaMirrorMakerConsumerConfiguration extends AbstractConfiguration
* Constructor used to instantiate this class from JsonObject. Should be used to create configuration from
* ConfigMap / CRD.
*
+ * @param reconciliation The reconciliation
* @param jsonOptions Json object with configuration options as key ad value pairs.
*/
- public KafkaMirrorMakerConsumerConfiguration(Iterable> jsonOptions) {
- super(jsonOptions, FORBIDDEN_PREFIXES, FORBIDDEN_PREFIX_EXCEPTIONS, DEFAULTS);
+ public KafkaMirrorMakerConsumerConfiguration(Reconciliation reconciliation, Iterable> jsonOptions) {
+ super(reconciliation, jsonOptions, FORBIDDEN_PREFIXES, FORBIDDEN_PREFIX_EXCEPTIONS, DEFAULTS);
}
}
diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaMirrorMakerProducerConfiguration.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaMirrorMakerProducerConfiguration.java
index 0a087841f5..5d6877af90 100644
--- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaMirrorMakerProducerConfiguration.java
+++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaMirrorMakerProducerConfiguration.java
@@ -6,6 +6,7 @@
package io.strimzi.operator.cluster.model;
import io.strimzi.api.kafka.model.KafkaMirrorMakerProducerSpec;
+import io.strimzi.operator.common.Reconciliation;
import java.util.HashMap;
import java.util.List;
@@ -29,9 +30,10 @@ public class KafkaMirrorMakerProducerConfiguration extends AbstractConfiguration
* Constructor used to instantiate this class from JsonObject. Should be used to create configuration from
* ConfigMap / CRD.
*
+ * @param reconciliation The reconciliation
* @param jsonOptions Json object with configuration options as key ad value pairs.
*/
- public KafkaMirrorMakerProducerConfiguration(Iterable> jsonOptions) {
- super(jsonOptions, FORBIDDEN_PREFIXES, FORBIDDEN_PREFIX_EXCEPTIONS, DEFAULTS);
+ public KafkaMirrorMakerProducerConfiguration(Reconciliation reconciliation, Iterable> jsonOptions) {
+ super(reconciliation, jsonOptions, FORBIDDEN_PREFIXES, FORBIDDEN_PREFIX_EXCEPTIONS, DEFAULTS);
}
}
diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/ListenersValidator.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/ListenersValidator.java
index 53d513738a..2d504e2509 100644
--- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/ListenersValidator.java
+++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/ListenersValidator.java
@@ -11,8 +11,8 @@
import io.strimzi.api.kafka.model.listener.arraylistener.GenericKafkaListenerConfigurationBroker;
import io.strimzi.api.kafka.model.listener.arraylistener.KafkaListenerType;
import io.strimzi.kafka.oauth.jsonpath.JsonPathFilterQuery;
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
+import io.strimzi.operator.common.Reconciliation;
+import io.strimzi.operator.common.ReconciliationLogger;
import java.util.HashSet;
import java.util.List;
@@ -26,7 +26,7 @@
* Util methods for validating Kafka listeners
*/
public class ListenersValidator {
- protected static final Logger LOG = LogManager.getLogger(ListenersValidator.class.getName());
+ protected static final ReconciliationLogger LOGGER = ReconciliationLogger.create(ListenersValidator.class.getName());
private final static Pattern LISTENER_NAME_PATTERN = Pattern.compile(GenericKafkaListener.LISTENER_NAME_REGEX);
public final static List FORBIDDEN_PORTS = List.of(9404, 9999);
public final static int LOWEST_ALLOWED_PORT_NUMBER = 9092;
@@ -34,14 +34,15 @@ public class ListenersValidator {
/**
* Validated the listener configuration. If the configuration is not valid, InvalidResourceException will be thrown.
*
+ * @param reconciliation The reconciliation
* @param replicas Number of replicas (required for Ingress validation)
* @param listeners Listeners which should be validated
*/
- public static void validate(int replicas, List listeners) throws InvalidResourceException {
+ public static void validate(Reconciliation reconciliation, int replicas, List listeners) throws InvalidResourceException {
Set errors = validateAndGetErrorMessages(replicas, listeners);
if (!errors.isEmpty()) {
- LOG.error("Listener configuration is not valid: {}", errors);
+ LOGGER.errorCr(reconciliation, "Listener configuration is not valid: {}", errors);
throw new InvalidResourceException("Listener configuration is not valid: " + errors);
}
}
diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/ModelUtils.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/ModelUtils.java
index b3b6ac2229..96add9bd82 100644
--- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/ModelUtils.java
+++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/ModelUtils.java
@@ -35,10 +35,10 @@
import io.strimzi.api.kafka.model.template.PodTemplate;
import io.strimzi.certs.CertAndKey;
import io.strimzi.operator.cluster.KafkaUpgradeException;
+import io.strimzi.operator.common.Reconciliation;
+import io.strimzi.operator.common.ReconciliationLogger;
import io.strimzi.operator.common.Util;
import io.strimzi.operator.common.model.Labels;
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
@@ -60,7 +60,7 @@ public class ModelUtils {
private ModelUtils() {}
- protected static final Logger log = LogManager.getLogger(ModelUtils.class.getName());
+ protected static final ReconciliationLogger LOGGER = ReconciliationLogger.create(ModelUtils.class.getName());
public static final String TLS_SIDECAR_LOG_LEVEL = "TLS_SIDECAR_LOG_LEVEL";
/**
@@ -128,8 +128,8 @@ static EnvVar tlsSidecarLogEnvVar(TlsSidecar tlsSidecar) {
tlsSidecar.getLogLevel() : TlsSidecarLogLevel.NOTICE).toValue());
}
- public static Secret buildSecret(ClusterCa clusterCa, Secret secret, String namespace, String secretName,
- String commonName, String keyCertName, Labels labels, OwnerReference ownerReference, boolean isMaintenanceTimeWindowsSatisfied) {
+ public static Secret buildSecret(Reconciliation reconciliation, ClusterCa clusterCa, Secret secret, String namespace, String secretName,
+ String commonName, String keyCertName, Labels labels, OwnerReference ownerReference, boolean isMaintenanceTimeWindowsSatisfied) {
Map data = new HashMap<>(4);
CertAndKey certAndKey = null;
boolean shouldBeRegenerated = false;
@@ -146,15 +146,15 @@ public static Secret buildSecret(ClusterCa clusterCa, Secret secret, String name
}
if (shouldBeRegenerated) {
- log.debug("Certificate for pod {} need to be regenerated because: {}", keyCertName, String.join(", ", reasons));
+ LOGGER.debugCr(reconciliation, "Certificate for pod {} need to be regenerated because: {}", keyCertName, String.join(", ", reasons));
try {
certAndKey = clusterCa.generateSignedCert(commonName, Ca.IO_STRIMZI);
} catch (IOException e) {
- log.warn("Error while generating certificates", e);
+ LOGGER.warnCr(reconciliation, "Error while generating certificates", e);
}
- log.debug("End generating certificates");
+ LOGGER.debugCr(reconciliation, "End generating certificates");
} else {
if (secret.getData().get(keyCertName + ".p12") != null &&
!secret.getData().get(keyCertName + ".p12").isEmpty() &&
@@ -174,7 +174,7 @@ public static Secret buildSecret(ClusterCa clusterCa, Secret secret, String name
decodeFromSecret(secret, keyCertName + ".key"),
decodeFromSecret(secret, keyCertName + ".crt"));
} catch (IOException e) {
- log.error("Error generating the keystore for {}", keyCertName, e);
+ LOGGER.errorCr(reconciliation, "Error generating the keystore for {}", keyCertName, e);
}
}
}
diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/StorageDiff.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/StorageDiff.java
index 92aa929f66..ab87fa9f41 100644
--- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/StorageDiff.java
+++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/StorageDiff.java
@@ -11,9 +11,9 @@
import io.strimzi.api.kafka.model.storage.PersistentClaimStorageOverride;
import io.strimzi.api.kafka.model.storage.SingleVolumeStorage;
import io.strimzi.api.kafka.model.storage.Storage;
+import io.strimzi.operator.common.Reconciliation;
+import io.strimzi.operator.common.ReconciliationLogger;
import io.strimzi.operator.common.operator.resource.AbstractJsonDiff;
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
import java.util.Collections;
import java.util.HashSet;
@@ -29,7 +29,7 @@
* Class for diffing storage configuration
*/
public class StorageDiff extends AbstractJsonDiff {
- private static final Logger log = LogManager.getLogger(StorageDiff.class.getName());
+ private static final ReconciliationLogger LOGGER = ReconciliationLogger.create(StorageDiff.class.getName());
private static final Pattern IGNORABLE_PATHS = Pattern.compile(
"^(/deleteClaim|/)$");
@@ -43,13 +43,14 @@ public class StorageDiff extends AbstractJsonDiff {
* Diffs the storage for allowed or not allowed changes. Examples of allowed changes is increasing volume size or
* adding overrides for nodes before scale-up / removing them after scale-down.
*
+ * @param reconciliation The reconciliation
* @param current Current Storage configuration
* @param desired Desired Storage configuration
* @param currentReplicas Current number of replicas (will differ from desired number of replicas when scaling up or down)
* @param desiredReplicas Desired number of replicas (will differ from current number of replicas when scaling up or down)
*/
- public StorageDiff(Storage current, Storage desired, int currentReplicas, int desiredReplicas) {
- this(current, desired, currentReplicas, desiredReplicas, "");
+ public StorageDiff(Reconciliation reconciliation, Storage current, Storage desired, int currentReplicas, int desiredReplicas) {
+ this(reconciliation, current, desired, currentReplicas, desiredReplicas, "");
}
/**
@@ -57,13 +58,14 @@ public StorageDiff(Storage current, Storage desired, int currentReplicas, int de
* adding overrides for nodes before scale-up / removing them after scale-down. This constructor is used internally
* only.
*
+ * @param reconciliation The reconciliation
* @param current Current Storage configuration
* @param desired Desired Storage configuration
* @param currentReplicas Current number of replicas (will differ from desired number of replicas when scaling up or down)
* @param desiredReplicas Desired number of replicas (will differ from current number of replicas when scaling up or down)
* @param volumeDesc Description of the volume which is being used
*/
- private StorageDiff(Storage current, Storage desired, int currentReplicas, int desiredReplicas, String volumeDesc) {
+ private StorageDiff(Reconciliation reconciliation, Storage current, Storage desired, int currentReplicas, int desiredReplicas, String volumeDesc) {
boolean changesType = false;
boolean shrinkSize = false;
boolean isEmpty = true;
@@ -87,7 +89,7 @@ private StorageDiff(Storage current, Storage desired, int currentReplicas, int d
volumesAddedOrRemoved |= isNull(currentVolume) != isNull(desiredVolume);
- StorageDiff diff = new StorageDiff(currentVolume, desiredVolume, currentReplicas, desiredReplicas, "(volume ID: " + volumeId + ") ");
+ StorageDiff diff = new StorageDiff(reconciliation, currentVolume, desiredVolume, currentReplicas, desiredReplicas, "(volume ID: " + volumeId + ") ");
changesType |= diff.changesType();
shrinkSize |= diff.shrinkSize();
@@ -104,7 +106,7 @@ private StorageDiff(Storage current, Storage desired, int currentReplicas, int d
String pathValue = d.get("path").asText();
if (IGNORABLE_PATHS.matcher(pathValue).matches()) {
- log.debug("Ignoring Storage {}diff {}", volumeDesc, d);
+ LOGGER.debugCr(reconciliation, "Ignoring Storage {}diff {}", volumeDesc, d);
continue;
}
@@ -133,10 +135,10 @@ private StorageDiff(Storage current, Storage desired, int currentReplicas, int d
}
}
- if (log.isDebugEnabled()) {
- log.debug("Storage {}differs: {}", volumeDesc, d);
- log.debug("Current Storage {}path {} has value {}", volumeDesc, pathValue, lookupPath(source, pathValue));
- log.debug("Desired Storage {}path {} has value {}", volumeDesc, pathValue, lookupPath(target, pathValue));
+ if (LOGGER.isDebugEnabled()) {
+ LOGGER.debugCr(reconciliation, "Storage {}differs: {}", volumeDesc, d);
+ LOGGER.debugCr(reconciliation, "Current Storage {}path {} has value {}", volumeDesc, pathValue, lookupPath(source, pathValue));
+ LOGGER.debugCr(reconciliation, "Desired Storage {}path {} has value {}", volumeDesc, pathValue, lookupPath(target, pathValue));
}
num++;
diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/VolumeUtils.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/VolumeUtils.java
index e13cf8abd1..d8eab13dee 100644
--- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/VolumeUtils.java
+++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/VolumeUtils.java
@@ -6,8 +6,6 @@
import io.fabric8.kubernetes.api.model.ConfigMapVolumeSource;
import io.fabric8.kubernetes.api.model.ConfigMapVolumeSourceBuilder;
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
import java.math.BigInteger;
import java.nio.charset.StandardCharsets;
@@ -43,7 +41,6 @@
* Shared methods for working with Volume
*/
public class VolumeUtils {
- protected static final Logger log = LogManager.getLogger(VolumeUtils.class.getName());
private static Pattern volumeNamePattern = Pattern.compile("^([a-z0-9]{1}[a-z0-9-]{0,61}[a-z0-9]{1})$");
/**
@@ -78,8 +75,6 @@ public static Volume createConfigMapVolume(String name, String configMapName, Ma
.withConfigMap(configMapVolumeSource)
.build();
- log.trace("Created configMap Volume named '{}' with source configMap '{}'", validName, configMapName);
-
return volume;
}
@@ -102,8 +97,6 @@ public static Volume createConfigMapVolume(String name, String configMapName) {
.withConfigMap(configMapVolumeSource)
.build();
- log.trace("Created configMap Volume named '{}' with source configMap '{}'", validName, configMapName);
-
return volume;
}
@@ -145,7 +138,6 @@ public static Volume createSecretVolume(String name, String secretName, Map> jsonOptions) {
- super(jsonOptions, FORBIDDEN_PREFIXES, FORBIDDEN_PREFIX_EXCEPTIONS, DEFAULTS);
+ public ZookeeperConfiguration(Reconciliation reconciliation, Iterable> jsonOptions) {
+ super(reconciliation, jsonOptions, FORBIDDEN_PREFIXES, FORBIDDEN_PREFIX_EXCEPTIONS, DEFAULTS);
}
}
diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/AbstractConnectOperator.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/AbstractConnectOperator.java
index db2abc6d9e..7b065e7f15 100644
--- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/AbstractConnectOperator.java
+++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/AbstractConnectOperator.java
@@ -52,6 +52,7 @@
import io.strimzi.operator.common.AbstractOperator;
import io.strimzi.operator.common.Annotations;
import io.strimzi.operator.common.BackOff;
+import io.strimzi.operator.common.ReconciliationLogger;
import io.strimzi.operator.common.Reconciliation;
import io.strimzi.operator.common.Util;
import io.strimzi.operator.common.model.Labels;
@@ -72,8 +73,6 @@
import io.vertx.core.Promise;
import io.vertx.core.Vertx;
import io.vertx.core.json.JsonObject;
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
import java.util.ArrayList;
import java.util.Collections;
@@ -100,7 +99,7 @@ public abstract class AbstractConnectOperator, R extends Resource, P extends AbstractKafkaConnectSpec, S extends KafkaConnectStatus>
extends AbstractOperator> {
- private static final Logger log = LogManager.getLogger(AbstractConnectOperator.class.getName());
+ private static final ReconciliationLogger LOGGER = ReconciliationLogger.create(AbstractConnectOperator.class.getName());
private final CrdOperator connectorOperator;
private final Function connectClientProvider;
@@ -233,8 +232,10 @@ public void eventReceived(Action action, KafkaConnector kafkaConnector) {
KafkaConnectS2I connectS2i = cf.resultAt(1);
KafkaConnectApi apiClient = connectOperator.connectClientProvider.apply(connectOperator.vertx);
if (connect == null && connectS2i == null) {
- log.info("{} {} in namespace {} was {}, but Connect cluster {} does not exist", connectorKind, connectorName, connectorNamespace, action, connectName);
- updateStatus(noConnectCluster(connectNamespace, connectName), kafkaConnector, connectOperator.connectorOperator);
+ Reconciliation r = new Reconciliation("connector-watch", connectOperator.kind(),
+ kafkaConnector.getMetadata().getNamespace(), connectName);
+ updateStatus(r, noConnectCluster(connectNamespace, connectName), kafkaConnector, connectOperator.connectorOperator);
+ LOGGER.infoCr(r, "{} {} in namespace {} was {}, but Connect cluster {} does not exist", connectorKind, connectorName, connectorNamespace, action, connectName);
return Future.succeededFuture();
} else if (connect != null && isOlderOrAlone(connect.getMetadata().getCreationTimestamp(), connectS2i)) {
// grab the lock and call reconcileConnectors()
@@ -243,14 +244,14 @@ public void eventReceived(Action action, KafkaConnector kafkaConnector) {
kafkaConnector.getMetadata().getNamespace(), connectName);
if (!Util.matchesSelector(selector, connect)) {
- log.debug("{}: {} {} in namespace {} was {}, but Connect cluster {} does not match label selector {} and will be ignored", reconciliation, connectorKind, connectorName, connectorNamespace, action, connectName, selectorLabels);
+ LOGGER.debugCr(reconciliation, "{} {} in namespace {} was {}, but Connect cluster {} does not match label selector {} and will be ignored", connectorKind, connectorName, connectorNamespace, action, connectName, selectorLabels);
return Future.succeededFuture();
} else if (connect.getSpec() != null && connect.getSpec().getReplicas() == 0) {
- log.info("{}: {} {} in namespace {} was {}, but Connect cluster {} has 0 replicas", reconciliation, connectorKind, connectorName, connectorNamespace, action, connectName);
- updateStatus(zeroReplicas(connectNamespace, connectName), kafkaConnector, connectOperator.connectorOperator);
+ LOGGER.infoCr(reconciliation, "{} {} in namespace {} was {}, but Connect cluster {} has 0 replicas", connectorKind, connectorName, connectorNamespace, action, connectName);
+ updateStatus(reconciliation, zeroReplicas(connectNamespace, connectName), kafkaConnector, connectOperator.connectorOperator);
return Future.succeededFuture();
} else {
- log.info("{}: {} {} in namespace {} was {}", reconciliation, connectorKind, connectorName, connectorNamespace, action);
+ LOGGER.infoCr(reconciliation, "{} {} in namespace {} was {}", connectorKind, connectorName, connectorNamespace, action);
return connectOperator.withLock(reconciliation, LOCK_TIMEOUT_MS,
() -> connectOperator.reconcileConnectorAndHandleResult(reconciliation,
@@ -258,7 +259,7 @@ public void eventReceived(Action action, KafkaConnector kafkaConnector) {
isUseResources(connect),
kafkaConnector.getMetadata().getName(), action == Action.DELETED ? null : kafkaConnector)
.compose(reconcileResult -> {
- log.info("{}: reconciled", reconciliation);
+ LOGGER.infoCr(reconciliation, "reconciled");
return Future.succeededFuture(reconcileResult);
}));
}
@@ -269,14 +270,14 @@ public void eventReceived(Action action, KafkaConnector kafkaConnector) {
kafkaConnector.getMetadata().getNamespace(), connectName);
if (!Util.matchesSelector(selector, connectS2i)) {
- log.debug("{}: {} {} in namespace {} was {}, but Connect cluster {} does not match label selector {} and will be ignored", reconciliation, connectorKind, connectorName, connectorNamespace, action, connectName, selectorLabels);
+ LOGGER.debugCr(reconciliation, "{} {} in namespace {} was {}, but Connect cluster {} does not match label selector {} and will be ignored", connectorKind, connectorName, connectorNamespace, action, connectName, selectorLabels);
return Future.succeededFuture();
} else if (connectS2i.getSpec() != null && connectS2i.getSpec().getReplicas() == 0) {
- log.info("{}: {} {} in namespace {} was {}, but Connect cluster {} has 0 replicas", reconciliation, connectorKind, connectorName, connectorNamespace, action, connectName);
- updateStatus(zeroReplicas(connectNamespace, connectName), kafkaConnector, connectOperator.connectorOperator);
+ LOGGER.infoCr(reconciliation, "{} {} in namespace {} was {}, but Connect cluster {} has 0 replicas", connectorKind, connectorName, connectorNamespace, action, connectName);
+ updateStatus(reconciliation, zeroReplicas(connectNamespace, connectName), kafkaConnector, connectOperator.connectorOperator);
return Future.succeededFuture();
} else {
- log.info("{}: {} {} in namespace {} was {}", reconciliation, connectorKind, connectorName, connectorNamespace, action);
+ LOGGER.infoCr(reconciliation, "{} {} in namespace {} was {}", connectorKind, connectorName, connectorNamespace, action);
return connectS2IOperator.withLock(reconciliation, LOCK_TIMEOUT_MS,
() -> connectS2IOperator.reconcileConnectorAndHandleResult(reconciliation,
@@ -284,14 +285,16 @@ public void eventReceived(Action action, KafkaConnector kafkaConnector) {
isUseResources(connectS2i),
kafkaConnector.getMetadata().getName(), action == Action.DELETED ? null : kafkaConnector)
.compose(reconcileResult -> {
- log.info("{}: reconciled", reconciliation);
+ LOGGER.infoCr(reconciliation, "reconciled");
return Future.succeededFuture(reconcileResult);
}));
}
}
});
} else {
- updateStatus(new InvalidResourceException("Resource lacks label '"
+ updateStatus(new Reconciliation("connector-watch", connectOperator.kind(),
+ kafkaConnector.getMetadata().getNamespace(), null),
+ new InvalidResourceException("Resource lacks label '"
+ Labels.STRIMZI_CLUSTER_LABEL
+ "': No connect cluster in which to create this connector."),
kafkaConnector, connectOperator.connectorOperator);
@@ -299,10 +302,10 @@ public void eventReceived(Action action, KafkaConnector kafkaConnector) {
break;
case ERROR:
- log.error("Failed {} {} in namespace {} ", connectorKind, connectorName, connectorNamespace);
+ LOGGER.errorCr(new Reconciliation("connector-watch", connectorKind, connectName, connectorNamespace), "Failed {} {} in namespace {} ", connectorKind, connectorName, connectorNamespace);
break;
default:
- log.error("Unknown action: {} {} in namespace {}", connectorKind, connectorName, connectorNamespace);
+ LOGGER.errorCr(new Reconciliation("connector-watch", connectorKind, connectName, connectorNamespace), "Unknown action: {} {} in namespace {}", connectorKind, connectorName, connectorNamespace);
}
}
@@ -376,14 +379,14 @@ protected Future reconcileConnectors(Reconciliation reconciliation, T conn
return CompositeFuture.join(
apiClient.list(host, port),
connectorOperator.listAsync(namespace, Optional.of(new LabelSelectorBuilder().addToMatchLabels(Labels.STRIMZI_CLUSTER_LABEL, connectName).build())),
- apiClient.listConnectorPlugins(host, port),
- apiClient.updateConnectLoggers(host, port, desiredLogging, defaultLogging)
+ apiClient.listConnectorPlugins(reconciliation, host, port),
+ apiClient.updateConnectLoggers(reconciliation, host, port, desiredLogging, defaultLogging)
).compose(cf -> {
List runningConnectorNames = cf.resultAt(0);
List desiredConnectors = cf.resultAt(1);
List connectorPlugins = cf.resultAt(2);
- log.debug("{}: Setting list of connector plugins in Kafka Connect status", reconciliation);
+ LOGGER.debugCr(reconciliation, "Setting list of connector plugins in Kafka Connect status");
connectStatus.setConnectorPlugins(connectorPlugins);
if (connectorsResourceCounter != null) {
@@ -392,12 +395,12 @@ protected Future reconcileConnectors(Reconciliation reconciliation, T conn
Set deleteConnectorNames = new HashSet<>(runningConnectorNames);
deleteConnectorNames.removeAll(desiredConnectors.stream().map(c -> c.getMetadata().getName()).collect(Collectors.toSet()));
- log.debug("{}: {} cluster: delete connectors: {}", reconciliation, kind(), deleteConnectorNames);
+ LOGGER.debugCr(reconciliation, "{} cluster: delete connectors: {}", kind(), deleteConnectorNames);
Stream> deletionFutures = deleteConnectorNames.stream().map(connectorName ->
reconcileConnectorAndHandleResult(reconciliation, host, apiClient, true, connectorName, null)
);
- log.debug("{}: {} cluster: required connectors: {}", reconciliation, kind(), desiredConnectors);
+ LOGGER.debugCr(reconciliation, "{} cluster: required connectors: {}", kind(), desiredConnectors);
Stream> createUpdateFutures = desiredConnectors.stream()
.map(connector -> reconcileConnectorAndHandleResult(reconciliation, host, apiClient, true, connector.getMetadata().getName(), connector));
@@ -405,7 +408,7 @@ protected Future reconcileConnectors(Reconciliation reconciliation, T conn
}).recover(error -> {
if (error instanceof ConnectTimeoutException) {
Promise connectorStatuses = Promise.promise();
- log.warn("{}: Failed to connect to the REST API => trying to update the connector status", reconciliation);
+ LOGGER.warnCr(reconciliation, "Failed to connect to the REST API => trying to update the connector status");
connectorOperator.listAsync(namespace, Optional.of(new LabelSelectorBuilder().addToMatchLabels(Labels.STRIMZI_CLUSTER_LABEL, connectName).build()))
.compose(connectors -> CompositeFuture.join(
@@ -463,13 +466,13 @@ private Future reconcileConnector(Reconciliation reconciliation, String ho
boolean useResources, String connectorName, KafkaConnector connector) {
if (connector == null) {
if (useResources) {
- log.info("{}: deleting connector: {}", reconciliation, connectorName);
- return apiClient.delete(host, port, connectorName);
+ LOGGER.infoCr(reconciliation, "deleting connector: {}", connectorName);
+ return apiClient.delete(reconciliation, host, port, connectorName);
} else {
return Future.succeededFuture();
}
} else {
- log.info("{}: creating/updating connector: {}", reconciliation, connectorName);
+ LOGGER.infoCr(reconciliation, "creating/updating connector: {}", connectorName);
if (connector.getSpec() == null) {
return maybeUpdateConnectorStatus(reconciliation, connector, null,
new InvalidResourceException("spec property is required"));
@@ -509,32 +512,32 @@ private Future reconcileConnector(Reconciliation reconciliation, String ho
*/
protected Future maybeCreateOrUpdateConnector(Reconciliation reconciliation, String host, KafkaConnectApi apiClient,
String connectorName, KafkaConnectorSpec connectorSpec, CustomResource resource) {
- return apiClient.getConnectorConfig(new BackOff(200L, 2, 6), host, port, connectorName).compose(
+ return apiClient.getConnectorConfig(reconciliation, new BackOff(200L, 2, 6), host, port, connectorName).compose(
config -> {
if (!needsReconfiguring(reconciliation, connectorName, connectorSpec, config)) {
- log.debug("{}: Connector {} exists and has desired config, {}=={}", reconciliation, connectorName, connectorSpec.getConfig(), config);
- return apiClient.status(host, port, connectorName)
+ LOGGER.debugCr(reconciliation, "Connector {} exists and has desired config, {}=={}", connectorName, connectorSpec.getConfig(), config);
+ return apiClient.status(reconciliation, host, port, connectorName)
.compose(status -> pauseResume(reconciliation, host, apiClient, connectorName, connectorSpec, status))
.compose(ignored -> maybeRestartConnector(reconciliation, host, apiClient, connectorName, resource, new ArrayList<>()))
.compose(conditions -> maybeRestartConnectorTask(reconciliation, host, apiClient, connectorName, resource, conditions))
.compose(conditions ->
- apiClient.statusWithBackOff(new BackOff(200L, 2, 10), host, port, connectorName)
+ apiClient.statusWithBackOff(reconciliation, new BackOff(200L, 2, 10), host, port, connectorName)
.compose(createConnectorStatusAndConditions(conditions)))
- .compose(status -> updateConnectorTopics(host, apiClient, connectorName, status));
+ .compose(status -> updateConnectorTopics(reconciliation, host, apiClient, connectorName, status));
} else {
- log.debug("{}: Connector {} exists but does not have desired config, {}!={}", reconciliation, connectorName, connectorSpec.getConfig(), config);
+ LOGGER.debugCr(reconciliation, "Connector {} exists but does not have desired config, {}!={}", connectorName, connectorSpec.getConfig(), config);
return createOrUpdateConnector(reconciliation, host, apiClient, connectorName, connectorSpec)
.compose(createConnectorStatusAndConditions())
- .compose(status -> updateConnectorTopics(host, apiClient, connectorName, status));
+ .compose(status -> updateConnectorTopics(reconciliation, host, apiClient, connectorName, status));
}
},
error -> {
if (error instanceof ConnectRestException
&& ((ConnectRestException) error).getStatusCode() == 404) {
- log.debug("{}: Connector {} does not exist", reconciliation, connectorName);
+ LOGGER.debugCr(reconciliation, "Connector {} does not exist", connectorName);
return createOrUpdateConnector(reconciliation, host, apiClient, connectorName, connectorSpec)
.compose(createConnectorStatusAndConditions())
- .compose(status -> updateConnectorTopics(host, apiClient, connectorName, status));
+ .compose(status -> updateConnectorTopics(reconciliation, host, apiClient, connectorName, status));
} else {
return Future.failedFuture(error);
}
@@ -555,20 +558,20 @@ private boolean needsReconfiguring(Reconciliation reconciliation, String connect
for (Map.Entry entry : connectorSpec.getConfig().entrySet()) {
desired.put(entry.getKey(), entry.getValue() != null ? entry.getValue().toString() : null);
}
- if (log.isDebugEnabled()) {
- log.debug("{}: Desired: {}", reconciliation, new TreeMap<>(desired));
- log.debug("{}: Actual: {}", reconciliation, new TreeMap<>(actual));
+ if (LOGGER.isDebugEnabled()) {
+ LOGGER.debugCr(reconciliation, "Desired: {}", new TreeMap<>(desired));
+ LOGGER.debugCr(reconciliation, "Actual: {}", new TreeMap<>(actual));
}
return !desired.equals(actual);
}
protected Future> createOrUpdateConnector(Reconciliation reconciliation, String host, KafkaConnectApi apiClient,
String connectorName, KafkaConnectorSpec connectorSpec) {
- return apiClient.createOrUpdatePutRequest(host, port, connectorName, asJson(connectorSpec))
- .compose(ignored -> apiClient.statusWithBackOff(new BackOff(200L, 2, 10), host, port,
+ return apiClient.createOrUpdatePutRequest(reconciliation, host, port, connectorName, asJson(reconciliation, connectorSpec))
+ .compose(ignored -> apiClient.statusWithBackOff(reconciliation, new BackOff(200L, 2, 10), host, port,
connectorName))
.compose(status -> pauseResume(reconciliation, host, apiClient, connectorName, connectorSpec, status))
- .compose(ignored -> apiClient.status(host, port, connectorName));
+ .compose(ignored -> apiClient.status(reconciliation, host, port, connectorName));
}
private Future pauseResume(Reconciliation reconciliation, String host, KafkaConnectApi apiClient, String connectorName, KafkaConnectorSpec connectorSpec, Map status) {
@@ -579,10 +582,10 @@ private Future pauseResume(Reconciliation reconciliation, String host, Kaf
String state = (String) path;
boolean shouldPause = Boolean.TRUE.equals(connectorSpec.getPause());
if ("RUNNING".equals(state) && shouldPause) {
- log.debug("{}: Pausing connector {}", reconciliation, connectorName);
+ LOGGER.debugCr(reconciliation, "Pausing connector {}", connectorName);
return apiClient.pause(host, port, connectorName);
} else if ("PAUSED".equals(state) && !shouldPause) {
- log.debug("{}: Resuming connector {}", reconciliation, connectorName);
+ LOGGER.debugCr(reconciliation, "Resuming connector {}", connectorName);
return apiClient.resume(host, port, connectorName);
} else {
return Future.succeededFuture();
@@ -592,14 +595,14 @@ private Future pauseResume(Reconciliation reconciliation, String host, Kaf
private Future> maybeRestartConnector(Reconciliation reconciliation, String host, KafkaConnectApi apiClient, String connectorName, CustomResource resource, List conditions) {
if (hasRestartAnnotation(resource, connectorName)) {
- log.debug("{}: Restarting connector {}", reconciliation, connectorName);
+ LOGGER.debugCr(reconciliation, "Restarting connector {}", connectorName);
return apiClient.restart(host, port, connectorName)
.compose(ignored -> removeRestartAnnotation(reconciliation, resource)
.compose(v -> Future.succeededFuture(conditions)),
throwable -> {
// Ignore restart failures - add a warning and try again on the next reconcile
String message = "Failed to restart connector " + connectorName + ". " + throwable.getMessage();
- log.warn("{}: {}", reconciliation, message);
+ LOGGER.warnCr(reconciliation, message);
conditions.add(StatusUtils.buildWarningCondition("RestartConnector", message));
return Future.succeededFuture(conditions);
});
@@ -611,14 +614,14 @@ private Future> maybeRestartConnector(Reconciliation reconciliat
private Future> maybeRestartConnectorTask(Reconciliation reconciliation, String host, KafkaConnectApi apiClient, String connectorName, CustomResource resource, List conditions) {
int taskID = getRestartTaskAnnotationTaskID(resource, connectorName);
if (taskID >= 0) {
- log.debug("{}: Restarting connector task {}:{}", reconciliation, connectorName, taskID);
+ LOGGER.debugCr(reconciliation, "Restarting connector task {}:{}", connectorName, taskID);
return apiClient.restartTask(host, port, connectorName, taskID)
.compose(ignored -> removeRestartTaskAnnotation(reconciliation, resource)
.compose(v -> Future.succeededFuture(conditions)),
throwable -> {
// Ignore restart failures - add a warning and try again on the next reconcile
String message = "Failed to restart connector task " + connectorName + ":" + taskID + ". " + throwable.getMessage();
- log.warn("{}: {}", reconciliation, message);
+ LOGGER.warnCr(reconciliation, message);
conditions.add(StatusUtils.buildWarningCondition("RestartConnectorTask", message));
return Future.succeededFuture(conditions);
});
@@ -627,8 +630,8 @@ private Future> maybeRestartConnectorTask(Reconciliation reconci
}
}
- private Future updateConnectorTopics(String host, KafkaConnectApi apiClient, String connectorName, ConnectorStatusAndConditions status) {
- return apiClient.getConnectorTopics(host, port, connectorName)
+ private Future updateConnectorTopics(Reconciliation reconciliation, String host, KafkaConnectApi apiClient, String connectorName, ConnectorStatusAndConditions status) {
+ return apiClient.getConnectorTopics(reconciliation, host, port, connectorName)
.compose(updateConnectorStatusAndConditions(status));
}
@@ -674,24 +677,24 @@ protected Future removeRestartTaskAnnotation(Reconciliation reconciliation
* Patches the KafkaConnector CR to remove the supplied annotation.
*/
private Future removeAnnotation(Reconciliation reconciliation, KafkaConnector resource, String annotationKey) {
- log.debug("{}: Removing annotation {}", reconciliation, annotationKey);
+ LOGGER.debugCr(reconciliation, "Removing annotation {}", annotationKey);
KafkaConnector patchedKafkaConnector = new KafkaConnectorBuilder(resource)
.editMetadata()
.removeFromAnnotations(annotationKey)
.endMetadata()
.build();
- return connectorOperator.patchAsync(patchedKafkaConnector)
+ return connectorOperator.patchAsync(reconciliation, patchedKafkaConnector)
.compose(ignored -> Future.succeededFuture());
}
- public static void updateStatus(Throwable error, KafkaConnector kafkaConnector2, CrdOperator, KafkaConnector, ?> connectorOperations) {
+ public static void updateStatus(Reconciliation reconciliation, Throwable error, KafkaConnector kafkaConnector2, CrdOperator, KafkaConnector, ?> connectorOperations) {
KafkaConnectorStatus status = new KafkaConnectorStatus();
StatusUtils.setStatusConditionAndObservedGeneration(kafkaConnector2, status, error);
StatusDiff diff = new StatusDiff(kafkaConnector2.getStatus(), status);
if (!diff.isEmpty()) {
KafkaConnector copy = new KafkaConnectorBuilder(kafkaConnector2).build();
copy.setStatus(status);
- connectorOperations.updateStatusAsync(copy);
+ connectorOperations.updateStatusAsync(reconciliation, copy);
}
}
@@ -727,11 +730,11 @@ Function, Future> updateConnectorStat
return topics -> Future.succeededFuture(new ConnectorStatusAndConditions(status.statusResult, topics, status.conditions));
}
- public Set validate(KafkaConnector resource) {
+ public Set validate(Reconciliation reconciliation, KafkaConnector resource) {
if (resource != null) {
Set warningConditions = new LinkedHashSet<>(0);
- ResourceVisitor.visit(resource, new ValidationVisitor(resource, log, warningConditions));
+ ResourceVisitor.visit(reconciliation, resource, new ValidationVisitor(resource, LOGGER, warningConditions));
return warningConditions;
}
@@ -742,7 +745,7 @@ public Set validate(KafkaConnector resource) {
Future maybeUpdateConnectorStatus(Reconciliation reconciliation, KafkaConnector connector, ConnectorStatusAndConditions connectorStatus, Throwable error) {
KafkaConnectorStatus status = new KafkaConnectorStatus();
if (error != null) {
- log.warn("{}: Error reconciling connector {}", reconciliation, connector.getMetadata().getName(), error);
+ LOGGER.warnCr(reconciliation, "Error reconciling connector {}", connector.getMetadata().getName(), error);
}
Map statusResult = null;
@@ -755,7 +758,7 @@ Future maybeUpdateConnectorStatus(Reconciliation reconciliation, KafkaConn
connectorStatus.conditions.forEach(condition -> conditions.add(condition));
}
- Set unknownAndDeprecatedConditions = validate(connector);
+ Set unknownAndDeprecatedConditions = validate(reconciliation, connector);
unknownAndDeprecatedConditions.forEach(condition -> conditions.add(condition));
if (!Annotations.isReconciliationPausedWithAnnotation(connector)) {
@@ -797,7 +800,7 @@ protected int getActualTaskCount(KafkaConnector connector, Map s
}
}
- protected JsonObject asJson(KafkaConnectorSpec spec) {
+ protected JsonObject asJson(Reconciliation reconciliation, KafkaConnectorSpec spec) {
JsonObject connectorConfigJson = new JsonObject();
if (spec.getConfig() != null) {
for (Map.Entry cf : spec.getConfig().entrySet()) {
@@ -805,7 +808,7 @@ protected JsonObject asJson(KafkaConnectorSpec spec) {
if ("connector.class".equals(name)
|| "tasks.max".equals(name)) {
// TODO include resource namespace and name in this message
- log.warn("Configuration parameter {} in KafkaConnector.spec.config will be ignored and the value from KafkaConnector.spec will be used instead",
+ LOGGER.warnCr(reconciliation, "Configuration parameter {} in KafkaConnector.spec.config will be ignored and the value from KafkaConnector.spec will be used instead",
name);
}
connectorConfigJson.put(name, cf.getValue());
@@ -845,8 +848,8 @@ protected JsonObject asJson(KafkaConnectorSpec spec) {
if ((!(fetchedResource instanceof KafkaConnector))
&& (!(fetchedResource instanceof KafkaMirrorMaker2))
&& StatusUtils.isResourceV1alpha1(fetchedResource)) {
- log.warn("{}: {} {} needs to be upgraded from version {} to 'v1beta1' to use the status field",
- reconciliation, fetchedResource.getKind(), fetchedResource.getMetadata().getName(), fetchedResource.getApiVersion());
+ LOGGER.warnCr(reconciliation, "{} {} needs to be upgraded from version {} to 'v1beta1' to use the status field",
+ fetchedResource.getKind(), fetchedResource.getMetadata().getName(), fetchedResource.getApiVersion());
updateStatusPromise.complete();
} else {
S currentStatus = fetchedResource.getStatus();
@@ -856,26 +859,26 @@ protected JsonObject asJson(KafkaConnectorSpec spec) {
if (!ksDiff.isEmpty()) {
T resourceWithNewStatus = copyWithStatus.apply(fetchedResource, desiredStatus);
- resourceOperator.updateStatusAsync(resourceWithNewStatus).onComplete(updateRes -> {
+ resourceOperator.updateStatusAsync(reconciliation, resourceWithNewStatus).onComplete(updateRes -> {
if (updateRes.succeeded()) {
- log.debug("{}: Completed status update", reconciliation);
+ LOGGER.debugCr(reconciliation, "Completed status update");
updateStatusPromise.complete();
} else {
- log.error("{}: Failed to update status", reconciliation, updateRes.cause());
+ LOGGER.errorCr(reconciliation, "Failed to update status", updateRes.cause());
updateStatusPromise.fail(updateRes.cause());
}
});
} else {
- log.debug("{}: Status did not change", reconciliation);
+ LOGGER.debugCr(reconciliation, "Status did not change");
updateStatusPromise.complete();
}
}
} else {
- log.error("{}: Current {} resource not found", reconciliation, resource.getKind());
+ LOGGER.errorCr(reconciliation, "Current {} resource not found", resource.getKind());
updateStatusPromise.fail("Current " + resource.getKind() + " resource not found");
}
} else {
- log.error("{}: Failed to get the current {} resource and its status", reconciliation, resource.getKind(), getRes.cause());
+ LOGGER.errorCr(reconciliation, "Failed to get the current {} resource and its status", resource.getKind(), getRes.cause());
updateStatusPromise.fail(getRes.cause());
}
});
@@ -883,17 +886,17 @@ protected JsonObject asJson(KafkaConnectorSpec spec) {
return updateStatusPromise.future();
}
- Future> kafkaConnectJmxSecret(String namespace, String name, KafkaConnectCluster connectCluster) {
+ Future> kafkaConnectJmxSecret(Reconciliation reconciliation, String namespace, String name, KafkaConnectCluster connectCluster) {
if (connectCluster.isJmxAuthenticated()) {
Future secretFuture = secretOperations.getAsync(namespace, KafkaConnectCluster.jmxSecretName(name));
return secretFuture.compose(res -> {
if (res == null) {
- return secretOperations.reconcile(namespace, KafkaConnectCluster.jmxSecretName(name), connectCluster.generateJmxSecret());
+ return secretOperations.reconcile(reconciliation, namespace, KafkaConnectCluster.jmxSecretName(name), connectCluster.generateJmxSecret());
}
return Future.succeededFuture(ReconcileResult.noop(res));
});
}
- return secretOperations.reconcile(namespace, KafkaConnectCluster.jmxSecretName(name), null);
+ return secretOperations.reconcile(reconciliation, namespace, KafkaConnectCluster.jmxSecretName(name), null);
}
}
diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperator.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperator.java
index c219930ae3..f5941415ee 100644
--- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperator.java
+++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperator.java
@@ -85,6 +85,7 @@
import io.strimzi.operator.common.Annotations;
import io.strimzi.operator.common.BackOff;
import io.strimzi.operator.common.InvalidConfigurationException;
+import io.strimzi.operator.common.ReconciliationLogger;
import io.strimzi.operator.common.MetricsAndLogging;
import io.strimzi.operator.common.PasswordGenerator;
import io.strimzi.operator.common.Reconciliation;
@@ -112,8 +113,6 @@
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.common.KafkaException;
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
import org.quartz.CronExpression;
import java.nio.charset.StandardCharsets;
@@ -157,7 +156,7 @@
*/
@SuppressWarnings({"checkstyle:ClassDataAbstractionCoupling", "checkstyle:ClassFanOutComplexity", "checkstyle:JavaNCSS"})
public class KafkaAssemblyOperator extends AbstractAssemblyOperator, KafkaSpec, KafkaStatus> {
- private static final Logger log = LogManager.getLogger(KafkaAssemblyOperator.class.getName());
+ private static final ReconciliationLogger LOGGER = ReconciliationLogger.create(KafkaAssemblyOperator.class.getName());
private final long operationTimeoutMs;
private final String operatorNamespace;
@@ -463,7 +462,7 @@ Future updateStatus(KafkaStatus desiredStatus) {
if (kafka != null) {
if ((Constants.RESOURCE_GROUP_NAME + "/" + Constants.V1ALPHA1).equals(kafka.getApiVersion())) {
- log.warn("{}: The resource needs to be upgraded from version {} to 'v1beta1' to use the status field", reconciliation, kafka.getApiVersion());
+ LOGGER.warnCr(reconciliation, "The resource needs to be upgraded from version {} to 'v1beta1' to use the status field", kafka.getApiVersion());
updateStatusPromise.complete();
} else {
KafkaStatus currentStatus = kafka.getStatus();
@@ -473,26 +472,26 @@ Future updateStatus(KafkaStatus desiredStatus) {
if (!ksDiff.isEmpty()) {
Kafka resourceWithNewStatus = new KafkaBuilder(kafka).withStatus(desiredStatus).build();
- crdOperator.updateStatusAsync(resourceWithNewStatus).onComplete(updateRes -> {
+ crdOperator.updateStatusAsync(reconciliation, resourceWithNewStatus).onComplete(updateRes -> {
if (updateRes.succeeded()) {
- log.debug("{}: Completed status update", reconciliation);
+ LOGGER.debugCr(reconciliation, "Completed status update");
updateStatusPromise.complete();
} else {
- log.error("{}: Failed to update status", reconciliation, updateRes.cause());
+ LOGGER.errorCr(reconciliation, "Failed to update status", updateRes.cause());
updateStatusPromise.fail(updateRes.cause());
}
});
} else {
- log.debug("{}: Status did not change", reconciliation);
+ LOGGER.debugCr(reconciliation, "Status did not change");
updateStatusPromise.complete();
}
}
} else {
- log.error("{}: Current Kafka resource not found", reconciliation);
+ LOGGER.errorCr(reconciliation, "Current Kafka resource not found");
updateStatusPromise.fail("Current Kafka resource not found");
}
} else {
- log.error("{}: Failed to get the current Kafka resource and its status", reconciliation, getRes.cause());
+ LOGGER.errorCr(reconciliation, "Failed to get the current Kafka resource and its status", getRes.cause());
updateStatusPromise.fail(getRes.cause());
}
});
@@ -513,7 +512,7 @@ Future initialStatus() {
Kafka kafka = getRes.result();
if (kafka != null && kafka.getStatus() == null) {
- log.debug("{}: Setting the initial status for a new resource", reconciliation);
+ LOGGER.debugCr(reconciliation, "Setting the initial status for a new resource");
Condition deployingCondition = new ConditionBuilder()
.withLastTransitionTime(StatusUtils.iso8601(dateSupplier()))
@@ -529,11 +528,11 @@ Future initialStatus() {
updateStatus(initialStatus).map(this).onComplete(initialStatusPromise);
} else {
- log.debug("{}: Status is already set. No need to set initial status", reconciliation);
+ LOGGER.debugCr(reconciliation, "Status is already set. No need to set initial status");
initialStatusPromise.complete(this);
}
} else {
- log.error("{}: Failed to get the current Kafka resource and its status", reconciliation, getRes.cause());
+ LOGGER.errorCr(reconciliation, "Failed to get the current Kafka resource and its status", getRes.cause());
initialStatusPromise.fail(getRes.cause());
}
});
@@ -628,11 +627,11 @@ Future reconcileCas(Supplier dateSupplier) {
clusterCaCertAnnotations = kafkaAssembly.getSpec().getKafka().getTemplate().getClusterCaCert().getMetadata().getAnnotations();
}
- this.clusterCa = new ClusterCa(certManager, passwordGenerator, name, clusterCaCertSecret, clusterCaKeySecret,
+ this.clusterCa = new ClusterCa(reconciliation, certManager, passwordGenerator, name, clusterCaCertSecret,
+ clusterCaKeySecret,
ModelUtils.getCertificateValidity(clusterCaConfig),
ModelUtils.getRenewalDays(clusterCaConfig),
- clusterCaConfig == null || clusterCaConfig.isGenerateCertificateAuthority(),
- clusterCaConfig != null ? clusterCaConfig.getCertificateExpirationPolicy() : null);
+ clusterCaConfig == null || clusterCaConfig.isGenerateCertificateAuthority(), clusterCaConfig != null ? clusterCaConfig.getCertificateExpirationPolicy() : null);
clusterCa.createRenewOrReplace(
reconciliation.namespace(), reconciliation.name(), caLabels.toMap(),
clusterCaCertLabels, clusterCaCertAnnotations,
@@ -646,13 +645,13 @@ Future reconcileCas(Supplier dateSupplier) {
// When we are not supposed to generate the CA but it does not exist, we should just throw an error
checkCustomCaSecret(clientsCaConfig, clientsCaCertSecret, clientsCaKeySecret, "Clients CA");
- this.clientsCa = new ClientsCa(certManager, passwordGenerator,
- clientsCaCertName, clientsCaCertSecret,
- clientsCaKeyName, clientsCaKeySecret,
+ this.clientsCa = new ClientsCa(reconciliation, certManager,
+ passwordGenerator, clientsCaCertName,
+ clientsCaCertSecret, clientsCaKeyName,
+ clientsCaKeySecret,
ModelUtils.getCertificateValidity(clientsCaConfig),
ModelUtils.getRenewalDays(clientsCaConfig),
- clientsCaConfig == null || clientsCaConfig.isGenerateCertificateAuthority(),
- clientsCaConfig != null ? clientsCaConfig.getCertificateExpirationPolicy() : null);
+ clientsCaConfig == null || clientsCaConfig.isGenerateCertificateAuthority(), clientsCaConfig != null ? clientsCaConfig.getCertificateExpirationPolicy() : null);
clientsCa.createRenewOrReplace(reconciliation.namespace(), reconciliation.name(),
caLabels.toMap(), emptyMap(), emptyMap(),
clientsCaConfig != null && !clientsCaConfig.isGenerateSecretOwnerReference() ? null : ownerRef,
@@ -661,14 +660,14 @@ Future reconcileCas(Supplier dateSupplier) {
List secretReconciliations = new ArrayList<>(2);
if (clusterCaConfig == null || clusterCaConfig.isGenerateCertificateAuthority()) {
- Future clusterSecretReconciliation = secretOperations.reconcile(reconciliation.namespace(), clusterCaCertName, this.clusterCa.caCertSecret())
- .compose(ignored -> secretOperations.reconcile(reconciliation.namespace(), clusterCaKeyName, this.clusterCa.caKeySecret()));
+ Future clusterSecretReconciliation = secretOperations.reconcile(reconciliation, reconciliation.namespace(), clusterCaCertName, this.clusterCa.caCertSecret())
+ .compose(ignored -> secretOperations.reconcile(reconciliation, reconciliation.namespace(), clusterCaKeyName, this.clusterCa.caKeySecret()));
secretReconciliations.add(clusterSecretReconciliation);
}
if (clientsCaConfig == null || clientsCaConfig.isGenerateCertificateAuthority()) {
- Future clientsSecretReconciliation = secretOperations.reconcile(reconciliation.namespace(), clientsCaCertName, this.clientsCa.caCertSecret())
- .compose(ignored -> secretOperations.reconcile(reconciliation.namespace(), clientsCaKeyName, this.clientsCa.caKeySecret()));
+ Future clientsSecretReconciliation = secretOperations.reconcile(reconciliation, reconciliation.namespace(), clientsCaCertName, this.clientsCa.caCertSecret())
+ .compose(ignored -> secretOperations.reconcile(reconciliation, reconciliation.namespace(), clientsCaKeyName, this.clientsCa.caKeySecret()));
secretReconciliations.add(clientsSecretReconciliation);
}
@@ -721,12 +720,12 @@ Future rollingUpdateForNewCaKey() {
if (!reason.isEmpty()) {
Future zkRollFuture;
Function> rollPodAndLogReason = pod -> {
- log.debug("{}: Rolling Pod {} to {}", reconciliation, pod.getMetadata().getName(), reason);
+ LOGGER.debugCr(reconciliation, "Rolling Pod {} to {}", pod.getMetadata().getName(), reason);
return reason;
};
if (this.clusterCa.keyReplaced()) {
zkRollFuture = zkSetOperations.getAsync(namespace, ZookeeperCluster.zookeeperClusterName(name))
- .compose(sts -> zkSetOperations.maybeRollingUpdate(sts, rollPodAndLogReason,
+ .compose(sts -> zkSetOperations.maybeRollingUpdate(reconciliation, sts, rollPodAndLogReason,
clusterCa.caCertSecret(),
oldCoSecret));
} else {
@@ -734,7 +733,7 @@ Future rollingUpdateForNewCaKey() {
}
return zkRollFuture
.compose(i -> kafkaSetOperations.getAsync(namespace, KafkaCluster.kafkaClusterName(name)))
- .compose(sts -> new KafkaRoller(vertx, reconciliation, podOperations, 1_000, operationTimeoutMs,
+ .compose(sts -> new KafkaRoller(reconciliation, vertx, podOperations, 1_000, operationTimeoutMs,
() -> new BackOff(250, 2, 10), sts, clusterCa.caCertSecret(), oldCoSecret, adminClientProvider,
kafkaCluster.getBrokersConfiguration(), kafkaLogging, kafkaCluster.getKafkaVersion(), true)
.rollingRestart(rollPodAndLogReason))
@@ -758,8 +757,8 @@ Future rollDeploymentIfExists(String deploymentName, String reasons) {
return deploymentOperations.getAsync(namespace, deploymentName)
.compose(dep -> {
if (dep != null) {
- log.debug("{}: Rolling Deployment {} to {}", reconciliation, deploymentName, reasons);
- return deploymentOperations.rollingUpdate(namespace, deploymentName, operationTimeoutMs);
+ LOGGER.debugCr(reconciliation, "Rolling Deployment {} to {}", deploymentName, reasons);
+ return deploymentOperations.rollingUpdate(reconciliation, namespace, deploymentName, operationTimeoutMs);
} else {
return Future.succeededFuture();
}
@@ -787,7 +786,7 @@ Future kafkaManualPodRollingUpdate(StatefulSet sts) {
if (!podsToRoll.isEmpty()) {
return maybeRollKafka(sts, pod -> {
if (pod != null && podsToRoll.contains(pod.getMetadata().getName())) {
- log.debug("{}: Rolling Kafka pod {} due to manual rolling update annotation on a pod", reconciliation, pod.getMetadata().getName());
+ LOGGER.debugCr(reconciliation, "Rolling Kafka pod {} due to manual rolling update annotation on a pod", pod.getMetadata().getName());
return singletonList("manual rolling update annotation on a pod");
} else {
return new ArrayList<>();
@@ -818,8 +817,8 @@ Future kafkaManualRollingUpdate() {
if (pod == null) {
throw new ConcurrentDeletionException("Unexpectedly pod no longer exists during roll of StatefulSet.");
}
- log.debug("{}: Rolling Kafka pod {} due to manual rolling update annotation",
- reconciliation, pod.getMetadata().getName());
+ LOGGER.debugCr(reconciliation, "Rolling Kafka pod {} due to manual rolling update annotation",
+ pod.getMetadata().getName());
return singletonList("manual rolling update");
});
} else {
@@ -855,9 +854,9 @@ Future zkManualPodRollingUpdate(StatefulSet sts) {
}
if (!podsToRoll.isEmpty()) {
- return zkSetOperations.maybeRollingUpdate(sts, pod -> {
+ return zkSetOperations.maybeRollingUpdate(reconciliation, sts, pod -> {
if (pod != null && podsToRoll.contains(pod.getMetadata().getName())) {
- log.debug("{}: Rolling ZooKeeper pod {} due to manual rolling update annotation on a pod", reconciliation, pod.getMetadata().getName());
+ LOGGER.debugCr(reconciliation, "Rolling ZooKeeper pod {} due to manual rolling update annotation on a pod", pod.getMetadata().getName());
return singletonList("manual rolling update annotation on a pod");
} else {
return null;
@@ -884,9 +883,9 @@ Future zkManualRollingUpdate() {
if (sts != null) {
if (Annotations.booleanAnnotation(sts, Annotations.ANNO_STRIMZI_IO_MANUAL_ROLLING_UPDATE, false)) {
// User trigger rolling update of the whole StatefulSet
- return zkSetOperations.maybeRollingUpdate(sts, pod -> {
- log.debug("{}: Rolling Zookeeper pod {} due to manual rolling update",
- reconciliation, pod.getMetadata().getName());
+ return zkSetOperations.maybeRollingUpdate(reconciliation, sts, pod -> {
+ LOGGER.debugCr(reconciliation, "Rolling Zookeeper pod {} due to manual rolling update",
+ pod.getMetadata().getName());
return singletonList("manual rolling update");
});
} else {
@@ -905,7 +904,7 @@ Future zkManualRollingUpdate() {
Future zkVersionChange() {
if (versionChange.isNoop()) {
- log.debug("Kafka.spec.kafka.version is unchanged therefore no change to Zookeeper is required");
+ LOGGER.debugCr(reconciliation, "Kafka.spec.kafka.version is unchanged therefore no change to Zookeeper is required");
} else {
String versionChangeType;
@@ -916,7 +915,7 @@ Future zkVersionChange() {
}
if (versionChange.requiresZookeeperChange()) {
- log.info("Kafka {} from {} to {} requires Zookeeper {} from {} to {}",
+ LOGGER.infoCr(reconciliation, "Kafka {} from {} to {} requires Zookeeper {} from {} to {}",
versionChangeType,
versionChange.from().version(),
versionChange.to().version(),
@@ -924,7 +923,7 @@ Future zkVersionChange() {
versionChange.from().zookeeperVersion(),
versionChange.to().zookeeperVersion());
} else {
- log.info("Kafka {} from {} to {} requires no change in Zookeeper version",
+ LOGGER.infoCr(reconciliation, "Kafka {} from {} to {} requires no change in Zookeeper version",
versionChangeType,
versionChange.from().version(),
versionChange.to().version());
@@ -933,7 +932,7 @@ Future zkVersionChange() {
// Get the zookeeper image currently set in the Kafka CR or, if that is not set, the image from the target Kafka version
String newZkImage = versions.kafkaImage(kafkaAssembly.getSpec().getZookeeper().getImage(), versionChange.to().version());
- log.debug("Setting new Zookeeper image: " + newZkImage);
+ LOGGER.debugCr(reconciliation, "Setting new Zookeeper image: {}", newZkImage);
this.zkCluster.setImage(newZkImage);
}
@@ -952,7 +951,7 @@ public Future waitForQuiescence(StatefulSet sts) {
boolean notUpToDate = !isPodUpToDate(sts, pod);
List reason = emptyList();
if (notUpToDate) {
- log.debug("Rolling pod {} prior to upgrade", pod.getMetadata().getName());
+ LOGGER.debugCr(reconciliation, "Rolling pod {} prior to upgrade", pod.getMetadata().getName());
reason = singletonList("upgrade quiescence");
}
return reason;
@@ -973,7 +972,7 @@ public Future waitForQuiescence(StatefulSet sts) {
*/
Future prepareVersionChange() {
if (versionChange.isNoop()) {
- log.debug("{}: No Kafka version change", reconciliation);
+ LOGGER.debugCr(reconciliation, "No Kafka version change");
if (kafkaCluster.getInterBrokerProtocolVersion() == null) {
// When IBPV is not set, we set it to current Kafka version
@@ -981,7 +980,7 @@ Future prepareVersionChange() {
if (highestInterBrokerProtocolVersion != null
&& !kafkaCluster.getKafkaVersion().protocolVersion().equals(highestInterBrokerProtocolVersion)) {
- log.info("{}: Upgrading Kafka inter.broker.protocol.version from {} to {}", reconciliation, highestInterBrokerProtocolVersion, kafkaCluster.getKafkaVersion().protocolVersion());
+ LOGGER.infoCr(reconciliation, "Upgrading Kafka inter.broker.protocol.version from {} to {}", highestInterBrokerProtocolVersion, kafkaCluster.getKafkaVersion().protocolVersion());
if (kafkaCluster.getLogMessageFormatVersion() == null
&& highestLogMessageFormatVersion != null) {
@@ -998,20 +997,20 @@ Future prepareVersionChange() {
if (highestLogMessageFormatVersion != null &&
!kafkaCluster.getKafkaVersion().messageVersion().equals(highestLogMessageFormatVersion)) {
- log.info("{}: Upgrading Kafka log.message.format.version from {} to {}", reconciliation, highestLogMessageFormatVersion, kafkaCluster.getKafkaVersion().messageVersion());
+ LOGGER.infoCr(reconciliation, "Upgrading Kafka log.message.format.version from {} to {}", highestLogMessageFormatVersion, kafkaCluster.getKafkaVersion().messageVersion());
}
}
return Future.succeededFuture(this);
} else {
if (versionChange.isUpgrade()) {
- log.info("Kafka is upgrading from {} to {}", versionChange.from().version(), versionChange.to().version());
+ LOGGER.infoCr(reconciliation, "Kafka is upgrading from {} to {}", versionChange.from().version(), versionChange.to().version());
// We make sure that the highest log.message.format.version or inter.broker.protocol.version version
// used by any of the brokers is not higher than the broker version we upgrade from.
if ((highestLogMessageFormatVersion != null && compareDottedVersions(versionChange.from().messageVersion(), highestLogMessageFormatVersion) < 0)
|| (highestInterBrokerProtocolVersion != null && compareDottedVersions(versionChange.from().protocolVersion(), highestInterBrokerProtocolVersion) < 0)) {
- log.warn("log.message.format.version ({}) and inter.broker.protocol.version ({}) used by the brokers have to be lower or equal to the Kafka broker version we upgrade from ({})", highestLogMessageFormatVersion, highestInterBrokerProtocolVersion, versionChange.from().version());
+ LOGGER.warnCr(reconciliation, "log.message.format.version ({}) and inter.broker.protocol.version ({}) used by the brokers have to be lower or equal to the Kafka broker version we upgrade from ({})", highestLogMessageFormatVersion, highestInterBrokerProtocolVersion, versionChange.from().version());
throw new KafkaUpgradeException("log.message.format.version (" + highestLogMessageFormatVersion + ") and inter.broker.protocol.version (" + highestInterBrokerProtocolVersion + ") used by the brokers have to be lower or equal to the Kafka broker version we upgrade from (" + versionChange.from().version() + ")");
}
@@ -1035,7 +1034,7 @@ Future prepareVersionChange() {
}
} else {
// Has to be a downgrade
- log.info("Kafka is downgrading from {} to {}", versionChange.from().version(), versionChange.to().version());
+ LOGGER.infoCr(reconciliation, "Kafka is downgrading from {} to {}", versionChange.from().version(), versionChange.to().version());
// The currently used log.message.format.version and inter.broker.protocol.version cannot be higher
// than the version we are downgrading to. If it is we fail the reconciliation. If they are not set,
@@ -1045,7 +1044,7 @@ Future prepareVersionChange() {
|| compareDottedVersions(versionChange.to().messageVersion(), highestLogMessageFormatVersion) < 0
|| highestInterBrokerProtocolVersion == null
|| compareDottedVersions(versionChange.to().protocolVersion(), highestInterBrokerProtocolVersion) < 0) {
- log.warn("log.message.format.version ({}) and inter.broker.protocol.version ({}) used by the brokers have to be set and be lower or equal to the Kafka broker version we downgrade to ({})", highestLogMessageFormatVersion, highestInterBrokerProtocolVersion, versionChange.to().version());
+ LOGGER.warnCr(reconciliation, "log.message.format.version ({}) and inter.broker.protocol.version ({}) used by the brokers have to be set and be lower or equal to the Kafka broker version we downgrade to ({})", highestLogMessageFormatVersion, highestInterBrokerProtocolVersion, versionChange.to().version());
throw new KafkaUpgradeException("log.message.format.version (" + highestLogMessageFormatVersion + ") and inter.broker.protocol.version (" + highestInterBrokerProtocolVersion + ") used by the brokers have to be set and be lower or equal to the Kafka broker version we downgrade to (" + versionChange.to().version() + ")");
}
@@ -1069,7 +1068,7 @@ Future prepareVersionChange() {
// validation. But we still double check it as safety.
if (compareDottedVersions(versionChange.to().messageVersion(), desiredLogMessageFormat) < 0
|| compareDottedVersions(versionChange.to().protocolVersion(), desiredInterBrokerProtocol) < 0) {
- log.warn("log.message.format.version ({}) and inter.broker.protocol.version ({}) used in the Kafka CR have to be set and be lower or equal to the Kafka broker version we downgrade to ({})", highestLogMessageFormatVersion, highestInterBrokerProtocolVersion, versionChange.to().version());
+ LOGGER.warnCr(reconciliation, "log.message.format.version ({}) and inter.broker.protocol.version ({}) used in the Kafka CR have to be set and be lower or equal to the Kafka broker version we downgrade to ({})", highestLogMessageFormatVersion, highestInterBrokerProtocolVersion, versionChange.to().version());
throw new KafkaUpgradeException("log.message.format.version and inter.broker.protocol.version used in the Kafka CR have to be set and be lower or equal to the Kafka broker version we downgrade to");
}
}
@@ -1121,7 +1120,7 @@ Future maybeRollKafka(StatefulSet sts, Function> podNeed
*/
Future maybeRollKafka(StatefulSet sts, Function> podNeedsRestart, boolean allowReconfiguration) {
return adminClientSecrets()
- .compose(compositeFuture -> new KafkaRoller(vertx, reconciliation, podOperations, 1_000, operationTimeoutMs,
+ .compose(compositeFuture -> new KafkaRoller(reconciliation, vertx, podOperations, 1_000, operationTimeoutMs,
() -> new BackOff(250, 2, 10), sts, compositeFuture.resultAt(0), compositeFuture.resultAt(1), adminClientProvider,
kafkaCluster.getBrokersConfiguration(), kafkaLogging, kafkaCluster.getKafkaVersion(), allowReconfiguration)
.rollingRestart(podNeedsRestart));
@@ -1136,7 +1135,7 @@ Future getZookeeperDescription() {
this.zkCurrentReplicas = sts.getSpec().getReplicas();
}
- this.zkCluster = ZookeeperCluster.fromCrd(kafkaAssembly, versions, oldStorage, zkCurrentReplicas != null ? zkCurrentReplicas : 0);
+ this.zkCluster = ZookeeperCluster.fromCrd(reconciliation, kafkaAssembly, versions, oldStorage, zkCurrentReplicas != null ? zkCurrentReplicas : 0);
// We are upgrading from previous Strimzi version which has a sidecars. The older sidecar
// configurations allowed only older versions of TLS to be used by default. But the Zookeeper
@@ -1152,7 +1151,7 @@ Future getZookeeperDescription() {
zkCluster.getConfiguration().setConfigOption("ssl.enabledProtocols", "TLSv1.2,TLSv1.1,TLSv1");
}
- return Util.metricsAndLogging(configMapOperations, kafkaAssembly.getMetadata().getNamespace(),
+ return Util.metricsAndLogging(reconciliation, configMapOperations, kafkaAssembly.getMetadata().getNamespace(),
zkCluster.getLogging(),
zkCluster.getMetricsConfigInCm());
})
@@ -1181,21 +1180,21 @@ Future withVoid(Future> r) {
}
Future zookeeperServiceAccount() {
- return withVoid(serviceAccountOperations.reconcile(namespace,
+ return withVoid(serviceAccountOperations.reconcile(reconciliation, namespace,
ZookeeperCluster.containerServiceAccountName(zkCluster.getCluster()),
zkCluster.generateServiceAccount()));
}
Future zkService() {
- return withVoid(serviceOperations.reconcile(namespace, zkCluster.getServiceName(), zkCluster.generateService()));
+ return withVoid(serviceOperations.reconcile(reconciliation, namespace, zkCluster.getServiceName(), zkCluster.generateService()));
}
Future zkHeadlessService() {
- return withVoid(serviceOperations.reconcile(namespace, zkCluster.getHeadlessServiceName(), zkCluster.generateHeadlessService()));
+ return withVoid(serviceOperations.reconcile(reconciliation, namespace, zkCluster.getHeadlessServiceName(), zkCluster.generateHeadlessService()));
}
Future zkAncillaryCm() {
- return withVoid(configMapOperations.reconcile(namespace, zkCluster.getAncillaryConfigMapName(), zkMetricsAndLogsConfigMap));
+ return withVoid(configMapOperations.reconcile(reconciliation, namespace, zkCluster.getAncillaryConfigMapName(), zkMetricsAndLogsConfigMap));
}
/**
@@ -1207,7 +1206,7 @@ Future zkAncillaryCm() {
*/
Future updateCertificateSecretWithDiff(String secretName, Secret secret) {
return secretOperations.getAsync(namespace, secretName)
- .compose(oldSecret -> secretOperations.reconcile(namespace, secretName, secret)
+ .compose(oldSecret -> secretOperations.reconcile(reconciliation, namespace, secretName, secret)
.map(res -> {
if (res instanceof ReconcileResult.Patched) {
// The secret is patched and some changes to the existing certificates actually occured
@@ -1228,24 +1227,24 @@ Future zkNodesSecret() {
}
Future zkNetPolicy() {
- return withVoid(networkPolicyOperator.reconcile(namespace, ZookeeperCluster.policyName(name), zkCluster.generateNetworkPolicy(operatorNamespace, operatorNamespaceLabels)));
+ return withVoid(networkPolicyOperator.reconcile(reconciliation, namespace, ZookeeperCluster.policyName(name), zkCluster.generateNetworkPolicy(operatorNamespace, operatorNamespaceLabels)));
}
Future zkPodDisruptionBudget() {
- return withVoid(podDisruptionBudgetOperator.reconcile(namespace, zkCluster.getName(), zkCluster.generatePodDisruptionBudget()));
+ return withVoid(podDisruptionBudgetOperator.reconcile(reconciliation, namespace, zkCluster.getName(), zkCluster.generatePodDisruptionBudget()));
}
Future zkStatefulSet() {
StatefulSet zkSts = zkCluster.generateStatefulSet(pfa.isOpenshift(), imagePullPolicy, imagePullSecrets);
Annotations.annotations(zkSts.getSpec().getTemplate()).put(Ca.ANNO_STRIMZI_IO_CLUSTER_CA_CERT_GENERATION, String.valueOf(getCaCertGeneration(this.clusterCa)));
Annotations.annotations(zkSts.getSpec().getTemplate()).put(Annotations.ANNO_STRIMZI_LOGGING_HASH, zkLoggingHash);
- return withZkDiff(zkSetOperations.reconcile(namespace, zkCluster.getName(), zkSts));
+ return withZkDiff(zkSetOperations.reconcile(reconciliation, namespace, zkCluster.getName(), zkSts));
}
Future zkRollingUpdate() {
// Scale-down and Scale-up might have change the STS. we should get a fresh one.
return zkSetOperations.getAsync(namespace, ZookeeperCluster.zookeeperClusterName(name))
- .compose(sts -> zkSetOperations.maybeRollingUpdate(sts,
+ .compose(sts -> zkSetOperations.maybeRollingUpdate(reconciliation, sts,
pod -> getReasonsToRestartPod(zkDiffs.resource(), pod, existingZookeeperCertsChanged, this.clusterCa)))
.map(this);
}
@@ -1303,7 +1302,7 @@ Future zkScaler(int connectToReplicas) {
DnsNameGenerator.podDnsNameWithoutClusterDomain(namespace,
KafkaResources.zookeeperHeadlessServiceName(name), zkCluster.getPodName(i));
- ZookeeperScaler zkScaler = zkScalerProvider.createZookeeperScaler(vertx, zkConnectionString(connectToReplicas, zkNodeAddress), zkNodeAddress, clusterCaCertSecret, coKeySecret, operationTimeoutMs);
+ ZookeeperScaler zkScaler = zkScalerProvider.createZookeeperScaler(reconciliation, vertx, zkConnectionString(connectToReplicas, zkNodeAddress), zkNodeAddress, clusterCaCertSecret, coKeySecret, operationTimeoutMs);
return Future.succeededFuture(zkScaler);
});
@@ -1314,7 +1313,7 @@ Future zkScalingUp() {
if (zkCurrentReplicas != null
&& zkCurrentReplicas < desired) {
- log.info("{}: Scaling Zookeeper up from {} to {} replicas", reconciliation, zkCurrentReplicas, desired);
+ LOGGER.infoCr(reconciliation, "Scaling Zookeeper up from {} to {} replicas", zkCurrentReplicas, desired);
return zkScaler(zkCurrentReplicas)
.compose(zkScaler -> {
@@ -1327,7 +1326,7 @@ Future zkScalingUp() {
if (res.succeeded()) {
scalingPromise.complete(res.result());
} else {
- log.warn("{}: Failed to scale Zookeeper", reconciliation, res.cause());
+ LOGGER.warnCr(reconciliation, "Failed to scale Zookeeper", res.cause());
scalingPromise.fail(res.cause());
}
});
@@ -1342,8 +1341,8 @@ Future zkScalingUp() {
Future zkScalingUpByOne(ZookeeperScaler zkScaler, int current, int desired) {
if (current < desired) {
- return zkSetOperations.scaleUp(namespace, zkCluster.getName(), current + 1)
- .compose(ignore -> podOperations.readiness(namespace, zkCluster.getPodName(current), 1_000, operationTimeoutMs))
+ return zkSetOperations.scaleUp(reconciliation, namespace, zkCluster.getName(), current + 1)
+ .compose(ignore -> podOperations.readiness(reconciliation, namespace, zkCluster.getPodName(current), 1_000, operationTimeoutMs))
.compose(ignore -> zkScaler.scale(current + 1))
.compose(ignore -> zkScalingUpByOne(zkScaler, current + 1, desired));
} else {
@@ -1357,7 +1356,7 @@ Future zkScalingDown() {
if (zkCurrentReplicas != null
&& zkCurrentReplicas > desired) {
// With scaling
- log.info("{}: Scaling Zookeeper down from {} to {} replicas", reconciliation, zkCurrentReplicas, desired);
+ LOGGER.infoCr(reconciliation, "Scaling Zookeeper down from {} to {} replicas", zkCurrentReplicas, desired);
// No need to check for pod readiness since we run right after the readiness check
return zkScaler(desired)
@@ -1371,7 +1370,7 @@ Future zkScalingDown() {
if (res.succeeded()) {
scalingPromise.complete(res.result());
} else {
- log.warn("{}: Failed to scale Zookeeper", reconciliation, res.cause());
+ LOGGER.warnCr(reconciliation, "Failed to scale Zookeeper", res.cause());
scalingPromise.fail(res.cause());
}
});
@@ -1388,7 +1387,7 @@ Future zkScalingDownByOne(ZookeeperScaler zkScaler, int cur
if (current > desired) {
return podsReady(zkCluster, current - 1)
.compose(ignore -> zkScaler.scale(current - 1))
- .compose(ignore -> zkSetOperations.scaleDown(namespace, zkCluster.getName(), current - 1))
+ .compose(ignore -> zkSetOperations.scaleDown(reconciliation, namespace, zkCluster.getName(), current - 1))
.compose(ignore -> zkScalingDownByOne(zkScaler, current - 1, desired));
} else {
return Future.succeededFuture(this);
@@ -1398,7 +1397,7 @@ Future zkScalingDownByOne(ZookeeperScaler zkScaler, int cur
Future zkScalingCheck() {
// No scaling, but we should check the configuration
// This can cover any previous failures in the Zookeeper reconfiguration
- log.debug("{}: Verifying that Zookeeper is configured to run with {} replicas", reconciliation, zkCurrentReplicas);
+ LOGGER.debugCr(reconciliation, "Verifying that Zookeeper is configured to run with {} replicas", zkCurrentReplicas);
// No need to check for pod readiness since we run right after the readiness check
return zkScaler(zkCluster.getReplicas())
@@ -1411,7 +1410,7 @@ Future zkScalingCheck() {
if (res.succeeded()) {
scalingPromise.complete(this);
} else {
- log.warn("{}: Failed to verify Zookeeper configuration", res.cause());
+ LOGGER.warnCr(reconciliation, "Failed to verify Zookeeper configuration", res.cause());
scalingPromise.fail(res.cause());
}
});
@@ -1421,11 +1420,11 @@ Future zkScalingCheck() {
}
Future zkServiceEndpointReadiness() {
- return withVoid(serviceOperations.endpointReadiness(namespace, zkCluster.getServiceName(), 1_000, operationTimeoutMs));
+ return withVoid(serviceOperations.endpointReadiness(reconciliation, namespace, zkCluster.getServiceName(), 1_000, operationTimeoutMs));
}
Future zkHeadlessServiceEndpointReadiness() {
- return withVoid(serviceOperations.endpointReadiness(namespace, zkCluster.getHeadlessServiceName(), 1_000, operationTimeoutMs));
+ return withVoid(serviceOperations.endpointReadiness(reconciliation, namespace, zkCluster.getHeadlessServiceName(), 1_000, operationTimeoutMs));
}
Future zkGenerateCertificates(Supplier dateSupplier) {
@@ -1456,7 +1455,7 @@ Future zkGenerateCertificates(Supplier dateSupplier)
this.kafkaStsAlreadyExists = true;
}
- this.kafkaCluster = KafkaCluster.fromCrd(kafkaAssembly, versions, oldStorage, kafkaCurrentReplicas);
+ this.kafkaCluster = KafkaCluster.fromCrd(reconciliation, kafkaAssembly, versions, oldStorage, kafkaCurrentReplicas);
this.kafkaBootstrapDnsName.addAll(ListenersUtils.alternativeNames(kafkaCluster.getListeners()));
//return Future.succeededFuture(this);
@@ -1522,7 +1521,7 @@ Future zkGenerateCertificates(Supplier dateSupplier)
// Either Pods or StatefulSet already exist. But none of them contains the version
// annotation. This suggests they are not created by the current versions of Strimzi.
// Without the annotation, we cannot detect the Kafka version and decide on upgrade.
- log.warn("Kafka Pods or StatefulSet exist, but do not contain the {} annotation to detect their version. Kafka upgrade cannot be detected.", ANNO_STRIMZI_IO_KAFKA_VERSION);
+ LOGGER.warnCr(reconciliation, "Kafka Pods or StatefulSet exist, but do not contain the {} annotation to detect their version. Kafka upgrade cannot be detected.", ANNO_STRIMZI_IO_KAFKA_VERSION);
throw new KafkaUpgradeException("Kafka Pods or StatefulSet exist, but do not contain the " + ANNO_STRIMZI_IO_KAFKA_VERSION + " annotation to detect their version. Kafka upgrade cannot be detected.");
}
} else if (lowestKafkaVersion.equals(highestKafkaVersion)) {
@@ -1548,7 +1547,7 @@ Future withKafkaDiff(Future> r
}
Future kafkaInitServiceAccount() {
- return withVoid(serviceAccountOperations.reconcile(namespace,
+ return withVoid(serviceAccountOperations.reconcile(reconciliation, namespace,
kafkaCluster.getServiceAccountName(),
kafkaCluster.generateServiceAccount()));
}
@@ -1556,8 +1555,8 @@ Future kafkaInitServiceAccount() {
Future kafkaInitClusterRoleBinding() {
ClusterRoleBinding desired = kafkaCluster.generateClusterRoleBinding(namespace);
- return withVoid(withIgnoreRbacError(
- clusterRoleBindingOperations.reconcile(
+ return withVoid(withIgnoreRbacError(reconciliation,
+ clusterRoleBindingOperations.reconcile(reconciliation,
KafkaResources.initContainerClusterRoleBindingName(name, namespace),
desired),
desired
@@ -1565,7 +1564,7 @@ Future kafkaInitClusterRoleBinding() {
}
Future kafkaScaleDown() {
- return withVoid(kafkaSetOperations.scaleDown(namespace, kafkaCluster.getName(), kafkaCluster.getReplicas()));
+ return withVoid(kafkaSetOperations.scaleDown(reconciliation, namespace, kafkaCluster.getName(), kafkaCluster.getReplicas()));
}
/**
@@ -1589,20 +1588,20 @@ Future kafkaServices() {
List serviceFutures = new ArrayList<>(services.size());
List existingServiceNames = existingServices.stream().map(svc -> svc.getMetadata().getName()).collect(Collectors.toList());
- log.debug("{}: Reconciling existing Services {} against the desired services", reconciliation, existingServiceNames);
+ LOGGER.debugCr(reconciliation, "Reconciling existing Services {} against the desired services", existingServiceNames);
// Update desired services
for (Service service : services) {
String serviceName = service.getMetadata().getName();
existingServiceNames.remove(serviceName);
- serviceFutures.add(serviceOperations.reconcile(namespace, serviceName, service));
+ serviceFutures.add(serviceOperations.reconcile(reconciliation, namespace, serviceName, service));
}
- log.debug("{}: Services {} should be deleted", reconciliation, existingServiceNames);
+ LOGGER.debugCr(reconciliation, "Services {} should be deleted", existingServiceNames);
// Delete services which match our selector but are not desired anymore
for (String serviceName : existingServiceNames) {
- serviceFutures.add(serviceOperations.reconcile(namespace, serviceName, null));
+ serviceFutures.add(serviceOperations.reconcile(reconciliation, namespace, serviceName, null));
}
return CompositeFuture.join(serviceFutures);
@@ -1631,20 +1630,20 @@ Future kafkaRoutes() {
List routeFutures = new ArrayList<>(routes.size());
List existingRouteNames = existingRoutes.stream().map(route -> route.getMetadata().getName()).collect(Collectors.toList());
- log.debug("{}: Reconciling existing Routes {} against the desired routes", reconciliation, existingRouteNames);
+ LOGGER.debugCr(reconciliation, "Reconciling existing Routes {} against the desired routes", existingRouteNames);
// Update desired routes
for (Route route : routes) {
String routeName = route.getMetadata().getName();
existingRouteNames.remove(routeName);
- routeFutures.add(routeOperations.reconcile(namespace, routeName, route));
+ routeFutures.add(routeOperations.reconcile(reconciliation, namespace, routeName, route));
}
- log.debug("{}: Routes {} should be deleted", reconciliation, existingRouteNames);
+ LOGGER.debugCr(reconciliation, "Routes {} should be deleted", existingRouteNames);
// Delete routes which match our selector but are not desired anymore
for (String routeName : existingRouteNames) {
- routeFutures.add(routeOperations.reconcile(namespace, routeName, null));
+ routeFutures.add(routeOperations.reconcile(reconciliation, namespace, routeName, null));
}
return CompositeFuture.join(routeFutures);
@@ -1652,7 +1651,7 @@ Future kafkaRoutes() {
return withVoid(fut);
} else {
- log.warn("{}: The OpenShift route API is not available in this Kubernetes cluster. Exposing Kafka cluster {} using routes is not possible.", reconciliation, name);
+ LOGGER.warnCr(reconciliation, "The OpenShift route API is not available in this Kubernetes cluster. Exposing Kafka cluster {} using routes is not possible.", name);
return withVoid(Future.failedFuture("The OpenShift route API is not available in this Kubernetes cluster. Exposing Kafka cluster " + name + " using routes is not possible."));
}
}
@@ -1682,20 +1681,20 @@ Future kafkaIngresses() {
List