From fd04171b02d4008db6a0a63ee6cfbed3bd911764 Mon Sep 17 00:00:00 2001 From: Akram Ben Aissi Date: Thu, 18 Mar 2021 13:01:54 +0100 Subject: [PATCH 01/22] Migrate to kubernetes-client 5.2.1 --- pom.xml | 17 +- .../jenkins/openshiftsync/BaseWatcher.java | 5 +- .../openshiftsync/BuildSyncRunListener.java | 242 +++++++-------- .../jenkins/openshiftsync/OpenShiftUtils.java | 281 +++++++----------- .../openshiftsync/WatcherCallback.java | 4 +- 5 files changed, 234 insertions(+), 315 deletions(-) diff --git a/pom.xml b/pom.xml index 18e4b800d..ec8bf78a0 100644 --- a/pom.xml +++ b/pom.xml @@ -25,7 +25,7 @@ 2.263 8 2.71 - 4.13.2 + 5.2.1 DEBUG 3.0.4 false @@ -95,7 +95,7 @@ io.fabric8 openshift-client ${openshift-client.version} - provided + compile org.jenkins-ci.plugins.workflow @@ -162,9 +162,16 @@ io.fabric8 kubernetes-client - 4.13.2 - provided - + 5.2.1 + compile + + + + + + + + org.eclipse.jetty diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/BaseWatcher.java b/src/main/java/io/fabric8/jenkins/openshiftsync/BaseWatcher.java index abb76cbff..f52b63b74 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/BaseWatcher.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/BaseWatcher.java @@ -19,6 +19,7 @@ import io.fabric8.kubernetes.client.KubernetesClientException; import io.fabric8.kubernetes.client.Watch; +import io.fabric8.kubernetes.client.WatcherException; import java.util.ArrayList; import java.util.List; @@ -80,7 +81,7 @@ public void stop() { } } - public void onClose(KubernetesClientException e, String namespace) { + public void onClose(WatcherException e, String namespace) { //scans of fabric client confirm this call be called with null //we do not want to totally ignore this, as the closing of the //watch can effect responsiveness @@ -88,7 +89,7 @@ public void onClose(KubernetesClientException e, String namespace) { if (e != null) { LOGGER.warning(e.toString()); - if (e.getStatus() != null && e.getStatus().getCode() == HTTP_GONE) { + if 
(e.isHttpGone()) { stop(); start(); } diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/BuildSyncRunListener.java b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildSyncRunListener.java index c5246b4f6..de6b36de5 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/BuildSyncRunListener.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildSyncRunListener.java @@ -15,6 +15,44 @@ */ package io.fabric8.jenkins.openshiftsync; +import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_ANNOTATIONS_JENKINS_BLUEOCEAN_LOG_URL; +import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_ANNOTATIONS_JENKINS_BUILD_URI; +import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_ANNOTATIONS_JENKINS_CONSOLE_LOG_URL; +import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_ANNOTATIONS_JENKINS_LOG_URL; +import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_ANNOTATIONS_JENKINS_NAMESPACE; +import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_ANNOTATIONS_JENKINS_PENDING_INPUT_ACTION_JSON; +import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_ANNOTATIONS_JENKINS_STATUS_JSON; +import static io.fabric8.jenkins.openshiftsync.JenkinsUtils.maybeScheduleNext; +import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.formatTimestamp; +import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getAuthenticatedOpenShiftClient; +import static java.net.HttpURLConnection.HTTP_NOT_FOUND; +import static java.util.logging.Level.FINE; +import static java.util.logging.Level.INFO; +import static java.util.logging.Level.SEVERE; +import static java.util.logging.Level.WARNING; + +import java.io.IOException; +import java.lang.reflect.Constructor; +import java.lang.reflect.Method; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentLinkedQueue; +import 
java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.logging.Level; +import java.util.logging.Logger; + +import javax.annotation.Nonnull; + +import org.apache.commons.httpclient.HttpStatus; +import org.jenkinsci.plugins.workflow.job.WorkflowRun; +import org.jenkinsci.plugins.workflow.support.steps.input.InputAction; +import org.jenkinsci.plugins.workflow.support.steps.input.InputStepExecution; +import org.kohsuke.stapler.DataBoundConstructor; + import com.cloudbees.workflow.rest.external.AtomFlowNodeExt; import com.cloudbees.workflow.rest.external.FlowNodeExt; import com.cloudbees.workflow.rest.external.PendingInputActionsExt; @@ -33,8 +71,7 @@ import hudson.triggers.SafeTimerTask; import io.fabric8.kubernetes.client.KubernetesClientException; import io.fabric8.openshift.api.model.Build; -import io.fabric8.openshift.api.model.BuildFluent; -import io.fabric8.openshift.api.model.DoneableBuild; +import io.fabric8.openshift.api.model.BuildBuilder; import io.jenkins.blueocean.rest.factory.BlueRunFactory; import io.jenkins.blueocean.rest.model.BluePipelineNode; import io.jenkins.blueocean.rest.model.BlueRun; @@ -42,41 +79,6 @@ import jenkins.model.Jenkins; import jenkins.util.Timer; -import org.apache.commons.httpclient.HttpStatus; -import org.jenkinsci.plugins.workflow.job.WorkflowRun; -import org.jenkinsci.plugins.workflow.support.steps.input.InputAction; -import org.jenkinsci.plugins.workflow.support.steps.input.InputStepExecution; -import org.kohsuke.stapler.DataBoundConstructor; - -import javax.annotation.Nonnull; - -import java.io.IOException; -import java.lang.reflect.Constructor; -import java.lang.reflect.Method; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.concurrent.ConcurrentLinkedQueue; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.logging.Level; 
-import java.util.logging.Logger; - -import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_ANNOTATIONS_JENKINS_BUILD_URI; -import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_ANNOTATIONS_JENKINS_LOG_URL; -import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_ANNOTATIONS_JENKINS_NAMESPACE; -import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_ANNOTATIONS_JENKINS_PENDING_INPUT_ACTION_JSON; -import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_ANNOTATIONS_JENKINS_STATUS_JSON; -import static io.fabric8.jenkins.openshiftsync.JenkinsUtils.maybeScheduleNext; -import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.formatTimestamp; -import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getAuthenticatedOpenShiftClient; -import static java.net.HttpURLConnection.HTTP_NOT_FOUND; -import static java.util.logging.Level.FINE; -import static java.util.logging.Level.SEVERE; -import static java.util.logging.Level.WARNING; - /** * Listens to Jenkins Job build {@link Run} start and stop then ensure there's a * suitable {@link Build} object in OpenShift thats updated correctly with the @@ -84,10 +86,9 @@ */ @Extension public class BuildSyncRunListener extends RunListener { - private static final Logger logger = Logger - .getLogger(BuildSyncRunListener.class.getName()); + private static final Logger logger = Logger.getLogger(BuildSyncRunListener.class.getName()); - private long pollPeriodMs = 1000 * 5; // 5 seconds + private long pollPeriodMs = 1000 * 5; // 5 seconds private long delayPollPeriodMs = 1000; // 1 seconds private static final long maxDelay = 30000; @@ -104,14 +105,13 @@ public BuildSyncRunListener(long pollPeriodMs) { } /** - * Joins all the given strings, ignoring nulls so that they form a URL with - * / between the paths without a // if the previous path ends with / and the - * next path starts with / unless a path item is blank + * Joins all the given strings, ignoring nulls so that they 
form a URL with / + * between the paths without a // if the previous path ends with / and the next + * path starts with / unless a path item is blank * - * @param strings - * the sequence of strings to join - * @return the strings concatenated together with / while avoiding a double - * // between non blank strings. + * @param strings the sequence of strings to join + * @return the strings concatenated together with / while avoiding a double // + * between non blank strings. */ public static String joinPaths(String... strings) { StringBuilder sb = new StringBuilder(); @@ -124,8 +124,7 @@ public static String joinPaths(String... strings) { String joined = sb.toString(); // And normalize it... - return joined.replaceAll("/+", "/").replaceAll("/\\?", "?") - .replaceAll("/#", "#").replaceAll(":/", "://"); + return joined.replaceAll("/+", "/").replaceAll("/\\?", "?").replaceAll("/#", "#").replaceAll(":/", "://"); } @Override @@ -145,8 +144,7 @@ public void onStarted(Run run, TaskListener listener) { } checkTimerStarted(); } else { - logger.fine("not polling polling build " + run.getUrl() - + " as its not a WorkflowJob"); + logger.fine("not polling polling build " + run.getUrl() + " as its not a WorkflowJob"); } super.onStarted(run, listener); } @@ -159,8 +157,7 @@ protected void doRun() throws Exception { pollLoop(); } }; - Timer.get().scheduleAtFixedRate(task, delayPollPeriodMs, pollPeriodMs, - TimeUnit.MILLISECONDS); + Timer.get().scheduleAtFixedRate(task, delayPollPeriodMs, pollPeriodMs, TimeUnit.MILLISECONDS); } } @@ -234,18 +231,15 @@ protected void pollRun(Run run) { } } - private boolean shouldUpdateOpenShiftBuild(BuildCause cause, - int latestStageNum, int latestNumFlowNodes, StatusExt status) { + private boolean shouldUpdateOpenShiftBuild(BuildCause cause, int latestStageNum, int latestNumFlowNodes, + StatusExt status) { long currTime = TimeUnit.NANOSECONDS.toMillis(System.nanoTime()); logger.fine(String.format( "shouldUpdateOpenShiftBuild curr time %s last 
update %s curr stage num %s last stage num %s" + "curr flow num %s last flow num %s status %s", - String.valueOf(currTime), - String.valueOf(cause.getLastUpdateToOpenshift()), - String.valueOf(latestStageNum), - String.valueOf(cause.getNumStages()), - String.valueOf(latestNumFlowNodes), - String.valueOf(cause.getNumFlowNodes()), status.toString())); + String.valueOf(currTime), String.valueOf(cause.getLastUpdateToOpenshift()), + String.valueOf(latestStageNum), String.valueOf(cause.getNumStages()), + String.valueOf(latestNumFlowNodes), String.valueOf(cause.getNumFlowNodes()), status.toString())); // if we have not updated in maxDelay time, update if (currTime > (cause.getLastUpdateToOpenshift() + maxDelay)) { @@ -263,8 +257,7 @@ private boolean shouldUpdateOpenShiftBuild(BuildCause cause, } // if the run is in some sort of terminal state, update - if (status != StatusExt.IN_PROGRESS && - status != StatusExt.PAUSED_PENDING_INPUT) { + if (status != StatusExt.IN_PROGRESS && status != StatusExt.PAUSED_PENDING_INPUT) { return true; } @@ -282,10 +275,10 @@ private void upsertBuild(Run run, RunExt wfRunExt, BlueRun blueRun) { } String namespace = OpenShiftUtils.getNamespacefromPodInputs(); + String ns = cause.getNamespace(); if (namespace == null) - namespace = cause.getNamespace(); - String rootUrl = OpenShiftUtils.getJenkinsURL( - getAuthenticatedOpenShiftClient(), namespace); + namespace = ns; + String rootUrl = OpenShiftUtils.getJenkinsURL(getAuthenticatedOpenShiftClient(), namespace); String buildUrl = joinPaths(rootUrl, run.getUrl()); String logsUrl = joinPaths(buildUrl, "/consoleText"); String logsConsoleUrl = joinPaths(buildUrl, "/console"); @@ -309,20 +302,15 @@ private void upsertBuild(Run run, RunExt wfRunExt, BlueRun blueRun) { .loadClass("org.jenkinsci.plugins.blueoceandisplayurl.BlueOceanDisplayURLImpl"); Constructor ctor = weburlbldr.getConstructor(); Object displayURL = ctor.newInstance(); - Method getRunURLMethod = weburlbldr.getMethod( - "getRunURL", 
hudson.model.Run.class); - Object blueOceanURI = getRunURLMethod.invoke( - displayURL, run); + Method getRunURLMethod = weburlbldr.getMethod("getRunURL", hudson.model.Run.class); + Object blueOceanURI = getRunURLMethod.invoke(displayURL, run); logsBlueOceanUrl = blueOceanURI.toString(); - logsBlueOceanUrl = logsBlueOceanUrl.replaceAll( - "http://unconfigured-jenkins-location/", ""); - if (logsBlueOceanUrl.startsWith("http://") - || logsBlueOceanUrl.startsWith("https://")) + logsBlueOceanUrl = logsBlueOceanUrl.replaceAll("http://unconfigured-jenkins-location/", ""); + if (logsBlueOceanUrl.startsWith("http://") || logsBlueOceanUrl.startsWith("https://")) // still normalize string logsBlueOceanUrl = joinPaths("", logsBlueOceanUrl); else - logsBlueOceanUrl = joinPaths(rootUrl, - logsBlueOceanUrl); + logsBlueOceanUrl = joinPaths(rootUrl, logsBlueOceanUrl); } } } @@ -343,8 +331,7 @@ private void upsertBuild(Run run, RunExt wfRunExt, BlueRun blueRun) { } boolean pendingInput = false; if (!wfRunExt.get_links().self.href.matches("^https?://.*$")) { - wfRunExt.get_links().self.setHref(joinPaths(rootUrl, - wfRunExt.get_links().self.href)); + wfRunExt.get_links().self.setHref(joinPaths(rootUrl, wfRunExt.get_links().self.href)); } int newNumStages = wfRunExt.getStages().size(); int newNumFlowNodes = 0; @@ -356,9 +343,7 @@ private void upsertBuild(Run run, RunExt wfRunExt, BlueRun blueRun) { // we leverage the blue ocean state machine to determine this BlueRunResult result = blueRunResults.get(stage.getName()); if (result != null && result == BlueRunResult.NOT_BUILT) { - logger.info("skipping stage " - + stage.getName() - + " for the status JSON for pipeline run " + logger.info("skipping stage " + stage.getName() + " for the status JSON for pipeline run " + run.getDisplayName() + " because it was not executed (most likely because of a failure in another stage)"); continue; @@ -369,22 +354,17 @@ private void upsertBuild(Run run, RunExt wfRunExt, BlueRun blueRun) { if 
(!links.self.href.matches("^https?://.*$")) { links.self.setHref(joinPaths(rootUrl, links.self.href)); } - if (links.getLog() != null - && !links.getLog().href.matches("^https?://.*$")) { + if (links.getLog() != null && !links.getLog().href.matches("^https?://.*$")) { links.getLog().setHref(joinPaths(rootUrl, links.getLog().href)); } - newNumFlowNodes = newNumFlowNodes - + stage.getStageFlowNodes().size(); + newNumFlowNodes = newNumFlowNodes + stage.getStageFlowNodes().size(); for (AtomFlowNodeExt node : stage.getStageFlowNodes()) { FlowNodeExt.FlowNodeLinks nodeLinks = node.get_links(); if (!nodeLinks.self.href.matches("^https?://.*$")) { - nodeLinks.self.setHref(joinPaths(rootUrl, - nodeLinks.self.href)); + nodeLinks.self.setHref(joinPaths(rootUrl, nodeLinks.self.href)); } - if (nodeLinks.getLog() != null - && !nodeLinks.getLog().href.matches("^https?://.*$")) { - nodeLinks.getLog().setHref( - joinPaths(rootUrl, nodeLinks.getLog().href)); + if (nodeLinks.getLog() != null && !nodeLinks.getLog().href.matches("^https?://.*$")) { + nodeLinks.getLog().setHref(joinPaths(rootUrl, nodeLinks.getLog().href)); } } @@ -396,8 +376,8 @@ private void upsertBuild(Run run, RunExt wfRunExt, BlueRun blueRun) { // override stages in case declarative has fooled base pipeline support wfRunExt.setStages(validStageList); - boolean needToUpdate = this.shouldUpdateOpenShiftBuild(cause, - newNumStages, newNumFlowNodes, wfRunExt.getStatus()); + boolean needToUpdate = this.shouldUpdateOpenShiftBuild(cause, newNumStages, newNumFlowNodes, + wfRunExt.getStatus()); if (!needToUpdate) { return; } @@ -410,9 +390,9 @@ private void upsertBuild(Run run, RunExt wfRunExt, BlueRun blueRun) { return; } - String pendingActionsJson = null; + String pendingActions = null; if (pendingInput && run instanceof WorkflowRun) { - pendingActionsJson = getPendingActionsJson((WorkflowRun) run); + pendingActions = getPendingActionsJson((WorkflowRun) run); } String phase = runToBuildPhase(run); @@ -429,42 +409,28 @@ 
private void upsertBuild(Run run, RunExt wfRunExt, BlueRun blueRun) { } } - logger.log(FINE, "Patching build {0}/{1}: setting phase to {2}", - new Object[] { cause.getNamespace(), cause.getName(), phase }); + String name = cause.getName(); + logger.log(FINE, "Patching build {0}/{1}: setting phase to {2}", new Object[] { ns, name, phase }); try { - BuildFluent.MetadataNested builder = getAuthenticatedOpenShiftClient() - .builds() - .inNamespace(cause.getNamespace()) - .withName(cause.getName()) - .edit() - .editMetadata() - .addToAnnotations( - OPENSHIFT_ANNOTATIONS_JENKINS_STATUS_JSON, json) - .addToAnnotations(OPENSHIFT_ANNOTATIONS_JENKINS_BUILD_URI, - buildUrl) - .addToAnnotations(OPENSHIFT_ANNOTATIONS_JENKINS_LOG_URL, - logsUrl) - .addToAnnotations( - Constants.OPENSHIFT_ANNOTATIONS_JENKINS_CONSOLE_LOG_URL, - logsConsoleUrl) - .addToAnnotations( - Constants.OPENSHIFT_ANNOTATIONS_JENKINS_BLUEOCEAN_LOG_URL, - logsBlueOceanUrl); + Map annotations = new HashMap(); + annotations.put(OPENSHIFT_ANNOTATIONS_JENKINS_STATUS_JSON, json); + annotations.put(OPENSHIFT_ANNOTATIONS_JENKINS_BUILD_URI, buildUrl); + annotations.put(OPENSHIFT_ANNOTATIONS_JENKINS_LOG_URL, logsUrl); + annotations.put(OPENSHIFT_ANNOTATIONS_JENKINS_CONSOLE_LOG_URL, logsConsoleUrl); + annotations.put(OPENSHIFT_ANNOTATIONS_JENKINS_BLUEOCEAN_LOG_URL, logsBlueOceanUrl); String jenkinsNamespace = System.getenv("KUBERNETES_NAMESPACE"); if (jenkinsNamespace != null && !jenkinsNamespace.isEmpty()) { - builder.addToAnnotations( - OPENSHIFT_ANNOTATIONS_JENKINS_NAMESPACE, - jenkinsNamespace); + annotations.put(OPENSHIFT_ANNOTATIONS_JENKINS_NAMESPACE, jenkinsNamespace); } - if (pendingActionsJson != null && !pendingActionsJson.isEmpty()) { - builder.addToAnnotations( - OPENSHIFT_ANNOTATIONS_JENKINS_PENDING_INPUT_ACTION_JSON, - pendingActionsJson); + if (pendingActions != null && !pendingActions.isEmpty()) { + annotations.put(OPENSHIFT_ANNOTATIONS_JENKINS_PENDING_INPUT_ACTION_JSON, pendingActions); } - 
builder.endMetadata().editStatus().withPhase(phase) - .withStartTimestamp(startTime) - .withCompletionTimestamp(completionTime).endStatus().done(); + BuildBuilder builder = new BuildBuilder().editMetadata().withAnnotations(annotations).endMetadata() + .editStatus().withPhase(phase).withStartTimestamp(startTime).withCompletionTimestamp(completionTime) + .endStatus(); + logger.log(INFO, "Creating a new build builder: " + builder); + getAuthenticatedOpenShiftClient().builds().inNamespace(ns).withName(name).edit(b -> builder.build()); } catch (KubernetesClientException e) { if (HTTP_NOT_FOUND == e.getCode()) { runsToPoll.remove(run); @@ -475,8 +441,7 @@ private void upsertBuild(Run run, RunExt wfRunExt, BlueRun blueRun) { cause.setNumFlowNodes(newNumFlowNodes); cause.setNumStages(newNumStages); - cause.setLastUpdateToOpenshift(TimeUnit.NANOSECONDS.toMillis(System - .nanoTime())); + cause.setLastUpdateToOpenshift(TimeUnit.NANOSECONDS.toMillis(System.nanoTime())); } // annotate the Build with pending input JSON so consoles can do the @@ -484,18 +449,17 @@ private void upsertBuild(Run run, RunExt wfRunExt, BlueRun blueRun) { private String getPendingActionsJson(WorkflowRun run) { List pendingInputActions = new ArrayList(); InputAction inputAction = run.getAction(InputAction.class); -List executions = null; + List executions = null; if (inputAction != null) { - try { - executions = inputAction.getExecutions(); - } catch (Exception e) { - logger.log(SEVERE, "Failed to get Excecutions:" + e, e); - return null; - } + try { + executions = inputAction.getExecutions(); + } catch (Exception e) { + logger.log(SEVERE, "Failed to get Excecutions:" + e, e); + return null; + } if (executions != null && !executions.isEmpty()) { for (InputStepExecution inputStepExecution : executions) { - pendingInputActions.add(PendingInputActionsExt.create( - inputStepExecution, run)); + pendingInputActions.add(PendingInputActionsExt.create(inputStepExecution, run)); } } } @@ -542,13 +506,11 @@ 
private String runToBuildPhase(Run run) { /** * Returns true if we should poll the status of this run * - * @param run - * the Run to test against + * @param run the Run to test against * @return true if the should poll the status of this build run */ protected boolean shouldPollRun(Run run) { - return run instanceof WorkflowRun - && run.getCause(BuildCause.class) != null + return run instanceof WorkflowRun && run.getCause(BuildCause.class) != null && GlobalPluginConfiguration.get().isEnabled(); } } diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/OpenShiftUtils.java b/src/main/java/io/fabric8/jenkins/openshiftsync/OpenShiftUtils.java index 191667fba..773b8adf6 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/OpenShiftUtils.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/OpenShiftUtils.java @@ -15,15 +15,42 @@ */ package io.fabric8.jenkins.openshiftsync; +import static io.fabric8.jenkins.openshiftsync.BuildPhases.NEW; +import static io.fabric8.jenkins.openshiftsync.BuildPhases.PENDING; +import static io.fabric8.jenkins.openshiftsync.BuildPhases.RUNNING; +import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_DEFAULT_NAMESPACE; +import static java.util.logging.Level.FINE; + +import java.io.BufferedReader; +import java.io.File; +import java.io.FileNotFoundException; +import java.io.FileReader; +import java.io.IOException; +import java.io.InputStream; +import java.nio.charset.StandardCharsets; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; +import java.util.Arrays; +import java.util.HashMap; +import java.util.Map; +import java.util.logging.Level; +import java.util.logging.Logger; + +import org.apache.commons.lang.StringUtils; +import org.apache.tools.ant.filters.StringInputStream; +import org.joda.time.DateTime; +import org.joda.time.format.DateTimeFormatter; +import org.joda.time.format.ISODateTimeFormat; + import com.cloudbees.hudson.plugins.folder.Folder; import 
com.fasterxml.jackson.annotation.JsonIgnore; import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.dataformat.yaml.YAMLFactory; -import hudson.model.ItemGroup; import hudson.BulkChange; import hudson.model.Item; +import hudson.model.ItemGroup; import hudson.util.XStream2; import io.fabric8.kubernetes.api.model.HasMetadata; import io.fabric8.kubernetes.api.model.ObjectMeta; @@ -34,6 +61,7 @@ import io.fabric8.kubernetes.client.Config; import io.fabric8.kubernetes.client.Version; import io.fabric8.openshift.api.model.Build; +import io.fabric8.openshift.api.model.BuildBuilder; import io.fabric8.openshift.api.model.BuildConfig; import io.fabric8.openshift.api.model.BuildConfigSpec; import io.fabric8.openshift.api.model.BuildSource; @@ -47,46 +75,17 @@ import io.fabric8.openshift.client.OpenShiftConfigBuilder; import jenkins.model.Jenkins; -import org.apache.commons.lang.StringUtils; -import org.apache.tools.ant.filters.StringInputStream; -import org.joda.time.DateTime; -import org.joda.time.format.DateTimeFormatter; -import org.joda.time.format.ISODateTimeFormat; - -import java.io.BufferedReader; -import java.io.File; -import java.io.FileNotFoundException; -import java.io.FileReader; -import java.io.IOException; -import java.io.InputStream; -import java.nio.charset.StandardCharsets; -import java.security.MessageDigest; -import java.security.NoSuchAlgorithmException; -import java.util.Arrays; -import java.util.HashMap; -import java.util.Map; -import java.util.logging.Level; -import java.util.logging.Logger; - -import static io.fabric8.jenkins.openshiftsync.BuildPhases.NEW; -import static io.fabric8.jenkins.openshiftsync.BuildPhases.PENDING; -import static io.fabric8.jenkins.openshiftsync.BuildPhases.RUNNING; -import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_DEFAULT_NAMESPACE; -import static java.util.logging.Level.FINE; - /** */ public class OpenShiftUtils { - private 
final static Logger logger = Logger.getLogger(OpenShiftUtils.class - .getName()); + private final static Logger logger = Logger.getLogger(OpenShiftUtils.class.getName()); private static OpenShiftClient openShiftClient; private static String jenkinsPodNamespace = null; - + static { - jenkinsPodNamespace = System - .getProperty(Constants.OPENSHIFT_PROJECT_ENV_VAR_NAME); + jenkinsPodNamespace = System.getProperty(Constants.OPENSHIFT_PROJECT_ENV_VAR_NAME); if (jenkinsPodNamespace != null && jenkinsPodNamespace.trim().length() > 0) { jenkinsPodNamespace = jenkinsPodNamespace.trim(); } else { @@ -119,15 +118,13 @@ public class OpenShiftUtils { } } - private static final DateTimeFormatter dateFormatter = ISODateTimeFormat - .dateTimeNoMillis(); + private static final DateTimeFormatter dateFormatter = ISODateTimeFormat.dateTimeNoMillis(); /** * Initializes an {@link OpenShiftClient} * - * @param serverUrl - * the optional URL of where the OpenShift cluster API server is - * running + * @param serverUrl the optional URL of where the OpenShift cluster API server + * is running */ public synchronized static void initializeOpenShiftClient(String serverUrl) { OpenShiftConfigBuilder configBuilder = new OpenShiftConfigBuilder(); @@ -136,11 +133,10 @@ public synchronized static void initializeOpenShiftClient(String serverUrl) { } Config config = configBuilder.build(); config.setUserAgent("openshift-sync-plugin-" - + Jenkins.getInstance().getPluginManager() - .getPlugin("openshift-sync").getVersion() + "/fabric8-" + + Jenkins.getInstance().getPluginManager().getPlugin("openshift-sync").getVersion() + "/fabric8-" + Version.clientVersion()); openShiftClient = new DefaultOpenShiftClient(config); - DefaultOpenShiftClient defClient = (DefaultOpenShiftClient)openShiftClient; + DefaultOpenShiftClient defClient = (DefaultOpenShiftClient) openShiftClient; defClient.getHttpClient().dispatcher().setMaxRequestsPerHost(100); defClient.getHttpClient().dispatcher().setMaxRequests(100); } @@ 
-172,10 +168,9 @@ public synchronized static void shutdownOpenShiftClient() { /** * Checks if a {@link BuildConfig} relates to a Jenkins build * - * @param bc - * the BuildConfig - * @return true if this is an OpenShift BuildConfig which should be mirrored - * to a Jenkins Job + * @param bc the BuildConfig + * @return true if this is an OpenShift BuildConfig which should be mirrored to + * a Jenkins Job */ public static boolean isPipelineStrategyBuildConfig(BuildConfig bc) { if (BuildConfigToJobMapper.JENKINS_PIPELINE_BUILD_STRATEGY @@ -206,8 +201,7 @@ public static boolean isPipelineStrategyBuild(Build b) { logger.warning("bad input, null strategy: " + b); return false; } - if (BuildConfigToJobMapper.JENKINS_PIPELINE_BUILD_STRATEGY - .equalsIgnoreCase(b.getSpec().getStrategy().getType()) + if (BuildConfigToJobMapper.JENKINS_PIPELINE_BUILD_STRATEGY.equalsIgnoreCase(b.getSpec().getStrategy().getType()) && b.getSpec().getStrategy().getJenkinsPipelineStrategy() != null) { return true; } @@ -217,8 +211,7 @@ public static boolean isPipelineStrategyBuild(Build b) { /** * Finds the Jenkins job name for the given {@link BuildConfig}. 
* - * @param bc - * the BuildConfig + * @param bc the BuildConfig * @return the jenkins job name for the given BuildConfig */ public static String jenkinsJobName(BuildConfig bc) { @@ -231,10 +224,9 @@ public static String jenkinsJobName(BuildConfig bc) { /** * Creates the Jenkins Job name for the given buildConfigName * - * @param namespace - * the namespace of the build - * @param buildConfigName - * the name of the {@link BuildConfig} in in the namespace + * @param namespace the namespace of the build + * @param buildConfigName the name of the {@link BuildConfig} in in the + * namespace * @return the jenkins job name for the given namespace and name */ public static String jenkinsJobName(String namespace, String buildConfigName) { @@ -245,8 +237,7 @@ public static String jenkinsJobName(String namespace, String buildConfigName) { * Finds the full jenkins job path including folders for the given * {@link BuildConfig}. * - * @param bc - * the BuildConfig + * @param bc the BuildConfig * @return the jenkins job name for the given BuildConfig */ public static String jenkinsJobFullName(BuildConfig bc) { @@ -255,22 +246,22 @@ public static String jenkinsJobFullName(BuildConfig bc) { return jobName; } if (GlobalPluginConfiguration.get().getFoldersEnabled()) { - return getNamespace(bc) + "/" + jenkinsJobName(getNamespace(bc), getName(bc)); + return getNamespace(bc) + "/" + jenkinsJobName(getNamespace(bc), getName(bc)); } else { - return getName(bc); + return getName(bc); } } /** * Returns the parent for the given item full name or default to the active * jenkins if it does not exist + * * @param activeJenkins the active Jenkins instance - * @param fullName the full name of the instance - * @param namespace the namespace where the instance runs + * @param fullName the full name of the instance + * @param namespace the namespace where the instance runs * @return and ItemGroup representing the full parent */ - public static ItemGroup getFullNameParent(Jenkins activeJenkins, - 
String fullName, String namespace) { + public static ItemGroup getFullNameParent(Jenkins activeJenkins, String fullName, String namespace) { int idx = fullName.lastIndexOf('/'); if (idx > 0) { String parentFullName = fullName.substring(0, idx); @@ -282,25 +273,21 @@ public static ItemGroup getFullNameParent(Jenkins activeJenkins, // lets lazily create a new folder for this namespace parent Folder folder = new Folder(activeJenkins, namespace); try { - folder.setDescription("Folder for the OpenShift project: " - + namespace); + folder.setDescription("Folder for the OpenShift project: " + namespace); } catch (IOException e) { // ignore } BulkChange bk = new BulkChange(folder); - InputStream jobStream = new StringInputStream( - new XStream2().toXML(folder)); + InputStream jobStream = new StringInputStream(new XStream2().toXML(folder)); try { - activeJenkins.createProjectFromXML(namespace, jobStream) - .save(); + activeJenkins.createProjectFromXML(namespace, jobStream).save(); } catch (IOException e) { logger.warning("Failed to create the Folder: " + namespace); } try { bk.commit(); } catch (IOException e) { - logger.warning("Failed to commit toe BulkChange for the Folder: " - + namespace); + logger.warning("Failed to commit toe BulkChange for the Folder: " + namespace); } // lets look it up again to be sure parent = activeJenkins.getItemByFullName(namespace); @@ -315,8 +302,7 @@ public static ItemGroup getFullNameParent(Jenkins activeJenkins, /** * Finds the Jenkins job display name for the given {@link BuildConfig}. 
* - * @param bc - * the BuildConfig + * @param bc the BuildConfig * @return the jenkins job display name for the given BuildConfig */ public static String jenkinsJobDisplayName(BuildConfig bc) { @@ -328,14 +314,12 @@ public static String jenkinsJobDisplayName(BuildConfig bc) { /** * Creates the Jenkins Job display name for the given buildConfigName * - * @param namespace - * the namespace of the build - * @param buildConfigName - * the name of the {@link BuildConfig} in in the namespace + * @param namespace the namespace of the build + * @param buildConfigName the name of the {@link BuildConfig} in in the + * namespace * @return the jenkins job display name for the given namespace and name */ - public static String jenkinsJobDisplayName(String namespace, - String buildConfigName) { + public static String jenkinsJobDisplayName(String namespace, String buildConfigName) { return namespace + "/" + buildConfigName; } @@ -343,27 +327,21 @@ public static String jenkinsJobDisplayName(String namespace, * Gets the current namespace running Jenkins inside or returns a reasonable * default * - * @param configuredNamespaces - * the optional configured namespace(s) - * @param client - * the OpenShift client + * @param configuredNamespaces the optional configured namespace(s) + * @param client the OpenShift client * @return the default namespace using either the configuration value, the * default namespace on the client or "default" */ - public static String[] getNamespaceOrUseDefault( - String[] configuredNamespaces, OpenShiftClient client) { + public static String[] getNamespaceOrUseDefault(String[] configuredNamespaces, OpenShiftClient client) { String[] namespaces = configuredNamespaces; if (namespaces != null) { for (int i = 0; i < namespaces.length; i++) { - if (namespaces[i].startsWith("${") - && namespaces[i].endsWith("}")) { - String envVar = namespaces[i].substring(2, - namespaces[i].length() - 1); + if (namespaces[i].startsWith("${") && namespaces[i].endsWith("}")) { + 
String envVar = namespaces[i].substring(2, namespaces[i].length() - 1); namespaces[i] = System.getenv(envVar); if (StringUtils.isBlank(namespaces[i])) { - logger.warning("No value defined for namespace environment variable `" - + envVar + "`"); + logger.warning("No value defined for namespace environment variable `" + envVar + "`"); } } } @@ -380,30 +358,22 @@ public static String[] getNamespaceOrUseDefault( /** * Returns the public URL of the given service * - * @param openShiftClient - * the OpenShiftClient to use - * @param defaultProtocolText - * the protocol text part of a URL such as http:// - * @param namespace - * the Kubernetes namespace - * @param serviceName - * the service name + * @param openShiftClient the OpenShiftClient to use + * @param defaultProtocolText the protocol text part of a URL such as + * http:// + * @param namespace the Kubernetes namespace + * @param serviceName the service name * @return the external URL of the service */ - public static String getExternalServiceUrl(OpenShiftClient openShiftClient, - String defaultProtocolText, String namespace, String serviceName) { + public static String getExternalServiceUrl(OpenShiftClient openShiftClient, String defaultProtocolText, + String namespace, String serviceName) { if (namespace != null && serviceName != null) { try { - RouteList routes = openShiftClient.routes() - .inNamespace(namespace).list(); + RouteList routes = openShiftClient.routes().inNamespace(namespace).list(); for (Route route : routes.getItems()) { RouteSpec spec = route.getSpec(); - if (spec != null - && spec.getTo() != null - && "Service".equalsIgnoreCase(spec.getTo() - .getKind()) - && serviceName.equalsIgnoreCase(spec.getTo() - .getName())) { + if (spec != null && spec.getTo() != null && "Service".equalsIgnoreCase(spec.getTo().getKind()) + && serviceName.equalsIgnoreCase(spec.getTo().getName())) { String host = spec.getHost(); if (host != null && host.length() > 0) { if (spec.getTls() != null) { @@ -414,13 +384,12 @@ 
public static String getExternalServiceUrl(OpenShiftClient openShiftClient, } } } catch (Exception e) { - logger.log(Level.WARNING, "Could not find Route for service " - + namespace + "/" + serviceName + ". " + e, e); + logger.log(Level.WARNING, + "Could not find Route for service " + namespace + "/" + serviceName + ". " + e, e); } // lets try the portalIP instead try { - Service service = openShiftClient.services() - .inNamespace(namespace).withName(serviceName).get(); + Service service = openShiftClient.services().inNamespace(namespace).withName(serviceName).get(); if (service != null) { ServiceSpec spec = service.getSpec(); if (spec != null) { @@ -431,8 +400,8 @@ public static String getExternalServiceUrl(OpenShiftClient openShiftClient, } } } catch (Exception e) { - logger.log(Level.WARNING, "Could not find Route for service " - + namespace + "/" + serviceName + ". " + e, e); + logger.log(Level.WARNING, + "Could not find Route for service " + namespace + "/" + serviceName + ". " + e, e); } } @@ -443,14 +412,11 @@ public static String getExternalServiceUrl(OpenShiftClient openShiftClient, /** * Calculates the external URL to access Jenkins * - * @param namespace - * the namespace Jenkins is runing inside - * @param openShiftClient - * the OpenShift client + * @param namespace the namespace Jenkins is runing inside + * @param openShiftClient the OpenShift client * @return the external URL to access Jenkins */ - public static String getJenkinsURL(OpenShiftClient openShiftClient, - String namespace) { + public static String getJenkinsURL(OpenShiftClient openShiftClient, String namespace) { // if the user has explicitly configured the jenkins root URL, use it String rootUrl = Jenkins.getInstance().getRootUrl(); if (StringUtils.isNotEmpty(rootUrl)) { @@ -461,8 +427,7 @@ public static String getJenkinsURL(OpenShiftClient openShiftClient, // the service/route // TODO we will eventually make the service name configurable, with the // default of "jenkins" - return 
getExternalServiceUrl(openShiftClient, "http://", namespace, - "jenkins"); + return getExternalServiceUrl(openShiftClient, "http://", namespace, "jenkins"); } public static String getNamespacefromPodInputs() { @@ -472,15 +437,11 @@ public static String getNamespacefromPodInputs() { /** * Lazily creates the GitSource if need be then updates the git URL * - * @param buildConfig - * the BuildConfig to update - * @param gitUrl - * the URL to the git repo - * @param ref - * the git ref (commit/branch/etc) for the build + * @param buildConfig the BuildConfig to update + * @param gitUrl the URL to the git repo + * @param ref the git ref (commit/branch/etc) for the build */ - public static void updateGitSourceUrl(BuildConfig buildConfig, - String gitUrl, String ref) { + public static void updateGitSourceUrl(BuildConfig buildConfig, String gitUrl, String ref) { BuildConfigSpec spec = buildConfig.getSpec(); if (spec == null) { spec = new BuildConfigSpec(); @@ -502,26 +463,22 @@ public static void updateGitSourceUrl(BuildConfig buildConfig, } public static void updateOpenShiftBuildPhase(Build build, String phase) { - logger.log(FINE, "setting build to {0} in namespace {1}/{2}", - new Object[] { phase, build.getMetadata().getNamespace(), - build.getMetadata().getName() }); - getAuthenticatedOpenShiftClient().builds() - .inNamespace(build.getMetadata().getNamespace()) - .withName(build.getMetadata().getName()).edit().editStatus() - .withPhase(phase).endStatus().done(); + String ns = build.getMetadata().getNamespace(); + String name = build.getMetadata().getName(); + logger.log(FINE, "setting build to {0} in namespace {1}/{2}", new Object[] { phase, ns, name }); + + BuildBuilder builder = new BuildBuilder().editStatus().withPhase(phase).endStatus(); + getAuthenticatedOpenShiftClient().builds().inNamespace(ns).withName(name).edit(b -> builder.build()); } /** * Maps a Jenkins Job name to an ObjectShift BuildConfig name * * @return the namespaced name for the BuildConfig - * @param 
jobName - * the job to associate to a BuildConfig name - * @param namespace - * the default namespace that Jenkins is running inside + * @param jobName the job to associate to a BuildConfig name + * @param namespace the default namespace that Jenkins is running inside */ - public static NamespaceName buildConfigNameFromJenkinsJobName( - String jobName, String namespace) { + public static NamespaceName buildConfigNameFromJenkinsJobName(String jobName, String namespace) { // TODO lets detect the namespace separator in the jobName for cases // where a jenkins is used for // BuildConfigs in multiple namespaces? @@ -548,35 +505,28 @@ public static long parseTimestamp(String timestamp) { return dateFormatter.parseMillis(timestamp); } - public static boolean isResourceWithoutStateEqual(HasMetadata oldObj, - HasMetadata newObj) { + public static boolean isResourceWithoutStateEqual(HasMetadata oldObj, HasMetadata newObj) { try { - byte[] oldDigest = MessageDigest.getInstance("MD5").digest( - dumpWithoutRuntimeStateAsYaml(oldObj).getBytes( - StandardCharsets.UTF_8)); - byte[] newDigest = MessageDigest.getInstance("MD5").digest( - dumpWithoutRuntimeStateAsYaml(newObj).getBytes( - StandardCharsets.UTF_8)); + byte[] oldDigest = MessageDigest.getInstance("MD5") + .digest(dumpWithoutRuntimeStateAsYaml(oldObj).getBytes(StandardCharsets.UTF_8)); + byte[] newDigest = MessageDigest.getInstance("MD5") + .digest(dumpWithoutRuntimeStateAsYaml(newObj).getBytes(StandardCharsets.UTF_8)); return Arrays.equals(oldDigest, newDigest); } catch (NoSuchAlgorithmException | JsonProcessingException e) { throw new RuntimeException(e); } } - public static String dumpWithoutRuntimeStateAsYaml(HasMetadata obj) - throws JsonProcessingException { + public static String dumpWithoutRuntimeStateAsYaml(HasMetadata obj) throws JsonProcessingException { ObjectMapper statelessMapper = new ObjectMapper(new YAMLFactory()); - statelessMapper.addMixInAnnotations(ObjectMeta.class, - ObjectMetaMixIn.class); - 
statelessMapper.addMixInAnnotations(ReplicationController.class, - StatelessReplicationControllerMixIn.class); + statelessMapper.addMixInAnnotations(ObjectMeta.class, ObjectMetaMixIn.class); + statelessMapper.addMixInAnnotations(ReplicationController.class, StatelessReplicationControllerMixIn.class); return statelessMapper.writeValueAsString(obj); } public static boolean isCancellable(BuildStatus buildStatus) { String phase = buildStatus.getPhase(); - return phase.equals(NEW) || phase.equals(PENDING) - || phase.equals(RUNNING); + return phase.equals(NEW) || phase.equals(PENDING) || phase.equals(RUNNING); } public static boolean isNew(BuildStatus buildStatus) { @@ -584,8 +534,7 @@ public static boolean isNew(BuildStatus buildStatus) { } public static boolean isCancelled(BuildStatus status) { - return status != null && status.getCancelled() != null - && Boolean.TRUE.equals(status.getCancelled()); + return status != null && status.getCancelled() != null && Boolean.TRUE.equals(status.getCancelled()); } /** @@ -626,8 +575,7 @@ public static String getAnnotation(HasMetadata resource, String name) { return null; } - public static void addAnnotation(HasMetadata resource, String name, - String value) { + public static void addAnnotation(HasMetadata resource, String name, String value) { ObjectMeta metadata = resource.getMetadata(); if (metadata == null) { metadata = new ObjectMeta(); @@ -657,12 +605,11 @@ public static String getName(HasMetadata resource) { return null; } - protected static OpenShiftClient getOpenshiftClient() { - return getAuthenticatedOpenShiftClient(); - } + protected static OpenShiftClient getOpenshiftClient() { + return getAuthenticatedOpenShiftClient(); + } - abstract class StatelessReplicationControllerMixIn extends - ReplicationController { + abstract class StatelessReplicationControllerMixIn extends ReplicationController { @JsonIgnore private ReplicationControllerStatus status; diff --git 
a/src/main/java/io/fabric8/jenkins/openshiftsync/WatcherCallback.java b/src/main/java/io/fabric8/jenkins/openshiftsync/WatcherCallback.java index 994bc678a..47ac9d361 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/WatcherCallback.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/WatcherCallback.java @@ -17,6 +17,7 @@ import io.fabric8.kubernetes.client.KubernetesClientException; import io.fabric8.kubernetes.client.Watcher; +import io.fabric8.kubernetes.client.WatcherException; public class WatcherCallback implements Watcher { @@ -35,8 +36,9 @@ public void eventReceived(io.fabric8.kubernetes.client.Watcher.Action action, T } @Override - public void onClose(KubernetesClientException cause) { + public void onClose(WatcherException cause) { watcher.onClose(cause, namespace); + } } From ba3ffc5e5489469dcdce008f594d9a53aca3bb64 Mon Sep 17 00:00:00 2001 From: Akram Ben Aissi Date: Tue, 6 Apr 2021 12:10:34 +0200 Subject: [PATCH 02/22] OpenShift Sync Plugin with kubernetes-client 5.2.1 --- pom.xml | 21 +++++++------------ .../openshiftsync/BuildDecisionHandler.java | 2 +- .../openshiftsync/BuildSyncRunListener.java | 16 ++++++++------ .../jenkins/openshiftsync/OpenShiftUtils.java | 2 +- 4 files changed, 19 insertions(+), 22 deletions(-) diff --git a/pom.xml b/pom.xml index ec8bf78a0..bf91f5f61 100644 --- a/pom.xml +++ b/pom.xml @@ -95,7 +95,7 @@ io.fabric8 openshift-client ${openshift-client.version} - compile + provided org.jenkins-ci.plugins.workflow @@ -157,20 +157,13 @@ org.csanchez.jenkins.plugins kubernetes - 1.29.0 + 1.29.3 - - io.fabric8 - kubernetes-client - 5.2.1 - compile - - - - - - - + + org.jenkins-ci.plugins + kubernetes-client-api + 5.2.1-beta-1 + diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/BuildDecisionHandler.java b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildDecisionHandler.java index 8f6fe8a4b..a8b034fb4 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/BuildDecisionHandler.java +++ 
b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildDecisionHandler.java @@ -77,7 +77,7 @@ public boolean shouldSchedule(Queue.Task p, List actions) { ParametersAction params = dumpParams(actions); if (LOGGER.isLoggable(Level.FINE)) { - LOGGER.fine("ParametersAction: " + params.toString()); + LOGGER.fine("ParametersAction: " + params); } if (params != null && ret != null) BuildToActionMapper.addParameterAction(ret.getMetadata() diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/BuildSyncRunListener.java b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildSyncRunListener.java index de6b36de5..4e244c20d 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/BuildSyncRunListener.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildSyncRunListener.java @@ -86,6 +86,8 @@ */ @Extension public class BuildSyncRunListener extends RunListener { + private static final String KUBERNETES_NAMESPACE = "KUBERNETES_NAMESPACE"; + private static final Logger logger = Logger.getLogger(BuildSyncRunListener.class.getName()); private long pollPeriodMs = 1000 * 5; // 5 seconds @@ -419,18 +421,20 @@ private void upsertBuild(Run run, RunExt wfRunExt, BlueRun blueRun) { annotations.put(OPENSHIFT_ANNOTATIONS_JENKINS_LOG_URL, logsUrl); annotations.put(OPENSHIFT_ANNOTATIONS_JENKINS_CONSOLE_LOG_URL, logsConsoleUrl); annotations.put(OPENSHIFT_ANNOTATIONS_JENKINS_BLUEOCEAN_LOG_URL, logsBlueOceanUrl); - String jenkinsNamespace = System.getenv("KUBERNETES_NAMESPACE"); + String jenkinsNamespace = System.getenv(KUBERNETES_NAMESPACE); if (jenkinsNamespace != null && !jenkinsNamespace.isEmpty()) { annotations.put(OPENSHIFT_ANNOTATIONS_JENKINS_NAMESPACE, jenkinsNamespace); } if (pendingActions != null && !pendingActions.isEmpty()) { annotations.put(OPENSHIFT_ANNOTATIONS_JENKINS_PENDING_INPUT_ACTION_JSON, pendingActions); } - BuildBuilder builder = new BuildBuilder().editMetadata().withAnnotations(annotations).endMetadata() - 
.editStatus().withPhase(phase).withStartTimestamp(startTime).withCompletionTimestamp(completionTime) - .endStatus(); - logger.log(INFO, "Creating a new build builder: " + builder); - getAuthenticatedOpenShiftClient().builds().inNamespace(ns).withName(name).edit(b -> builder.build()); + final String finalStartTime = startTime; + final String finalCompletionTime = completionTime; + logger.log(INFO, "Creating a new build builder: "); + getAuthenticatedOpenShiftClient().builds().inNamespace(ns).withName(name) + .edit(b -> new BuildBuilder(b).editMetadata().withAnnotations(annotations).endMetadata() + .editStatus().withPhase(phase).withStartTimestamp(finalStartTime) + .withCompletionTimestamp(finalCompletionTime).endStatus().build()); } catch (KubernetesClientException e) { if (HTTP_NOT_FOUND == e.getCode()) { runsToPoll.remove(run); diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/OpenShiftUtils.java b/src/main/java/io/fabric8/jenkins/openshiftsync/OpenShiftUtils.java index 773b8adf6..6d59cc37d 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/OpenShiftUtils.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/OpenShiftUtils.java @@ -467,7 +467,7 @@ public static void updateOpenShiftBuildPhase(Build build, String phase) { String name = build.getMetadata().getName(); logger.log(FINE, "setting build to {0} in namespace {1}/{2}", new Object[] { phase, ns, name }); - BuildBuilder builder = new BuildBuilder().editStatus().withPhase(phase).endStatus(); + BuildBuilder builder = new BuildBuilder(build).editStatus().withPhase(phase).endStatus(); getAuthenticatedOpenShiftClient().builds().inNamespace(ns).withName(name).edit(b -> builder.build()); } From fd17003b8d2c9009176d956c7fd988fc644bc0a0 Mon Sep 17 00:00:00 2001 From: jkhelil Date: Tue, 6 Apr 2021 11:29:21 +0200 Subject: [PATCH 03/22] stop and start a single watcher after onclose --- .../jenkins/openshiftsync/BaseWatcher.java | 28 ++++--- .../openshiftsync/BuildConfigWatcher.java | 60 
+++++++++------ .../jenkins/openshiftsync/BuildWatcher.java | 70 +++++++++-------- .../openshiftsync/ConfigMapWatcher.java | 67 +++++++++------- .../openshiftsync/ImageStreamWatcher.java | 62 ++++++++------- .../jenkins/openshiftsync/SecretWatcher.java | 77 ++++++++++--------- 6 files changed, 209 insertions(+), 155 deletions(-) diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/BaseWatcher.java b/src/main/java/io/fabric8/jenkins/openshiftsync/BaseWatcher.java index f52b63b74..f2a15652e 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/BaseWatcher.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/BaseWatcher.java @@ -38,6 +38,7 @@ public abstract class BaseWatcher { private final Logger LOGGER = Logger.getLogger(BaseWatcher.class.getName()); protected ScheduledFuture relister; + protected final Object lock = new Object(); protected final String[] namespaces; protected ConcurrentHashMap watches; private final String PT_NAME_CLAIMED = "The event for %s | %s | %s that attempts to add the pod template %s was ignored because a %s previously created a pod template with the same name"; @@ -68,6 +69,7 @@ public synchronized void start() { TimeUnit.MILLISECONDS); } + public abstract void startAfterOnClose(String namespace); public void stop() { if (relister != null && !relister.isDone()) { @@ -81,24 +83,26 @@ public void stop() { } } - public void onClose(WatcherException e, String namespace) { + public void stop(String namespace) { + Watch watch = watches.get(namespace); + if (watch != null) { + watch.close(); + watches.remove(namespace); + } + } + + public void onClose(WatcherException e, String namespace) { //scans of fabric client confirm this call be called with null //we do not want to totally ignore this, as the closing of the //watch can effect responsiveness - LOGGER.info("Watch for type " + this.getClass().getName() + " closed for one of the following namespaces: " + watches.keySet().toString()); + LOGGER.info("Watch for type " + 
this.getClass().getName() + " closed for namespace : " + namespace); if (e != null) { - LOGGER.warning(e.toString()); - - if (e.isHttpGone()) { - stop(); - start(); + synchronized (this.lock) { + LOGGER.warning(e.toString()); + stop(namespace); + startAfterOnClose(namespace); } } - // clearing the watches here will signal the extending classes - // to attempt to re-establish the watch the next time they attempt - // to list; should shield from rapid/repeated close/reopen cycles - // doing it in this fashion - watches.remove(namespace); } public void addWatch(String key, Watch desiredWatch) { diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/BuildConfigWatcher.java b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildConfigWatcher.java index e8e8a0a0f..b04278687 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/BuildConfigWatcher.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildConfigWatcher.java @@ -99,29 +99,7 @@ public void doRun() { return; } for (String namespace : namespaces) { - BuildConfigList buildConfigs = null; - try { - logger.fine("listing BuildConfigs resources"); - buildConfigs = getAuthenticatedOpenShiftClient().buildConfigs().inNamespace(namespace).list(); - onInitialBuildConfigs(buildConfigs); - logger.fine("handled BuildConfigs resources"); - } catch (Exception e) { - logger.log(SEVERE, "Failed to load BuildConfigs: " + e, e); - } - try { - String resourceVersion = "0"; - if (buildConfigs == null) { - logger.warning("Unable to get build config list; impacts resource version used for watch"); - } else { - resourceVersion = buildConfigs.getMetadata().getResourceVersion(); - } - if (watches.get(namespace) == null) { - logger.info("creating BuildConfig watch for namespace " + namespace + " and resource version " + resourceVersion); - addWatch(namespace, getAuthenticatedOpenShiftClient().buildConfigs().inNamespace(namespace).withResourceVersion(resourceVersion).watch(new 
WatcherCallback(BuildConfigWatcher.this,namespace))); - } - } catch (Exception e) { - logger.log(SEVERE, "Failed to load BuildConfigs: " + e, e); - } + addWatchForNamespace(namespace); } // poke the BuildWatcher builds with no BC list and see if we // can create job @@ -130,6 +108,42 @@ public void doRun() { } }; } + + public void addWatchForNamespace(String namespace) { + BuildConfigList buildConfigs = null; + try { + logger.fine("listing BuildConfigs resources"); + buildConfigs = getAuthenticatedOpenShiftClient().buildConfigs().inNamespace(namespace).list(); + onInitialBuildConfigs(buildConfigs); + logger.fine("handled BuildConfigs resources"); + } catch (Exception e) { + logger.log(SEVERE, "Failed to load BuildConfigs: " + e, e); + } + try { + String resourceVersion = "0"; + if (buildConfigs == null) { + logger.warning("Unable to get build config list; impacts resource version used for watch"); + } else { + resourceVersion = buildConfigs.getMetadata().getResourceVersion(); + } + if (watches.get(namespace) == null) { + logger.info("creating BuildConfig watch for namespace " + namespace + " and resource version " + resourceVersion); + addWatch(namespace, getAuthenticatedOpenShiftClient() + .buildConfigs() + .inNamespace(namespace) + .withResourceVersion(resourceVersion) + .watch(new WatcherCallback(BuildConfigWatcher.this,namespace))); + } + } catch (Exception e) { + logger.log(SEVERE, "Failed to load BuildConfigs: " + e, e); + } + } + + public void startAfterOnClose(String namespace) { + synchronized (this.lock) { + addWatchForNamespace(namespace); + } + } public void start() { initializeBuildConfigToJobMap(); diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/BuildWatcher.java b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildWatcher.java index 5762d1b40..5214e9e45 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/BuildWatcher.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildWatcher.java @@ -103,35 +103,42 @@ public void 
doRun() { // about BuildWatcher.flushBuildsWithNoBCList(); for (String namespace : namespaces) { - BuildList newBuilds = null; - try { - logger.fine("listing Build resources"); - newBuilds = getAuthenticatedOpenShiftClient() - .builds() - .inNamespace(namespace) - .withField(OPENSHIFT_BUILD_STATUS_FIELD, + addWatchForNamespace(namespace); + } + reconcileRunsAndBuilds(); + } + }; + } + + public void addWatchForNamespace(String namespace) { + BuildList newBuilds = null; + try { + logger.fine("listing Build resources"); + newBuilds = getAuthenticatedOpenShiftClient() + .builds() + .inNamespace(namespace) + .withField(OPENSHIFT_BUILD_STATUS_FIELD, BuildPhases.NEW).list(); - onInitialBuilds(newBuilds); - logger.fine("handled Build resources"); - } catch (Exception e) { - logger.log(Level.SEVERE, - "Failed to load initial Builds: " + e, e); - } - try { - String resourceVersion = "0"; - if (newBuilds == null) { - logger.warning("Unable to get build list; impacts resource version used for watch"); - } else { - resourceVersion = newBuilds.getMetadata() + onInitialBuilds(newBuilds); + logger.fine("handled Build resources"); + } catch (Exception e) { + logger.log(Level.SEVERE,"Failed to load initial Builds: " + e, e); + } + try { + String resourceVersion = "0"; + if (newBuilds == null) { + logger.warning("Unable to get build list; impacts resource version used for watch"); + } else { + resourceVersion = newBuilds.getMetadata() .getResourceVersion(); - } - if (watches.get(namespace) == null) { - logger.info("creating Build watch for namespace " + } + if (watches.get(namespace) == null) { + logger.info("creating Build watch for namespace " + namespace + " and resource version " + resourceVersion); - addWatch(namespace, getAuthenticatedOpenShiftClient() + addWatch(namespace, getAuthenticatedOpenShiftClient() .builds() .inNamespace(namespace) .withResourceVersion( @@ -139,15 +146,16 @@ public void doRun() { .watch(new WatcherCallback( BuildWatcher.this, namespace))); - } - } 
catch (Exception e) { - logger.log(Level.SEVERE, - "Failed to load initial Builds: " + e, e); - } } - reconcileRunsAndBuilds(); - } - }; + } catch (Exception e) { + logger.log(Level.SEVERE,"Failed to load initial Builds: " + e, e); + } + } + + public void startAfterOnClose(String namespace) { + synchronized (this.lock) { + addWatchForNamespace(namespace); + } } public void start() { diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/ConfigMapWatcher.java b/src/main/java/io/fabric8/jenkins/openshiftsync/ConfigMapWatcher.java index 10b99d86f..1c57f3fea 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/ConfigMapWatcher.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/ConfigMapWatcher.java @@ -53,40 +53,51 @@ public void doRun() { return; } for (String namespace : namespaces) { - ConfigMapList configMaps = null; - try { - LOGGER.fine("listing ConfigMap resources"); - configMaps = getAuthenticatedOpenShiftClient() + addWatchForNamespace(namespace); + } + } + }; + } + + public void addWatchForNamespace(String namespace) { + ConfigMapList configMaps = null; + try { + LOGGER.fine("listing ConfigMap resources"); + configMaps = getAuthenticatedOpenShiftClient() .configMaps().inNamespace(namespace).list(); onInitialConfigMaps(configMaps); - LOGGER.fine("handled ConfigMap resources"); - } catch (Exception e) { - LOGGER.log(SEVERE, "Failed to load ConfigMaps: " + e, e); - } - try { - String resourceVersion = "0"; - if (configMaps == null) { - LOGGER.warning("Unable to get config map list; impacts resource version used for watch"); - } else { - resourceVersion = configMaps.getMetadata().getResourceVersion(); - } - if (watches.get(namespace) == null) { - LOGGER.info("creating ConfigMap watch for namespace " + LOGGER.fine("handled ConfigMap resources"); + } catch (Exception e) { + LOGGER.log(SEVERE, "Failed to load ConfigMaps: " + e, e); + } + try { + String resourceVersion = "0"; + if (configMaps == null) { + LOGGER.warning("Unable to get config map 
list; impacts resource version used for watch"); + } else { + resourceVersion = configMaps.getMetadata().getResourceVersion(); + } + if (watches.get(namespace) == null) { + LOGGER.info("creating ConfigMap watch for namespace " + namespace + " and resource version " + resourceVersion); - addWatch(namespace, - getAuthenticatedOpenShiftClient() - .configMaps() - .inNamespace(namespace) - .withResourceVersion(resourceVersion).watch(new WatcherCallback(ConfigMapWatcher.this,namespace))); - } - } catch (Exception e) { - LOGGER.log(SEVERE, "Failed to load ConfigMaps: " + e, e); - } - } + addWatch(namespace, + getAuthenticatedOpenShiftClient() + .configMaps() + .inNamespace(namespace) + .withResourceVersion(resourceVersion).watch(new WatcherCallback(ConfigMapWatcher.this,namespace))); } - }; + } catch (Exception e) { + LOGGER.log(SEVERE, "Failed to load ConfigMaps: " + e, e); + } + + } + + public void startAfterOnClose(String namespace) { + synchronized (this.lock) { + addWatchForNamespace(namespace); + } } public void start() { diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/ImageStreamWatcher.java b/src/main/java/io/fabric8/jenkins/openshiftsync/ImageStreamWatcher.java index b8ea01954..1f65f86b6 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/ImageStreamWatcher.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/ImageStreamWatcher.java @@ -51,37 +51,47 @@ public void doRun() { logger.fine("No Openshift Token credential defined."); return; } - for (String ns : namespaces) { - ImageStreamList imageStreams = null; - try { - logger.fine("listing ImageStream resources"); - imageStreams = OpenShiftUtils.getOpenshiftClient().imageStreams().inNamespace(ns).list(); - onImageStreamInitialization(imageStreams); - logger.fine("handled ImageStream resources"); - } catch (Exception e) { - logger.log(SEVERE, "Failed to load ImageStreams: " + e, e); - } - try { - String resourceVersion = "0"; - if (imageStreams == null) { - logger.warning("Unable to get 
image stream list; impacts resource version used for watch"); - } else { - resourceVersion = imageStreams.getMetadata().getResourceVersion(); - } - if (watches.get(ns) == null) { - logger.info("creating ImageStream watch for namespace " + ns + " and resource version " + resourceVersion); - ImageStreamWatcher w = ImageStreamWatcher.this; - WatcherCallback watcher = new WatcherCallback(w, ns); - addWatch(ns, OpenShiftUtils.getOpenshiftClient().imageStreams().inNamespace(ns).withResourceVersion(resourceVersion).watch(watcher)); - } - } catch (Exception e) { - logger.log(SEVERE, "Failed to load ImageStreams: " + e, e); - } + for (String namespace : namespaces) { + addWatchForNamespace(namespace); } } }; } + public void addWatchForNamespace(String namespace) { + ImageStreamList imageStreams = null; + try { + logger.fine("listing ImageStream resources"); + imageStreams = OpenShiftUtils.getOpenshiftClient().imageStreams().inNamespace(namespace).list(); + onImageStreamInitialization(imageStreams); + logger.fine("handled ImageStream resources"); + } catch (Exception e) { + logger.log(SEVERE, "Failed to load ImageStreams: " + e, e); + } + try { + String resourceVersion = "0"; + if (imageStreams == null) { + logger.warning("Unable to get image stream list; impacts resource version used for watch"); + } else { + resourceVersion = imageStreams.getMetadata().getResourceVersion(); + } + if (watches.get(namespace) == null) { + logger.info("creating ImageStream watch for namespace " + namespace + " and resource version " + resourceVersion); + ImageStreamWatcher w = ImageStreamWatcher.this; + WatcherCallback watcher = new WatcherCallback(w, namespace); + addWatch(namespace, OpenShiftUtils.getOpenshiftClient().imageStreams().inNamespace(namespace).withResourceVersion(resourceVersion).watch(watcher)); + } + } catch (Exception e) { + logger.log(SEVERE, "Failed to load ImageStreams: " + e, e); + } + } + + public void startAfterOnClose(String namespace) { + synchronized (this.lock) { + 
addWatchForNamespace(namespace); + } + } + public void start() { // lets process the initial state logger.info("Now handling startup image streams!!"); diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/SecretWatcher.java b/src/main/java/io/fabric8/jenkins/openshiftsync/SecretWatcher.java index c5cfa3a42..56002b43d 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/SecretWatcher.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/SecretWatcher.java @@ -59,47 +59,54 @@ public void doRun() { return; } for (String namespace : namespaces) { - SecretList secrets = null; - try { - logger.fine("listing Secrets resources"); - secrets = getAuthenticatedOpenShiftClient().secrets() - .inNamespace(namespace) - .withLabel(Constants.OPENSHIFT_LABELS_SECRET_CREDENTIAL_SYNC, Constants.VALUE_SECRET_SYNC).list(); - onInitialSecrets(secrets); - logger.fine("handled Secrets resources"); - } catch (Exception e) { - logger.log(SEVERE, "Failed to load Secrets: " + e, e); - } - try { - String resourceVersion = "0"; - if (secrets == null) { - logger.warning("Unable to get secret list; impacts resource version used for watch"); - } else { - resourceVersion = secrets.getMetadata() + addWatchForNamespace(namespace); + } + + } + }; + } + + public void addWatchForNamespace(String namespace) { + SecretList secrets = null; + try { + logger.fine("listing Secrets resources"); + secrets = getAuthenticatedOpenShiftClient().secrets() + .inNamespace(namespace) + .withLabel(Constants.OPENSHIFT_LABELS_SECRET_CREDENTIAL_SYNC, Constants.VALUE_SECRET_SYNC).list(); + onInitialSecrets(secrets); + logger.fine("handled Secrets resources"); + } catch (Exception e) { + logger.log(SEVERE, "Failed to load Secrets: " + e, e); + } + try { + String resourceVersion = "0"; + if (secrets == null) { + logger.warning("Unable to get secret list; impacts resource version used for watch"); + } else { + resourceVersion = secrets.getMetadata() .getResourceVersion(); - } - if (watches.get(namespace) == 
null) { - logger.info("creating Secret watch for namespace " + } + if (watches.get(namespace) == null) { + logger.info("creating Secret watch for namespace " + namespace + " and resource version" + resourceVersion); - addWatch(namespace, - getAuthenticatedOpenShiftClient() - .secrets() - .inNamespace(namespace) - .withLabel(Constants.OPENSHIFT_LABELS_SECRET_CREDENTIAL_SYNC, + addWatch(namespace, getAuthenticatedOpenShiftClient() + .secrets() + .inNamespace(namespace) + .withLabel(Constants.OPENSHIFT_LABELS_SECRET_CREDENTIAL_SYNC, Constants.VALUE_SECRET_SYNC) - .withResourceVersion( - resourceVersion) - .watch(new WatcherCallback(SecretWatcher.this, - namespace))); + .withResourceVersion(resourceVersion) + .watch(new WatcherCallback(SecretWatcher.this,namespace))); } - } catch (Exception e) { - logger.log(SEVERE, "Failed to load Secrets: " + e, e); - } - } + } catch (Exception e) { + logger.log(SEVERE, "Failed to load Secrets: " + e, e); + } + } - } - }; + public void startAfterOnClose(String namespace) { + synchronized (this.lock) { + addWatchForNamespace(namespace); + } } public void start() { From 3361a3e9083ec8e30eb27bf960b308d2b0a4499e Mon Sep 17 00:00:00 2001 From: Akram Ben Aissi Date: Tue, 6 Apr 2021 13:46:35 +0200 Subject: [PATCH 04/22] Adding logs --- .../java/io/fabric8/jenkins/openshiftsync/BaseWatcher.java | 3 ++- .../io/fabric8/jenkins/openshiftsync/WatcherCallback.java | 5 +++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/BaseWatcher.java b/src/main/java/io/fabric8/jenkins/openshiftsync/BaseWatcher.java index f52b63b74..e8cb784e9 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/BaseWatcher.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/BaseWatcher.java @@ -19,6 +19,7 @@ import io.fabric8.kubernetes.client.KubernetesClientException; import io.fabric8.kubernetes.client.Watch; +import io.fabric8.kubernetes.client.Watcher; import 
io.fabric8.kubernetes.client.WatcherException; import java.util.ArrayList; @@ -34,7 +35,7 @@ import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; import jenkins.util.Timer; -public abstract class BaseWatcher { +public abstract class BaseWatcher{ private final Logger LOGGER = Logger.getLogger(BaseWatcher.class.getName()); protected ScheduledFuture relister; diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/WatcherCallback.java b/src/main/java/io/fabric8/jenkins/openshiftsync/WatcherCallback.java index 47ac9d361..49c0c3bb6 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/WatcherCallback.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/WatcherCallback.java @@ -15,7 +15,8 @@ */ package io.fabric8.jenkins.openshiftsync; -import io.fabric8.kubernetes.client.KubernetesClientException; +import org.slf4j.LoggerFactory; + import io.fabric8.kubernetes.client.Watcher; import io.fabric8.kubernetes.client.WatcherException; @@ -37,8 +38,8 @@ public void eventReceived(io.fabric8.kubernetes.client.Watcher.Action action, T @Override public void onClose(WatcherException cause) { + LoggerFactory.getLogger(WatcherCallback.class).debug("Watcher closed: " + watcher + " , for namespace: " + namespace); watcher.onClose(cause, namespace); - } } From 2f9f065783e0a040f62fa69f57679cf1a67b362c Mon Sep 17 00:00:00 2001 From: Akram Ben Aissi Date: Wed, 7 Apr 2021 03:21:38 +0200 Subject: [PATCH 05/22] Simplify and cleanup all closes --- pom.xml | 2 +- .../jenkins/openshiftsync/BaseWatcher.java | 250 +++--- .../openshiftsync/BuildConfigToJobMap.java | 44 +- .../openshiftsync/BuildConfigWatcher.java | 142 ++- .../jenkins/openshiftsync/BuildWatcher.java | 345 +++----- .../openshiftsync/ConfigMapWatcher.java | 108 +-- .../GlobalPluginConfiguration.java | 57 +- .../GlobalPluginConfigurationTimerTask.java | 60 +- .../openshiftsync/ImageStreamWatcher.java | 87 +- .../openshiftsync/PodTemplateUtils.java | 825 ++++++++++-------- 
.../jenkins/openshiftsync/SecretWatcher.java | 217 ++--- .../openshiftsync/WatcherCallback.java | 45 - 12 files changed, 1050 insertions(+), 1132 deletions(-) delete mode 100644 src/main/java/io/fabric8/jenkins/openshiftsync/WatcherCallback.java diff --git a/pom.xml b/pom.xml index bf91f5f61..d072cd467 100644 --- a/pom.xml +++ b/pom.xml @@ -157,7 +157,7 @@ org.csanchez.jenkins.plugins kubernetes - 1.29.3 + 1.28.3 org.jenkins-ci.plugins diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/BaseWatcher.java b/src/main/java/io/fabric8/jenkins/openshiftsync/BaseWatcher.java index 7f9807d40..2c416eeb8 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/BaseWatcher.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/BaseWatcher.java @@ -15,158 +15,170 @@ */ package io.fabric8.jenkins.openshiftsync; -import static java.net.HttpURLConnection.HTTP_GONE; +import static org.apache.commons.lang.builder.ToStringStyle.DEFAULT_STYLE; -import io.fabric8.kubernetes.client.KubernetesClientException; -import io.fabric8.kubernetes.client.Watch; -import io.fabric8.kubernetes.client.Watcher; -import io.fabric8.kubernetes.client.WatcherException; - -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ScheduledFuture; -import java.util.concurrent.TimeUnit; import java.util.logging.Logger; -import org.csanchez.jenkins.plugins.kubernetes.PodTemplate; +import org.apache.commons.lang.builder.ReflectionToStringBuilder; import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; -import jenkins.util.Timer; +import hudson.triggers.SafeTimerTask; +import io.fabric8.kubernetes.client.KubernetesClientException; +import io.fabric8.kubernetes.client.Watch; +import io.fabric8.kubernetes.client.Watcher; +import io.fabric8.kubernetes.client.WatcherException; -public abstract class BaseWatcher{ +public abstract class BaseWatcher implements Watcher { private final Logger LOGGER = 
Logger.getLogger(BaseWatcher.class.getName()); - protected ScheduledFuture relister; - protected final Object lock = new Object(); - protected final String[] namespaces; - protected ConcurrentHashMap watches; - private final String PT_NAME_CLAIMED = "The event for %s | %s | %s that attempts to add the pod template %s was ignored because a %s previously created a pod template with the same name"; - private final String PT_NOT_OWNED = "The event for %s | %s | %s that no longer includes the pod template %s was ignored because the type %s was associated with that pod template"; + // protected ScheduledFuture relister; + protected final transient Object lock = new Object(); + // protected ConcurrentHashMap watches; + protected final String namespace; + protected Watch watch; @SuppressFBWarnings("EI_EXPOSE_REP2") - public BaseWatcher(String[] namespaces) { - this.namespaces = namespaces; - watches = new ConcurrentHashMap<>(); + public BaseWatcher(String namespace) { + this.namespace = namespace; + // this.watches = new ConcurrentHashMap<>(); } - public abstract Runnable getStartTimerTask(); +// public abstract Runnable getStartTimerTask(); public abstract int getListIntervalInSeconds(); - public abstract void eventReceived(io.fabric8.kubernetes.client.Watcher.Action action, T resource); + protected abstract void start(); - public synchronized void start() { - // lets do this in a background thread to avoid errors like: - // Tried proxying - // io.fabric8.jenkins.openshiftsync.GlobalPluginConfiguration to support - // a circular dependency, but it is not an interface. 
- Runnable task = getStartTimerTask(); - relister = Timer.get().scheduleAtFixedRate(task, 100, // still do the - // first run 100 - // milliseconds in - getListIntervalInSeconds() * 1000, - TimeUnit.MILLISECONDS); - - } - public abstract void startAfterOnClose(String namespace); - - public void stop() { - if (relister != null && !relister.isDone()) { - relister.cancel(true); - relister = null; - } - - for (Map.Entry entry : watches.entrySet()) { - entry.getValue().close(); - watches.remove(entry.getKey()); + @Override + public void onClose(WatcherException cause) { + Watcher watcher = this; + LOGGER.info("Closing watcher: cause: " + cause + ", watcher: " + watcher); + if (cause != null) { + synchronized (this.lock) { + LOGGER.info("Watcher stopped unexpectedly for : " + this.namespace + ", will restart:" + cause); + this.watch.close(); + this.watch = null; + this.start(); + } } } - public void stop(String namespace) { - Watch watch = watches.get(namespace) - if (watch != null) { - watch.close(); - watches.remove(namespace); - } + public void onClose(KubernetesClientException cause) { + this.onClose(new WatcherException(cause.getMessage(), cause)); } - public void onClose(KubernetesClientException e, String namespace) { - //scans of fabric client confirm this call be called with null - //we do not want to totally ignore this, as the closing of the - //watch can effect responsiveness - LOGGER.info("Watch for type " + this.getClass().getName() + " closed for namespace : " + namespace); - if (e != null) { - synchronized (this.lock) { - LOGGER.warning(e.toString()); - stop(namespace); - startAfterOnClose(namespace); + public final Runnable getStartTimerTask() { + return new SafeTimerTask() { + @Override + public void doRun() { + if (!CredentialsUtils.hasCredentials()) { + LOGGER.fine("No Openshift Token credential defined."); + return; + } + start(); } - } + }; } - public void addWatch(String key, Watch desiredWatch) { - Watch watch = watches.putIfAbsent(key, 
desiredWatch); - if (watch != null) { - watch.close(); +// public abstract void eventReceived(io.fabric8.kubernetes.client.Watcher.Action action, T resource); +// public abstract void eventReceived(io.fabric8.kubernetes.client.Watcher.Action action, T resource); + + public void startAfterOnClose(String namespace) { + synchronized (this.lock) { + start(); } } - protected void processSlavesForAddEvent(List slaves, String type, String uid, String apiObjName, String namespace) { - LOGGER.info("Adding PodTemplate(s) for "); - List finalSlaveList = new ArrayList(); - for (PodTemplate podTemplate : slaves) { - PodTemplateUtils.addPodTemplate(this, type, apiObjName, namespace, finalSlaveList, podTemplate); - } - PodTemplateUtils.updateTrackedPodTemplatesMap(uid, finalSlaveList); +// @Override +// public void onClose(WatcherException cause) { +// Watcher watcher = this; +// String namespace = getNamespace(); +// LOGGER.info("Closing watcher: cause: " + cause + ", watcher: " + watcher); +// // TODO implement here what should be done when closing this watcher +// // TODO Let's reimplement it, using Observer pattern and notifying the +// // GlobalPluginConfiguration listener +// // super.onClose(cause); +// } + + @Override + public void onClose() { + Watcher watcher = this; + String namespace = getNamespace(); + LOGGER.info("Closing watcher without cause: " + watcher); + WatcherException cause = new WatcherException("Received closed event without exception"); + // TODO implement here what should be done when closing this watcher + // TODO Let's reimplement it, using Observer pattern and notifying the + // GlobalPluginConfiguration listener + // super.onClose(cause); + // watcher.onClose(cause); } - protected void processSlavesForModifyEvent(List slaves, String type, String uid, String apiObjName, String namespace) { - LOGGER.info("Modifying PodTemplates"); - boolean alreadyTracked = PodTemplateUtils.trackedPodTemplates.containsKey(uid); - boolean hasSlaves = slaves.size() > 0; 
// Configmap has podTemplates - if (alreadyTracked) { - if (hasSlaves) { - // Since the user could have change the immutable image - // that a PodTemplate uses, we just - // recreate the PodTemplate altogether. This makes it so - // that any changes from within - // Jenkins is undone. - - // Check if there are new PodTemplates added or removed to the configmap, - // if they are, add them to or remove them from trackedPodTemplates - List podTemplatesToTrack = new ArrayList(); - PodTemplateUtils.purgeTemplates(this, type, uid, apiObjName, namespace); - for(PodTemplate pt: slaves){ - podTemplatesToTrack = PodTemplateUtils.onlyTrackPodTemplate(this, type,apiObjName,namespace,podTemplatesToTrack, pt); - } - PodTemplateUtils.updateTrackedPodTemplatesMap(uid, podTemplatesToTrack); - for (PodTemplate podTemplate : podTemplatesToTrack) { - // still do put here in case this is a new item from the last - // update on this ConfigMap/ImageStream - PodTemplateUtils.addPodTemplate(this, type,null,null,null, podTemplate); - } - } else { - // The user modified the configMap to no longer be a - // jenkins-slave. - PodTemplateUtils.purgeTemplates(this, type, uid, apiObjName, namespace); - } - } else { - if (hasSlaves) { - List finalSlaveList = new ArrayList(); - for (PodTemplate podTemplate : slaves) { - // The user modified the api obj to be a jenkins-slave - PodTemplateUtils.addPodTemplate(this, type, apiObjName, namespace, finalSlaveList, podTemplate); + // public synchronized void start() { + // lets do this in a background thread to avoid errors like: + // Tried proxying + // io.fabric8.jenkins.openshiftsync.GlobalPluginConfiguration to support + // a circular dependency, but it is not an interface. 
+ // Runnable task = getStartTimerTask(); + // still do the first run 100 milliseconds in + // this.relister = Timer.get().scheduleAtFixedRate(task, 100, + // getListIntervalInSeconds() * 1000, MILLISECONDS); + // } + + public void stop() { + if (this.watch != null) { + synchronized (this.lock) { + if (this.watch != null) { + LOGGER.info("Stopping watcher: " + this + " by closing its watch: " + this.watch); + this.watch.close(); + this.watch = null; } - PodTemplateUtils.updateTrackedPodTemplatesMap(uid, finalSlaveList); } } + /* + * if (relister != null && !relister.isDone()) { relister.cancel(true); relister + * = null; } + * + * for (Map.Entry entry : watches.entrySet()) { + * entry.getValue().close(); watches.remove(entry.getKey()); } + */ } - protected void processSlavesForDeleteEvent(List slaves, String type, String uid, String apiObjName, String namespace) { - if (PodTemplateUtils.trackedPodTemplates.containsKey(uid)) { - PodTemplateUtils.purgeTemplates(this, type, uid, apiObjName, namespace); - } +// public void stop(String namespace) { +// Watch watch = watches.get(namespace); +// if (watch != null) { +// watch.close(); +// watches.remove(namespace); +// } +// } + // @Override +// public void onClose(WatcherException e, String namespace) { +// // scans of fabric client confirm this call be called with null +// // we do not want to totally ignore this, as the closing of the +// // watch can effect responsiveness +// LOGGER.info("Watch for type " + this.getClass().getName() + " closed for namespace : " + namespace); +// if (e != null) { +// synchronized (this.lock) { +// LOGGER.severe("Exception while watching namespace: " + namespace + ", " + e.toString()); +// // stop(namespace); +// // startAfterOnClose(namespace); +// } +// } +// } + +// public void addWatch(String key, Watch desiredWatch) { +// Watch watch = watches.putIfAbsent(key, desiredWatch); +// if (watch != null) { +// watch.close(); +// } +// } + + public String toString() { + return 
ReflectionToStringBuilder.toString(this, DEFAULT_STYLE, false, false) + + ReflectionToStringBuilder.toString(this.watch); + } + + public String getNamespace() { + return namespace; } + } diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/BuildConfigToJobMap.java b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildConfigToJobMap.java index 05e19588e..8091052c3 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/BuildConfigToJobMap.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildConfigToJobMap.java @@ -10,6 +10,7 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.logging.Logger; +import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.jenkinsJobName; import static org.apache.commons.lang.StringUtils.isBlank; import static org.apache.commons.lang.StringUtils.isNotBlank; @@ -17,32 +18,29 @@ public class BuildConfigToJobMap { private final static Logger logger = Logger.getLogger(BuildConfigToJobMap.class.getName()); private static ConcurrentHashMap buildConfigToJobMap; - + private BuildConfigToJobMap() { } static synchronized void initializeBuildConfigToJobMap() { if (buildConfigToJobMap == null) { - List jobs = Jenkins.getActiveInstance().getAllItems( - WorkflowJob.class); + List jobs = Jenkins.getActiveInstance().getAllItems(WorkflowJob.class); buildConfigToJobMap = new ConcurrentHashMap<>(jobs.size()); for (WorkflowJob job : jobs) { - BuildConfigProjectProperty buildConfigProjectProperty = job - .getProperty(BuildConfigProjectProperty.class); - if (buildConfigProjectProperty == null) { - continue; - } - String namespace = buildConfigProjectProperty.getNamespace(); - String name = buildConfigProjectProperty.getName(); - if (isNotBlank(namespace) && isNotBlank(name)) { - buildConfigToJobMap.put(OpenShiftUtils.jenkinsJobName(namespace, name), job); + BuildConfigProjectProperty property = job.getProperty(BuildConfigProjectProperty.class); + if (property != null) { + String namespace = property.getNamespace(); + 
String name = property.getName(); + if (isNotBlank(namespace) && isNotBlank(name)) { + String jenkinsJobName = jenkinsJobName(namespace, name); + buildConfigToJobMap.put(jenkinsJobName, job); + } } } } } - static WorkflowJob getJobFromBuildConfig( - BuildConfig buildConfig) { + static WorkflowJob getJobFromBuildConfig(BuildConfig buildConfig) { ObjectMeta meta = buildConfig.getMetadata(); if (meta == null) { return null; @@ -57,8 +55,7 @@ static WorkflowJob getJobFromBuildConfigNameNamespace(String name, String namesp return buildConfigToJobMap.get(OpenShiftUtils.jenkinsJobName(namespace, name)); } - static void putJobWithBuildConfig(WorkflowJob job, - BuildConfig buildConfig) { + static void putJobWithBuildConfig(WorkflowJob job, BuildConfig buildConfig) { if (buildConfig == null) { throw new IllegalArgumentException("BuildConfig cannot be null"); } @@ -67,17 +64,14 @@ static void putJobWithBuildConfig(WorkflowJob job, } ObjectMeta meta = buildConfig.getMetadata(); if (meta == null) { - throw new IllegalArgumentException( - "BuildConfig must contain valid metadata"); + throw new IllegalArgumentException("BuildConfig must contain valid metadata"); } putJobWithBuildConfigNameNamespace(job, meta.getName(), meta.getNamespace()); } - static void putJobWithBuildConfigNameNamespace(WorkflowJob job, - String name, String namespace) { + static void putJobWithBuildConfigNameNamespace(WorkflowJob job, String name, String namespace) { if (isBlank(name) || isBlank(namespace)) { - throw new IllegalArgumentException( - "BuildConfig name and namespace must not be blank"); + throw new IllegalArgumentException("BuildConfig name and namespace must not be blank"); } buildConfigToJobMap.put(OpenShiftUtils.jenkinsJobName(namespace, name), job); } @@ -88,16 +82,14 @@ static void removeJobWithBuildConfig(BuildConfig buildConfig) { } ObjectMeta meta = buildConfig.getMetadata(); if (meta == null) { - throw new IllegalArgumentException( - "BuildConfig must contain valid metadata"); + throw 
new IllegalArgumentException("BuildConfig must contain valid metadata"); } removeJobWithBuildConfigNameNamespace(meta.getName(), meta.getNamespace()); } static void removeJobWithBuildConfigNameNamespace(String name, String namespace) { if (isBlank(name) || isBlank(namespace)) { - throw new IllegalArgumentException( - "BuildConfig name/namepsace must not be blank"); + throw new IllegalArgumentException("BuildConfig name/namepsace must not be blank"); } buildConfigToJobMap.remove(OpenShiftUtils.jenkinsJobName(namespace, name)); } diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/BuildConfigWatcher.java b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildConfigWatcher.java index b04278687..1f5344476 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/BuildConfigWatcher.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildConfigWatcher.java @@ -15,50 +15,41 @@ */ package io.fabric8.jenkins.openshiftsync; -import com.cloudbees.hudson.plugins.folder.Folder; +import static io.fabric8.jenkins.openshiftsync.BuildConfigToJobMap.getJobFromBuildConfig; +import static io.fabric8.jenkins.openshiftsync.BuildConfigToJobMap.initializeBuildConfigToJobMap; +import static io.fabric8.jenkins.openshiftsync.BuildConfigToJobMap.removeJobWithBuildConfig; +import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_BUILD_STATUS_FIELD; +import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_LABELS_BUILD_CONFIG_NAME; +import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getAuthenticatedOpenShiftClient; +import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getOpenshiftClient; +import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.isPipelineStrategyBuildConfig; +import static java.util.logging.Level.SEVERE; + +import java.util.List; +import java.util.concurrent.TimeUnit; +import java.util.logging.Level; +import java.util.logging.Logger; + +import org.eclipse.jetty.util.ConcurrentHashSet; import 
edu.umd.cs.findbugs.annotations.SuppressFBWarnings; -import hudson.BulkChange; -import hudson.model.ItemGroup; import hudson.model.Job; -import hudson.model.ParameterDefinition; import hudson.security.ACL; import hudson.triggers.SafeTimerTask; -import hudson.util.XStream2; -import io.fabric8.kubernetes.client.Watcher.Action; import io.fabric8.openshift.api.model.BuildConfig; import io.fabric8.openshift.api.model.BuildConfigList; import io.fabric8.openshift.api.model.BuildList; +import io.fabric8.openshift.client.OpenShiftClient; import jenkins.model.Jenkins; import jenkins.security.NotReallyRoleSensitiveCallable; import jenkins.util.Timer; -import org.apache.tools.ant.filters.StringInputStream; -import org.eclipse.jetty.util.ConcurrentHashSet; -import org.jenkinsci.plugins.workflow.flow.FlowDefinition; -import org.jenkinsci.plugins.workflow.job.WorkflowJob; - -import java.io.InputStream; -import java.util.List; -import java.util.Map; -import java.util.concurrent.TimeUnit; -import java.util.logging.Level; -import java.util.logging.Logger; - -import static io.fabric8.jenkins.openshiftsync.BuildConfigToJobMap.getJobFromBuildConfig; -import static io.fabric8.jenkins.openshiftsync.BuildConfigToJobMap.initializeBuildConfigToJobMap; -import static io.fabric8.jenkins.openshiftsync.BuildConfigToJobMap.removeJobWithBuildConfig; -import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_BUILD_STATUS_FIELD; -import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_LABELS_BUILD_CONFIG_NAME; -import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.*; -import static java.util.logging.Level.SEVERE; - /** * Watches {@link BuildConfig} objects in OpenShift and for WorkflowJobs we * ensure there is a suitable Jenkins Job object defined with the correct * configuration */ -public class BuildConfigWatcher extends BaseWatcher { +public class BuildConfigWatcher extends BaseWatcher { private final Logger logger = Logger.getLogger(getClass().getName()); // for 
coordinating between ItemListener.onUpdate and onDeleted both @@ -81,8 +72,8 @@ public static void deleteCompleted(String bcID) { } @SuppressFBWarnings("EI_EXPOSE_REP2") - public BuildConfigWatcher(String[] namespaces) { - super(namespaces); + public BuildConfigWatcher(String namespace) { + super(namespace); } @Override @@ -90,66 +81,44 @@ public int getListIntervalInSeconds() { return GlobalPluginConfiguration.get().getBuildConfigListInterval(); } - public Runnable getStartTimerTask() { - return new SafeTimerTask() { - @Override - public void doRun() { - if (!CredentialsUtils.hasCredentials()) { - logger.fine("No Openshift Token credential defined."); - return; - } - for (String namespace : namespaces) { - addWatchForNamespace(namespace); - } - // poke the BuildWatcher builds with no BC list and see if we - // can create job - // runs for premature builds - BuildWatcher.flushBuildsWithNoBCList(); - } - }; - } - - public void addWatchForNamespace(String namespace) { + public void start() { + initializeBuildConfigToJobMap(); + logger.info("Now handling startup build configs for namespace: " + namespace + " !!"); BuildConfigList buildConfigs = null; + String ns = this.namespace; try { logger.fine("listing BuildConfigs resources"); - buildConfigs = getAuthenticatedOpenShiftClient().buildConfigs().inNamespace(namespace).list(); + OpenShiftClient client = getAuthenticatedOpenShiftClient(); + buildConfigs = client.buildConfigs().inNamespace(ns).list(); onInitialBuildConfigs(buildConfigs); logger.fine("handled BuildConfigs resources"); } catch (Exception e) { logger.log(SEVERE, "Failed to load BuildConfigs: " + e, e); } try { - String resourceVersion = "0"; + String rv = "0"; if (buildConfigs == null) { logger.warning("Unable to get build config list; impacts resource version used for watch"); } else { - resourceVersion = buildConfigs.getMetadata().getResourceVersion(); + rv = buildConfigs.getMetadata().getResourceVersion(); } - if (watches.get(namespace) == null) { - 
logger.info("creating BuildConfig watch for namespace " + namespace + " and resource version " + resourceVersion); - addWatch(namespace, getAuthenticatedOpenShiftClient() - .buildConfigs() - .inNamespace(namespace) - .withResourceVersion(resourceVersion) - .watch(new WatcherCallback(BuildConfigWatcher.this,namespace))); + + if (this.watch == null) { + synchronized (this.lock) { + if (this.watch == null) { + logger.info("creating BuildConfig watch for namespace " + ns + " and resource version " + rv); + OpenShiftClient client = getOpenshiftClient(); + this.watch = client.buildConfigs().inNamespace(ns).withResourceVersion(rv).watch(this); + } + } } } catch (Exception e) { logger.log(SEVERE, "Failed to load BuildConfigs: " + e, e); - } - } - - public void startAfterOnClose(String namespace) { - synchronized (this.lock) { - addWatchForNamespace(namespace); } - } - - public void start() { - initializeBuildConfigToJobMap(); - logger.info("Now handling startup build configs!!"); - super.start(); - + // poke the BuildWatcher builds with no BC list and see if we + // can create job + // runs for premature builds + BuildWatcher.flushBuildsWithNoBCList(); } private void onInitialBuildConfigs(BuildConfigList buildConfigs) { @@ -168,6 +137,7 @@ private void onInitialBuildConfigs(BuildConfigList buildConfigs) { } @SuppressFBWarnings("SF_SWITCH_NO_DEFAULT") + @Override public void eventReceived(Action action, BuildConfig buildConfig) { try { switch (action) { @@ -181,10 +151,12 @@ public void eventReceived(Action action, BuildConfig buildConfig) { modifyEventToJenkinsJob(buildConfig); break; case ERROR: - logger.warning("watch for buildconfig " + buildConfig.getMetadata().getName() + " received error event "); + logger.warning( + "watch for buildconfig " + buildConfig.getMetadata().getName() + " received error event "); break; default: - logger.warning("watch for buildconfig " + buildConfig.getMetadata().getName() + " received unknown event " + action); + logger.warning("watch 
for buildconfig " + buildConfig.getMetadata().getName() + + " received unknown event " + action); break; } // we employ impersonation here to insure we have "full access"; @@ -216,10 +188,15 @@ public void doRun() { logger.fine("No Openshift Token credential defined."); return; } - BuildList buildList = getAuthenticatedOpenShiftClient().builds().inNamespace(buildConfig.getMetadata().getNamespace()).withField(OPENSHIFT_BUILD_STATUS_FIELD, BuildPhases.NEW) - .withLabel(OPENSHIFT_LABELS_BUILD_CONFIG_NAME, buildConfig.getMetadata().getName()).list(); + BuildList buildList = getAuthenticatedOpenShiftClient().builds() + .inNamespace(buildConfig.getMetadata().getNamespace()) + .withField(OPENSHIFT_BUILD_STATUS_FIELD, BuildPhases.NEW) + .withLabel(OPENSHIFT_LABELS_BUILD_CONFIG_NAME, + buildConfig.getMetadata().getName()) + .list(); if (buildList.getItems().size() > 0) { - logger.info("build backup query for " + buildConfig.getMetadata().getName() + " found new builds"); + logger.info("build backup query for " + buildConfig.getMetadata().getName() + + " found new builds"); BuildWatcher.onInitialBuilds(buildList); } } @@ -233,11 +210,6 @@ public void doRun() { logger.log(Level.WARNING, "Caught: " + e, e); } } - @Override - public void eventReceived(io.fabric8.kubernetes.client.Watcher.Action action, T resource) { - BuildConfig bc = (BuildConfig)resource; - eventReceived(action, bc); - } private void upsertJob(final BuildConfig buildConfig) throws Exception { if (isPipelineStrategyBuildConfig(buildConfig)) { @@ -271,12 +243,14 @@ private void innerDeleteEventToJenkinsJob(final BuildConfig buildConfig) throws @Override public Void call() throws Exception { try { - deleteInProgress(buildConfig.getMetadata().getNamespace() + buildConfig.getMetadata().getName()); + deleteInProgress( + buildConfig.getMetadata().getNamespace() + buildConfig.getMetadata().getName()); job.delete(); } finally { removeJobWithBuildConfig(buildConfig); 
Jenkins.getActiveInstance().rebuildDependencyGraphAsync(); - deleteCompleted(buildConfig.getMetadata().getNamespace() + buildConfig.getMetadata().getName()); + deleteCompleted( + buildConfig.getMetadata().getNamespace() + buildConfig.getMetadata().getName()); } return null; } diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/BuildWatcher.java b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildWatcher.java index 5214e9e45..7ee09e022 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/BuildWatcher.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildWatcher.java @@ -15,38 +15,11 @@ */ package io.fabric8.jenkins.openshiftsync; -import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; -import hudson.security.ACL; -import hudson.triggers.SafeTimerTask; -import io.fabric8.kubernetes.api.model.OwnerReference; -import io.fabric8.kubernetes.client.Watcher.Action; -import io.fabric8.openshift.api.model.Build; -import io.fabric8.openshift.api.model.BuildConfig; -import io.fabric8.openshift.api.model.BuildList; -import io.fabric8.openshift.api.model.BuildStatus; -import jenkins.model.Jenkins; -import jenkins.security.NotReallyRoleSensitiveCallable; - -import org.apache.commons.lang.StringUtils; -import org.jenkinsci.plugins.workflow.job.WorkflowJob; -import org.jenkinsci.plugins.workflow.job.WorkflowRun; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.Comparator; -import java.util.ConcurrentModificationException; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; -import java.util.logging.Level; -import java.util.logging.Logger; - import static io.fabric8.jenkins.openshiftsync.Annotations.BUILDCONFIG_NAME; import static io.fabric8.jenkins.openshiftsync.BuildConfigToJobMap.getJobFromBuildConfig; import static io.fabric8.jenkins.openshiftsync.BuildConfigToJobMap.getJobFromBuildConfigNameNamespace; import static 
io.fabric8.jenkins.openshiftsync.BuildPhases.CANCELLED; +import static io.fabric8.jenkins.openshiftsync.BuildPhases.NEW; import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_ANNOTATIONS_BUILD_NUMBER; import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_BUILD_STATUS_FIELD; import static io.fabric8.jenkins.openshiftsync.JenkinsUtils.cancelBuild; @@ -54,17 +27,44 @@ import static io.fabric8.jenkins.openshiftsync.JenkinsUtils.getJobFromBuild; import static io.fabric8.jenkins.openshiftsync.JenkinsUtils.handleBuildList; import static io.fabric8.jenkins.openshiftsync.JenkinsUtils.triggerJob; +import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getAnnotation; import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getAuthenticatedOpenShiftClient; +import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getOpenshiftClient; import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.isCancellable; import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.isCancelled; import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.isNew; import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.updateOpenShiftBuildPhase; -import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getAnnotation; import static java.util.logging.Level.WARNING; -public class BuildWatcher extends BaseWatcher { - private static final Logger logger = Logger.getLogger(BuildWatcher.class - .getName()); +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.ConcurrentModificationException; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.logging.Level; +import java.util.logging.Logger; + +import org.apache.commons.lang.StringUtils; +import org.jenkinsci.plugins.workflow.job.WorkflowJob; +import org.jenkinsci.plugins.workflow.job.WorkflowRun; + +import 
edu.umd.cs.findbugs.annotations.SuppressFBWarnings; +import hudson.security.ACL; +import io.fabric8.kubernetes.api.model.OwnerReference; +import io.fabric8.openshift.api.model.Build; +import io.fabric8.openshift.api.model.BuildConfig; +import io.fabric8.openshift.api.model.BuildList; +import io.fabric8.openshift.api.model.BuildStatus; +import io.fabric8.openshift.client.OpenShiftClient; +import jenkins.model.Jenkins; +import jenkins.security.NotReallyRoleSensitiveCallable; + +public class BuildWatcher extends BaseWatcher { + private static final Logger logger = Logger.getLogger(BuildWatcher.class.getName()); // now that listing interval is 5 minutes (used to be 10 seconds), we have // seen @@ -74,13 +74,13 @@ public class BuildWatcher extends BaseWatcher { // minute delay // before the job run gets kicked off // started seeing duplicate builds getting kicked off so quit depending on - // so moved off of concurrent hash set to concurrent hash map using + // so moved off of concurrent hash set to concurrent hash map using // namepace/name key - private static final ConcurrentHashMap buildsWithNoBCList = new ConcurrentHashMap(); + private static final ConcurrentHashMap buildsWithNoBCList = new ConcurrentHashMap(); @SuppressFBWarnings("EI_EXPOSE_REP2") - public BuildWatcher(String[] namespaces) { - super(namespaces); + public BuildWatcher(String namespace) { + super(namespace); } @Override @@ -88,82 +88,58 @@ public int getListIntervalInSeconds() { return GlobalPluginConfiguration.get().getBuildListInterval(); } - @Override - public Runnable getStartTimerTask() { - return new SafeTimerTask() { - @Override - public void doRun() { - if (!CredentialsUtils.hasCredentials()) { - logger.fine("No Openshift Token credential defined."); - return; - } - // prior to finding new builds poke the BuildWatcher builds with - // no BC list and see if we - // can create job runs for premature builds we already know - // about - BuildWatcher.flushBuildsWithNoBCList(); - for (String 
namespace : namespaces) { - addWatchForNamespace(namespace); - } - reconcileRunsAndBuilds(); - } - }; - } - - public void addWatchForNamespace(String namespace) { + public void start() { + BuildToActionMapper.initialize(); + logger.info("Now handling startup build for " + namespace + " !!"); + + // prior to finding new builds poke the BuildWatcher builds with + // no BC list and see if we + // can create job runs for premature builds we already know + // about + BuildWatcher.flushBuildsWithNoBCList(); + String ns = this.namespace; BuildList newBuilds = null; try { logger.fine("listing Build resources"); - newBuilds = getAuthenticatedOpenShiftClient() - .builds() - .inNamespace(namespace) - .withField(OPENSHIFT_BUILD_STATUS_FIELD, - BuildPhases.NEW).list(); + OpenShiftClient client = getAuthenticatedOpenShiftClient(); + newBuilds = client.builds().inNamespace(ns).withField(OPENSHIFT_BUILD_STATUS_FIELD, NEW).list(); onInitialBuilds(newBuilds); logger.fine("handled Build resources"); } catch (Exception e) { - logger.log(Level.SEVERE,"Failed to load initial Builds: " + e, e); + logger.log(Level.SEVERE, "Failed to load initial Builds: " + e, e); } try { - String resourceVersion = "0"; + String rv = "0"; if (newBuilds == null) { logger.warning("Unable to get build list; impacts resource version used for watch"); } else { - resourceVersion = newBuilds.getMetadata() - .getResourceVersion(); + rv = newBuilds.getMetadata().getResourceVersion(); } - if (watches.get(namespace) == null) { - logger.info("creating Build watch for namespace " - + namespace - + " and resource version " - + resourceVersion); - - addWatch(namespace, getAuthenticatedOpenShiftClient() - .builds() - .inNamespace(namespace) - .withResourceVersion( - resourceVersion) - .watch(new WatcherCallback( - BuildWatcher.this, - namespace))); + + if (this.watch == null) { + synchronized (this.lock) { + if (this.watch == null) { + logger.info("creating Build watch for namespace " + ns + " and resource version " + 
rv); + OpenShiftClient client = getOpenshiftClient(); + this.watch = client.builds().inNamespace(ns).withResourceVersion(rv).watch(this); + } } + } + } catch (Exception e) { - logger.log(Level.SEVERE,"Failed to load initial Builds: " + e, e); + logger.log(Level.SEVERE, "Failed to load initial Builds: " + e, e); } + reconcileRunsAndBuilds(); } public void startAfterOnClose(String namespace) { synchronized (this.lock) { - addWatchForNamespace(namespace); + start(); } } - public void start() { - BuildToActionMapper.initialize(); - super.start(); - } - @SuppressFBWarnings("SF_SWITCH_NO_DEFAULT") + @Override public void eventReceived(Action action, Build build) { if (!OpenShiftUtils.isPipelineStrategyBuild(build)) return; @@ -182,18 +158,14 @@ public void eventReceived(Action action, Build build) { logger.warning("watch for build " + build.getMetadata().getName() + " received error event "); break; default: - logger.warning("watch for build " + build.getMetadata().getName() + " received unknown event " + action); + logger.warning( + "watch for build " + build.getMetadata().getName() + " received unknown event " + action); break; } } catch (Exception e) { logger.log(WARNING, "Caught: " + e, e); } } - @Override - public void eventReceived(io.fabric8.kubernetes.client.Watcher.Action action, T resource) { - Build build = (Build)resource; - eventReceived(action, build); - } public static void onInitialBuilds(BuildList buildList) { if (buildList == null) @@ -205,24 +177,16 @@ public static void onInitialBuilds(BuildList buildList) { @Override public int compare(Build b1, Build b2) { if (b1.getMetadata().getAnnotations() == null - || b1.getMetadata().getAnnotations() - .get(OPENSHIFT_ANNOTATIONS_BUILD_NUMBER) == null) { - logger.warning("cannot compare build " - + b1.getMetadata().getName() - + " from namespace " - + b1.getMetadata().getNamespace() - + ", has bad annotations: " + || b1.getMetadata().getAnnotations().get(OPENSHIFT_ANNOTATIONS_BUILD_NUMBER) == null) { + 
logger.warning("cannot compare build " + b1.getMetadata().getName() + " from namespace " + + b1.getMetadata().getNamespace() + ", has bad annotations: " + b1.getMetadata().getAnnotations()); return 0; } if (b2.getMetadata().getAnnotations() == null - || b2.getMetadata().getAnnotations() - .get(OPENSHIFT_ANNOTATIONS_BUILD_NUMBER) == null) { - logger.warning("cannot compare build " - + b2.getMetadata().getName() - + " from namespace " - + b2.getMetadata().getNamespace() - + ", has bad annotations: " + || b2.getMetadata().getAnnotations().get(OPENSHIFT_ANNOTATIONS_BUILD_NUMBER) == null) { + logger.warning("cannot compare build " + b2.getMetadata().getName() + " from namespace " + + b2.getMetadata().getNamespace() + ", has bad annotations: " + b2.getMetadata().getAnnotations()); return 0; } @@ -230,14 +194,10 @@ public int compare(Build b1, Build b2) { try { rc = Long.compare( - Long.parseLong(b1 - .getMetadata() - .getAnnotations() - .get(OPENSHIFT_ANNOTATIONS_BUILD_NUMBER)), - Long.parseLong(b2 - .getMetadata() - .getAnnotations() - .get(OPENSHIFT_ANNOTATIONS_BUILD_NUMBER))); + Long.parseLong( + b1.getMetadata().getAnnotations().get(OPENSHIFT_ANNOTATIONS_BUILD_NUMBER)), + Long.parseLong( + b2.getMetadata().getAnnotations().get(OPENSHIFT_ANNOTATIONS_BUILD_NUMBER))); } catch (Throwable t) { logger.log(Level.FINE, "onInitialBuilds", t); } @@ -248,8 +208,7 @@ public int compare(Build b1, Build b2) { // We need to sort the builds into their build configs so we can // handle build run policies correctly. 
Map buildConfigMap = new HashMap<>(); - Map> buildConfigBuildMap = new HashMap<>( - items.size()); + Map> buildConfigBuildMap = new HashMap<>(items.size()); for (Build b : items) { if (!OpenShiftUtils.isPipelineStrategyBuild(b)) continue; @@ -261,9 +220,8 @@ public int compare(Build b1, Build b2) { String bcMapKey = namespace + "/" + buildConfigName; BuildConfig bc = buildConfigMap.get(bcMapKey); if (bc == null) { - bc = getAuthenticatedOpenShiftClient().buildConfigs() - .inNamespace(namespace).withName(buildConfigName) - .get(); + bc = getAuthenticatedOpenShiftClient().buildConfigs().inNamespace(namespace) + .withName(buildConfigName).get(); if (bc == null) { // if the bc is not there via a REST get, then it is not // going to be, and we are not handling manual creation @@ -281,8 +239,7 @@ public int compare(Build b1, Build b2) { } // Now handle the builds. - for (Map.Entry> buildConfigBuilds : buildConfigBuildMap - .entrySet()) { + for (Map.Entry> buildConfigBuilds : buildConfigBuildMap.entrySet()) { BuildConfig bc = buildConfigBuilds.getKey(); if (bc.getMetadata() == null) { // Should never happen but let's be safe... 
@@ -292,21 +249,16 @@ public int compare(Build b1, Build b2) { if (job == null) { List builds = buildConfigBuilds.getValue(); for (Build b : builds) { - logger.info("skipping listed new build " - + b.getMetadata().getName() - + " no job at this time"); + logger.info("skipping listed new build " + b.getMetadata().getName() + " no job at this time"); addBuildToNoBCList(b); } continue; } - BuildConfigProjectProperty bcp = job - .getProperty(BuildConfigProjectProperty.class); + BuildConfigProjectProperty bcp = job.getProperty(BuildConfigProjectProperty.class); if (bcp == null) { List builds = buildConfigBuilds.getValue(); for (Build b : builds) { - logger.info("skipping listed new build " - + b.getMetadata().getName() - + " no prop at this time"); + logger.info("skipping listed new build " + b.getMetadata().getName() + " no prop at this time"); addBuildToNoBCList(b); } continue; @@ -332,8 +284,7 @@ private static void modifyEventToJenkinsJobRun(Build build) { } } - public static boolean addEventToJenkinsJobRun(Build build) - throws IOException { + public static boolean addEventToJenkinsJobRun(Build build) throws IOException { // should have been caught upstack, but just in case since public method if (!OpenShiftUtils.isPipelineStrategyBuild(build)) return false; @@ -352,8 +303,7 @@ public static boolean addEventToJenkinsJobRun(Build build) if (job != null) { return triggerJob(job, build); } - logger.info("skipping watch event for build " - + build.getMetadata().getName() + " no job at this time"); + logger.info("skipping watch event for build " + build.getMetadata().getName() + " no job at this time"); addBuildToNoBCList(build); return false; } @@ -363,25 +313,24 @@ private static void addBuildToNoBCList(Build build) { if (!OpenShiftUtils.isPipelineStrategyBuild(build)) return; try { - buildsWithNoBCList.put(build.getMetadata().getNamespace()+build.getMetadata().getName(), build); - } catch (ConcurrentModificationException | IllegalArgumentException | - 
UnsupportedOperationException | NullPointerException e) { - logger.log(Level.WARNING,"Failed to add item " + - build.getMetadata().getName(), e); + buildsWithNoBCList.put(build.getMetadata().getNamespace() + build.getMetadata().getName(), build); + } catch (ConcurrentModificationException | IllegalArgumentException | UnsupportedOperationException + | NullPointerException e) { + logger.log(Level.WARNING, "Failed to add item " + build.getMetadata().getName(), e); } } private static void removeBuildFromNoBCList(Build build) { - buildsWithNoBCList.remove(build.getMetadata().getNamespace()+build.getMetadata().getName()); + buildsWithNoBCList.remove(build.getMetadata().getNamespace() + build.getMetadata().getName()); } // trigger any builds whose watch events arrived before the // corresponding build config watch events public static void flushBuildsWithNoBCList() { - - ConcurrentHashMap clone = null; - synchronized(buildsWithNoBCList) { - clone = new ConcurrentHashMap(buildsWithNoBCList); + + ConcurrentHashMap clone = null; + synchronized (buildsWithNoBCList) { + clone = new ConcurrentHashMap(buildsWithNoBCList); } boolean anyRemoveFailures = false; for (Build build : clone.values()) { @@ -394,7 +343,7 @@ public static void flushBuildsWithNoBCList() { logger.log(Level.WARNING, "flushBuildsWithNoBCList", e); } try { - synchronized(buildsWithNoBCList) { + synchronized (buildsWithNoBCList) { removeBuildFromNoBCList(build); } } catch (Throwable t) { @@ -409,12 +358,12 @@ public static void flushBuildsWithNoBCList() { logger.log(Level.WARNING, "flushBuildsWithNoBCList", t); } } - - synchronized(buildsWithNoBCList) { + + synchronized (buildsWithNoBCList) { if (anyRemoveFailures && buildsWithNoBCList.size() > 0) { buildsWithNoBCList.clear(); - } - + } + } } } @@ -422,17 +371,15 @@ public static void flushBuildsWithNoBCList() { // innerDeleteEventToJenkinsJobRun is the actual delete logic at the heart // of deleteEventToJenkinsJobRun // that is either in a sync block or not 
based on the presence of a BC uid - private static void innerDeleteEventToJenkinsJobRun( - final Build build) throws Exception { + private static void innerDeleteEventToJenkinsJobRun(final Build build) throws Exception { final WorkflowJob job = getJobFromBuild(build); if (job != null) { - ACL.impersonate(ACL.SYSTEM, - new NotReallyRoleSensitiveCallable() { - @Override - public Void call() throws Exception { - cancelBuild(job, build, true); - return null; - } + ACL.impersonate(ACL.SYSTEM, new NotReallyRoleSensitiveCallable() { + @Override + public Void call() throws Exception { + cancelBuild(job, build, true); + return null; + } }); } else { // in case build was created and deleted quickly, prior to seeing BC @@ -451,14 +398,11 @@ public Void call() throws Exception { // delete events and build delete events that arrive concurrently and in a // nondeterministic // order - private static void deleteEventToJenkinsJobRun( - final Build build) throws Exception { - List ownerRefs = build.getMetadata() - .getOwnerReferences(); + private static void deleteEventToJenkinsJobRun(final Build build) throws Exception { + List ownerRefs = build.getMetadata().getOwnerReferences(); String bcUid = null; for (OwnerReference ref : ownerRefs) { - if ("BuildConfig".equals(ref.getKind()) && ref.getUid() != null - && ref.getUid().length() > 0) { + if ("BuildConfig".equals(ref.getKind()) && ref.getUid() != null && ref.getUid().length() > 0) { // employ intern to facilitate sync'ing on the same actual // object bcUid = ref.getUid().intern(); @@ -466,7 +410,7 @@ private static void deleteEventToJenkinsJobRun( // if entire job already deleted via bc delete, just return if (getJobFromBuildConfigNameNamespace(getAnnotation(build, BUILDCONFIG_NAME), build.getMetadata().getNamespace()) == null) { - return; + return; } innerDeleteEventToJenkinsJobRun(build); return; @@ -478,41 +422,40 @@ private static void deleteEventToJenkinsJobRun( innerDeleteEventToJenkinsJobRun(build); } - /** - * Reconciles 
Jenkins job runs and OpenShift builds - * - * Deletes all job runs that do not have an associated build in OpenShift - */ - private static void reconcileRunsAndBuilds() { - logger.info("Reconciling job runs and builds"); - - List jobs = Jenkins.getActiveInstance().getAllItems(WorkflowJob.class); - - for (WorkflowJob job : jobs) { - BuildConfigProjectProperty property = job.getProperty(BuildConfigProjectProperty.class); - if (property == null || StringUtils.isBlank(property.getNamespace()) || StringUtils.isBlank(property.getName())) { - continue; - } - - logger.info("Checking job " + job.toString() + " runs for BuildConfig " + property.getNamespace() + "/" + property.getName()); - - BuildList buildList = getAuthenticatedOpenShiftClient().builds() - .inNamespace(property.getNamespace()).withLabel("buildconfig=" + property.getName()).list(); - - for (WorkflowRun run : job.getBuilds()) { - boolean found = false; - BuildCause cause = run.getCause(BuildCause.class); - for (Build build : buildList.getItems()) { - if (cause != null && cause.getUid().equals(build.getMetadata().getUid())) { - found = true; - break; - } - } - if (!found) { - deleteRun(run); + /** + * Reconciles Jenkins job runs and OpenShift builds + * + * Deletes all job runs that do not have an associated build in OpenShift + */ + private static void reconcileRunsAndBuilds() { + logger.fine("Reconciling job runs and builds"); + List jobs = Jenkins.getActiveInstance().getAllItems(WorkflowJob.class); + for (WorkflowJob job : jobs) { + BuildConfigProjectProperty property = job.getProperty(BuildConfigProjectProperty.class); + if (property != null) { + String ns = property.getNamespace(); + String name = property.getName(); + if (StringUtils.isNotBlank(ns) && StringUtils.isNotBlank(name)) { + logger.fine("Checking job " + job + " runs for BuildConfig " + ns + "/" + name); + OpenShiftClient client = getAuthenticatedOpenShiftClient(); + BuildList builds = client.builds().inNamespace(ns).withLabel("buildconfig=" + 
name).list(); + for (WorkflowRun run : job.getBuilds()) { + boolean found = false; + BuildCause cause = run.getCause(BuildCause.class); + for (Build build : builds.getItems()) { + if (cause != null && cause.getUid().equals(build.getMetadata().getUid())) { + found = true; + break; + } + } + if (!found) { + deleteRun(run); + } + } + } + } + } - } } - } } diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/ConfigMapWatcher.java b/src/main/java/io/fabric8/jenkins/openshiftsync/ConfigMapWatcher.java index 1c57f3fea..af4b4b4cb 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/ConfigMapWatcher.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/ConfigMapWatcher.java @@ -15,28 +15,31 @@ */ package io.fabric8.jenkins.openshiftsync; -import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; -import hudson.triggers.SafeTimerTask; -import io.fabric8.kubernetes.api.model.ConfigMap; -import io.fabric8.kubernetes.api.model.ConfigMapList; -import io.fabric8.kubernetes.client.Watcher.Action; - -import org.csanchez.jenkins.plugins.kubernetes.PodTemplate; +import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getAuthenticatedOpenShiftClient; +import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getOpenshiftClient; +import static io.fabric8.jenkins.openshiftsync.PodTemplateUtils.processSlavesForAddEvent; +import static io.fabric8.jenkins.openshiftsync.PodTemplateUtils.processSlavesForDeleteEvent; +import static io.fabric8.jenkins.openshiftsync.PodTemplateUtils.processSlavesForModifyEvent; +import static java.util.logging.Level.SEVERE; +import static java.util.logging.Level.WARNING; import java.util.List; import java.util.concurrent.ConcurrentHashMap; import java.util.logging.Logger; -import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getAuthenticatedOpenShiftClient; -import static java.util.logging.Level.SEVERE; -import static java.util.logging.Level.WARNING; +import org.csanchez.jenkins.plugins.kubernetes.PodTemplate; + +import 
edu.umd.cs.findbugs.annotations.SuppressFBWarnings; +import io.fabric8.kubernetes.api.model.ConfigMap; +import io.fabric8.kubernetes.api.model.ConfigMapList; +import io.fabric8.openshift.client.OpenShiftClient; -public class ConfigMapWatcher extends BaseWatcher { +public class ConfigMapWatcher extends BaseWatcher { private final Logger LOGGER = Logger.getLogger(getClass().getName()); @SuppressFBWarnings("EI_EXPOSE_REP2") - public ConfigMapWatcher(String[] namespaces) { - super(namespaces); + public ConfigMapWatcher(String namespace) { + super(namespace); } @Override @@ -44,49 +47,35 @@ public int getListIntervalInSeconds() { return GlobalPluginConfiguration.get().getConfigMapListInterval(); } - public Runnable getStartTimerTask() { - return new SafeTimerTask() { - @Override - public void doRun() { - if (!CredentialsUtils.hasCredentials()) { - LOGGER.fine("No Openshift Token credential defined."); - return; - } - for (String namespace : namespaces) { - addWatchForNamespace(namespace); - } - } - }; - } - - public void addWatchForNamespace(String namespace) { + public void start() { + LOGGER.info("Now handling startup config maps for " + namespace + " !!"); ConfigMapList configMaps = null; + String ns = this.namespace; try { LOGGER.fine("listing ConfigMap resources"); - configMaps = getAuthenticatedOpenShiftClient() - .configMaps().inNamespace(namespace).list(); - onInitialConfigMaps(configMaps); + OpenShiftClient client = getAuthenticatedOpenShiftClient(); + configMaps = client.configMaps().inNamespace(ns).list(); + onInitialConfigMaps(configMaps); LOGGER.fine("handled ConfigMap resources"); } catch (Exception e) { LOGGER.log(SEVERE, "Failed to load ConfigMaps: " + e, e); } try { - String resourceVersion = "0"; + String rv = "0"; if (configMaps == null) { LOGGER.warning("Unable to get config map list; impacts resource version used for watch"); } else { - resourceVersion = configMaps.getMetadata().getResourceVersion(); + rv = 
configMaps.getMetadata().getResourceVersion(); } - if (watches.get(namespace) == null) { - LOGGER.info("creating ConfigMap watch for namespace " - + namespace - + " and resource version " - + resourceVersion); - addWatch(namespace, - getAuthenticatedOpenShiftClient() - .configMaps() - .inNamespace(namespace) - .withResourceVersion(resourceVersion).watch(new WatcherCallback(ConfigMapWatcher.this,namespace))); + + if (this.watch == null) { + synchronized (this.lock) { + if (this.watch == null) { + LOGGER.info("creating ConfigMap watch for namespace " + ns + " and resource version " + rv); + OpenShiftClient client = getOpenshiftClient(); + this.watch = client.configMaps().inNamespace(ns).withResourceVersion(rv).watch(this); + } + } } } catch (Exception e) { LOGGER.log(SEVERE, "Failed to load ConfigMaps: " + e, e); @@ -96,16 +85,11 @@ public void addWatchForNamespace(String namespace) { public void startAfterOnClose(String namespace) { synchronized (this.lock) { - addWatchForNamespace(namespace); + start(); } } - public void start() { - super.start(); - // lets process the initial state - LOGGER.info("Now handling startup config maps!!"); - } - + @Override public void eventReceived(Action action, ConfigMap configMap) { try { List slavesFromCM = PodTemplateUtils.podTemplatesFromConfigMap(this, configMap); @@ -116,31 +100,27 @@ public void eventReceived(Action action, ConfigMap configMap) { switch (action) { case ADDED: if (hasSlaves) { - processSlavesForAddEvent(slavesFromCM, PodTemplateUtils.cmType, uid, cmname, namespace); + processSlavesForAddEvent(this, slavesFromCM, PodTemplateUtils.cmType, uid, cmname, namespace); } break; case MODIFIED: - processSlavesForModifyEvent(slavesFromCM, PodTemplateUtils.cmType, uid, cmname, namespace); + processSlavesForModifyEvent(this, slavesFromCM, PodTemplateUtils.cmType, uid, cmname, namespace); break; case DELETED: - this.processSlavesForDeleteEvent(slavesFromCM, PodTemplateUtils.cmType, uid, cmname, namespace); + 
processSlavesForDeleteEvent(this, slavesFromCM, PodTemplateUtils.cmType, uid, cmname, namespace); break; case ERROR: LOGGER.warning("watch for configMap " + configMap.getMetadata().getName() + " received error event "); break; default: - LOGGER.warning("watch for configMap " + configMap.getMetadata().getName() + " received unknown event " + action); + LOGGER.warning("watch for configMap " + configMap.getMetadata().getName() + " received unknown event " + + action); break; } } catch (Exception e) { LOGGER.log(WARNING, "Caught: " + e, e); } } - @Override - public void eventReceived(io.fabric8.kubernetes.client.Watcher.Action action, T resource) { - ConfigMap cfgmap = (ConfigMap)resource; - eventReceived(action, cfgmap); - } private void onInitialConfigMaps(ConfigMapList configMaps) { if (configMaps == null) @@ -152,16 +132,16 @@ private void onInitialConfigMaps(ConfigMapList configMaps) { if (items != null) { for (ConfigMap configMap : items) { try { - if (PodTemplateUtils.configMapContainsSlave(configMap) && !PodTemplateUtils.trackedPodTemplates.containsKey(configMap.getMetadata().getUid())) { + if (PodTemplateUtils.configMapContainsSlave(configMap) + && !PodTemplateUtils.trackedPodTemplates.containsKey(configMap.getMetadata().getUid())) { List templates = PodTemplateUtils.podTemplatesFromConfigMap(this, configMap); PodTemplateUtils.trackedPodTemplates.put(configMap.getMetadata().getUid(), templates); for (PodTemplate podTemplate : templates) { - PodTemplateUtils.addPodTemplate(podTemplate); + PodTemplateUtils.addPodTemplate(podTemplate); } } } catch (Exception e) { - LOGGER.log(SEVERE, - "Failed to update ConfigMap PodTemplates", e); + LOGGER.log(SEVERE, "Failed to update ConfigMap PodTemplates", e); } } } diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/GlobalPluginConfiguration.java b/src/main/java/io/fabric8/jenkins/openshiftsync/GlobalPluginConfiguration.java index 4b747dbc4..3658cee5f 100644 --- 
a/src/main/java/io/fabric8/jenkins/openshiftsync/GlobalPluginConfiguration.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/GlobalPluginConfiguration.java @@ -22,6 +22,9 @@ import static java.util.logging.Level.SEVERE; import static jenkins.model.Jenkins.ADMINISTER; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.ScheduledFuture; import java.util.logging.Logger; import org.apache.commons.lang.StringUtils; @@ -34,6 +37,7 @@ import hudson.Util; import hudson.util.ListBoxModel; import io.fabric8.kubernetes.client.KubernetesClientException; +import io.fabric8.openshift.client.OpenShiftClient; import jenkins.model.GlobalConfiguration; import jenkins.model.Jenkins; import jenkins.util.Timer; @@ -64,6 +68,14 @@ public class GlobalPluginConfiguration extends GlobalConfiguration { private transient ConfigMapWatcher configMapWatcher; private transient ImageStreamWatcher imageStreamWatcher; + private final static List> watchers = new ArrayList<>(); + + private transient ScheduledFuture schedule; + + public final static List> getWatchers() { + return watchers; + } + @DataBoundConstructor public GlobalPluginConfiguration(boolean enable, String server, String namespace, boolean foldersEnabled, String credentialsId, String jobNamePattern, String skipOrganizationPrefix, String skipBranchSuffix, @@ -261,37 +273,40 @@ void setImageStreamWatcher(ImageStreamWatcher imageStreamWatcher) { private synchronized void configChange() { logger.info("OpenShift Sync Plugin processing a newly supplied configuration"); - if (this.buildConfigWatcher != null) { - this.buildConfigWatcher.stop(); - } - if (this.buildWatcher != null) { - this.buildWatcher.stop(); + synchronized (watchers) { + logger.info("Existing watchers: " + watchers); + for (BaseWatcher watch : watchers) { + watch.stop(); + } + watchers.clear(); + logger.info("Existing watchers: stopped and cleared : " + watchers); + logger.info("Existing scheduled task: " + schedule); + + if 
(this.schedule != null && !this.schedule.isCancelled()) { + this.schedule.cancel(true); + logger.info("Existing scheduled task cancelled: " + schedule); + } } - if (this.configMapWatcher != null) { - this.configMapWatcher.stop(); - } - if (this.imageStreamWatcher != null) { - this.imageStreamWatcher.stop(); - } - if (this.secretWatcher != null) { - this.secretWatcher.stop(); - } - this.buildWatcher = null; - this.buildConfigWatcher = null; - this.configMapWatcher = null; - this.imageStreamWatcher = null; - this.secretWatcher = null; + + OpenShiftClient client = OpenShiftUtils.getOpenShiftClient(); + logger.info("Shutting down OpenShift Client: " + client + " ..."); OpenShiftUtils.shutdownOpenShiftClient(); + logger.info("!!! OpenShift Client has been shutdown "); if (!this.enabled) { logger.info("OpenShift Sync Plugin has been disabled"); return; } try { + logger.info("Initializing OpenShift Client..."); OpenShiftUtils.initializeOpenShiftClient(this.server); - this.namespaces = getNamespaceOrUseDefault(this.namespaces, getOpenShiftClient()); + OpenShiftClient openShiftClient = getOpenShiftClient(); + this.namespaces = getNamespaceOrUseDefault(this.namespaces, openShiftClient); + logger.info("OpenShift Client initialized: " + openShiftClient); + Runnable task = new GlobalPluginConfigurationTimerTask(this); - Timer.get().schedule(task, 1, SECONDS); // lets give jenkins a while to get started ;) + // lets give jenkins a while to get started ;) + this.schedule = Timer.get().schedule(task, 1, SECONDS); } catch (KubernetesClientException e) { Throwable exceptionOrCause = (e.getCause() != null) ? 
e.getCause() : e; logger.log(SEVERE, "Failed to configure OpenShift Jenkins Sync Plugin: " + exceptionOrCause); diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/GlobalPluginConfigurationTimerTask.java b/src/main/java/io/fabric8/jenkins/openshiftsync/GlobalPluginConfigurationTimerTask.java index 94020c586..c59a36dec 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/GlobalPluginConfigurationTimerTask.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/GlobalPluginConfigurationTimerTask.java @@ -2,6 +2,8 @@ import static hudson.init.InitMilestone.COMPLETED; +import java.util.ArrayList; +import java.util.List; import java.util.logging.Logger; import hudson.init.InitMilestone; @@ -30,36 +32,52 @@ protected void doRun() throws Exception { if (initLevel == COMPLETED) { break; } - logger.fine("Jenkins not ready..."); + logger.info("Jenkins not ready..."); try { Thread.sleep(500); } catch (InterruptedException e) { - // ignore + logger.info("Interrupted while sleeping"); } } - intializeAndStartWatchers(); - } - - private void intializeAndStartWatchers() { + logger.info("Initializing all the watchers..."); String[] namespaces = globalPluginConfiguration.getNamespaces(); - BuildConfigWatcher buildConfigWatcher = new BuildConfigWatcher(namespaces); - globalPluginConfiguration.setBuildConfigWatcher(buildConfigWatcher); - buildConfigWatcher.start(); + List> watchers = new ArrayList<>(); + for (String namespace : namespaces) { + BuildConfigWatcher buildConfigWatcher = new BuildConfigWatcher(namespace); + watchers.add(buildConfigWatcher); + buildConfigWatcher.start(); - BuildWatcher buildWatcher = new BuildWatcher(namespaces); - globalPluginConfiguration.setBuildWatcher(buildWatcher); - buildWatcher.start(); + BuildWatcher buildWatcher = new BuildWatcher(namespace); + buildWatcher.start(); + watchers.add(buildWatcher); - ConfigMapWatcher configMapWatcher = new ConfigMapWatcher(namespaces); - 
globalPluginConfiguration.setConfigMapWatcher(configMapWatcher); - configMapWatcher.start(); + ConfigMapWatcher configMapWatcher = new ConfigMapWatcher(namespace); + configMapWatcher.start(); + watchers.add(configMapWatcher); - ImageStreamWatcher imageStreamWatcher = new ImageStreamWatcher(namespaces); - globalPluginConfiguration.setImageStreamWatcher(imageStreamWatcher); - imageStreamWatcher.start(); + ImageStreamWatcher imageStreamWatcher = new ImageStreamWatcher(namespace); + imageStreamWatcher.start(); + watchers.add(imageStreamWatcher); - SecretWatcher secretWatcher = new SecretWatcher(namespaces); - globalPluginConfiguration.setSecretWatcher(secretWatcher); - secretWatcher.start(); + SecretWatcher secretWatcher = new SecretWatcher(namespace); + secretWatcher.start(); + watchers.add(secretWatcher); + + } + logger.info("All the watchers have been initialized!!"); + synchronized (watchers) { + List> globalWatchers = GlobalPluginConfiguration.getWatchers(); + synchronized (globalWatchers) { + logger.info("Existing watchers: " + globalWatchers); + for (BaseWatcher watch : globalWatchers) { + watch.stop(); + } + globalWatchers.clear(); + logger.info("Existing watchers: stopped and cleared : " + globalWatchers); + globalWatchers.addAll(watchers); + logger.info("New watchers created : " + globalWatchers.size()); + + } + } } } diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/ImageStreamWatcher.java b/src/main/java/io/fabric8/jenkins/openshiftsync/ImageStreamWatcher.java index 1f65f86b6..28d8c79e0 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/ImageStreamWatcher.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/ImageStreamWatcher.java @@ -15,6 +15,14 @@ */ package io.fabric8.jenkins.openshiftsync; +import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getAuthenticatedOpenShiftClient; +import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getOpenshiftClient; +import static 
io.fabric8.jenkins.openshiftsync.PodTemplateUtils.addPodTemplate; +import static io.fabric8.jenkins.openshiftsync.PodTemplateUtils.getPodTemplatesListFromImageStreams; +import static io.fabric8.jenkins.openshiftsync.PodTemplateUtils.hasPodTemplate; +import static io.fabric8.jenkins.openshiftsync.PodTemplateUtils.processSlavesForAddEvent; +import static io.fabric8.jenkins.openshiftsync.PodTemplateUtils.processSlavesForDeleteEvent; +import static io.fabric8.jenkins.openshiftsync.PodTemplateUtils.processSlavesForModifyEvent; import static java.util.logging.Level.SEVERE; import static java.util.logging.Level.WARNING; @@ -24,18 +32,17 @@ import org.csanchez.jenkins.plugins.kubernetes.PodTemplate; import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; -import hudson.triggers.SafeTimerTask; import io.fabric8.kubernetes.api.model.ObjectMeta; -import io.fabric8.kubernetes.client.Watcher.Action; import io.fabric8.openshift.api.model.ImageStream; import io.fabric8.openshift.api.model.ImageStreamList; +import io.fabric8.openshift.client.OpenShiftClient; -public class ImageStreamWatcher extends BaseWatcher { - private final Logger logger = Logger.getLogger(getClass().getName()); +public class ImageStreamWatcher extends BaseWatcher { + private final Logger logger = Logger.getLogger(getClass().getName()); @SuppressFBWarnings("EI_EXPOSE_REP2") - public ImageStreamWatcher(String[] namespaces) { - super(namespaces); + public ImageStreamWatcher(String namespace) { + super(namespace); } @Override @@ -43,43 +50,33 @@ public int getListIntervalInSeconds() { return GlobalPluginConfiguration.get().getImageStreamListInterval(); } - public Runnable getStartTimerTask() { - return new SafeTimerTask() { - @Override - public void doRun() { - if (!CredentialsUtils.hasCredentials()) { - logger.fine("No Openshift Token credential defined."); - return; - } - for (String namespace : namespaces) { - addWatchForNamespace(namespace); - } - } - }; - } - - public void addWatchForNamespace(String 
namespace) { + public void start() { + logger.info("Now handling startup image streams for " + namespace + " !!"); ImageStreamList imageStreams = null; + String ns = this.namespace; try { logger.fine("listing ImageStream resources"); - imageStreams = OpenShiftUtils.getOpenshiftClient().imageStreams().inNamespace(namespace).list(); + imageStreams = getAuthenticatedOpenShiftClient().imageStreams().inNamespace(ns).list(); onImageStreamInitialization(imageStreams); logger.fine("handled ImageStream resources"); } catch (Exception e) { logger.log(SEVERE, "Failed to load ImageStreams: " + e, e); } try { - String resourceVersion = "0"; + String rv = "0"; if (imageStreams == null) { logger.warning("Unable to get image stream list; impacts resource version used for watch"); } else { - resourceVersion = imageStreams.getMetadata().getResourceVersion(); + rv = imageStreams.getMetadata().getResourceVersion(); } - if (watches.get(namespace) == null) { - logger.info("creating ImageStream watch for namespace " + namespace + " and resource version " + resourceVersion); - ImageStreamWatcher w = ImageStreamWatcher.this; - WatcherCallback watcher = new WatcherCallback(w, namespace); - addWatch(namespace, OpenShiftUtils.getOpenshiftClient().imageStreams().inNamespace(namespace).withResourceVersion(resourceVersion).watch(watcher)); + if (this.watch == null) { + synchronized (this.lock) { + if (this.watch == null) { + logger.info("creating ImageStream watch for namespace " + ns + " and resource version " + rv); + OpenShiftClient client = getOpenshiftClient(); + this.watch = client.imageStreams().inNamespace(ns).withResourceVersion(rv).watch(this); + } + } } } catch (Exception e) { logger.log(SEVERE, "Failed to load ImageStreams: " + e, e); @@ -88,16 +85,11 @@ public void addWatchForNamespace(String namespace) { public void startAfterOnClose(String namespace) { synchronized (this.lock) { - addWatchForNamespace(namespace); + start(); } } - public void start() { - // lets process the initial 
state - logger.info("Now handling startup image streams!!"); - super.start(); - } - + @Override public void eventReceived(Action action, ImageStream imageStream) { try { List slaves = PodTemplateUtils.getPodTemplatesListFromImageStreams(imageStream); @@ -107,13 +99,13 @@ public void eventReceived(Action action, ImageStream imageStream) { String namespace = metadata.getNamespace(); switch (action) { case ADDED: - processSlavesForAddEvent(slaves, PodTemplateUtils.IMAGESTREAM_TYPE, uid, name, namespace); + processSlavesForAddEvent(this, slaves, PodTemplateUtils.IMAGESTREAM_TYPE, uid, name, namespace); break; case MODIFIED: - processSlavesForModifyEvent(slaves, PodTemplateUtils.IMAGESTREAM_TYPE, uid, name, namespace); + processSlavesForModifyEvent(this, slaves, PodTemplateUtils.IMAGESTREAM_TYPE, uid, name, namespace); break; case DELETED: - processSlavesForDeleteEvent(slaves, PodTemplateUtils.IMAGESTREAM_TYPE, uid, name, namespace); + processSlavesForDeleteEvent(this, slaves, PodTemplateUtils.IMAGESTREAM_TYPE, uid, name, namespace); break; case ERROR: logger.warning("watch for imageStream " + name + " received error event "); @@ -127,24 +119,18 @@ public void eventReceived(Action action, ImageStream imageStream) { } } - @Override - public void eventReceived(Action action, T resource) { - ImageStream imageStream = (ImageStream) resource; - eventReceived(action, imageStream); - } - - private void onImageStreamInitialization(ImageStreamList imageStreams) { + private void onImageStreamInitialization(ImageStreamList imageStreams) { if (imageStreams != null) { List items = imageStreams.getItems(); if (items != null) { for (ImageStream imageStream : items) { try { - List agents = PodTemplateUtils.getPodTemplatesListFromImageStreams(imageStream); + List agents = getPodTemplatesListFromImageStreams(imageStream); for (PodTemplate entry : agents) { // watch event might beat the timer - put call is technically fine, but not // addPodTemplate given k8s plugin issues - if 
(!PodTemplateUtils.hasPodTemplate(entry)) { - PodTemplateUtils.addPodTemplate(entry); + if (!hasPodTemplate(entry)) { + addPodTemplate(entry); } } } catch (Exception e) { @@ -154,5 +140,4 @@ private void onImageStreamInitialization(ImageStreamList imageStreams) { } } } - } \ No newline at end of file diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/PodTemplateUtils.java b/src/main/java/io/fabric8/jenkins/openshiftsync/PodTemplateUtils.java index 14a4dbe1d..28b1abec8 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/PodTemplateUtils.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/PodTemplateUtils.java @@ -1,6 +1,23 @@ package io.fabric8.jenkins.openshiftsync; +import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getAuthenticatedOpenShiftClient; +import static java.util.logging.Level.FINE; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.logging.Level; +import java.util.logging.Logger; + +import org.csanchez.jenkins.plugins.kubernetes.KubernetesCloud; +import org.csanchez.jenkins.plugins.kubernetes.PodTemplate; +import org.csanchez.jenkins.plugins.kubernetes.PodVolumes; + import com.thoughtworks.xstream.XStreamException; + import hudson.util.XStream2; import io.fabric8.kubernetes.api.model.ConfigMap; import io.fabric8.kubernetes.api.model.ObjectMeta; @@ -11,411 +28,467 @@ import io.fabric8.openshift.api.model.ImageStreamTag; import io.fabric8.openshift.api.model.TagReference; import jenkins.model.Jenkins; -import org.csanchez.jenkins.plugins.kubernetes.KubernetesCloud; -import org.csanchez.jenkins.plugins.kubernetes.PodTemplate; -import org.csanchez.jenkins.plugins.kubernetes.PodVolumes; -import org.csanchez.jenkins.plugins.kubernetes.model.KeyValueEnvVar; -import org.csanchez.jenkins.plugins.kubernetes.model.TemplateEnvVar; - -import java.io.IOException; -import 
java.util.ArrayList; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; -import java.util.logging.Level; -import java.util.logging.Logger; - -import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getAuthenticatedOpenShiftClient; -import static java.util.logging.Level.FINE; public class PodTemplateUtils { - protected static final String cmType = "ConfigMap"; - protected static final String isType = "ImageStream"; - static final String IMAGESTREAM_TYPE = isType; - private static final String PT_NAME_CLAIMED = "The event for %s | %s | %s that attempts to add the pod template %s was ignored because a %s previously created a pod template with the same name"; - private static final String PT_NOT_OWNED = "The event for %s | %s | %s that no longer includes the pod template %s was ignored because the type %s was associated with that pod template"; - private static final Logger LOGGER = Logger.getLogger(PodTemplateUtils.class.getName()); - private static final String PARAM_FROM_ENV_DESCRIPTION = "From OpenShift Build Environment Variable"; - static final String SLAVE_LABEL = "slave-label"; - private static final String SPECIAL_IST_PREFIX = "imagestreamtag:"; - private static final int SPECIAL_IST_PREFIX_IDX = SPECIAL_IST_PREFIX.length(); - protected static ConcurrentHashMap> trackedPodTemplates = new ConcurrentHashMap>(); - protected static ConcurrentHashMap podTemplateToApiType = new ConcurrentHashMap(); - - protected static boolean hasOneAndOnlyOneWithSomethingAfter(String str, String substr) { - return str.contains(substr) - && str.indexOf(substr) == str.lastIndexOf(substr) - && str.indexOf(substr) < str.length(); - } - - public static PodTemplate podTemplateInit(String name, String image, String label) { - LOGGER.info("Initializing PodTemplate: "+name); - PodTemplate podTemplate = new PodTemplate(image, new ArrayList()); - // with the above ctor guarnateed to have 1 container - // also still 
force our image as the special case "jnlp" container for - // the KubernetesSlave; - // attempts to use the "jenkinsci/jnlp-slave:alpine" image for a - // separate jnlp container - // have proved unsuccessful (could not access gihub.com for example) - podTemplate.getContainers().get(0).setName("jnlp"); - // podTemplate.setInstanceCap(Integer.MAX_VALUE); - podTemplate.setName(name); - podTemplate.setLabel(label); - podTemplate.setAlwaysPullImage(true); - podTemplate.setCommand(""); - podTemplate.setArgs("${computer.jnlpmac} ${computer.name}"); - podTemplate.setRemoteFs("/tmp"); - String podName = System.getenv().get("HOSTNAME"); - if (podName != null) { - Pod pod = getAuthenticatedOpenShiftClient().pods().withName(podName).get(); - if (pod != null) { - podTemplate.setServiceAccount(pod.getSpec().getServiceAccountName()); - } + protected static final String cmType = "ConfigMap"; + protected static final String isType = "ImageStream"; + static final String IMAGESTREAM_TYPE = isType; + private static final String PT_NAME_CLAIMED = "The event for %s | %s | %s that attempts to add the pod template %s was ignored because a %s previously created a pod template with the same name"; + private static final String PT_NOT_OWNED = "The event for %s | %s | %s that no longer includes the pod template %s was ignored because the type %s was associated with that pod template"; + private static final Logger LOGGER = Logger.getLogger(PodTemplateUtils.class.getName()); + private static final String PARAM_FROM_ENV_DESCRIPTION = "From OpenShift Build Environment Variable"; + static final String SLAVE_LABEL = "slave-label"; + private static final String SPECIAL_IST_PREFIX = "imagestreamtag:"; + private static final int SPECIAL_IST_PREFIX_IDX = SPECIAL_IST_PREFIX.length(); + protected static ConcurrentHashMap> trackedPodTemplates = new ConcurrentHashMap>(); + protected static ConcurrentHashMap podTemplateToApiType = new ConcurrentHashMap(); + + protected static boolean 
hasOneAndOnlyOneWithSomethingAfter(String str, String substr) { + return str.contains(substr) && str.indexOf(substr) == str.lastIndexOf(substr) + && str.indexOf(substr) < str.length(); } - return podTemplate; - } - - - public static void removePodTemplate(PodTemplate podTemplate) { - KubernetesCloud kubeCloud = JenkinsUtils.getKubernetesCloud(); - if (kubeCloud != null) { - LOGGER.info("Removing PodTemplate: " + podTemplate.getName()); - // NOTE - PodTemplate does not currently override hashCode, equals, - // so - // the KubernetsCloud.removeTemplate currently is broken; - // kubeCloud.removeTemplate(podTemplate); - List list = kubeCloud.getTemplates(); - Iterator iter = list.iterator(); - while (iter.hasNext()) { - PodTemplate pt = iter.next(); - if (pt.getName().equals(podTemplate.getName())) { - iter.remove(); + + public static PodTemplate podTemplateInit(String name, String image, String label) { + LOGGER.info("Initializing PodTemplate: " + name); + PodTemplate podTemplate = new PodTemplate(image, new ArrayList()); + // with the above ctor guarnateed to have 1 container + // also still force our image as the special case "jnlp" container for + // the KubernetesSlave; + // attempts to use the "jenkinsci/jnlp-slave:alpine" image for a + // separate jnlp container + // have proved unsuccessful (could not access gihub.com for example) + podTemplate.getContainers().get(0).setName("jnlp"); + // podTemplate.setInstanceCap(Integer.MAX_VALUE); + podTemplate.setName(name); + podTemplate.setLabel(label); + podTemplate.setAlwaysPullImage(true); + podTemplate.setCommand(""); + podTemplate.setArgs("${computer.jnlpmac} ${computer.name}"); + podTemplate.setRemoteFs("/tmp"); + String podName = System.getenv().get("HOSTNAME"); + if (podName != null) { + Pod pod = getAuthenticatedOpenShiftClient().pods().withName(podName).get(); + if (pod != null) { + podTemplate.setServiceAccount(pod.getSpec().getServiceAccountName()); + } } - } - // now set new list back into cloud - 
kubeCloud.setTemplates(list); - try { - // pedantic mvn:findbugs - Jenkins jenkins = Jenkins.getInstance(); - if (jenkins != null) - jenkins.save(); - } catch (IOException e) { - LOGGER.log(Level.SEVERE, "removePodTemplate", e); - } - - if (LOGGER.isLoggable(Level.FINE)) { - LOGGER.fine("PodTemplates now:"); - for (PodTemplate pt : kubeCloud.getTemplates()) { - LOGGER.fine(pt.getName()); + return podTemplate; + } + + public static void removePodTemplate(PodTemplate podTemplate) { + KubernetesCloud kubeCloud = JenkinsUtils.getKubernetesCloud(); + if (kubeCloud != null) { + LOGGER.info("Removing PodTemplate: " + podTemplate.getName()); + // NOTE - PodTemplate does not currently override hashCode, equals, + // so + // the KubernetsCloud.removeTemplate currently is broken; + // kubeCloud.removeTemplate(podTemplate); + List list = kubeCloud.getTemplates(); + Iterator iter = list.iterator(); + while (iter.hasNext()) { + PodTemplate pt = iter.next(); + if (pt.getName().equals(podTemplate.getName())) { + iter.remove(); + } + } + // now set new list back into cloud + kubeCloud.setTemplates(list); + try { + // pedantic mvn:findbugs + Jenkins jenkins = Jenkins.getInstance(); + if (jenkins != null) + jenkins.save(); + } catch (IOException e) { + LOGGER.log(Level.SEVERE, "removePodTemplate", e); + } + + if (LOGGER.isLoggable(Level.FINE)) { + LOGGER.fine("PodTemplates now:"); + for (PodTemplate pt : kubeCloud.getTemplates()) { + LOGGER.fine(pt.getName()); + } + } } - } } - } - - public static synchronized List getPodTemplates() { - KubernetesCloud kubeCloud = JenkinsUtils.getKubernetesCloud(); - if (kubeCloud != null) { - // create copy of list for more flexiblity in loops - ArrayList list = new ArrayList(); - list.addAll(kubeCloud.getTemplates()); - return list; - } else { - return null; + + public static synchronized List getPodTemplates() { + KubernetesCloud kubeCloud = JenkinsUtils.getKubernetesCloud(); + if (kubeCloud != null) { + // create copy of list for more flexiblity 
in loops + ArrayList list = new ArrayList(); + list.addAll(kubeCloud.getTemplates()); + return list; + } else { + return null; + } } - } - - public static synchronized boolean hasPodTemplate(PodTemplate incomingPod) { - String name = incomingPod.getName(); - if (name == null) - return false; - String image = incomingPod.getImage(); - if (image == null) - return false; - KubernetesCloud kubeCloud = JenkinsUtils.getKubernetesCloud(); - if (kubeCloud != null) { - List list = kubeCloud.getTemplates(); - for (PodTemplate pod : list) { - if (name.equals(pod.getName()) && image.equals(pod.getImage())) - return true; - } + + public static synchronized boolean hasPodTemplate(PodTemplate incomingPod) { + String name = incomingPod.getName(); + if (name == null) + return false; + String image = incomingPod.getImage(); + if (image == null) + return false; + KubernetesCloud kubeCloud = JenkinsUtils.getKubernetesCloud(); + if (kubeCloud != null) { + List list = kubeCloud.getTemplates(); + for (PodTemplate pod : list) { + if (name.equals(pod.getName()) && image.equals(pod.getImage())) + return true; + } + } + return false; } - return false; - } - - public static synchronized void addPodTemplate(PodTemplate podTemplate) { - // clear out existing template with same name; k8s plugin maintains - // list, not map - removePodTemplate(podTemplate); - KubernetesCloud kubeCloud = JenkinsUtils.getKubernetesCloud(); - if (kubeCloud != null) { - LOGGER.info("Adding PodTemplate: " + podTemplate.getName()); - kubeCloud.addTemplate(podTemplate); - try { - // pedantic mvn:findbugs - Jenkins jenkins = Jenkins.getInstance(); - if (jenkins != null) - jenkins.save(); - } catch (IOException e) { - LOGGER.log(Level.SEVERE, "addPodTemplate", e); - } + + public static synchronized void addPodTemplate(PodTemplate podTemplate) { + // clear out existing template with same name; k8s plugin maintains + // list, not map + removePodTemplate(podTemplate); + KubernetesCloud kubeCloud = 
JenkinsUtils.getKubernetesCloud(); + if (kubeCloud != null) { + LOGGER.info("Adding PodTemplate: " + podTemplate.getName()); + kubeCloud.addTemplate(podTemplate); + try { + // pedantic mvn:findbugs + Jenkins jenkins = Jenkins.getInstance(); + if (jenkins != null) + jenkins.save(); + } catch (IOException e) { + LOGGER.log(Level.SEVERE, "addPodTemplate", e); + } + } } - } - - protected static void purgeTemplates(BaseWatcher baseWatcher, String type, String uid, String apiObjName, String namespace) { - LOGGER.info("Purging PodTemplates for from Configmap with Uid "+uid); - for (PodTemplate podTemplate : trackedPodTemplates.get(uid)) { - // we should not have included any pod templates we did not - // mark the type for, but we'll check just in case - removePodTemplate(LOGGER, PT_NOT_OWNED, type, apiObjName, namespace, podTemplate); + + protected static void purgeTemplates(BaseWatcher baseWatcher, String type, String uid, String apiObjName, + String namespace) { + LOGGER.info("Purging PodTemplates for from Configmap with Uid " + uid); + for (PodTemplate podTemplate : trackedPodTemplates.get(uid)) { + // we should not have included any pod templates we did not + // mark the type for, but we'll check just in case + removePodTemplate(LOGGER, PT_NOT_OWNED, type, apiObjName, namespace, podTemplate); + } + trackedPodTemplates.remove(uid); } - trackedPodTemplates.remove(uid); - } - - protected static void updateTrackedPodTemplatesMap(String uid, List finalSlaveList) { - if (finalSlaveList != null && finalSlaveList.size() > 0) - trackedPodTemplates.put(uid, finalSlaveList); - } - - // Adds PodTemplate to the List correspoding to the ConfigMap of given uid - protected static void trackPodTemplates(String uid, List podTemplatesToTrack) { - trackedPodTemplates.put(uid, podTemplatesToTrack); - } - - // Adds PodTemplate to the List correspoding to the ConfigMap of given uid and Deletes from Jenkins - protected static List onlyTrackPodTemplate(BaseWatcher baseWatcher, String type, 
String apiObjName, String namespace, List podTemplates, PodTemplate podTemplate) { - String name = podTemplate.getName(); - // we allow configmap overrides of maven and nodejs, but not imagestream ones - // as they are less specific/defined wrt podTemplate fields - - if (isReservedPodTemplateName(name) && isType.equals(type)) - return null; - // for imagestreams, if the core image has not changed, we avoid - // the remove/add pod template churn and multiple imagestream events - // come in for activity that does not affect the pod template - if (type.equals(isType) && hasPodTemplate(podTemplate)) - return null; - // once a CM or IS claims a name, it gets to keep it until it is remove or un-labeled - String ret = podTemplateToApiType.putIfAbsent(name, type); - // if not set, or previously set by an obj of the same type - if (ret == null || ret.equals(type)) { - removePodTemplate(podTemplate); - podTemplates.add(podTemplate); - } else { - LOGGER.info(String.format(PT_NAME_CLAIMED, type, apiObjName, namespace, name, ret)); + + protected static void updateTrackedPodTemplatesMap(String uid, List finalSlaveList) { + if (finalSlaveList != null && finalSlaveList.size() > 0) + trackedPodTemplates.put(uid, finalSlaveList); } - return podTemplates; - } - - // Adds PodTemplate from Jenkins - protected static void addPodTemplate(BaseWatcher baseWatcher, String type, String apiObjName, String namespace, List podTemplates, PodTemplate podTemplate) { - String name = podTemplate.getName(); - // we allow configmap overrides of maven and nodejs, but not imagestream ones - // as they are less specific/defined wrt podTemplate fields - if (apiObjName != null && namespace != null && podTemplates != null){ - if (isReservedPodTemplateName(name) && isType.equals(type)) { - return; - } - String ret = podTemplateToApiType.putIfAbsent(name, type); - if (ret == null || ret.equals(type)) { - addPodTemplate(podTemplate); - podTemplates.add(podTemplate); - } else { - 
LOGGER.info(String.format(PT_NAME_CLAIMED, type, apiObjName, namespace, name, ret)); - } - } else { - podTemplateToApiType.put(name, type); - addPodTemplate(podTemplate); + + // Adds PodTemplate to the List correspoding to the ConfigMap of + // given uid + protected static void trackPodTemplates(String uid, List podTemplatesToTrack) { + trackedPodTemplates.put(uid, podTemplatesToTrack); } - } - - // Delete a PodTemplate from Jenkins - protected static void removePodTemplate(Logger LOGGER, String PT_NOT_OWNED, String type, String apiObjName, String namespace, PodTemplate podTemplate) { - String name = podTemplate.getName(); - String t = podTemplateToApiType.get(name); - if (t != null && t.equals(type)) { - podTemplateToApiType.remove(name); - removePodTemplate(podTemplate); - } else { - LOGGER.info(String.format(PT_NOT_OWNED, type, apiObjName, namespace, name, t)); + + // Adds PodTemplate to the List correspoding to the ConfigMap of + // given uid and Deletes from Jenkins + protected static List onlyTrackPodTemplate(BaseWatcher baseWatcher, String type, String apiObjName, + String namespace, List podTemplates, PodTemplate podTemplate) { + String name = podTemplate.getName(); + // we allow configmap overrides of maven and nodejs, but not imagestream ones + // as they are less specific/defined wrt podTemplate fields + + if (isReservedPodTemplateName(name) && isType.equals(type)) + return null; + // for imagestreams, if the core image has not changed, we avoid + // the remove/add pod template churn and multiple imagestream events + // come in for activity that does not affect the pod template + if (type.equals(isType) && hasPodTemplate(podTemplate)) + return null; + // once a CM or IS claims a name, it gets to keep it until it is remove or + // un-labeled + String ret = podTemplateToApiType.putIfAbsent(name, type); + // if not set, or previously set by an obj of the same type + if (ret == null || ret.equals(type)) { + removePodTemplate(podTemplate); + 
podTemplates.add(podTemplate); + } else { + LOGGER.info(String.format(PT_NAME_CLAIMED, type, apiObjName, namespace, name, ret)); + } + return podTemplates; } - } - - protected static boolean isReservedPodTemplateName(String name) { - if (name.equals("maven") || name.equals("nodejs")) - return true; - return false; - } - - protected static List getPodTemplatesListFromImageStreams(ImageStream imageStream) { - List results = new ArrayList(); - // for IS, since we can check labels, check there - ObjectMeta metadata = imageStream.getMetadata(); - String isName = metadata.getName(); - if (hasSlaveLabelOrAnnotation(metadata.getLabels())) { - ImageStreamStatus status = imageStream.getStatus(); - String repository = status.getDockerImageRepository(); - Map annotations = metadata.getAnnotations(); - PodTemplate podTemplate = podTemplateFromData(isName, repository, annotations); - results.add(podTemplate); + + // Adds PodTemplate from Jenkins + protected static void addPodTemplate(BaseWatcher baseWatcher, String type, String apiObjName, String namespace, + List podTemplates, PodTemplate podTemplate) { + String name = podTemplate.getName(); + // we allow configmap overrides of maven and nodejs, but not imagestream ones + // as they are less specific/defined wrt podTemplate fields + if (apiObjName != null && namespace != null && podTemplates != null) { + if (isReservedPodTemplateName(name) && isType.equals(type)) { + return; + } + String ret = podTemplateToApiType.putIfAbsent(name, type); + if (ret == null || ret.equals(type)) { + addPodTemplate(podTemplate); + podTemplates.add(podTemplate); + } else { + LOGGER.info(String.format(PT_NAME_CLAIMED, type, apiObjName, namespace, name, ret)); + } + } else { + podTemplateToApiType.put(name, type); + addPodTemplate(podTemplate); + } } - results.addAll(extractPodTemplatesFromImageStreamTags(imageStream)); - return results; - } - - protected static List extractPodTemplatesFromImageStreamTags(ImageStream imageStream) { - // for 
slave-label, still check annotations - // since we cannot create watches on ImageStream tags, we have to - // traverse the tags and look for the slave label - List results = new ArrayList(); - List tags = imageStream.getSpec().getTags(); - for (TagReference tagRef : tags) { - addPodTemplateFromImageStreamTag(results, imageStream, tagRef); + + // Delete a PodTemplate from Jenkins + protected static void removePodTemplate(Logger LOGGER, String PT_NOT_OWNED, String type, String apiObjName, + String namespace, PodTemplate podTemplate) { + String name = podTemplate.getName(); + String t = podTemplateToApiType.get(name); + if (t != null && t.equals(type)) { + podTemplateToApiType.remove(name); + removePodTemplate(podTemplate); + } else { + LOGGER.info(String.format(PT_NOT_OWNED, type, apiObjName, namespace, name, t)); + } } - return results; - } - - protected static void addPodTemplateFromImageStreamTag(List results, ImageStream imageStream, TagReference tagRef) { - ObjectMeta metadata = imageStream.getMetadata(); - String ns = metadata.getNamespace(); - String isName = metadata.getName(); - ImageStreamTag tag = null; - try { - String tagName = isName + ":" + tagRef.getName(); - tag = OpenShiftUtils.getOpenshiftClient().imageStreamTags().inNamespace(ns).withName(tagName).get(); - } catch (Throwable t) { - LOGGER.log(FINE, "addPodTemplateFromImageStreamTag", t); + + protected static boolean isReservedPodTemplateName(String name) { + if (name.equals("maven") || name.equals("nodejs")) + return true; + return false; } - // for ImageStreamTag (IST), we can't set labels directly, but can inherit, so - // we check annotations (if ImageStreamTag directly updated) and then labels (if - // inherited from imagestream) - if (tag != null) { - ObjectMeta tagMetadata = tag.getMetadata(); - Map tagAnnotations = tagMetadata.getAnnotations(); - String tagName = tagMetadata.getName(); - String tagImageReference = tag.getImage().getDockerImageReference(); - if 
(hasSlaveLabelOrAnnotation(tagAnnotations)) { - results.add(podTemplateFromData(tagName, tagImageReference, tagAnnotations)); - } else { - Map tagLabels = tagMetadata.getLabels(); - if (hasSlaveLabelOrAnnotation(tagLabels)) { - results.add(podTemplateFromData(tagName, tagImageReference, tagLabels)); + + protected static List getPodTemplatesListFromImageStreams(ImageStream imageStream) { + List results = new ArrayList(); + // for IS, since we can check labels, check there + ObjectMeta metadata = imageStream.getMetadata(); + String isName = metadata.getName(); + if (hasSlaveLabelOrAnnotation(metadata.getLabels())) { + ImageStreamStatus status = imageStream.getStatus(); + String repository = status.getDockerImageRepository(); + Map annotations = metadata.getAnnotations(); + PodTemplate podTemplate = podTemplateFromData(isName, repository, annotations); + results.add(podTemplate); } - } + results.addAll(extractPodTemplatesFromImageStreamTags(imageStream)); + return results; } - } - - protected static PodTemplate podTemplateFromData(String name, String image, Map map) { - // node, pod names cannot have colons - String templateName = name.replaceAll(":", "."); - String label = (map != null && map.containsKey(SLAVE_LABEL)) ? map.get(SLAVE_LABEL) : name; - return podTemplateInit(templateName, image, label); - } - - // podTemplatesFromConfigMap takes every key from a ConfigMap and tries to - // create a PodTemplate from the contained - // XML. 
- public static List podTemplatesFromConfigMap(ConfigMapWatcher configMapWatcher, ConfigMap configMap) { - List results = new ArrayList<>(); - Map data = configMap.getData(); - - if (!configMapContainsSlave(configMap)) { - return results; + + protected static List extractPodTemplatesFromImageStreamTags(ImageStream imageStream) { + // for slave-label, still check annotations + // since we cannot create watches on ImageStream tags, we have to + // traverse the tags and look for the slave label + List results = new ArrayList(); + List tags = imageStream.getSpec().getTags(); + for (TagReference tagRef : tags) { + addPodTemplateFromImageStreamTag(results, imageStream, tagRef); + } + return results; } - XStream2 xStream2 = new XStream2(); - - for (Map.Entry entry : data.entrySet()) { - Object podTemplate; - try { - podTemplate = xStream2.fromXML(entry.getValue()); - - String warningPrefix = "Content of key '" + entry.getKey() - + "' in ConfigMap '" - + configMap.getMetadata().getName(); - if (podTemplate instanceof PodTemplate) { - PodTemplate pt = (PodTemplate) podTemplate; - - String image = pt.getImage(); - try { - // if requested via special prefix, convert this images - // entry field, if not already fully qualified, as if - // it were an IST - // IST of form [optional_namespace]/imagestreamname:tag - // checks based on ParseImageStreamTagName in - // https://github.com/openshift/origin/blob/master/pkg/image/apis/image/helper.go - if (image.startsWith(SPECIAL_IST_PREFIX)) { - image = image.substring(SPECIAL_IST_PREFIX_IDX); - if (image.contains("@")) { - LOGGER.warning(warningPrefix - + " the presence of @ implies an image stream image, not an image stream tag, " - + " so no ImageStreamTag to Docker image reference translation was performed."); - } else { - boolean hasNamespace = hasOneAndOnlyOneWithSomethingAfter(image, "/"); - boolean hasTag = hasOneAndOnlyOneWithSomethingAfter(image, ":"); - String namespace = getAuthenticatedOpenShiftClient().getNamespace(); - 
String isName = image; - String newImage = null; - if (hasNamespace) { - String[] parts = image.split("/"); - namespace = parts[0]; - isName = parts[1]; + protected static void addPodTemplateFromImageStreamTag(List results, ImageStream imageStream, + TagReference tagRef) { + ObjectMeta metadata = imageStream.getMetadata(); + String ns = metadata.getNamespace(); + String isName = metadata.getName(); + ImageStreamTag tag = null; + try { + String tagName = isName + ":" + tagRef.getName(); + tag = OpenShiftUtils.getOpenshiftClient().imageStreamTags().inNamespace(ns).withName(tagName).get(); + } catch (Throwable t) { + LOGGER.log(FINE, "addPodTemplateFromImageStreamTag", t); + } + // for ImageStreamTag (IST), we can't set labels directly, but can inherit, so + // we check annotations (if ImageStreamTag directly updated) and then labels (if + // inherited from imagestream) + if (tag != null) { + ObjectMeta tagMetadata = tag.getMetadata(); + Map tagAnnotations = tagMetadata.getAnnotations(); + String tagName = tagMetadata.getName(); + String tagImageReference = tag.getImage().getDockerImageReference(); + if (hasSlaveLabelOrAnnotation(tagAnnotations)) { + results.add(podTemplateFromData(tagName, tagImageReference, tagAnnotations)); + } else { + Map tagLabels = tagMetadata.getLabels(); + if (hasSlaveLabelOrAnnotation(tagLabels)) { + results.add(podTemplateFromData(tagName, tagImageReference, tagLabels)); } - if (hasTag) { - ImageStreamTag ist = getAuthenticatedOpenShiftClient() - .imageStreamTags() - .inNamespace(namespace) - .withName(isName).get(); - Image imageFromIst = ist.getImage(); - String dockerImageReference = imageFromIst.getDockerImageReference(); - - if (ist != null && imageFromIst != null && dockerImageReference != null && dockerImageReference.length() > 0) { - newImage = dockerImageReference; - LOGGER.fine(String.format("Converting image ref %s as an imagestreamtag %s to fully qualified image %s", image, isName, newImage)); - } else { - 
LOGGER.warning(warningPrefix - + " used the 'imagestreamtag:' prefix in the image field, but the subsequent value, while a valid ImageStreamTag reference," - + " produced no valid ImageStreaTag upon lookup," - + " so no ImageStreamTag to Docker image reference translation was performed."); - } + } + } + } + + protected static PodTemplate podTemplateFromData(String name, String image, Map map) { + // node, pod names cannot have colons + String templateName = name.replaceAll(":", "."); + String label = (map != null && map.containsKey(SLAVE_LABEL)) ? map.get(SLAVE_LABEL) : name; + return podTemplateInit(templateName, image, label); + } + + // podTemplatesFromConfigMap takes every key from a ConfigMap and tries to + // create a PodTemplate from the contained + // XML. + public static List podTemplatesFromConfigMap(ConfigMapWatcher configMapWatcher, ConfigMap configMap) { + List results = new ArrayList<>(); + Map data = configMap.getData(); + + if (!configMapContainsSlave(configMap)) { + return results; + } + + XStream2 xStream2 = new XStream2(); + + for (Map.Entry entry : data.entrySet()) { + Object podTemplate; + try { + podTemplate = xStream2.fromXML(entry.getValue()); + + String warningPrefix = "Content of key '" + entry.getKey() + "' in ConfigMap '" + + configMap.getMetadata().getName(); + if (podTemplate instanceof PodTemplate) { + PodTemplate pt = (PodTemplate) podTemplate; + + String image = pt.getImage(); + try { + // if requested via special prefix, convert this images + // entry field, if not already fully qualified, as if + // it were an IST + // IST of form [optional_namespace]/imagestreamname:tag + // checks based on ParseImageStreamTagName in + // https://github.com/openshift/origin/blob/master/pkg/image/apis/image/helper.go + if (image.startsWith(SPECIAL_IST_PREFIX)) { + image = image.substring(SPECIAL_IST_PREFIX_IDX); + if (image.contains("@")) { + LOGGER.warning(warningPrefix + + " the presence of @ implies an image stream image, not an image stream 
tag, " + + " so no ImageStreamTag to Docker image reference translation was performed."); + } else { + boolean hasNamespace = hasOneAndOnlyOneWithSomethingAfter(image, "/"); + boolean hasTag = hasOneAndOnlyOneWithSomethingAfter(image, ":"); + String namespace = getAuthenticatedOpenShiftClient().getNamespace(); + String isName = image; + String newImage = null; + if (hasNamespace) { + String[] parts = image.split("/"); + namespace = parts[0]; + isName = parts[1]; + } + if (hasTag) { + ImageStreamTag ist = getAuthenticatedOpenShiftClient().imageStreamTags() + .inNamespace(namespace).withName(isName).get(); + Image imageFromIst = ist.getImage(); + String dockerImageReference = imageFromIst.getDockerImageReference(); + + if (ist != null && imageFromIst != null && dockerImageReference != null + && dockerImageReference.length() > 0) { + newImage = dockerImageReference; + LOGGER.fine(String.format( + "Converting image ref %s as an imagestreamtag %s to fully qualified image %s", + image, isName, newImage)); + } else { + LOGGER.warning(warningPrefix + + " used the 'imagestreamtag:' prefix in the image field, but the subsequent value, while a valid ImageStreamTag reference," + + " produced no valid ImageStreaTag upon lookup," + + " so no ImageStreamTag to Docker image reference translation was performed."); + } + } else { + LOGGER.warning(warningPrefix + + " used the 'imagestreamtag:' prefix in the image field, but the subsequent value had no tag indicator," + + " so no ImageStreamTag to Docker image reference translation was performed."); + } + if (newImage != null) { + LOGGER.fine("translated IST ref " + image + " to docker image ref " + newImage); + pt.getContainers().get(0).setImage(newImage); + } + } + } + } catch (Throwable t) { + if (LOGGER.isLoggable(FINE)) + LOGGER.log(FINE, "podTemplateFromConfigMap", t); + } + results.add((PodTemplate) podTemplate); } else { - LOGGER.warning(warningPrefix - + " used the 'imagestreamtag:' prefix in the image field, but the 
subsequent value had no tag indicator," - + " so no ImageStreamTag to Docker image reference translation was performed."); + LOGGER.warning(warningPrefix + "' is not a PodTemplate"); + } + } catch (XStreamException xse) { + LOGGER.warning(new IOException("Unable to read key '" + entry.getKey() + "' from ConfigMap '" + + configMap.getMetadata().getName() + "'", xse).getMessage()); + } catch (Error e) { + LOGGER.warning(new IOException("Unable to read key '" + entry.getKey() + "' from ConfigMap '" + + configMap.getMetadata().getName() + "'", e).getMessage()); + } + } + + return results; + } + + protected static boolean configMapContainsSlave(ConfigMap configMap) { + return hasSlaveLabelOrAnnotation(configMap.getMetadata().getLabels()); + } + + protected static boolean hasSlaveLabelOrAnnotation(Map map) { + if (map != null) + return map.containsKey("role") && map.get("role").equals("jenkins-slave"); + return false; + } + + + + protected static void processSlavesForAddEvent(BaseWatcher watcher, List slaves, String type, String uid, String apiObjName, + String namespace) { + LOGGER.info("Adding PodTemplate(s) for "); + List finalSlaveList = new ArrayList(); + for (PodTemplate podTemplate : slaves) { + addPodTemplate(watcher, type, apiObjName, namespace, finalSlaveList, podTemplate); + } + updateTrackedPodTemplatesMap(uid, finalSlaveList); + } + + + + protected static void processSlavesForModifyEvent(BaseWatcher watcher, List slaves, String type, + String uid, String apiObjName, String namespace) { + LOGGER.info("Modifying PodTemplates"); + boolean alreadyTracked = trackedPodTemplates.containsKey(uid); + boolean hasSlaves = slaves.size() > 0; // Configmap has podTemplates + if (alreadyTracked) { + if (hasSlaves) { + // Since the user could have change the immutable image + // that a PodTemplate uses, we just + // recreate the PodTemplate altogether. This makes it so + // that any changes from within + // Jenkins is undone. 
+ + // Check if there are new PodTemplates added or removed to the configmap, + // if they are, add them to or remove them from trackedPodTemplates + List podTemplatesToTrack = new ArrayList(); + purgeTemplates(watcher, type, uid, apiObjName, namespace); + for (PodTemplate pt : slaves) { + podTemplatesToTrack = PodTemplateUtils.onlyTrackPodTemplate(watcher, type, apiObjName, namespace, + podTemplatesToTrack, pt); } - if (newImage != null) { - LOGGER.fine("translated IST ref " + image + " to docker image ref " + newImage); - pt.getContainers().get(0).setImage(newImage); + updateTrackedPodTemplatesMap(uid, podTemplatesToTrack); + for (PodTemplate podTemplate : podTemplatesToTrack) { + // still do put here in case this is a new item from the last + // update on this ConfigMap/ImageStream + addPodTemplate(watcher, type, null, null, null, podTemplate); } - } + } else { + // The user modified the configMap to no longer be a + // jenkins-slave. + purgeTemplates(watcher, type, uid, apiObjName, namespace); } - } catch (Throwable t) { - if (LOGGER.isLoggable(FINE)) - LOGGER.log(FINE, "podTemplateFromConfigMap", t); - } - results.add((PodTemplate) podTemplate); } else { - LOGGER.warning(warningPrefix + "' is not a PodTemplate"); + if (hasSlaves) { + List finalSlaveList = new ArrayList(); + for (PodTemplate podTemplate : slaves) { + // The user modified the api obj to be a jenkins-slave + addPodTemplate(watcher, type, apiObjName, namespace, finalSlaveList, podTemplate); + } + updateTrackedPodTemplatesMap(uid, finalSlaveList); + } } - } catch (XStreamException xse) { - LOGGER.warning(new IOException("Unable to read key '" + entry.getKey() + "' from ConfigMap '" + configMap.getMetadata().getName() + "'", xse).getMessage()); - } catch (Error e) { - LOGGER.warning(new IOException("Unable to read key '" + entry.getKey() + "' from ConfigMap '" + configMap.getMetadata().getName() + "'", e).getMessage()); - } } - return results; - } - - protected static boolean 
configMapContainsSlave(ConfigMap configMap) { - return hasSlaveLabelOrAnnotation(configMap.getMetadata().getLabels()); - } + protected static void processSlavesForDeleteEvent(BaseWatcher watcher, List slaves, String type, + String uid, String apiObjName, String namespace) { + if (trackedPodTemplates.containsKey(uid)) { + purgeTemplates(watcher, type, uid, apiObjName, namespace); + } + } - protected static boolean hasSlaveLabelOrAnnotation(Map map) { - if (map != null) - return map.containsKey("role") - && map.get("role").equals("jenkins-slave"); - return false; - } } diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/SecretWatcher.java b/src/main/java/io/fabric8/jenkins/openshiftsync/SecretWatcher.java index 56002b43d..d5a88d7c5 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/SecretWatcher.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/SecretWatcher.java @@ -15,32 +15,34 @@ */ package io.fabric8.jenkins.openshiftsync; -import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; -import hudson.triggers.SafeTimerTask; -import io.fabric8.kubernetes.api.model.ObjectMeta; -import io.fabric8.kubernetes.api.model.Secret; -import io.fabric8.kubernetes.api.model.SecretList; -import io.fabric8.kubernetes.client.Watcher.Action; +import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_LABELS_SECRET_CREDENTIAL_SYNC; +import static io.fabric8.jenkins.openshiftsync.Constants.VALUE_SECRET_SYNC; +import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getAuthenticatedOpenShiftClient; +import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getOpenshiftClient; +import static java.util.logging.Level.SEVERE; import java.util.List; import java.util.concurrent.ConcurrentHashMap; import java.util.logging.Level; import java.util.logging.Logger; -import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getAuthenticatedOpenShiftClient; -import static java.util.logging.Level.SEVERE; +import 
edu.umd.cs.findbugs.annotations.SuppressFBWarnings; +import io.fabric8.kubernetes.api.model.ObjectMeta; +import io.fabric8.kubernetes.api.model.Secret; +import io.fabric8.kubernetes.api.model.SecretList; +import io.fabric8.openshift.client.OpenShiftClient; /** * Watches {@link Secret} objects in Kubernetes and syncs then to Credentials in * Jenkins */ -public class SecretWatcher extends BaseWatcher { +public class SecretWatcher extends BaseWatcher { private ConcurrentHashMap trackedSecrets; private final Logger logger = Logger.getLogger(getClass().getName()); @SuppressFBWarnings("EI_EXPOSE_REP2") - public SecretWatcher(String[] namespaces) { - super(namespaces); + public SecretWatcher(String namespace) { + super(namespace); this.trackedSecrets = new ConcurrentHashMap<>(); } @@ -49,72 +51,45 @@ public int getListIntervalInSeconds() { return GlobalPluginConfiguration.get().getSecretListInterval(); } - @Override - public Runnable getStartTimerTask() { - return new SafeTimerTask() { - @Override - public void doRun() { - if (!CredentialsUtils.hasCredentials()) { - logger.fine("No Openshift Token credential defined."); - return; - } - for (String namespace : namespaces) { - addWatchForNamespace(namespace); - } - - } - }; - } - - public void addWatchForNamespace(String namespace) { - SecretList secrets = null; - try { - logger.fine("listing Secrets resources"); - secrets = getAuthenticatedOpenShiftClient().secrets() - .inNamespace(namespace) - .withLabel(Constants.OPENSHIFT_LABELS_SECRET_CREDENTIAL_SYNC, Constants.VALUE_SECRET_SYNC).list(); - onInitialSecrets(secrets); - logger.fine("handled Secrets resources"); - } catch (Exception e) { - logger.log(SEVERE, "Failed to load Secrets: " + e, e); - } - try { - String resourceVersion = "0"; - if (secrets == null) { - logger.warning("Unable to get secret list; impacts resource version used for watch"); - } else { - resourceVersion = secrets.getMetadata() - .getResourceVersion(); + public void start() { + // lets process the 
initial state + //super.start(); + logger.info("Now handling startup secrets for " + namespace + " !!"); + SecretList secrets = null; + String ns = this.namespace; + try { + logger.fine("listing Secrets resources"); + secrets = getAuthenticatedOpenShiftClient().secrets().inNamespace(ns) + .withLabel(Constants.OPENSHIFT_LABELS_SECRET_CREDENTIAL_SYNC, Constants.VALUE_SECRET_SYNC).list(); + onInitialSecrets(secrets); + logger.fine("handled Secrets resources"); + } catch (Exception e) { + logger.log(SEVERE, "Failed to load Secrets: " + e, e); } - if (watches.get(namespace) == null) { - logger.info("creating Secret watch for namespace " - + namespace + " and resource version" - + resourceVersion); - addWatch(namespace, getAuthenticatedOpenShiftClient() - .secrets() - .inNamespace(namespace) - .withLabel(Constants.OPENSHIFT_LABELS_SECRET_CREDENTIAL_SYNC, - Constants.VALUE_SECRET_SYNC) - .withResourceVersion(resourceVersion) - .watch(new WatcherCallback(SecretWatcher.this,namespace))); - } - } catch (Exception e) { - logger.log(SEVERE, "Failed to load Secrets: " + e, e); - } - } + try { + String rv = "0"; + if (secrets == null) { + logger.warning("Unable to get secret list; impacts resource version used for watch"); + } else { + rv = secrets.getMetadata().getResourceVersion(); + } - public void startAfterOnClose(String namespace) { - synchronized (this.lock) { - addWatchForNamespace(namespace); + if (this.watch == null) { + synchronized (this.lock) { + if (this.watch == null) { + logger.info("creating Secret watch for namespace " + ns + " and resource version" + rv); + OpenShiftClient client = getOpenshiftClient(); + this.watch = client.secrets().inNamespace(ns) + .withLabel(OPENSHIFT_LABELS_SECRET_CREDENTIAL_SYNC, VALUE_SECRET_SYNC) + .withResourceVersion(rv).watch(this); + } + } + } + } catch (Exception e) { + logger.log(SEVERE, "Failed to load Secrets: " + e, e); } } - public void start() { - // lets process the initial state - super.start(); - logger.info("Now 
handling startup secrets!!"); - } - private void onInitialSecrets(SecretList secrets) { if (secrets == null) return; @@ -126,8 +101,7 @@ private void onInitialSecrets(SecretList secrets) { try { if (validSecret(secret) && shouldProcessSecret(secret)) { upsertCredential(secret); - trackedSecrets.put(secret.getMetadata().getUid(), - secret.getMetadata().getResourceVersion()); + trackedSecrets.put(secret.getMetadata().getUid(), secret.getMetadata().getResourceVersion()); } } catch (Exception e) { logger.log(SEVERE, "Failed to update job", e); @@ -137,6 +111,7 @@ private void onInitialSecrets(SecretList secrets) { } @SuppressFBWarnings("SF_SWITCH_NO_DEFAULT") + @Override public void eventReceived(Action action, Secret secret) { try { switch (action) { @@ -153,80 +128,76 @@ public void eventReceived(Action action, Secret secret) { logger.warning("watch for secret " + secret.getMetadata().getName() + " received error event "); break; default: - logger.warning("watch for secret " + secret.getMetadata().getName() + " received unknown event " + action); + logger.warning( + "watch for secret " + secret.getMetadata().getName() + " received unknown event " + action); break; } } catch (Exception e) { logger.log(Level.WARNING, "Caught: " + e, e); } } - @Override - public void eventReceived(io.fabric8.kubernetes.client.Watcher.Action action, T resource) { - Secret secret = (Secret)resource; - eventReceived(action, secret); - } private void upsertCredential(final Secret secret) throws Exception { - if (secret != null) { - ObjectMeta metadata = secret.getMetadata(); - if (metadata != null) { - logger.info("Upserting Secret with Uid " + metadata.getUid() + " with Name " + metadata.getName()); - if (validSecret(secret)) { - CredentialsUtils.upsertCredential(secret); - trackedSecrets.put(metadata.getUid(), metadata.getResourceVersion()); - } + if (secret != null) { + ObjectMeta metadata = secret.getMetadata(); + if (metadata != null) { + logger.info("Upserting Secret with Uid " + 
metadata.getUid() + " with Name " + metadata.getName()); + if (validSecret(secret)) { + CredentialsUtils.upsertCredential(secret); + trackedSecrets.put(metadata.getUid(), metadata.getResourceVersion()); + } + } } - } } private void modifyCredential(Secret secret) throws Exception { - if (secret != null) { - ObjectMeta metadata = secret.getMetadata(); - if (metadata != null) { - logger.info("Modifying Secret with Uid " + metadata.getUid() + " with Name " + metadata.getName()); - if (validSecret(secret) && shouldProcessSecret(secret)) { - CredentialsUtils.upsertCredential(secret); - trackedSecrets.put(metadata.getUid(), metadata.getResourceVersion()); - } + if (secret != null) { + ObjectMeta metadata = secret.getMetadata(); + if (metadata != null) { + logger.info("Modifying Secret with Uid " + metadata.getUid() + " with Name " + metadata.getName()); + if (validSecret(secret) && shouldProcessSecret(secret)) { + CredentialsUtils.upsertCredential(secret); + trackedSecrets.put(metadata.getUid(), metadata.getResourceVersion()); + } + } } - } } private boolean validSecret(Secret secret) { - if (secret !=null){ - ObjectMeta metadata = secret.getMetadata(); - if (metadata != null) { - String name = metadata.getName(); - String namespace = metadata.getNamespace(); - logger.info("Validating Secret with Uid "+ metadata.getUid() + " with Name " + name); - return name != null && !name.isEmpty() && namespace != null && !namespace.isEmpty(); + if (secret != null) { + ObjectMeta metadata = secret.getMetadata(); + if (metadata != null) { + String name = metadata.getName(); + String namespace = metadata.getNamespace(); + logger.info("Validating Secret with Uid " + metadata.getUid() + " with Name " + name); + return name != null && !name.isEmpty() && namespace != null && !namespace.isEmpty(); + } } - } return false; } private boolean shouldProcessSecret(Secret secret) { - if (secret !=null){ - ObjectMeta metadata = secret.getMetadata(); - if (metadata != null) { - String uid = 
metadata.getUid(); - String rv = metadata.getResourceVersion(); - String savedRV = trackedSecrets.get(uid); - if (savedRV == null || !savedRV.equals(rv)) { - return true; - } + if (secret != null) { + ObjectMeta metadata = secret.getMetadata(); + if (metadata != null) { + String uid = metadata.getUid(); + String rv = metadata.getResourceVersion(); + String savedRV = trackedSecrets.get(uid); + if (savedRV == null || !savedRV.equals(rv)) { + return true; + } + } } - } - return false; + return false; } private void deleteCredential(final Secret secret) throws Exception { - if (secret != null){ - ObjectMeta metadata = secret.getMetadata(); - if (metadata != null) { - trackedSecrets.remove(metadata.getUid()); - CredentialsUtils.deleteCredential(secret); + if (secret != null) { + ObjectMeta metadata = secret.getMetadata(); + if (metadata != null) { + trackedSecrets.remove(metadata.getUid()); + CredentialsUtils.deleteCredential(secret); + } } - } } } diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/WatcherCallback.java b/src/main/java/io/fabric8/jenkins/openshiftsync/WatcherCallback.java deleted file mode 100644 index 49c0c3bb6..000000000 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/WatcherCallback.java +++ /dev/null @@ -1,45 +0,0 @@ -/** - * Copyright (C) 2018 Red Hat, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package io.fabric8.jenkins.openshiftsync; - -import org.slf4j.LoggerFactory; - -import io.fabric8.kubernetes.client.Watcher; -import io.fabric8.kubernetes.client.WatcherException; - -public class WatcherCallback implements Watcher { - - private final BaseWatcher watcher; - private final String namespace; - - public WatcherCallback(BaseWatcher w, - String n) { - watcher = w; - namespace = n; - } - - @Override - public void eventReceived(io.fabric8.kubernetes.client.Watcher.Action action, T resource) { - watcher.eventReceived(action, resource); - } - - @Override - public void onClose(WatcherException cause) { - LoggerFactory.getLogger(WatcherCallback.class).debug("Watcher closed: " + watcher + " , for namespace: " + namespace); - watcher.onClose(cause, namespace); - } - -} From 23c2c8f150ead220d8dd930e4cf4a9bd1c3f58f2 Mon Sep 17 00:00:00 2001 From: Akram Ben Aissi Date: Wed, 7 Apr 2021 03:34:32 +0200 Subject: [PATCH 06/22] Evaluating migration to informer --- .../openshiftsync/ConfigMapInformer.java | 192 ++++++++++++++++++ 1 file changed, 192 insertions(+) create mode 100644 src/main/java/io/fabric8/jenkins/openshiftsync/ConfigMapInformer.java diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/ConfigMapInformer.java b/src/main/java/io/fabric8/jenkins/openshiftsync/ConfigMapInformer.java new file mode 100644 index 000000000..e1f9d3b75 --- /dev/null +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/ConfigMapInformer.java @@ -0,0 +1,192 @@ +/** + * Copyright (C) 2017 Red Hat, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package io.fabric8.jenkins.openshiftsync; + +import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getAuthenticatedOpenShiftClient; +import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getOpenshiftClient; +import static io.fabric8.jenkins.openshiftsync.PodTemplateUtils.processSlavesForAddEvent; +import static io.fabric8.jenkins.openshiftsync.PodTemplateUtils.processSlavesForDeleteEvent; +import static io.fabric8.jenkins.openshiftsync.PodTemplateUtils.processSlavesForModifyEvent; +import static java.util.logging.Level.SEVERE; +import static java.util.logging.Level.WARNING; + +import java.util.List; +import java.util.concurrent.ConcurrentHashMap; +import java.util.logging.Logger; + +import org.csanchez.jenkins.plugins.kubernetes.PodTemplate; + +import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; +import io.fabric8.kubernetes.api.model.ConfigMap; +import io.fabric8.kubernetes.api.model.ConfigMapList; +import io.fabric8.kubernetes.api.model.ObjectMeta; +import io.fabric8.kubernetes.client.informers.ResourceEventHandler; +import io.fabric8.kubernetes.client.informers.SharedIndexInformer; +import io.fabric8.kubernetes.client.informers.SharedInformerFactory; +import io.fabric8.openshift.client.OpenShiftClient; + +public class ConfigMapInformer extends ConfigMapWatcher implements ResourceEventHandler { + private static final Logger LOGGER = Logger.getLogger(ConfigMapWatcher.class.getName()); + private static final long RESYNC_PERIOD = 1000L; + + @SuppressFBWarnings("EI_EXPOSE_REP2") + public ConfigMapInformer(String namespace) { + super(namespace); + } + + @Override + public int getListIntervalInSeconds() { + return GlobalPluginConfiguration.get().getConfigMapListInterval(); + } + + public void start() { + LOGGER.info("Now handling startup config maps for " + namespace + " !!"); + ConfigMapList configMaps = null; + String ns = 
this.namespace; + try { + LOGGER.fine("listing ConfigMap resources"); + OpenShiftClient client = getAuthenticatedOpenShiftClient(); + SharedInformerFactory informerFactory = client.informers(); + SharedIndexInformer informer = informerFactory.inNamespace(namespace) + .sharedIndexInformerFor(ConfigMap.class, RESYNC_PERIOD); + informer.addEventHandler(this); + //configMaps = client.configMaps().inNamespace(ns).list(); + //onInitialConfigMaps(configMaps); + LOGGER.fine("handled ConfigMap resources"); + } catch (Exception e) { + LOGGER.log(SEVERE, "Failed to load ConfigMaps: " + e, e); + } + try { + String rv = "0"; + if (configMaps == null) { + LOGGER.warning("Unable to get config map list; impacts resource version used for watch"); + } else { + rv = configMaps.getMetadata().getResourceVersion(); + } + + if (this.watch == null) { + synchronized (this.lock) { + if (this.watch == null) { + LOGGER.info("creating ConfigMap watch for namespace " + ns + " and resource version " + rv); + OpenShiftClient client = getOpenshiftClient(); + this.watch = client.configMaps().inNamespace(ns).withResourceVersion(rv).watch(this); + } + } + } + } catch (Exception e) { + LOGGER.log(SEVERE, "Failed to load ConfigMaps: " + e, e); + } + + } + + public void startAfterOnClose(String namespace) { + synchronized (this.lock) { + start(); + } + } + + @Override + public void eventReceived(Action action, ConfigMap configMap) { + try { + List slavesFromCM = PodTemplateUtils.podTemplatesFromConfigMap(this, configMap); + boolean hasSlaves = slavesFromCM.size() > 0; + String uid = configMap.getMetadata().getUid(); + String cmname = configMap.getMetadata().getName(); + String namespace = configMap.getMetadata().getNamespace(); + switch (action) { + case ADDED: + if (hasSlaves) { + processSlavesForAddEvent(this, slavesFromCM, PodTemplateUtils.cmType, uid, cmname, namespace); + } + break; + case MODIFIED: + processSlavesForModifyEvent(this, slavesFromCM, PodTemplateUtils.cmType, uid, cmname, namespace); 
+ break; + case DELETED: + processSlavesForDeleteEvent(this, slavesFromCM, PodTemplateUtils.cmType, uid, cmname, namespace); + break; + case ERROR: + LOGGER.warning("watch for configMap " + configMap.getMetadata().getName() + " received error event "); + break; + default: + LOGGER.warning("watch for configMap " + configMap.getMetadata().getName() + " received unknown event " + + action); + break; + } + } catch (Exception e) { + LOGGER.log(WARNING, "Caught: " + e, e); + } + } + + private void onInitialConfigMaps(ConfigMapList configMaps) { + if (configMaps == null) + return; + if (PodTemplateUtils.trackedPodTemplates == null) { + PodTemplateUtils.trackedPodTemplates = new ConcurrentHashMap<>(configMaps.getItems().size()); + } + List items = configMaps.getItems(); + if (items != null) { + for (ConfigMap configMap : items) { + try { + if (PodTemplateUtils.configMapContainsSlave(configMap) + && !PodTemplateUtils.trackedPodTemplates.containsKey(configMap.getMetadata().getUid())) { + List templates = PodTemplateUtils.podTemplatesFromConfigMap(this, configMap); + PodTemplateUtils.trackedPodTemplates.put(configMap.getMetadata().getUid(), templates); + for (PodTemplate podTemplate : templates) { + PodTemplateUtils.addPodTemplate(podTemplate); + } + } + } catch (Exception e) { + LOGGER.log(SEVERE, "Failed to update ConfigMap PodTemplates", e); + } + } + } + } + + @Override + public void onAdd(ConfigMap obj) { + List slavesFromCM = PodTemplateUtils.podTemplatesFromConfigMap(this, obj); + boolean hasSlaves = slavesFromCM.size() > 0; + if (hasSlaves) { + ObjectMeta metadata = obj.getMetadata(); + String uid = metadata.getUid(); + String cmname = metadata.getName(); + String namespace = metadata.getNamespace(); + processSlavesForAddEvent(this, slavesFromCM, PodTemplateUtils.cmType, uid, cmname, namespace); + } + } + + @Override + public void onUpdate(ConfigMap oldObj, ConfigMap newObj) { + List slavesFromCM = PodTemplateUtils.podTemplatesFromConfigMap(this, newObj); + ObjectMeta 
metadata = newObj.getMetadata(); + String uid = metadata.getUid(); + String cmname = metadata.getName(); + String namespace = metadata.getNamespace(); + processSlavesForModifyEvent(this, slavesFromCM, PodTemplateUtils.cmType, uid, cmname, namespace); + } + + @Override + public void onDelete(ConfigMap obj, boolean deletedFinalStateUnknown) { + List slavesFromCM = PodTemplateUtils.podTemplatesFromConfigMap(this, obj); + ObjectMeta metadata = obj.getMetadata(); + String uid = metadata.getUid(); + String cmname = metadata.getName(); + String namespace = metadata.getNamespace(); + processSlavesForDeleteEvent(this, slavesFromCM, PodTemplateUtils.cmType, uid, cmname, namespace); + } + +} From d0f10448c4b2fcdc22ce0a7452db7d2043f6386e Mon Sep 17 00:00:00 2001 From: Akram Ben Aissi Date: Wed, 7 Apr 2021 03:38:10 +0200 Subject: [PATCH 07/22] Clean up code from BasePluginConfiguration --- .../jenkins/openshiftsync/BaseWatcher.java | 64 ------------------- .../GlobalPluginConfiguration.java | 26 -------- 2 files changed, 90 deletions(-) diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/BaseWatcher.java b/src/main/java/io/fabric8/jenkins/openshiftsync/BaseWatcher.java index 2c416eeb8..031eff507 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/BaseWatcher.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/BaseWatcher.java @@ -40,11 +40,8 @@ public abstract class BaseWatcher implements Watcher { @SuppressFBWarnings("EI_EXPOSE_REP2") public BaseWatcher(String namespace) { this.namespace = namespace; - // this.watches = new ConcurrentHashMap<>(); } -// public abstract Runnable getStartTimerTask(); - public abstract int getListIntervalInSeconds(); protected abstract void start(); @@ -80,26 +77,12 @@ public void doRun() { }; } -// public abstract void eventReceived(io.fabric8.kubernetes.client.Watcher.Action action, T resource); -// public abstract void eventReceived(io.fabric8.kubernetes.client.Watcher.Action action, T resource); - public void 
startAfterOnClose(String namespace) { synchronized (this.lock) { start(); } } -// @Override -// public void onClose(WatcherException cause) { -// Watcher watcher = this; -// String namespace = getNamespace(); -// LOGGER.info("Closing watcher: cause: " + cause + ", watcher: " + watcher); -// // TODO implement here what should be done when closing this watcher -// // TODO Let's reimplement it, using Observer pattern and notifying the -// // GlobalPluginConfiguration listener -// // super.onClose(cause); -// } - @Override public void onClose() { Watcher watcher = this; @@ -113,17 +96,6 @@ public void onClose() { // watcher.onClose(cause); } - // public synchronized void start() { - // lets do this in a background thread to avoid errors like: - // Tried proxying - // io.fabric8.jenkins.openshiftsync.GlobalPluginConfiguration to support - // a circular dependency, but it is not an interface. - // Runnable task = getStartTimerTask(); - // still do the first run 100 milliseconds in - // this.relister = Timer.get().scheduleAtFixedRate(task, 100, - // getListIntervalInSeconds() * 1000, MILLISECONDS); - // } - public void stop() { if (this.watch != null) { synchronized (this.lock) { @@ -134,44 +106,8 @@ public void stop() { } } } - /* - * if (relister != null && !relister.isDone()) { relister.cancel(true); relister - * = null; } - * - * for (Map.Entry entry : watches.entrySet()) { - * entry.getValue().close(); watches.remove(entry.getKey()); } - */ } -// public void stop(String namespace) { -// Watch watch = watches.get(namespace); -// if (watch != null) { -// watch.close(); -// watches.remove(namespace); -// } -// } - // @Override -// public void onClose(WatcherException e, String namespace) { -// // scans of fabric client confirm this call be called with null -// // we do not want to totally ignore this, as the closing of the -// // watch can effect responsiveness -// LOGGER.info("Watch for type " + this.getClass().getName() + " closed for namespace : " + namespace); -// 
if (e != null) { -// synchronized (this.lock) { -// LOGGER.severe("Exception while watching namespace: " + namespace + ", " + e.toString()); -// // stop(namespace); -// // startAfterOnClose(namespace); -// } -// } -// } - -// public void addWatch(String key, Watch desiredWatch) { -// Watch watch = watches.putIfAbsent(key, desiredWatch); -// if (watch != null) { -// watch.close(); -// } -// } - public String toString() { return ReflectionToStringBuilder.toString(this, DEFAULT_STYLE, false, false) + ReflectionToStringBuilder.toString(this.watch); diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/GlobalPluginConfiguration.java b/src/main/java/io/fabric8/jenkins/openshiftsync/GlobalPluginConfiguration.java index 3658cee5f..7892bcdc9 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/GlobalPluginConfiguration.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/GlobalPluginConfiguration.java @@ -62,12 +62,6 @@ public class GlobalPluginConfiguration extends GlobalConfiguration { private int configMapListInterval = 300; private int imageStreamListInterval = 300; - private transient BuildWatcher buildWatcher; - private transient BuildConfigWatcher buildConfigWatcher; - private transient SecretWatcher secretWatcher; - private transient ConfigMapWatcher configMapWatcher; - private transient ImageStreamWatcher imageStreamWatcher; - private final static List> watchers = new ArrayList<>(); private transient ScheduledFuture schedule; @@ -251,26 +245,6 @@ void setNamespaces(String[] namespaces) { this.namespaces = namespaces; } - void setBuildWatcher(BuildWatcher buildWatcher) { - this.buildWatcher = buildWatcher; - } - - void setBuildConfigWatcher(BuildConfigWatcher buildConfigWatcher) { - this.buildConfigWatcher = buildConfigWatcher; - } - - void setSecretWatcher(SecretWatcher secretWatcher) { - this.secretWatcher = secretWatcher; - } - - void setConfigMapWatcher(ConfigMapWatcher configMapWatcher) { - this.configMapWatcher = configMapWatcher; - } - - 
void setImageStreamWatcher(ImageStreamWatcher imageStreamWatcher) { - this.imageStreamWatcher = imageStreamWatcher; - } - private synchronized void configChange() { logger.info("OpenShift Sync Plugin processing a newly supplied configuration"); synchronized (watchers) { From 7c7f40e92459ef2c64307a717eb5a2cdde7adc6c Mon Sep 17 00:00:00 2001 From: Akram Ben Aissi Date: Wed, 7 Apr 2021 19:37:11 +0200 Subject: [PATCH 08/22] Adding NPE check for restart cases --- .../openshiftsync/BuildConfigWatcher.java | 69 ++++++------ .../jenkins/openshiftsync/BuildWatcher.java | 65 ++++++++---- .../openshiftsync/ConfigMapWatcher.java | 4 + .../GlobalPluginConfiguration.java | 3 +- .../GlobalPluginConfigurationTimerTask.java | 100 +++++++++--------- .../openshiftsync/ImageStreamWatcher.java | 54 +++++----- .../openshiftsync/PodTemplateUtils.java | 30 +++--- .../jenkins/openshiftsync/SecretWatcher.java | 6 +- 8 files changed, 182 insertions(+), 149 deletions(-) diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/BuildConfigWatcher.java b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildConfigWatcher.java index 1f5344476..62c2f1aa1 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/BuildConfigWatcher.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildConfigWatcher.java @@ -18,11 +18,13 @@ import static io.fabric8.jenkins.openshiftsync.BuildConfigToJobMap.getJobFromBuildConfig; import static io.fabric8.jenkins.openshiftsync.BuildConfigToJobMap.initializeBuildConfigToJobMap; import static io.fabric8.jenkins.openshiftsync.BuildConfigToJobMap.removeJobWithBuildConfig; +import static io.fabric8.jenkins.openshiftsync.BuildPhases.NEW; import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_BUILD_STATUS_FIELD; import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_LABELS_BUILD_CONFIG_NAME; import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getAuthenticatedOpenShiftClient; import static 
io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getOpenshiftClient; import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.isPipelineStrategyBuildConfig; +import static java.util.concurrent.TimeUnit.MILLISECONDS; import static java.util.logging.Level.SEVERE; import java.util.List; @@ -138,8 +140,15 @@ private void onInitialBuildConfigs(BuildConfigList buildConfigs) { @SuppressFBWarnings("SF_SWITCH_NO_DEFAULT") @Override + @SuppressWarnings("deprecation") public void eventReceived(Action action, BuildConfig buildConfig) { + if (buildConfig == null) { + logger.warning("Received event with null BuildConfig: " + action + ", ignoring: " + this); + return; + } try { + boolean buildConfigNameNotNull = buildConfig != null && buildConfig.getMetadata() != null; + String name = buildConfigNameNotNull ? buildConfig.getMetadata().getName() : "null"; switch (action) { case ADDED: upsertJob(buildConfig); @@ -151,12 +160,10 @@ public void eventReceived(Action action, BuildConfig buildConfig) { modifyEventToJenkinsJob(buildConfig); break; case ERROR: - logger.warning( - "watch for buildconfig " + buildConfig.getMetadata().getName() + " received error event "); + logger.warning("watch for buildconfig " + name + " received error event "); break; default: - logger.warning("watch for buildconfig " + buildConfig.getMetadata().getName() - + " received unknown event " + action); + logger.warning("watch for buildconfig " + name + " received unknown event " + action); break; } // we employ impersonation here to insure we have "full access"; @@ -166,19 +173,13 @@ public void eventReceived(Action action, BuildConfig buildConfig) { ACL.impersonate(ACL.SYSTEM, new NotReallyRoleSensitiveCallable() { @Override public Void call() throws Exception { - // if bc event came after build events, let's - // poke the BuildWatcher builds with no BC list to - // create job - // runs + // if bc event came after build events, let's poke the BuildWatcher builds with + // no BC list to create job runs 
BuildWatcher.flushBuildsWithNoBCList(); - // now, if the build event was lost and never - // received, builds - // will stay in - // new for 5 minutes ... let's launch a background - // thread to - // clean them up - // at a quicker interval than the default 5 minute - // general build + // now, if the build event was lost and never received, builds will stay in new + // for 5 minutes ... + // let's launch a background thread to clean them up at a quicker interval than + // the default 5 minute general build // relist function if (action == Action.ADDED) { Runnable backupBuildQuery = new SafeTimerTask() { @@ -188,20 +189,16 @@ public void doRun() { logger.fine("No Openshift Token credential defined."); return; } - BuildList buildList = getAuthenticatedOpenShiftClient().builds() - .inNamespace(buildConfig.getMetadata().getNamespace()) - .withField(OPENSHIFT_BUILD_STATUS_FIELD, BuildPhases.NEW) - .withLabel(OPENSHIFT_LABELS_BUILD_CONFIG_NAME, - buildConfig.getMetadata().getName()) - .list(); + BuildList buildList = getAuthenticatedOpenShiftClient().builds().inNamespace(namespace) + .withField(OPENSHIFT_BUILD_STATUS_FIELD, NEW) + .withLabel(OPENSHIFT_LABELS_BUILD_CONFIG_NAME, name).list(); if (buildList.getItems().size() > 0) { - logger.info("build backup query for " + buildConfig.getMetadata().getName() - + " found new builds"); + logger.info("build backup query for " + name + " found new builds"); BuildWatcher.onInitialBuilds(buildList); } } }; - Timer.get().schedule(backupBuildQuery, 10 * 1000, TimeUnit.MILLISECONDS); + Timer.get().schedule(backupBuildQuery, 10 * 1000, MILLISECONDS); } return null; } @@ -275,17 +272,19 @@ public Void call() throws Exception { // nondeterministic // order private void deleteEventToJenkinsJob(final BuildConfig buildConfig) throws Exception { - String bcUid = buildConfig.getMetadata().getUid(); - if (bcUid != null && bcUid.length() > 0) { - // employ intern of the BC UID to facilitate sync'ing on the same - // actual object - bcUid = 
bcUid.intern(); - synchronized (bcUid) { - innerDeleteEventToJenkinsJob(buildConfig); - return; + if (buildConfig != null) { + String bcUid = buildConfig.getMetadata().getUid(); + if (bcUid != null && bcUid.length() > 0) { + // employ intern of the BC UID to facilitate sync'ing on the same + // actual object + bcUid = bcUid.intern(); + synchronized (bcUid) { + innerDeleteEventToJenkinsJob(buildConfig); + return; + } } + // uid should not be null / empty, but just in case, still clean up + innerDeleteEventToJenkinsJob(buildConfig); } - // uid should not be null / empty, but just in case, still clean up - innerDeleteEventToJenkinsJob(buildConfig); } } diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/BuildWatcher.java b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildWatcher.java index 7ee09e022..1fe5b5c56 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/BuildWatcher.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildWatcher.java @@ -33,7 +33,9 @@ import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.isCancellable; import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.isCancelled; import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.isNew; +import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.isPipelineStrategyBuild; import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.updateOpenShiftBuildPhase; +import static java.util.logging.Level.SEVERE; import static java.util.logging.Level.WARNING; import java.io.IOException; @@ -55,6 +57,9 @@ import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; import hudson.security.ACL; import io.fabric8.kubernetes.api.model.OwnerReference; +import io.fabric8.kubernetes.api.model.Status; +import io.fabric8.kubernetes.client.KubernetesClientException; +import io.fabric8.kubernetes.client.WatcherException; import io.fabric8.openshift.api.model.Build; import io.fabric8.openshift.api.model.BuildConfig; import io.fabric8.openshift.api.model.BuildList; @@ -125,9 
+130,19 @@ public void start() { } } } - + } catch (KubernetesClientException e) { + logger.log(SEVERE, "Failed to load initial Builds: " + e, e); + this.watch.close(); + Status status = e.getStatus(); + String message = status != null ? status.getMessage() : "Unknown status on query"; + // TODO add a throttling mechanism to wait before retrying in loop + try { + Thread.sleep(5000); + } catch (InterruptedException e1) { + } + this.onClose(new WatcherException(message, e)); } catch (Exception e) { - logger.log(Level.SEVERE, "Failed to load initial Builds: " + e, e); + logger.log(SEVERE, "Failed to load initial Builds: " + e, e); } reconcileRunsAndBuilds(); } @@ -141,29 +156,33 @@ public void startAfterOnClose(String namespace) { @SuppressFBWarnings("SF_SWITCH_NO_DEFAULT") @Override public void eventReceived(Action action, Build build) { - if (!OpenShiftUtils.isPipelineStrategyBuild(build)) + if (build == null) { + logger.warning("Received event with null Build: " + action + ", ignoring: " + this); return; - try { - switch (action) { - case ADDED: - addEventToJenkinsJobRun(build); - break; - case MODIFIED: - modifyEventToJenkinsJobRun(build); - break; - case DELETED: - deleteEventToJenkinsJobRun(build); - break; - case ERROR: - logger.warning("watch for build " + build.getMetadata().getName() + " received error event "); - break; - default: - logger.warning( - "watch for build " + build.getMetadata().getName() + " received unknown event " + action); - break; + } + if (isPipelineStrategyBuild(build)) { + try { + String name = build.getMetadata().getName(); + switch (action) { + case ADDED: + addEventToJenkinsJobRun(build); + break; + case MODIFIED: + modifyEventToJenkinsJobRun(build); + break; + case DELETED: + deleteEventToJenkinsJobRun(build); + break; + case ERROR: + logger.warning("watch for build " + name + " received error event "); + break; + default: + logger.warning("watch for build " + name + " received unknown event " + action); + break; + } + } catch 
(Exception e) { + logger.log(WARNING, "Caught: " + e, e); } - } catch (Exception e) { - logger.log(WARNING, "Caught: " + e, e); } } diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/ConfigMapWatcher.java b/src/main/java/io/fabric8/jenkins/openshiftsync/ConfigMapWatcher.java index af4b4b4cb..84dad6e0f 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/ConfigMapWatcher.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/ConfigMapWatcher.java @@ -91,6 +91,10 @@ public void startAfterOnClose(String namespace) { @Override public void eventReceived(Action action, ConfigMap configMap) { + if (configMap == null) { + LOGGER.warning("Received event with null ConfigMap: " + action + ", ignoring: " + this); + return; + } try { List slavesFromCM = PodTemplateUtils.podTemplatesFromConfigMap(this, configMap); boolean hasSlaves = slavesFromCM.size() > 0; diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/GlobalPluginConfiguration.java b/src/main/java/io/fabric8/jenkins/openshiftsync/GlobalPluginConfiguration.java index 7892bcdc9..afd7f5c37 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/GlobalPluginConfiguration.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/GlobalPluginConfiguration.java @@ -36,7 +36,6 @@ import hudson.Extension; import hudson.Util; import hudson.util.ListBoxModel; -import io.fabric8.kubernetes.client.KubernetesClientException; import io.fabric8.openshift.client.OpenShiftClient; import jenkins.model.GlobalConfiguration; import jenkins.model.Jenkins; @@ -281,7 +280,7 @@ private synchronized void configChange() { Runnable task = new GlobalPluginConfigurationTimerTask(this); // lets give jenkins a while to get started ;) this.schedule = Timer.get().schedule(task, 1, SECONDS); - } catch (KubernetesClientException e) { + } catch (Exception e) { Throwable exceptionOrCause = (e.getCause() != null) ? 
e.getCause() : e; logger.log(SEVERE, "Failed to configure OpenShift Jenkins Sync Plugin: " + exceptionOrCause); } diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/GlobalPluginConfigurationTimerTask.java b/src/main/java/io/fabric8/jenkins/openshiftsync/GlobalPluginConfigurationTimerTask.java index c59a36dec..a9c8db854 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/GlobalPluginConfigurationTimerTask.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/GlobalPluginConfigurationTimerTask.java @@ -22,62 +22,66 @@ public GlobalPluginConfigurationTimerTask(GlobalPluginConfiguration globalPlugin @Override protected void doRun() throws Exception { - logger.info("Confirming Jenkins is started"); - while (true) { - final Jenkins instance = Jenkins.getActiveInstance(); - // We can look at Jenkins Init Level to see if we are ready to start. If we do - // not wait, we risk the chance of a deadlock. - InitMilestone initLevel = instance.getInitLevel(); - logger.fine("Jenkins init level: " + initLevel); - if (initLevel == COMPLETED) { - break; - } - logger.info("Jenkins not ready..."); - try { - Thread.sleep(500); - } catch (InterruptedException e) { - logger.info("Interrupted while sleeping"); + try { + logger.info("Confirming Jenkins is started"); + while (true) { + final Jenkins instance = Jenkins.getActiveInstance(); + // We can look at Jenkins Init Level to see if we are ready to start. If we do + // not wait, we risk the chance of a deadlock. 
+ InitMilestone initLevel = instance.getInitLevel(); + logger.fine("Jenkins init level: " + initLevel); + if (initLevel == COMPLETED) { + break; + } + logger.info("Jenkins not ready..."); + try { + Thread.sleep(500); + } catch (InterruptedException e) { + logger.info("Interrupted while sleeping"); + } } - } - logger.info("Initializing all the watchers..."); - String[] namespaces = globalPluginConfiguration.getNamespaces(); - List> watchers = new ArrayList<>(); - for (String namespace : namespaces) { - BuildConfigWatcher buildConfigWatcher = new BuildConfigWatcher(namespace); - watchers.add(buildConfigWatcher); - buildConfigWatcher.start(); + logger.info("Initializing all the watchers..."); + String[] namespaces = globalPluginConfiguration.getNamespaces(); + List> watchers = new ArrayList<>(); + for (String namespace : namespaces) { + BuildConfigWatcher buildConfigWatcher = new BuildConfigWatcher(namespace); + watchers.add(buildConfigWatcher); + buildConfigWatcher.start(); - BuildWatcher buildWatcher = new BuildWatcher(namespace); - buildWatcher.start(); - watchers.add(buildWatcher); + BuildWatcher buildWatcher = new BuildWatcher(namespace); + buildWatcher.start(); + watchers.add(buildWatcher); - ConfigMapWatcher configMapWatcher = new ConfigMapWatcher(namespace); - configMapWatcher.start(); - watchers.add(configMapWatcher); + ConfigMapWatcher configMapWatcher = new ConfigMapWatcher(namespace); + configMapWatcher.start(); + watchers.add(configMapWatcher); - ImageStreamWatcher imageStreamWatcher = new ImageStreamWatcher(namespace); - imageStreamWatcher.start(); - watchers.add(imageStreamWatcher); + ImageStreamWatcher imageStreamWatcher = new ImageStreamWatcher(namespace); + imageStreamWatcher.start(); + watchers.add(imageStreamWatcher); - SecretWatcher secretWatcher = new SecretWatcher(namespace); - secretWatcher.start(); - watchers.add(secretWatcher); + SecretWatcher secretWatcher = new SecretWatcher(namespace); + secretWatcher.start(); + 
watchers.add(secretWatcher); - } - logger.info("All the watchers have been initialized!!"); - synchronized (watchers) { - List> globalWatchers = GlobalPluginConfiguration.getWatchers(); - synchronized (globalWatchers) { - logger.info("Existing watchers: " + globalWatchers); - for (BaseWatcher watch : globalWatchers) { - watch.stop(); - } - globalWatchers.clear(); - logger.info("Existing watchers: stopped and cleared : " + globalWatchers); - globalWatchers.addAll(watchers); - logger.info("New watchers created : " + globalWatchers.size()); + } + logger.info("All the watchers have been initialized!!"); + synchronized (watchers) { + List> globalWatchers = GlobalPluginConfiguration.getWatchers(); + synchronized (globalWatchers) { + logger.info("Existing watchers: " + globalWatchers); + for (BaseWatcher watch : globalWatchers) { + watch.stop(); + } + globalWatchers.clear(); + logger.info("Existing watchers: stopped and cleared : " + globalWatchers); + globalWatchers.addAll(watchers); + logger.info("New watchers created : " + globalWatchers.size()); + } } + } catch (Exception e) { + logger.severe("Failed to initialize watchers: " + e); } } } diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/ImageStreamWatcher.java b/src/main/java/io/fabric8/jenkins/openshiftsync/ImageStreamWatcher.java index 28d8c79e0..1685071d2 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/ImageStreamWatcher.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/ImageStreamWatcher.java @@ -17,6 +17,7 @@ import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getAuthenticatedOpenShiftClient; import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getOpenshiftClient; +import static io.fabric8.jenkins.openshiftsync.PodTemplateUtils.IMAGESTREAM_TYPE; import static io.fabric8.jenkins.openshiftsync.PodTemplateUtils.addPodTemplate; import static io.fabric8.jenkins.openshiftsync.PodTemplateUtils.getPodTemplatesListFromImageStreams; import static 
io.fabric8.jenkins.openshiftsync.PodTemplateUtils.hasPodTemplate; @@ -91,31 +92,36 @@ public void startAfterOnClose(String namespace) { @Override public void eventReceived(Action action, ImageStream imageStream) { - try { - List slaves = PodTemplateUtils.getPodTemplatesListFromImageStreams(imageStream); - ObjectMeta metadata = imageStream.getMetadata(); - String uid = metadata.getUid(); - String name = metadata.getName(); - String namespace = metadata.getNamespace(); - switch (action) { - case ADDED: - processSlavesForAddEvent(this, slaves, PodTemplateUtils.IMAGESTREAM_TYPE, uid, name, namespace); - break; - case MODIFIED: - processSlavesForModifyEvent(this, slaves, PodTemplateUtils.IMAGESTREAM_TYPE, uid, name, namespace); - break; - case DELETED: - processSlavesForDeleteEvent(this, slaves, PodTemplateUtils.IMAGESTREAM_TYPE, uid, name, namespace); - break; - case ERROR: - logger.warning("watch for imageStream " + name + " received error event "); - break; - default: - logger.warning("watch for imageStream " + name + " received unknown event " + action); - break; + String ns = this.namespace; + if (imageStream != null) { + try { + List slaves = PodTemplateUtils.getPodTemplatesListFromImageStreams(imageStream); + ObjectMeta metadata = imageStream.getMetadata(); + String uid = metadata.getUid(); + String name = metadata.getName(); + String namespace = metadata.getNamespace(); + switch (action) { + case ADDED: + processSlavesForAddEvent(this, slaves, IMAGESTREAM_TYPE, uid, name, namespace); + break; + case MODIFIED: + processSlavesForModifyEvent(this, slaves, IMAGESTREAM_TYPE, uid, name, namespace); + break; + case DELETED: + processSlavesForDeleteEvent(this, slaves, IMAGESTREAM_TYPE, uid, name, namespace); + break; + case ERROR: + logger.warning("watch for imageStream " + ns + "/" + name + " received error event "); + break; + default: + logger.warning("watch for imageStream " + ns + "/" + name + " received unknown event " + action); + break; + } + } catch (Exception 
e) { + logger.log(WARNING, "Caught: " + e, e); } - } catch (Exception e) { - logger.log(WARNING, "Caught: " + e, e); + } else { + logger.log(SEVERE, "Received event with null imagestream " + ns + " and Action: " + action + "...ignoring"); } } diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/PodTemplateUtils.java b/src/main/java/io/fabric8/jenkins/openshiftsync/PodTemplateUtils.java index 28b1abec8..ab5a7a7ad 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/PodTemplateUtils.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/PodTemplateUtils.java @@ -255,17 +255,19 @@ protected static boolean isReservedPodTemplateName(String name) { protected static List getPodTemplatesListFromImageStreams(ImageStream imageStream) { List results = new ArrayList(); - // for IS, since we can check labels, check there - ObjectMeta metadata = imageStream.getMetadata(); - String isName = metadata.getName(); - if (hasSlaveLabelOrAnnotation(metadata.getLabels())) { - ImageStreamStatus status = imageStream.getStatus(); - String repository = status.getDockerImageRepository(); - Map annotations = metadata.getAnnotations(); - PodTemplate podTemplate = podTemplateFromData(isName, repository, annotations); - results.add(podTemplate); + if (imageStream != null) { + // for IS, since we can check labels, check there + ObjectMeta metadata = imageStream.getMetadata(); + String isName = metadata.getName(); + if (hasSlaveLabelOrAnnotation(metadata.getLabels())) { + ImageStreamStatus status = imageStream.getStatus(); + String repository = status.getDockerImageRepository(); + Map annotations = metadata.getAnnotations(); + PodTemplate podTemplate = podTemplateFromData(isName, repository, annotations); + results.add(podTemplate); + } + results.addAll(extractPodTemplatesFromImageStreamTags(imageStream)); } - results.addAll(extractPodTemplatesFromImageStreamTags(imageStream)); return results; } @@ -426,10 +428,8 @@ protected static boolean hasSlaveLabelOrAnnotation(Map map) { 
return false; } - - - protected static void processSlavesForAddEvent(BaseWatcher watcher, List slaves, String type, String uid, String apiObjName, - String namespace) { + protected static void processSlavesForAddEvent(BaseWatcher watcher, List slaves, String type, + String uid, String apiObjName, String namespace) { LOGGER.info("Adding PodTemplate(s) for "); List finalSlaveList = new ArrayList(); for (PodTemplate podTemplate : slaves) { @@ -438,8 +438,6 @@ protected static void processSlavesForAddEvent(BaseWatcher watcher, List watcher, List slaves, String type, String uid, String apiObjName, String namespace) { LOGGER.info("Modifying PodTemplates"); diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/SecretWatcher.java b/src/main/java/io/fabric8/jenkins/openshiftsync/SecretWatcher.java index d5a88d7c5..08ae5a68d 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/SecretWatcher.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/SecretWatcher.java @@ -53,7 +53,7 @@ public int getListIntervalInSeconds() { public void start() { // lets process the initial state - //super.start(); + // super.start(); logger.info("Now handling startup secrets for " + namespace + " !!"); SecretList secrets = null; String ns = this.namespace; @@ -113,6 +113,10 @@ private void onInitialSecrets(SecretList secrets) { @SuppressFBWarnings("SF_SWITCH_NO_DEFAULT") @Override public void eventReceived(Action action, Secret secret) { + if (secret == null) { + logger.warning("Received event with null Secret: " + action + ", ignoring: " + this); + return; + } try { switch (action) { case ADDED: From 0d2347523cd3176c0e47a34358c4fdfdca46cb67 Mon Sep 17 00:00:00 2001 From: Akram Ben Aissi Date: Wed, 7 Apr 2021 22:26:59 +0200 Subject: [PATCH 09/22] Basic informer for ConfigMap works --- .../openshiftsync/ConfigMapInformer.java | 168 ++++++------------ .../openshiftsync/ConfigMapWatcher.java | 4 +- .../GlobalPluginConfigurationTimerTask.java | 8 +- 
.../openshiftsync/PodTemplateUtils.java | 2 +- 4 files changed, 60 insertions(+), 122 deletions(-) diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/ConfigMapInformer.java b/src/main/java/io/fabric8/jenkins/openshiftsync/ConfigMapInformer.java index e1f9d3b75..415313c6f 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/ConfigMapInformer.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/ConfigMapInformer.java @@ -16,31 +16,34 @@ package io.fabric8.jenkins.openshiftsync; import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getAuthenticatedOpenShiftClient; -import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getOpenshiftClient; +import static io.fabric8.jenkins.openshiftsync.PodTemplateUtils.addPodTemplate; +import static io.fabric8.jenkins.openshiftsync.PodTemplateUtils.configMapContainsSlave; +import static io.fabric8.jenkins.openshiftsync.PodTemplateUtils.podTemplatesFromConfigMap; import static io.fabric8.jenkins.openshiftsync.PodTemplateUtils.processSlavesForAddEvent; import static io.fabric8.jenkins.openshiftsync.PodTemplateUtils.processSlavesForDeleteEvent; import static io.fabric8.jenkins.openshiftsync.PodTemplateUtils.processSlavesForModifyEvent; +import static io.fabric8.jenkins.openshiftsync.PodTemplateUtils.trackedPodTemplates; import static java.util.logging.Level.SEVERE; -import static java.util.logging.Level.WARNING; import java.util.List; -import java.util.concurrent.ConcurrentHashMap; -import java.util.logging.Logger; import org.csanchez.jenkins.plugins.kubernetes.PodTemplate; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; import io.fabric8.kubernetes.api.model.ConfigMap; -import io.fabric8.kubernetes.api.model.ConfigMapList; import io.fabric8.kubernetes.api.model.ObjectMeta; import io.fabric8.kubernetes.client.informers.ResourceEventHandler; import io.fabric8.kubernetes.client.informers.SharedIndexInformer; import 
io.fabric8.kubernetes.client.informers.SharedInformerFactory; +import io.fabric8.kubernetes.client.informers.cache.Lister; import io.fabric8.openshift.client.OpenShiftClient; public class ConfigMapInformer extends ConfigMapWatcher implements ResourceEventHandler { - private static final Logger LOGGER = Logger.getLogger(ConfigMapWatcher.class.getName()); - private static final long RESYNC_PERIOD = 1000L; + private static final Logger LOGGER = LoggerFactory.getLogger(ConfigMapWatcher.class); + + private static final long RESYNC_PERIOD = 30 * 1000L; @SuppressFBWarnings("EI_EXPOSE_REP2") public ConfigMapInformer(String namespace) { @@ -53,111 +56,22 @@ public int getListIntervalInSeconds() { } public void start() { - LOGGER.info("Now handling startup config maps for " + namespace + " !!"); - ConfigMapList configMaps = null; - String ns = this.namespace; - try { - LOGGER.fine("listing ConfigMap resources"); - OpenShiftClient client = getAuthenticatedOpenShiftClient(); - SharedInformerFactory informerFactory = client.informers(); - SharedIndexInformer informer = informerFactory.inNamespace(namespace) - .sharedIndexInformerFor(ConfigMap.class, RESYNC_PERIOD); - informer.addEventHandler(this); - //configMaps = client.configMaps().inNamespace(ns).list(); - //onInitialConfigMaps(configMaps); - LOGGER.fine("handled ConfigMap resources"); - } catch (Exception e) { - LOGGER.log(SEVERE, "Failed to load ConfigMaps: " + e, e); - } - try { - String rv = "0"; - if (configMaps == null) { - LOGGER.warning("Unable to get config map list; impacts resource version used for watch"); - } else { - rv = configMaps.getMetadata().getResourceVersion(); - } - - if (this.watch == null) { - synchronized (this.lock) { - if (this.watch == null) { - LOGGER.info("creating ConfigMap watch for namespace " + ns + " and resource version " + rv); - OpenShiftClient client = getOpenshiftClient(); - this.watch = client.configMaps().inNamespace(ns).withResourceVersion(rv).watch(this); - } - } - } - } catch 
(Exception e) { - LOGGER.log(SEVERE, "Failed to load ConfigMaps: " + e, e); - } - - } - - public void startAfterOnClose(String namespace) { - synchronized (this.lock) { - start(); - } - } - - @Override - public void eventReceived(Action action, ConfigMap configMap) { - try { - List slavesFromCM = PodTemplateUtils.podTemplatesFromConfigMap(this, configMap); - boolean hasSlaves = slavesFromCM.size() > 0; - String uid = configMap.getMetadata().getUid(); - String cmname = configMap.getMetadata().getName(); - String namespace = configMap.getMetadata().getNamespace(); - switch (action) { - case ADDED: - if (hasSlaves) { - processSlavesForAddEvent(this, slavesFromCM, PodTemplateUtils.cmType, uid, cmname, namespace); - } - break; - case MODIFIED: - processSlavesForModifyEvent(this, slavesFromCM, PodTemplateUtils.cmType, uid, cmname, namespace); - break; - case DELETED: - processSlavesForDeleteEvent(this, slavesFromCM, PodTemplateUtils.cmType, uid, cmname, namespace); - break; - case ERROR: - LOGGER.warning("watch for configMap " + configMap.getMetadata().getName() + " received error event "); - break; - default: - LOGGER.warning("watch for configMap " + configMap.getMetadata().getName() + " received unknown event " - + action); - break; - } - } catch (Exception e) { - LOGGER.log(WARNING, "Caught: " + e, e); - } - } - - private void onInitialConfigMaps(ConfigMapList configMaps) { - if (configMaps == null) - return; - if (PodTemplateUtils.trackedPodTemplates == null) { - PodTemplateUtils.trackedPodTemplates = new ConcurrentHashMap<>(configMaps.getItems().size()); - } - List items = configMaps.getItems(); - if (items != null) { - for (ConfigMap configMap : items) { - try { - if (PodTemplateUtils.configMapContainsSlave(configMap) - && !PodTemplateUtils.trackedPodTemplates.containsKey(configMap.getMetadata().getUid())) { - List templates = PodTemplateUtils.podTemplatesFromConfigMap(this, configMap); - PodTemplateUtils.trackedPodTemplates.put(configMap.getMetadata().getUid(), 
templates); - for (PodTemplate podTemplate : templates) { - PodTemplateUtils.addPodTemplate(podTemplate); - } - } - } catch (Exception e) { - LOGGER.log(SEVERE, "Failed to update ConfigMap PodTemplates", e); - } - } - } + LOGGER.info("Now handling startup config maps for {} !!", namespace); + LOGGER.trace("listing ConfigMap resources"); + OpenShiftClient client = getAuthenticatedOpenShiftClient(); + SharedInformerFactory informerFactory = client.informers(); + SharedIndexInformer informer = informerFactory.inNamespace(namespace) + .sharedIndexInformerFor(ConfigMap.class, RESYNC_PERIOD); + informer.addEventHandler(this); + LOGGER.info("ConfigMap informer started for namespace: {}", namespace); + Lister list = new Lister<>(informer.getIndexer(), namespace); + onInit(list.list()); + informerFactory.startAllRegisteredInformers(); } @Override public void onAdd(ConfigMap obj) { + LOGGER.info("ConfigMap informer received add event for: {}", obj); List slavesFromCM = PodTemplateUtils.podTemplatesFromConfigMap(this, obj); boolean hasSlaves = slavesFromCM.size() > 0; if (hasSlaves) { @@ -171,16 +85,18 @@ public void onAdd(ConfigMap obj) { @Override public void onUpdate(ConfigMap oldObj, ConfigMap newObj) { - List slavesFromCM = PodTemplateUtils.podTemplatesFromConfigMap(this, newObj); - ObjectMeta metadata = newObj.getMetadata(); - String uid = metadata.getUid(); - String cmname = metadata.getName(); - String namespace = metadata.getNamespace(); - processSlavesForModifyEvent(this, slavesFromCM, PodTemplateUtils.cmType, uid, cmname, namespace); + LOGGER.info("ConfigMap informer received update event for: {} to: {}", oldObj, newObj); +// List slavesFromCM = PodTemplateUtils.podTemplatesFromConfigMap(this, newObj); +// ObjectMeta metadata = newObj.getMetadata(); +// String uid = metadata.getUid(); +// String cmname = metadata.getName(); +// String namespace = metadata.getNamespace(); +// processSlavesForModifyEvent(this, slavesFromCM, PodTemplateUtils.cmType, uid, cmname, 
namespace); } @Override public void onDelete(ConfigMap obj, boolean deletedFinalStateUnknown) { + LOGGER.info("ConfigMap informer received delete event for: {}", obj); List slavesFromCM = PodTemplateUtils.podTemplatesFromConfigMap(this, obj); ObjectMeta metadata = obj.getMetadata(); String uid = metadata.getUid(); @@ -189,4 +105,28 @@ public void onDelete(ConfigMap obj, boolean deletedFinalStateUnknown) { processSlavesForDeleteEvent(this, slavesFromCM, PodTemplateUtils.cmType, uid, cmname, namespace); } + private void onInit(List list) { + if (list != null) { + for (ConfigMap configMap : list) { + addPodTemplateFromConfigMap(configMap); + } + } + } + + private void addPodTemplateFromConfigMap(ConfigMap configMap) { + try { + String uid = configMap.getMetadata().getUid(); + if (configMapContainsSlave(configMap) && !trackedPodTemplates.containsKey(uid)) { + List templates = podTemplatesFromConfigMap(this, configMap); + trackedPodTemplates.put(uid, templates); + for (PodTemplate podTemplate : templates) { + LOGGER.info("Adding PodTemplate {}", podTemplate); + addPodTemplate(podTemplate); + } + } + } catch (Exception e) { + LOGGER.error("Failed to update ConfigMap PodTemplates", e); + } + } + } diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/ConfigMapWatcher.java b/src/main/java/io/fabric8/jenkins/openshiftsync/ConfigMapWatcher.java index 84dad6e0f..cacacee84 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/ConfigMapWatcher.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/ConfigMapWatcher.java @@ -129,9 +129,7 @@ public void eventReceived(Action action, ConfigMap configMap) { private void onInitialConfigMaps(ConfigMapList configMaps) { if (configMaps == null) return; - if (PodTemplateUtils.trackedPodTemplates == null) { - PodTemplateUtils.trackedPodTemplates = new ConcurrentHashMap<>(configMaps.getItems().size()); - } + List items = configMaps.getItems(); if (items != null) { for (ConfigMap configMap : items) { diff --git 
a/src/main/java/io/fabric8/jenkins/openshiftsync/GlobalPluginConfigurationTimerTask.java b/src/main/java/io/fabric8/jenkins/openshiftsync/GlobalPluginConfigurationTimerTask.java index a9c8db854..62838050f 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/GlobalPluginConfigurationTimerTask.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/GlobalPluginConfigurationTimerTask.java @@ -52,9 +52,9 @@ protected void doRun() throws Exception { buildWatcher.start(); watchers.add(buildWatcher); - ConfigMapWatcher configMapWatcher = new ConfigMapWatcher(namespace); - configMapWatcher.start(); - watchers.add(configMapWatcher); + ConfigMapInformer configMapInformer = new ConfigMapInformer(namespace); + configMapInformer.start(); + watchers.add(configMapInformer); ImageStreamWatcher imageStreamWatcher = new ImageStreamWatcher(namespace); imageStreamWatcher.start(); @@ -81,7 +81,7 @@ protected void doRun() throws Exception { } } } catch (Exception e) { - logger.severe("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" + e); + logger.severe(e.toString()); } } } diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/PodTemplateUtils.java b/src/main/java/io/fabric8/jenkins/openshiftsync/PodTemplateUtils.java index ab5a7a7ad..5091e788b 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/PodTemplateUtils.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/PodTemplateUtils.java @@ -41,7 +41,7 @@ public class PodTemplateUtils { static final String SLAVE_LABEL = "slave-label"; private static final String SPECIAL_IST_PREFIX = "imagestreamtag:"; private static final int SPECIAL_IST_PREFIX_IDX = SPECIAL_IST_PREFIX.length(); - protected static ConcurrentHashMap> trackedPodTemplates = new ConcurrentHashMap>(); + protected final static ConcurrentHashMap> trackedPodTemplates = new ConcurrentHashMap>(); protected static ConcurrentHashMap podTemplateToApiType = new ConcurrentHashMap(); protected static boolean hasOneAndOnlyOneWithSomethingAfter(String str, String substr) { From 
09038cc248e082717b37f08ed7df175fa7c18f9e Mon Sep 17 00:00:00 2001 From: Akram Ben Aissi Date: Thu, 8 Apr 2021 12:46:47 +0200 Subject: [PATCH 10/22] Cleanup PodTemplateUtils to remove reference to BaseWatcher. Clean logs in ConfigMapInformer --- .../openshiftsync/ConfigMapInformer.java | 41 ++++++++++--------- .../openshiftsync/ConfigMapWatcher.java | 9 ++-- .../openshiftsync/ImageStreamWatcher.java | 6 +-- .../openshiftsync/PodTemplateUtils.java | 31 +++++++------- 4 files changed, 43 insertions(+), 44 deletions(-) diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/ConfigMapInformer.java b/src/main/java/io/fabric8/jenkins/openshiftsync/ConfigMapInformer.java index 415313c6f..3cfd14067 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/ConfigMapInformer.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/ConfigMapInformer.java @@ -23,13 +23,11 @@ import static io.fabric8.jenkins.openshiftsync.PodTemplateUtils.processSlavesForDeleteEvent; import static io.fabric8.jenkins.openshiftsync.PodTemplateUtils.processSlavesForModifyEvent; import static io.fabric8.jenkins.openshiftsync.PodTemplateUtils.trackedPodTemplates; -import static java.util.logging.Level.SEVERE; import java.util.List; +import java.util.logging.Logger; import org.csanchez.jenkins.plugins.kubernetes.PodTemplate; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; import io.fabric8.kubernetes.api.model.ConfigMap; @@ -41,7 +39,7 @@ import io.fabric8.openshift.client.OpenShiftClient; public class ConfigMapInformer extends ConfigMapWatcher implements ResourceEventHandler { - private static final Logger LOGGER = LoggerFactory.getLogger(ConfigMapWatcher.class); + private final Logger LOGGER = Logger.getLogger(getClass().getName()); private static final long RESYNC_PERIOD = 30 * 1000L; @@ -56,14 +54,14 @@ public int getListIntervalInSeconds() { } public void start() { - LOGGER.info("Now handling startup config maps for {} 
!!", namespace); - LOGGER.trace("listing ConfigMap resources"); + LOGGER.info("Now handling startup config maps for {} !!" + namespace); + LOGGER.fine("listing ConfigMap resources"); OpenShiftClient client = getAuthenticatedOpenShiftClient(); SharedInformerFactory informerFactory = client.informers(); SharedIndexInformer informer = informerFactory.inNamespace(namespace) .sharedIndexInformerFor(ConfigMap.class, RESYNC_PERIOD); informer.addEventHandler(this); - LOGGER.info("ConfigMap informer started for namespace: {}", namespace); + LOGGER.info("ConfigMap informer started for namespace: {}" + namespace); Lister list = new Lister<>(informer.getIndexer(), namespace); onInit(list.list()); informerFactory.startAllRegisteredInformers(); @@ -71,7 +69,7 @@ public void start() { @Override public void onAdd(ConfigMap obj) { - LOGGER.info("ConfigMap informer received add event for: {}", obj); + LOGGER.info("ConfigMap informer received add event for: {}" + obj); List slavesFromCM = PodTemplateUtils.podTemplatesFromConfigMap(this, obj); boolean hasSlaves = slavesFromCM.size() > 0; if (hasSlaves) { @@ -79,30 +77,33 @@ public void onAdd(ConfigMap obj) { String uid = metadata.getUid(); String cmname = metadata.getName(); String namespace = metadata.getNamespace(); - processSlavesForAddEvent(this, slavesFromCM, PodTemplateUtils.cmType, uid, cmname, namespace); + processSlavesForAddEvent( slavesFromCM, PodTemplateUtils.cmType, uid, cmname, namespace); } } @Override public void onUpdate(ConfigMap oldObj, ConfigMap newObj) { - LOGGER.info("ConfigMap informer received update event for: {} to: {}", oldObj, newObj); -// List slavesFromCM = PodTemplateUtils.podTemplatesFromConfigMap(this, newObj); -// ObjectMeta metadata = newObj.getMetadata(); -// String uid = metadata.getUid(); -// String cmname = metadata.getName(); -// String namespace = metadata.getNamespace(); -// processSlavesForModifyEvent(this, slavesFromCM, PodTemplateUtils.cmType, uid, cmname, namespace); + 
LOGGER.fine("ConfigMap informer received update event for: {} to: {}" + oldObj + newObj); + String oldResourceVersion = oldObj.getMetadata() != null ? oldObj.getMetadata().getResourceVersion() : null; + String newResourceVersion = newObj.getMetadata() != null ? newObj.getMetadata().getResourceVersion() : null; + LOGGER.info("Update event received resource versions: {} to: {}" + oldResourceVersion + newResourceVersion); + List slavesFromCM = PodTemplateUtils.podTemplatesFromConfigMap(this, newObj); + ObjectMeta metadata = newObj.getMetadata(); + String uid = metadata.getUid(); + String cmname = metadata.getName(); + String namespace = metadata.getNamespace(); + processSlavesForModifyEvent(slavesFromCM, PodTemplateUtils.cmType, uid, cmname, namespace); } @Override public void onDelete(ConfigMap obj, boolean deletedFinalStateUnknown) { - LOGGER.info("ConfigMap informer received delete event for: {}", obj); + LOGGER.info("ConfigMap informer received delete event for: {}" + obj); List slavesFromCM = PodTemplateUtils.podTemplatesFromConfigMap(this, obj); ObjectMeta metadata = obj.getMetadata(); String uid = metadata.getUid(); String cmname = metadata.getName(); String namespace = metadata.getNamespace(); - processSlavesForDeleteEvent(this, slavesFromCM, PodTemplateUtils.cmType, uid, cmname, namespace); + processSlavesForDeleteEvent(slavesFromCM, PodTemplateUtils.cmType, uid, cmname, namespace); } private void onInit(List list) { @@ -120,12 +121,12 @@ private void addPodTemplateFromConfigMap(ConfigMap configMap) { List templates = podTemplatesFromConfigMap(this, configMap); trackedPodTemplates.put(uid, templates); for (PodTemplate podTemplate : templates) { - LOGGER.info("Adding PodTemplate {}", podTemplate); + LOGGER.info("Adding PodTemplate {}" + podTemplate); addPodTemplate(podTemplate); } } } catch (Exception e) { - LOGGER.error("Failed to update ConfigMap PodTemplates", e); + LOGGER.severe("Failed to update ConfigMap PodTemplates" + e); } } diff --git 
a/src/main/java/io/fabric8/jenkins/openshiftsync/ConfigMapWatcher.java b/src/main/java/io/fabric8/jenkins/openshiftsync/ConfigMapWatcher.java index cacacee84..dbf387cd6 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/ConfigMapWatcher.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/ConfigMapWatcher.java @@ -24,7 +24,6 @@ import static java.util.logging.Level.WARNING; import java.util.List; -import java.util.concurrent.ConcurrentHashMap; import java.util.logging.Logger; import org.csanchez.jenkins.plugins.kubernetes.PodTemplate; @@ -104,14 +103,14 @@ public void eventReceived(Action action, ConfigMap configMap) { switch (action) { case ADDED: if (hasSlaves) { - processSlavesForAddEvent(this, slavesFromCM, PodTemplateUtils.cmType, uid, cmname, namespace); + processSlavesForAddEvent(slavesFromCM, PodTemplateUtils.cmType, uid, cmname, namespace); } break; case MODIFIED: - processSlavesForModifyEvent(this, slavesFromCM, PodTemplateUtils.cmType, uid, cmname, namespace); + processSlavesForModifyEvent(slavesFromCM, PodTemplateUtils.cmType, uid, cmname, namespace); break; case DELETED: - processSlavesForDeleteEvent(this, slavesFromCM, PodTemplateUtils.cmType, uid, cmname, namespace); + processSlavesForDeleteEvent(slavesFromCM, PodTemplateUtils.cmType, uid, cmname, namespace); break; case ERROR: LOGGER.warning("watch for configMap " + configMap.getMetadata().getName() + " received error event "); @@ -129,7 +128,7 @@ public void eventReceived(Action action, ConfigMap configMap) { private void onInitialConfigMaps(ConfigMapList configMaps) { if (configMaps == null) return; - + List items = configMaps.getItems(); if (items != null) { for (ConfigMap configMap : items) { diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/ImageStreamWatcher.java b/src/main/java/io/fabric8/jenkins/openshiftsync/ImageStreamWatcher.java index 1685071d2..9175af755 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/ImageStreamWatcher.java +++ 
b/src/main/java/io/fabric8/jenkins/openshiftsync/ImageStreamWatcher.java @@ -102,13 +102,13 @@ public void eventReceived(Action action, ImageStream imageStream) { String namespace = metadata.getNamespace(); switch (action) { case ADDED: - processSlavesForAddEvent(this, slaves, IMAGESTREAM_TYPE, uid, name, namespace); + processSlavesForAddEvent(slaves, IMAGESTREAM_TYPE, uid, name, namespace); break; case MODIFIED: - processSlavesForModifyEvent(this, slaves, IMAGESTREAM_TYPE, uid, name, namespace); + processSlavesForModifyEvent(slaves, IMAGESTREAM_TYPE, uid, name, namespace); break; case DELETED: - processSlavesForDeleteEvent(this, slaves, IMAGESTREAM_TYPE, uid, name, namespace); + processSlavesForDeleteEvent(slaves, IMAGESTREAM_TYPE, uid, name, namespace); break; case ERROR: logger.warning("watch for imageStream " + ns + "/" + name + " received error event "); diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/PodTemplateUtils.java b/src/main/java/io/fabric8/jenkins/openshiftsync/PodTemplateUtils.java index 5091e788b..7a4d17b08 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/PodTemplateUtils.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/PodTemplateUtils.java @@ -161,8 +161,7 @@ public static synchronized void addPodTemplate(PodTemplate podTemplate) { } } - protected static void purgeTemplates(BaseWatcher baseWatcher, String type, String uid, String apiObjName, - String namespace) { + protected static void purgeTemplates(String type, String uid, String apiObjName, String namespace) { LOGGER.info("Purging PodTemplates for from Configmap with Uid " + uid); for (PodTemplate podTemplate : trackedPodTemplates.get(uid)) { // we should not have included any pod templates we did not @@ -185,7 +184,7 @@ protected static void trackPodTemplates(String uid, List podTemplat // Adds PodTemplate to the List correspoding to the ConfigMap of // given uid and Deletes from Jenkins - protected static List onlyTrackPodTemplate(BaseWatcher baseWatcher, 
String type, String apiObjName, + protected static List onlyTrackPodTemplate(String type, String apiObjName, String namespace, List podTemplates, PodTemplate podTemplate) { String name = podTemplate.getName(); // we allow configmap overrides of maven and nodejs, but not imagestream ones @@ -212,7 +211,7 @@ protected static List onlyTrackPodTemplate(BaseWatcher baseWatcher, } // Adds PodTemplate from Jenkins - protected static void addPodTemplate(BaseWatcher baseWatcher, String type, String apiObjName, String namespace, + protected static void addPodTemplate(String type, String apiObjName, String namespace, List podTemplates, PodTemplate podTemplate) { String name = podTemplate.getName(); // we allow configmap overrides of maven and nodejs, but not imagestream ones @@ -428,17 +427,17 @@ protected static boolean hasSlaveLabelOrAnnotation(Map map) { return false; } - protected static void processSlavesForAddEvent(BaseWatcher watcher, List slaves, String type, - String uid, String apiObjName, String namespace) { + protected static void processSlavesForAddEvent(List slaves, String type, String uid, String apiObjName, + String namespace) { LOGGER.info("Adding PodTemplate(s) for "); List finalSlaveList = new ArrayList(); for (PodTemplate podTemplate : slaves) { - addPodTemplate(watcher, type, apiObjName, namespace, finalSlaveList, podTemplate); + addPodTemplate(type, apiObjName, namespace, finalSlaveList, podTemplate); } updateTrackedPodTemplatesMap(uid, finalSlaveList); } - protected static void processSlavesForModifyEvent(BaseWatcher watcher, List slaves, String type, + protected static void processSlavesForModifyEvent(List slaves, String type, String uid, String apiObjName, String namespace) { LOGGER.info("Modifying PodTemplates"); boolean alreadyTracked = trackedPodTemplates.containsKey(uid); @@ -454,38 +453,38 @@ protected static void processSlavesForModifyEvent(BaseWatcher watcher, List

podTemplatesToTrack = new ArrayList(); - purgeTemplates(watcher, type, uid, apiObjName, namespace); + purgeTemplates(type, uid, apiObjName, namespace); for (PodTemplate pt : slaves) { - podTemplatesToTrack = PodTemplateUtils.onlyTrackPodTemplate(watcher, type, apiObjName, namespace, + podTemplatesToTrack = PodTemplateUtils.onlyTrackPodTemplate(type, apiObjName, namespace, podTemplatesToTrack, pt); } updateTrackedPodTemplatesMap(uid, podTemplatesToTrack); for (PodTemplate podTemplate : podTemplatesToTrack) { // still do put here in case this is a new item from the last // update on this ConfigMap/ImageStream - addPodTemplate(watcher, type, null, null, null, podTemplate); + addPodTemplate(type, null, null, null, podTemplate); } } else { // The user modified the configMap to no longer be a // jenkins-slave. - purgeTemplates(watcher, type, uid, apiObjName, namespace); + purgeTemplates(type, uid, apiObjName, namespace); } } else { if (hasSlaves) { List finalSlaveList = new ArrayList(); for (PodTemplate podTemplate : slaves) { // The user modified the api obj to be a jenkins-slave - addPodTemplate(watcher, type, apiObjName, namespace, finalSlaveList, podTemplate); + addPodTemplate(type, apiObjName, namespace, finalSlaveList, podTemplate); } updateTrackedPodTemplatesMap(uid, finalSlaveList); } } } - protected static void processSlavesForDeleteEvent(BaseWatcher watcher, List slaves, String type, - String uid, String apiObjName, String namespace) { + protected static void processSlavesForDeleteEvent(List slaves, String type, String uid, + String apiObjName, String namespace) { if (trackedPodTemplates.containsKey(uid)) { - purgeTemplates(watcher, type, uid, apiObjName, namespace); + purgeTemplates(type, uid, apiObjName, namespace); } } From 5b3b29a68c69ba43ad87220643a29f334638fe83 Mon Sep 17 00:00:00 2001 From: Akram Ben Aissi Date: Thu, 8 Apr 2021 21:59:13 +0200 Subject: [PATCH 11/22] Migrated all watchers into informers --- .../openshiftsync/BuildComparator.java | 39 ++ 
.../openshiftsync/BuildConfigInformer.java | 264 ++++++++++ .../BuildConfigSecretToCredentialsMap.java | 36 -- .../jenkins/openshiftsync/BuildInformer.java | 399 +++++++++++++++ .../jenkins/openshiftsync/BuildWatcher.java | 2 +- .../openshiftsync/ConfigMapInformer.java | 93 ++-- .../openshiftsync/ConfigMapWatcher.java | 17 +- .../jenkins/openshiftsync/Constants.java | 14 +- .../openshiftsync/CredentialsUtils.java | 461 ++++++++++-------- .../GlobalPluginConfigurationTimerTask.java | 31 +- .../openshiftsync/ImageStreamInformer.java | 134 +++++ .../openshiftsync/ImageStreamWatcher.java | 12 +- .../jenkins/openshiftsync/OpenShiftUtils.java | 10 + .../openshiftsync/PodTemplateUtils.java | 103 ++-- .../jenkins/openshiftsync/SecretInformer.java | 110 +++++ .../jenkins/openshiftsync/SecretWatcher.java | 41 +- 16 files changed, 1378 insertions(+), 388 deletions(-) create mode 100644 src/main/java/io/fabric8/jenkins/openshiftsync/BuildComparator.java create mode 100644 src/main/java/io/fabric8/jenkins/openshiftsync/BuildConfigInformer.java delete mode 100644 src/main/java/io/fabric8/jenkins/openshiftsync/BuildConfigSecretToCredentialsMap.java create mode 100644 src/main/java/io/fabric8/jenkins/openshiftsync/BuildInformer.java create mode 100644 src/main/java/io/fabric8/jenkins/openshiftsync/ImageStreamInformer.java create mode 100644 src/main/java/io/fabric8/jenkins/openshiftsync/SecretInformer.java diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/BuildComparator.java b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildComparator.java new file mode 100644 index 000000000..ac62ebc2d --- /dev/null +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildComparator.java @@ -0,0 +1,39 @@ +package io.fabric8.jenkins.openshiftsync; + +import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_ANNOTATIONS_BUILD_NUMBER; + +import java.util.Comparator; +import java.util.logging.Level; +import java.util.logging.Logger; + +import 
io.fabric8.openshift.api.model.Build; + +public class BuildComparator implements Comparator { + private static final Logger LOGGER = Logger.getLogger(BuildInformer.class.getName()); + + @Override + public int compare(Build b1, Build b2) { + if (b1.getMetadata().getAnnotations() == null + || b1.getMetadata().getAnnotations().get(OPENSHIFT_ANNOTATIONS_BUILD_NUMBER) == null) { + LOGGER.warning("cannot compare build " + b1.getMetadata().getName() + " from namespace " + + b1.getMetadata().getNamespace() + ", has bad annotations: " + b1.getMetadata().getAnnotations()); + return 0; + } + if (b2.getMetadata().getAnnotations() == null + || b2.getMetadata().getAnnotations().get(OPENSHIFT_ANNOTATIONS_BUILD_NUMBER) == null) { + LOGGER.warning("cannot compare build " + b2.getMetadata().getName() + " from namespace " + + b2.getMetadata().getNamespace() + ", has bad annotations: " + b2.getMetadata().getAnnotations()); + return 0; + } + int rc = 0; + try { + rc = Long.compare( + + Long.parseLong(b1.getMetadata().getAnnotations().get(OPENSHIFT_ANNOTATIONS_BUILD_NUMBER)), + Long.parseLong(b2.getMetadata().getAnnotations().get(OPENSHIFT_ANNOTATIONS_BUILD_NUMBER))); + } catch (Throwable t) { + LOGGER.log(Level.FINE, "onInitialBuilds", t); + } + return rc; + } +} diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/BuildConfigInformer.java b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildConfigInformer.java new file mode 100644 index 000000000..009ab6565 --- /dev/null +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildConfigInformer.java @@ -0,0 +1,264 @@ +/** + * Copyright (C) 2016 Red Hat, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package io.fabric8.jenkins.openshiftsync; + +import static io.fabric8.jenkins.openshiftsync.BuildConfigToJobMap.getJobFromBuildConfig; +import static io.fabric8.jenkins.openshiftsync.BuildConfigToJobMap.removeJobWithBuildConfig; +import static io.fabric8.jenkins.openshiftsync.BuildPhases.NEW; +import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_BUILD_STATUS_FIELD; +import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_LABELS_BUILD_CONFIG_NAME; +import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getAuthenticatedOpenShiftClient; +import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getInformerFactory; +import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getOpenshiftClient; +import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.isPipelineStrategyBuildConfig; +import static java.util.concurrent.TimeUnit.MILLISECONDS; +import static java.util.logging.Level.SEVERE; + +import java.util.List; +import java.util.logging.Logger; + +import hudson.model.Job; +import hudson.security.ACL; +import hudson.triggers.SafeTimerTask; +import io.fabric8.kubernetes.api.model.ObjectMeta; +import io.fabric8.kubernetes.client.informers.ResourceEventHandler; +import io.fabric8.kubernetes.client.informers.SharedIndexInformer; +import io.fabric8.kubernetes.client.informers.SharedInformerFactory; +import io.fabric8.kubernetes.client.informers.cache.Lister; +import io.fabric8.openshift.api.model.BuildConfig; +import io.fabric8.openshift.api.model.BuildConfigList; +import 
io.fabric8.openshift.api.model.BuildList; +import io.fabric8.openshift.client.OpenShiftClient; +import jenkins.model.Jenkins; +import jenkins.security.NotReallyRoleSensitiveCallable; +import jenkins.util.Timer; + +/** + * Watches {@link BuildConfig} objects in OpenShift and for WorkflowJobs we + * ensure there is a suitable Jenkins Job object defined with the correct + * configuration + */ +public class BuildConfigInformer extends BuildConfigWatcher implements ResourceEventHandler { + + private final static Logger LOGGER = Logger.getLogger(BuildConfigInformer.class.getName()); + private SharedIndexInformer informer; + + public BuildConfigInformer(String namespace) { + super(namespace); + } + + @Override + public int getListIntervalInSeconds() { + return 1_000 * GlobalPluginConfiguration.get().getBuildConfigListInterval(); + } + + public void start() { + LOGGER.info("Starting BuildConfig informer for {} !!" + namespace); + LOGGER.fine("listing BuildConfig resources"); + SharedInformerFactory factory = getInformerFactory().inNamespace(namespace); + this.informer = factory.sharedIndexInformerFor(BuildConfig.class, getListIntervalInSeconds()); + informer.addEventHandler(this); + factory.startAllRegisteredInformers(); + LOGGER.info("BuildConfig informer started for namespace: {}" + namespace); + // waitInformerSync(informer); + BuildConfigList list = getOpenshiftClient().buildConfigs().inNamespace(namespace).list(); + onInit(list.getItems()); + } + + public void stop() { + LOGGER.info("Stopping BuildConfig informer {} !!"
+ namespace); + this.informer.stop(); + } + + @Override + public void onAdd(BuildConfig obj) { + LOGGER.fine("BuildConfig informer received add event for: {}" + obj); + ObjectMeta metadata = obj.getMetadata(); + String name = metadata.getName(); + LOGGER.info("BuildConfig informer received add event for: {}" + name); + upsertJob(obj); + } + + @Override + public void onUpdate(BuildConfig oldObj, BuildConfig newObj) { + LOGGER.info("BuildConfig informer received update event for: {} to: {}" + oldObj + newObj); + modifyEventToJenkinsJob(newObj); + } + + @Override + public void onDelete(BuildConfig obj, boolean deletedFinalStateUnknown) { + LOGGER.info("BuildConfig informer received delete event for: {}" + obj); + deleteEventToJenkinsJob(obj); + } + + @SuppressWarnings({ "deprecation", "serial" }) + private void cleanupJobsMissingStartBuildEvent(BuildConfig buildConfig) throws Exception { + boolean buildConfigNameNotNull = buildConfig != null && buildConfig.getMetadata() != null; + String name = buildConfigNameNotNull ? buildConfig.getMetadata().getName() : "null"; + // we employ impersonation here to insure we have "full access"; + // for example, can we actually + // read in jobs defs for verification? without impersonation here + // we would get null back when trying to read in the job from disk + ACL.impersonate(ACL.SYSTEM, new NotReallyRoleSensitiveCallable() { + @Override + public Void call() throws Exception { + // if bc event came after build events, let's poke the BuildWatcher builds with + // no BC list to create job runs + BuildWatcher.flushBuildsWithNoBCList(); + // now, if the build event was lost and never received, builds will stay in new + // for 5 minutes ... 
+ // let's launch a background thread to clean them up at a quicker interval than + // the default 5 minute general build + // relist function + Runnable backupBuildQuery = new SafeTimerTask() { + @Override + public void doRun() { + if (!CredentialsUtils.hasCredentials()) { + LOGGER.fine("No Openshift Token credential defined."); + return; + } + final OpenShiftClient client = getAuthenticatedOpenShiftClient(); + BuildList buildList = client.builds().inNamespace(namespace) + .withField(OPENSHIFT_BUILD_STATUS_FIELD, NEW) + .withLabel(OPENSHIFT_LABELS_BUILD_CONFIG_NAME, name).list(); + if (buildList.getItems().size() > 0) { + LOGGER.info("build backup query for " + name + " found new builds"); + BuildWatcher.onInitialBuilds(buildList); + } + } + }; + Timer.get().schedule(backupBuildQuery, 10 * 1000, MILLISECONDS); + return null; + } + }); + } + + @SuppressWarnings({ "deprecation" }) + private void upsertJob(final BuildConfig buildConfig) { + if (isPipelineStrategyBuildConfig(buildConfig)) { + // sync on intern of name should guarantee sync on same actual obj + synchronized (buildConfig.getMetadata().getUid().intern()) { + try { + ACL.impersonate(ACL.SYSTEM, new JobProcessor(this, buildConfig)); + } catch (Exception e) { + LOGGER.severe("Error while trying to insert JobRun: " + e); + + } + } + } + try { + cleanupJobsMissingStartBuildEvent(buildConfig); + } catch (Exception e) { + LOGGER.severe("Error while trying to clean up orphan JobRuns: " + e); + } + } + + private void modifyEventToJenkinsJob(BuildConfig buildConfig) { + if (isPipelineStrategyBuildConfig(buildConfig)) { + upsertJob(buildConfig); + return; + } + + // no longer a Jenkins build so lets delete it if it exists + deleteEventToJenkinsJob(buildConfig); + } + + // innerDeleteEventToJenkinsJob is the actual delete logic at the heart of + // deleteEventToJenkinsJob that is either in a sync block or not based on the + // presence of a BC uid + @SuppressWarnings({ "deprecation", "serial" }) + private void 
innerDeleteEventToJenkinsJob(final BuildConfig buildConfig) throws Exception { + Job job = getJobFromBuildConfig(buildConfig); + if (job != null) { + // employ intern of the BC UID to facilitate sync'ing on the same + // actual object + synchronized (buildConfig.getMetadata().getUid().intern()) { + ACL.impersonate(ACL.SYSTEM, new NotReallyRoleSensitiveCallable() { + @Override + public Void call() throws Exception { + try { + deleteInProgress( + buildConfig.getMetadata().getNamespace() + buildConfig.getMetadata().getName()); + job.delete(); + } finally { + removeJobWithBuildConfig(buildConfig); + Jenkins.getActiveInstance().rebuildDependencyGraphAsync(); + deleteCompleted( + buildConfig.getMetadata().getNamespace() + buildConfig.getMetadata().getName()); + } + return null; + } + }); + // if the bc has a source secret it is possible it should + // be deleted as well (called function will cross reference + // with secret watch) + CredentialsUtils.deleteSourceCredentials(buildConfig); + } + + } + + } + + private void onInit(List list) { + for (BuildConfig buildConfig : list) { + try { + upsertJob(buildConfig); + } catch (Exception e) { + LOGGER.log(SEVERE, "Failed to update job", e); + } + } + // poke the BuildWatcher builds with no BC list and see if we + // can create job + // runs for premature builds + BuildWatcher.flushBuildsWithNoBCList(); + } + + // in response to receiving an openshift delete build config event, this + // method will drive + // the clean up of the Jenkins job the build config is mapped one to one + // with; as part of that + // clean up it will synchronize with the build event watcher to handle build + // config + // delete events and build delete events that arrive concurrently and in a + // nondeterministic + // order + private void deleteEventToJenkinsJob(final BuildConfig buildConfig) { + if (buildConfig != null) { + String bcUid = buildConfig.getMetadata().getUid(); + if (bcUid != null && bcUid.length() > 0) { + // employ intern of the BC 
UID to facilitate sync'ing on the same + // actual object + bcUid = bcUid.intern(); + synchronized (bcUid) { + try { + innerDeleteEventToJenkinsJob(buildConfig); + } catch (Exception e) { + // TODO Auto-generated catch block + e.printStackTrace(); + } + return; + } + } + // uid should not be null / empty, but just in case, still clean up + try { + innerDeleteEventToJenkinsJob(buildConfig); + } catch (Exception e) { + // TODO Auto-generated catch block + e.printStackTrace(); + } + } + } +} diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/BuildConfigSecretToCredentialsMap.java b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildConfigSecretToCredentialsMap.java deleted file mode 100644 index 14d6386a8..000000000 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/BuildConfigSecretToCredentialsMap.java +++ /dev/null @@ -1,36 +0,0 @@ -/** - * Copyright (C) 2017 Red Hat, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package io.fabric8.jenkins.openshiftsync; - -import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; - -public class BuildConfigSecretToCredentialsMap { - - private static Map buildConfigSecretToCredentialMap = new ConcurrentHashMap(); - - private BuildConfigSecretToCredentialsMap() { - } - - static void linkBCSecretToCredential(String bc, String credential) { - buildConfigSecretToCredentialMap.put(bc, credential); - } - - static String unlinkBCSecretToCrendential(String bc) { - return buildConfigSecretToCredentialMap.remove(bc); - } - -} diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/BuildInformer.java b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildInformer.java new file mode 100644 index 000000000..f4095d7a2 --- /dev/null +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildInformer.java @@ -0,0 +1,399 @@ +/** + * Copyright (C) 2016 Red Hat, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package io.fabric8.jenkins.openshiftsync; + +import static io.fabric8.jenkins.openshiftsync.Annotations.BUILDCONFIG_NAME; +import static io.fabric8.jenkins.openshiftsync.BuildConfigToJobMap.getJobFromBuildConfig; +import static io.fabric8.jenkins.openshiftsync.BuildConfigToJobMap.getJobFromBuildConfigNameNamespace; +import static io.fabric8.jenkins.openshiftsync.BuildPhases.CANCELLED; +import static io.fabric8.jenkins.openshiftsync.JenkinsUtils.cancelBuild; +import static io.fabric8.jenkins.openshiftsync.JenkinsUtils.deleteRun; +import static io.fabric8.jenkins.openshiftsync.JenkinsUtils.getJobFromBuild; +import static io.fabric8.jenkins.openshiftsync.JenkinsUtils.handleBuildList; +import static io.fabric8.jenkins.openshiftsync.JenkinsUtils.triggerJob; +import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getAnnotation; +import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getAuthenticatedOpenShiftClient; +import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getInformerFactory; +import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getOpenshiftClient; +import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.isCancellable; +import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.isCancelled; +import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.isNew; +import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.updateOpenShiftBuildPhase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.ConcurrentModificationException; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.logging.Level; +import java.util.logging.Logger; + +import org.apache.commons.lang.StringUtils; +import org.jenkinsci.plugins.workflow.job.WorkflowJob; +import org.jenkinsci.plugins.workflow.job.WorkflowRun; + +import hudson.security.ACL; +import io.fabric8.kubernetes.api.model.ObjectMeta; 
+import io.fabric8.kubernetes.api.model.OwnerReference; +import io.fabric8.kubernetes.client.informers.ResourceEventHandler; +import io.fabric8.kubernetes.client.informers.SharedIndexInformer; +import io.fabric8.kubernetes.client.informers.SharedInformerFactory; +import io.fabric8.openshift.api.model.Build; +import io.fabric8.openshift.api.model.BuildConfig; +import io.fabric8.openshift.api.model.BuildList; +import io.fabric8.openshift.api.model.BuildStatus; +import io.fabric8.openshift.client.OpenShiftClient; +import jenkins.model.Jenkins; +import jenkins.security.NotReallyRoleSensitiveCallable; + +public class BuildInformer extends BuildWatcher implements ResourceEventHandler { + + private static final Logger LOGGER = Logger.getLogger(BuildInformer.class.getName()); + private final static BuildComparator BUILD_COMPARATOR = new BuildComparator(); + + // now that listing interval is 5 minutes (used to be 10 seconds), we have + // seen timing windows where if the build watch events come before build config + // watch events when both are created in a simultaneous fashion, there is an up + // to 5 minutes delay before the job run gets kicked off started seeing + // duplicate builds getting kicked off so quit depending on so moved off of + // concurrent hash set to concurrent hash map using namepace/name key + + private SharedIndexInformer informer; + + public BuildInformer(String namespace) { + super(namespace); + } + + @Override + public int getListIntervalInSeconds() { + return 1_000 * GlobalPluginConfiguration.get().getBuildListInterval(); + } + + public void start() { + LOGGER.info("Starting Build informer for {} !!" 
+ namespace); + LOGGER.fine("Listing Build resources"); + SharedInformerFactory factory = getInformerFactory().inNamespace(namespace); + this.informer = factory.sharedIndexInformerFor(Build.class, getListIntervalInSeconds()); + this.informer.addEventHandler(this); + factory.startAllRegisteredInformers(); + LOGGER.info("Build informer started for namespace: {}" + namespace); + BuildList list = getOpenshiftClient().builds().inNamespace(namespace).list(); + onInit(list.getItems()); + } + + public void startAfterOnClose(String namespace) { + synchronized (this.lock) { + start(); + } + } + + @Override + public void onAdd(Build obj) { + LOGGER.fine("Build informer received add event for: {}" + obj); + ObjectMeta metadata = obj.getMetadata(); + String name = metadata.getName(); + LOGGER.info("Build informer received add event for: {}" + name); + addEventToJenkinsJobRun(obj); + } + + @Override + public void onUpdate(Build oldObj, Build newObj) { + LOGGER.info("Build informer received update event for: {} to: {}" + oldObj + newObj); + modifyEventToJenkinsJobRun(newObj); + } + + @Override + public void onDelete(Build obj, boolean deletedFinalStateUnknown) { + LOGGER.info("Build informer received delete event for: {}" + obj); + deleteEventToJenkinsJobRun(obj); + } + + public static void onInit(List list) { + Collections.sort(list, BUILD_COMPARATOR); + // We need to sort the builds into their build configs so we can + // handle build run policies correctly. + Map buildConfigMap = new HashMap<>(); + Map> buildConfigBuildMap = new HashMap<>(list.size()); + mapBuildToBuildConfigs(list, buildConfigMap, buildConfigBuildMap); + mapBuildsToBuildConfigs(buildConfigBuildMap); + reconcileRunsAndBuilds(); + } + + private static void mapBuildsToBuildConfigs(Map> buildConfigBuildMap) { + // Now handle the builds. 
+ for (Map.Entry> buildConfigBuilds : buildConfigBuildMap.entrySet()) { + BuildConfig bc = buildConfigBuilds.getKey(); + if (bc.getMetadata() == null) { + // Should never happen but let's be safe... + continue; + } + WorkflowJob job = getJobFromBuildConfig(bc); + if (job == null) { + List builds = buildConfigBuilds.getValue(); + for (Build b : builds) { + LOGGER.info("skipping listed new build " + b.getMetadata().getName() + " no job at this time"); + addBuildToNoBCList(b); + } + continue; + } + BuildConfigProjectProperty bcp = job.getProperty(BuildConfigProjectProperty.class); + if (bcp == null) { + List builds = buildConfigBuilds.getValue(); + for (Build b : builds) { + LOGGER.info("skipping listed new build " + b.getMetadata().getName() + " no prop at this time"); + addBuildToNoBCList(b); + } + continue; + } + List builds = buildConfigBuilds.getValue(); + handleBuildList(job, builds, bcp); + } + } + + private static void mapBuildToBuildConfigs(List list, Map buildConfigMap, + Map> buildConfigBuildMap) { + for (Build b : list) { + if (!OpenShiftUtils.isPipelineStrategyBuild(b)) { + continue; + } + String buildConfigName = b.getStatus().getConfig().getName(); + if (StringUtils.isEmpty(buildConfigName)) { + continue; + } + String namespace = b.getMetadata().getNamespace(); + String buildConfigNamespacedName = namespace + "/" + buildConfigName; + BuildConfig bc = buildConfigMap.get(buildConfigNamespacedName); + if (bc == null) { + final OpenShiftClient client = getAuthenticatedOpenShiftClient(); + bc = client.buildConfigs().inNamespace(namespace).withName(buildConfigName).get(); + if (bc == null) { + // if the bc is not there via a REST get, then it is not + // going to be, and we are not handling manual creation + // of pipeline build objects, so don't bother with "no bc list" + continue; + } + buildConfigMap.put(buildConfigNamespacedName, bc); + } + List bcBuilds = buildConfigBuildMap.get(bc); + if (bcBuilds == null) { + bcBuilds = new ArrayList<>(); + 
buildConfigBuildMap.put(bc, bcBuilds); + } + bcBuilds.add(b); + } + } + + private static void modifyEventToJenkinsJobRun(Build build) { + BuildStatus status = build.getStatus(); + if (status != null && isCancellable(status) && isCancelled(status)) { + WorkflowJob job = getJobFromBuild(build); + if (job != null) { + cancelBuild(job, build); + } else { + removeBuildFromNoBCList(build); + } + } else { + // see if any pre-BC cached builds can now be flushed + flushBuildsWithNoBCList(); + } + } + + public static boolean addEventToJenkinsJobRun(Build build) { + // should have been caught upstack, but just in case since public method + if (!OpenShiftUtils.isPipelineStrategyBuild(build)) + return false; + BuildStatus status = build.getStatus(); + if (status != null) { + if (isCancelled(status)) { + updateOpenShiftBuildPhase(build, CANCELLED); + return false; + } + if (!isNew(status)) { + return false; + } + } + + WorkflowJob job = getJobFromBuild(build); + if (job != null) { + try { + return triggerJob(job, build); + } catch (IOException e) { + LOGGER.severe("Error while trying to trigger Job: " + e); + } + } + LOGGER.info("skipping watch event for build " + build.getMetadata().getName() + " no job at this time"); + addBuildToNoBCList(build); + return false; + } + + private static void addBuildToNoBCList(Build build) { + // should have been caught upstack, but just in case since public method + if (!OpenShiftUtils.isPipelineStrategyBuild(build)) + return; + try { + buildsWithNoBCList.put(build.getMetadata().getNamespace() + build.getMetadata().getName(), build); + } catch (ConcurrentModificationException | IllegalArgumentException | UnsupportedOperationException + | NullPointerException e) { + LOGGER.log(Level.WARNING, "Failed to add item " + build.getMetadata().getName(), e); + } + } + + private static void removeBuildFromNoBCList(Build build) { + buildsWithNoBCList.remove(build.getMetadata().getNamespace() + build.getMetadata().getName()); + } + + // trigger any builds 
whose watch events arrived before the + // corresponding build config watch events + public static void flushBuildsWithNoBCList() { + + ConcurrentHashMap clone = null; + synchronized (buildsWithNoBCList) { + clone = new ConcurrentHashMap(buildsWithNoBCList); + } + boolean anyRemoveFailures = false; + for (Build build : clone.values()) { + WorkflowJob job = getJobFromBuild(build); + if (job != null) { + try { + LOGGER.info("triggering job run for previously skipped build " + build.getMetadata().getName()); + triggerJob(job, build); + } catch (IOException e) { + LOGGER.log(Level.WARNING, "flushBuildsWithNoBCList", e); + } + try { + synchronized (buildsWithNoBCList) { + removeBuildFromNoBCList(build); + } + } catch (Throwable t) { + // TODO + // concurrent mod exceptions are not suppose to occur + // with concurrent hash set; this try/catch with log + // and the anyRemoveFailures post processing is a bit + // of safety paranoia until this proves to be true + // over extended usage ... probably can remove at some + // point + anyRemoveFailures = true; + LOGGER.log(Level.WARNING, "flushBuildsWithNoBCList", t); + } + } + + synchronized (buildsWithNoBCList) { + if (anyRemoveFailures && buildsWithNoBCList.size() > 0) { + buildsWithNoBCList.clear(); + } + + } + } + } + + // innerDeleteEventToJenkinsJobRun is the actual delete logic at the heart of + // deleteEventToJenkinsJobRun that is either in a sync block or not based on the + // presence of a BC uid + @SuppressWarnings({ "deprecation", "serial" }) + private static void innerDeleteEventToJenkinsJobRun(final Build build) throws Exception { + final WorkflowJob job = getJobFromBuild(build); + if (job != null) { + ACL.impersonate(ACL.SYSTEM, new NotReallyRoleSensitiveCallable() { + @Override + public Void call() throws Exception { + cancelBuild(job, build, true); + return null; + } + }); + } else { + // in case build was created and deleted quickly, prior to seeing BC + // event, clear out from pre-BC cache + 
removeBuildFromNoBCList(build); + } + deleteRun(job, build); + } + + // in response to receiving an openshift delete build event, this method + // will drive the clean up of the Jenkins job run the build is mapped one to one + // with; as part of that clean up it will synchronize with the build config + // event watcher to handle build config delete events and build delete events + // that arrive concurrently and in a nondeterministic order + private static void deleteEventToJenkinsJobRun(final Build build) { + List ownerRefs = build.getMetadata().getOwnerReferences(); + String bcUid = null; + for (OwnerReference ref : ownerRefs) { + if ("BuildConfig".equals(ref.getKind()) && ref.getUid() != null && ref.getUid().length() > 0) { + // employ intern to facilitate sync'ing on the same actual object + bcUid = ref.getUid().intern(); + synchronized (bcUid) { + // if entire job already deleted via bc delete, just return + if (getJobFromBuildConfigNameNamespace(getAnnotation(build, BUILDCONFIG_NAME), + build.getMetadata().getNamespace()) == null) { + return; + } + try { + innerDeleteEventToJenkinsJobRun(build); + } catch (Exception e) { + LOGGER.severe("Error while trying to delete JobRun: " + e); + } + return; + } + } + } + // otherwise, if something odd is up and there is no parent BC, just clean up + try { + innerDeleteEventToJenkinsJobRun(build); + } catch (Exception e) { + LOGGER.severe("Error while trying to delete JobRun: " + e); + } + } + + /** + * Reconciles Jenkins job runs and OpenShift builds + * + * Deletes all job runs that do not have an associated build in OpenShift + */ + @SuppressWarnings("deprecation") + private static void reconcileRunsAndBuilds() { + LOGGER.fine("Reconciling job runs and builds"); + List jobs = Jenkins.getActiveInstance().getAllItems(WorkflowJob.class); + for (WorkflowJob job : jobs) { + BuildConfigProjectProperty property = job.getProperty(BuildConfigProjectProperty.class); + if (property != null) { + String ns = 
property.getNamespace(); + String name = property.getName(); + if (StringUtils.isNotBlank(ns) && StringUtils.isNotBlank(name)) { + LOGGER.fine("Checking job " + job + " runs for BuildConfig " + ns + "/" + name); + OpenShiftClient client = getAuthenticatedOpenShiftClient(); + BuildList builds = client.builds().inNamespace(ns).withLabel("buildconfig=" + name).list(); + for (WorkflowRun run : job.getBuilds()) { + boolean found = false; + BuildCause cause = run.getCause(BuildCause.class); + for (Build build : builds.getItems()) { + if (cause != null && cause.getUid().equals(build.getMetadata().getUid())) { + found = true; + break; + } + } + if (!found) { + deleteRun(run); + } + } + } + } + + } + } + +} diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/BuildWatcher.java b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildWatcher.java index 1fe5b5c56..9afa8d732 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/BuildWatcher.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildWatcher.java @@ -81,7 +81,7 @@ public class BuildWatcher extends BaseWatcher { // started seeing duplicate builds getting kicked off so quit depending on // so moved off of concurrent hash set to concurrent hash map using // namepace/name key - private static final ConcurrentHashMap buildsWithNoBCList = new ConcurrentHashMap(); + protected static final ConcurrentHashMap buildsWithNoBCList = new ConcurrentHashMap(); @SuppressFBWarnings("EI_EXPOSE_REP2") public BuildWatcher(String namespace) { diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/ConfigMapInformer.java b/src/main/java/io/fabric8/jenkins/openshiftsync/ConfigMapInformer.java index 3cfd14067..449b8edbc 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/ConfigMapInformer.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/ConfigMapInformer.java @@ -15,35 +15,29 @@ */ package io.fabric8.jenkins.openshiftsync; -import static 
io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getAuthenticatedOpenShiftClient; -import static io.fabric8.jenkins.openshiftsync.PodTemplateUtils.addPodTemplate; -import static io.fabric8.jenkins.openshiftsync.PodTemplateUtils.configMapContainsSlave; -import static io.fabric8.jenkins.openshiftsync.PodTemplateUtils.podTemplatesFromConfigMap; -import static io.fabric8.jenkins.openshiftsync.PodTemplateUtils.processSlavesForAddEvent; -import static io.fabric8.jenkins.openshiftsync.PodTemplateUtils.processSlavesForDeleteEvent; -import static io.fabric8.jenkins.openshiftsync.PodTemplateUtils.processSlavesForModifyEvent; -import static io.fabric8.jenkins.openshiftsync.PodTemplateUtils.trackedPodTemplates; +import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getInformerFactory; +import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getOpenshiftClient; +import static io.fabric8.jenkins.openshiftsync.PodTemplateUtils.CONFIGMAP; import java.util.List; +import java.util.concurrent.TimeUnit; import java.util.logging.Logger; import org.csanchez.jenkins.plugins.kubernetes.PodTemplate; -import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; import io.fabric8.kubernetes.api.model.ConfigMap; +import io.fabric8.kubernetes.api.model.ConfigMapList; import io.fabric8.kubernetes.api.model.ObjectMeta; import io.fabric8.kubernetes.client.informers.ResourceEventHandler; import io.fabric8.kubernetes.client.informers.SharedIndexInformer; import io.fabric8.kubernetes.client.informers.SharedInformerFactory; -import io.fabric8.kubernetes.client.informers.cache.Lister; -import io.fabric8.openshift.client.OpenShiftClient; public class ConfigMapInformer extends ConfigMapWatcher implements ResourceEventHandler { - private final Logger LOGGER = Logger.getLogger(getClass().getName()); + private final static Logger LOGGER = Logger.getLogger(ConfigMapWatcher.class.getName()); private static final long RESYNC_PERIOD = 30 * 1000L; + private SharedIndexInformer informer; - 
@SuppressFBWarnings("EI_EXPOSE_REP2") public ConfigMapInformer(String namespace) { super(namespace); } @@ -54,31 +48,32 @@ public int getListIntervalInSeconds() { } public void start() { - LOGGER.info("Now handling startup config maps for {} !!" + namespace); + LOGGER.info("Starting configMap informer for {} !!" + namespace); LOGGER.fine("listing ConfigMap resources"); - OpenShiftClient client = getAuthenticatedOpenShiftClient(); - SharedInformerFactory informerFactory = client.informers(); - SharedIndexInformer informer = informerFactory.inNamespace(namespace) - .sharedIndexInformerFor(ConfigMap.class, RESYNC_PERIOD); + SharedInformerFactory factory = getInformerFactory().inNamespace(namespace); + this.informer = factory.sharedIndexInformerFor(ConfigMap.class, RESYNC_PERIOD); informer.addEventHandler(this); + factory.startAllRegisteredInformers(); LOGGER.info("ConfigMap informer started for namespace: {}" + namespace); - Lister list = new Lister<>(informer.getIndexer(), namespace); - onInit(list.list()); - informerFactory.startAllRegisteredInformers(); + ConfigMapList list = getOpenshiftClient().configMaps().inNamespace(namespace).list(); + onInit(list.getItems()); + } + + public void stop() { + LOGGER.info("Stopping configMap informer {} !!" 
+ namespace); + this.informer.stop(); } @Override public void onAdd(ConfigMap obj) { - LOGGER.info("ConfigMap informer received add event for: {}" + obj); - List slavesFromCM = PodTemplateUtils.podTemplatesFromConfigMap(this, obj); - boolean hasSlaves = slavesFromCM.size() > 0; - if (hasSlaves) { - ObjectMeta metadata = obj.getMetadata(); - String uid = metadata.getUid(); - String cmname = metadata.getName(); - String namespace = metadata.getNamespace(); - processSlavesForAddEvent( slavesFromCM, PodTemplateUtils.cmType, uid, cmname, namespace); - } + LOGGER.fine("ConfigMap informer received add event for: {}" + obj); + ObjectMeta metadata = obj.getMetadata(); + String name = metadata.getName(); + LOGGER.info("ConfigMap informer received add event for: {}" + name); + List podTemplates = PodTemplateUtils.podTemplatesFromConfigMap(obj); + String uid = metadata.getUid(); + String namespace = metadata.getNamespace(); + PodTemplateUtils.addAgents(podTemplates, CONFIGMAP, uid, name, namespace); } @Override @@ -87,47 +82,41 @@ public void onUpdate(ConfigMap oldObj, ConfigMap newObj) { String oldResourceVersion = oldObj.getMetadata() != null ? oldObj.getMetadata().getResourceVersion() : null; String newResourceVersion = newObj.getMetadata() != null ? 
newObj.getMetadata().getResourceVersion() : null; LOGGER.info("Update event received resource versions: {} to: {}" + oldResourceVersion + newResourceVersion); - List slavesFromCM = PodTemplateUtils.podTemplatesFromConfigMap(this, newObj); + List podTemplates = PodTemplateUtils.podTemplatesFromConfigMap(newObj); ObjectMeta metadata = newObj.getMetadata(); String uid = metadata.getUid(); - String cmname = metadata.getName(); + String name = metadata.getName(); String namespace = metadata.getNamespace(); - processSlavesForModifyEvent(slavesFromCM, PodTemplateUtils.cmType, uid, cmname, namespace); + PodTemplateUtils.updateAgents(podTemplates, CONFIGMAP, uid, name, namespace); } @Override public void onDelete(ConfigMap obj, boolean deletedFinalStateUnknown) { - LOGGER.info("ConfigMap informer received delete event for: {}" + obj); - List slavesFromCM = PodTemplateUtils.podTemplatesFromConfigMap(this, obj); + LOGGER.fine("ConfigMap informer received delete event for: {}" + obj); + List podTemplates = PodTemplateUtils.podTemplatesFromConfigMap(obj); ObjectMeta metadata = obj.getMetadata(); String uid = metadata.getUid(); - String cmname = metadata.getName(); + String name = metadata.getName(); String namespace = metadata.getNamespace(); - processSlavesForDeleteEvent(slavesFromCM, PodTemplateUtils.cmType, uid, cmname, namespace); + PodTemplateUtils.deleteAgents(podTemplates, CONFIGMAP, uid, name, namespace); } private void onInit(List list) { if (list != null) { for (ConfigMap configMap : list) { - addPodTemplateFromConfigMap(configMap); + PodTemplateUtils.addPodTemplateFromConfigMap(configMap); } } } - private void addPodTemplateFromConfigMap(ConfigMap configMap) { - try { - String uid = configMap.getMetadata().getUid(); - if (configMapContainsSlave(configMap) && !trackedPodTemplates.containsKey(uid)) { - List templates = podTemplatesFromConfigMap(this, configMap); - trackedPodTemplates.put(uid, templates); - for (PodTemplate podTemplate : templates) { - 
LOGGER.info("Adding PodTemplate {}" + podTemplate); - addPodTemplate(podTemplate); - } + private void waitInformerSync(SharedIndexInformer informer) { + while (!informer.hasSynced()) { + LOGGER.info("Waiting informer to sync for " + namespace); + try { + TimeUnit.SECONDS.sleep(5); + } catch (InterruptedException e) { + LOGGER.info("Interrupted waiting thread: " + e); } - } catch (Exception e) { - LOGGER.severe("Failed to update ConfigMap PodTemplates" + e); } } - } diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/ConfigMapWatcher.java b/src/main/java/io/fabric8/jenkins/openshiftsync/ConfigMapWatcher.java index dbf387cd6..b47cf9440 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/ConfigMapWatcher.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/ConfigMapWatcher.java @@ -17,9 +17,10 @@ import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getAuthenticatedOpenShiftClient; import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getOpenshiftClient; -import static io.fabric8.jenkins.openshiftsync.PodTemplateUtils.processSlavesForAddEvent; -import static io.fabric8.jenkins.openshiftsync.PodTemplateUtils.processSlavesForDeleteEvent; -import static io.fabric8.jenkins.openshiftsync.PodTemplateUtils.processSlavesForModifyEvent; +import static io.fabric8.jenkins.openshiftsync.PodTemplateUtils.CONFIGMAP; +import static io.fabric8.jenkins.openshiftsync.PodTemplateUtils.addAgents; +import static io.fabric8.jenkins.openshiftsync.PodTemplateUtils.deleteAgents; +import static io.fabric8.jenkins.openshiftsync.PodTemplateUtils.updateAgents; import static java.util.logging.Level.SEVERE; import static java.util.logging.Level.WARNING; @@ -95,7 +96,7 @@ public void eventReceived(Action action, ConfigMap configMap) { return; } try { - List slavesFromCM = PodTemplateUtils.podTemplatesFromConfigMap(this, configMap); + List slavesFromCM = PodTemplateUtils.podTemplatesFromConfigMap(configMap); boolean hasSlaves = slavesFromCM.size() > 0; String uid 
= configMap.getMetadata().getUid(); String cmname = configMap.getMetadata().getName(); @@ -103,14 +104,14 @@ public void eventReceived(Action action, ConfigMap configMap) { switch (action) { case ADDED: if (hasSlaves) { - processSlavesForAddEvent(slavesFromCM, PodTemplateUtils.cmType, uid, cmname, namespace); + addAgents(slavesFromCM, CONFIGMAP, uid, cmname, namespace); } break; case MODIFIED: - processSlavesForModifyEvent(slavesFromCM, PodTemplateUtils.cmType, uid, cmname, namespace); + updateAgents(slavesFromCM, CONFIGMAP, uid, cmname, namespace); break; case DELETED: - processSlavesForDeleteEvent(slavesFromCM, PodTemplateUtils.cmType, uid, cmname, namespace); + deleteAgents(slavesFromCM, CONFIGMAP, uid, cmname, namespace); break; case ERROR: LOGGER.warning("watch for configMap " + configMap.getMetadata().getName() + " received error event "); @@ -135,7 +136,7 @@ private void onInitialConfigMaps(ConfigMapList configMaps) { try { if (PodTemplateUtils.configMapContainsSlave(configMap) && !PodTemplateUtils.trackedPodTemplates.containsKey(configMap.getMetadata().getUid())) { - List templates = PodTemplateUtils.podTemplatesFromConfigMap(this, configMap); + List templates = PodTemplateUtils.podTemplatesFromConfigMap(configMap); PodTemplateUtils.trackedPodTemplates.put(configMap.getMetadata().getUid(), templates); for (PodTemplate podTemplate : templates) { PodTemplateUtils.addPodTemplate(podTemplate); diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/Constants.java b/src/main/java/io/fabric8/jenkins/openshiftsync/Constants.java index dd4874c4d..40ffe890b 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/Constants.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/Constants.java @@ -15,7 +15,6 @@ */ package io.fabric8.jenkins.openshiftsync; - /** */ public class Constants { @@ -32,9 +31,12 @@ public class Constants { public static final String OPENSHIFT_ANNOTATIONS_JENKINS_NAMESPACE = "openshift.io/jenkins-namespace"; public static final 
String OPENSHIFT_LABELS_BUILD_CONFIG_NAME = "openshift.io/build-config.name"; public static final String OPENSHIFT_LABELS_BUILD_CONFIG_GIT_REPOSITORY_NAME = "openshift.io/gitRepository"; - // see PR https://github.com/openshift/jenkins-sync-plugin/pull/189, there was a issue with having "/" - // in a label we construct a watch over, where usual UTF-8 encoding of the label name (which becomes part of - // a query param on the REST invocation) was causing okhttp3 to complain (there is even more history/discussion + // see PR https://github.com/openshift/jenkins-sync-plugin/pull/189, there was a + // issue with having "/" + // in a label we construct a watch over, where usual UTF-8 encoding of the label + // name (which becomes part of + // a query param on the REST invocation) was causing okhttp3 to complain (there + // is even more history/discussion // in the PR as to issues with fixing). // so we avoid use of "/" for this label public static final String OPENSHIFT_LABELS_SECRET_CREDENTIAL_SYNC = "credential.sync.jenkins.openshift.io"; @@ -53,9 +55,11 @@ public class Constants { public static final String OPENSHIFT_SECRETS_TYPE_OPAQUE = "Opaque"; public static final String OPENSHIFT_BUILD_STATUS_FIELD = "status"; public static final String OPENSHIFT_SECRETS_DATA_CLIENT_TOKEN = "openshift-client-token"; - + public static final String OPENSHIFT_PROJECT_ENV_VAR_NAME = "PROJECT_NAME"; public static final String OPENSHIFT_PROJECT_FILE = "/run/secrets/kubernetes.io/serviceaccount/namespace"; + public static final String IMAGESTREAM_AGENT_LABEL_VALUE = "jenkins-slave"; + public static final String IMAGESTREAM_AGENT_LABEL = "role"; } diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/CredentialsUtils.java b/src/main/java/io/fabric8/jenkins/openshiftsync/CredentialsUtils.java index 8223a9139..21d3ed73f 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/CredentialsUtils.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/CredentialsUtils.java @@ -1,10 
+1,45 @@ package io.fabric8.jenkins.openshiftsync; +import static com.cloudbees.plugins.credentials.CredentialsProvider.lookupStores; +import static com.cloudbees.plugins.credentials.CredentialsScope.GLOBAL; +import static hudson.Util.fixNull; +import static hudson.util.Secret.fromString; +import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_SECRETS_DATA_CERTIFICATE; +import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_SECRETS_DATA_CLIENT_TOKEN; +import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_SECRETS_DATA_FILENAME; +import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_SECRETS_DATA_PASSPHRASE; +import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_SECRETS_DATA_PASSWORD; +import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_SECRETS_DATA_SECRET_TEXT; +import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_SECRETS_DATA_SSHPRIVATEKEY; +import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_SECRETS_DATA_USERNAME; +import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_SECRETS_TYPE_BASICAUTH; +import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_SECRETS_TYPE_OPAQUE; +import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_SECRETS_TYPE_SSH; +import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getAuthenticatedOpenShiftClient; +import static java.nio.charset.StandardCharsets.UTF_8; +import static java.util.logging.Level.SEVERE; +import static java.util.logging.Level.WARNING; +import static org.apache.commons.lang.StringUtils.isNotBlank; + +import java.io.IOException; +import java.util.Base64; +import java.util.Base64.Decoder; +import java.util.Collections; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.logging.Level; +import java.util.logging.Logger; + +import org.acegisecurity.context.SecurityContext; +import org.acegisecurity.context.SecurityContextHolder; +import 
org.apache.commons.lang.StringUtils; +import org.jenkinsci.plugins.plaincredentials.impl.FileCredentialsImpl; +import org.jenkinsci.plugins.plaincredentials.impl.StringCredentialsImpl; + import com.cloudbees.jenkins.plugins.sshcredentials.impl.BasicSSHUserPrivateKey; import com.cloudbees.plugins.credentials.Credentials; import com.cloudbees.plugins.credentials.CredentialsMatchers; import com.cloudbees.plugins.credentials.CredentialsProvider; -import com.cloudbees.plugins.credentials.CredentialsScope; import com.cloudbees.plugins.credentials.CredentialsStore; import com.cloudbees.plugins.credentials.SecretBytes; import com.cloudbees.plugins.credentials.domains.Domain; @@ -14,8 +49,8 @@ import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.databind.ObjectMapper; import com.openshift.jenkins.plugins.OpenShiftTokenCredentials; + import hudson.model.Fingerprint; -import hudson.remoting.Base64; import hudson.security.ACL; import io.fabric8.kubernetes.api.model.LocalObjectReference; import io.fabric8.kubernetes.api.model.ObjectMeta; @@ -23,120 +58,107 @@ import io.fabric8.openshift.api.model.BuildConfig; import io.fabric8.openshift.api.model.BuildConfigSpec; import io.fabric8.openshift.api.model.BuildSource; +import io.fabric8.openshift.client.OpenShiftClient; import jenkins.model.Jenkins; -import org.acegisecurity.context.SecurityContext; -import org.acegisecurity.context.SecurityContextHolder; -import org.apache.commons.lang.StringUtils; -import org.jenkinsci.plugins.plaincredentials.impl.FileCredentialsImpl; -import org.jenkinsci.plugins.plaincredentials.impl.StringCredentialsImpl; - -import java.io.IOException; -import java.nio.charset.StandardCharsets; -import java.util.Collections; -import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; -import java.util.logging.Level; -import java.util.logging.Logger; - -import static com.cloudbees.plugins.credentials.CredentialsScope.GLOBAL; -import static hudson.Util.fixNull; 
-import static io.fabric8.jenkins.openshiftsync.Constants.*; -import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getAuthenticatedOpenShiftClient; -import static java.nio.charset.StandardCharsets.UTF_8; -import static java.util.logging.Level.WARNING; -import static org.apache.commons.lang.StringUtils.isNotBlank; public class CredentialsUtils { - private final static Logger logger = Logger.getLogger(CredentialsUtils.class.getName()); - public static final String KUBERNETES_SERVICE_ACCOUNT = "Kubernetes Service Account"; - public static ConcurrentHashMap uidToSecretNameMap; - - - public static Secret getSourceCredentials(BuildConfig buildConfig) { - BuildConfigSpec spec = buildConfig.getSpec(); - if (spec != null) { - BuildSource source = spec.getSource(); - if (source != null) { - LocalObjectReference sourceSecret = source.getSourceSecret(); - if (sourceSecret != null) { - String sourceSecretName = sourceSecret.getName(); - if (sourceSecretName != null && !sourceSecretName.isEmpty()) { - ObjectMeta buildConfigMetadata = buildConfig.getMetadata(); - String namespace = buildConfigMetadata.getNamespace(); - String buildConfigName = buildConfigMetadata.getName(); - logger.info("Retrieving SourceSecret for BuildConfig " + buildConfigName + " in Namespace " + namespace); - Secret secret = getAuthenticatedOpenShiftClient().secrets().inNamespace(namespace).withName(sourceSecretName).get(); - if (secret == null) { - logger.warning("Secret Name provided in BuildConfig " + buildConfigName + " as " + sourceSecretName + " does not exist. 
" + - "Please review the BuildConfig and make the necessary changes."); - } else{ - return secret; + private static final String SECRET_TEXT_SECRET_TYPE = "secretText"; + private static final String FILE_SECRET_TYPE = "filename"; + private static final String TOKEN_SECRET_TYPE = "token"; + private static final Decoder DECODER = Base64.getDecoder(); + private final static Logger logger = Logger.getLogger(CredentialsUtils.class.getName()); + private final static Map SOURCE_SECRET_TO_CREDS_MAP = new ConcurrentHashMap(); + public static final String KUBERNETES_SERVICE_ACCOUNT = "Kubernetes Service Account"; + public final static ConcurrentHashMap UID_TO_SECRET_MAP = new ConcurrentHashMap(); + + public static Secret getSourceSecretForBuildConfig(BuildConfig buildConfig) { + BuildConfigSpec spec = buildConfig.getSpec(); + if (spec != null) { + BuildSource source = spec.getSource(); + if (source != null) { + LocalObjectReference sourceSecret = source.getSourceSecret(); + if (sourceSecret != null) { + String sourceSecretName = sourceSecret.getName(); + if (sourceSecretName != null && !sourceSecretName.isEmpty()) { + ObjectMeta buildConfigMetadata = buildConfig.getMetadata(); + String namespace = buildConfigMetadata.getNamespace(); + String name = buildConfigMetadata.getName(); + logger.info("Retrieving SourceSecret for BuildConfig " + name + " in Namespace " + namespace); + OpenShiftClient client = getAuthenticatedOpenShiftClient(); + Secret secret = client.secrets().inNamespace(namespace).withName(sourceSecretName).get(); + if (secret != null) { + return secret; + } else { + String message = "Secret Name provided in BuildConfig " + name + " as " + sourceSecretName; + message += " does not exist. 
Please review the BuildConfig and make the necessary changes."; + logger.warning(message); + } + } + } } - } } - } - } return null; } public static String updateSourceCredentials(BuildConfig buildConfig) throws IOException { - String credId = null; - Secret sourceSecret = getSourceCredentials(buildConfig); - if (sourceSecret != null) { + String credentialsName = null; + Secret sourceSecret = getSourceSecretForBuildConfig(buildConfig); + if (sourceSecret != null) { ObjectMeta sourceSecretMetadata = sourceSecret.getMetadata(); - if (sourceSecretMetadata != null){ - String namespace = sourceSecretMetadata.getNamespace(); - String secretName = sourceSecretMetadata.getName(); - ObjectMeta buildConfigMetadata = buildConfig.getMetadata(); - String buildConfigName = buildConfigMetadata.getName(); - credId = upsertCredential(sourceSecret, namespace, secretName); - if (credId != null) { - logger.info("Linking BuildConfig sourceSecret "+secretName+" to Jenkins Credential "+credId); - BuildConfigSecretToCredentialsMap.linkBCSecretToCredential(NamespaceName.create(buildConfig).toString(), credId); - return credId; - }else { - // call delete and remove any credential that fits the - // project/bcname pattern - logger.info("Unlinking BuildConfig sourceSecret matching BuildConfig "+buildConfigName); - credId = BuildConfigSecretToCredentialsMap.unlinkBCSecretToCrendential(NamespaceName.create(buildConfig).toString()); - if (credId != null){ - logger.info("Deleting sourceSecret "+secretName+" in namespace "+namespace); - deleteCredential(credId, NamespaceName.create(buildConfig), buildConfigMetadata.getResourceVersion()); + if (sourceSecretMetadata != null) { + String namespace = sourceSecretMetadata.getNamespace(); + String secretName = sourceSecretMetadata.getName(); + ObjectMeta buildConfigMetadata = buildConfig.getMetadata(); + String buildConfigName = buildConfigMetadata.getName(); + credentialsName = insertOrUpdateCredentialsFromSecret(sourceSecret); + String 
buildConfigAsString = NamespaceName.create(buildConfig).toString(); + if (credentialsName != null) { + logger.info("Linking sourceSecret " + secretName + " to Jenkins Credentials " + credentialsName); + linkSourceSecretToCredentials(buildConfigAsString, credentialsName); + return credentialsName; + } else { + // call delete and remove any credential that fits the project/bcname pattern + logger.info("Unlinking BuildConfig sourceSecret matching BuildConfig " + buildConfigName); + credentialsName = unlinkBCSecretToCrendential(buildConfigAsString); + if (credentialsName != null) { + logger.info("Deleting sourceSecret " + secretName + " in namespace " + namespace); + String resourceVersion = buildConfigMetadata.getResourceVersion(); + deleteCredential(credentialsName, NamespaceName.create(buildConfig), resourceVersion); + } } - } } - } - return credId; + } + return credentialsName; } public static void deleteSourceCredentials(BuildConfig buildConfig) throws IOException { - Secret sourceSecret = getSourceCredentials(buildConfig); + Secret sourceSecret = getSourceSecretForBuildConfig(buildConfig); if (sourceSecret != null) { ObjectMeta metadata = sourceSecret.getMetadata(); if (metadata != null) { - Map labels = metadata.getLabels(); - if (labels != null) { - String labelValue =labels.get(Constants.OPENSHIFT_LABELS_SECRET_CREDENTIAL_SYNC); - boolean watching = labelValue != null && labelValue.equalsIgnoreCase(Constants.VALUE_SECRET_SYNC); - // for a bc delete, if we are watching this secret, do not delete - // credential until secret is actually deleted - if (watching) - return; - deleteCredential(sourceSecret); - } + Map labels = metadata.getLabels(); + if (labels != null) { + String labelValue = labels.get(Constants.OPENSHIFT_LABELS_SECRET_CREDENTIAL_SYNC); + boolean watching = labelValue != null && labelValue.equalsIgnoreCase(Constants.VALUE_SECRET_SYNC); + // for a bc delete, if we are watching this secret, do not delete + // credential until secret is actually 
deleted + if (watching) + return; + deleteCredential(sourceSecret); + } } } } - + private static String getSecretCustomName(Secret secret) { ObjectMeta metadata = secret.getMetadata(); if (metadata != null) { - Map annotations = metadata.getAnnotations(); + Map annotations = metadata.getAnnotations(); if (annotations != null) { String secretName = annotations.get(Annotations.SECRET_NAME); - if (secretName != null){ - return secretName; + if (secretName != null) { + return secretName; } } } @@ -145,6 +167,7 @@ private static String getSecretCustomName(Secret secret) { /** * Inserts or creates a Jenkins Credential for the given Secret + * * @param secret the secret to insert * @return the insert secret name * @throws IOException when the update of the secret fails @@ -153,74 +176,78 @@ public static String upsertCredential(Secret secret) throws IOException { if (secret != null) { ObjectMeta metadata = secret.getMetadata(); if (metadata != null) { - return upsertCredential(secret, metadata.getNamespace(), metadata.getName()); + return insertOrUpdateCredentialsFromSecret(secret); } } return null; } - private static String upsertCredential(Secret secret, String namespace, String secretName) throws IOException { - if (uidToSecretNameMap == null){ - uidToSecretNameMap = new ConcurrentHashMap(); - } - String customSecretName = getSecretCustomName(secret); + private static String insertOrUpdateCredentialsFromSecret(Secret secret) throws IOException { if (secret != null) { - Credentials creds = secretToCredentials(secret); - if (creds != null) { - // checking with updated secret name if custom name is not null - String id = generateCredentialsName(namespace, secretName, customSecretName); - Credentials existingCreds = lookupCredentials(id); - final SecurityContext previousContext = ACL.impersonate(ACL.SYSTEM); - try { - CredentialsStore s = CredentialsProvider.lookupStores(Jenkins.getActiveInstance()).iterator().next(); - String originalId = 
generateCredentialsName(namespace, secretName, null); - Credentials existingOriginalCreds = lookupCredentials(originalId); - NamespaceName secretNamespaceName = null; - - ObjectMeta metadata = secret.getMetadata(); - String secretUid = metadata.getUid(); - if (!originalId.equals(id)) { - boolean hasAddedCredential = s.addCredentials(Domain.global(), creds); - if (!hasAddedCredential) { - logger.warning("Setting secret failed for secret with new Id " + id + " from Secret " + secretNamespaceName + " with revision: " + metadata.getResourceVersion()); - logger.warning("Check if Id "+id+" is not already used."); - } else { - String oldId = uidToSecretNameMap.get(secretUid); - if (oldId != null) { - Credentials oldCredentials = lookupCredentials(oldId); - s.removeCredentials(Domain.global(), oldCredentials); - } else if (existingOriginalCreds != null) { - s.removeCredentials(Domain.global(), existingOriginalCreds); - } - uidToSecretNameMap.put(secretUid, id); - secretNamespaceName = NamespaceName.create(secret); - logger.info("Updated credential " + oldId + " with new Id " + id + " from Secret " + secretNamespaceName + " with revision: " + metadata.getResourceVersion()); + String customSecretName = getSecretCustomName(secret); + ObjectMeta metadata = secret.getMetadata(); + String namespace = metadata.getNamespace(); + String secretName = metadata.getName(); + Credentials creds = secretToCredentials(secret); + if (creds != null) { + // checking with updated secret name if custom name is not null + String id = generateCredentialsName(namespace, secretName, customSecretName); + Credentials existingCreds = lookupCredentials(id); + final SecurityContext previousContext = ACL.impersonate(ACL.SYSTEM); + try { + CredentialsStore creentialsStore = lookupStores(Jenkins.getActiveInstance()).iterator().next(); + String originalId = generateCredentialsName(namespace, secretName, null); + Credentials existingOriginalCreds = lookupCredentials(originalId); + NamespaceName 
secretNamespaceName = null; + + String secretUid = metadata.getUid(); + if (!originalId.equals(id)) { + boolean hasAddedCredential = creentialsStore.addCredentials(Domain.global(), creds); + if (!hasAddedCredential) { + logger.warning("Setting secret failed for secret with new Id " + id + " from Secret " + + secretNamespaceName + " with revision: " + metadata.getResourceVersion()); + logger.warning("Check if Id " + id + " is not already used."); + } else { + String oldId = UID_TO_SECRET_MAP.get(secretUid); + if (oldId != null) { + Credentials oldCredentials = lookupCredentials(oldId); + creentialsStore.removeCredentials(Domain.global(), oldCredentials); + } else if (existingOriginalCreds != null) { + creentialsStore.removeCredentials(Domain.global(), existingOriginalCreds); + } + UID_TO_SECRET_MAP.put(secretUid, id); + secretNamespaceName = NamespaceName.create(secret); + logger.info("Updated credential " + oldId + " with new Id " + id + " from Secret " + + secretNamespaceName + " with revision: " + metadata.getResourceVersion()); + } + } else { + if (existingCreds != null) { + creentialsStore.updateCredentials(Domain.global(), existingCreds, creds); + UID_TO_SECRET_MAP.put(secretUid, id); + secretNamespaceName = NamespaceName.create(secret); + logger.info("Updated credential " + id + " from Secret " + secretNamespaceName + + " with revision: " + metadata.getResourceVersion()); + } else { + boolean hasAddedCredential = creentialsStore.addCredentials(Domain.global(), creds); + if (!hasAddedCredential) { + logger.warning("Update failed for secret with new Id " + id + " from Secret " + + secretNamespaceName + " with revision: " + metadata.getResourceVersion()); + } else { + UID_TO_SECRET_MAP.put(secretUid, id); + secretNamespaceName = NamespaceName.create(secret); + logger.info("Created credential " + id + " from Secret " + secretNamespaceName + + " with revision: " + metadata.getResourceVersion()); + } + } + } + creentialsStore.save(); + } finally { + 
SecurityContextHolder.setContext(previousContext); } - } else { - if (existingCreds != null) { - s.updateCredentials(Domain.global(), existingCreds, creds); - uidToSecretNameMap.put(secretUid, id); - secretNamespaceName = NamespaceName.create(secret); - logger.info("Updated credential " + id + " from Secret " + secretNamespaceName + " with revision: " + metadata.getResourceVersion()); - } else { - boolean hasAddedCredential = s.addCredentials(Domain.global(), creds); - if (!hasAddedCredential) { - logger.warning("Update failed for secret with new Id " + id + " from Secret " + secretNamespaceName + " with revision: " + metadata.getResourceVersion()); - } else { - uidToSecretNameMap.put(secretUid, id); - secretNamespaceName = NamespaceName.create(secret); - logger.info("Created credential " + id + " from Secret " + secretNamespaceName + " with revision: " + metadata.getResourceVersion()); - } + if (id != null && !id.isEmpty()) { + return id; } - } - s.save(); - } finally { - SecurityContextHolder.setContext(previousContext); - } - if (id != null && !id.isEmpty()){ - return id; } - } } return null; } @@ -232,8 +259,10 @@ private static void deleteCredential(String id, NamespaceName name, String resou try { Fingerprint fp = CredentialsProvider.getFingerprintOf(existingCred); if (fp != null && fp.getJobs().size() > 0) { - // per messages in credentials console, it is not a given but it is possible for job refs to a - // credential to be tracked ; if so, we will not prevent deletion, but at least note things for + // per messages in credentials console, it is not a given but it is possible for + // job refs to a + // credential to be tracked ; if so, we will not prevent deletion, but at least + // note things for // potential diagnostics StringBuffer sb = new StringBuffer(); for (String job : fp.getJobs()) @@ -241,23 +270,31 @@ private static void deleteCredential(String id, NamespaceName name, String resou logger.info("About to delete credential " + id + "which is 
referenced by jobs: " + sb.toString()); } CredentialsStore s = CredentialsProvider.lookupStores(Jenkins.getActiveInstance()).iterator().next(); - if (!existingCred.getDescriptor().getDisplayName().contains(KUBERNETES_SERVICE_ACCOUNT)) { - s.removeCredentials(Domain.global(), existingCred); - logger.info("Deleted credential " + id + " from Secret " + name + " with revision: " + resourceRevision); - s.save(); - } else { - logger.warning("Stopped attempt to delete " + KUBERNETES_SERVICE_ACCOUNT + " credentials with Id " + id ); - } + if (!existingCred.getDescriptor().getDisplayName().contains(KUBERNETES_SERVICE_ACCOUNT)) { + s.removeCredentials(Domain.global(), existingCred); + logger.info("Deleted credential " + id + " from Secret " + name + " with revision: " + + resourceRevision); + s.save(); + } else { + logger.warning( + "Stopped attempt to delete " + KUBERNETES_SERVICE_ACCOUNT + " credentials with Id " + id); + } } finally { SecurityContextHolder.setContext(previousContext); } } } - public static void deleteCredential(Secret secret) throws IOException { + public static void deleteCredential(Secret secret) { if (secret != null) { - String id = generateCredentialsName(secret.getMetadata().getNamespace(), secret.getMetadata().getName(), getSecretCustomName(secret)); - deleteCredential(id, NamespaceName.create(secret), secret.getMetadata().getResourceVersion()); + String id = generateCredentialsName(secret.getMetadata().getNamespace(), secret.getMetadata().getName(), + getSecretCustomName(secret)); + try { + deleteCredential(id, NamespaceName.create(secret), secret.getMetadata().getResourceVersion()); + } catch (IOException e) { + logger.log(SEVERE, "Credentials has not been deleted: " + e, e); + throw new RuntimeException(e); + } } } @@ -295,25 +332,27 @@ private static String generateCredentialsName(String namespace, String name, Str return (customName == null) ? 
namespace + "-" + name : customName; } - private static Credentials arbitraryKeyValueTextCredential(Map data, String generatedCredentialsName) { + private static Credentials arbitraryKeyValueTextCredential(Map data, + String generatedCredentialsName) { String text = ""; if (data != null && data.size() > 0) { // convert to JSON for parsing ease in pipelines try { text = new ObjectMapper().writeValueAsString(data); } catch (JsonProcessingException e) { - logger.log(Level.WARNING, "Arbitrary opaque secret " + generatedCredentialsName + " had issue converting json", e); + logger.log(Level.WARNING, + "Arbitrary opaque secret " + generatedCredentialsName + " had issue converting json", e); } } if (StringUtils.isBlank(text)) { - logger.log( - Level.WARNING, + logger.log(Level.WARNING, "Opaque secret {0} did not provide any data that could be processed into a Jenkins credential", new Object[] { generatedCredentialsName }); return null; } - return newSecretTextCredential(generatedCredentialsName, new String(Base64.encode(text.getBytes()))); + return newSecretTextCredential(generatedCredentialsName, + new String(Base64.getEncoder().encode(text.getBytes()))); } private static Credentials secretToCredentials(Secret secret) { @@ -321,16 +360,17 @@ private static Credentials secretToCredentials(Secret secret) { String name = secret.getMetadata().getName(); Map data = secret.getData(); if (data == null) { - logger.log(WARNING, "An OpenShift secret was marked for import, but it has no secret data. No credential will be created."); + logger.log(WARNING, "Secret " + name + " does not contain any data. 
No credential will be created."); return null; } - final String generatedCredentialsName = generateCredentialsName(namespace, name, getSecretCustomName(secret)); + String generatedCredentialsName = generateCredentialsName(namespace, name, getSecretCustomName(secret)); String passwordData = data.get(OPENSHIFT_SECRETS_DATA_PASSWORD); String sshKeyData = data.get(OPENSHIFT_SECRETS_DATA_SSHPRIVATEKEY); String usernameData = data.get(OPENSHIFT_SECRETS_DATA_USERNAME); - // We support "passphrase" and "password" for the ssh passphrase; passphrase has precedence over password - String passphraseData = data.get(OPENSHIFT_SECRETS_DATA_PASSPHRASE); + // We support "passphrase" and "password" for the ssh passphrase; passphrase has + // precedence over password + String passphraseData = data.get(OPENSHIFT_SECRETS_DATA_PASSPHRASE); String sshPassphrase = isNotBlank(passphraseData) ? passphraseData : passwordData; switch (secret.getType()) { @@ -355,14 +395,14 @@ private static Credentials secretToCredentials(Secret secret) { } String openshiftTokenData = data.get(OPENSHIFT_SECRETS_DATA_CLIENT_TOKEN); if (isNotBlank(openshiftTokenData)) { - return newOpenshiftTokenCredentials(generatedCredentialsName, openshiftTokenData); + return newOpenshiftTokenCredentials(generatedCredentialsName, openshiftTokenData); } return arbitraryKeyValueTextCredential(data, generatedCredentialsName); case OPENSHIFT_SECRETS_TYPE_BASICAUTH: return newUsernamePasswordCredentials(generatedCredentialsName, usernameData, passwordData); case OPENSHIFT_SECRETS_TYPE_SSH: - return newSSHUserCredential(generatedCredentialsName, usernameData, sshKeyData, sshPassphrase); + return newSSHUserCredential(generatedCredentialsName, usernameData, sshKeyData, sshPassphrase); default: // the type field is marked optional in k8s.io/api/core/v1/types.go, // default to OPENSHIFT_SECRETS_DATA_SECRET_TEXT in this case @@ -371,68 +411,64 @@ private static Credentials secretToCredentials(Secret secret) { } private static 
Credentials newOpenshiftTokenCredentials(String secretName, String secretText) { - if (secretName == null || secretName.length() == 0 || secretText == null || secretText.length() == 0) { - logger.log(Level.WARNING, - "Invalid secret data, secretName: " + secretName + " secretText is null: " + (secretText == null) - + " secretText is empty: " + (secretText != null ? secretText.length() == 0 : false)); - return null; - - } - - return new OpenShiftTokenCredentials(CredentialsScope.GLOBAL, secretName, secretName, - hudson.util.Secret.fromString(new String(Base64.decode(secretText), StandardCharsets.UTF_8))); + if (secretName == null || secretName.length() == 0 || secretText == null || secretText.length() == 0) { + logInvalidSecretData(secretName, secretText, TOKEN_SECRET_TYPE); + return null; + } + return new OpenShiftTokenCredentials(GLOBAL, secretName, secretName, + fromString(new String(DECODER.decode(secretText), UTF_8))); } private static Credentials newSecretFileCredential(String secretName, String fileData) { if (secretName == null || secretName.length() == 0 || fileData == null || fileData.length() == 0) { - logger.log(Level.WARNING, - "Invalid secret data, secretName: " + secretName + " filename is null: " + (fileData == null) - + " filename is empty: " + (fileData != null ? 
fileData.length() == 0 : false)); + logInvalidSecretData(secretName, fileData, FILE_SECRET_TYPE); return null; - } - return new FileCredentialsImpl(CredentialsScope.GLOBAL, secretName, secretName, secretName, - SecretBytes.fromString(fileData)); + return new FileCredentialsImpl(GLOBAL, secretName, secretName, secretName, SecretBytes.fromString(fileData)); } private static Credentials newSecretTextCredential(String secretName, String secretText) { if (secretName == null || secretName.length() == 0 || secretText == null || secretText.length() == 0) { - logger.log(Level.WARNING, - "Invalid secret data, secretName: " + secretName + " secretText is null: " + (secretText == null) - + " secretText is empty: " + (secretText != null ? secretText.length() == 0 : false)); + logInvalidSecretData(secretName, secretText, SECRET_TEXT_SECRET_TYPE); return null; - } - return new StringCredentialsImpl(CredentialsScope.GLOBAL, secretName, secretName, - hudson.util.Secret.fromString(new String(Base64.decode(secretText), StandardCharsets.UTF_8))); + String data = new String(DECODER.decode(secretText), UTF_8); + return new StringCredentialsImpl(GLOBAL, secretName, secretName, fromString(data)); } private static Credentials newCertificateCredential(String secretName, String passwordData, String certificateData) { if (secretName == null || secretName.length() == 0 || certificateData == null || certificateData.length() == 0) { - logger.log(Level.WARNING, - "Invalid secret data, secretName: " + secretName + " certificate is null: " - + (certificateData == null) + " certificate is empty: " - + (certificateData != null ? certificateData.length() == 0 : false)); + logInvalidSecretData(secretName, certificateData, "certificate"); return null; } - String certificatePassword = passwordData != null ? 
new String(Base64.decode(passwordData)) : null; - return new CertificateCredentialsImpl(CredentialsScope.GLOBAL, secretName, secretName, certificatePassword, + String certificatePassword = passwordData != null ? new String(DECODER.decode(passwordData)) : null; + return new CertificateCredentialsImpl(GLOBAL, secretName, secretName, certificatePassword, new CertificateCredentialsImpl.UploadedKeyStoreSource(SecretBytes.fromString(certificateData))); } - private static Credentials newSSHUserCredential(String secretName, String username, String sshKeyData, String passwordData) { + private static void logInvalidSecretData(String secretName, String secretText, String secretType) { + logger.log(Level.WARNING, + "Invalid secret data, secretName: " + secretName + " " + secretType + " is null: " + + (secretText == null) + " " + secretType + " is empty: " + + (secretText != null ? secretText.length() == 0 : false)); + } + + private static Credentials newSSHUserCredential(String secretName, String username, String sshKeyData, + String passwordData) { boolean secretNameIsBlank = StringUtils.isBlank(secretName); boolean sshKeyDataIsBlank = StringUtils.isBlank(sshKeyData); - if ( secretNameIsBlank || sshKeyDataIsBlank) { - logger.log(WARNING, "Invalid secret data, secretName: " + secretName + " sshKeyData is blank null: " + sshKeyDataIsBlank); + if (secretNameIsBlank || sshKeyDataIsBlank) { + logger.log(WARNING, "Invalid secret data, secretName: " + secretName + " sshKeyData is blank null: " + + sshKeyDataIsBlank); return null; } - String sshKeyPassword = (passwordData != null) ? new String(Base64.decode(passwordData),UTF_8) : null; - String sshKey = new String(Base64.decode(sshKeyData), UTF_8); - String sshUser = fixNull(username).isEmpty() ? "" : new String(Base64.decode(username), UTF_8); - BasicSSHUserPrivateKey.DirectEntryPrivateKeySource key = new BasicSSHUserPrivateKey.DirectEntryPrivateKeySource(sshKey); + String sshKeyPassword = (passwordData != null) ? 
new String(DECODER.decode(passwordData), UTF_8) : null; + String sshKey = new String(DECODER.decode(sshKeyData), UTF_8); + String sshUser = fixNull(username).isEmpty() ? "" : new String(DECODER.decode(username), UTF_8); + BasicSSHUserPrivateKey.DirectEntryPrivateKeySource key = new BasicSSHUserPrivateKey.DirectEntryPrivateKeySource( + sshKey); return new BasicSSHUserPrivateKey(GLOBAL, secretName, sshUser, key, sshKeyPassword, secretName); } @@ -449,9 +485,8 @@ private static Credentials newUsernamePasswordCredentials(String secretName, Str return null; } - return new UsernamePasswordCredentialsImpl(CredentialsScope.GLOBAL, secretName, secretName, - new String(Base64.decode(usernameData), UTF_8), - new String(Base64.decode(passwordData), UTF_8)); + return new UsernamePasswordCredentialsImpl(GLOBAL, secretName, secretName, + new String(DECODER.decode(usernameData), UTF_8), new String(DECODER.decode(passwordData), UTF_8)); } /** @@ -463,4 +498,12 @@ public static boolean hasCredentials() { return !StringUtils.isEmpty(getAuthenticatedOpenShiftClient().getConfiguration().getOauthToken()); } + static void linkSourceSecretToCredentials(String bc, String credential) { + SOURCE_SECRET_TO_CREDS_MAP.put(bc, credential); + } + + static String unlinkBCSecretToCrendential(String bc) { + return SOURCE_SECRET_TO_CREDS_MAP.remove(bc); + } + } diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/GlobalPluginConfigurationTimerTask.java b/src/main/java/io/fabric8/jenkins/openshiftsync/GlobalPluginConfigurationTimerTask.java index 62838050f..2cb0677c5 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/GlobalPluginConfigurationTimerTask.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/GlobalPluginConfigurationTimerTask.java @@ -1,6 +1,7 @@ package io.fabric8.jenkins.openshiftsync; import static hudson.init.InitMilestone.COMPLETED; +import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getAuthenticatedOpenShiftClient; import java.util.ArrayList; import 
java.util.List; @@ -8,6 +9,8 @@ import hudson.init.InitMilestone; import hudson.triggers.SafeTimerTask; +import io.fabric8.kubernetes.client.informers.SharedInformerFactory; +import io.fabric8.openshift.client.OpenShiftClient; import jenkins.model.Jenkins; public class GlobalPluginConfigurationTimerTask extends SafeTimerTask { @@ -44,28 +47,32 @@ protected void doRun() throws Exception { String[] namespaces = globalPluginConfiguration.getNamespaces(); List> watchers = new ArrayList<>(); for (String namespace : namespaces) { - BuildConfigWatcher buildConfigWatcher = new BuildConfigWatcher(namespace); - watchers.add(buildConfigWatcher); - buildConfigWatcher.start(); + BuildConfigInformer buildConfigInformer = new BuildConfigInformer(namespace); + watchers.add(buildConfigInformer); + buildConfigInformer.start(); - BuildWatcher buildWatcher = new BuildWatcher(namespace); - buildWatcher.start(); - watchers.add(buildWatcher); + BuildInformer buildInformer = new BuildInformer(namespace); + buildInformer.start(); + watchers.add(buildInformer); ConfigMapInformer configMapInformer = new ConfigMapInformer(namespace); configMapInformer.start(); watchers.add(configMapInformer); - ImageStreamWatcher imageStreamWatcher = new ImageStreamWatcher(namespace); - imageStreamWatcher.start(); - watchers.add(imageStreamWatcher); + ImageStreamInformer imageStreamInformer = new ImageStreamInformer(namespace); + imageStreamInformer.start(); + watchers.add(imageStreamInformer); - SecretWatcher secretWatcher = new SecretWatcher(namespace); - secretWatcher.start(); - watchers.add(secretWatcher); + SecretInformer secretInformer = new SecretInformer(namespace); + secretInformer.start(); + watchers.add(secretInformer); } logger.info("All the watchers have been initialized!!"); + OpenShiftClient client = getAuthenticatedOpenShiftClient(); + SharedInformerFactory informerFactory = client.informers(); + informerFactory.startAllRegisteredInformers(); + synchronized (watchers) { List> globalWatchers = 
GlobalPluginConfiguration.getWatchers(); synchronized (globalWatchers) { diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/ImageStreamInformer.java b/src/main/java/io/fabric8/jenkins/openshiftsync/ImageStreamInformer.java new file mode 100644 index 000000000..3efa1382d --- /dev/null +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/ImageStreamInformer.java @@ -0,0 +1,134 @@ +/** + * Copyright (C) 2017 Red Hat, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package io.fabric8.jenkins.openshiftsync; + +import static io.fabric8.jenkins.openshiftsync.Constants.IMAGESTREAM_AGENT_LABEL; +import static io.fabric8.jenkins.openshiftsync.Constants.IMAGESTREAM_AGENT_LABEL_VALUE; +import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getInformerFactory; +import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getOpenshiftClient; +import static io.fabric8.jenkins.openshiftsync.PodTemplateUtils.IMAGESTREAM_TYPE; +import static io.fabric8.jenkins.openshiftsync.PodTemplateUtils.addAgents; +import static io.fabric8.jenkins.openshiftsync.PodTemplateUtils.addPodTemplate; +import static io.fabric8.jenkins.openshiftsync.PodTemplateUtils.deleteAgents; +import static io.fabric8.jenkins.openshiftsync.PodTemplateUtils.getPodTemplatesListFromImageStreams; +import static io.fabric8.jenkins.openshiftsync.PodTemplateUtils.hasPodTemplate; +import static io.fabric8.jenkins.openshiftsync.PodTemplateUtils.updateAgents; +import static 
java.util.Collections.singletonMap; +import static java.util.logging.Level.SEVERE; + +import java.util.List; +import java.util.Map; +import java.util.logging.Logger; + +import org.csanchez.jenkins.plugins.kubernetes.PodTemplate; + +import io.fabric8.kubernetes.api.model.ObjectMeta; +import io.fabric8.kubernetes.client.dsl.base.OperationContext; +import io.fabric8.kubernetes.client.informers.ResourceEventHandler; +import io.fabric8.kubernetes.client.informers.SharedIndexInformer; +import io.fabric8.kubernetes.client.informers.SharedInformerFactory; +import io.fabric8.openshift.api.model.ImageStream; +import io.fabric8.openshift.api.model.ImageStreamList; + +public class ImageStreamInformer extends ImageStreamWatcher implements ResourceEventHandler { + public ImageStreamInformer(String namespace) { + super(namespace); + } + + private final static Logger LOGGER = Logger.getLogger(ImageStreamInformer.class.getName()); + private SharedIndexInformer informer; + + @Override + public int getListIntervalInSeconds() { + return 1_000 * GlobalPluginConfiguration.get().getImageStreamListInterval(); + } + + public void start() { + LOGGER.info("Starting ImageStream informer for {} !!" 
+ namespace); + LOGGER.fine("Listing ImageStream resources"); + SharedInformerFactory factory = getInformerFactory().inNamespace(namespace); + Map labels = singletonMap(IMAGESTREAM_AGENT_LABEL, IMAGESTREAM_AGENT_LABEL_VALUE); + OperationContext withLabels = new OperationContext().withLabels(labels); + this.informer = factory.sharedIndexInformerFor(ImageStream.class, withLabels, getListIntervalInSeconds()); + informer.addEventHandler(this); + factory.startAllRegisteredInformers(); + LOGGER.info("ImageStream informer started for namespace: {}" + namespace); + ImageStreamList list = getOpenshiftClient().imageStreams().inNamespace(namespace).withLabels(labels).list(); + onInit(list.getItems()); + } + + public void startAfterOnClose(String namespace) { + synchronized (this.lock) { + start(); + } + } + + public void stop() { + LOGGER.info("Stopping ImageStream informer {} !!" + namespace); + this.informer.stop(); + } + + @Override + public void onAdd(ImageStream obj) { + LOGGER.fine("ImageStream informer received add event for: {}" + obj); + ObjectMeta metadata = obj.getMetadata(); + String name = metadata.getName(); + String uid = metadata.getUid(); + LOGGER.info("ImageStream informer received add event for: {}" + name); + List slaves = PodTemplateUtils.getPodTemplatesListFromImageStreams(obj); + addAgents(slaves, IMAGESTREAM_TYPE, uid, name, namespace); + } + + @Override + public void onUpdate(ImageStream oldObj, ImageStream newObj) { + LOGGER.info("ImageStream informer received update event for: {} to: {}" + oldObj + newObj); + List slaves = PodTemplateUtils.getPodTemplatesListFromImageStreams(newObj); + ObjectMeta metadata = newObj.getMetadata(); + String uid = metadata.getUid(); + String name = metadata.getName(); + String namespace = metadata.getNamespace(); + updateAgents(slaves, IMAGESTREAM_TYPE, uid, name, namespace); + } + + @Override + public void onDelete(ImageStream obj, boolean deletedFinalStateUnknown) { + LOGGER.info("ImageStream informer received delete event 
for: {}" + obj); + List slaves = PodTemplateUtils.getPodTemplatesListFromImageStreams(obj); + ObjectMeta metadata = obj.getMetadata(); + String uid = metadata.getUid(); + String name = metadata.getName(); + String namespace = metadata.getNamespace(); + deleteAgents(slaves, IMAGESTREAM_TYPE, uid, name, namespace); + + } + + private void onInit(List list) { + for (ImageStream imageStream : list) { + try { + List agents = getPodTemplatesListFromImageStreams(imageStream); + for (PodTemplate podTemplate : agents) { + // watch event might beat the timer - put call is technically fine, but not + // addPodTemplate given k8s plugin issues + if (!hasPodTemplate(podTemplate)) { + addPodTemplate(podTemplate); + } + } + } catch (Exception e) { + LOGGER.log(SEVERE, "Failed to update job", e); + } + } + } +} \ No newline at end of file diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/ImageStreamWatcher.java b/src/main/java/io/fabric8/jenkins/openshiftsync/ImageStreamWatcher.java index 9175af755..2f71f447d 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/ImageStreamWatcher.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/ImageStreamWatcher.java @@ -21,9 +21,9 @@ import static io.fabric8.jenkins.openshiftsync.PodTemplateUtils.addPodTemplate; import static io.fabric8.jenkins.openshiftsync.PodTemplateUtils.getPodTemplatesListFromImageStreams; import static io.fabric8.jenkins.openshiftsync.PodTemplateUtils.hasPodTemplate; -import static io.fabric8.jenkins.openshiftsync.PodTemplateUtils.processSlavesForAddEvent; -import static io.fabric8.jenkins.openshiftsync.PodTemplateUtils.processSlavesForDeleteEvent; -import static io.fabric8.jenkins.openshiftsync.PodTemplateUtils.processSlavesForModifyEvent; +import static io.fabric8.jenkins.openshiftsync.PodTemplateUtils.addAgents; +import static io.fabric8.jenkins.openshiftsync.PodTemplateUtils.deleteAgents; +import static io.fabric8.jenkins.openshiftsync.PodTemplateUtils.updateAgents; import static 
java.util.logging.Level.SEVERE; import static java.util.logging.Level.WARNING; @@ -102,13 +102,13 @@ public void eventReceived(Action action, ImageStream imageStream) { String namespace = metadata.getNamespace(); switch (action) { case ADDED: - processSlavesForAddEvent(slaves, IMAGESTREAM_TYPE, uid, name, namespace); + addAgents(slaves, IMAGESTREAM_TYPE, uid, name, namespace); break; case MODIFIED: - processSlavesForModifyEvent(slaves, IMAGESTREAM_TYPE, uid, name, namespace); + updateAgents(slaves, IMAGESTREAM_TYPE, uid, name, namespace); break; case DELETED: - processSlavesForDeleteEvent(slaves, IMAGESTREAM_TYPE, uid, name, namespace); + deleteAgents(slaves, IMAGESTREAM_TYPE, uid, name, namespace); break; case ERROR: logger.warning("watch for imageStream " + ns + "/" + name + " received error event "); diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/OpenShiftUtils.java b/src/main/java/io/fabric8/jenkins/openshiftsync/OpenShiftUtils.java index 6d59cc37d..c996ddbfd 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/OpenShiftUtils.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/OpenShiftUtils.java @@ -60,6 +60,7 @@ import io.fabric8.kubernetes.api.model.ServiceSpec; import io.fabric8.kubernetes.client.Config; import io.fabric8.kubernetes.client.Version; +import io.fabric8.kubernetes.client.informers.SharedInformerFactory; import io.fabric8.openshift.api.model.Build; import io.fabric8.openshift.api.model.BuildBuilder; import io.fabric8.openshift.api.model.BuildConfig; @@ -84,6 +85,8 @@ public class OpenShiftUtils { private static OpenShiftClient openShiftClient; private static String jenkinsPodNamespace = null; + private static SharedInformerFactory factory; + static { jenkinsPodNamespace = System.getProperty(Constants.OPENSHIFT_PROJECT_ENV_VAR_NAME); if (jenkinsPodNamespace != null && jenkinsPodNamespace.trim().length() > 0) { @@ -158,6 +161,13 @@ public synchronized static OpenShiftClient getAuthenticatedOpenShiftClient() { return 
openShiftClient; } + public synchronized static SharedInformerFactory getInformerFactory() { + if (factory == null) { + factory = getAuthenticatedOpenShiftClient().informers(); + } + return factory; + } + public synchronized static void shutdownOpenShiftClient() { if (openShiftClient != null) { openShiftClient.close(); diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/PodTemplateUtils.java b/src/main/java/io/fabric8/jenkins/openshiftsync/PodTemplateUtils.java index 7a4d17b08..bc084a24c 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/PodTemplateUtils.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/PodTemplateUtils.java @@ -1,5 +1,7 @@ package io.fabric8.jenkins.openshiftsync; +import static io.fabric8.jenkins.openshiftsync.Constants.IMAGESTREAM_AGENT_LABEL; +import static io.fabric8.jenkins.openshiftsync.Constants.IMAGESTREAM_AGENT_LABEL_VALUE; import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getAuthenticatedOpenShiftClient; import static java.util.logging.Level.FINE; @@ -31,7 +33,9 @@ public class PodTemplateUtils { - protected static final String cmType = "ConfigMap"; + private static final String MAVEN_POD_TEMPLATE_NAME = "maven"; + private static final String NODEJS_POD_TEMPLATE_NAME = "nodejs"; + protected static final String CONFIGMAP = "ConfigMap"; protected static final String isType = "ImageStream"; static final String IMAGESTREAM_TYPE = isType; private static final String PT_NAME_CLAIMED = "The event for %s | %s | %s that attempts to add the pod template %s was ignored because a %s previously created a pod template with the same name"; @@ -79,16 +83,17 @@ public static PodTemplate podTemplateInit(String name, String image, String labe public static void removePodTemplate(PodTemplate podTemplate) { KubernetesCloud kubeCloud = JenkinsUtils.getKubernetesCloud(); if (kubeCloud != null) { - LOGGER.info("Removing PodTemplate: " + podTemplate.getName()); + String name = podTemplate.getName(); + String namespace = 
podTemplate.getNamespace(); + LOGGER.info("Removing PodTemplate: " + name + " in namespace: " + namespace); // NOTE - PodTemplate does not currently override hashCode, equals, - // so - // the KubernetsCloud.removeTemplate currently is broken; + // so the KubernetsCloud.removeTemplate currently is broken; // kubeCloud.removeTemplate(podTemplate); List list = kubeCloud.getTemplates(); Iterator iter = list.iterator(); while (iter.hasNext()) { PodTemplate pt = iter.next(); - if (pt.getName().equals(podTemplate.getName())) { + if (pt.getName().equals(name)) { iter.remove(); } } @@ -114,29 +119,26 @@ public static void removePodTemplate(PodTemplate podTemplate) { public static synchronized List getPodTemplates() { KubernetesCloud kubeCloud = JenkinsUtils.getKubernetesCloud(); + List list = new ArrayList(); if (kubeCloud != null) { - // create copy of list for more flexiblity in loops - ArrayList list = new ArrayList(); + // create copy of list for more flexibility in loops list.addAll(kubeCloud.getTemplates()); - return list; - } else { - return null; } + return list; } - public static synchronized boolean hasPodTemplate(PodTemplate incomingPod) { - String name = incomingPod.getName(); - if (name == null) - return false; - String image = incomingPod.getImage(); - if (image == null) - return false; - KubernetesCloud kubeCloud = JenkinsUtils.getKubernetesCloud(); - if (kubeCloud != null) { - List list = kubeCloud.getTemplates(); - for (PodTemplate pod : list) { - if (name.equals(pod.getName()) && image.equals(pod.getImage())) - return true; + @SuppressWarnings("deprecation") + public static synchronized boolean hasPodTemplate(PodTemplate podTemplate) { + String name = podTemplate.getName(); + String image = podTemplate.getImage(); + if (name != null && image != null) { + KubernetesCloud kubeCloud = JenkinsUtils.getKubernetesCloud(); + if (kubeCloud != null) { + List list = kubeCloud.getTemplates(); + for (PodTemplate pod : list) { + if (name.equals(pod.getName()) && 
image.equals(pod.getImage())) + return true; + } } } return false; @@ -166,7 +168,7 @@ protected static void purgeTemplates(String type, String uid, String apiObjName, for (PodTemplate podTemplate : trackedPodTemplates.get(uid)) { // we should not have included any pod templates we did not // mark the type for, but we'll check just in case - removePodTemplate(LOGGER, PT_NOT_OWNED, type, apiObjName, namespace, podTemplate); + removePodTemplate(type, apiObjName, namespace, podTemplate); } trackedPodTemplates.remove(uid); } @@ -184,8 +186,8 @@ protected static void trackPodTemplates(String uid, List podTemplat // Adds PodTemplate to the List correspoding to the ConfigMap of // given uid and Deletes from Jenkins - protected static List onlyTrackPodTemplate(String type, String apiObjName, - String namespace, List podTemplates, PodTemplate podTemplate) { + protected static List onlyTrackPodTemplate(String type, String apiObjName, String namespace, + List podTemplates, PodTemplate podTemplate) { String name = podTemplate.getName(); // we allow configmap overrides of maven and nodejs, but not imagestream ones // as they are less specific/defined wrt podTemplate fields @@ -218,14 +220,15 @@ protected static void addPodTemplate(String type, String apiObjName, String name // as they are less specific/defined wrt podTemplate fields if (apiObjName != null && namespace != null && podTemplates != null) { if (isReservedPodTemplateName(name) && isType.equals(type)) { + LOGGER.info("PodTemplate " + name + " cannot be added because it has a reserved name...ignoring"); return; } - String ret = podTemplateToApiType.putIfAbsent(name, type); - if (ret == null || ret.equals(type)) { + String podTemplateAsXmlString = podTemplateToApiType.putIfAbsent(name, type); + if (podTemplateAsXmlString == null || podTemplateAsXmlString.equals(type)) { addPodTemplate(podTemplate); podTemplates.add(podTemplate); } else { - LOGGER.info(String.format(PT_NAME_CLAIMED, type, apiObjName, namespace, name, 
ret)); + LOGGER.info(String.format(PT_NAME_CLAIMED, type, apiObjName, namespace, name, podTemplateAsXmlString)); } } else { podTemplateToApiType.put(name, type); @@ -234,8 +237,7 @@ protected static void addPodTemplate(String type, String apiObjName, String name } // Delete a PodTemplate from Jenkins - protected static void removePodTemplate(Logger LOGGER, String PT_NOT_OWNED, String type, String apiObjName, - String namespace, PodTemplate podTemplate) { + protected static void removePodTemplate(String type, String apiObjName, String namespace, PodTemplate podTemplate) { String name = podTemplate.getName(); String t = podTemplateToApiType.get(name); if (t != null && t.equals(type)) { @@ -247,9 +249,7 @@ protected static void removePodTemplate(Logger LOGGER, String PT_NOT_OWNED, Stri } protected static boolean isReservedPodTemplateName(String name) { - if (name.equals("maven") || name.equals("nodejs")) - return true; - return false; + return (name.equals(MAVEN_POD_TEMPLATE_NAME) || name.equals(NODEJS_POD_TEMPLATE_NAME)); } protected static List getPodTemplatesListFromImageStreams(ImageStream imageStream) { @@ -323,7 +323,7 @@ protected static PodTemplate podTemplateFromData(String name, String image, Map< // podTemplatesFromConfigMap takes every key from a ConfigMap and tries to // create a PodTemplate from the contained // XML. 
- public static List podTemplatesFromConfigMap(ConfigMapWatcher configMapWatcher, ConfigMap configMap) { + public static List podTemplatesFromConfigMap(ConfigMap configMap) { List results = new ArrayList<>(); Map data = configMap.getData(); @@ -422,14 +422,13 @@ protected static boolean configMapContainsSlave(ConfigMap configMap) { } protected static boolean hasSlaveLabelOrAnnotation(Map map) { - if (map != null) - return map.containsKey("role") && map.get("role").equals("jenkins-slave"); - return false; + return map != null && map.containsKey(IMAGESTREAM_AGENT_LABEL) + && map.get(IMAGESTREAM_AGENT_LABEL).equals(IMAGESTREAM_AGENT_LABEL_VALUE); } - protected static void processSlavesForAddEvent(List slaves, String type, String uid, String apiObjName, + protected static void addAgents(List slaves, String type, String uid, String apiObjName, String namespace) { - LOGGER.info("Adding PodTemplate(s) for "); + LOGGER.info("Adding PodTemplate(s) for " + namespace); List finalSlaveList = new ArrayList(); for (PodTemplate podTemplate : slaves) { addPodTemplate(type, apiObjName, namespace, finalSlaveList, podTemplate); @@ -437,8 +436,8 @@ protected static void processSlavesForAddEvent(List slaves, String updateTrackedPodTemplatesMap(uid, finalSlaveList); } - protected static void processSlavesForModifyEvent(List slaves, String type, - String uid, String apiObjName, String namespace) { + protected static void updateAgents(List slaves, String type, String uid, String apiObjName, + String namespace) { LOGGER.info("Modifying PodTemplates"); boolean alreadyTracked = trackedPodTemplates.containsKey(uid); boolean hasSlaves = slaves.size() > 0; // Configmap has podTemplates @@ -481,11 +480,27 @@ protected static void processSlavesForModifyEvent(List slaves, Stri } } - protected static void processSlavesForDeleteEvent(List slaves, String type, String uid, - String apiObjName, String namespace) { + protected static void deleteAgents(List slaves, String type, String uid, String 
apiObjName, + String namespace) { if (trackedPodTemplates.containsKey(uid)) { purgeTemplates(type, uid, apiObjName, namespace); } } + protected static void addPodTemplateFromConfigMap(ConfigMap configMap) { + try { + String uid = configMap.getMetadata().getUid(); + if (configMapContainsSlave(configMap) && !trackedPodTemplates.containsKey(uid)) { + List templates = podTemplatesFromConfigMap(configMap); + trackedPodTemplates.put(uid, templates); + for (PodTemplate podTemplate : templates) { + LOGGER.info("Adding PodTemplate {}" + podTemplate); + addPodTemplate(podTemplate); + } + } + } catch (Exception e) { + LOGGER.severe("Failed to update ConfigMap PodTemplates" + e); + } + } + } diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/SecretInformer.java b/src/main/java/io/fabric8/jenkins/openshiftsync/SecretInformer.java new file mode 100644 index 000000000..a0062ad4c --- /dev/null +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/SecretInformer.java @@ -0,0 +1,110 @@ +/** + * Copyright (C) 2017 Red Hat, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package io.fabric8.jenkins.openshiftsync; + +import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_LABELS_SECRET_CREDENTIAL_SYNC; +import static io.fabric8.jenkins.openshiftsync.Constants.VALUE_SECRET_SYNC; +import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getInformerFactory; +import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getOpenshiftClient; +import static java.util.Collections.singletonMap; +import static java.util.logging.Level.SEVERE; + +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.logging.Logger; + +import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; +import io.fabric8.kubernetes.api.model.ObjectMeta; +import io.fabric8.kubernetes.api.model.Secret; +import io.fabric8.kubernetes.api.model.SecretList; +import io.fabric8.kubernetes.client.dsl.base.OperationContext; +import io.fabric8.kubernetes.client.informers.ResourceEventHandler; +import io.fabric8.kubernetes.client.informers.SharedIndexInformer; +import io.fabric8.kubernetes.client.informers.SharedInformerFactory; + +public class SecretInformer extends SecretWatcher implements ResourceEventHandler { + + private final static Logger LOGGER = Logger.getLogger(SecretInformer.class.getName()); + private final static ConcurrentHashMap trackedSecrets = new ConcurrentHashMap(); + private static final long RESYNC_PERIOD = 30 * 1000L; + + private SharedIndexInformer informer; + + @SuppressFBWarnings("EI_EXPOSE_REP2") + public SecretInformer(String namespace) { + super(namespace); + } + + @Override + public int getListIntervalInSeconds() { + return GlobalPluginConfiguration.get().getSecretListInterval(); + } + + public void start() { + LOGGER.info("Starting secret informer {} !!" 
+ namespace); + LOGGER.fine("listing Secret resources"); + SharedInformerFactory factory = getInformerFactory().inNamespace(namespace); + Map labels = singletonMap(OPENSHIFT_LABELS_SECRET_CREDENTIAL_SYNC, VALUE_SECRET_SYNC); + OperationContext withLabels = new OperationContext().withLabels(labels); + this.informer = factory.sharedIndexInformerFor(Secret.class, withLabels, RESYNC_PERIOD); + informer.addEventHandler(this); + factory.startAllRegisteredInformers(); + LOGGER.info("Secret informer started for namespace: {}" + namespace); + SecretList list = getOpenshiftClient().secrets().inNamespace(namespace).withLabels(labels).list(); + onInit(list.getItems()); + } + + public void stop() { + LOGGER.info("Stopping secret informer {} !!" + namespace); + this.informer.stop(); + } + + @Override + public void onAdd(Secret obj) { + LOGGER.fine("Secret informer received add event for: {}" + obj); + ObjectMeta metadata = obj.getMetadata(); + String name = metadata.getName(); + LOGGER.info("Secret informer received add event for: {}" + name); + insertOrUpdateCredentialFromSecret(obj); + } + + @Override + public void onUpdate(Secret oldObj, Secret newObj) { + LOGGER.info("Secret informer received update event for: {} to: {}" + oldObj + newObj); + updateCredential(newObj); + } + + @Override + public void onDelete(Secret obj, boolean deletedFinalStateUnknown) { + LOGGER.info("Secret informer received delete event for: {}" + obj); + CredentialsUtils.deleteCredential(obj); + } + + private void onInit(List list) { + for (Secret secret : list) { + try { + if (validSecret(secret) && shouldProcessSecret(secret)) { + insertOrUpdateCredentialFromSecret(secret); + trackedSecrets.put(secret.getMetadata().getUid(), secret.getMetadata().getResourceVersion()); + } + } catch (Exception e) { + LOGGER.log(SEVERE, "Failed to update secret", e); + } + } + } + +} diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/SecretWatcher.java 
b/src/main/java/io/fabric8/jenkins/openshiftsync/SecretWatcher.java index 08ae5a68d..d9ef4f438 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/SecretWatcher.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/SecretWatcher.java @@ -20,10 +20,11 @@ import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getAuthenticatedOpenShiftClient; import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getOpenshiftClient; import static java.util.logging.Level.SEVERE; +import static java.util.logging.Level.WARNING; +import java.io.IOException; import java.util.List; import java.util.concurrent.ConcurrentHashMap; -import java.util.logging.Level; import java.util.logging.Logger; import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; @@ -100,7 +101,7 @@ private void onInitialSecrets(SecretList secrets) { for (Secret secret : items) { try { if (validSecret(secret) && shouldProcessSecret(secret)) { - upsertCredential(secret); + insertOrUpdateCredentialFromSecret(secret); trackedSecrets.put(secret.getMetadata().getUid(), secret.getMetadata().getResourceVersion()); } } catch (Exception e) { @@ -120,13 +121,13 @@ public void eventReceived(Action action, Secret secret) { try { switch (action) { case ADDED: - upsertCredential(secret); + insertOrUpdateCredentialFromSecret(secret); break; case DELETED: deleteCredential(secret); break; case MODIFIED: - modifyCredential(secret); + updateCredential(secret); break; case ERROR: logger.warning("watch for secret " + secret.getMetadata().getName() + " received error event "); @@ -137,37 +138,47 @@ public void eventReceived(Action action, Secret secret) { break; } } catch (Exception e) { - logger.log(Level.WARNING, "Caught: " + e, e); + logger.log(WARNING, "Caught: " + e, e); } } - private void upsertCredential(final Secret secret) throws Exception { + protected void insertOrUpdateCredentialFromSecret(final Secret secret) { if (secret != null) { ObjectMeta metadata = secret.getMetadata(); if (metadata != null) { 
logger.info("Upserting Secret with Uid " + metadata.getUid() + " with Name " + metadata.getName()); if (validSecret(secret)) { - CredentialsUtils.upsertCredential(secret); - trackedSecrets.put(metadata.getUid(), metadata.getResourceVersion()); + try { + CredentialsUtils.upsertCredential(secret); + trackedSecrets.put(metadata.getUid(), metadata.getResourceVersion()); + } catch (IOException e) { + logger.log(SEVERE, "Credential has not been saved: " + e, e); + throw new RuntimeException(e); + } } } } } - private void modifyCredential(Secret secret) throws Exception { + protected void updateCredential(Secret secret) { if (secret != null) { ObjectMeta metadata = secret.getMetadata(); if (metadata != null) { logger.info("Modifying Secret with Uid " + metadata.getUid() + " with Name " + metadata.getName()); if (validSecret(secret) && shouldProcessSecret(secret)) { - CredentialsUtils.upsertCredential(secret); - trackedSecrets.put(metadata.getUid(), metadata.getResourceVersion()); + try { + CredentialsUtils.upsertCredential(secret); + trackedSecrets.put(metadata.getUid(), metadata.getResourceVersion()); + } catch (IOException e) { + logger.log(SEVERE, "Secret has not been saved: " + e, e); + throw new RuntimeException(e); + } } } } } - private boolean validSecret(Secret secret) { + protected boolean validSecret(Secret secret) { if (secret != null) { ObjectMeta metadata = secret.getMetadata(); if (metadata != null) { @@ -180,14 +191,14 @@ private boolean validSecret(Secret secret) { return false; } - private boolean shouldProcessSecret(Secret secret) { + protected boolean shouldProcessSecret(Secret secret) { if (secret != null) { ObjectMeta metadata = secret.getMetadata(); if (metadata != null) { String uid = metadata.getUid(); String rv = metadata.getResourceVersion(); - String savedRV = trackedSecrets.get(uid); - if (savedRV == null || !savedRV.equals(rv)) { + String oldResourceVersion = trackedSecrets.get(uid); + if (oldResourceVersion == null || 
!oldResourceVersion.equals(rv)) { return true; } } From e01afc6b6055c863cfa3065c95f53dae546d48b5 Mon Sep 17 00:00:00 2001 From: Akram Ben Aissi Date: Mon, 12 Apr 2021 18:53:28 +0200 Subject: [PATCH 12/22] Informer implementation working + migrating informers to slf4j --- pom.xml | 11 +-- .../openshiftsync/BuildConfigInformer.java | 53 +++++++------ .../openshiftsync/BuildConfigToJobMap.java | 2 +- .../jenkins/openshiftsync/BuildInformer.java | 77 ++++++++++--------- .../openshiftsync/BuildSyncRunListener.java | 71 +++++++++-------- .../openshiftsync/BuildToActionMapper.java | 7 +- .../openshiftsync/ConfigMapInformer.java | 75 +++++++++--------- .../openshiftsync/GenericEventHandler.java | 30 ++++++++ .../GlobalPluginConfigurationTimerTask.java | 39 +++++----- .../openshiftsync/ImageStreamInformer.java | 72 +++++++++-------- .../jenkins/openshiftsync/OpenShiftUtils.java | 28 ++++--- .../jenkins/openshiftsync/SecretInformer.java | 46 +++++------ 12 files changed, 282 insertions(+), 229 deletions(-) create mode 100644 src/main/java/io/fabric8/jenkins/openshiftsync/GenericEventHandler.java diff --git a/pom.xml b/pom.xml index d072cd467..5f1b2a70d 100644 --- a/pom.xml +++ b/pom.xml @@ -25,7 +25,7 @@ 2.263 8 2.71 - 5.2.1 + 5.3.0 DEBUG 3.0.4 false @@ -141,11 +141,7 @@ org.slf4j slf4j-api - - - org.slf4j - slf4j-simple - ${slf4j.version} + provided @@ -162,10 +158,9 @@ org.jenkins-ci.plugins kubernetes-client-api - 5.2.1-beta-1 + 5.3.0-beta-1-SNAPSHOT - org.eclipse.jetty jetty-util diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/BuildConfigInformer.java b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildConfigInformer.java index 009ab6565..72a0db1eb 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/BuildConfigInformer.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildConfigInformer.java @@ -22,13 +22,13 @@ import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_LABELS_BUILD_CONFIG_NAME; import static 
io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getAuthenticatedOpenShiftClient; import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getInformerFactory; -import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getOpenshiftClient; import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.isPipelineStrategyBuildConfig; import static java.util.concurrent.TimeUnit.MILLISECONDS; -import static java.util.logging.Level.SEVERE; import java.util.List; -import java.util.logging.Logger; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import hudson.model.Job; import hudson.security.ACL; @@ -37,9 +37,7 @@ import io.fabric8.kubernetes.client.informers.ResourceEventHandler; import io.fabric8.kubernetes.client.informers.SharedIndexInformer; import io.fabric8.kubernetes.client.informers.SharedInformerFactory; -import io.fabric8.kubernetes.client.informers.cache.Lister; import io.fabric8.openshift.api.model.BuildConfig; -import io.fabric8.openshift.api.model.BuildConfigList; import io.fabric8.openshift.api.model.BuildList; import io.fabric8.openshift.client.OpenShiftClient; import jenkins.model.Jenkins; @@ -53,7 +51,7 @@ */ public class BuildConfigInformer extends BuildConfigWatcher implements ResourceEventHandler { - private final static Logger LOGGER = Logger.getLogger(BuildConfigInformer.class.getName()); + private static final Logger LOGGER = LoggerFactory.getLogger(BuildConfigInformer.class.getName()); private SharedIndexInformer informer; public BuildConfigInformer(String namespace) { @@ -67,15 +65,14 @@ public int getListIntervalInSeconds() { public void start() { LOGGER.info("Starting BuildConfig informer for {} !!" 
+ namespace); - LOGGER.fine("listing BuildConfig resources"); + LOGGER.debug("listing BuildConfig resources"); SharedInformerFactory factory = getInformerFactory().inNamespace(namespace); this.informer = factory.sharedIndexInformerFor(BuildConfig.class, getListIntervalInSeconds()); informer.addEventHandler(this); - factory.startAllRegisteredInformers(); LOGGER.info("BuildConfig informer started for namespace: {}" + namespace); - // waitInformerSync(informer); - BuildConfigList list = getOpenshiftClient().buildConfigs().inNamespace(namespace).list(); - onInit(list.getItems()); + // BuildConfigList list = + // getOpenshiftClient().buildConfigs().inNamespace(namespace).list(); + // onInit(list.getItems()); } public void stop() { @@ -85,23 +82,32 @@ public void stop() { @Override public void onAdd(BuildConfig obj) { - LOGGER.fine("BuildConfig informer received add event for: {}" + obj); - ObjectMeta metadata = obj.getMetadata(); - String name = metadata.getName(); - LOGGER.info("BuildConfig informer received add event for: {}" + name); - upsertJob(obj); + LOGGER.debug("BuildConfig informer received add event for: {}" + obj); + if (obj != null) { + ObjectMeta metadata = obj.getMetadata(); + String name = metadata.getName(); + LOGGER.info("BuildConfig informer received add event for: {}" + name); + upsertJob(obj); + } } @Override public void onUpdate(BuildConfig oldObj, BuildConfig newObj) { - LOGGER.info("BuildConfig informer received update event for: {} to: {}" + oldObj + newObj); - modifyEventToJenkinsJob(newObj); + LOGGER.debug("BuildConfig informer received update event for: {} to: {}" + oldObj + " " + newObj); + if (newObj != null) { + String oldRv = oldObj.getMetadata().getResourceVersion(); + String newRv = newObj.getMetadata().getResourceVersion(); + LOGGER.info("BuildConfig informer received update event for: {} to: {}" + oldRv + " " + newRv); + modifyEventToJenkinsJob(newObj); + } } @Override public void onDelete(BuildConfig obj, boolean 
deletedFinalStateUnknown) { LOGGER.info("BuildConfig informer received delete event for: {}" + obj); - deleteEventToJenkinsJob(obj); + if (obj != null) { + deleteEventToJenkinsJob(obj); + } } @SuppressWarnings({ "deprecation", "serial" }) @@ -127,7 +133,7 @@ public Void call() throws Exception { @Override public void doRun() { if (!CredentialsUtils.hasCredentials()) { - LOGGER.fine("No Openshift Token credential defined."); + LOGGER.debug("No Openshift Token credential defined."); return; } final OpenShiftClient client = getAuthenticatedOpenShiftClient(); @@ -154,7 +160,7 @@ private void upsertJob(final BuildConfig buildConfig) { try { ACL.impersonate(ACL.SYSTEM, new JobProcessor(this, buildConfig)); } catch (Exception e) { - LOGGER.severe("Error while trying to insert JobRun: " + e); + LOGGER.error("Error while trying to insert JobRun: " + e); } } @@ -162,7 +168,8 @@ private void upsertJob(final BuildConfig buildConfig) { try { cleanupJobsMissingStartBuildEvent(buildConfig); } catch (Exception e) { - LOGGER.severe("Error while trying to clean up orphan JobRuns: " + e); + LOGGER.error("Error while trying to clean up orphan JobRuns: " + e); + e.printStackTrace(); } } @@ -217,7 +224,7 @@ private void onInit(List list) { try { upsertJob(buildConfig); } catch (Exception e) { - LOGGER.log(SEVERE, "Failed to update job", e); + LOGGER.error("Failed to update job", e); } } // poke the BuildWatcher builds with no BC list and see if we diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/BuildConfigToJobMap.java b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildConfigToJobMap.java index 8091052c3..d21ada839 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/BuildConfigToJobMap.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildConfigToJobMap.java @@ -17,7 +17,7 @@ public class BuildConfigToJobMap { private final static Logger logger = Logger.getLogger(BuildConfigToJobMap.class.getName()); - private static ConcurrentHashMap buildConfigToJobMap; 
+ private static ConcurrentHashMap buildConfigToJobMap = new ConcurrentHashMap(); private BuildConfigToJobMap() { } diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/BuildInformer.java b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildInformer.java index f4095d7a2..23d37a3cd 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/BuildInformer.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildInformer.java @@ -27,7 +27,6 @@ import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getAnnotation; import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getAuthenticatedOpenShiftClient; import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getInformerFactory; -import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getOpenshiftClient; import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.isCancellable; import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.isCancelled; import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.isNew; @@ -41,12 +40,12 @@ import java.util.List; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; -import java.util.logging.Level; -import java.util.logging.Logger; import org.apache.commons.lang.StringUtils; import org.jenkinsci.plugins.workflow.job.WorkflowJob; import org.jenkinsci.plugins.workflow.job.WorkflowRun; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import hudson.security.ACL; import io.fabric8.kubernetes.api.model.ObjectMeta; @@ -64,22 +63,22 @@ public class BuildInformer extends BuildWatcher implements ResourceEventHandler { - private static final Logger LOGGER = Logger.getLogger(BuildInformer.class.getName()); + private static final Logger LOGGER = LoggerFactory.getLogger(BuildInformer.class.getName()); private final static BuildComparator BUILD_COMPARATOR = new BuildComparator(); - - // now that listing interval is 5 minutes (used to be 10 seconds), we have - // seen timing windows where if the build watch events come
before build config - // watch events when both are created in a simultaneous fashion, there is an up - // to 5 minutes delay before the job run gets kicked off started seeing - // duplicate builds getting kicked off so quit depending on so moved off of - // concurrent hash set to concurrent hash map using namepace/name key - private SharedIndexInformer informer; public BuildInformer(String namespace) { super(namespace); } + /** + * now that listing interval is 5 minutes (used to be 10 seconds), we have seen + * timing windows where if the build watch events come before build config watch + * events when both are created in a simultaneous fashion, there is an up to 5 + * minutes delay before the job run gets kicked off started seeing duplicate + * builds getting kicked off so quit depending on so moved off of concurrent + * hash set to concurrent hash map using namepace/name key + */ @Override public int getListIntervalInSeconds() { return 1_000 * GlobalPluginConfiguration.get().getBuildListInterval(); @@ -87,41 +86,43 @@ public int getListIntervalInSeconds() { public void start() { LOGGER.info("Starting Build informer for {} !!" 
+ namespace); - LOGGER.fine("Listing Build resources"); + LOGGER.debug("Listing Build resources"); SharedInformerFactory factory = getInformerFactory().inNamespace(namespace); this.informer = factory.sharedIndexInformerFor(Build.class, getListIntervalInSeconds()); this.informer.addEventHandler(this); - factory.startAllRegisteredInformers(); LOGGER.info("Build informer started for namespace: {}" + namespace); - BuildList list = getOpenshiftClient().builds().inNamespace(namespace).list(); - onInit(list.getItems()); - } - - public void startAfterOnClose(String namespace) { - synchronized (this.lock) { - start(); - } +// BuildList list = getOpenshiftClient().builds().inNamespace(namespace).list(); +// onInit(list.getItems()); } @Override public void onAdd(Build obj) { - LOGGER.fine("Build informer received add event for: {}" + obj); - ObjectMeta metadata = obj.getMetadata(); - String name = metadata.getName(); - LOGGER.info("Build informer received add event for: {}" + name); - addEventToJenkinsJobRun(obj); + LOGGER.debug("Build informer received add event for: {}" + obj); + if (obj != null) { + ObjectMeta metadata = obj.getMetadata(); + String name = metadata.getName(); + LOGGER.info("Build informer received add event for: {}" + name); + addEventToJenkinsJobRun(obj); + } } @Override public void onUpdate(Build oldObj, Build newObj) { - LOGGER.info("Build informer received update event for: {} to: {}" + oldObj + newObj); - modifyEventToJenkinsJobRun(newObj); + LOGGER.debug("Build informer received update event for: {} to: {}" + oldObj + " " + newObj); + if (newObj != null) { + String oldRv = oldObj.getMetadata().getResourceVersion(); + String newRv = newObj.getMetadata().getResourceVersion(); + LOGGER.info("Build informer received update event for: {} to: {}" + oldRv + " " + newRv); + modifyEventToJenkinsJobRun(newObj); + } } @Override public void onDelete(Build obj, boolean deletedFinalStateUnknown) { LOGGER.info("Build informer received delete event for: {}" + obj); - 
deleteEventToJenkinsJobRun(obj); + if (obj != null) { + deleteEventToJenkinsJobRun(obj); + } } public static void onInit(List list) { @@ -234,7 +235,7 @@ public static boolean addEventToJenkinsJobRun(Build build) { try { return triggerJob(job, build); } catch (IOException e) { - LOGGER.severe("Error while trying to trigger Job: " + e); + LOGGER.error("Error while trying to trigger Job: " + e); } } LOGGER.info("skipping watch event for build " + build.getMetadata().getName() + " no job at this time"); @@ -250,7 +251,7 @@ private static void addBuildToNoBCList(Build build) { buildsWithNoBCList.put(build.getMetadata().getNamespace() + build.getMetadata().getName(), build); } catch (ConcurrentModificationException | IllegalArgumentException | UnsupportedOperationException | NullPointerException e) { - LOGGER.log(Level.WARNING, "Failed to add item " + build.getMetadata().getName(), e); + LOGGER.warn( "Failed to add item " + build.getMetadata().getName(), e); } } @@ -274,7 +275,7 @@ public static void flushBuildsWithNoBCList() { LOGGER.info("triggering job run for previously skipped build " + build.getMetadata().getName()); triggerJob(job, build); } catch (IOException e) { - LOGGER.log(Level.WARNING, "flushBuildsWithNoBCList", e); + LOGGER.warn( "flushBuildsWithNoBCList", e); } try { synchronized (buildsWithNoBCList) { @@ -289,7 +290,7 @@ public static void flushBuildsWithNoBCList() { // over extended usage ... 
probably can remove at some // point anyRemoveFailures = true; - LOGGER.log(Level.WARNING, "flushBuildsWithNoBCList", t); + LOGGER.warn( "flushBuildsWithNoBCList", t); } } @@ -345,7 +346,7 @@ private static void deleteEventToJenkinsJobRun(final Build build) { try { innerDeleteEventToJenkinsJobRun(build); } catch (Exception e) { - LOGGER.severe("Error while trying to delete JobRun: " + e); + LOGGER.error("Error while trying to delete JobRun: " + e); } return; } @@ -355,7 +356,7 @@ private static void deleteEventToJenkinsJobRun(final Build build) { try { innerDeleteEventToJenkinsJobRun(build); } catch (Exception e) { - LOGGER.severe("Error while trying to delete JobRun: " + e); + LOGGER.error("Error while trying to delete JobRun: " + e); } } @@ -366,7 +367,7 @@ private static void deleteEventToJenkinsJobRun(final Build build) { */ @SuppressWarnings("deprecation") private static void reconcileRunsAndBuilds() { - LOGGER.fine("Reconciling job runs and builds"); + LOGGER.debug("Reconciling job runs and builds"); List jobs = Jenkins.getActiveInstance().getAllItems(WorkflowJob.class); for (WorkflowJob job : jobs) { BuildConfigProjectProperty property = job.getProperty(BuildConfigProjectProperty.class); @@ -374,7 +375,7 @@ private static void reconcileRunsAndBuilds() { String ns = property.getNamespace(); String name = property.getName(); if (StringUtils.isNotBlank(ns) && StringUtils.isNotBlank(name)) { - LOGGER.fine("Checking job " + job + " runs for BuildConfig " + ns + "/" + name); + LOGGER.debug("Checking job " + job + " runs for BuildConfig " + ns + "/" + name); OpenShiftClient client = getAuthenticatedOpenShiftClient(); BuildList builds = client.builds().inNamespace(ns).withLabel("buildconfig=" + name).list(); for (WorkflowRun run : job.getBuilds()) { diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/BuildSyncRunListener.java b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildSyncRunListener.java index 4e244c20d..a71b58876 100644 --- 
a/src/main/java/io/fabric8/jenkins/openshiftsync/BuildSyncRunListener.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildSyncRunListener.java @@ -15,6 +15,16 @@ */ package io.fabric8.jenkins.openshiftsync; +import static hudson.model.Result.ABORTED; +import static hudson.model.Result.FAILURE; +import static hudson.model.Result.SUCCESS; +import static hudson.model.Result.UNSTABLE; +import static io.fabric8.jenkins.openshiftsync.BuildPhases.CANCELLED; +import static io.fabric8.jenkins.openshiftsync.BuildPhases.COMPLETE; +import static io.fabric8.jenkins.openshiftsync.BuildPhases.FAILED; +import static io.fabric8.jenkins.openshiftsync.BuildPhases.NEW; +import static io.fabric8.jenkins.openshiftsync.BuildPhases.PENDING; +import static io.fabric8.jenkins.openshiftsync.BuildPhases.RUNNING; import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_ANNOTATIONS_JENKINS_BLUEOCEAN_LOG_URL; import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_ANNOTATIONS_JENKINS_BUILD_URI; import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_ANNOTATIONS_JENKINS_CONSOLE_LOG_URL; @@ -26,10 +36,6 @@ import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.formatTimestamp; import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getAuthenticatedOpenShiftClient; import static java.net.HttpURLConnection.HTTP_NOT_FOUND; -import static java.util.logging.Level.FINE; -import static java.util.logging.Level.INFO; -import static java.util.logging.Level.SEVERE; -import static java.util.logging.Level.WARNING; import java.io.IOException; import java.lang.reflect.Constructor; @@ -42,8 +48,6 @@ import java.util.concurrent.ConcurrentLinkedQueue; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; -import java.util.logging.Level; -import java.util.logging.Logger; import javax.annotation.Nonnull; @@ -52,6 +56,8 @@ import org.jenkinsci.plugins.workflow.support.steps.input.InputAction; import 
org.jenkinsci.plugins.workflow.support.steps.input.InputStepExecution; import org.kohsuke.stapler.DataBoundConstructor; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import com.cloudbees.workflow.rest.external.AtomFlowNodeExt; import com.cloudbees.workflow.rest.external.FlowNodeExt; @@ -85,10 +91,10 @@ * current status, logsURL and metrics */ @Extension +@SuppressWarnings({ "rawtypes", "unchecked", "deprecation" }) public class BuildSyncRunListener extends RunListener { private static final String KUBERNETES_NAMESPACE = "KUBERNETES_NAMESPACE"; - - private static final Logger logger = Logger.getLogger(BuildSyncRunListener.class.getName()); + private static final Logger logger = LoggerFactory.getLogger(BuildSyncRunListener.class.getName()); private long pollPeriodMs = 1000 * 5; // 5 seconds private long delayPollPeriodMs = 1000; // 1 seconds @@ -139,14 +145,14 @@ public void onStarted(Run run, TaskListener listener) { run.setDescription(cause.getShortDescription()); } } catch (IOException e) { - logger.log(WARNING, "Cannot set build description: " + e); + logger.warn("Cannot set build description: " + e); } if (runsToPoll.add(run)) { logger.info("starting polling build " + run.getUrl()); } checkTimerStarted(); } else { - logger.fine("not polling polling build " + run.getUrl() + " as its not a WorkflowJob"); + logger.trace("not polling polling build " + run.getUrl() + " as its not a WorkflowJob"); } super.onStarted(run, listener); } @@ -190,7 +196,8 @@ public void onFinalized(Run run) { if (shouldPollRun(run)) { runsToPoll.remove(run); pollRun(run); - logger.info("onFinalized " + run.getUrl()); + String jenkinsURL = Jenkins.get().getRootUrl(); + logger.info("Run COMPLETED: Build details can be accessed at: " + jenkinsURL + run.getUrl()); } super.onFinalized(run); } @@ -218,7 +225,7 @@ protected void pollRun(Run run) { // bumped // by another dependency vs. 
our bumping it explicitly, I want to // find out quickly that we need to switch methods again - logger.log(Level.WARNING, "pollRun", t); + logger.warn("pollRun", t); } try { @@ -226,7 +233,7 @@ protected void pollRun(Run run) { } catch (KubernetesClientException e) { if (e.getCode() == HttpStatus.SC_UNPROCESSABLE_ENTITY) { runsToPoll.remove(run); - logger.log(WARNING, "Cannot update status: {0}", e.getMessage()); + logger.warn("Cannot update status: {0}", e.getMessage()); return; } throw e; @@ -236,7 +243,7 @@ protected void pollRun(Run run) { private boolean shouldUpdateOpenShiftBuild(BuildCause cause, int latestStageNum, int latestNumFlowNodes, StatusExt status) { long currTime = TimeUnit.NANOSECONDS.toMillis(System.nanoTime()); - logger.fine(String.format( + logger.debug(String.format( "shouldUpdateOpenShiftBuild curr time %s last update %s curr stage num %s last stage num %s" + "curr flow num %s last flow num %s status %s", String.valueOf(currTime), String.valueOf(cause.getLastUpdateToOpenshift()), @@ -317,8 +324,7 @@ private void upsertBuild(Run run, RunExt wfRunExt, BlueRun blueRun) { } } } catch (Throwable t) { - if (logger.isLoggable(Level.FINE)) - logger.log(Level.FINE, "upsertBuild", t); + logger.error("upsertBuild", t); } Map blueRunResults = new HashMap(); @@ -388,7 +394,7 @@ private void upsertBuild(Run run, RunExt wfRunExt, BlueRun blueRun) { try { json = new ObjectMapper().writeValueAsString(wfRunExt); } catch (JsonProcessingException e) { - logger.log(SEVERE, "Failed to serialize workflow run. " + e, e); + logger.error("Failed to serialize workflow run. 
" + e, e); return; } @@ -412,7 +418,7 @@ private void upsertBuild(Run run, RunExt wfRunExt, BlueRun blueRun) { } String name = cause.getName(); - logger.log(FINE, "Patching build {0}/{1}: setting phase to {2}", new Object[] { ns, name, phase }); + logger.debug("Patching build {0}/{1}: setting phase to {2}", new Object[] { ns, name, phase }); try { Map annotations = new HashMap(); @@ -430,7 +436,8 @@ private void upsertBuild(Run run, RunExt wfRunExt, BlueRun blueRun) { } final String finalStartTime = startTime; final String finalCompletionTime = completionTime; - logger.log(INFO, "Creating a new build builder: "); + logger.info("Setting build status values to: {}:[ {} ]: {}->{}", name, phase, startTime, completionTime); + logger.debug("Setting build annotations values to: {} ]", annotations); getAuthenticatedOpenShiftClient().builds().inNamespace(ns).withName(name) .edit(b -> new BuildBuilder(b).editMetadata().withAnnotations(annotations).endMetadata() .editStatus().withPhase(phase).withStartTimestamp(finalStartTime) @@ -458,7 +465,7 @@ private String getPendingActionsJson(WorkflowRun run) { try { executions = inputAction.getExecutions(); } catch (Exception e) { - logger.log(SEVERE, "Failed to get Excecutions:" + e, e); + logger.error("Failed to get Excecutions:" + e, e); return null; } if (executions != null && !executions.isEmpty()) { @@ -470,7 +477,7 @@ private String getPendingActionsJson(WorkflowRun run) { try { return new ObjectMapper().writeValueAsString(pendingInputActions); } catch (JsonProcessingException e) { - logger.log(SEVERE, "Failed to serialize pending actions. " + e, e); + logger.error("Failed to serialize pending actions. 
" + e, e); return null; } } @@ -486,25 +493,25 @@ private long getDuration(Run run) { private String runToBuildPhase(Run run) { if (run != null && !run.hasntStartedYet()) { if (run.isBuilding()) { - return BuildPhases.RUNNING; + return RUNNING; } else { Result result = run.getResult(); if (result != null) { - if (result.equals(Result.SUCCESS)) { - return BuildPhases.COMPLETE; - } else if (result.equals(Result.ABORTED)) { - return BuildPhases.CANCELLED; - } else if (result.equals(Result.FAILURE)) { - return BuildPhases.FAILED; - } else if (result.equals(Result.UNSTABLE)) { - return BuildPhases.FAILED; + if (result.equals(SUCCESS)) { + return COMPLETE; + } else if (result.equals(ABORTED)) { + return CANCELLED; + } else if (result.equals(FAILURE)) { + return FAILED; + } else if (result.equals(UNSTABLE)) { + return FAILED; } else { - return BuildPhases.PENDING; + return PENDING; } } } } - return BuildPhases.NEW; + return NEW; } /** diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/BuildToActionMapper.java b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildToActionMapper.java index 9708aa300..168666b06 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/BuildToActionMapper.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildToActionMapper.java @@ -23,8 +23,8 @@ public class BuildToActionMapper { - private static Map buildToParametersMap; - private static Map buildToCauseMap; + private static Map buildToParametersMap = new ConcurrentHashMap();; + private static Map buildToCauseMap = new ConcurrentHashMap(); private BuildToActionMapper() { } @@ -38,8 +38,7 @@ static synchronized void initialize() { } } - static synchronized void addParameterAction(String buildId, - ParametersAction params) { + static synchronized void addParameterAction(String buildId, ParametersAction params) { buildToParametersMap.put(buildId, params); } diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/ConfigMapInformer.java 
b/src/main/java/io/fabric8/jenkins/openshiftsync/ConfigMapInformer.java index 449b8edbc..7efcb3ecd 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/ConfigMapInformer.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/ConfigMapInformer.java @@ -16,26 +16,24 @@ package io.fabric8.jenkins.openshiftsync; import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getInformerFactory; -import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getOpenshiftClient; import static io.fabric8.jenkins.openshiftsync.PodTemplateUtils.CONFIGMAP; import java.util.List; import java.util.concurrent.TimeUnit; -import java.util.logging.Logger; import org.csanchez.jenkins.plugins.kubernetes.PodTemplate; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import io.fabric8.kubernetes.api.model.ConfigMap; -import io.fabric8.kubernetes.api.model.ConfigMapList; import io.fabric8.kubernetes.api.model.ObjectMeta; import io.fabric8.kubernetes.client.informers.ResourceEventHandler; import io.fabric8.kubernetes.client.informers.SharedIndexInformer; import io.fabric8.kubernetes.client.informers.SharedInformerFactory; public class ConfigMapInformer extends ConfigMapWatcher implements ResourceEventHandler { - private final static Logger LOGGER = Logger.getLogger(ConfigMapWatcher.class.getName()); - private static final long RESYNC_PERIOD = 30 * 1000L; + private static final Logger LOGGER = LoggerFactory.getLogger(ConfigMapInformer.class.getName()); private SharedIndexInformer informer; public ConfigMapInformer(String namespace) { @@ -44,19 +42,18 @@ public ConfigMapInformer(String namespace) { @Override public int getListIntervalInSeconds() { - return GlobalPluginConfiguration.get().getConfigMapListInterval(); + return 1_000 * GlobalPluginConfiguration.get().getConfigMapListInterval(); } public void start() { LOGGER.info("Starting configMap informer for {} !!" 
+ namespace); - LOGGER.fine("listing ConfigMap resources"); + LOGGER.debug("listing ConfigMap resources"); SharedInformerFactory factory = getInformerFactory().inNamespace(namespace); - this.informer = factory.sharedIndexInformerFor(ConfigMap.class, RESYNC_PERIOD); + this.informer = factory.sharedIndexInformerFor(ConfigMap.class, getListIntervalInSeconds()); informer.addEventHandler(this); - factory.startAllRegisteredInformers(); LOGGER.info("ConfigMap informer started for namespace: {}" + namespace); - ConfigMapList list = getOpenshiftClient().configMaps().inNamespace(namespace).list(); - onInit(list.getItems()); +// ConfigMapList list = getOpenshiftClient().configMaps().inNamespace(namespace).list(); +// onInit(list.getItems()); } public void stop() { @@ -66,39 +63,45 @@ public void stop() { @Override public void onAdd(ConfigMap obj) { - LOGGER.fine("ConfigMap informer received add event for: {}" + obj); - ObjectMeta metadata = obj.getMetadata(); - String name = metadata.getName(); - LOGGER.info("ConfigMap informer received add event for: {}" + name); - List podTemplates = PodTemplateUtils.podTemplatesFromConfigMap(obj); - String uid = metadata.getUid(); - String namespace = metadata.getNamespace(); - PodTemplateUtils.addAgents(podTemplates, CONFIGMAP, uid, name, namespace); + LOGGER.debug("ConfigMap informer received add event for: {}" + obj); + if (obj != null) { + ObjectMeta metadata = obj.getMetadata(); + String name = metadata.getName(); + LOGGER.info("ConfigMap informer received add event for: {}" + name); + List podTemplates = PodTemplateUtils.podTemplatesFromConfigMap(obj); + String uid = metadata.getUid(); + String namespace = metadata.getNamespace(); + PodTemplateUtils.addAgents(podTemplates, CONFIGMAP, uid, name, namespace); + } } @Override public void onUpdate(ConfigMap oldObj, ConfigMap newObj) { - LOGGER.fine("ConfigMap informer received update event for: {} to: {}" + oldObj + newObj); - String oldResourceVersion = oldObj.getMetadata() != null ? 
oldObj.getMetadata().getResourceVersion() : null; - String newResourceVersion = newObj.getMetadata() != null ? newObj.getMetadata().getResourceVersion() : null; - LOGGER.info("Update event received resource versions: {} to: {}" + oldResourceVersion + newResourceVersion); - List podTemplates = PodTemplateUtils.podTemplatesFromConfigMap(newObj); - ObjectMeta metadata = newObj.getMetadata(); - String uid = metadata.getUid(); - String name = metadata.getName(); - String namespace = metadata.getNamespace(); - PodTemplateUtils.updateAgents(podTemplates, CONFIGMAP, uid, name, namespace); + LOGGER.debug("ConfigMap informer received update event for: {} to: {}" + oldObj + newObj); + if (oldObj != null) { + String oldResourceVersion = oldObj.getMetadata() != null ? oldObj.getMetadata().getResourceVersion() : null; + String newResourceVersion = newObj.getMetadata() != null ? newObj.getMetadata().getResourceVersion() : null; + LOGGER.info("Update event received resource versions: {} to: {}" + oldResourceVersion + newResourceVersion); + List podTemplates = PodTemplateUtils.podTemplatesFromConfigMap(newObj); + ObjectMeta metadata = newObj.getMetadata(); + String uid = metadata.getUid(); + String name = metadata.getName(); + String namespace = metadata.getNamespace(); + PodTemplateUtils.updateAgents(podTemplates, CONFIGMAP, uid, name, namespace); + } } @Override public void onDelete(ConfigMap obj, boolean deletedFinalStateUnknown) { - LOGGER.fine("ConfigMap informer received delete event for: {}" + obj); - List podTemplates = PodTemplateUtils.podTemplatesFromConfigMap(obj); - ObjectMeta metadata = obj.getMetadata(); - String uid = metadata.getUid(); - String name = metadata.getName(); - String namespace = metadata.getNamespace(); - PodTemplateUtils.deleteAgents(podTemplates, CONFIGMAP, uid, name, namespace); + LOGGER.debug("ConfigMap informer received delete event for: {}" + obj); + if (obj != null) { + List podTemplates = PodTemplateUtils.podTemplatesFromConfigMap(obj); + 
ObjectMeta metadata = obj.getMetadata(); + String uid = metadata.getUid(); + String name = metadata.getName(); + String namespace = metadata.getNamespace(); + PodTemplateUtils.deleteAgents(podTemplates, CONFIGMAP, uid, name, namespace); + } } private void onInit(List list) { diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/GenericEventHandler.java b/src/main/java/io/fabric8/jenkins/openshiftsync/GenericEventHandler.java new file mode 100644 index 000000000..1a39e8903 --- /dev/null +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/GenericEventHandler.java @@ -0,0 +1,30 @@ +package io.fabric8.jenkins.openshiftsync; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import io.fabric8.kubernetes.api.model.HasMetadata; +import io.fabric8.kubernetes.client.informers.ResourceEventHandler; + +public class GenericEventHandler implements ResourceEventHandler { + private Logger logger = LoggerFactory.getLogger(this.getClass().getName()); + + public void onAdd(T obj) { + String className = obj.getClass().getSimpleName(); + final String name = obj.getMetadata().getName(); + logger.info("{}/{} added", className, name); + } + + public void onUpdate(T oldObj, T newObj) { + String className = oldObj.getClass().getSimpleName(); + final String name = oldObj.getMetadata().getName(); + logger.info("{}/{} updated", className, name); + } + + public void onDelete(T obj, boolean deletedFinalStateUnknown) { + String className = obj.getClass().getSimpleName(); + final String name = obj.getMetadata().getName(); + logger.info("{}/{} deleted", className, name); + } + +} \ No newline at end of file diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/GlobalPluginConfigurationTimerTask.java b/src/main/java/io/fabric8/jenkins/openshiftsync/GlobalPluginConfigurationTimerTask.java index 2cb0677c5..6e59ebc75 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/GlobalPluginConfigurationTimerTask.java +++ 
b/src/main/java/io/fabric8/jenkins/openshiftsync/GlobalPluginConfigurationTimerTask.java @@ -2,6 +2,7 @@ import static hudson.init.InitMilestone.COMPLETED; import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getAuthenticatedOpenShiftClient; +import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getInformerFactory; import java.util.ArrayList; import java.util.List; @@ -66,29 +67,29 @@ protected void doRun() throws Exception { SecretInformer secretInformer = new SecretInformer(namespace); secretInformer.start(); watchers.add(secretInformer); - } - logger.info("All the watchers have been initialized!!"); - OpenShiftClient client = getAuthenticatedOpenShiftClient(); - SharedInformerFactory informerFactory = client.informers(); - informerFactory.startAllRegisteredInformers(); - - synchronized (watchers) { - List> globalWatchers = GlobalPluginConfiguration.getWatchers(); - synchronized (globalWatchers) { - logger.info("Existing watchers: " + globalWatchers); - for (BaseWatcher watch : globalWatchers) { - watch.stop(); - } - globalWatchers.clear(); - logger.info("Existing watchers: stopped and cleared : " + globalWatchers); - globalWatchers.addAll(watchers); - logger.info("New watchers created : " + globalWatchers.size()); + + logger.info("All the watchers have been registered!! ... 
starting all registered informers"); + getInformerFactory().startAllRegisteredInformers(); + logger.info("All registered informers have been started"); - } - } +// synchronized (watchers) { +// List> globalWatchers = GlobalPluginConfiguration.getWatchers(); +// synchronized (globalWatchers) { +// logger.info("Existing watchers: " + globalWatchers); +// for (BaseWatcher watch : globalWatchers) { +// watch.stop(); +// } +// globalWatchers.clear(); +// logger.info("Existing watchers: stopped and cleared : " + globalWatchers); +// globalWatchers.addAll(watchers); +// logger.info("New watchers created : " + globalWatchers.size()); +// +// } +// } } catch (Exception e) { logger.severe(e.toString()); + e.printStackTrace(); } } } diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/ImageStreamInformer.java b/src/main/java/io/fabric8/jenkins/openshiftsync/ImageStreamInformer.java index 3efa1382d..be3760d59 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/ImageStreamInformer.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/ImageStreamInformer.java @@ -18,7 +18,6 @@ import static io.fabric8.jenkins.openshiftsync.Constants.IMAGESTREAM_AGENT_LABEL; import static io.fabric8.jenkins.openshiftsync.Constants.IMAGESTREAM_AGENT_LABEL_VALUE; import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getInformerFactory; -import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getOpenshiftClient; import static io.fabric8.jenkins.openshiftsync.PodTemplateUtils.IMAGESTREAM_TYPE; import static io.fabric8.jenkins.openshiftsync.PodTemplateUtils.addAgents; import static io.fabric8.jenkins.openshiftsync.PodTemplateUtils.addPodTemplate; @@ -27,13 +26,13 @@ import static io.fabric8.jenkins.openshiftsync.PodTemplateUtils.hasPodTemplate; import static io.fabric8.jenkins.openshiftsync.PodTemplateUtils.updateAgents; import static java.util.Collections.singletonMap; -import static java.util.logging.Level.SEVERE; import java.util.List; import java.util.Map; -import 
java.util.logging.Logger; import org.csanchez.jenkins.plugins.kubernetes.PodTemplate; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import io.fabric8.kubernetes.api.model.ObjectMeta; import io.fabric8.kubernetes.client.dsl.base.OperationContext; @@ -41,16 +40,16 @@ import io.fabric8.kubernetes.client.informers.SharedIndexInformer; import io.fabric8.kubernetes.client.informers.SharedInformerFactory; import io.fabric8.openshift.api.model.ImageStream; -import io.fabric8.openshift.api.model.ImageStreamList; public class ImageStreamInformer extends ImageStreamWatcher implements ResourceEventHandler { + + private static final Logger LOGGER = LoggerFactory.getLogger(ImageStreamInformer.class.getName()); + private SharedIndexInformer informer; + public ImageStreamInformer(String namespace) { super(namespace); } - private final static Logger LOGGER = Logger.getLogger(ImageStreamInformer.class.getName()); - private SharedIndexInformer informer; - @Override public int getListIntervalInSeconds() { return 1_000 * GlobalPluginConfiguration.get().getImageStreamListInterval(); @@ -58,22 +57,15 @@ public int getListIntervalInSeconds() { public void start() { LOGGER.info("Starting ImageStream informer for {} !!" 
+ namespace); - LOGGER.fine("Listing ImageStream resources"); + LOGGER.debug("Listing ImageStream resources"); SharedInformerFactory factory = getInformerFactory().inNamespace(namespace); Map labels = singletonMap(IMAGESTREAM_AGENT_LABEL, IMAGESTREAM_AGENT_LABEL_VALUE); OperationContext withLabels = new OperationContext().withLabels(labels); this.informer = factory.sharedIndexInformerFor(ImageStream.class, withLabels, getListIntervalInSeconds()); informer.addEventHandler(this); - factory.startAllRegisteredInformers(); LOGGER.info("ImageStream informer started for namespace: {}" + namespace); - ImageStreamList list = getOpenshiftClient().imageStreams().inNamespace(namespace).withLabels(labels).list(); - onInit(list.getItems()); - } - - public void startAfterOnClose(String namespace) { - synchronized (this.lock) { - start(); - } +// ImageStreamList list = getOpenshiftClient().imageStreams().inNamespace(namespace).withLabels(labels).list(); +// onInit(list.getItems()); } public void stop() { @@ -83,35 +75,41 @@ public void stop() { @Override public void onAdd(ImageStream obj) { - LOGGER.fine("ImageStream informer received add event for: {}" + obj); - ObjectMeta metadata = obj.getMetadata(); - String name = metadata.getName(); - String uid = metadata.getUid(); - LOGGER.info("ImageStream informer received add event for: {}" + name); - List slaves = PodTemplateUtils.getPodTemplatesListFromImageStreams(obj); - addAgents(slaves, IMAGESTREAM_TYPE, uid, name, namespace); + LOGGER.debug("ImageStream informer received add event for: {}" + obj); + if (obj != null) { + ObjectMeta metadata = obj.getMetadata(); + String name = metadata.getName(); + String uid = metadata.getUid(); + LOGGER.info("ImageStream informer received add event for: {}" + name); + List slaves = PodTemplateUtils.getPodTemplatesListFromImageStreams(obj); + addAgents(slaves, IMAGESTREAM_TYPE, uid, name, namespace); + } } @Override public void onUpdate(ImageStream oldObj, ImageStream newObj) { 
LOGGER.info("ImageStream informer received update event for: {} to: {}" + oldObj + newObj); - List slaves = PodTemplateUtils.getPodTemplatesListFromImageStreams(newObj); - ObjectMeta metadata = newObj.getMetadata(); - String uid = metadata.getUid(); - String name = metadata.getName(); - String namespace = metadata.getNamespace(); - updateAgents(slaves, IMAGESTREAM_TYPE, uid, name, namespace); + if (newObj != null) { + List slaves = PodTemplateUtils.getPodTemplatesListFromImageStreams(newObj); + ObjectMeta metadata = newObj.getMetadata(); + String uid = metadata.getUid(); + String name = metadata.getName(); + String namespace = metadata.getNamespace(); + updateAgents(slaves, IMAGESTREAM_TYPE, uid, name, namespace); + } } @Override public void onDelete(ImageStream obj, boolean deletedFinalStateUnknown) { LOGGER.info("ImageStream informer received delete event for: {}" + obj); - List slaves = PodTemplateUtils.getPodTemplatesListFromImageStreams(obj); - ObjectMeta metadata = obj.getMetadata(); - String uid = metadata.getUid(); - String name = metadata.getName(); - String namespace = metadata.getNamespace(); - deleteAgents(slaves, IMAGESTREAM_TYPE, uid, name, namespace); + if (obj != null) { + List slaves = PodTemplateUtils.getPodTemplatesListFromImageStreams(obj); + ObjectMeta metadata = obj.getMetadata(); + String uid = metadata.getUid(); + String name = metadata.getName(); + String namespace = metadata.getNamespace(); + deleteAgents(slaves, IMAGESTREAM_TYPE, uid, name, namespace); + } } @@ -127,7 +125,7 @@ private void onInit(List list) { } } } catch (Exception e) { - LOGGER.log(SEVERE, "Failed to update job", e); + LOGGER.error("Failed to update job", e); } } } diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/OpenShiftUtils.java b/src/main/java/io/fabric8/jenkins/openshiftsync/OpenShiftUtils.java index c996ddbfd..246338e87 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/OpenShiftUtils.java +++ 
b/src/main/java/io/fabric8/jenkins/openshiftsync/OpenShiftUtils.java @@ -20,6 +20,7 @@ import static io.fabric8.jenkins.openshiftsync.BuildPhases.RUNNING; import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_DEFAULT_NAMESPACE; import static java.util.logging.Level.FINE; +import static java.util.logging.Level.INFO; import java.io.BufferedReader; import java.io.File; @@ -75,6 +76,7 @@ import io.fabric8.openshift.client.OpenShiftClient; import io.fabric8.openshift.client.OpenShiftConfigBuilder; import jenkins.model.Jenkins; +import okhttp3.Dispatcher; /** */ @@ -84,8 +86,9 @@ public class OpenShiftUtils { private static OpenShiftClient openShiftClient; private static String jenkinsPodNamespace = null; - private static SharedInformerFactory factory; + private static final Jenkins JENKINS_INSTANCE = Jenkins.getInstanceOrNull(); + private static final Object lock = new Object(); static { jenkinsPodNamespace = System.getProperty(Constants.OPENSHIFT_PROJECT_ENV_VAR_NAME); @@ -130,18 +133,24 @@ public class OpenShiftUtils { * is running */ public synchronized static void initializeOpenShiftClient(String serverUrl) { + if (openShiftClient != null) { + logger.log(INFO, "Closing already initialized openshift client"); + openShiftClient.close(); + } OpenShiftConfigBuilder configBuilder = new OpenShiftConfigBuilder(); if (serverUrl != null && !serverUrl.isEmpty()) { configBuilder.withMasterUrl(serverUrl); } Config config = configBuilder.build(); - config.setUserAgent("openshift-sync-plugin-" - + Jenkins.getInstance().getPluginManager().getPlugin("openshift-sync").getVersion() + "/fabric8-" - + Version.clientVersion()); + String version = JENKINS_INSTANCE.getPluginManager().getPlugin("openshift-sync").getVersion(); + config.setUserAgent("openshift-sync-plugin-" + version + "/fabric8-" + Version.clientVersion()); openShiftClient = new DefaultOpenShiftClient(config); + logger.log(INFO, "New OpenShift client initialized: " + openShiftClient); + DefaultOpenShiftClient 
defClient = (DefaultOpenShiftClient) openShiftClient; - defClient.getHttpClient().dispatcher().setMaxRequestsPerHost(100); - defClient.getHttpClient().dispatcher().setMaxRequests(100); + Dispatcher dispatcher = defClient.getHttpClient().dispatcher(); + dispatcher.setMaxRequestsPerHost(100); + dispatcher.setMaxRequests(100); } public synchronized static OpenShiftClient getOpenShiftClient() { @@ -157,13 +166,14 @@ public synchronized static OpenShiftClient getAuthenticatedOpenShiftClient() { openShiftClient.getConfiguration().setOauthToken(token); } } - return openShiftClient; } - public synchronized static SharedInformerFactory getInformerFactory() { + public static SharedInformerFactory getInformerFactory() { if (factory == null) { - factory = getAuthenticatedOpenShiftClient().informers(); + synchronized (lock) { + factory = getAuthenticatedOpenShiftClient().informers(); + } } return factory; } diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/SecretInformer.java b/src/main/java/io/fabric8/jenkins/openshiftsync/SecretInformer.java index a0062ad4c..f6e1681ef 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/SecretInformer.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/SecretInformer.java @@ -18,19 +18,17 @@ import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_LABELS_SECRET_CREDENTIAL_SYNC; import static io.fabric8.jenkins.openshiftsync.Constants.VALUE_SECRET_SYNC; import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getInformerFactory; -import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getOpenshiftClient; import static java.util.Collections.singletonMap; -import static java.util.logging.Level.SEVERE; import java.util.List; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; -import java.util.logging.Logger; -import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import io.fabric8.kubernetes.api.model.ObjectMeta; import 
io.fabric8.kubernetes.api.model.Secret; -import io.fabric8.kubernetes.api.model.SecretList; import io.fabric8.kubernetes.client.dsl.base.OperationContext; import io.fabric8.kubernetes.client.informers.ResourceEventHandler; import io.fabric8.kubernetes.client.informers.SharedIndexInformer; @@ -38,34 +36,32 @@ public class SecretInformer extends SecretWatcher implements ResourceEventHandler { - private final static Logger LOGGER = Logger.getLogger(SecretInformer.class.getName()); + private static final Logger LOGGER = LoggerFactory.getLogger(SecretInformer.class.getName()); + private final static ConcurrentHashMap trackedSecrets = new ConcurrentHashMap(); - private static final long RESYNC_PERIOD = 30 * 1000L; private SharedIndexInformer informer; - @SuppressFBWarnings("EI_EXPOSE_REP2") public SecretInformer(String namespace) { super(namespace); } @Override public int getListIntervalInSeconds() { - return GlobalPluginConfiguration.get().getSecretListInterval(); + return 1_000 * GlobalPluginConfiguration.get().getSecretListInterval(); } public void start() { LOGGER.info("Starting secret informer {} !!" 
+ namespace); - LOGGER.fine("listing Secret resources"); + LOGGER.debug("listing Secret resources"); SharedInformerFactory factory = getInformerFactory().inNamespace(namespace); Map labels = singletonMap(OPENSHIFT_LABELS_SECRET_CREDENTIAL_SYNC, VALUE_SECRET_SYNC); OperationContext withLabels = new OperationContext().withLabels(labels); - this.informer = factory.sharedIndexInformerFor(Secret.class, withLabels, RESYNC_PERIOD); + this.informer = factory.sharedIndexInformerFor(Secret.class, withLabels, getListIntervalInSeconds()); informer.addEventHandler(this); - factory.startAllRegisteredInformers(); LOGGER.info("Secret informer started for namespace: {}" + namespace); - SecretList list = getOpenshiftClient().secrets().inNamespace(namespace).withLabels(labels).list(); - onInit(list.getItems()); +// SecretList list = getOpenshiftClient().secrets().inNamespace(namespace).withLabels(labels).list(); +// onInit(list.getItems()); } public void stop() { @@ -75,23 +71,29 @@ public void stop() { @Override public void onAdd(Secret obj) { - LOGGER.fine("Secret informer received add event for: {}" + obj); - ObjectMeta metadata = obj.getMetadata(); - String name = metadata.getName(); - LOGGER.info("Secret informer received add event for: {}" + name); - insertOrUpdateCredentialFromSecret(obj); + LOGGER.debug("Secret informer received add event for: {}" + obj); + if (obj != null) { + ObjectMeta metadata = obj.getMetadata(); + String name = metadata.getName(); + LOGGER.info("Secret informer received add event for: {}" + name); + insertOrUpdateCredentialFromSecret(obj); + } } @Override public void onUpdate(Secret oldObj, Secret newObj) { LOGGER.info("Secret informer received update event for: {} to: {}" + oldObj + newObj); - updateCredential(newObj); + if (oldObj != null) { + updateCredential(newObj); + } } @Override public void onDelete(Secret obj, boolean deletedFinalStateUnknown) { LOGGER.info("Secret informer received delete event for: {}" + obj); - 
CredentialsUtils.deleteCredential(obj); + if (obj != null) { + CredentialsUtils.deleteCredential(obj); + } } private void onInit(List list) { @@ -102,7 +104,7 @@ private void onInit(List list) { trackedSecrets.put(secret.getMetadata().getUid(), secret.getMetadata().getResourceVersion()); } } catch (Exception e) { - LOGGER.log(SEVERE, "Failed to update secred", e); + LOGGER.error("Failed to update secred", e); } } } From d59310613456d9da5f508d7ae5594c517ab665a5 Mon Sep 17 00:00:00 2001 From: Akram Ben Aissi Date: Thu, 15 Apr 2021 17:27:41 +0200 Subject: [PATCH 13/22] Cluster Shared Informers --- .../openshiftsync/BuildClusterInformer.java | 140 +++++++ .../BuildConfigClusterInformer.java | 141 +++++++ .../openshiftsync/BuildConfigInformer.java | 189 ++------- .../openshiftsync/BuildConfigWatcher.java | 14 +- .../jenkins/openshiftsync/BuildInformer.java | 324 ++-------------- .../jenkins/openshiftsync/BuildManager.java | 365 ++++++++++++++++++ .../jenkins/openshiftsync/BuildWatcher.java | 236 +---------- .../ConfigMapClusterInformer.java | 143 +++++++ .../openshiftsync/ConfigMapInformer.java | 3 +- .../GlobalPluginConfiguration.java | 26 +- .../GlobalPluginConfigurationTimerTask.java | 88 +++-- .../ImageStreamClusterInformer.java | 147 +++++++ .../openshiftsync/ImageStreamInformer.java | 6 +- .../jenkins/openshiftsync/JenkinsUtils.java | 3 +- .../jenkins/openshiftsync/JobProcessor.java | 298 +++++++------- .../jenkins/openshiftsync/Lifecyclable.java | 7 + .../openshiftsync/SecretClusterInformer.java | 136 +++++++ .../jenkins/openshiftsync/SecretInformer.java | 18 +- .../jenkins/openshiftsync/SecretManager.java | 111 ++++++ .../jenkins/openshiftsync/SecretWatcher.java | 107 +---- 20 files changed, 1491 insertions(+), 1011 deletions(-) create mode 100644 src/main/java/io/fabric8/jenkins/openshiftsync/BuildClusterInformer.java create mode 100644 src/main/java/io/fabric8/jenkins/openshiftsync/BuildConfigClusterInformer.java create mode 100644 
src/main/java/io/fabric8/jenkins/openshiftsync/BuildManager.java create mode 100644 src/main/java/io/fabric8/jenkins/openshiftsync/ConfigMapClusterInformer.java create mode 100644 src/main/java/io/fabric8/jenkins/openshiftsync/ImageStreamClusterInformer.java create mode 100644 src/main/java/io/fabric8/jenkins/openshiftsync/Lifecyclable.java create mode 100644 src/main/java/io/fabric8/jenkins/openshiftsync/SecretClusterInformer.java create mode 100644 src/main/java/io/fabric8/jenkins/openshiftsync/SecretManager.java diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/BuildClusterInformer.java b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildClusterInformer.java new file mode 100644 index 000000000..28cf560f4 --- /dev/null +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildClusterInformer.java @@ -0,0 +1,140 @@ +/** + * Copyright (C) 2016 Red Hat, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package io.fabric8.jenkins.openshiftsync; + +import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getInformerFactory; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import io.fabric8.kubernetes.api.model.ObjectMeta; +import io.fabric8.kubernetes.client.informers.ResourceEventHandler; +import io.fabric8.kubernetes.client.informers.SharedIndexInformer; +import io.fabric8.kubernetes.client.informers.SharedInformerFactory; +import io.fabric8.openshift.api.model.Build; +import io.fabric8.openshift.api.model.BuildConfig; + +public class BuildClusterInformer implements ResourceEventHandler, Lifecyclable { + + private static final Logger LOGGER = LoggerFactory.getLogger(SecretInformer.class.getName()); + private final static BuildComparator BUILD_COMPARATOR = new BuildComparator(); + private SharedIndexInformer informer; + private Set namespaces; + + public BuildClusterInformer(String[] namespaces) { + this.namespaces = new HashSet<>(Arrays.asList(namespaces)); + } + + /** + * now that listing interval is 5 minutes (used to be 10 seconds), we have seen + * timing windows where if the build watch events come before build config watch + * events when both are created in a simultaneous fashion, there is an up to 5 + * minutes delay before the job run gets kicked off started seeing duplicate + * builds getting kicked off so quit depending on so moved off of concurrent + * hash set to concurrent hash map using namepace/name key + */ + public int getListIntervalInSeconds() { + return 1_000 * GlobalPluginConfiguration.get().getBuildListInterval(); + } + + public void start() { + LOGGER.info("Starting Build informer for {} !!" 
+ namespaces); + LOGGER.debug("Listing Build resources"); + SharedInformerFactory factory = getInformerFactory(); + this.informer = factory.sharedIndexInformerFor(Build.class, getListIntervalInSeconds()); + this.informer.addEventHandler(this); + LOGGER.info("Build informer started for namespace: {}" + namespaces); +// BuildList list = getOpenshiftClient().builds().inNamespace(namespace).list(); +// onInit(list.getItems()); + } + + public void stop() { + LOGGER.info("Stopping Builder informer {} !!" + namespaces); + this.informer.stop(); + } + + @Override + public void onAdd(Build obj) { + LOGGER.debug("Build informer received add event for: {}" + obj); + if (obj != null) { + ObjectMeta metadata = obj.getMetadata(); + String namespace = metadata.getNamespace(); + if (namespaces.contains(namespace)) { + String name = metadata.getName(); + LOGGER.info("Build informer received add event for: {}" + name); + try { + BuildManager.addEventToJenkinsJobRun(obj); + } catch (IOException e) { + // TODO Auto-generated catch block + e.printStackTrace(); + } + } + } + } + + @Override + public void onUpdate(Build oldObj, Build newObj) { + LOGGER.debug("Build informer received update event for: {} to: {}" + oldObj + " " + newObj); + if (newObj != null) { + ObjectMeta metadata = oldObj.getMetadata(); + String namespace = metadata.getNamespace(); + if (namespaces.contains(namespace)) { + String oldRv = oldObj.getMetadata().getResourceVersion(); + String newRv = newObj.getMetadata().getResourceVersion(); + LOGGER.info("Build informer received update event for: {} to: {}" + oldRv + " " + newRv); + BuildManager.modifyEventToJenkinsJobRun(newObj); + } + } + } + + @Override + public void onDelete(Build obj, boolean deletedFinalStateUnknown) { + LOGGER.info("Build informer received delete event for: {}" + obj); + if (obj != null) { + ObjectMeta metadata = obj.getMetadata(); + String namespace = metadata.getNamespace(); + if (namespaces.contains(namespace)) { + try { + 
BuildManager.deleteEventToJenkinsJobRun(obj); + } catch (Exception e) { + // TODO Auto-generated catch block + e.printStackTrace(); + } + } + } + } + + private static void onInit(List list) { + Collections.sort(list, BUILD_COMPARATOR); + // We need to sort the builds into their build configs so we can + // handle build run policies correctly. + Map buildConfigMap = new HashMap<>(); + Map> buildConfigBuildMap = new HashMap<>(list.size()); +// BuildManager.mapBuildToBuildConfigs(list, buildConfigMap, buildConfigBuildMap); +// BuildManager.mapBuildsToBuildConfigs(buildConfigBuildMap); + BuildManager.reconcileRunsAndBuilds(); + } + +} diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/BuildConfigClusterInformer.java b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildConfigClusterInformer.java new file mode 100644 index 000000000..e41d09a0e --- /dev/null +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildConfigClusterInformer.java @@ -0,0 +1,141 @@ +/** + * Copyright (C) 2016 Red Hat, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package io.fabric8.jenkins.openshiftsync; + +import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getInformerFactory; + +import java.util.Arrays; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import io.fabric8.kubernetes.api.model.ObjectMeta; +import io.fabric8.kubernetes.client.informers.ResourceEventHandler; +import io.fabric8.kubernetes.client.informers.SharedIndexInformer; +import io.fabric8.kubernetes.client.informers.SharedInformerFactory; +import io.fabric8.openshift.api.model.BuildConfig; + +/** + * Watches {@link BuildConfig} objects in OpenShift and for WorkflowJobs we + * ensure there is a suitable Jenkins Job object defined with the correct + * configuration + */ +public class BuildConfigClusterInformer implements ResourceEventHandler, Lifecyclable { + + private static final Logger LOGGER = LoggerFactory.getLogger(SecretInformer.class.getName()); + private SharedIndexInformer informer; + private Set namespaces; + + public BuildConfigClusterInformer(String[] namespaces) { + this.namespaces = new HashSet<>(Arrays.asList(namespaces)); + } + + public int getListIntervalInSeconds() { + return 1_000 * GlobalPluginConfiguration.get().getBuildConfigListInterval(); + } + + public void start() { + LOGGER.info("Starting BuildConfig informer for {} !!" + namespaces); + LOGGER.debug("listing BuildConfig resources"); + SharedInformerFactory factory = getInformerFactory(); + this.informer = factory.sharedIndexInformerFor(BuildConfig.class, getListIntervalInSeconds()); + informer.addEventHandler(this); + LOGGER.info("BuildConfig informer started for namespace: {}" + namespaces); + // BuildConfigList list = + // getOpenshiftClient().buildConfigs().inNamespace(namespace).list(); + // onInit(list.getItems()); + } + + public void stop() { + LOGGER.info("Stopping secret informer {} !!" 
+ namespaces); + this.informer.stop(); + } + + @Override + public void onAdd(BuildConfig obj) { + LOGGER.debug("BuildConfig informer received add event for: {}" + obj); + + if (obj != null) { + ObjectMeta metadata = obj.getMetadata(); + String namespace = metadata.getNamespace(); + if (namespaces.contains(namespace)) { + String name = metadata.getName(); + LOGGER.info("BuildConfig informer received add event for: {}" + name); + try { + BuildConfigWatcher.upsertJob(obj); + } catch (Exception e) { + // TODO Auto-generated catch block + e.printStackTrace(); + } + } + } + } + + @Override + public void onUpdate(BuildConfig oldObj, BuildConfig newObj) { + LOGGER.debug("BuildConfig informer received update event for: {} to: {}" + oldObj + " " + newObj); + if (newObj != null) { + ObjectMeta metadata = oldObj.getMetadata(); + String namespace = metadata.getNamespace(); + if (namespaces.contains(namespace)) { + String oldRv = oldObj.getMetadata().getResourceVersion(); + String newRv = newObj.getMetadata().getResourceVersion(); + LOGGER.info("BuildConfig informer received update event for: {} to: {}" + oldRv + " " + newRv); + try { + BuildConfigWatcher.modifyEventToJenkinsJob(newObj); + } catch (Exception e) { + // TODO Auto-generated catch block + e.printStackTrace(); + } + } + } + } + + @Override + public void onDelete(BuildConfig obj, boolean deletedFinalStateUnknown) { + LOGGER.info("BuildConfig informer received delete event for: {}" + obj); + if (obj != null) { + ObjectMeta metadata = obj.getMetadata(); + String namespace = metadata.getNamespace(); + if (namespaces.contains(namespace)) { + try { + BuildConfigWatcher.deleteEventToJenkinsJob(obj); + } catch (Exception e) { + // TODO Auto-generated catch block + e.printStackTrace(); + } + } + } + } + + private void onInit(List list) { + for (BuildConfig buildConfig : list) { + try { + BuildConfigWatcher.upsertJob(buildConfig); + } catch (Exception e) { + LOGGER.error("Failed to update job", e); + } + } + // poke the 
BuildWatcher builds with no BC list and see if we + // can create job + // runs for premature builds + BuildManager.flushBuildsWithNoBCList(); + } + +} diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/BuildConfigInformer.java b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildConfigInformer.java index 72a0db1eb..e3a6be428 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/BuildConfigInformer.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildConfigInformer.java @@ -15,50 +15,34 @@ */ package io.fabric8.jenkins.openshiftsync; -import static io.fabric8.jenkins.openshiftsync.BuildConfigToJobMap.getJobFromBuildConfig; -import static io.fabric8.jenkins.openshiftsync.BuildConfigToJobMap.removeJobWithBuildConfig; -import static io.fabric8.jenkins.openshiftsync.BuildPhases.NEW; -import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_BUILD_STATUS_FIELD; -import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_LABELS_BUILD_CONFIG_NAME; -import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getAuthenticatedOpenShiftClient; import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getInformerFactory; -import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.isPipelineStrategyBuildConfig; -import static java.util.concurrent.TimeUnit.MILLISECONDS; import java.util.List; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import hudson.model.Job; -import hudson.security.ACL; -import hudson.triggers.SafeTimerTask; import io.fabric8.kubernetes.api.model.ObjectMeta; import io.fabric8.kubernetes.client.informers.ResourceEventHandler; import io.fabric8.kubernetes.client.informers.SharedIndexInformer; import io.fabric8.kubernetes.client.informers.SharedInformerFactory; import io.fabric8.openshift.api.model.BuildConfig; -import io.fabric8.openshift.api.model.BuildList; -import io.fabric8.openshift.client.OpenShiftClient; -import jenkins.model.Jenkins; -import jenkins.security.NotReallyRoleSensitiveCallable; 
-import jenkins.util.Timer; /** * Watches {@link BuildConfig} objects in OpenShift and for WorkflowJobs we * ensure there is a suitable Jenkins Job object defined with the correct * configuration */ -public class BuildConfigInformer extends BuildConfigWatcher implements ResourceEventHandler { +public class BuildConfigInformer implements ResourceEventHandler, Lifecyclable { private static final Logger LOGGER = LoggerFactory.getLogger(SecretInformer.class.getName()); private SharedIndexInformer informer; + private String namespace; public BuildConfigInformer(String namespace) { - super(namespace); + this.namespace = namespace; } - @Override public int getListIntervalInSeconds() { return 1_000 * GlobalPluginConfiguration.get().getBuildConfigListInterval(); } @@ -87,7 +71,12 @@ public void onAdd(BuildConfig obj) { ObjectMeta metadata = obj.getMetadata(); String name = metadata.getName(); LOGGER.info("BuildConfig informer received add event for: {}" + name); - upsertJob(obj); + try { + BuildConfigWatcher.upsertJob(obj); + } catch (Exception e) { + // TODO Auto-generated catch block + e.printStackTrace(); + } } } @@ -98,7 +87,12 @@ public void onUpdate(BuildConfig oldObj, BuildConfig newObj) { String oldRv = oldObj.getMetadata().getResourceVersion(); String newRv = newObj.getMetadata().getResourceVersion(); LOGGER.info("BuildConfig informer received update event for: {} to: {}" + oldRv + " " + newRv); - modifyEventToJenkinsJob(newObj); + try { + BuildConfigWatcher.modifyEventToJenkinsJob(newObj); + } catch (Exception e) { + // TODO Auto-generated catch block + e.printStackTrace(); + } } } @@ -106,123 +100,19 @@ public void onUpdate(BuildConfig oldObj, BuildConfig newObj) { public void onDelete(BuildConfig obj, boolean deletedFinalStateUnknown) { LOGGER.info("BuildConfig informer received delete event for: {}" + obj); if (obj != null) { - deleteEventToJenkinsJob(obj); - } - } - - @SuppressWarnings({ "deprecation", "serial" }) - private void 
cleanupJobsMissingStartBuildEvent(BuildConfig buildConfig) throws Exception { - boolean buildConfigNameNotNull = buildConfig != null && buildConfig.getMetadata() != null; - String name = buildConfigNameNotNull ? buildConfig.getMetadata().getName() : "null"; - // we employ impersonation here to insure we have "full access"; - // for example, can we actually - // read in jobs defs for verification? without impersonation here - // we would get null back when trying to read in the job from disk - ACL.impersonate(ACL.SYSTEM, new NotReallyRoleSensitiveCallable() { - @Override - public Void call() throws Exception { - // if bc event came after build events, let's poke the BuildWatcher builds with - // no BC list to create job runs - BuildWatcher.flushBuildsWithNoBCList(); - // now, if the build event was lost and never received, builds will stay in new - // for 5 minutes ... - // let's launch a background thread to clean them up at a quicker interval than - // the default 5 minute general build - // relist function - Runnable backupBuildQuery = new SafeTimerTask() { - @Override - public void doRun() { - if (!CredentialsUtils.hasCredentials()) { - LOGGER.debug("No Openshift Token credential defined."); - return; - } - final OpenShiftClient client = getAuthenticatedOpenShiftClient(); - BuildList buildList = client.builds().inNamespace(namespace) - .withField(OPENSHIFT_BUILD_STATUS_FIELD, NEW) - .withLabel(OPENSHIFT_LABELS_BUILD_CONFIG_NAME, name).list(); - if (buildList.getItems().size() > 0) { - LOGGER.info("build backup query for " + name + " found new builds"); - BuildWatcher.onInitialBuilds(buildList); - } - } - }; - Timer.get().schedule(backupBuildQuery, 10 * 1000, MILLISECONDS); - return null; - } - }); - } - - @SuppressWarnings({ "deprecation" }) - private void upsertJob(final BuildConfig buildConfig) { - if (isPipelineStrategyBuildConfig(buildConfig)) { - // sync on intern of name should guarantee sync on same actual obj - synchronized 
(buildConfig.getMetadata().getUid().intern()) { - try { - ACL.impersonate(ACL.SYSTEM, new JobProcessor(this, buildConfig)); - } catch (Exception e) { - LOGGER.error("Error while trying to insert JobRun: " + e); - - } - } - } - try { - cleanupJobsMissingStartBuildEvent(buildConfig); - } catch (Exception e) { - LOGGER.error("Error while trying to clean up orphan JobRuns: " + e); - e.printStackTrace(); - } - } - - private void modifyEventToJenkinsJob(BuildConfig buildConfig) { - if (isPipelineStrategyBuildConfig(buildConfig)) { - upsertJob(buildConfig); - return; - } - - // no longer a Jenkins build so lets delete it if it exists - deleteEventToJenkinsJob(buildConfig); - } - - // innerDeleteEventToJenkinsJob is the actual delete logic at the heart of - // deleteEventToJenkinsJob that is either in a sync block or not based on the - // presence of a BC uid - @SuppressWarnings({ "deprecation", "serial" }) - private void innerDeleteEventToJenkinsJob(final BuildConfig buildConfig) throws Exception { - Job job = getJobFromBuildConfig(buildConfig); - if (job != null) { - // employ intern of the BC UID to facilitate sync'ing on the same - // actual object - synchronized (buildConfig.getMetadata().getUid().intern()) { - ACL.impersonate(ACL.SYSTEM, new NotReallyRoleSensitiveCallable() { - @Override - public Void call() throws Exception { - try { - deleteInProgress( - buildConfig.getMetadata().getNamespace() + buildConfig.getMetadata().getName()); - job.delete(); - } finally { - removeJobWithBuildConfig(buildConfig); - Jenkins.getActiveInstance().rebuildDependencyGraphAsync(); - deleteCompleted( - buildConfig.getMetadata().getNamespace() + buildConfig.getMetadata().getName()); - } - return null; - } - }); - // if the bc has a source secret it is possible it should - // be deleted as well (called function will cross reference - // with secret watch) - CredentialsUtils.deleteSourceCredentials(buildConfig); + try { + BuildConfigWatcher.deleteEventToJenkinsJob(obj); + } catch 
(Exception e) { + // TODO Auto-generated catch block + e.printStackTrace(); } - } - } private void onInit(List list) { for (BuildConfig buildConfig : list) { try { - upsertJob(buildConfig); + BuildConfigWatcher.upsertJob(buildConfig); } catch (Exception e) { LOGGER.error("Failed to update job", e); } @@ -230,42 +120,7 @@ private void onInit(List list) { // poke the BuildWatcher builds with no BC list and see if we // can create job // runs for premature builds - BuildWatcher.flushBuildsWithNoBCList(); + BuildManager.flushBuildsWithNoBCList(); } - // in response to receiving an openshift delete build config event, this - // method will drive - // the clean up of the Jenkins job the build config is mapped one to one - // with; as part of that - // clean up it will synchronize with the build event watcher to handle build - // config - // delete events and build delete events that arrive concurrently and in a - // nondeterministic - // order - private void deleteEventToJenkinsJob(final BuildConfig buildConfig) { - if (buildConfig != null) { - String bcUid = buildConfig.getMetadata().getUid(); - if (bcUid != null && bcUid.length() > 0) { - // employ intern of the BC UID to facilitate sync'ing on the same - // actual object - bcUid = bcUid.intern(); - synchronized (bcUid) { - try { - innerDeleteEventToJenkinsJob(buildConfig); - } catch (Exception e) { - // TODO Auto-generated catch block - e.printStackTrace(); - } - return; - } - } - // uid should not be null / empty, but just in case, still clean up - try { - innerDeleteEventToJenkinsJob(buildConfig); - } catch (Exception e) { - // TODO Auto-generated catch block - e.printStackTrace(); - } - } - } } diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/BuildConfigWatcher.java b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildConfigWatcher.java index 62c2f1aa1..dd3eb2cfb 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/BuildConfigWatcher.java +++ 
b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildConfigWatcher.java @@ -120,7 +120,7 @@ public void start() { // poke the BuildWatcher builds with no BC list and see if we // can create job // runs for premature builds - BuildWatcher.flushBuildsWithNoBCList(); + BuildManager.flushBuildsWithNoBCList(); } private void onInitialBuildConfigs(BuildConfigList buildConfigs) { @@ -175,7 +175,7 @@ public void eventReceived(Action action, BuildConfig buildConfig) { public Void call() throws Exception { // if bc event came after build events, let's poke the BuildWatcher builds with // no BC list to create job runs - BuildWatcher.flushBuildsWithNoBCList(); + BuildManager.flushBuildsWithNoBCList(); // now, if the build event was lost and never received, builds will stay in new // for 5 minutes ... // let's launch a background thread to clean them up at a quicker interval than @@ -208,16 +208,16 @@ public void doRun() { } } - private void upsertJob(final BuildConfig buildConfig) throws Exception { + static void upsertJob(final BuildConfig buildConfig) throws Exception { if (isPipelineStrategyBuildConfig(buildConfig)) { // sync on intern of name should guarantee sync on same actual obj synchronized (buildConfig.getMetadata().getUid().intern()) { - ACL.impersonate(ACL.SYSTEM, new JobProcessor(this, buildConfig)); + ACL.impersonate(ACL.SYSTEM, new JobProcessor(buildConfig)); } } } - private void modifyEventToJenkinsJob(BuildConfig buildConfig) throws Exception { + static void modifyEventToJenkinsJob(BuildConfig buildConfig) throws Exception { if (isPipelineStrategyBuildConfig(buildConfig)) { upsertJob(buildConfig); return; @@ -230,7 +230,7 @@ private void modifyEventToJenkinsJob(BuildConfig buildConfig) throws Exception { // innerDeleteEventToJenkinsJob is the actual delete logic at the heart of // deleteEventToJenkinsJob // that is either in a sync block or not based on the presence of a BC uid - private void innerDeleteEventToJenkinsJob(final BuildConfig buildConfig) throws 
Exception { + private static void innerDeleteEventToJenkinsJob(final BuildConfig buildConfig) throws Exception { final Job job = getJobFromBuildConfig(buildConfig); if (job != null) { // employ intern of the BC UID to facilitate sync'ing on the same @@ -271,7 +271,7 @@ public Void call() throws Exception { // delete events and build delete events that arrive concurrently and in a // nondeterministic // order - private void deleteEventToJenkinsJob(final BuildConfig buildConfig) throws Exception { + static void deleteEventToJenkinsJob(final BuildConfig buildConfig) throws Exception { if (buildConfig != null) { String bcUid = buildConfig.getMetadata().getUid(); if (bcUid != null && bcUid.length() > 0) { diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/BuildInformer.java b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildInformer.java index 23d37a3cd..e2114bb23 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/BuildInformer.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildInformer.java @@ -15,60 +15,33 @@ */ package io.fabric8.jenkins.openshiftsync; -import static io.fabric8.jenkins.openshiftsync.Annotations.BUILDCONFIG_NAME; -import static io.fabric8.jenkins.openshiftsync.BuildConfigToJobMap.getJobFromBuildConfig; -import static io.fabric8.jenkins.openshiftsync.BuildConfigToJobMap.getJobFromBuildConfigNameNamespace; -import static io.fabric8.jenkins.openshiftsync.BuildPhases.CANCELLED; -import static io.fabric8.jenkins.openshiftsync.JenkinsUtils.cancelBuild; -import static io.fabric8.jenkins.openshiftsync.JenkinsUtils.deleteRun; -import static io.fabric8.jenkins.openshiftsync.JenkinsUtils.getJobFromBuild; -import static io.fabric8.jenkins.openshiftsync.JenkinsUtils.handleBuildList; -import static io.fabric8.jenkins.openshiftsync.JenkinsUtils.triggerJob; -import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getAnnotation; -import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getAuthenticatedOpenShiftClient; import 
static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getInformerFactory; -import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.isCancellable; -import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.isCancelled; -import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.isNew; -import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.updateOpenShiftBuildPhase; import java.io.IOException; -import java.util.ArrayList; import java.util.Collections; -import java.util.ConcurrentModificationException; import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; -import org.apache.commons.lang.StringUtils; -import org.jenkinsci.plugins.workflow.job.WorkflowJob; -import org.jenkinsci.plugins.workflow.job.WorkflowRun; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import hudson.security.ACL; import io.fabric8.kubernetes.api.model.ObjectMeta; -import io.fabric8.kubernetes.api.model.OwnerReference; import io.fabric8.kubernetes.client.informers.ResourceEventHandler; import io.fabric8.kubernetes.client.informers.SharedIndexInformer; import io.fabric8.kubernetes.client.informers.SharedInformerFactory; import io.fabric8.openshift.api.model.Build; import io.fabric8.openshift.api.model.BuildConfig; -import io.fabric8.openshift.api.model.BuildList; -import io.fabric8.openshift.api.model.BuildStatus; -import io.fabric8.openshift.client.OpenShiftClient; -import jenkins.model.Jenkins; -import jenkins.security.NotReallyRoleSensitiveCallable; -public class BuildInformer extends BuildWatcher implements ResourceEventHandler { +public class BuildInformer implements ResourceEventHandler, Lifecyclable { private static final Logger LOGGER = LoggerFactory.getLogger(SecretInformer.class.getName()); private final static BuildComparator BUILD_COMPARATOR = new BuildComparator(); private SharedIndexInformer informer; + private String namespace; public BuildInformer(String namespace) { - super(namespace); + 
this.namespace = namespace; } /** @@ -79,7 +52,6 @@ public BuildInformer(String namespace) { * builds getting kicked off so quit depending on so moved off of concurrent * hash set to concurrent hash map using namepace/name key */ - @Override public int getListIntervalInSeconds() { return 1_000 * GlobalPluginConfiguration.get().getBuildListInterval(); } @@ -95,6 +67,11 @@ public void start() { // onInit(list.getItems()); } + public void stop() { + LOGGER.info("Stopping Builder informer {} !!" + namespace); + this.informer.stop(); + } + @Override public void onAdd(Build obj) { LOGGER.debug("Build informer received add event for: {}" + obj); @@ -102,7 +79,12 @@ public void onAdd(Build obj) { ObjectMeta metadata = obj.getMetadata(); String name = metadata.getName(); LOGGER.info("Build informer received add event for: {}" + name); - addEventToJenkinsJobRun(obj); + try { + BuildManager.addEventToJenkinsJobRun(obj); + } catch (IOException e) { + // TODO Auto-generated catch block + e.printStackTrace(); + } } } @@ -113,7 +95,7 @@ public void onUpdate(Build oldObj, Build newObj) { String oldRv = oldObj.getMetadata().getResourceVersion(); String newRv = newObj.getMetadata().getResourceVersion(); LOGGER.info("Build informer received update event for: {} to: {}" + oldRv + " " + newRv); - modifyEventToJenkinsJobRun(newObj); + BuildManager.modifyEventToJenkinsJobRun(newObj); } } @@ -121,280 +103,24 @@ public void onUpdate(Build oldObj, Build newObj) { public void onDelete(Build obj, boolean deletedFinalStateUnknown) { LOGGER.info("Build informer received delete event for: {}" + obj); if (obj != null) { - deleteEventToJenkinsJobRun(obj); + try { + BuildManager.deleteEventToJenkinsJobRun(obj); + } catch (Exception e) { + // TODO Auto-generated catch block + e.printStackTrace(); + } } } - public static void onInit(List list) { + private static void onInit(List list) { Collections.sort(list, BUILD_COMPARATOR); // We need to sort the builds into their build configs so we can // 
handle build run policies correctly. Map buildConfigMap = new HashMap<>(); Map> buildConfigBuildMap = new HashMap<>(list.size()); - mapBuildToBuildConfigs(list, buildConfigMap, buildConfigBuildMap); - mapBuildsToBuildConfigs(buildConfigBuildMap); - reconcileRunsAndBuilds(); - } - - private static void mapBuildsToBuildConfigs(Map> buildConfigBuildMap) { - // Now handle the builds. - for (Map.Entry> buildConfigBuilds : buildConfigBuildMap.entrySet()) { - BuildConfig bc = buildConfigBuilds.getKey(); - if (bc.getMetadata() == null) { - // Should never happen but let's be safe... - continue; - } - WorkflowJob job = getJobFromBuildConfig(bc); - if (job == null) { - List builds = buildConfigBuilds.getValue(); - for (Build b : builds) { - LOGGER.info("skipping listed new build " + b.getMetadata().getName() + " no job at this time"); - addBuildToNoBCList(b); - } - continue; - } - BuildConfigProjectProperty bcp = job.getProperty(BuildConfigProjectProperty.class); - if (bcp == null) { - List builds = buildConfigBuilds.getValue(); - for (Build b : builds) { - LOGGER.info("skipping listed new build " + b.getMetadata().getName() + " no prop at this time"); - addBuildToNoBCList(b); - } - continue; - } - List builds = buildConfigBuilds.getValue(); - handleBuildList(job, builds, bcp); - } - } - - private static void mapBuildToBuildConfigs(List list, Map buildConfigMap, - Map> buildConfigBuildMap) { - for (Build b : list) { - if (!OpenShiftUtils.isPipelineStrategyBuild(b)) { - continue; - } - String buildConfigName = b.getStatus().getConfig().getName(); - if (StringUtils.isEmpty(buildConfigName)) { - continue; - } - String namespace = b.getMetadata().getNamespace(); - String buildConfigNamespacedName = namespace + "/" + buildConfigName; - BuildConfig bc = buildConfigMap.get(buildConfigNamespacedName); - if (bc == null) { - final OpenShiftClient client = getAuthenticatedOpenShiftClient(); - bc = client.buildConfigs().inNamespace(namespace).withName(buildConfigName).get(); - if (bc == 
null) { - // if the bc is not there via a REST get, then it is not - // going to be, and we are not handling manual creation - // of pipeline build objects, so don't bother with "no bc list" - continue; - } - buildConfigMap.put(buildConfigNamespacedName, bc); - } - List bcBuilds = buildConfigBuildMap.get(bc); - if (bcBuilds == null) { - bcBuilds = new ArrayList<>(); - buildConfigBuildMap.put(bc, bcBuilds); - } - bcBuilds.add(b); - } - } - - private static void modifyEventToJenkinsJobRun(Build build) { - BuildStatus status = build.getStatus(); - if (status != null && isCancellable(status) && isCancelled(status)) { - WorkflowJob job = getJobFromBuild(build); - if (job != null) { - cancelBuild(job, build); - } else { - removeBuildFromNoBCList(build); - } - } else { - // see if any pre-BC cached builds can now be flushed - flushBuildsWithNoBCList(); - } - } - - public static boolean addEventToJenkinsJobRun(Build build) { - // should have been caught upstack, but just in case since public method - if (!OpenShiftUtils.isPipelineStrategyBuild(build)) - return false; - BuildStatus status = build.getStatus(); - if (status != null) { - if (isCancelled(status)) { - updateOpenShiftBuildPhase(build, CANCELLED); - return false; - } - if (!isNew(status)) { - return false; - } - } - - WorkflowJob job = getJobFromBuild(build); - if (job != null) { - try { - return triggerJob(job, build); - } catch (IOException e) { - LOGGER.error("Error while trying to trigger Job: " + e); - } - } - LOGGER.info("skipping watch event for build " + build.getMetadata().getName() + " no job at this time"); - addBuildToNoBCList(build); - return false; - } - - private static void addBuildToNoBCList(Build build) { - // should have been caught upstack, but just in case since public method - if (!OpenShiftUtils.isPipelineStrategyBuild(build)) - return; - try { - buildsWithNoBCList.put(build.getMetadata().getNamespace() + build.getMetadata().getName(), build); - } catch (ConcurrentModificationException | 
IllegalArgumentException | UnsupportedOperationException - | NullPointerException e) { - LOGGER.warn( "Failed to add item " + build.getMetadata().getName(), e); - } - } - - private static void removeBuildFromNoBCList(Build build) { - buildsWithNoBCList.remove(build.getMetadata().getNamespace() + build.getMetadata().getName()); - } - - // trigger any builds whose watch events arrived before the - // corresponding build config watch events - public static void flushBuildsWithNoBCList() { - - ConcurrentHashMap clone = null; - synchronized (buildsWithNoBCList) { - clone = new ConcurrentHashMap(buildsWithNoBCList); - } - boolean anyRemoveFailures = false; - for (Build build : clone.values()) { - WorkflowJob job = getJobFromBuild(build); - if (job != null) { - try { - LOGGER.info("triggering job run for previously skipped build " + build.getMetadata().getName()); - triggerJob(job, build); - } catch (IOException e) { - LOGGER.warn( "flushBuildsWithNoBCList", e); - } - try { - synchronized (buildsWithNoBCList) { - removeBuildFromNoBCList(build); - } - } catch (Throwable t) { - // TODO - // concurrent mod exceptions are not suppose to occur - // with concurrent hash set; this try/catch with log - // and the anyRemoveFailures post processing is a bit - // of safety paranoia until this proves to be true - // over extended usage ... 
probably can remove at some - // point - anyRemoveFailures = true; - LOGGER.warn( "flushBuildsWithNoBCList", t); - } - } - - synchronized (buildsWithNoBCList) { - if (anyRemoveFailures && buildsWithNoBCList.size() > 0) { - buildsWithNoBCList.clear(); - } - - } - } - } - - // innerDeleteEventToJenkinsJobRun is the actual delete logic at the heart of - // deleteEventToJenkinsJobRun that is either in a sync block or not based on the - // presence of a BC uid - @SuppressWarnings({ "deprecation", "serial" }) - private static void innerDeleteEventToJenkinsJobRun(final Build build) throws Exception { - final WorkflowJob job = getJobFromBuild(build); - if (job != null) { - ACL.impersonate(ACL.SYSTEM, new NotReallyRoleSensitiveCallable() { - @Override - public Void call() throws Exception { - cancelBuild(job, build, true); - return null; - } - }); - } else { - // in case build was created and deleted quickly, prior to seeing BC - // event, clear out from pre-BC cache - removeBuildFromNoBCList(build); - } - deleteRun(job, build); - } - - // in response to receiving an openshift delete build event, this method - // will drive the clean up of the Jenkins job run the build is mapped one to one - // with; as part of that clean up it will synchronize with the build config - // event watcher to handle build config delete events and build delete events - // that arrive concurrently and in a nondeterministic order - private static void deleteEventToJenkinsJobRun(final Build build) { - List ownerRefs = build.getMetadata().getOwnerReferences(); - String bcUid = null; - for (OwnerReference ref : ownerRefs) { - if ("BuildConfig".equals(ref.getKind()) && ref.getUid() != null && ref.getUid().length() > 0) { - // employ intern to facilitate sync'ing on the same actual object - bcUid = ref.getUid().intern(); - synchronized (bcUid) { - // if entire job already deleted via bc delete, just return - if (getJobFromBuildConfigNameNamespace(getAnnotation(build, BUILDCONFIG_NAME), - 
build.getMetadata().getNamespace()) == null) { - return; - } - try { - innerDeleteEventToJenkinsJobRun(build); - } catch (Exception e) { - LOGGER.error("Error while trying to delete JobRun: " + e); - } - return; - } - } - } - // otherwise, if something odd is up and there is no parent BC, just clean up - try { - innerDeleteEventToJenkinsJobRun(build); - } catch (Exception e) { - LOGGER.error("Error while trying to delete JobRun: " + e); - } - } - - /** - * Reconciles Jenkins job runs and OpenShift builds - * - * Deletes all job runs that do not have an associated build in OpenShift - */ - @SuppressWarnings("deprecation") - private static void reconcileRunsAndBuilds() { - LOGGER.debug("Reconciling job runs and builds"); - List jobs = Jenkins.getActiveInstance().getAllItems(WorkflowJob.class); - for (WorkflowJob job : jobs) { - BuildConfigProjectProperty property = job.getProperty(BuildConfigProjectProperty.class); - if (property != null) { - String ns = property.getNamespace(); - String name = property.getName(); - if (StringUtils.isNotBlank(ns) && StringUtils.isNotBlank(name)) { - LOGGER.debug("Checking job " + job + " runs for BuildConfig " + ns + "/" + name); - OpenShiftClient client = getAuthenticatedOpenShiftClient(); - BuildList builds = client.builds().inNamespace(ns).withLabel("buildconfig=" + name).list(); - for (WorkflowRun run : job.getBuilds()) { - boolean found = false; - BuildCause cause = run.getCause(BuildCause.class); - for (Build build : builds.getItems()) { - if (cause != null && cause.getUid().equals(build.getMetadata().getUid())) { - found = true; - break; - } - } - if (!found) { - deleteRun(run); - } - } - } - } - - } +// BuildManager.mapBuildToBuildConfigs(list, buildConfigMap, buildConfigBuildMap); +// BuildManager.mapBuildsToBuildConfigs(buildConfigBuildMap); + BuildManager.reconcileRunsAndBuilds(); } } diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/BuildManager.java 
b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildManager.java new file mode 100644 index 000000000..8aebca7c4 --- /dev/null +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildManager.java @@ -0,0 +1,365 @@ +/** + * Copyright (C) 2016 Red Hat, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package io.fabric8.jenkins.openshiftsync; + +import static io.fabric8.jenkins.openshiftsync.Annotations.BUILDCONFIG_NAME; +import static io.fabric8.jenkins.openshiftsync.BuildConfigToJobMap.getJobFromBuildConfig; +import static io.fabric8.jenkins.openshiftsync.BuildConfigToJobMap.getJobFromBuildConfigNameNamespace; +import static io.fabric8.jenkins.openshiftsync.BuildPhases.CANCELLED; +import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_ANNOTATIONS_BUILD_NUMBER; +import static io.fabric8.jenkins.openshiftsync.JenkinsUtils.cancelBuild; +import static io.fabric8.jenkins.openshiftsync.JenkinsUtils.deleteRun; +import static io.fabric8.jenkins.openshiftsync.JenkinsUtils.getJobFromBuild; +import static io.fabric8.jenkins.openshiftsync.JenkinsUtils.handleBuildList; +import static io.fabric8.jenkins.openshiftsync.JenkinsUtils.triggerJob; +import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getAnnotation; +import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getAuthenticatedOpenShiftClient; +import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.isCancellable; +import static 
io.fabric8.jenkins.openshiftsync.OpenShiftUtils.isCancelled; +import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.isNew; +import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.updateOpenShiftBuildPhase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.ConcurrentModificationException; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.logging.Level; +import java.util.logging.Logger; + +import org.apache.commons.lang.StringUtils; +import org.jenkinsci.plugins.workflow.job.WorkflowJob; +import org.jenkinsci.plugins.workflow.job.WorkflowRun; + +import hudson.security.ACL; +import io.fabric8.kubernetes.api.model.OwnerReference; +import io.fabric8.openshift.api.model.Build; +import io.fabric8.openshift.api.model.BuildConfig; +import io.fabric8.openshift.api.model.BuildList; +import io.fabric8.openshift.api.model.BuildStatus; +import io.fabric8.openshift.client.OpenShiftClient; +import jenkins.model.Jenkins; +import jenkins.security.NotReallyRoleSensitiveCallable; + +@SuppressWarnings({ "deprecation", "serial" }) +public class BuildManager { + private static final Logger logger = Logger.getLogger(BuildManager.class.getName()); + + /** + * now that listing interval is 5 minutes (used to be 10 seconds), we have seen + * timing windows where if the build watch events come before build config watch + * events when both are created in a simultaneous fashion, there is an up to 5 + * minute delay before the job run gets kicked off started seeing duplicate + * builds getting kicked off so quit depending on so moved off of concurrent + * hash set to concurrent hash map using namepace/name key + */ + protected static final ConcurrentHashMap buildsWithNoBCList = new ConcurrentHashMap(); + + public static void onInitialBuilds(BuildList buildList) { + if (buildList == null) + return; + List 
items = buildList.getItems(); + if (items != null) { + Collections.sort(items, new Comparator() { + @Override + public int compare(Build b1, Build b2) { + if (b1.getMetadata().getAnnotations() == null + || b1.getMetadata().getAnnotations().get(OPENSHIFT_ANNOTATIONS_BUILD_NUMBER) == null) { + logger.warning("cannot compare build " + b1.getMetadata().getName() + " from namespace " + + b1.getMetadata().getNamespace() + ", has bad annotations: " + + b1.getMetadata().getAnnotations()); + return 0; + } + if (b2.getMetadata().getAnnotations() == null + || b2.getMetadata().getAnnotations().get(OPENSHIFT_ANNOTATIONS_BUILD_NUMBER) == null) { + logger.warning("cannot compare build " + b2.getMetadata().getName() + " from namespace " + + b2.getMetadata().getNamespace() + ", has bad annotations: " + + b2.getMetadata().getAnnotations()); + return 0; + } + int rc = 0; + try { + rc = Long.compare( + + Long.parseLong( + b1.getMetadata().getAnnotations().get(OPENSHIFT_ANNOTATIONS_BUILD_NUMBER)), + Long.parseLong( + b2.getMetadata().getAnnotations().get(OPENSHIFT_ANNOTATIONS_BUILD_NUMBER))); + } catch (Throwable t) { + logger.log(Level.FINE, "onInitialBuilds", t); + } + return rc; + } + }); + + // We need to sort the builds into their build configs so we can + // handle build run policies correctly. 
+ Map buildConfigMap = new HashMap<>(); + Map> buildConfigBuildMap = new HashMap<>(items.size()); + for (Build b : items) { + if (!OpenShiftUtils.isPipelineStrategyBuild(b)) + continue; + String buildConfigName = b.getStatus().getConfig().getName(); + if (StringUtils.isEmpty(buildConfigName)) { + continue; + } + String namespace = b.getMetadata().getNamespace(); + String bcMapKey = namespace + "/" + buildConfigName; + BuildConfig bc = buildConfigMap.get(bcMapKey); + if (bc == null) { + bc = getAuthenticatedOpenShiftClient().buildConfigs().inNamespace(namespace) + .withName(buildConfigName).get(); + if (bc == null) { + // if the bc is not there via a REST get, then it is not + // going to be, and we are not handling manual creation + // of pipeline build objects, so don't bother with "no bc list" + continue; + } + buildConfigMap.put(bcMapKey, bc); + } + List bcBuilds = buildConfigBuildMap.get(bc); + if (bcBuilds == null) { + bcBuilds = new ArrayList<>(); + buildConfigBuildMap.put(bc, bcBuilds); + } + bcBuilds.add(b); + } + + // Now handle the builds. + for (Map.Entry> buildConfigBuilds : buildConfigBuildMap.entrySet()) { + BuildConfig bc = buildConfigBuilds.getKey(); + if (bc.getMetadata() == null) { + // Should never happen but let's be safe... 
+ continue; + } + WorkflowJob job = getJobFromBuildConfig(bc); + if (job == null) { + List builds = buildConfigBuilds.getValue(); + for (Build b : builds) { + logger.info("skipping listed new build " + b.getMetadata().getName() + " no job at this time"); + addBuildToNoBCList(b); + } + continue; + } + BuildConfigProjectProperty bcp = job.getProperty(BuildConfigProjectProperty.class); + if (bcp == null) { + List builds = buildConfigBuilds.getValue(); + for (Build b : builds) { + logger.info("skipping listed new build " + b.getMetadata().getName() + " no prop at this time"); + addBuildToNoBCList(b); + } + continue; + } + List builds = buildConfigBuilds.getValue(); + handleBuildList(job, builds, bcp); + } + } + } + + static void modifyEventToJenkinsJobRun(Build build) { + BuildStatus status = build.getStatus(); + if (status != null && isCancellable(status) && isCancelled(status)) { + WorkflowJob job = getJobFromBuild(build); + if (job != null) { + cancelBuild(job, build); + } else { + removeBuildFromNoBCList(build); + } + } else { + // see if any pre-BC cached builds can now be flushed + flushBuildsWithNoBCList(); + } + } + + public static boolean addEventToJenkinsJobRun(Build build) throws IOException { + // should have been caught upstack, but just in case since public method + if (!OpenShiftUtils.isPipelineStrategyBuild(build)) + return false; + BuildStatus status = build.getStatus(); + if (status != null) { + if (isCancelled(status)) { + updateOpenShiftBuildPhase(build, CANCELLED); + return false; + } + if (!isNew(status)) { + return false; + } + } + + WorkflowJob job = getJobFromBuild(build); + if (job != null) { + return triggerJob(job, build); + } + logger.info("skipping watch event for build " + build.getMetadata().getName() + " no job at this time"); + addBuildToNoBCList(build); + return false; + } + + static void addBuildToNoBCList(Build build) { + // should have been caught upstack, but just in case since public method + if 
(!OpenShiftUtils.isPipelineStrategyBuild(build)) + return; + try { + buildsWithNoBCList.put(build.getMetadata().getNamespace() + build.getMetadata().getName(), build); + } catch (ConcurrentModificationException | IllegalArgumentException | UnsupportedOperationException + | NullPointerException e) { + logger.log(Level.WARNING, "Failed to add item " + build.getMetadata().getName(), e); + } + } + + static void removeBuildFromNoBCList(Build build) { + buildsWithNoBCList.remove(build.getMetadata().getNamespace() + build.getMetadata().getName()); + } + + // trigger any builds whose watch events arrived before the + // corresponding build config watch events + public static void flushBuildsWithNoBCList() { + + ConcurrentHashMap clone = null; + synchronized (buildsWithNoBCList) { + clone = new ConcurrentHashMap(buildsWithNoBCList); + } + boolean anyRemoveFailures = false; + for (Build build : clone.values()) { + WorkflowJob job = getJobFromBuild(build); + if (job != null) { + try { + logger.info("triggering job run for previously skipped build " + build.getMetadata().getName()); + triggerJob(job, build); + } catch (IOException e) { + logger.log(Level.WARNING, "flushBuildsWithNoBCList", e); + } + try { + synchronized (buildsWithNoBCList) { + removeBuildFromNoBCList(build); + } + } catch (Throwable t) { + // TODO + // concurrent mod exceptions are not suppose to occur + // with concurrent hash set; this try/catch with log + // and the anyRemoveFailures post processing is a bit + // of safety paranoia until this proves to be true + // over extended usage ... 
probably can remove at some + // point + anyRemoveFailures = true; + logger.log(Level.WARNING, "flushBuildsWithNoBCList", t); + } + } + + synchronized (buildsWithNoBCList) { + if (anyRemoveFailures && buildsWithNoBCList.size() > 0) { + buildsWithNoBCList.clear(); + } + + } + } + } + + // innerDeleteEventToJenkinsJobRun is the actual delete logic at the heart + // of deleteEventToJenkinsJobRun + // that is either in a sync block or not based on the presence of a BC uid + private static void innerDeleteEventToJenkinsJobRun(final Build build) throws Exception { + final WorkflowJob job = getJobFromBuild(build); + if (job != null) { + ACL.impersonate(ACL.SYSTEM, new NotReallyRoleSensitiveCallable() { + @Override + public Void call() throws Exception { + cancelBuild(job, build, true); + return null; + } + }); + } else { + // in case build was created and deleted quickly, prior to seeing BC + // event, clear out from pre-BC cache + removeBuildFromNoBCList(build); + } + deleteRun(job, build); + } + + // in response to receiving an openshift delete build event, this method + // will drive + // the clean up of the Jenkins job run the build is mapped one to one with; + // as part of that + // clean up it will synchronize with the build config event watcher to + // handle build config + // delete events and build delete events that arrive concurrently and in a + // nondeterministic + // order + static void deleteEventToJenkinsJobRun(final Build build) throws Exception { + List ownerRefs = build.getMetadata().getOwnerReferences(); + String bcUid = null; + for (OwnerReference ref : ownerRefs) { + if ("BuildConfig".equals(ref.getKind()) && ref.getUid() != null && ref.getUid().length() > 0) { + // employ intern to facilitate sync'ing on the same actual + // object + bcUid = ref.getUid().intern(); + synchronized (bcUid) { + // if entire job already deleted via bc delete, just return + if (getJobFromBuildConfigNameNamespace(getAnnotation(build, BUILDCONFIG_NAME), + 
build.getMetadata().getNamespace()) == null) { + return; + } + innerDeleteEventToJenkinsJobRun(build); + return; + } + } + } + // otherwise, if something odd is up and there is no parent BC, just + // clean up + innerDeleteEventToJenkinsJobRun(build); + } + + /** + * Reconciles Jenkins job runs and OpenShift builds + * + * Deletes all job runs that do not have an associated build in OpenShift + */ + static void reconcileRunsAndBuilds() { + logger.fine("Reconciling job runs and builds"); + List jobs = Jenkins.getActiveInstance().getAllItems(WorkflowJob.class); + for (WorkflowJob job : jobs) { + BuildConfigProjectProperty property = job.getProperty(BuildConfigProjectProperty.class); + if (property != null) { + String ns = property.getNamespace(); + String name = property.getName(); + if (StringUtils.isNotBlank(ns) && StringUtils.isNotBlank(name)) { + logger.fine("Checking job " + job + " runs for BuildConfig " + ns + "/" + name); + OpenShiftClient client = getAuthenticatedOpenShiftClient(); + BuildList builds = client.builds().inNamespace(ns).withLabel("buildconfig=" + name).list(); + for (WorkflowRun run : job.getBuilds()) { + boolean found = false; + BuildCause cause = run.getCause(BuildCause.class); + for (Build build : builds.getItems()) { + if (cause != null && cause.getUid().equals(build.getMetadata().getUid())) { + found = true; + break; + } + } + if (!found) { + deleteRun(run); + } + } + } + } + + } + } + +} diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/BuildWatcher.java b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildWatcher.java index 9afa8d732..984786345 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/BuildWatcher.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildWatcher.java @@ -15,74 +15,41 @@ */ package io.fabric8.jenkins.openshiftsync; -import static io.fabric8.jenkins.openshiftsync.Annotations.BUILDCONFIG_NAME; import static io.fabric8.jenkins.openshiftsync.BuildConfigToJobMap.getJobFromBuildConfig; -import 
static io.fabric8.jenkins.openshiftsync.BuildConfigToJobMap.getJobFromBuildConfigNameNamespace; -import static io.fabric8.jenkins.openshiftsync.BuildPhases.CANCELLED; import static io.fabric8.jenkins.openshiftsync.BuildPhases.NEW; import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_ANNOTATIONS_BUILD_NUMBER; import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_BUILD_STATUS_FIELD; -import static io.fabric8.jenkins.openshiftsync.JenkinsUtils.cancelBuild; -import static io.fabric8.jenkins.openshiftsync.JenkinsUtils.deleteRun; -import static io.fabric8.jenkins.openshiftsync.JenkinsUtils.getJobFromBuild; import static io.fabric8.jenkins.openshiftsync.JenkinsUtils.handleBuildList; -import static io.fabric8.jenkins.openshiftsync.JenkinsUtils.triggerJob; -import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getAnnotation; import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getAuthenticatedOpenShiftClient; import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getOpenshiftClient; -import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.isCancellable; -import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.isCancelled; -import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.isNew; import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.isPipelineStrategyBuild; -import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.updateOpenShiftBuildPhase; import static java.util.logging.Level.SEVERE; import static java.util.logging.Level.WARNING; -import java.io.IOException; import java.util.ArrayList; import java.util.Collections; import java.util.Comparator; -import java.util.ConcurrentModificationException; import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; import java.util.logging.Level; import java.util.logging.Logger; import org.apache.commons.lang.StringUtils; import org.jenkinsci.plugins.workflow.job.WorkflowJob; -import 
org.jenkinsci.plugins.workflow.job.WorkflowRun; import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; -import hudson.security.ACL; -import io.fabric8.kubernetes.api.model.OwnerReference; import io.fabric8.kubernetes.api.model.Status; import io.fabric8.kubernetes.client.KubernetesClientException; import io.fabric8.kubernetes.client.WatcherException; import io.fabric8.openshift.api.model.Build; import io.fabric8.openshift.api.model.BuildConfig; import io.fabric8.openshift.api.model.BuildList; -import io.fabric8.openshift.api.model.BuildStatus; import io.fabric8.openshift.client.OpenShiftClient; -import jenkins.model.Jenkins; -import jenkins.security.NotReallyRoleSensitiveCallable; public class BuildWatcher extends BaseWatcher { private static final Logger logger = Logger.getLogger(BuildWatcher.class.getName()); - // now that listing interval is 5 minutes (used to be 10 seconds), we have - // seen - // timing windows where if the build watch events come before build config - // watch events - // when both are created in a simultaneous fashion, there is an up to 5 - // minute delay - // before the job run gets kicked off - // started seeing duplicate builds getting kicked off so quit depending on - // so moved off of concurrent hash set to concurrent hash map using - // namepace/name key - protected static final ConcurrentHashMap buildsWithNoBCList = new ConcurrentHashMap(); - @SuppressFBWarnings("EI_EXPOSE_REP2") public BuildWatcher(String namespace) { super(namespace); @@ -101,7 +68,7 @@ public void start() { // no BC list and see if we // can create job runs for premature builds we already know // about - BuildWatcher.flushBuildsWithNoBCList(); + BuildManager.flushBuildsWithNoBCList(); String ns = this.namespace; BuildList newBuilds = null; try { @@ -144,7 +111,7 @@ public void start() { } catch (Exception e) { logger.log(SEVERE, "Failed to load initial Builds: " + e, e); } - reconcileRunsAndBuilds(); + BuildManager.reconcileRunsAndBuilds(); } public void 
startAfterOnClose(String namespace) { @@ -165,13 +132,13 @@ public void eventReceived(Action action, Build build) { String name = build.getMetadata().getName(); switch (action) { case ADDED: - addEventToJenkinsJobRun(build); + BuildManager.addEventToJenkinsJobRun(build); break; case MODIFIED: - modifyEventToJenkinsJobRun(build); + BuildManager.modifyEventToJenkinsJobRun(build); break; case DELETED: - deleteEventToJenkinsJobRun(build); + BuildManager.deleteEventToJenkinsJobRun(build); break; case ERROR: logger.warning("watch for build " + name + " received error event "); @@ -269,7 +236,7 @@ public int compare(Build b1, Build b2) { List builds = buildConfigBuilds.getValue(); for (Build b : builds) { logger.info("skipping listed new build " + b.getMetadata().getName() + " no job at this time"); - addBuildToNoBCList(b); + BuildManager.addBuildToNoBCList(b); } continue; } @@ -278,7 +245,7 @@ public int compare(Build b1, Build b2) { List builds = buildConfigBuilds.getValue(); for (Build b : builds) { logger.info("skipping listed new build " + b.getMetadata().getName() + " no prop at this time"); - addBuildToNoBCList(b); + BuildManager.addBuildToNoBCList(b); } continue; } @@ -288,193 +255,4 @@ public int compare(Build b1, Build b2) { } } - private static void modifyEventToJenkinsJobRun(Build build) { - BuildStatus status = build.getStatus(); - if (status != null && isCancellable(status) && isCancelled(status)) { - WorkflowJob job = getJobFromBuild(build); - if (job != null) { - cancelBuild(job, build); - } else { - removeBuildFromNoBCList(build); - } - } else { - // see if any pre-BC cached builds can now be flushed - flushBuildsWithNoBCList(); - } - } - - public static boolean addEventToJenkinsJobRun(Build build) throws IOException { - // should have been caught upstack, but just in case since public method - if (!OpenShiftUtils.isPipelineStrategyBuild(build)) - return false; - BuildStatus status = build.getStatus(); - if (status != null) { - if (isCancelled(status)) { 
- updateOpenShiftBuildPhase(build, CANCELLED); - return false; - } - if (!isNew(status)) { - return false; - } - } - - WorkflowJob job = getJobFromBuild(build); - if (job != null) { - return triggerJob(job, build); - } - logger.info("skipping watch event for build " + build.getMetadata().getName() + " no job at this time"); - addBuildToNoBCList(build); - return false; - } - - private static void addBuildToNoBCList(Build build) { - // should have been caught upstack, but just in case since public method - if (!OpenShiftUtils.isPipelineStrategyBuild(build)) - return; - try { - buildsWithNoBCList.put(build.getMetadata().getNamespace() + build.getMetadata().getName(), build); - } catch (ConcurrentModificationException | IllegalArgumentException | UnsupportedOperationException - | NullPointerException e) { - logger.log(Level.WARNING, "Failed to add item " + build.getMetadata().getName(), e); - } - } - - private static void removeBuildFromNoBCList(Build build) { - buildsWithNoBCList.remove(build.getMetadata().getNamespace() + build.getMetadata().getName()); - } - - // trigger any builds whose watch events arrived before the - // corresponding build config watch events - public static void flushBuildsWithNoBCList() { - - ConcurrentHashMap clone = null; - synchronized (buildsWithNoBCList) { - clone = new ConcurrentHashMap(buildsWithNoBCList); - } - boolean anyRemoveFailures = false; - for (Build build : clone.values()) { - WorkflowJob job = getJobFromBuild(build); - if (job != null) { - try { - logger.info("triggering job run for previously skipped build " + build.getMetadata().getName()); - triggerJob(job, build); - } catch (IOException e) { - logger.log(Level.WARNING, "flushBuildsWithNoBCList", e); - } - try { - synchronized (buildsWithNoBCList) { - removeBuildFromNoBCList(build); - } - } catch (Throwable t) { - // TODO - // concurrent mod exceptions are not suppose to occur - // with concurrent hash set; this try/catch with log - // and the anyRemoveFailures post 
processing is a bit - // of safety paranoia until this proves to be true - // over extended usage ... probably can remove at some - // point - anyRemoveFailures = true; - logger.log(Level.WARNING, "flushBuildsWithNoBCList", t); - } - } - - synchronized (buildsWithNoBCList) { - if (anyRemoveFailures && buildsWithNoBCList.size() > 0) { - buildsWithNoBCList.clear(); - } - - } - } - } - - // innerDeleteEventToJenkinsJobRun is the actual delete logic at the heart - // of deleteEventToJenkinsJobRun - // that is either in a sync block or not based on the presence of a BC uid - private static void innerDeleteEventToJenkinsJobRun(final Build build) throws Exception { - final WorkflowJob job = getJobFromBuild(build); - if (job != null) { - ACL.impersonate(ACL.SYSTEM, new NotReallyRoleSensitiveCallable() { - @Override - public Void call() throws Exception { - cancelBuild(job, build, true); - return null; - } - }); - } else { - // in case build was created and deleted quickly, prior to seeing BC - // event, clear out from pre-BC cache - removeBuildFromNoBCList(build); - } - deleteRun(job, build); - } - - // in response to receiving an openshift delete build event, this method - // will drive - // the clean up of the Jenkins job run the build is mapped one to one with; - // as part of that - // clean up it will synchronize with the build config event watcher to - // handle build config - // delete events and build delete events that arrive concurrently and in a - // nondeterministic - // order - private static void deleteEventToJenkinsJobRun(final Build build) throws Exception { - List ownerRefs = build.getMetadata().getOwnerReferences(); - String bcUid = null; - for (OwnerReference ref : ownerRefs) { - if ("BuildConfig".equals(ref.getKind()) && ref.getUid() != null && ref.getUid().length() > 0) { - // employ intern to facilitate sync'ing on the same actual - // object - bcUid = ref.getUid().intern(); - synchronized (bcUid) { - // if entire job already deleted via bc delete, 
just return - if (getJobFromBuildConfigNameNamespace(getAnnotation(build, BUILDCONFIG_NAME), - build.getMetadata().getNamespace()) == null) { - return; - } - innerDeleteEventToJenkinsJobRun(build); - return; - } - } - } - // otherwise, if something odd is up and there is no parent BC, just - // clean up - innerDeleteEventToJenkinsJobRun(build); - } - - /** - * Reconciles Jenkins job runs and OpenShift builds - * - * Deletes all job runs that do not have an associated build in OpenShift - */ - private static void reconcileRunsAndBuilds() { - logger.fine("Reconciling job runs and builds"); - List jobs = Jenkins.getActiveInstance().getAllItems(WorkflowJob.class); - for (WorkflowJob job : jobs) { - BuildConfigProjectProperty property = job.getProperty(BuildConfigProjectProperty.class); - if (property != null) { - String ns = property.getNamespace(); - String name = property.getName(); - if (StringUtils.isNotBlank(ns) && StringUtils.isNotBlank(name)) { - logger.fine("Checking job " + job + " runs for BuildConfig " + ns + "/" + name); - OpenShiftClient client = getAuthenticatedOpenShiftClient(); - BuildList builds = client.builds().inNamespace(ns).withLabel("buildconfig=" + name).list(); - for (WorkflowRun run : job.getBuilds()) { - boolean found = false; - BuildCause cause = run.getCause(BuildCause.class); - for (Build build : builds.getItems()) { - if (cause != null && cause.getUid().equals(build.getMetadata().getUid())) { - found = true; - break; - } - } - if (!found) { - deleteRun(run); - } - } - } - } - - } - } - } diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/ConfigMapClusterInformer.java b/src/main/java/io/fabric8/jenkins/openshiftsync/ConfigMapClusterInformer.java new file mode 100644 index 000000000..590fefa48 --- /dev/null +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/ConfigMapClusterInformer.java @@ -0,0 +1,143 @@ +/** + * Copyright (C) 2017 Red Hat, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package io.fabric8.jenkins.openshiftsync; + +import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getInformerFactory; +import static io.fabric8.jenkins.openshiftsync.PodTemplateUtils.CONFIGMAP; + +import java.util.Arrays; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.concurrent.TimeUnit; + +import org.csanchez.jenkins.plugins.kubernetes.PodTemplate; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import io.fabric8.kubernetes.api.model.ConfigMap; +import io.fabric8.kubernetes.api.model.ObjectMeta; +import io.fabric8.kubernetes.client.informers.ResourceEventHandler; +import io.fabric8.kubernetes.client.informers.SharedIndexInformer; +import io.fabric8.kubernetes.client.informers.SharedInformerFactory; + +public class ConfigMapClusterInformer implements ResourceEventHandler, Lifecyclable { + + private static final Logger LOGGER = LoggerFactory.getLogger(SecretInformer.class.getName()); + private SharedIndexInformer informer; + private Set namespaces; + + public ConfigMapClusterInformer(String[] namespaces) { + this.namespaces = new HashSet<>(Arrays.asList(namespaces)); + } + + public int getListIntervalInSeconds() { + return 1_000 * GlobalPluginConfiguration.get().getConfigMapListInterval(); + } + + public void start() { + LOGGER.info("Starting cluster wide configMap informer for {} !!" 
+ namespaces); + LOGGER.debug("listing ConfigMap resources"); + SharedInformerFactory factory = getInformerFactory(); + this.informer = factory.sharedIndexInformerFor(ConfigMap.class, getListIntervalInSeconds()); + informer.addEventHandler(this); + LOGGER.info("ConfigMap informer started for namespaces: {}" + namespaces); +// ConfigMapList list = getOpenshiftClient().configMaps().inNamespace(namespace).list(); +// onInit(list.getItems()); + } + + public void stop() { + LOGGER.info("Stopping configMap informer {} !!" + namespaces); + this.informer.stop(); + } + + @Override + public void onAdd(ConfigMap obj) { + LOGGER.debug("ConfigMap informer received add event for: {}" + obj); + if (obj != null) { + ObjectMeta metadata = obj.getMetadata(); + String namespace = metadata.getNamespace(); + if (namespaces.contains(namespace)) { + String name = metadata.getName(); + LOGGER.info("ConfigMap informer received add event for: {}" + name); + List podTemplates = PodTemplateUtils.podTemplatesFromConfigMap(obj); + String uid = metadata.getUid(); + PodTemplateUtils.addAgents(podTemplates, CONFIGMAP, uid, name, namespace); + } else { + LOGGER.debug("Received event for a namespace we are not watching: {} ... ignoring", namespace); + } + } + } + + @Override + public void onUpdate(ConfigMap oldObj, ConfigMap newObj) { + LOGGER.debug("ConfigMap informer received update event for: {} to: {}" + oldObj + newObj); + if (oldObj != null) { + ObjectMeta oldMetadata = oldObj.getMetadata(); + String namespace = oldMetadata.getNamespace(); + if (namespaces.contains(namespace)) { + String oldRv = oldMetadata != null ? oldMetadata.getResourceVersion() : null; + ObjectMeta newMetadata = newObj.getMetadata(); + String newResourceVersion = newMetadata != null ? 
newMetadata.getResourceVersion() : null; + LOGGER.info("Update event received resource versions: {} to: {}" + oldRv + newResourceVersion); + List podTemplates = PodTemplateUtils.podTemplatesFromConfigMap(newObj); + ObjectMeta metadata = newMetadata; + String uid = metadata.getUid(); + String name = metadata.getName(); + LOGGER.info("ConfigMap informer received update event for: {}", name); + PodTemplateUtils.updateAgents(podTemplates, CONFIGMAP, uid, name, namespace); + } else { + LOGGER.debug("Received event for a namespace we are not watching: {} ... ignoring", namespace); + } + } + } + + @Override + public void onDelete(ConfigMap obj, boolean deletedFinalStateUnknown) { + LOGGER.debug("ConfigMap informer received delete event for: {}" + obj); + if (obj != null) { + ObjectMeta metadata = obj.getMetadata(); + String namespace = metadata.getNamespace(); + if (namespaces.contains(namespace)) { + List podTemplates = PodTemplateUtils.podTemplatesFromConfigMap(obj); + String uid = metadata.getUid(); + String name = metadata.getName(); + PodTemplateUtils.deleteAgents(podTemplates, CONFIGMAP, uid, name, namespace); + } else { + LOGGER.debug("Received event for a namespace we are not watching: {} ... 
ignoring", namespace); + } + } + } + + private void onInit(List list) { + if (list != null) { + for (ConfigMap configMap : list) { + PodTemplateUtils.addPodTemplateFromConfigMap(configMap); + } + } + } + + private void waitInformerSync(SharedIndexInformer informer) { + while (!informer.hasSynced()) { + LOGGER.info("Waiting informer to sync for " + namespaces); + try { + TimeUnit.SECONDS.sleep(5); + } catch (InterruptedException e) { + LOGGER.info("Interrupted waiting thread: " + e); + } + } + } +} diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/ConfigMapInformer.java b/src/main/java/io/fabric8/jenkins/openshiftsync/ConfigMapInformer.java index 7efcb3ecd..2501f584c 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/ConfigMapInformer.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/ConfigMapInformer.java @@ -31,7 +31,7 @@ import io.fabric8.kubernetes.client.informers.SharedIndexInformer; import io.fabric8.kubernetes.client.informers.SharedInformerFactory; -public class ConfigMapInformer extends ConfigMapWatcher implements ResourceEventHandler { +public class ConfigMapInformer extends ConfigMapWatcher implements ResourceEventHandler, Lifecyclable { private static final Logger LOGGER = LoggerFactory.getLogger(SecretInformer.class.getName()); private SharedIndexInformer informer; @@ -87,6 +87,7 @@ public void onUpdate(ConfigMap oldObj, ConfigMap newObj) { String uid = metadata.getUid(); String name = metadata.getName(); String namespace = metadata.getNamespace(); + LOGGER.info("ConfigMap informer received update event for: {}", name); PodTemplateUtils.updateAgents(podTemplates, CONFIGMAP, uid, name, namespace); } } diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/GlobalPluginConfiguration.java b/src/main/java/io/fabric8/jenkins/openshiftsync/GlobalPluginConfiguration.java index afd7f5c37..c8b419f85 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/GlobalPluginConfiguration.java +++ 
b/src/main/java/io/fabric8/jenkins/openshiftsync/GlobalPluginConfiguration.java @@ -16,6 +16,7 @@ package io.fabric8.jenkins.openshiftsync; import static hudson.security.ACL.SYSTEM; +import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getInformerFactory; import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getNamespaceOrUseDefault; import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getOpenShiftClient; import static java.util.concurrent.TimeUnit.SECONDS; @@ -36,6 +37,7 @@ import hudson.Extension; import hudson.Util; import hudson.util.ListBoxModel; +import io.fabric8.kubernetes.client.informers.SharedInformerFactory; import io.fabric8.openshift.client.OpenShiftClient; import jenkins.model.GlobalConfiguration; import jenkins.model.Jenkins; @@ -60,7 +62,7 @@ public class GlobalPluginConfiguration extends GlobalConfiguration { private int secretListInterval = 300; private int configMapListInterval = 300; private int imageStreamListInterval = 300; - + private static GlobalPluginConfigurationTimerTask TASK; private final static List> watchers = new ArrayList<>(); private transient ScheduledFuture schedule; @@ -247,14 +249,20 @@ void setNamespaces(String[] namespaces) { private synchronized void configChange() { logger.info("OpenShift Sync Plugin processing a newly supplied configuration"); synchronized (watchers) { - logger.info("Existing watchers: " + watchers); - for (BaseWatcher watch : watchers) { - watch.stop(); + OpenShiftClient client = OpenShiftUtils.getOpenShiftClient(); + if (client != null) { + if (TASK != null) { + TASK.stop(); + } + SharedInformerFactory informerFactory = getInformerFactory(); + if (informerFactory != null) { + informerFactory.stopAllRegisteredInformers(true); + } + logger.info("Existing watchers: stopped and cleared : " + watchers); + logger.info("Existing watchers: " + watchers); } - watchers.clear(); - logger.info("Existing watchers: stopped and cleared : " + watchers); - logger.info("Existing scheduled task: 
" + schedule); + logger.info("Existing scheduled task: " + schedule); if (this.schedule != null && !this.schedule.isCancelled()) { this.schedule.cancel(true); logger.info("Existing scheduled task cancelled: " + schedule); @@ -277,9 +285,9 @@ private synchronized void configChange() { this.namespaces = getNamespaceOrUseDefault(this.namespaces, openShiftClient); logger.info("OpenShift Client initialized: " + openShiftClient); - Runnable task = new GlobalPluginConfigurationTimerTask(this); + TASK = new GlobalPluginConfigurationTimerTask(this.namespaces); // lets give jenkins a while to get started ;) - this.schedule = Timer.get().schedule(task, 1, SECONDS); + this.schedule = Timer.get().schedule(TASK, 1, SECONDS); } catch (Exception e) { Throwable exceptionOrCause = (e.getCause() != null) ? e.getCause() : e; logger.log(SEVERE, "Failed to configure OpenShift Jenkins Sync Plugin: " + exceptionOrCause); diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/GlobalPluginConfigurationTimerTask.java b/src/main/java/io/fabric8/jenkins/openshiftsync/GlobalPluginConfigurationTimerTask.java index 6e59ebc75..db03748cc 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/GlobalPluginConfigurationTimerTask.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/GlobalPluginConfigurationTimerTask.java @@ -1,7 +1,6 @@ package io.fabric8.jenkins.openshiftsync; import static hudson.init.InitMilestone.COMPLETED; -import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getAuthenticatedOpenShiftClient; import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getInformerFactory; import java.util.ArrayList; @@ -10,18 +9,16 @@ import hudson.init.InitMilestone; import hudson.triggers.SafeTimerTask; -import io.fabric8.kubernetes.client.informers.SharedInformerFactory; -import io.fabric8.openshift.client.OpenShiftClient; import jenkins.model.Jenkins; public class GlobalPluginConfigurationTimerTask extends SafeTimerTask { private static final Logger logger = 
Logger.getLogger(GlobalPluginConfigurationTimerTask.class.getName()); + private String[] namespaces; + private final static List informers = new ArrayList<>(); - private GlobalPluginConfiguration globalPluginConfiguration; - - public GlobalPluginConfigurationTimerTask(GlobalPluginConfiguration globalPluginConfiguration) { - this.globalPluginConfiguration = globalPluginConfiguration; + public GlobalPluginConfigurationTimerTask(String[] namespaces) { + this.namespaces = namespaces; } @Override @@ -29,6 +26,7 @@ protected void doRun() throws Exception { try { logger.info("Confirming Jenkins is started"); while (true) { + @SuppressWarnings("deprecation") final Jenkins instance = Jenkins.getActiveInstance(); // We can look at Jenkins Init Level to see if we are ready to start. If we do // not wait, we risk the chance of a deadlock. @@ -45,51 +43,61 @@ protected void doRun() throws Exception { } } logger.info("Initializing all the watchers..."); - String[] namespaces = globalPluginConfiguration.getNamespaces(); - List> watchers = new ArrayList<>(); - for (String namespace : namespaces) { - BuildConfigInformer buildConfigInformer = new BuildConfigInformer(namespace); - watchers.add(buildConfigInformer); - buildConfigInformer.start(); + ConfigMapClusterInformer configMapInformer = new ConfigMapClusterInformer(namespaces); + configMapInformer.start(); + + SecretClusterInformer secretInformer = new SecretClusterInformer(namespaces); + secretInformer.start(); + + BuildConfigClusterInformer buildConfigInformer = new BuildConfigClusterInformer(namespaces); + buildConfigInformer.start(); - BuildInformer buildInformer = new BuildInformer(namespace); - buildInformer.start(); - watchers.add(buildInformer); + BuildClusterInformer buildInformer = new BuildClusterInformer(namespaces); + buildInformer.start(); - ConfigMapInformer configMapInformer = new ConfigMapInformer(namespace); - configMapInformer.start(); - watchers.add(configMapInformer); + ImageStreamClusterInformer 
imageStreamInformer = new ImageStreamClusterInformer(namespaces); + imageStreamInformer.start(); +// + +// List> watchers = new ArrayList<>(); + for (String namespace : namespaces) { +// BuildConfigInformer buildConfigInformer = new BuildConfigInformer(namespace); +// informers.add(buildConfigInformer); +// buildConfigInformer.start(); - ImageStreamInformer imageStreamInformer = new ImageStreamInformer(namespace); - imageStreamInformer.start(); - watchers.add(imageStreamInformer); +// BuildInformer buildInformer = new BuildInformer(namespace); +// buildInformer.start(); +// informers.add(buildInformer); + +// ConfigMapInformer configMapInformer = new ConfigMapInformer(namespace); +// configMapInformer.start(); +// informers.add(configMapInformer); +// +// ImageStreamInformer imageStreamInformer = new ImageStreamInformer(namespace); +// imageStreamInformer.start(); +// informers.add(imageStreamInformer); +// +// SecretInformer secretInformer = new SecretInformer(namespace); +// secretInformer.start(); +// informers.add(secretInformer); - SecretInformer secretInformer = new SecretInformer(namespace); - secretInformer.start(); - watchers.add(secretInformer); } - + logger.info("All the watchers have been registered!! ... 
starting all registered informers"); getInformerFactory().startAllRegisteredInformers(); logger.info("All registered informers have been started"); -// synchronized (watchers) { -// List> globalWatchers = GlobalPluginConfiguration.getWatchers(); -// synchronized (globalWatchers) { -// logger.info("Existing watchers: " + globalWatchers); -// for (BaseWatcher watch : globalWatchers) { -// watch.stop(); -// } -// globalWatchers.clear(); -// logger.info("Existing watchers: stopped and cleared : " + globalWatchers); -// globalWatchers.addAll(watchers); -// logger.info("New watchers created : " + globalWatchers.size()); -// -// } -// } } catch (Exception e) { logger.severe(e.toString()); e.printStackTrace(); } } + + public synchronized void stop() { + this.cancel(); + for (Lifecyclable informer : informers) { + informer.stop(); + } + informers.clear(); + } } diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/ImageStreamClusterInformer.java b/src/main/java/io/fabric8/jenkins/openshiftsync/ImageStreamClusterInformer.java new file mode 100644 index 000000000..624e3e3b8 --- /dev/null +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/ImageStreamClusterInformer.java @@ -0,0 +1,147 @@ +/** + * Copyright (C) 2017 Red Hat, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package io.fabric8.jenkins.openshiftsync; + +import static io.fabric8.jenkins.openshiftsync.Constants.IMAGESTREAM_AGENT_LABEL; +import static io.fabric8.jenkins.openshiftsync.Constants.IMAGESTREAM_AGENT_LABEL_VALUE; +import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getInformerFactory; +import static io.fabric8.jenkins.openshiftsync.PodTemplateUtils.IMAGESTREAM_TYPE; +import static io.fabric8.jenkins.openshiftsync.PodTemplateUtils.addAgents; +import static io.fabric8.jenkins.openshiftsync.PodTemplateUtils.addPodTemplate; +import static io.fabric8.jenkins.openshiftsync.PodTemplateUtils.deleteAgents; +import static io.fabric8.jenkins.openshiftsync.PodTemplateUtils.getPodTemplatesListFromImageStreams; +import static io.fabric8.jenkins.openshiftsync.PodTemplateUtils.hasPodTemplate; +import static io.fabric8.jenkins.openshiftsync.PodTemplateUtils.updateAgents; +import static java.util.Collections.singletonMap; + +import java.util.Arrays; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import org.csanchez.jenkins.plugins.kubernetes.PodTemplate; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import io.fabric8.kubernetes.api.model.ObjectMeta; +import io.fabric8.kubernetes.client.dsl.base.OperationContext; +import io.fabric8.kubernetes.client.informers.ResourceEventHandler; +import io.fabric8.kubernetes.client.informers.SharedIndexInformer; +import io.fabric8.kubernetes.client.informers.SharedInformerFactory; +import io.fabric8.openshift.api.model.ImageStream; + +public class ImageStreamClusterInformer implements ResourceEventHandler, Lifecyclable { + + private static final Logger LOGGER = LoggerFactory.getLogger(SecretInformer.class.getName()); + private SharedIndexInformer informer; + private Set namespaces; + + public ImageStreamClusterInformer(String[] namespaces) { + this.namespaces = new HashSet<>(Arrays.asList(namespaces)); + } + + public int getListIntervalInSeconds() { + return 
1_000 * GlobalPluginConfiguration.get().getImageStreamListInterval(); + } + + public void start() { + LOGGER.info("Starting ImageStream informer for {} !!" + namespaces); + LOGGER.debug("Listing ImageStream resources"); + SharedInformerFactory factory = getInformerFactory(); + Map labels = singletonMap(IMAGESTREAM_AGENT_LABEL, IMAGESTREAM_AGENT_LABEL_VALUE); + OperationContext withLabels = new OperationContext().withLabels(labels); + this.informer = factory.sharedIndexInformerFor(ImageStream.class, withLabels, getListIntervalInSeconds()); + informer.addEventHandler(this); + LOGGER.info("ImageStream informer started for namespace: {}" + namespaces); +// ImageStreamList list = getOpenshiftClient().imageStreams().inNamespace(namespace).withLabels(labels).list(); +// onInit(list.getItems()); + } + + public void stop() { + LOGGER.info("Stopping secret informer {} !!" + namespaces); + this.informer.stop(); + } + + @Override + public void onAdd(ImageStream obj) { + LOGGER.debug("ImageStream informer received add event for: {}" + obj); + if (obj != null) { + ObjectMeta metadata = obj.getMetadata(); + String namespace = metadata.getNamespace(); + if (namespaces.contains(namespace)) { + String name = metadata.getName(); + String uid = metadata.getUid(); + LOGGER.info("ImageStream informer received add event for: {}" + name); + List slaves = PodTemplateUtils.getPodTemplatesListFromImageStreams(obj); + addAgents(slaves, IMAGESTREAM_TYPE, uid, name, namespace); + } else { + LOGGER.debug("Received event for a namespace we are not watching: {} ... 
ignoring", namespace); + } + } + } + + @Override + public void onUpdate(ImageStream oldObj, ImageStream newObj) { + LOGGER.info("ImageStream informer received update event for: {} to: {}" + oldObj + newObj); + if (newObj != null) { + List slaves = PodTemplateUtils.getPodTemplatesListFromImageStreams(newObj); + ObjectMeta metadata = newObj.getMetadata(); + String namespace = metadata.getNamespace(); + if (namespaces.contains(namespace)) { + String uid = metadata.getUid(); + String name = metadata.getName(); + updateAgents(slaves, IMAGESTREAM_TYPE, uid, name, namespace); + } else { + LOGGER.debug("Received event for a namespace we are not watching: {} ... ignoring", namespace); + } + } + } + + @Override + public void onDelete(ImageStream obj, boolean deletedFinalStateUnknown) { + LOGGER.info("ImageStream informer received delete event for: {}" + obj); + if (obj != null) { + ObjectMeta metadata = obj.getMetadata(); + String namespace = metadata.getNamespace(); + if (namespaces.contains(namespace)) { + List slaves = PodTemplateUtils.getPodTemplatesListFromImageStreams(obj); + String uid = metadata.getUid(); + String name = metadata.getName(); + deleteAgents(slaves, IMAGESTREAM_TYPE, uid, name, namespace); + } else { + LOGGER.debug("Received event for a namespace we are not watching: {} ... 
ignoring", namespace); + } + } + } + + private void onInit(List list) { + for (ImageStream imageStream : list) { + try { + List agents = getPodTemplatesListFromImageStreams(imageStream); + for (PodTemplate podTemplate : agents) { + // watch event might beat the timer - put call is technically fine, but not + // addPodTemplate given k8s plugin issues + if (!hasPodTemplate(podTemplate)) { + addPodTemplate(podTemplate); + } + } + } catch (Exception e) { + LOGGER.error("Failed to update job", e); + } + } + } +} \ No newline at end of file diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/ImageStreamInformer.java b/src/main/java/io/fabric8/jenkins/openshiftsync/ImageStreamInformer.java index be3760d59..9a70fba06 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/ImageStreamInformer.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/ImageStreamInformer.java @@ -41,16 +41,16 @@ import io.fabric8.kubernetes.client.informers.SharedInformerFactory; import io.fabric8.openshift.api.model.ImageStream; -public class ImageStreamInformer extends ImageStreamWatcher implements ResourceEventHandler { +public class ImageStreamInformer implements ResourceEventHandler, Lifecyclable { private static final Logger LOGGER = LoggerFactory.getLogger(SecretInformer.class.getName()); private SharedIndexInformer informer; + private String namespace; public ImageStreamInformer(String namespace) { - super(namespace); + this.namespace = namespace; } - @Override public int getListIntervalInSeconds() { return 1_000 * GlobalPluginConfiguration.get().getImageStreamListInterval(); } diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/JenkinsUtils.java b/src/main/java/io/fabric8/jenkins/openshiftsync/JenkinsUtils.java index 7ee221d67..d69ce79ae 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/JenkinsUtils.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/JenkinsUtils.java @@ -21,7 +21,6 @@ import static 
io.fabric8.jenkins.openshiftsync.BuildPhases.PENDING; import static io.fabric8.jenkins.openshiftsync.BuildRunPolicy.SERIAL; import static io.fabric8.jenkins.openshiftsync.BuildRunPolicy.SERIAL_LATEST_ONLY; -import static io.fabric8.jenkins.openshiftsync.BuildWatcher.addEventToJenkinsJobRun; import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_ANNOTATIONS_BUILD_NUMBER; import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_BUILD_STATUS_FIELD; import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_LABELS_BUILD_CONFIG_NAME; @@ -783,7 +782,7 @@ public int compare(Build b1, Build b2) { } boolean buildAdded = false; try { - buildAdded = addEventToJenkinsJobRun(b); + buildAdded = BuildManager.addEventToJenkinsJobRun(b); } catch (IOException e) { ObjectMeta meta = b.getMetadata(); LOGGER.log(WARNING, "Failed to add new build " + meta.getNamespace() + "/" + meta.getName(), e); diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/JobProcessor.java b/src/main/java/io/fabric8/jenkins/openshiftsync/JobProcessor.java index 90b1f9d5a..f1268cd39 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/JobProcessor.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/JobProcessor.java @@ -1,6 +1,5 @@ package io.fabric8.jenkins.openshiftsync; - import static io.fabric8.jenkins.openshiftsync.Annotations.AUTOSTART; import static io.fabric8.jenkins.openshiftsync.Annotations.DISABLE_SYNC_CREATE; import static io.fabric8.jenkins.openshiftsync.BuildConfigToJobMap.getJobFromBuildConfig; @@ -40,155 +39,154 @@ public class JobProcessor extends NotReallyRoleSensitiveCallable { - private final BuildConfigWatcher jobProcessor; - private final BuildConfig buildConfig; + private final BuildConfig buildConfig; private final static Logger logger = Logger.getLogger(BuildConfigToJobMap.class.getName()); - public JobProcessor(BuildConfigWatcher buildConfigWatcher, BuildConfig buildConfig) { - jobProcessor = buildConfigWatcher; - this.buildConfig = 
buildConfig; - } - - @Override - public Void call() throws Exception { - Jenkins activeInstance = Jenkins.getActiveInstance(); - ItemGroup parent = activeInstance; - - String jobName = jenkinsJobName(buildConfig); - String jobFullName = jenkinsJobFullName(buildConfig); - WorkflowJob job = getJobFromBuildConfig(buildConfig); - - if (job == null) { - job = (WorkflowJob) activeInstance.getItemByFullName(jobFullName); - } - boolean newJob = job == null; - - if (newJob) { - String disableOn = getAnnotation(buildConfig, DISABLE_SYNC_CREATE); - if (disableOn != null && disableOn.length() > 0) { - logger.fine("Not creating missing jenkins job " + jobFullName + " due to annotation: " - + DISABLE_SYNC_CREATE); - return null; - } - parent = getFullNameParent(activeInstance, jobFullName, getNamespace(buildConfig)); - job = new WorkflowJob(parent, jobName); - } - BulkChange bulkJob = new BulkChange(job); - - job.setDisplayName(jenkinsJobDisplayName(buildConfig)); - - FlowDefinition flowFromBuildConfig = mapBuildConfigToFlow(buildConfig); - if (flowFromBuildConfig == null) { - return null; - } - Map paramMap = createOrUpdateJob(activeInstance, parent, jobName, job, newJob, - flowFromBuildConfig); - bulkJob.commit(); - populateNamespaceFolder(activeInstance, parent, jobName, job, paramMap); - return null; - } - - private void populateNamespaceFolder(Jenkins activeInstance, ItemGroup parent, String jobName, WorkflowJob job, - Map paramMap) throws IOException, AbortException { - String fullName = job.getFullName(); - WorkflowJob workflowJob = activeInstance.getItemByFullName(fullName, WorkflowJob.class); - if (workflowJob == null && parent instanceof Folder) { - // we should never need this but just in - // case there's an - // odd timing issue or something... 
- Folder folder = (Folder) parent; - folder.add(job, jobName); - workflowJob = activeInstance.getItemByFullName(fullName, WorkflowJob.class); - - } - if (workflowJob == null) { - logger.warning("Could not find created job " + fullName + " for BuildConfig: " + getNamespace(buildConfig) - + "/" + getName(buildConfig)); - } else { - JenkinsUtils.verifyEnvVars(paramMap, workflowJob, buildConfig); - putJobWithBuildConfig(workflowJob, buildConfig); - } - } - - private Map createOrUpdateJob(Jenkins activeInstance, ItemGroup parent, String jobName, - WorkflowJob job, boolean newJob, FlowDefinition flowFromBuildConfig) throws IOException { - job.setDefinition(flowFromBuildConfig); - - String existingBuildRunPolicy = null; - - BuildConfigProjectProperty buildConfigProjectProperty = job.getProperty(BuildConfigProjectProperty.class); - existingBuildRunPolicy = populateBCProjectProperty(job, existingBuildRunPolicy, buildConfigProjectProperty); - - // (re)populate job param list with any envs - // from the build config - Map paramMap = JenkinsUtils.addJobParamForBuildEnvs(job, - buildConfig.getSpec().getStrategy().getJenkinsPipelineStrategy(), true); - - job.setConcurrentBuild(!(buildConfig.getSpec().getRunPolicy().equals(SERIAL) - || buildConfig.getSpec().getRunPolicy().equals(SERIAL_LATEST_ONLY))); - - InputStream jobStream = new StringInputStream(new XStream2().toXML(job)); - - if (newJob) { - try { - if (parent instanceof Folder) { - Folder folder = (Folder) parent; - folder.createProjectFromXML(jobName, jobStream).save(); - } else { - activeInstance.createProjectFromXML(jobName, jobStream).save(); - } - - logger.info("Created job " + jobName + " from BuildConfig " + NamespaceName.create(buildConfig) - + " with revision: " + buildConfig.getMetadata().getResourceVersion()); - - String autostart = getAnnotation(buildConfig, AUTOSTART); - if (Boolean.parseBoolean(autostart)) { - logger.info("Automatically starting job " + jobName + " from BuildConfig " - + 
NamespaceName.create(buildConfig) + " with revision: " + buildConfig.getMetadata().getResourceVersion()); - job.scheduleBuild2(0); - } - } catch (IllegalArgumentException e) { - // see - // https://github.com/openshift/jenkins-sync-plugin/issues/117, - // jenkins might reload existing jobs on - // startup between the - // newJob check above and when we make - // the createProjectFromXML call; if so, - // retry as an update - updateJob(job, jobStream, existingBuildRunPolicy, buildConfigProjectProperty); - logger.info("Updated job " + jobName + " from BuildConfig " + NamespaceName.create(buildConfig) - + " with revision: " + buildConfig.getMetadata().getResourceVersion()); - } - } else { - updateJob(job, jobStream, existingBuildRunPolicy, buildConfigProjectProperty); - logger.info("Updated job " + jobName + " from BuildConfig " + NamespaceName.create(buildConfig) - + " with revision: " + buildConfig.getMetadata().getResourceVersion()); - } - return paramMap; - } - - private String populateBCProjectProperty(WorkflowJob job, String existingBuildRunPolicy, - BuildConfigProjectProperty buildConfigProjectProperty) throws IOException { - if (buildConfigProjectProperty != null) { - existingBuildRunPolicy = buildConfigProjectProperty.getBuildRunPolicy(); - long updatedBCResourceVersion = parseResourceVersion(buildConfig); - long oldBCResourceVersion = parseResourceVersion(buildConfigProjectProperty.getResourceVersion()); - BuildConfigProjectProperty newProperty = new BuildConfigProjectProperty(buildConfig); - if (updatedBCResourceVersion <= oldBCResourceVersion - && newProperty.getUid().equals(buildConfigProjectProperty.getUid()) - && newProperty.getNamespace().equals(buildConfigProjectProperty.getNamespace()) - && newProperty.getName().equals(buildConfigProjectProperty.getName()) - && newProperty.getBuildRunPolicy().equals(buildConfigProjectProperty.getBuildRunPolicy())) { - return null; - } - buildConfigProjectProperty.setUid(newProperty.getUid()); - 
buildConfigProjectProperty.setNamespace(newProperty.getNamespace()); - buildConfigProjectProperty.setName(newProperty.getName()); - buildConfigProjectProperty.setResourceVersion(newProperty.getResourceVersion()); - buildConfigProjectProperty.setBuildRunPolicy(newProperty.getBuildRunPolicy()); - } else { - job.addProperty(new BuildConfigProjectProperty(buildConfig)); - } - return existingBuildRunPolicy; - } + public JobProcessor(BuildConfig buildConfig) { + this.buildConfig = buildConfig; + } + + @Override + public Void call() throws Exception { + Jenkins activeInstance = Jenkins.getActiveInstance(); + ItemGroup parent = activeInstance; + + String jobName = jenkinsJobName(buildConfig); + String jobFullName = jenkinsJobFullName(buildConfig); + WorkflowJob job = getJobFromBuildConfig(buildConfig); + + if (job == null) { + job = (WorkflowJob) activeInstance.getItemByFullName(jobFullName); + } + boolean newJob = job == null; + + if (newJob) { + String disableOn = getAnnotation(buildConfig, DISABLE_SYNC_CREATE); + if (disableOn != null && disableOn.length() > 0) { + logger.fine("Not creating missing jenkins job " + jobFullName + " due to annotation: " + + DISABLE_SYNC_CREATE); + return null; + } + parent = getFullNameParent(activeInstance, jobFullName, getNamespace(buildConfig)); + job = new WorkflowJob(parent, jobName); + } + BulkChange bulkJob = new BulkChange(job); + + job.setDisplayName(jenkinsJobDisplayName(buildConfig)); + + FlowDefinition flowFromBuildConfig = mapBuildConfigToFlow(buildConfig); + if (flowFromBuildConfig == null) { + return null; + } + Map paramMap = createOrUpdateJob(activeInstance, parent, jobName, job, newJob, + flowFromBuildConfig); + bulkJob.commit(); + populateNamespaceFolder(activeInstance, parent, jobName, job, paramMap); + return null; + } + + private void populateNamespaceFolder(Jenkins activeInstance, ItemGroup parent, String jobName, WorkflowJob job, + Map paramMap) throws IOException, AbortException { + String fullName = 
job.getFullName(); + WorkflowJob workflowJob = activeInstance.getItemByFullName(fullName, WorkflowJob.class); + if (workflowJob == null && parent instanceof Folder) { + // we should never need this but just in + // case there's an + // odd timing issue or something... + Folder folder = (Folder) parent; + folder.add(job, jobName); + workflowJob = activeInstance.getItemByFullName(fullName, WorkflowJob.class); + + } + if (workflowJob == null) { + logger.warning("Could not find created job " + fullName + " for BuildConfig: " + getNamespace(buildConfig) + + "/" + getName(buildConfig)); + } else { + JenkinsUtils.verifyEnvVars(paramMap, workflowJob, buildConfig); + putJobWithBuildConfig(workflowJob, buildConfig); + } + } + + private Map createOrUpdateJob(Jenkins activeInstance, ItemGroup parent, String jobName, + WorkflowJob job, boolean newJob, FlowDefinition flowFromBuildConfig) throws IOException { + job.setDefinition(flowFromBuildConfig); + + String existingBuildRunPolicy = null; + + BuildConfigProjectProperty buildConfigProjectProperty = job.getProperty(BuildConfigProjectProperty.class); + existingBuildRunPolicy = populateBCProjectProperty(job, existingBuildRunPolicy, buildConfigProjectProperty); + + // (re)populate job param list with any envs + // from the build config + Map paramMap = JenkinsUtils.addJobParamForBuildEnvs(job, + buildConfig.getSpec().getStrategy().getJenkinsPipelineStrategy(), true); + + job.setConcurrentBuild(!(buildConfig.getSpec().getRunPolicy().equals(SERIAL) + || buildConfig.getSpec().getRunPolicy().equals(SERIAL_LATEST_ONLY))); + + InputStream jobStream = new StringInputStream(new XStream2().toXML(job)); + + if (newJob) { + try { + if (parent instanceof Folder) { + Folder folder = (Folder) parent; + folder.createProjectFromXML(jobName, jobStream).save(); + } else { + activeInstance.createProjectFromXML(jobName, jobStream).save(); + } + + logger.info("Created job " + jobName + " from BuildConfig " + NamespaceName.create(buildConfig) + + " with 
revision: " + buildConfig.getMetadata().getResourceVersion()); + + String autostart = getAnnotation(buildConfig, AUTOSTART); + if (Boolean.parseBoolean(autostart)) { + logger.info("Automatically starting job " + jobName + " from BuildConfig " + + NamespaceName.create(buildConfig) + " with revision: " + + buildConfig.getMetadata().getResourceVersion()); + job.scheduleBuild2(0); + } + } catch (IllegalArgumentException e) { + // see + // https://github.com/openshift/jenkins-sync-plugin/issues/117, + // jenkins might reload existing jobs on + // startup between the + // newJob check above and when we make + // the createProjectFromXML call; if so, + // retry as an update + updateJob(job, jobStream, existingBuildRunPolicy, buildConfigProjectProperty); + logger.info("Updated job " + jobName + " from BuildConfig " + NamespaceName.create(buildConfig) + + " with revision: " + buildConfig.getMetadata().getResourceVersion()); + } + } else { + updateJob(job, jobStream, existingBuildRunPolicy, buildConfigProjectProperty); + logger.info("Updated job " + jobName + " from BuildConfig " + NamespaceName.create(buildConfig) + + " with revision: " + buildConfig.getMetadata().getResourceVersion()); + } + return paramMap; + } + + private String populateBCProjectProperty(WorkflowJob job, String existingBuildRunPolicy, + BuildConfigProjectProperty buildConfigProjectProperty) throws IOException { + if (buildConfigProjectProperty != null) { + existingBuildRunPolicy = buildConfigProjectProperty.getBuildRunPolicy(); + long updatedBCResourceVersion = parseResourceVersion(buildConfig); + long oldBCResourceVersion = parseResourceVersion(buildConfigProjectProperty.getResourceVersion()); + BuildConfigProjectProperty newProperty = new BuildConfigProjectProperty(buildConfig); + if (updatedBCResourceVersion <= oldBCResourceVersion + && newProperty.getUid().equals(buildConfigProjectProperty.getUid()) + && newProperty.getNamespace().equals(buildConfigProjectProperty.getNamespace()) + && 
newProperty.getName().equals(buildConfigProjectProperty.getName()) + && newProperty.getBuildRunPolicy().equals(buildConfigProjectProperty.getBuildRunPolicy())) { + return null; + } + buildConfigProjectProperty.setUid(newProperty.getUid()); + buildConfigProjectProperty.setNamespace(newProperty.getNamespace()); + buildConfigProjectProperty.setName(newProperty.getName()); + buildConfigProjectProperty.setResourceVersion(newProperty.getResourceVersion()); + buildConfigProjectProperty.setBuildRunPolicy(newProperty.getBuildRunPolicy()); + } else { + job.addProperty(new BuildConfigProjectProperty(buildConfig)); + } + return existingBuildRunPolicy; + } } \ No newline at end of file diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/Lifecyclable.java b/src/main/java/io/fabric8/jenkins/openshiftsync/Lifecyclable.java new file mode 100644 index 000000000..ca9bd080a --- /dev/null +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/Lifecyclable.java @@ -0,0 +1,7 @@ +package io.fabric8.jenkins.openshiftsync; + +public interface Lifecyclable { + public void stop(); + public void start(); + +} diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/SecretClusterInformer.java b/src/main/java/io/fabric8/jenkins/openshiftsync/SecretClusterInformer.java new file mode 100644 index 000000000..8f9eb616d --- /dev/null +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/SecretClusterInformer.java @@ -0,0 +1,136 @@ +/** + * Copyright (C) 2017 Red Hat, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package io.fabric8.jenkins.openshiftsync; + +import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_LABELS_SECRET_CREDENTIAL_SYNC; +import static io.fabric8.jenkins.openshiftsync.Constants.VALUE_SECRET_SYNC; +import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getInformerFactory; +import static java.util.Collections.singletonMap; + +import java.util.Arrays; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import io.fabric8.kubernetes.api.model.ObjectMeta; +import io.fabric8.kubernetes.api.model.Secret; +import io.fabric8.kubernetes.client.dsl.base.OperationContext; +import io.fabric8.kubernetes.client.informers.ResourceEventHandler; +import io.fabric8.kubernetes.client.informers.SharedIndexInformer; +import io.fabric8.kubernetes.client.informers.SharedInformerFactory; + +public class SecretClusterInformer implements ResourceEventHandler, Lifecyclable { + + private static final Logger LOGGER = LoggerFactory.getLogger(SecretClusterInformer.class.getName()); + + private final static ConcurrentHashMap trackedSecrets = new ConcurrentHashMap(); + + private SharedIndexInformer informer; + private Set namespaces; + + public SecretClusterInformer(String[] namespaces) { + this.namespaces = new HashSet<>(Arrays.asList(namespaces)); + } + + public int getListIntervalInSeconds() { + return 1_000 * GlobalPluginConfiguration.get().getSecretListInterval(); + } + + public void start() { + LOGGER.info("Starting cluster wide secret informer {} !!" 
+ namespaces); + LOGGER.debug("listing Secret resources"); + SharedInformerFactory factory = getInformerFactory(); + Map labels = singletonMap(OPENSHIFT_LABELS_SECRET_CREDENTIAL_SYNC, VALUE_SECRET_SYNC); + OperationContext withLabels = new OperationContext().withLabels(labels); + this.informer = factory.sharedIndexInformerFor(Secret.class, withLabels, getListIntervalInSeconds()); + informer.addEventHandler(this); + LOGGER.info("Secret informer started for namespace: {}" + namespaces); +// SecretList list = getOpenshiftClient().secrets().inNamespace(namespace).withLabels(labels).list(); +// onInit(list.getItems()); + } + + public void stop() { + LOGGER.info("Stopping secret informer {} !!" + namespaces); + this.informer.stop(); + } + + @Override + public void onAdd(Secret obj) { + LOGGER.debug("Secret informer received add event for: {}" + obj); + if (obj != null) { + ObjectMeta metadata = obj.getMetadata(); + String namespace = metadata.getNamespace(); + if (namespaces.contains(namespace)) { + String name = metadata.getName(); + LOGGER.info("Secret informer received add event for: {}" + name); + SecretManager.insertOrUpdateCredentialFromSecret(obj); + } else { + LOGGER.debug("Received event for a namespace we are not watching: {} ... ignoring", namespace); + } + } + } + + @Override + public void onUpdate(Secret oldObj, Secret newObj) { + LOGGER.debug("Secret informer received update event for: {} to: {}" + oldObj + newObj); + if (oldObj != null) { + ObjectMeta metadata = oldObj.getMetadata(); + String namespace = metadata.getNamespace(); + if (namespaces.contains(namespace)) { + String name = metadata.getName(); + LOGGER.info("Secret informer received update event for: {}", name); + SecretManager.updateCredential(newObj); + } else { + LOGGER.debug("Received event for a namespace we are not watching: {} ... 
ignoring", namespace); + } + } + } + + @Override + public void onDelete(Secret obj, boolean deletedFinalStateUnknown) { + LOGGER.debug("Secret informer received delete event for: {}", obj); + if (obj != null) { + ObjectMeta metadata = obj.getMetadata(); + String namespace = metadata.getNamespace(); + if (namespaces.contains(namespace)) { + String name = obj.getMetadata().getName(); + LOGGER.info("Secret informer received delete event for: {}", name); + CredentialsUtils.deleteCredential(obj); + } else { + LOGGER.debug("Received event for a namespace we are not watching: {} ... ignoring", namespace); + } + } + } + + private void onInit(List list) { + for (Secret secret : list) { + try { + if (SecretManager.validSecret(secret) && SecretManager.shouldProcessSecret(secret)) { + SecretManager.insertOrUpdateCredentialFromSecret(secret); + trackedSecrets.put(secret.getMetadata().getUid(), secret.getMetadata().getResourceVersion()); + } + } catch (Exception e) { + LOGGER.error("Failed to update secred", e); + } + } + } + +} diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/SecretInformer.java b/src/main/java/io/fabric8/jenkins/openshiftsync/SecretInformer.java index f6e1681ef..fe3d4b5e6 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/SecretInformer.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/SecretInformer.java @@ -34,7 +34,7 @@ import io.fabric8.kubernetes.client.informers.SharedIndexInformer; import io.fabric8.kubernetes.client.informers.SharedInformerFactory; -public class SecretInformer extends SecretWatcher implements ResourceEventHandler { +public class SecretInformer extends SecretWatcher implements ResourceEventHandler, Lifecyclable { private static final Logger LOGGER = LoggerFactory.getLogger(SecretInformer.class.getName()); @@ -76,22 +76,26 @@ public void onAdd(Secret obj) { ObjectMeta metadata = obj.getMetadata(); String name = metadata.getName(); LOGGER.info("Secret informer received add event for: {}" + name); - 
insertOrUpdateCredentialFromSecret(obj); + SecretManager.insertOrUpdateCredentialFromSecret(obj); } } @Override public void onUpdate(Secret oldObj, Secret newObj) { - LOGGER.info("Secret informer received update event for: {} to: {}" + oldObj + newObj); + LOGGER.debug("Secret informer received update event for: {} to: {}" + oldObj + newObj); if (oldObj != null) { - updateCredential(newObj); + final String name = oldObj.getMetadata().getName(); + LOGGER.info("Secret informer received update event for: {}", name); + SecretManager.updateCredential(newObj); } } @Override public void onDelete(Secret obj, boolean deletedFinalStateUnknown) { - LOGGER.info("Secret informer received delete event for: {}" + obj); + LOGGER.debug("Secret informer received delete event for: {}", obj); if (obj != null) { + final String name = obj.getMetadata().getName(); + LOGGER.info("Secret informer received delete event for: {}", name); CredentialsUtils.deleteCredential(obj); } } @@ -99,8 +103,8 @@ public void onDelete(Secret obj, boolean deletedFinalStateUnknown) { private void onInit(List list) { for (Secret secret : list) { try { - if (validSecret(secret) && shouldProcessSecret(secret)) { - insertOrUpdateCredentialFromSecret(secret); + if ( SecretManager.validSecret(secret) && SecretManager.shouldProcessSecret(secret)) { + SecretManager. 
insertOrUpdateCredentialFromSecret(secret); trackedSecrets.put(secret.getMetadata().getUid(), secret.getMetadata().getResourceVersion()); } } catch (Exception e) { diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/SecretManager.java b/src/main/java/io/fabric8/jenkins/openshiftsync/SecretManager.java new file mode 100644 index 000000000..87ff5d845 --- /dev/null +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/SecretManager.java @@ -0,0 +1,111 @@ +package io.fabric8.jenkins.openshiftsync; + +import static java.util.logging.Level.SEVERE; + +import java.io.IOException; +import java.util.List; +import java.util.concurrent.ConcurrentHashMap; +import java.util.logging.Logger; + +import io.fabric8.kubernetes.api.model.ObjectMeta; +import io.fabric8.kubernetes.api.model.Secret; +import io.fabric8.kubernetes.api.model.SecretList; + +public class SecretManager { + + private final static Logger logger = Logger.getLogger(SecretManager.class.getName()); + private final static ConcurrentHashMap trackedSecrets = new ConcurrentHashMap<>(); + + public static void insertOrUpdateCredentialFromSecret(final Secret secret) { + if (secret != null) { + ObjectMeta metadata = secret.getMetadata(); + if (metadata != null) { + logger.info("Upserting Secret with Uid " + metadata.getUid() + " with Name " + metadata.getName()); + if (validSecret(secret)) { + try { + CredentialsUtils.upsertCredential(secret); + trackedSecrets.put(metadata.getUid(), metadata.getResourceVersion()); + } catch (IOException e) { + logger.log(SEVERE, "Credential has not been saved: " + e, e); + throw new RuntimeException(e); + } + } + } + } + } + + static void onInitialSecrets(SecretList secrets) { + if (secrets == null) + return; + List items = secrets.getItems(); + if (items != null) { + for (Secret secret : items) { + try { + if (validSecret(secret) && shouldProcessSecret(secret)) { + insertOrUpdateCredentialFromSecret(secret); + trackedSecrets.put(secret.getMetadata().getUid(), 
secret.getMetadata().getResourceVersion()); + } + } catch (Exception e) { + logger.log(SEVERE, "Failed to update job", e); + } + } + } + } + + protected static void updateCredential(Secret secret) { + if (secret != null) { + ObjectMeta metadata = secret.getMetadata(); + if (metadata != null) { + logger.info("Modifying Secret with Uid " + metadata.getUid() + " with Name " + metadata.getName()); + if (validSecret(secret) && shouldProcessSecret(secret)) { + try { + CredentialsUtils.upsertCredential(secret); + trackedSecrets.put(metadata.getUid(), metadata.getResourceVersion()); + } catch (IOException e) { + logger.log(SEVERE, "Secret has not been saved: " + e, e); + throw new RuntimeException(e); + } + } + } + } + } + + protected static boolean validSecret(Secret secret) { + if (secret != null) { + ObjectMeta metadata = secret.getMetadata(); + if (metadata != null) { + String name = metadata.getName(); + String namespace = metadata.getNamespace(); + logger.info("Validating Secret with Uid " + metadata.getUid() + " with Name " + name); + return name != null && !name.isEmpty() && namespace != null && !namespace.isEmpty(); + } + } + return false; + } + + protected static boolean shouldProcessSecret(Secret secret) { + if (secret != null) { + ObjectMeta metadata = secret.getMetadata(); + if (metadata != null) { + String uid = metadata.getUid(); + String rv = metadata.getResourceVersion(); + String oldResourceVersion = trackedSecrets.get(uid); + if (oldResourceVersion == null || !oldResourceVersion.equals(rv)) { + return true; + } + } + } + return false; + } + + static void deleteCredential(final Secret secret) throws Exception { + if (secret != null) { + ObjectMeta metadata = secret.getMetadata(); + if (metadata != null) { + trackedSecrets.remove(metadata.getUid()); + CredentialsUtils.deleteCredential(secret); + } + } + } + +} diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/SecretWatcher.java b/src/main/java/io/fabric8/jenkins/openshiftsync/SecretWatcher.java 
index d9ef4f438..fd0ddda9d 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/SecretWatcher.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/SecretWatcher.java @@ -38,13 +38,11 @@ * Jenkins */ public class SecretWatcher extends BaseWatcher { - private ConcurrentHashMap trackedSecrets; private final Logger logger = Logger.getLogger(getClass().getName()); @SuppressFBWarnings("EI_EXPOSE_REP2") public SecretWatcher(String namespace) { super(namespace); - this.trackedSecrets = new ConcurrentHashMap<>(); } @Override @@ -62,7 +60,7 @@ public void start() { logger.fine("listing Secrets resources"); secrets = getAuthenticatedOpenShiftClient().secrets().inNamespace(ns) .withLabel(Constants.OPENSHIFT_LABELS_SECRET_CREDENTIAL_SYNC, Constants.VALUE_SECRET_SYNC).list(); - onInitialSecrets(secrets); + SecretManager.onInitialSecrets(secrets); logger.fine("handled Secrets resources"); } catch (Exception e) { logger.log(SEVERE, "Failed to load Secrets: " + e, e); @@ -91,25 +89,9 @@ public void start() { } } - private void onInitialSecrets(SecretList secrets) { - if (secrets == null) - return; - if (trackedSecrets == null) - trackedSecrets = new ConcurrentHashMap(); - List items = secrets.getItems(); - if (items != null) { - for (Secret secret : items) { - try { - if (validSecret(secret) && shouldProcessSecret(secret)) { - insertOrUpdateCredentialFromSecret(secret); - trackedSecrets.put(secret.getMetadata().getUid(), secret.getMetadata().getResourceVersion()); - } - } catch (Exception e) { - logger.log(SEVERE, "Failed to update job", e); - } - } - } - } + + + @SuppressFBWarnings("SF_SWITCH_NO_DEFAULT") @Override @@ -121,13 +103,13 @@ public void eventReceived(Action action, Secret secret) { try { switch (action) { case ADDED: - insertOrUpdateCredentialFromSecret(secret); + SecretManager.insertOrUpdateCredentialFromSecret(secret); break; case DELETED: - deleteCredential(secret); + SecretManager.deleteCredential(secret); break; case MODIFIED: - 
updateCredential(secret); + SecretManager.updateCredential(secret); break; case ERROR: logger.warning("watch for secret " + secret.getMetadata().getName() + " received error event "); @@ -142,77 +124,8 @@ public void eventReceived(Action action, Secret secret) { } } - protected void insertOrUpdateCredentialFromSecret(final Secret secret) { - if (secret != null) { - ObjectMeta metadata = secret.getMetadata(); - if (metadata != null) { - logger.info("Upserting Secret with Uid " + metadata.getUid() + " with Name " + metadata.getName()); - if (validSecret(secret)) { - try { - CredentialsUtils.upsertCredential(secret); - trackedSecrets.put(metadata.getUid(), metadata.getResourceVersion()); - } catch (IOException e) { - logger.log(SEVERE, "Credential has not been saved: " + e, e); - throw new RuntimeException(e); - } - } - } - } - } + - protected void updateCredential(Secret secret) { - if (secret != null) { - ObjectMeta metadata = secret.getMetadata(); - if (metadata != null) { - logger.info("Modifying Secret with Uid " + metadata.getUid() + " with Name " + metadata.getName()); - if (validSecret(secret) && shouldProcessSecret(secret)) { - try { - CredentialsUtils.upsertCredential(secret); - trackedSecrets.put(metadata.getUid(), metadata.getResourceVersion()); - } catch (IOException e) { - logger.log(SEVERE, "Secret has not been saved: " + e, e); - throw new RuntimeException(e); - } - } - } - } - } - - protected boolean validSecret(Secret secret) { - if (secret != null) { - ObjectMeta metadata = secret.getMetadata(); - if (metadata != null) { - String name = metadata.getName(); - String namespace = metadata.getNamespace(); - logger.info("Validating Secret with Uid " + metadata.getUid() + " with Name " + name); - return name != null && !name.isEmpty() && namespace != null && !namespace.isEmpty(); - } - } - return false; - } - - protected boolean shouldProcessSecret(Secret secret) { - if (secret != null) { - ObjectMeta metadata = secret.getMetadata(); - if (metadata != 
null) { - String uid = metadata.getUid(); - String rv = metadata.getResourceVersion(); - String oldResourceVersion = trackedSecrets.get(uid); - if (oldResourceVersion == null || !oldResourceVersion.equals(rv)) { - return true; - } - } - } - return false; - } - - private void deleteCredential(final Secret secret) throws Exception { - if (secret != null) { - ObjectMeta metadata = secret.getMetadata(); - if (metadata != null) { - trackedSecrets.remove(metadata.getUid()); - CredentialsUtils.deleteCredential(secret); - } - } - } + + } From 1e4d1dde1487860d5c3f207fdfb573d34c694d2d Mon Sep 17 00:00:00 2001 From: Akram Ben Aissi Date: Mon, 26 Apr 2021 16:05:08 +0200 Subject: [PATCH 14/22] Add cluster mode --- .../jenkins/openshiftsync/BaseWatcher.java | 2 +- .../openshiftsync/BuildClusterInformer.java | 1 + .../BuildConfigClusterInformer.java | 1 + .../openshiftsync/BuildConfigInformer.java | 5 +- .../openshiftsync/BuildConfigWatcher.java | 2 +- .../jenkins/openshiftsync/BuildInformer.java | 5 +- .../openshiftsync/BuildSyncRunListener.java | 5 +- .../jenkins/openshiftsync/BuildWatcher.java | 2 +- .../ConfigMapClusterInformer.java | 1 + .../openshiftsync/ConfigMapInformer.java | 5 +- .../openshiftsync/ConfigMapWatcher.java | 2 +- .../GlobalPluginConfiguration.java | 221 +++++++++++++----- .../GlobalPluginConfigurationTimerTask.java | 157 ++++++++----- .../ImageStreamClusterInformer.java | 5 +- .../openshiftsync/ImageStreamInformer.java | 5 +- .../openshiftsync/ImageStreamWatcher.java | 2 +- .../jenkins/openshiftsync/OpenShiftUtils.java | 40 +++- .../openshiftsync/SecretClusterInformer.java | 5 +- .../jenkins/openshiftsync/SecretInformer.java | 6 +- .../jenkins/openshiftsync/SecretWatcher.java | 2 +- .../GlobalPluginConfiguration/config.jelly | 79 ++++--- 21 files changed, 380 insertions(+), 173 deletions(-) diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/BaseWatcher.java b/src/main/java/io/fabric8/jenkins/openshiftsync/BaseWatcher.java index 031eff507..2e3d4106b 
100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/BaseWatcher.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/BaseWatcher.java @@ -42,7 +42,7 @@ public BaseWatcher(String namespace) { this.namespace = namespace; } - public abstract int getListIntervalInSeconds(); + public abstract int getResyncPeriodMilliseconds(); protected abstract void start(); diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/BuildClusterInformer.java b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildClusterInformer.java index 28cf560f4..052e8dd5a 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/BuildClusterInformer.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildClusterInformer.java @@ -65,6 +65,7 @@ public void start() { SharedInformerFactory factory = getInformerFactory(); this.informer = factory.sharedIndexInformerFor(Build.class, getListIntervalInSeconds()); this.informer.addEventHandler(this); + factory.startAllRegisteredInformers(); LOGGER.info("Build informer started for namespace: {}" + namespaces); // BuildList list = getOpenshiftClient().builds().inNamespace(namespace).list(); // onInit(list.getItems()); diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/BuildConfigClusterInformer.java b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildConfigClusterInformer.java index e41d09a0e..40aaea47f 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/BuildConfigClusterInformer.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildConfigClusterInformer.java @@ -56,6 +56,7 @@ public void start() { SharedInformerFactory factory = getInformerFactory(); this.informer = factory.sharedIndexInformerFor(BuildConfig.class, getListIntervalInSeconds()); informer.addEventHandler(this); + factory.startAllRegisteredInformers(); LOGGER.info("BuildConfig informer started for namespace: {}" + namespaces); // BuildConfigList list = // getOpenshiftClient().buildConfigs().inNamespace(namespace).list(); diff --git 
a/src/main/java/io/fabric8/jenkins/openshiftsync/BuildConfigInformer.java b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildConfigInformer.java index e3a6be428..e995a8468 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/BuildConfigInformer.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildConfigInformer.java @@ -43,7 +43,7 @@ public BuildConfigInformer(String namespace) { this.namespace = namespace; } - public int getListIntervalInSeconds() { + public int getResyncPeriodMilliseconds() { return 1_000 * GlobalPluginConfiguration.get().getBuildConfigListInterval(); } @@ -51,8 +51,9 @@ public void start() { LOGGER.info("Starting BuildConfig informer for {} !!" + namespace); LOGGER.debug("listing BuildConfig resources"); SharedInformerFactory factory = getInformerFactory().inNamespace(namespace); - this.informer = factory.sharedIndexInformerFor(BuildConfig.class, getListIntervalInSeconds()); + this.informer = factory.sharedIndexInformerFor(BuildConfig.class, getResyncPeriodMilliseconds()); informer.addEventHandler(this); + factory.startAllRegisteredInformers(); LOGGER.info("BuildConfig informer started for namespace: {}" + namespace); // BuildConfigList list = // getOpenshiftClient().buildConfigs().inNamespace(namespace).list(); diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/BuildConfigWatcher.java b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildConfigWatcher.java index dd3eb2cfb..972143318 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/BuildConfigWatcher.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildConfigWatcher.java @@ -79,7 +79,7 @@ public BuildConfigWatcher(String namespace) { } @Override - public int getListIntervalInSeconds() { + public int getResyncPeriodMilliseconds() { return GlobalPluginConfiguration.get().getBuildConfigListInterval(); } diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/BuildInformer.java b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildInformer.java 
index e2114bb23..986e54045 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/BuildInformer.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildInformer.java @@ -52,7 +52,7 @@ public BuildInformer(String namespace) { * builds getting kicked off so quit depending on so moved off of concurrent * hash set to concurrent hash map using namepace/name key */ - public int getListIntervalInSeconds() { + public int getResyncPeriodMilliseconds() { return 1_000 * GlobalPluginConfiguration.get().getBuildListInterval(); } @@ -60,8 +60,9 @@ public void start() { LOGGER.info("Starting Build informer for {} !!" + namespace); LOGGER.debug("Listing Build resources"); SharedInformerFactory factory = getInformerFactory().inNamespace(namespace); - this.informer = factory.sharedIndexInformerFor(Build.class, getListIntervalInSeconds()); + this.informer = factory.sharedIndexInformerFor(Build.class, getResyncPeriodMilliseconds()); this.informer.addEventHandler(this); + factory.startAllRegisteredInformers(); LOGGER.info("Build informer started for namespace: {}" + namespace); // BuildList list = getOpenshiftClient().builds().inNamespace(namespace).list(); // onInit(list.getItems()); diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/BuildSyncRunListener.java b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildSyncRunListener.java index a71b58876..c4d875f9e 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/BuildSyncRunListener.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildSyncRunListener.java @@ -137,9 +137,12 @@ public static String joinPaths(String... 
strings) { @Override public void onStarted(Run run, TaskListener listener) { + logger.info("Run started: " + run.getFullDisplayName()); if (shouldPollRun(run)) { + logger.info("Processing run: " + run.getDisplayName()); try { BuildCause cause = (BuildCause) run.getCause(BuildCause.class); + logger.info("Build cause for the run is: " + cause); if (cause != null) { // TODO This should be a link to the OpenShift console. run.setDescription(cause.getShortDescription()); @@ -152,7 +155,7 @@ public void onStarted(Run run, TaskListener listener) { } checkTimerStarted(); } else { - logger.trace("not polling polling build " + run.getUrl() + " as its not a WorkflowJob"); + logger.info("Not polling polling build " + run.getUrl() + " as its not a WorkflowJob"); } super.onStarted(run, listener); } diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/BuildWatcher.java b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildWatcher.java index 984786345..3fe323981 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/BuildWatcher.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildWatcher.java @@ -56,7 +56,7 @@ public BuildWatcher(String namespace) { } @Override - public int getListIntervalInSeconds() { + public int getResyncPeriodMilliseconds() { return GlobalPluginConfiguration.get().getBuildListInterval(); } diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/ConfigMapClusterInformer.java b/src/main/java/io/fabric8/jenkins/openshiftsync/ConfigMapClusterInformer.java index 590fefa48..8c143a04c 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/ConfigMapClusterInformer.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/ConfigMapClusterInformer.java @@ -54,6 +54,7 @@ public void start() { SharedInformerFactory factory = getInformerFactory(); this.informer = factory.sharedIndexInformerFor(ConfigMap.class, getListIntervalInSeconds()); informer.addEventHandler(this); + factory.startAllRegisteredInformers(); LOGGER.info("ConfigMap informer 
started for namespaces: {}" + namespaces); // ConfigMapList list = getOpenshiftClient().configMaps().inNamespace(namespace).list(); // onInit(list.getItems()); diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/ConfigMapInformer.java b/src/main/java/io/fabric8/jenkins/openshiftsync/ConfigMapInformer.java index 2501f584c..75ff399f3 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/ConfigMapInformer.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/ConfigMapInformer.java @@ -41,7 +41,7 @@ public ConfigMapInformer(String namespace) { } @Override - public int getListIntervalInSeconds() { + public int getResyncPeriodMilliseconds() { return 1_000 * GlobalPluginConfiguration.get().getConfigMapListInterval(); } @@ -49,8 +49,9 @@ public void start() { LOGGER.info("Starting configMap informer for {} !!" + namespace); LOGGER.debug("listing ConfigMap resources"); SharedInformerFactory factory = getInformerFactory().inNamespace(namespace); - this.informer = factory.sharedIndexInformerFor(ConfigMap.class, getListIntervalInSeconds()); + this.informer = factory.sharedIndexInformerFor(ConfigMap.class, getResyncPeriodMilliseconds()); informer.addEventHandler(this); + factory.startAllRegisteredInformers(); LOGGER.info("ConfigMap informer started for namespace: {}" + namespace); // ConfigMapList list = getOpenshiftClient().configMaps().inNamespace(namespace).list(); // onInit(list.getItems()); diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/ConfigMapWatcher.java b/src/main/java/io/fabric8/jenkins/openshiftsync/ConfigMapWatcher.java index b47cf9440..d0327ab81 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/ConfigMapWatcher.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/ConfigMapWatcher.java @@ -43,7 +43,7 @@ public ConfigMapWatcher(String namespace) { } @Override - public int getListIntervalInSeconds() { + public int getResyncPeriodMilliseconds() { return GlobalPluginConfiguration.get().getConfigMapListInterval(); } diff 
--git a/src/main/java/io/fabric8/jenkins/openshiftsync/GlobalPluginConfiguration.java b/src/main/java/io/fabric8/jenkins/openshiftsync/GlobalPluginConfiguration.java index c8b419f85..0c11062cb 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/GlobalPluginConfiguration.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/GlobalPluginConfiguration.java @@ -16,29 +16,33 @@ package io.fabric8.jenkins.openshiftsync; import static hudson.security.ACL.SYSTEM; -import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getInformerFactory; +import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getAuthenticatedOpenShiftClient; import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getNamespaceOrUseDefault; import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getOpenShiftClient; +import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.shutdownOpenShiftClient; import static java.util.concurrent.TimeUnit.SECONDS; -import static java.util.logging.Level.SEVERE; import static jenkins.model.Jenkins.ADMINISTER; -import java.util.ArrayList; -import java.util.List; +import java.io.IOException; import java.util.concurrent.ScheduledFuture; import java.util.logging.Logger; +import javax.servlet.ServletException; + import org.apache.commons.lang.StringUtils; +import org.kohsuke.stapler.AncestorInPath; import org.kohsuke.stapler.DataBoundConstructor; +import org.kohsuke.stapler.QueryParameter; import org.kohsuke.stapler.StaplerRequest; +import org.kohsuke.stapler.verb.POST; import com.cloudbees.plugins.credentials.common.StandardListBoxModel; import hudson.Extension; import hudson.Util; +import hudson.model.Job; +import hudson.util.FormValidation; import hudson.util.ListBoxModel; -import io.fabric8.kubernetes.client.informers.SharedInformerFactory; -import io.fabric8.openshift.client.OpenShiftClient; import jenkins.model.GlobalConfiguration; import jenkins.model.Jenkins; import jenkins.util.Timer; @@ -51,8 +55,16 @@ public class 
GlobalPluginConfiguration extends GlobalConfiguration { private boolean enabled = true; private boolean foldersEnabled = true; + private boolean useClusterMode = false; + private boolean syncConfigMaps = true; + private boolean syncSecrets = true; + private boolean syncImageStreams = true; + private boolean syncBuildConfigsAndBuilds = true; + private String server; private String credentialsId = ""; + private int maxConnections = 100; + private String[] namespaces; private String jobNamePattern; private String skipOrganizationPrefix; @@ -62,20 +74,16 @@ public class GlobalPluginConfiguration extends GlobalConfiguration { private int secretListInterval = 300; private int configMapListInterval = 300; private int imageStreamListInterval = 300; - private static GlobalPluginConfigurationTimerTask TASK; - private final static List> watchers = new ArrayList<>(); - private transient ScheduledFuture schedule; - - public final static List> getWatchers() { - return watchers; - } + private static GlobalPluginConfigurationTimerTask TASK; + private static ScheduledFuture FUTURE; @DataBoundConstructor public GlobalPluginConfiguration(boolean enable, String server, String namespace, boolean foldersEnabled, String credentialsId, String jobNamePattern, String skipOrganizationPrefix, String skipBranchSuffix, int buildListInterval, int buildConfigListInterval, int configMapListInterval, int secretListInterval, - int imageStreamListInterval) { + int imageStreamListInterval, boolean useClusterMode, boolean syncConfigMaps, boolean syncSecrets, + boolean syncImageStreams, boolean syncBuildsConfigAndBuilds, int maxConnections) { this.enabled = enable; this.server = server; this.namespaces = StringUtils.isBlank(namespace) ? 
null : namespace.split(" "); @@ -89,6 +97,12 @@ public GlobalPluginConfiguration(boolean enable, String server, String namespace this.configMapListInterval = configMapListInterval; this.secretListInterval = secretListInterval; this.imageStreamListInterval = imageStreamListInterval; + this.useClusterMode = useClusterMode; + this.syncConfigMaps = syncConfigMaps; + this.syncSecrets = syncSecrets; + this.syncImageStreams = syncImageStreams; + this.syncBuildConfigsAndBuilds = syncBuildsConfigAndBuilds; + this.maxConnections = maxConnections; configChange(); } @@ -102,6 +116,100 @@ public static GlobalPluginConfiguration get() { return GlobalConfiguration.all().get(GlobalPluginConfiguration.class); } + private synchronized void configChange() { + logger.info("OpenShift Sync Plugin processing a newly supplied configuration"); + stop(); +// shutdownOpenShiftClient(); + start(); + } + + private void start() { + if (this.enabled) { + OpenShiftUtils.initializeOpenShiftClient(this.server, this.maxConnections); + this.namespaces = getNamespaceOrUseDefault(this.namespaces, getOpenShiftClient()); + if (TASK != null) { + logger.warning("Previously existing configuration task"); + } + TASK = new GlobalPluginConfigurationTimerTask(this.namespaces); + FUTURE = Timer.get().schedule(TASK, 1, SECONDS); // lets give jenkins a while to get started ;) + } else { + logger.info("OpenShift Sync Plugin has been disabled"); + } + } + + private void stop() { + if (FUTURE != null) { + boolean interrupted = FUTURE.cancel(true); + if (interrupted) { + logger.info("OpenShift Sync Plugin task has been interrupted"); + } + } + if (TASK != null) { + TASK.stop(); + TASK.cancel(); + TASK = null; + } + OpenShiftUtils.shutdownOpenShiftClient(); + } + + /** + * Validates the OpenShift Sync Configuration form by checking cluster-wide watch permissions and the configured connection limits. + */ + @POST + public FormValidation doValidate(@QueryParameter("useClusterMode") final boolean useClusterMode, + @QueryParameter("syncConfigMaps") final boolean syncConfigMaps, + 
@QueryParameter("syncSecrets") final boolean syncSecrets, + @QueryParameter("syncImageStreams") final boolean syncImageStreams, + @QueryParameter("syncBuildConfigsAndBuilds") final boolean syncBuildConfigsAndBuilds, + @QueryParameter("maxConnections") final int maxConnections, + @QueryParameter("namespace") final String namespace, @SuppressWarnings("rawtypes") @AncestorInPath Job job) + throws IOException, ServletException { + if (useClusterMode) { + try { + int secrets = getAuthenticatedOpenShiftClient().secrets().inAnyNamespace().list().getItems().size(); + logger.info("Cluster secrets: " + secrets); + } catch (Exception e) { + StringBuilder message = new StringBuilder(); + message.append("The ServiceAccount used by Jenkins does not have cluster wide watch permissions.\n"); + message.append("To use cluster mode, you need to run the following commands and restart Jenkins: \n\n"); + message.append("oc create clusterrole jenkins-watcher --verb=get,list,watch \\\n"); + message.append(" --resource=configmaps,builds,buildconfigs,imagestreams,secrets\n\n"); + message.append("oc adm policy add-cluster-role-to-user jenkins-watcher -z jenkins\n"); + logger.severe("Error while trying to query secrets lists: " + e); + return FormValidation.error(message.toString()); + } + } else { + StringBuilder message = new StringBuilder(); + if (maxConnections > 200) { + message.append("Cluster mode is recommended if max connections is greater than 200."); + } + int requiredConnectionsCount = 0; + if (syncBuildConfigsAndBuilds) { + requiredConnectionsCount += 2; + } + if (syncImageStreams) { + requiredConnectionsCount++; + } + if (syncSecrets) { + requiredConnectionsCount++; + } + if (syncConfigMaps) { + requiredConnectionsCount++; + } + String[] namespaces = StringUtils.isBlank(namespace) ? 
new String[] {} : namespace.split(" "); + int namespacesCount = namespaces.length; + requiredConnectionsCount = namespacesCount * requiredConnectionsCount; + if (maxConnections < requiredConnectionsCount) { + message.append(String.format("Watching %s namespaces with your configuration requires %s connections.", + namespacesCount, requiredConnectionsCount)); + } + if (message.length() > 0) { + return FormValidation.warning(message.toString()); + } + } + return FormValidation.ok("Success"); + } + @Override public String getDisplayName() { return "OpenShift Jenkins Sync"; @@ -246,51 +354,52 @@ void setNamespaces(String[] namespaces) { this.namespaces = namespaces; } - private synchronized void configChange() { - logger.info("OpenShift Sync Plugin processing a newly supplied configuration"); - synchronized (watchers) { - OpenShiftClient client = OpenShiftUtils.getOpenShiftClient(); - if (client != null) { - if (TASK != null) { - TASK.stop(); - } - SharedInformerFactory informerFactory = getInformerFactory(); - if (informerFactory != null) { - informerFactory.stopAllRegisteredInformers(true); - } - logger.info("Existing watchers: stopped and cleared : " + watchers); - logger.info("Existing watchers: " + watchers); - } + public boolean isUseClusterMode() { + return useClusterMode; + } - logger.info("Existing scheduled task: " + schedule); - if (this.schedule != null && !this.schedule.isCancelled()) { - this.schedule.cancel(true); - logger.info("Existing scheduled task cancelled: " + schedule); - } - } + public void setUseClusterMode(boolean useClusterMode) { + this.useClusterMode = useClusterMode; + } - OpenShiftClient client = OpenShiftUtils.getOpenShiftClient(); - logger.info("Shutting down OpenShift Client: " + client + " ..."); - OpenShiftUtils.shutdownOpenShiftClient(); - logger.info("!!! 
OpenShift Client has been shutdown "); + public boolean isSyncConfigMaps() { + return syncConfigMaps; + } - if (!this.enabled) { - logger.info("OpenShift Sync Plugin has been disabled"); - return; - } - try { - logger.info("Initializing OpenShift Client..."); - OpenShiftUtils.initializeOpenShiftClient(this.server); - OpenShiftClient openShiftClient = getOpenShiftClient(); - this.namespaces = getNamespaceOrUseDefault(this.namespaces, openShiftClient); - logger.info("OpenShift Client initialized: " + openShiftClient); + public void setSyncConfigMaps(boolean syncConfigMaps) { + this.syncConfigMaps = syncConfigMaps; + } - TASK = new GlobalPluginConfigurationTimerTask(this.namespaces); - // lets give jenkins a while to get started ;) - this.schedule = Timer.get().schedule(TASK, 1, SECONDS); - } catch (Exception e) { - Throwable exceptionOrCause = (e.getCause() != null) ? e.getCause() : e; - logger.log(SEVERE, "Failed to configure OpenShift Jenkins Sync Plugin: " + exceptionOrCause); - } + public boolean isSyncSecrets() { + return syncSecrets; } + + public void setSyncSecrets(boolean syncSecrets) { + this.syncSecrets = syncSecrets; + } + + public boolean isSyncImageStreams() { + return syncImageStreams; + } + + public void setSyncImageStreams(boolean syncImageStreams) { + this.syncImageStreams = syncImageStreams; + } + + public boolean isSyncBuildConfigsAndBuilds() { + return syncBuildConfigsAndBuilds; + } + + public void setSyncBuildConfigsAndBuilds(boolean syncBuildConfigsAndBuilds) { + this.syncBuildConfigsAndBuilds = syncBuildConfigsAndBuilds; + } + + public int getMaxConnections() { + return maxConnections; + } + + public void setMaxConnections(int maxConnections) { + this.maxConnections = maxConnections; + } + } diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/GlobalPluginConfigurationTimerTask.java b/src/main/java/io/fabric8/jenkins/openshiftsync/GlobalPluginConfigurationTimerTask.java index db03748cc..a359802cc 100644 --- 
a/src/main/java/io/fabric8/jenkins/openshiftsync/GlobalPluginConfigurationTimerTask.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/GlobalPluginConfigurationTimerTask.java @@ -23,81 +23,114 @@ public GlobalPluginConfigurationTimerTask(String[] namespaces) { @Override protected void doRun() throws Exception { - try { - logger.info("Confirming Jenkins is started"); - while (true) { - @SuppressWarnings("deprecation") - final Jenkins instance = Jenkins.getActiveInstance(); - // We can look at Jenkins Init Level to see if we are ready to start. If we do - // not wait, we risk the chance of a deadlock. - InitMilestone initLevel = instance.getInitLevel(); - logger.fine("Jenkins init level: " + initLevel); - if (initLevel == COMPLETED) { - break; - } - logger.info("Jenkins not ready..."); - try { - Thread.sleep(500); - } catch (InterruptedException e) { - logger.info("Interrupted while sleeping"); - } + logger.info("Confirming Jenkins is started"); + waitForJenkinsStartup(); + stop(); + start(); + } + + private void start() { + if (GlobalPluginConfiguration.get().isUseClusterMode()) { + startClusterInformers(); + logger.info("All the cluster informers have been registered!! ... starting all registered informers"); + } else { + startNamespaceInformers(); + logger.info("All the namespaced informers have been registered!! ... starting all registered informers"); + } + getInformerFactory().startAllRegisteredInformers(); + logger.info("All registered informers have been started"); + + } + + private void waitForJenkinsStartup() { + while (true) { + @SuppressWarnings("deprecation") + final Jenkins instance = Jenkins.getActiveInstance(); + // We can look at Jenkins Init Level to see if we are ready to start. If we do + // not wait, we risk the chance of a deadlock. 
+ InitMilestone initLevel = instance.getInitLevel(); + logger.fine("Jenkins init level: " + initLevel); + if (initLevel == COMPLETED) { + break; } - logger.info("Initializing all the watchers..."); - ConfigMapClusterInformer configMapInformer = new ConfigMapClusterInformer(namespaces); - configMapInformer.start(); + logger.info("Jenkins not ready..."); + try { + Thread.sleep(500); + } catch (InterruptedException e) { + logger.info("Interrupted while sleeping"); + } + } + } - SecretClusterInformer secretInformer = new SecretClusterInformer(namespaces); - secretInformer.start(); + private void startNamespaceInformers() { + for (String namespace : namespaces) { + GlobalPluginConfiguration configuration = GlobalPluginConfiguration.get(); + if (configuration.isSyncBuildConfigsAndBuilds()) { + BuildConfigInformer buildConfigInformer = new BuildConfigInformer(namespace); + informers.add(buildConfigInformer); + buildConfigInformer.start(); + + BuildInformer buildInformer = new BuildInformer(namespace); + buildInformer.start(); + informers.add(buildInformer); + } + if (configuration.isSyncConfigMaps()) { + ConfigMapInformer configMapInformer = new ConfigMapInformer(namespace); + configMapInformer.start(); + informers.add(configMapInformer); + } + if (configuration.isSyncImageStreams()) { + ImageStreamInformer imageStreamInformer = new ImageStreamInformer(namespace); + imageStreamInformer.start(); + informers.add(imageStreamInformer); + } + if (configuration.isSyncSecrets()) { + SecretInformer secretInformer = new SecretInformer(namespace); + secretInformer.start(); + informers.add(secretInformer); + } + } + } + private void startClusterInformers() { + logger.info("Initializing cluster informers ..."); + GlobalPluginConfiguration configuration = GlobalPluginConfiguration.get(); + if (configuration.isSyncBuildConfigsAndBuilds()) { BuildConfigClusterInformer buildConfigInformer = new BuildConfigClusterInformer(namespaces); + informers.add(buildConfigInformer); 
buildConfigInformer.start(); BuildClusterInformer buildInformer = new BuildClusterInformer(namespaces); + informers.add(buildInformer); buildInformer.start(); - + } + if (configuration.isSyncConfigMaps()) { + ConfigMapClusterInformer configMapInformer = new ConfigMapClusterInformer(namespaces); + informers.add(configMapInformer); + configMapInformer.start(); + } + if (configuration.isSyncImageStreams()) { ImageStreamClusterInformer imageStreamInformer = new ImageStreamClusterInformer(namespaces); + informers.add(imageStreamInformer); imageStreamInformer.start(); -// - -// List> watchers = new ArrayList<>(); - for (String namespace : namespaces) { -// BuildConfigInformer buildConfigInformer = new BuildConfigInformer(namespace); -// informers.add(buildConfigInformer); -// buildConfigInformer.start(); - -// BuildInformer buildInformer = new BuildInformer(namespace); -// buildInformer.start(); -// informers.add(buildInformer); - -// ConfigMapInformer configMapInformer = new ConfigMapInformer(namespace); -// configMapInformer.start(); -// informers.add(configMapInformer); -// -// ImageStreamInformer imageStreamInformer = new ImageStreamInformer(namespace); -// imageStreamInformer.start(); -// informers.add(imageStreamInformer); -// -// SecretInformer secretInformer = new SecretInformer(namespace); -// secretInformer.start(); -// informers.add(secretInformer); - - } - - logger.info("All the watchers have been registered!! ... 
starting all registered informers"); - getInformerFactory().startAllRegisteredInformers(); - logger.info("All registered informers have been started"); - - } catch (Exception e) { - logger.severe(e.toString()); - e.printStackTrace(); + } + if (configuration.isSyncSecrets()) { + SecretClusterInformer secretInformer = new SecretClusterInformer(namespaces); + informers.add(secretInformer); + secretInformer.start(); } } - public synchronized void stop() { - this.cancel(); - for (Lifecyclable informer : informers) { - informer.stop(); + public void stop() { + logger.info("Stopping all informers ..."); + synchronized (this) { + for (Lifecyclable informer : informers) { + logger.info("Stopping informer: {}" + informer); + informer.stop(); + logger.info("Stopped informer: {}" + informer); + } + informers.clear(); + logger.info("Stopped all informers"); } - informers.clear(); } } diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/ImageStreamClusterInformer.java b/src/main/java/io/fabric8/jenkins/openshiftsync/ImageStreamClusterInformer.java index 624e3e3b8..ad78dc352 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/ImageStreamClusterInformer.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/ImageStreamClusterInformer.java @@ -54,7 +54,7 @@ public ImageStreamClusterInformer(String[] namespaces) { this.namespaces = new HashSet<>(Arrays.asList(namespaces)); } - public int getListIntervalInSeconds() { + public int getResyncPeriodMilliseconds() { return 1_000 * GlobalPluginConfiguration.get().getImageStreamListInterval(); } @@ -64,8 +64,9 @@ public void start() { SharedInformerFactory factory = getInformerFactory(); Map labels = singletonMap(IMAGESTREAM_AGENT_LABEL, IMAGESTREAM_AGENT_LABEL_VALUE); OperationContext withLabels = new OperationContext().withLabels(labels); - this.informer = factory.sharedIndexInformerFor(ImageStream.class, withLabels, getListIntervalInSeconds()); + this.informer = factory.sharedIndexInformerFor(ImageStream.class, 
withLabels, getResyncPeriodMilliseconds()); informer.addEventHandler(this); + factory.startAllRegisteredInformers(); LOGGER.info("ImageStream informer started for namespace: {}" + namespaces); // ImageStreamList list = getOpenshiftClient().imageStreams().inNamespace(namespace).withLabels(labels).list(); // onInit(list.getItems()); diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/ImageStreamInformer.java b/src/main/java/io/fabric8/jenkins/openshiftsync/ImageStreamInformer.java index 9a70fba06..49420e96a 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/ImageStreamInformer.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/ImageStreamInformer.java @@ -51,7 +51,7 @@ public ImageStreamInformer(String namespace) { this.namespace = namespace; } - public int getListIntervalInSeconds() { + public int getResyncPeriodMilliseconds() { return 1_000 * GlobalPluginConfiguration.get().getImageStreamListInterval(); } @@ -61,8 +61,9 @@ public void start() { SharedInformerFactory factory = getInformerFactory().inNamespace(namespace); Map labels = singletonMap(IMAGESTREAM_AGENT_LABEL, IMAGESTREAM_AGENT_LABEL_VALUE); OperationContext withLabels = new OperationContext().withLabels(labels); - this.informer = factory.sharedIndexInformerFor(ImageStream.class, withLabels, getListIntervalInSeconds()); + this.informer = factory.sharedIndexInformerFor(ImageStream.class, withLabels, getResyncPeriodMilliseconds()); informer.addEventHandler(this); + factory.startAllRegisteredInformers(); LOGGER.info("ImageStream informer started for namespace: {}" + namespace); // ImageStreamList list = getOpenshiftClient().imageStreams().inNamespace(namespace).withLabels(labels).list(); // onInit(list.getItems()); diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/ImageStreamWatcher.java b/src/main/java/io/fabric8/jenkins/openshiftsync/ImageStreamWatcher.java index 2f71f447d..97a7f64d9 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/ImageStreamWatcher.java +++ 
b/src/main/java/io/fabric8/jenkins/openshiftsync/ImageStreamWatcher.java @@ -47,7 +47,7 @@ public ImageStreamWatcher(String namespace) { } @Override - public int getListIntervalInSeconds() { + public int getResyncPeriodMilliseconds() { return GlobalPluginConfiguration.get().getImageStreamListInterval(); } diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/OpenShiftUtils.java b/src/main/java/io/fabric8/jenkins/openshiftsync/OpenShiftUtils.java index 246338e87..6170b7de8 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/OpenShiftUtils.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/OpenShiftUtils.java @@ -38,6 +38,7 @@ import java.util.logging.Logger; import org.apache.commons.lang.StringUtils; +import org.apache.commons.lang.builder.ReflectionToStringBuilder; import org.apache.tools.ant.filters.StringInputStream; import org.joda.time.DateTime; import org.joda.time.format.DateTimeFormatter; @@ -86,9 +87,7 @@ public class OpenShiftUtils { private static OpenShiftClient openShiftClient; private static String jenkinsPodNamespace = null; - private static SharedInformerFactory factory; private static final Jenkins JENKINS_INSTANCE = Jenkins.getInstanceOrNull(); - private static final Object lock = new Object(); static { jenkinsPodNamespace = System.getProperty(Constants.OPENSHIFT_PROJECT_ENV_VAR_NAME); @@ -131,8 +130,9 @@ public class OpenShiftUtils { * * @param serverUrl the optional URL of where the OpenShift cluster API server * is running + * @param maxConnections2 */ - public synchronized static void initializeOpenShiftClient(String serverUrl) { + public synchronized static void initializeOpenShiftClient(String serverUrl, int maxConnections) { if (openShiftClient != null) { logger.log(INFO, "Closing already initialized openshift client"); openShiftClient.close(); @@ -142,6 +142,8 @@ public synchronized static void initializeOpenShiftClient(String serverUrl) { configBuilder.withMasterUrl(serverUrl); } Config config = configBuilder.build(); 
+ logger.log(INFO, "Current OpenShift Client Configuration: " + ReflectionToStringBuilder.toString(config)); + String version = JENKINS_INSTANCE.getPluginManager().getPlugin("openshift-sync").getVersion(); config.setUserAgent("openshift-sync-plugin-" + version + "/fabric8-" + Version.clientVersion()); openShiftClient = new DefaultOpenShiftClient(config); @@ -149,8 +151,9 @@ public synchronized static void initializeOpenShiftClient(String serverUrl) { DefaultOpenShiftClient defClient = (DefaultOpenShiftClient) openShiftClient; Dispatcher dispatcher = defClient.getHttpClient().dispatcher(); - dispatcher.setMaxRequestsPerHost(100); - dispatcher.setMaxRequests(100); +// int maxConnections = 100;//GlobalPluginConfiguration.get().getMaxConnections(); + dispatcher.setMaxRequestsPerHost(maxConnections); + dispatcher.setMaxRequests(maxConnections); } public synchronized static OpenShiftClient getOpenShiftClient() { @@ -160,6 +163,10 @@ public synchronized static OpenShiftClient getOpenShiftClient() { // Get the current OpenShiftClient and configure to use the current Oauth // token. 
public synchronized static OpenShiftClient getAuthenticatedOpenShiftClient() { + if (openShiftClient == null) { + GlobalPluginConfiguration config = GlobalPluginConfiguration.get(); + initializeOpenShiftClient(config.getServer(), config.getMaxConnections()); + } if (openShiftClient != null) { String token = CredentialsUtils.getCurrentToken(); if (token.length() > 0) { @@ -170,18 +177,37 @@ public synchronized static OpenShiftClient getAuthenticatedOpenShiftClient() { } public static SharedInformerFactory getInformerFactory() { - if (factory == null) { + return getAuthenticatedOpenShiftClient().informers(); +/* if (factory == null) { synchronized (lock) { factory = getAuthenticatedOpenShiftClient().informers(); } } - return factory; + return factory;*/ } public synchronized static void shutdownOpenShiftClient() { + logger.info("Stopping openshift client: " + openShiftClient); if (openShiftClient != null) { + + // All this stuff is done by openShiftClient.close(); + +// DefaultOpenShiftClient client = (DefaultOpenShiftClient) openShiftClient; +// Dispatcher dispatcher = client.getHttpClient().dispatcher(); +// ExecutorService executorService = dispatcher.executorService(); +// try { +// dispatcher.cancelAll(); +// client.getHttpClient().connectionPool().evictAll(); +// //TODO Akram: shutting down the executorService prevents other informers from re-attaching to it.
+// executorService.shutdown(); +// TimeUnit.SECONDS.sleep(1); +// } catch (Exception e) { +// logger.warning("Error while stopping executor thread"); +// executorService.shutdownNow(); +// } openShiftClient.close(); openShiftClient = null; +// factory = null; } } diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/SecretClusterInformer.java b/src/main/java/io/fabric8/jenkins/openshiftsync/SecretClusterInformer.java index 8f9eb616d..022085b7f 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/SecretClusterInformer.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/SecretClusterInformer.java @@ -50,7 +50,7 @@ public SecretClusterInformer(String[] namespaces) { this.namespaces = new HashSet<>(Arrays.asList(namespaces)); } - public int getListIntervalInSeconds() { + public int getResyncPeriodMilliseconds() { return 1_000 * GlobalPluginConfiguration.get().getSecretListInterval(); } @@ -60,8 +60,9 @@ public void start() { SharedInformerFactory factory = getInformerFactory(); Map labels = singletonMap(OPENSHIFT_LABELS_SECRET_CREDENTIAL_SYNC, VALUE_SECRET_SYNC); OperationContext withLabels = new OperationContext().withLabels(labels); - this.informer = factory.sharedIndexInformerFor(Secret.class, withLabels, getListIntervalInSeconds()); + this.informer = factory.sharedIndexInformerFor(Secret.class, withLabels, getResyncPeriodMilliseconds()); informer.addEventHandler(this); + factory.startAllRegisteredInformers(); LOGGER.info("Secret informer started for namespace: {}" + namespaces); // SecretList list = getOpenshiftClient().secrets().inNamespace(namespace).withLabels(labels).list(); // onInit(list.getItems()); diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/SecretInformer.java b/src/main/java/io/fabric8/jenkins/openshiftsync/SecretInformer.java index fe3d4b5e6..4eed4bf30 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/SecretInformer.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/SecretInformer.java @@ -47,7 +47,7 
@@ public SecretInformer(String namespace) { } @Override - public int getListIntervalInSeconds() { + public int getResyncPeriodMilliseconds() { return 1_000 * GlobalPluginConfiguration.get().getSecretListInterval(); } @@ -57,9 +57,11 @@ public void start() { SharedInformerFactory factory = getInformerFactory().inNamespace(namespace); Map labels = singletonMap(OPENSHIFT_LABELS_SECRET_CREDENTIAL_SYNC, VALUE_SECRET_SYNC); OperationContext withLabels = new OperationContext().withLabels(labels); - this.informer = factory.sharedIndexInformerFor(Secret.class, withLabels, getListIntervalInSeconds()); + this.informer = factory.sharedIndexInformerFor(Secret.class, withLabels, getResyncPeriodMilliseconds()); informer.addEventHandler(this); + factory.startAllRegisteredInformers(); LOGGER.info("Secret informer started for namespace: {}" + namespace); + // SecretList list = getOpenshiftClient().secrets().inNamespace(namespace).withLabels(labels).list(); // onInit(list.getItems()); } diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/SecretWatcher.java b/src/main/java/io/fabric8/jenkins/openshiftsync/SecretWatcher.java index fd0ddda9d..ee80125a2 100644 --- a/src/main/java/io/fabric8/jenkins/openshiftsync/SecretWatcher.java +++ b/src/main/java/io/fabric8/jenkins/openshiftsync/SecretWatcher.java @@ -46,7 +46,7 @@ public SecretWatcher(String namespace) { } @Override - public int getListIntervalInSeconds() { + public int getResyncPeriodMilliseconds() { return GlobalPluginConfiguration.get().getSecretListInterval(); } diff --git a/src/main/resources/io/fabric8/jenkins/openshiftsync/GlobalPluginConfiguration/config.jelly b/src/main/resources/io/fabric8/jenkins/openshiftsync/GlobalPluginConfiguration/config.jelly index 74f95824f..bad912b55 100644 --- a/src/main/resources/io/fabric8/jenkins/openshiftsync/GlobalPluginConfiguration/config.jelly +++ b/src/main/resources/io/fabric8/jenkins/openshiftsync/GlobalPluginConfiguration/config.jelly @@ -1,5 +1,4 @@