diff --git a/pom.xml b/pom.xml
index 18e4b800d..1d01e36c7 100644
--- a/pom.xml
+++ b/pom.xml
@@ -22,10 +22,10 @@
false
true
true
- 2.263
+ 2.289.1
8
2.71
- 4.13.2
+ 5.4.1
DEBUG
3.0.4
false
@@ -68,6 +68,10 @@
+
+ incremental-repo.jenkins-ci.org
+ https://repo.jenkins-ci.org/incrementals/
+
repo.jenkins-ci.org
https://repo.jenkins-ci.org/public/
@@ -127,11 +131,6 @@
blueocean-rest
1.4.0
-
- com.openshift.jenkins.plugins
- openshift-client
- 1.0.34
-
org.jenkins-ci.plugins.pipeline-stage-view
pipeline-rest-api
@@ -141,11 +140,7 @@
org.slf4j
slf4j-api
-
-
- org.slf4j
- slf4j-simple
- ${slf4j.version}
+ provided
@@ -157,14 +152,18 @@
org.csanchez.jenkins.plugins
kubernetes
- 1.29.0
+ 1.30.0
- io.fabric8
- kubernetes-client
- 4.13.2
- provided
+ org.jenkins-ci.plugins
+ kubernetes-client-api
+ 5.4.1
+
+ org.jenkins-ci.plugins
+ jackson2-api
+ 2.12.3
+
org.eclipse.jetty
@@ -177,8 +176,8 @@
io.jenkins.tools.bom
- bom-2.263.x
- 21
+ bom-2.277.x
+ 26
import
pom
diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/Annotations.java b/src/main/java/io/fabric8/jenkins/openshiftsync/Annotations.java
index 7ac39fc54..663903adc 100644
--- a/src/main/java/io/fabric8/jenkins/openshiftsync/Annotations.java
+++ b/src/main/java/io/fabric8/jenkins/openshiftsync/Annotations.java
@@ -18,11 +18,11 @@
/**
*/
public class Annotations {
- public static final String JENKINS_JOB_PATH = "jenkins.openshift.io/job-path";
- public static final String GENERATED_BY = "jenkins.openshift.io/generated-by";
- public static final String GENERATED_BY_JENKINS = "jenkins";
- public static final String DISABLE_SYNC_CREATE = "jenkins.openshift.io/disable-sync-create";
- public static final String BUILDCONFIG_NAME = "openshift.io/build-config.name";
- public static final String SECRET_NAME = "jenkins.openshift.io/secret.name";
- public static final String AUTOSTART = "jenkins.openshift.io/autostart";
+ public static final String JENKINS_JOB_PATH = "jenkins.openshift.io/job-path";
+ public static final String GENERATED_BY = "jenkins.openshift.io/generated-by";
+ public static final String GENERATED_BY_JENKINS = "jenkins";
+ public static final String DISABLE_SYNC_CREATE = "jenkins.openshift.io/disable-sync-create";
+ public static final String BUILDCONFIG_NAME = "openshift.io/build-config.name";
+ public static final String SECRET_NAME = "jenkins.openshift.io/secret.name";
+ public static final String AUTOSTART = "jenkins.openshift.io/autostart";
}
diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/BaseWatcher.java b/src/main/java/io/fabric8/jenkins/openshiftsync/BaseWatcher.java
deleted file mode 100644
index abb76cbff..000000000
--- a/src/main/java/io/fabric8/jenkins/openshiftsync/BaseWatcher.java
+++ /dev/null
@@ -1,166 +0,0 @@
-/**
- * Copyright (C) 2017 Red Hat, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package io.fabric8.jenkins.openshiftsync;
-
-import static java.net.HttpURLConnection.HTTP_GONE;
-
-import io.fabric8.kubernetes.client.KubernetesClientException;
-import io.fabric8.kubernetes.client.Watch;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ScheduledFuture;
-import java.util.concurrent.TimeUnit;
-import java.util.logging.Logger;
-
-import org.csanchez.jenkins.plugins.kubernetes.PodTemplate;
-
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
-import jenkins.util.Timer;
-
-public abstract class BaseWatcher {
- private final Logger LOGGER = Logger.getLogger(BaseWatcher.class.getName());
-
- protected ScheduledFuture relister;
- protected final String[] namespaces;
- protected ConcurrentHashMap watches;
- private final String PT_NAME_CLAIMED = "The event for %s | %s | %s that attempts to add the pod template %s was ignored because a %s previously created a pod template with the same name";
- private final String PT_NOT_OWNED = "The event for %s | %s | %s that no longer includes the pod template %s was ignored because the type %s was associated with that pod template";
-
- @SuppressFBWarnings("EI_EXPOSE_REP2")
- public BaseWatcher(String[] namespaces) {
- this.namespaces = namespaces;
- watches = new ConcurrentHashMap<>();
- }
-
- public abstract Runnable getStartTimerTask();
-
- public abstract int getListIntervalInSeconds();
-
- public abstract void eventReceived(io.fabric8.kubernetes.client.Watcher.Action action, T resource);
-
- public synchronized void start() {
- // lets do this in a background thread to avoid errors like:
- // Tried proxying
- // io.fabric8.jenkins.openshiftsync.GlobalPluginConfiguration to support
- // a circular dependency, but it is not an interface.
- Runnable task = getStartTimerTask();
- relister = Timer.get().scheduleAtFixedRate(task, 100, // still do the
- // first run 100
- // milliseconds in
- getListIntervalInSeconds() * 1000,
- TimeUnit.MILLISECONDS);
-
- }
-
- public void stop() {
- if (relister != null && !relister.isDone()) {
- relister.cancel(true);
- relister = null;
- }
-
- for (Map.Entry entry : watches.entrySet()) {
- entry.getValue().close();
- watches.remove(entry.getKey());
- }
- }
-
- public void onClose(KubernetesClientException e, String namespace) {
- //scans of fabric client confirm this call be called with null
- //we do not want to totally ignore this, as the closing of the
- //watch can effect responsiveness
- LOGGER.info("Watch for type " + this.getClass().getName() + " closed for one of the following namespaces: " + watches.keySet().toString());
- if (e != null) {
- LOGGER.warning(e.toString());
-
- if (e.getStatus() != null && e.getStatus().getCode() == HTTP_GONE) {
- stop();
- start();
- }
- }
- // clearing the watches here will signal the extending classes
- // to attempt to re-establish the watch the next time they attempt
- // to list; should shield from rapid/repeated close/reopen cycles
- // doing it in this fashion
- watches.remove(namespace);
- }
-
- public void addWatch(String key, Watch desiredWatch) {
- Watch watch = watches.putIfAbsent(key, desiredWatch);
- if (watch != null) {
- watch.close();
- }
- }
-
- protected void processSlavesForAddEvent(List slaves, String type, String uid, String apiObjName, String namespace) {
- LOGGER.info("Adding PodTemplate(s) for ");
- List finalSlaveList = new ArrayList();
- for (PodTemplate podTemplate : slaves) {
- PodTemplateUtils.addPodTemplate(this, type, apiObjName, namespace, finalSlaveList, podTemplate);
- }
- PodTemplateUtils.updateTrackedPodTemplatesMap(uid, finalSlaveList);
- }
-
- protected void processSlavesForModifyEvent(List slaves, String type, String uid, String apiObjName, String namespace) {
- LOGGER.info("Modifying PodTemplates");
- boolean alreadyTracked = PodTemplateUtils.trackedPodTemplates.containsKey(uid);
- boolean hasSlaves = slaves.size() > 0; // Configmap has podTemplates
- if (alreadyTracked) {
- if (hasSlaves) {
- // Since the user could have change the immutable image
- // that a PodTemplate uses, we just
- // recreate the PodTemplate altogether. This makes it so
- // that any changes from within
- // Jenkins is undone.
-
- // Check if there are new PodTemplates added or removed to the configmap,
- // if they are, add them to or remove them from trackedPodTemplates
- List podTemplatesToTrack = new ArrayList();
- PodTemplateUtils.purgeTemplates(this, type, uid, apiObjName, namespace);
- for(PodTemplate pt: slaves){
- podTemplatesToTrack = PodTemplateUtils.onlyTrackPodTemplate(this, type,apiObjName,namespace,podTemplatesToTrack, pt);
- }
- PodTemplateUtils.updateTrackedPodTemplatesMap(uid, podTemplatesToTrack);
- for (PodTemplate podTemplate : podTemplatesToTrack) {
- // still do put here in case this is a new item from the last
- // update on this ConfigMap/ImageStream
- PodTemplateUtils.addPodTemplate(this, type,null,null,null, podTemplate);
- }
- } else {
- // The user modified the configMap to no longer be a
- // jenkins-slave.
- PodTemplateUtils.purgeTemplates(this, type, uid, apiObjName, namespace);
- }
- } else {
- if (hasSlaves) {
- List finalSlaveList = new ArrayList();
- for (PodTemplate podTemplate : slaves) {
- // The user modified the api obj to be a jenkins-slave
- PodTemplateUtils.addPodTemplate(this, type, apiObjName, namespace, finalSlaveList, podTemplate);
- }
- PodTemplateUtils.updateTrackedPodTemplatesMap(uid, finalSlaveList);
- }
- }
- }
-
- protected void processSlavesForDeleteEvent(List slaves, String type, String uid, String apiObjName, String namespace) {
- if (PodTemplateUtils.trackedPodTemplates.containsKey(uid)) {
- PodTemplateUtils.purgeTemplates(this, type, uid, apiObjName, namespace);
- }
- }
-}
diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/BuildCause.java b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildCause.java
index 52e30bb35..8baf14e00 100644
--- a/src/main/java/io/fabric8/jenkins/openshiftsync/BuildCause.java
+++ b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildCause.java
@@ -18,133 +18,125 @@
import hudson.model.Cause;
import io.fabric8.kubernetes.api.model.ObjectMeta;
import io.fabric8.openshift.api.model.Build;
+import io.fabric8.openshift.api.model.BuildSource;
+import io.fabric8.openshift.api.model.BuildSpec;
import io.fabric8.openshift.api.model.GitBuildSource;
+import io.fabric8.openshift.api.model.SourceRevision;
+
import org.apache.commons.lang.StringUtils;
public class BuildCause extends Cause {
- private String uid;
-
- private String namespace;
-
- private String name;
-
- private String gitUri;
-
- private String commit;
-
- private String buildConfigUid;
-
- private int numStages = -1;
-
- private int numFlowNodes = -1;
-
- private long lastUpdateToOpenshift = -1;
-
- public BuildCause(String uid, String namespace, String name, String gitUri,
- String commit, String buildConfigUid) {
- this.uid = uid;
- this.namespace = namespace;
- this.name = name;
- this.gitUri = gitUri;
- this.commit = commit;
- this.buildConfigUid = buildConfigUid;
- }
-
- public BuildCause(String uid, String namespace, String name, String gitUri,
- String commit, String buildConfigUid, int numStages,
- int numFlowNodes, long lastUpdateToOpenshift) {
- this(uid, namespace, name, gitUri, commit, buildConfigUid);
- this.numStages = numStages;
- this.numFlowNodes = numFlowNodes;
- this.lastUpdateToOpenshift = lastUpdateToOpenshift;
- }
-
- public BuildCause(Build build, String buildConfigUid) {
- this.buildConfigUid = buildConfigUid;
- if (build == null || build.getMetadata() == null) {
- return;
- }
- ObjectMeta meta = build.getMetadata();
- uid = meta.getUid();
- namespace = meta.getNamespace();
- name = meta.getName();
-
- if (build.getSpec() != null) {
- if (build.getSpec().getSource() != null
- && build.getSpec().getSource().getGit() != null) {
- GitBuildSource git = build.getSpec().getSource().getGit();
- gitUri = git.getUri();
- }
-
- if (build.getSpec().getRevision() != null
- && build.getSpec().getRevision().getGit() != null) {
- commit = build.getSpec().getRevision().getGit().getCommit();
- }
- }
- }
-
- @Override
- public String getShortDescription() {
- StringBuilder sb = new StringBuilder("OpenShift Build ")
- .append(namespace).append("/").append(name);
-
- if (StringUtils.isNotBlank(gitUri)) {
- sb.append(" from ").append(gitUri);
- if (StringUtils.isNotBlank(commit)) {
- sb.append(", commit ").append(commit);
- }
- }
-
- return sb.toString();
- }
-
- public String getUid() {
- return uid;
- }
-
- public String getNamespace() {
- return namespace;
- }
-
- public String getName() {
- return name;
- }
-
- public String getGitUri() {
- return gitUri;
- }
-
- public String getCommit() {
- return commit;
- }
-
- public String getBuildConfigUid() {
- return buildConfigUid;
- }
-
- public int getNumStages() {
- return numStages;
- }
-
- public void setNumStages(int numStages) {
- this.numStages = numStages;
- }
-
- public int getNumFlowNodes() {
- return numFlowNodes;
- }
-
- public void setNumFlowNodes(int numFlowNodes) {
- this.numFlowNodes = numFlowNodes;
- }
-
- public long getLastUpdateToOpenshift() {
- return lastUpdateToOpenshift;
- }
-
- public void setLastUpdateToOpenshift(long lastUpdateToOpenshift) {
- this.lastUpdateToOpenshift = lastUpdateToOpenshift;
- }
+ private String uid;
+ private String namespace;
+ private String name;
+ private String gitUri;
+ private String commit;
+ private String buildConfigUid;
+ private int numStages = -1;
+ private int numFlowNodes = -1;
+ private long lastUpdateToOpenshift = -1;
+
+ public BuildCause(String uid, String namespace, String name, String gitUri, String commit, String buildConfigUid) {
+ this.uid = uid;
+ this.namespace = namespace;
+ this.name = name;
+ this.gitUri = gitUri;
+ this.commit = commit;
+ this.buildConfigUid = buildConfigUid;
+ }
+
+ public BuildCause(String uid, String namespace, String name, String gitUri, String commit, String buildConfigUid,
+ int numStages, int numFlowNodes, long lastUpdateToOpenshift) {
+ this(uid, namespace, name, gitUri, commit, buildConfigUid);
+ this.numStages = numStages;
+ this.numFlowNodes = numFlowNodes;
+ this.lastUpdateToOpenshift = lastUpdateToOpenshift;
+ }
+
+ public BuildCause(Build build, String buildConfigUid) {
+ this.buildConfigUid = buildConfigUid;
+ if (build == null || build.getMetadata() == null) {
+ return;
+ }
+ ObjectMeta meta = build.getMetadata();
+ uid = meta.getUid();
+ namespace = meta.getNamespace();
+ name = meta.getName();
+
+ BuildSpec spec = build.getSpec();
+ if (spec != null) {
+ BuildSource source = spec.getSource();
+ if (source != null && source.getGit() != null) {
+ GitBuildSource git = source.getGit();
+ gitUri = git.getUri();
+ }
+
+ SourceRevision revision = spec.getRevision();
+ if (revision != null && revision.getGit() != null) {
+ commit = revision.getGit().getCommit();
+ }
+ }
+ }
+
+ @Override
+ public String getShortDescription() {
+ StringBuilder sb = new StringBuilder("OpenShift Build ").append(namespace).append("/").append(name);
+ if (StringUtils.isNotBlank(gitUri)) {
+ sb.append(" from ").append(gitUri);
+ if (StringUtils.isNotBlank(commit)) {
+ sb.append(", commit ").append(commit);
+ }
+ }
+ return sb.toString();
+ }
+
+ public String getUid() {
+ return uid;
+ }
+
+ public String getNamespace() {
+ return namespace;
+ }
+
+ public String getName() {
+ return name;
+ }
+
+ public String getGitUri() {
+ return gitUri;
+ }
+
+ public String getCommit() {
+ return commit;
+ }
+
+ public String getBuildConfigUid() {
+ return buildConfigUid;
+ }
+
+ public int getNumStages() {
+ return numStages;
+ }
+
+ public void setNumStages(int numStages) {
+ this.numStages = numStages;
+ }
+
+ public int getNumFlowNodes() {
+ return numFlowNodes;
+ }
+
+ public void setNumFlowNodes(int numFlowNodes) {
+ this.numFlowNodes = numFlowNodes;
+ }
+
+ public long getLastUpdateToOpenshift() {
+ return lastUpdateToOpenshift;
+ }
+
+ public void setLastUpdateToOpenshift(long lastUpdateToOpenshift) {
+ this.lastUpdateToOpenshift = lastUpdateToOpenshift;
+ }
}
diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/BuildClusterInformer.java b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildClusterInformer.java
new file mode 100644
index 000000000..785dbe338
--- /dev/null
+++ b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildClusterInformer.java
@@ -0,0 +1,144 @@
+/**
+ * Copyright (C) 2016 Red Hat, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.fabric8.jenkins.openshiftsync;
+
+import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getInformerFactory;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import io.fabric8.kubernetes.api.model.ObjectMeta;
+import io.fabric8.kubernetes.client.informers.ResourceEventHandler;
+import io.fabric8.kubernetes.client.informers.SharedIndexInformer;
+import io.fabric8.kubernetes.client.informers.SharedInformerFactory;
+import io.fabric8.openshift.api.model.Build;
+import io.fabric8.openshift.api.model.BuildConfig;
+
+public class BuildClusterInformer implements ResourceEventHandler, Lifecyclable {
+
+ private static final Logger LOGGER = LoggerFactory.getLogger(SecretInformer.class.getName());
+ private final static BuildComparator BUILD_COMPARATOR = new BuildComparator();
+ private SharedIndexInformer informer;
+ private Set namespaces;
+
+ public BuildClusterInformer(String[] namespaces) {
+ this.namespaces = new HashSet<>(Arrays.asList(namespaces));
+ }
+
+ /**
+ * now that listing interval is 5 minutes (used to be 10 seconds), we have seen
+ * timing windows where if the build watch events come before build config watch
+ * events when both are created in a simultaneous fashion, there is an up to 5
+     * minutes delay before the job run gets kicked off. We started seeing duplicate
+     * builds getting kicked off, so we stopped depending on event ordering and moved off of a concurrent
+     * hash set to a concurrent hash map keyed by namespace/name
+ */
+ public int getListIntervalInSeconds() {
+ return 1_000 * GlobalPluginConfiguration.get().getBuildListInterval();
+ }
+
+ public void start() {
+ LOGGER.info("Starting Build informer for {} !!" + namespaces);
+ LOGGER.debug("Listing Build resources");
+ SharedInformerFactory factory = getInformerFactory();
+ this.informer = factory.sharedIndexInformerFor(Build.class, getListIntervalInSeconds());
+ this.informer.addEventHandler(this);
+ factory.startAllRegisteredInformers();
+ LOGGER.info("Build informer started for namespace: {}" + namespaces);
+// BuildList list = getOpenshiftClient().builds().inNamespace(namespace).list();
+// onInit(list.getItems());
+ }
+
+ public void stop() {
+ LOGGER.info("Stopping informer {} !!" + namespaces);
+ if( this.informer != null ) {
+ this.informer.stop();
+ }
+ }
+
+
+ @Override
+ public void onAdd(Build obj) {
+ LOGGER.debug("Build informer received add event for: {}" + obj);
+ if (obj != null) {
+ ObjectMeta metadata = obj.getMetadata();
+ String namespace = metadata.getNamespace();
+ if (namespaces.contains(namespace)) {
+ String name = metadata.getName();
+ LOGGER.info("Build informer received add event for: {}" + name);
+ try {
+ BuildManager.addEventToJenkinsJobRun(obj);
+ } catch (IOException e) {
+ // TODO Auto-generated catch block
+ e.printStackTrace();
+ }
+ }
+ }
+ }
+
+ @Override
+ public void onUpdate(Build oldObj, Build newObj) {
+ LOGGER.debug("Build informer received update event for: {} to: {}" + oldObj + " " + newObj);
+ if (newObj != null) {
+ ObjectMeta metadata = oldObj.getMetadata();
+ String namespace = metadata.getNamespace();
+ if (namespaces.contains(namespace)) {
+ String oldRv = oldObj.getMetadata().getResourceVersion();
+ String newRv = newObj.getMetadata().getResourceVersion();
+ LOGGER.info("Build informer received update event for: {} to: {}" + oldRv + " " + newRv);
+ BuildManager.modifyEventToJenkinsJobRun(newObj);
+ }
+ }
+ }
+
+ @Override
+ public void onDelete(Build obj, boolean deletedFinalStateUnknown) {
+ LOGGER.info("Build informer received delete event for: {}" + obj);
+ if (obj != null) {
+ ObjectMeta metadata = obj.getMetadata();
+ String namespace = metadata.getNamespace();
+ if (namespaces.contains(namespace)) {
+ try {
+ BuildManager.deleteEventToJenkinsJobRun(obj);
+ } catch (Exception e) {
+ // TODO Auto-generated catch block
+ e.printStackTrace();
+ }
+ }
+ }
+ }
+
+ private static void onInit(List list) {
+ Collections.sort(list, BUILD_COMPARATOR);
+ // We need to sort the builds into their build configs so we can
+ // handle build run policies correctly.
+ Map buildConfigMap = new HashMap<>();
+ Map> buildConfigBuildMap = new HashMap<>(list.size());
+// BuildManager.mapBuildToBuildConfigs(list, buildConfigMap, buildConfigBuildMap);
+// BuildManager.mapBuildsToBuildConfigs(buildConfigBuildMap);
+ BuildManager.reconcileRunsAndBuilds();
+ }
+
+}
diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/BuildComparator.java b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildComparator.java
new file mode 100644
index 000000000..02348f63d
--- /dev/null
+++ b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildComparator.java
@@ -0,0 +1,44 @@
+package io.fabric8.jenkins.openshiftsync;
+
+import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_ANNOTATIONS_BUILD_NUMBER;
+
+import java.util.Comparator;
+import java.util.Map;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import io.fabric8.kubernetes.api.model.ObjectMeta;
+import io.fabric8.openshift.api.model.Build;
+
+public class BuildComparator implements Comparator {
+ private static final Logger LOGGER = Logger.getLogger(BuildInformer.class.getName());
+
+ @Override
+ public int compare(Build b1, Build b2) {
+ ObjectMeta b1Metadata = b1.getMetadata();
+ Map b1Annotations = b1Metadata.getAnnotations();
+ String b1BuildNumber = b1Annotations.get(OPENSHIFT_ANNOTATIONS_BUILD_NUMBER);
+ if (b1Annotations == null || b1BuildNumber == null) {
+ String b1Namespace = b1Metadata.getNamespace();
+ String b1Name = b1Metadata.getName();
+ LOGGER.warning("Build " + b1Namespace + "/" + b1Name + ", has bad annotations: " + b1Annotations);
+ return 0;
+ }
+ ObjectMeta b2Metadata = b2.getMetadata();
+ Map b2Annotations = b2Metadata.getAnnotations();
+ String b2BuildNumber = b2Annotations.get(OPENSHIFT_ANNOTATIONS_BUILD_NUMBER);
+ if (b2Annotations == null || b2BuildNumber == null) {
+ String b2Namespace = b2Metadata.getNamespace();
+ String b2Name = b2Metadata.getName();
+ LOGGER.warning("Build " + b2Namespace + "/" + b2Name + ", has bad annotations: " + b2Annotations);
+ return 0;
+ }
+ int rc = 0;
+ try {
+ rc = Long.compare(Long.parseLong(b1BuildNumber), Long.parseLong(b2BuildNumber));
+ } catch (Throwable t) {
+ LOGGER.log(Level.FINE, "onInitialBuilds", t);
+ }
+ return rc;
+ }
+}
diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/BuildConfigClusterInformer.java b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildConfigClusterInformer.java
new file mode 100644
index 000000000..f5ea810eb
--- /dev/null
+++ b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildConfigClusterInformer.java
@@ -0,0 +1,145 @@
+/**
+ * Copyright (C) 2016 Red Hat, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.fabric8.jenkins.openshiftsync;
+
+import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getInformerFactory;
+
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import io.fabric8.kubernetes.api.model.ObjectMeta;
+import io.fabric8.kubernetes.client.informers.ResourceEventHandler;
+import io.fabric8.kubernetes.client.informers.SharedIndexInformer;
+import io.fabric8.kubernetes.client.informers.SharedInformerFactory;
+import io.fabric8.openshift.api.model.BuildConfig;
+
+/**
+ * Watches {@link BuildConfig} objects in OpenShift and for WorkflowJobs we
+ * ensure there is a suitable Jenkins Job object defined with the correct
+ * configuration
+ */
+public class BuildConfigClusterInformer implements ResourceEventHandler, Lifecyclable {
+
+ private static final Logger LOGGER = LoggerFactory.getLogger(SecretInformer.class.getName());
+ private SharedIndexInformer informer;
+ private Set namespaces;
+
+ public BuildConfigClusterInformer(String[] namespaces) {
+ this.namespaces = new HashSet<>(Arrays.asList(namespaces));
+ }
+
+ public int getListIntervalInSeconds() {
+ return 1_000 * GlobalPluginConfiguration.get().getBuildConfigListInterval();
+ }
+
+ public void start() {
+ LOGGER.info("Starting BuildConfig informer for {} !!" + namespaces);
+ LOGGER.debug("listing BuildConfig resources");
+ SharedInformerFactory factory = getInformerFactory();
+ this.informer = factory.sharedIndexInformerFor(BuildConfig.class, getListIntervalInSeconds());
+ informer.addEventHandler(this);
+ factory.startAllRegisteredInformers();
+ LOGGER.info("BuildConfig informer started for namespace: {}" + namespaces);
+ // BuildConfigList list =
+ // getOpenshiftClient().buildConfigs().inNamespace(namespace).list();
+ // onInit(list.getItems());
+ }
+
+ public void stop() {
+ LOGGER.info("Stopping informer {} !!" + namespaces);
+ if( this.informer != null ) {
+ this.informer.stop();
+ }
+ }
+
+
+ @Override
+ public void onAdd(BuildConfig obj) {
+ LOGGER.debug("BuildConfig informer received add event for: {}" + obj);
+
+ if (obj != null) {
+ ObjectMeta metadata = obj.getMetadata();
+ String namespace = metadata.getNamespace();
+ if (namespaces.contains(namespace)) {
+ String name = metadata.getName();
+ LOGGER.info("BuildConfig informer received add event for: {}" + name);
+ try {
+ BuildConfigManager.upsertJob(obj);
+ } catch (Exception e) {
+ // TODO Auto-generated catch block
+ e.printStackTrace();
+ }
+ }
+ }
+ }
+
+ @Override
+ public void onUpdate(BuildConfig oldObj, BuildConfig newObj) {
+ LOGGER.debug("BuildConfig informer received update event for: {} to: {}" + oldObj + " " + newObj);
+ if (newObj != null) {
+ ObjectMeta metadata = oldObj.getMetadata();
+ String namespace = metadata.getNamespace();
+ if (namespaces.contains(namespace)) {
+ String oldRv = oldObj.getMetadata().getResourceVersion();
+ String newRv = newObj.getMetadata().getResourceVersion();
+ LOGGER.info("BuildConfig informer received update event for: {} to: {}" + oldRv + " " + newRv);
+ try {
+ BuildConfigManager.modifyEventToJenkinsJob(newObj);
+ } catch (Exception e) {
+ // TODO Auto-generated catch block
+ e.printStackTrace();
+ }
+ }
+ }
+ }
+
+ @Override
+ public void onDelete(BuildConfig obj, boolean deletedFinalStateUnknown) {
+ LOGGER.info("BuildConfig informer received delete event for: {}" + obj);
+ if (obj != null) {
+ ObjectMeta metadata = obj.getMetadata();
+ String namespace = metadata.getNamespace();
+ if (namespaces.contains(namespace)) {
+ try {
+ BuildConfigManager.deleteEventToJenkinsJob(obj);
+ } catch (Exception e) {
+ // TODO Auto-generated catch block
+ e.printStackTrace();
+ }
+ }
+ }
+ }
+
+ private void onInit(List list) {
+ for (BuildConfig buildConfig : list) {
+ try {
+ BuildConfigManager.upsertJob(buildConfig);
+ } catch (Exception e) {
+ LOGGER.error("Failed to update job", e);
+ }
+ }
+ // poke the BuildWatcher builds with no BC list and see if we
+ // can create job
+ // runs for premature builds
+ BuildManager.flushBuildsWithNoBCList();
+ }
+
+}
diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/BuildConfigInformer.java b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildConfigInformer.java
new file mode 100644
index 000000000..68da852ae
--- /dev/null
+++ b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildConfigInformer.java
@@ -0,0 +1,129 @@
+/**
+ * Copyright (C) 2016 Red Hat, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.fabric8.jenkins.openshiftsync;
+
+import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getInformerFactory;
+
+import java.util.List;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import io.fabric8.kubernetes.api.model.ObjectMeta;
+import io.fabric8.kubernetes.client.informers.ResourceEventHandler;
+import io.fabric8.kubernetes.client.informers.SharedIndexInformer;
+import io.fabric8.kubernetes.client.informers.SharedInformerFactory;
+import io.fabric8.openshift.api.model.BuildConfig;
+
+/**
+ * Watches {@link BuildConfig} objects in OpenShift and for WorkflowJobs we
+ * ensure there is a suitable Jenkins Job object defined with the correct
+ * configuration
+ */
+public class BuildConfigInformer implements ResourceEventHandler, Lifecyclable {
+
+ private static final Logger LOGGER = LoggerFactory.getLogger(SecretInformer.class.getName());
+ private SharedIndexInformer informer;
+ private String namespace;
+
+ public BuildConfigInformer(String namespace) {
+ this.namespace = namespace;
+ }
+
+ public int getResyncPeriodMilliseconds() {
+ return 1_000 * GlobalPluginConfiguration.get().getBuildConfigListInterval();
+ }
+
+ public void start() {
+ LOGGER.info("Starting BuildConfig informer for {} !!" + namespace);
+ LOGGER.debug("listing BuildConfig resources");
+ SharedInformerFactory factory = getInformerFactory().inNamespace(namespace);
+ this.informer = factory.sharedIndexInformerFor(BuildConfig.class, getResyncPeriodMilliseconds());
+ informer.addEventHandler(this);
+ factory.startAllRegisteredInformers();
+ LOGGER.info("BuildConfig informer started for namespace: {}" + namespace);
+ // BuildConfigList list =
+ // getOpenshiftClient().buildConfigs().inNamespace(namespace).list();
+ // onInit(list.getItems());
+ }
+
+ public void stop() {
+ LOGGER.info("Stopping informer {} !!" + namespace);
+ if( this.informer != null ) {
+ this.informer.stop();
+ }
+ }
+
+ @Override
+ public void onAdd(BuildConfig obj) {
+ LOGGER.debug("BuildConfig informer received add event for: {}" + obj);
+ if (obj != null) {
+ ObjectMeta metadata = obj.getMetadata();
+ String name = metadata.getName();
+ LOGGER.info("BuildConfig informer received add event for: {}" + name);
+ try {
+ BuildConfigManager.upsertJob(obj);
+ } catch (Exception e) {
+ // TODO Auto-generated catch block
+ e.printStackTrace();
+ }
+ }
+ }
+
+ @Override
+ public void onUpdate(BuildConfig oldObj, BuildConfig newObj) {
+ LOGGER.debug("BuildConfig informer received update event for: {} to: {}" + oldObj + " " + newObj);
+ if (newObj != null) {
+ String oldRv = oldObj.getMetadata().getResourceVersion();
+ String newRv = newObj.getMetadata().getResourceVersion();
+ LOGGER.info("BuildConfig informer received update event for: {} to: {}" + oldRv + " " + newRv);
+ try {
+ BuildConfigManager.modifyEventToJenkinsJob(newObj);
+ } catch (Exception e) {
+ // TODO Auto-generated catch block
+ e.printStackTrace();
+ }
+ }
+ }
+
+ @Override
+ public void onDelete(BuildConfig obj, boolean deletedFinalStateUnknown) {
+ LOGGER.info("BuildConfig informer received delete event for: {}" + obj);
+ if (obj != null) {
+ try {
+ BuildConfigManager.deleteEventToJenkinsJob(obj);
+ } catch (Exception e) {
+ // TODO Auto-generated catch block
+ e.printStackTrace();
+ }
+ }
+ }
+
+ private void onInit(List list) {
+ for (BuildConfig buildConfig : list) {
+ try {
+ BuildConfigManager.upsertJob(buildConfig);
+ } catch (Exception e) {
+ LOGGER.error("Failed to update job", e);
+ }
+ }
+ // poke the BuildWatcher builds with no BC list and see if we
+ // can create job
+ // runs for premature builds
+ BuildManager.flushBuildsWithNoBCList();
+ }
+
+}
diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/BuildConfigManager.java b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildConfigManager.java
new file mode 100644
index 000000000..8f2fc0039
--- /dev/null
+++ b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildConfigManager.java
@@ -0,0 +1,116 @@
+package io.fabric8.jenkins.openshiftsync;
+
+import static io.fabric8.jenkins.openshiftsync.BuildConfigToJobMap.getJobFromBuildConfig;
+import static io.fabric8.jenkins.openshiftsync.BuildConfigToJobMap.removeJobWithBuildConfig;
+import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.isPipelineStrategyBuildConfig;
+
+import org.eclipse.jetty.util.ConcurrentHashSet;
+
+import hudson.model.Job;
+import hudson.security.ACL;
+import io.fabric8.openshift.api.model.BuildConfig;
+import jenkins.model.Jenkins;
+import jenkins.security.NotReallyRoleSensitiveCallable;
+
+public class BuildConfigManager {
+ /**
+ * for coordinating between ItemListener.onUpdate and onDeleted both getting
+ * called when we delete a job; ID should be combo of namespace and name for BC
+ * to properly differentiate; we don't use UUID since when we filter on the
+ * ItemListener side the UUID may not be available
+ **/
+ private static final ConcurrentHashSet deletesInProgress = new ConcurrentHashSet();
+
+ public static boolean isDeleteInProgress(String bcID) {
+ return deletesInProgress.contains(bcID);
+ }
+
+ public static void deleteCompleted(String bcID) {
+ deletesInProgress.remove(bcID);
+ }
+
+ public static void deleteInProgress(String bcName) {
+ deletesInProgress.add(bcName);
+ }
+
+ static void modifyEventToJenkinsJob(BuildConfig buildConfig) throws Exception {
+ if (isPipelineStrategyBuildConfig(buildConfig)) {
+ upsertJob(buildConfig);
+ return;
+ }
+
+ // no longer a Jenkins build so let's delete it if it exists
+ deleteEventToJenkinsJob(buildConfig);
+ }
+
+ static void upsertJob(final BuildConfig buildConfig) throws Exception {
+ if (isPipelineStrategyBuildConfig(buildConfig)) {
+ // sync on intern of the BC UID should guarantee sync on same actual obj
+ synchronized (buildConfig.getMetadata().getUid().intern()) {
+ ACL.impersonate(ACL.SYSTEM, new JobProcessor(buildConfig));
+ }
+ }
+ }
+
+ // in response to receiving an openshift delete build config event, this
+ // method will drive
+ // the clean up of the Jenkins job the build config is mapped one to one
+ // with; as part of that
+ // clean up it will synchronize with the build event watcher to handle build
+ // config
+ // delete events and build delete events that arrive concurrently and in a
+ // nondeterministic
+ // order
+ static void deleteEventToJenkinsJob(final BuildConfig buildConfig) throws Exception {
+ if (buildConfig != null) {
+ String bcUid = buildConfig.getMetadata().getUid();
+ if (bcUid != null && bcUid.length() > 0) {
+ // employ intern of the BC UID to facilitate sync'ing on the same
+ // actual object
+ bcUid = bcUid.intern();
+ synchronized (bcUid) {
+ innerDeleteEventToJenkinsJob(buildConfig);
+ return;
+ }
+ }
+ // uid should not be null / empty, but just in case, still clean up
+ innerDeleteEventToJenkinsJob(buildConfig);
+ }
+ }
+
+ // innerDeleteEventToJenkinsJob is the actual delete logic at the heart of
+ // deleteEventToJenkinsJob
+ // that is either in a sync block or not based on the presence of a BC uid
+ private static void innerDeleteEventToJenkinsJob(final BuildConfig buildConfig) throws Exception {
+ final Job job = getJobFromBuildConfig(buildConfig);
+ if (job != null) {
+ // employ intern of the BC UID to facilitate sync'ing on the same
+ // actual object
+ synchronized (buildConfig.getMetadata().getUid().intern()) {
+ ACL.impersonate(ACL.SYSTEM, new NotReallyRoleSensitiveCallable() {
+ @Override
+ public Void call() throws Exception {
+ try {
+ deleteInProgress(
+ buildConfig.getMetadata().getNamespace() + buildConfig.getMetadata().getName());
+ job.delete();
+ } finally {
+ removeJobWithBuildConfig(buildConfig);
+ Jenkins.getActiveInstance().rebuildDependencyGraphAsync();
+ deleteCompleted(
+ buildConfig.getMetadata().getNamespace() + buildConfig.getMetadata().getName());
+ }
+ return null;
+ }
+ });
+ // if the bc has a source secret it is possible it should
+ // be deleted as well (called function will cross reference
+ // with secret watch)
+ CredentialsUtils.deleteSourceCredentials(buildConfig);
+ }
+
+ }
+
+ }
+
+}
diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/BuildConfigSecretToCredentialsMap.java b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildConfigSecretToCredentialsMap.java
deleted file mode 100644
index 14d6386a8..000000000
--- a/src/main/java/io/fabric8/jenkins/openshiftsync/BuildConfigSecretToCredentialsMap.java
+++ /dev/null
@@ -1,36 +0,0 @@
-/**
- * Copyright (C) 2017 Red Hat, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package io.fabric8.jenkins.openshiftsync;
-
-import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
-
-public class BuildConfigSecretToCredentialsMap {
-
- private static Map buildConfigSecretToCredentialMap = new ConcurrentHashMap();
-
- private BuildConfigSecretToCredentialsMap() {
- }
-
- static void linkBCSecretToCredential(String bc, String credential) {
- buildConfigSecretToCredentialMap.put(bc, credential);
- }
-
- static String unlinkBCSecretToCrendential(String bc) {
- return buildConfigSecretToCredentialMap.remove(bc);
- }
-
-}
diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/BuildConfigToJobMap.java b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildConfigToJobMap.java
index 05e19588e..d21ada839 100644
--- a/src/main/java/io/fabric8/jenkins/openshiftsync/BuildConfigToJobMap.java
+++ b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildConfigToJobMap.java
@@ -10,39 +10,37 @@
import java.util.concurrent.ConcurrentHashMap;
import java.util.logging.Logger;
+import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.jenkinsJobName;
import static org.apache.commons.lang.StringUtils.isBlank;
import static org.apache.commons.lang.StringUtils.isNotBlank;
public class BuildConfigToJobMap {
private final static Logger logger = Logger.getLogger(BuildConfigToJobMap.class.getName());
- private static ConcurrentHashMap buildConfigToJobMap;
-
+ private static ConcurrentHashMap buildConfigToJobMap = new ConcurrentHashMap();
+
private BuildConfigToJobMap() {
}
static synchronized void initializeBuildConfigToJobMap() {
if (buildConfigToJobMap == null) {
- List jobs = Jenkins.getActiveInstance().getAllItems(
- WorkflowJob.class);
+ List jobs = Jenkins.getActiveInstance().getAllItems(WorkflowJob.class);
buildConfigToJobMap = new ConcurrentHashMap<>(jobs.size());
for (WorkflowJob job : jobs) {
- BuildConfigProjectProperty buildConfigProjectProperty = job
- .getProperty(BuildConfigProjectProperty.class);
- if (buildConfigProjectProperty == null) {
- continue;
- }
- String namespace = buildConfigProjectProperty.getNamespace();
- String name = buildConfigProjectProperty.getName();
- if (isNotBlank(namespace) && isNotBlank(name)) {
- buildConfigToJobMap.put(OpenShiftUtils.jenkinsJobName(namespace, name), job);
+ BuildConfigProjectProperty property = job.getProperty(BuildConfigProjectProperty.class);
+ if (property != null) {
+ String namespace = property.getNamespace();
+ String name = property.getName();
+ if (isNotBlank(namespace) && isNotBlank(name)) {
+ String jenkinsJobName = jenkinsJobName(namespace, name);
+ buildConfigToJobMap.put(jenkinsJobName, job);
+ }
}
}
}
}
- static WorkflowJob getJobFromBuildConfig(
- BuildConfig buildConfig) {
+ static WorkflowJob getJobFromBuildConfig(BuildConfig buildConfig) {
ObjectMeta meta = buildConfig.getMetadata();
if (meta == null) {
return null;
@@ -57,8 +55,7 @@ static WorkflowJob getJobFromBuildConfigNameNamespace(String name, String namesp
return buildConfigToJobMap.get(OpenShiftUtils.jenkinsJobName(namespace, name));
}
- static void putJobWithBuildConfig(WorkflowJob job,
- BuildConfig buildConfig) {
+ static void putJobWithBuildConfig(WorkflowJob job, BuildConfig buildConfig) {
if (buildConfig == null) {
throw new IllegalArgumentException("BuildConfig cannot be null");
}
@@ -67,17 +64,14 @@ static void putJobWithBuildConfig(WorkflowJob job,
}
ObjectMeta meta = buildConfig.getMetadata();
if (meta == null) {
- throw new IllegalArgumentException(
- "BuildConfig must contain valid metadata");
+ throw new IllegalArgumentException("BuildConfig must contain valid metadata");
}
putJobWithBuildConfigNameNamespace(job, meta.getName(), meta.getNamespace());
}
- static void putJobWithBuildConfigNameNamespace(WorkflowJob job,
- String name, String namespace) {
+ static void putJobWithBuildConfigNameNamespace(WorkflowJob job, String name, String namespace) {
if (isBlank(name) || isBlank(namespace)) {
- throw new IllegalArgumentException(
- "BuildConfig name and namespace must not be blank");
+ throw new IllegalArgumentException("BuildConfig name and namespace must not be blank");
}
buildConfigToJobMap.put(OpenShiftUtils.jenkinsJobName(namespace, name), job);
}
@@ -88,16 +82,14 @@ static void removeJobWithBuildConfig(BuildConfig buildConfig) {
}
ObjectMeta meta = buildConfig.getMetadata();
if (meta == null) {
- throw new IllegalArgumentException(
- "BuildConfig must contain valid metadata");
+ throw new IllegalArgumentException("BuildConfig must contain valid metadata");
}
removeJobWithBuildConfigNameNamespace(meta.getName(), meta.getNamespace());
}
static void removeJobWithBuildConfigNameNamespace(String name, String namespace) {
if (isBlank(name) || isBlank(namespace)) {
- throw new IllegalArgumentException(
- "BuildConfig name/namepsace must not be blank");
+ throw new IllegalArgumentException("BuildConfig name/namepsace must not be blank");
}
buildConfigToJobMap.remove(OpenShiftUtils.jenkinsJobName(namespace, name));
}
diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/BuildConfigWatcher.java b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildConfigWatcher.java
deleted file mode 100644
index e8e8a0a0f..000000000
--- a/src/main/java/io/fabric8/jenkins/openshiftsync/BuildConfigWatcher.java
+++ /dev/null
@@ -1,303 +0,0 @@
-/**
- * Copyright (C) 2016 Red Hat, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package io.fabric8.jenkins.openshiftsync;
-
-import com.cloudbees.hudson.plugins.folder.Folder;
-
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
-import hudson.BulkChange;
-import hudson.model.ItemGroup;
-import hudson.model.Job;
-import hudson.model.ParameterDefinition;
-import hudson.security.ACL;
-import hudson.triggers.SafeTimerTask;
-import hudson.util.XStream2;
-import io.fabric8.kubernetes.client.Watcher.Action;
-import io.fabric8.openshift.api.model.BuildConfig;
-import io.fabric8.openshift.api.model.BuildConfigList;
-import io.fabric8.openshift.api.model.BuildList;
-import jenkins.model.Jenkins;
-import jenkins.security.NotReallyRoleSensitiveCallable;
-import jenkins.util.Timer;
-
-import org.apache.tools.ant.filters.StringInputStream;
-import org.eclipse.jetty.util.ConcurrentHashSet;
-import org.jenkinsci.plugins.workflow.flow.FlowDefinition;
-import org.jenkinsci.plugins.workflow.job.WorkflowJob;
-
-import java.io.InputStream;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.TimeUnit;
-import java.util.logging.Level;
-import java.util.logging.Logger;
-
-import static io.fabric8.jenkins.openshiftsync.BuildConfigToJobMap.getJobFromBuildConfig;
-import static io.fabric8.jenkins.openshiftsync.BuildConfigToJobMap.initializeBuildConfigToJobMap;
-import static io.fabric8.jenkins.openshiftsync.BuildConfigToJobMap.removeJobWithBuildConfig;
-import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_BUILD_STATUS_FIELD;
-import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_LABELS_BUILD_CONFIG_NAME;
-import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.*;
-import static java.util.logging.Level.SEVERE;
-
-/**
- * Watches {@link BuildConfig} objects in OpenShift and for WorkflowJobs we
- * ensure there is a suitable Jenkins Job object defined with the correct
- * configuration
- */
-public class BuildConfigWatcher extends BaseWatcher {
- private final Logger logger = Logger.getLogger(getClass().getName());
-
- // for coordinating between ItemListener.onUpdate and onDeleted both
- // getting called when we delete a job; ID should be combo of namespace
- // and name for BC to properly differentiate; we don't use UUID since
- // when we filter on the ItemListener side the UUID may not be
- // available
- private static final ConcurrentHashSet deletesInProgress = new ConcurrentHashSet();
-
- public static void deleteInProgress(String bcName) {
- deletesInProgress.add(bcName);
- }
-
- public static boolean isDeleteInProgress(String bcID) {
- return deletesInProgress.contains(bcID);
- }
-
- public static void deleteCompleted(String bcID) {
- deletesInProgress.remove(bcID);
- }
-
- @SuppressFBWarnings("EI_EXPOSE_REP2")
- public BuildConfigWatcher(String[] namespaces) {
- super(namespaces);
- }
-
- @Override
- public int getListIntervalInSeconds() {
- return GlobalPluginConfiguration.get().getBuildConfigListInterval();
- }
-
- public Runnable getStartTimerTask() {
- return new SafeTimerTask() {
- @Override
- public void doRun() {
- if (!CredentialsUtils.hasCredentials()) {
- logger.fine("No Openshift Token credential defined.");
- return;
- }
- for (String namespace : namespaces) {
- BuildConfigList buildConfigs = null;
- try {
- logger.fine("listing BuildConfigs resources");
- buildConfigs = getAuthenticatedOpenShiftClient().buildConfigs().inNamespace(namespace).list();
- onInitialBuildConfigs(buildConfigs);
- logger.fine("handled BuildConfigs resources");
- } catch (Exception e) {
- logger.log(SEVERE, "Failed to load BuildConfigs: " + e, e);
- }
- try {
- String resourceVersion = "0";
- if (buildConfigs == null) {
- logger.warning("Unable to get build config list; impacts resource version used for watch");
- } else {
- resourceVersion = buildConfigs.getMetadata().getResourceVersion();
- }
- if (watches.get(namespace) == null) {
- logger.info("creating BuildConfig watch for namespace " + namespace + " and resource version " + resourceVersion);
- addWatch(namespace, getAuthenticatedOpenShiftClient().buildConfigs().inNamespace(namespace).withResourceVersion(resourceVersion).watch(new WatcherCallback(BuildConfigWatcher.this,namespace)));
- }
- } catch (Exception e) {
- logger.log(SEVERE, "Failed to load BuildConfigs: " + e, e);
- }
- }
- // poke the BuildWatcher builds with no BC list and see if we
- // can create job
- // runs for premature builds
- BuildWatcher.flushBuildsWithNoBCList();
- }
- };
- }
-
- public void start() {
- initializeBuildConfigToJobMap();
- logger.info("Now handling startup build configs!!");
- super.start();
-
- }
-
- private void onInitialBuildConfigs(BuildConfigList buildConfigs) {
- if (buildConfigs == null)
- return;
- List items = buildConfigs.getItems();
- if (items != null) {
- for (BuildConfig buildConfig : items) {
- try {
- upsertJob(buildConfig);
- } catch (Exception e) {
- logger.log(SEVERE, "Failed to update job", e);
- }
- }
- }
- }
-
- @SuppressFBWarnings("SF_SWITCH_NO_DEFAULT")
- public void eventReceived(Action action, BuildConfig buildConfig) {
- try {
- switch (action) {
- case ADDED:
- upsertJob(buildConfig);
- break;
- case DELETED:
- deleteEventToJenkinsJob(buildConfig);
- break;
- case MODIFIED:
- modifyEventToJenkinsJob(buildConfig);
- break;
- case ERROR:
- logger.warning("watch for buildconfig " + buildConfig.getMetadata().getName() + " received error event ");
- break;
- default:
- logger.warning("watch for buildconfig " + buildConfig.getMetadata().getName() + " received unknown event " + action);
- break;
- }
- // we employ impersonation here to insure we have "full access";
- // for example, can we actually
- // read in jobs defs for verification? without impersonation here
- // we would get null back when trying to read in the job from disk
- ACL.impersonate(ACL.SYSTEM, new NotReallyRoleSensitiveCallable() {
- @Override
- public Void call() throws Exception {
- // if bc event came after build events, let's
- // poke the BuildWatcher builds with no BC list to
- // create job
- // runs
- BuildWatcher.flushBuildsWithNoBCList();
- // now, if the build event was lost and never
- // received, builds
- // will stay in
- // new for 5 minutes ... let's launch a background
- // thread to
- // clean them up
- // at a quicker interval than the default 5 minute
- // general build
- // relist function
- if (action == Action.ADDED) {
- Runnable backupBuildQuery = new SafeTimerTask() {
- @Override
- public void doRun() {
- if (!CredentialsUtils.hasCredentials()) {
- logger.fine("No Openshift Token credential defined.");
- return;
- }
- BuildList buildList = getAuthenticatedOpenShiftClient().builds().inNamespace(buildConfig.getMetadata().getNamespace()).withField(OPENSHIFT_BUILD_STATUS_FIELD, BuildPhases.NEW)
- .withLabel(OPENSHIFT_LABELS_BUILD_CONFIG_NAME, buildConfig.getMetadata().getName()).list();
- if (buildList.getItems().size() > 0) {
- logger.info("build backup query for " + buildConfig.getMetadata().getName() + " found new builds");
- BuildWatcher.onInitialBuilds(buildList);
- }
- }
- };
- Timer.get().schedule(backupBuildQuery, 10 * 1000, TimeUnit.MILLISECONDS);
- }
- return null;
- }
- });
- } catch (Exception e) {
- logger.log(Level.WARNING, "Caught: " + e, e);
- }
- }
- @Override
- public void eventReceived(io.fabric8.kubernetes.client.Watcher.Action action, T resource) {
- BuildConfig bc = (BuildConfig)resource;
- eventReceived(action, bc);
- }
-
- private void upsertJob(final BuildConfig buildConfig) throws Exception {
- if (isPipelineStrategyBuildConfig(buildConfig)) {
- // sync on intern of name should guarantee sync on same actual obj
- synchronized (buildConfig.getMetadata().getUid().intern()) {
- ACL.impersonate(ACL.SYSTEM, new JobProcessor(this, buildConfig));
- }
- }
- }
-
- private void modifyEventToJenkinsJob(BuildConfig buildConfig) throws Exception {
- if (isPipelineStrategyBuildConfig(buildConfig)) {
- upsertJob(buildConfig);
- return;
- }
-
- // no longer a Jenkins build so lets delete it if it exists
- deleteEventToJenkinsJob(buildConfig);
- }
-
- // innerDeleteEventToJenkinsJob is the actual delete logic at the heart of
- // deleteEventToJenkinsJob
- // that is either in a sync block or not based on the presence of a BC uid
- private void innerDeleteEventToJenkinsJob(final BuildConfig buildConfig) throws Exception {
- final Job job = getJobFromBuildConfig(buildConfig);
- if (job != null) {
- // employ intern of the BC UID to facilitate sync'ing on the same
- // actual object
- synchronized (buildConfig.getMetadata().getUid().intern()) {
- ACL.impersonate(ACL.SYSTEM, new NotReallyRoleSensitiveCallable() {
- @Override
- public Void call() throws Exception {
- try {
- deleteInProgress(buildConfig.getMetadata().getNamespace() + buildConfig.getMetadata().getName());
- job.delete();
- } finally {
- removeJobWithBuildConfig(buildConfig);
- Jenkins.getActiveInstance().rebuildDependencyGraphAsync();
- deleteCompleted(buildConfig.getMetadata().getNamespace() + buildConfig.getMetadata().getName());
- }
- return null;
- }
- });
- // if the bc has a source secret it is possible it should
- // be deleted as well (called function will cross reference
- // with secret watch)
- CredentialsUtils.deleteSourceCredentials(buildConfig);
- }
-
- }
-
- }
-
- // in response to receiving an openshift delete build config event, this
- // method will drive
- // the clean up of the Jenkins job the build config is mapped one to one
- // with; as part of that
- // clean up it will synchronize with the build event watcher to handle build
- // config
- // delete events and build delete events that arrive concurrently and in a
- // nondeterministic
- // order
- private void deleteEventToJenkinsJob(final BuildConfig buildConfig) throws Exception {
- String bcUid = buildConfig.getMetadata().getUid();
- if (bcUid != null && bcUid.length() > 0) {
- // employ intern of the BC UID to facilitate sync'ing on the same
- // actual object
- bcUid = bcUid.intern();
- synchronized (bcUid) {
- innerDeleteEventToJenkinsJob(buildConfig);
- return;
- }
- }
- // uid should not be null / empty, but just in case, still clean up
- innerDeleteEventToJenkinsJob(buildConfig);
- }
-}
diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/BuildDecisionHandler.java b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildDecisionHandler.java
index 8f6fe8a4b..a8b034fb4 100644
--- a/src/main/java/io/fabric8/jenkins/openshiftsync/BuildDecisionHandler.java
+++ b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildDecisionHandler.java
@@ -77,7 +77,7 @@ public boolean shouldSchedule(Queue.Task p, List actions) {
ParametersAction params = dumpParams(actions);
if (LOGGER.isLoggable(Level.FINE)) {
- LOGGER.fine("ParametersAction: " + params.toString());
+ LOGGER.fine("ParametersAction: " + params);
}
if (params != null && ret != null)
BuildToActionMapper.addParameterAction(ret.getMetadata()
diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/BuildInformer.java b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildInformer.java
new file mode 100644
index 000000000..30cbd0468
--- /dev/null
+++ b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildInformer.java
@@ -0,0 +1,129 @@
+/**
+ * Copyright (C) 2016 Red Hat, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.fabric8.jenkins.openshiftsync;
+
+import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getInformerFactory;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import io.fabric8.kubernetes.api.model.ObjectMeta;
+import io.fabric8.kubernetes.client.informers.ResourceEventHandler;
+import io.fabric8.kubernetes.client.informers.SharedIndexInformer;
+import io.fabric8.kubernetes.client.informers.SharedInformerFactory;
+import io.fabric8.openshift.api.model.Build;
+import io.fabric8.openshift.api.model.BuildConfig;
+
+public class BuildInformer implements ResourceEventHandler, Lifecyclable {
+
+ private static final Logger LOGGER = LoggerFactory.getLogger(SecretInformer.class.getName());
+ private final static BuildComparator BUILD_COMPARATOR = new BuildComparator();
+ private SharedIndexInformer informer;
+ private String namespace;
+
+ public BuildInformer(String namespace) {
+ this.namespace = namespace;
+ }
+
+ /**
+ * Now that the listing interval is 5 minutes (it used to be 10 seconds), we have
+ * seen timing windows where, if the build watch events arrive before the build
+ * config watch events when both are created simultaneously, there is an up to
+ * 5 minute delay before the job run gets kicked off. We also started seeing
+ * duplicate builds getting kicked off, so we stopped depending on timing and
+ * moved from a concurrent hash set to a concurrent hash map keyed by namespace/name.
+ */
+ public int getResyncPeriodMilliseconds() {
+ return 1_000 * GlobalPluginConfiguration.get().getBuildListInterval();
+ }
+
+ public void start() {
+ LOGGER.info("Starting Build informer for {} !!" + namespace);
+ LOGGER.debug("Listing Build resources");
+ SharedInformerFactory factory = getInformerFactory().inNamespace(namespace);
+ this.informer = factory.sharedIndexInformerFor(Build.class, getResyncPeriodMilliseconds());
+ this.informer.addEventHandler(this);
+ factory.startAllRegisteredInformers();
+ LOGGER.info("Build informer started for namespace: {}" + namespace);
+// BuildList list = getOpenshiftClient().builds().inNamespace(namespace).list();
+// onInit(list.getItems());
+ }
+
+ public void stop() {
+ LOGGER.info("Stopping informer {} !!" + namespace);
+ if( this.informer != null ) {
+ this.informer.stop();
+ }
+ }
+
+ @Override
+ public void onAdd(Build obj) {
+ LOGGER.debug("Build informer received add event for: {}" + obj);
+ if (obj != null) {
+ ObjectMeta metadata = obj.getMetadata();
+ String name = metadata.getName();
+ LOGGER.info("Build informer received add event for: {}" + name);
+ try {
+ BuildManager.addEventToJenkinsJobRun(obj);
+ } catch (IOException e) {
+ // TODO Auto-generated catch block
+ e.printStackTrace();
+ }
+ }
+ }
+
+ @Override
+ public void onUpdate(Build oldObj, Build newObj) {
+ LOGGER.debug("Build informer received update event for: {} to: {}" + oldObj + " " + newObj);
+ if (newObj != null) {
+ String oldRv = oldObj.getMetadata().getResourceVersion();
+ String newRv = newObj.getMetadata().getResourceVersion();
+ LOGGER.info("Build informer received update event for: {} to: {}" + oldRv + " " + newRv);
+ BuildManager.modifyEventToJenkinsJobRun(newObj);
+ }
+ }
+
+ @Override
+ public void onDelete(Build obj, boolean deletedFinalStateUnknown) {
+ LOGGER.info("Build informer received delete event for: {}" + obj);
+ if (obj != null) {
+ try {
+ BuildManager.deleteEventToJenkinsJobRun(obj);
+ } catch (Exception e) {
+ // TODO Auto-generated catch block
+ e.printStackTrace();
+ }
+ }
+ }
+
+ private static void onInit(List list) {
+ Collections.sort(list, BUILD_COMPARATOR);
+ // We need to sort the builds into their build configs so we can
+ // handle build run policies correctly.
+ Map buildConfigMap = new HashMap<>();
+ Map> buildConfigBuildMap = new HashMap<>(list.size());
+// BuildManager.mapBuildToBuildConfigs(list, buildConfigMap, buildConfigBuildMap);
+// BuildManager.mapBuildsToBuildConfigs(buildConfigBuildMap);
+ BuildManager.reconcileRunsAndBuilds();
+ }
+
+}
diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/BuildWatcher.java b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildManager.java
similarity index 53%
rename from src/main/java/io/fabric8/jenkins/openshiftsync/BuildWatcher.java
rename to src/main/java/io/fabric8/jenkins/openshiftsync/BuildManager.java
index 5762d1b40..8aebca7c4 100644
--- a/src/main/java/io/fabric8/jenkins/openshiftsync/BuildWatcher.java
+++ b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildManager.java
@@ -15,206 +15,82 @@
*/
package io.fabric8.jenkins.openshiftsync;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
-import hudson.security.ACL;
-import hudson.triggers.SafeTimerTask;
-import io.fabric8.kubernetes.api.model.OwnerReference;
-import io.fabric8.kubernetes.client.Watcher.Action;
-import io.fabric8.openshift.api.model.Build;
-import io.fabric8.openshift.api.model.BuildConfig;
-import io.fabric8.openshift.api.model.BuildList;
-import io.fabric8.openshift.api.model.BuildStatus;
-import jenkins.model.Jenkins;
-import jenkins.security.NotReallyRoleSensitiveCallable;
-
-import org.apache.commons.lang.StringUtils;
-import org.jenkinsci.plugins.workflow.job.WorkflowJob;
-import org.jenkinsci.plugins.workflow.job.WorkflowRun;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.ConcurrentModificationException;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.logging.Level;
-import java.util.logging.Logger;
-
import static io.fabric8.jenkins.openshiftsync.Annotations.BUILDCONFIG_NAME;
import static io.fabric8.jenkins.openshiftsync.BuildConfigToJobMap.getJobFromBuildConfig;
import static io.fabric8.jenkins.openshiftsync.BuildConfigToJobMap.getJobFromBuildConfigNameNamespace;
import static io.fabric8.jenkins.openshiftsync.BuildPhases.CANCELLED;
import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_ANNOTATIONS_BUILD_NUMBER;
-import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_BUILD_STATUS_FIELD;
import static io.fabric8.jenkins.openshiftsync.JenkinsUtils.cancelBuild;
import static io.fabric8.jenkins.openshiftsync.JenkinsUtils.deleteRun;
import static io.fabric8.jenkins.openshiftsync.JenkinsUtils.getJobFromBuild;
import static io.fabric8.jenkins.openshiftsync.JenkinsUtils.handleBuildList;
import static io.fabric8.jenkins.openshiftsync.JenkinsUtils.triggerJob;
+import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getAnnotation;
import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getAuthenticatedOpenShiftClient;
import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.isCancellable;
import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.isCancelled;
import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.isNew;
import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.updateOpenShiftBuildPhase;
-import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getAnnotation;
-import static java.util.logging.Level.WARNING;
-
-public class BuildWatcher extends BaseWatcher {
- private static final Logger logger = Logger.getLogger(BuildWatcher.class
- .getName());
-
- // now that listing interval is 5 minutes (used to be 10 seconds), we have
- // seen
- // timing windows where if the build watch events come before build config
- // watch events
- // when both are created in a simultaneous fashion, there is an up to 5
- // minute delay
- // before the job run gets kicked off
- // started seeing duplicate builds getting kicked off so quit depending on
- // so moved off of concurrent hash set to concurrent hash map using
- // namepace/name key
- private static final ConcurrentHashMap buildsWithNoBCList = new ConcurrentHashMap();
- @SuppressFBWarnings("EI_EXPOSE_REP2")
- public BuildWatcher(String[] namespaces) {
- super(namespaces);
- }
-
- @Override
- public int getListIntervalInSeconds() {
- return GlobalPluginConfiguration.get().getBuildListInterval();
- }
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.ConcurrentModificationException;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.logging.Level;
+import java.util.logging.Logger;
- @Override
- public Runnable getStartTimerTask() {
- return new SafeTimerTask() {
- @Override
- public void doRun() {
- if (!CredentialsUtils.hasCredentials()) {
- logger.fine("No Openshift Token credential defined.");
- return;
- }
- // prior to finding new builds poke the BuildWatcher builds with
- // no BC list and see if we
- // can create job runs for premature builds we already know
- // about
- BuildWatcher.flushBuildsWithNoBCList();
- for (String namespace : namespaces) {
- BuildList newBuilds = null;
- try {
- logger.fine("listing Build resources");
- newBuilds = getAuthenticatedOpenShiftClient()
- .builds()
- .inNamespace(namespace)
- .withField(OPENSHIFT_BUILD_STATUS_FIELD,
- BuildPhases.NEW).list();
- onInitialBuilds(newBuilds);
- logger.fine("handled Build resources");
- } catch (Exception e) {
- logger.log(Level.SEVERE,
- "Failed to load initial Builds: " + e, e);
- }
- try {
- String resourceVersion = "0";
- if (newBuilds == null) {
- logger.warning("Unable to get build list; impacts resource version used for watch");
- } else {
- resourceVersion = newBuilds.getMetadata()
- .getResourceVersion();
- }
- if (watches.get(namespace) == null) {
- logger.info("creating Build watch for namespace "
- + namespace
- + " and resource version "
- + resourceVersion);
+import org.apache.commons.lang.StringUtils;
+import org.jenkinsci.plugins.workflow.job.WorkflowJob;
+import org.jenkinsci.plugins.workflow.job.WorkflowRun;
- addWatch(namespace, getAuthenticatedOpenShiftClient()
- .builds()
- .inNamespace(namespace)
- .withResourceVersion(
- resourceVersion)
- .watch(new WatcherCallback(
- BuildWatcher.this,
- namespace)));
- }
- } catch (Exception e) {
- logger.log(Level.SEVERE,
- "Failed to load initial Builds: " + e, e);
- }
- }
- reconcileRunsAndBuilds();
- }
- };
- }
+import hudson.security.ACL;
+import io.fabric8.kubernetes.api.model.OwnerReference;
+import io.fabric8.openshift.api.model.Build;
+import io.fabric8.openshift.api.model.BuildConfig;
+import io.fabric8.openshift.api.model.BuildList;
+import io.fabric8.openshift.api.model.BuildStatus;
+import io.fabric8.openshift.client.OpenShiftClient;
+import jenkins.model.Jenkins;
+import jenkins.security.NotReallyRoleSensitiveCallable;
- public void start() {
- BuildToActionMapper.initialize();
- super.start();
- }
+@SuppressWarnings({ "deprecation", "serial" })
+public class BuildManager {
+ private static final Logger logger = Logger.getLogger(BuildManager.class.getName());
- @SuppressFBWarnings("SF_SWITCH_NO_DEFAULT")
- public void eventReceived(Action action, Build build) {
- if (!OpenShiftUtils.isPipelineStrategyBuild(build))
- return;
- try {
- switch (action) {
- case ADDED:
- addEventToJenkinsJobRun(build);
- break;
- case MODIFIED:
- modifyEventToJenkinsJobRun(build);
- break;
- case DELETED:
- deleteEventToJenkinsJobRun(build);
- break;
- case ERROR:
- logger.warning("watch for build " + build.getMetadata().getName() + " received error event ");
- break;
- default:
- logger.warning("watch for build " + build.getMetadata().getName() + " received unknown event " + action);
- break;
- }
- } catch (Exception e) {
- logger.log(WARNING, "Caught: " + e, e);
- }
- }
- @Override
- public void eventReceived(io.fabric8.kubernetes.client.Watcher.Action action, T resource) {
- Build build = (Build)resource;
- eventReceived(action, build);
- }
+    /**
+     * Now that the listing interval is 5 minutes (it used to be 10 seconds), we
+     * have seen timing windows where, if the build watch events arrive before the
+     * build config watch events when both are created simultaneously, there is an
+     * up to 5 minute delay before the job run gets kicked off. We also started
+     * seeing duplicate builds getting kicked off, so we moved off of a concurrent
+     * hash set to a concurrent hash map keyed by namespace/name.
+     */
+ protected static final ConcurrentHashMap buildsWithNoBCList = new ConcurrentHashMap();
public static void onInitialBuilds(BuildList buildList) {
if (buildList == null)
return;
List items = buildList.getItems();
if (items != null) {
-
Collections.sort(items, new Comparator() {
@Override
public int compare(Build b1, Build b2) {
if (b1.getMetadata().getAnnotations() == null
- || b1.getMetadata().getAnnotations()
- .get(OPENSHIFT_ANNOTATIONS_BUILD_NUMBER) == null) {
- logger.warning("cannot compare build "
- + b1.getMetadata().getName()
- + " from namespace "
- + b1.getMetadata().getNamespace()
- + ", has bad annotations: "
+ || b1.getMetadata().getAnnotations().get(OPENSHIFT_ANNOTATIONS_BUILD_NUMBER) == null) {
+ logger.warning("cannot compare build " + b1.getMetadata().getName() + " from namespace "
+ + b1.getMetadata().getNamespace() + ", has bad annotations: "
+ b1.getMetadata().getAnnotations());
return 0;
}
if (b2.getMetadata().getAnnotations() == null
- || b2.getMetadata().getAnnotations()
- .get(OPENSHIFT_ANNOTATIONS_BUILD_NUMBER) == null) {
- logger.warning("cannot compare build "
- + b2.getMetadata().getName()
- + " from namespace "
- + b2.getMetadata().getNamespace()
- + ", has bad annotations: "
+ || b2.getMetadata().getAnnotations().get(OPENSHIFT_ANNOTATIONS_BUILD_NUMBER) == null) {
+ logger.warning("cannot compare build " + b2.getMetadata().getName() + " from namespace "
+ + b2.getMetadata().getNamespace() + ", has bad annotations: "
+ b2.getMetadata().getAnnotations());
return 0;
}
@@ -222,14 +98,10 @@ public int compare(Build b1, Build b2) {
try {
rc = Long.compare(
- Long.parseLong(b1
- .getMetadata()
- .getAnnotations()
- .get(OPENSHIFT_ANNOTATIONS_BUILD_NUMBER)),
- Long.parseLong(b2
- .getMetadata()
- .getAnnotations()
- .get(OPENSHIFT_ANNOTATIONS_BUILD_NUMBER)));
+ Long.parseLong(
+ b1.getMetadata().getAnnotations().get(OPENSHIFT_ANNOTATIONS_BUILD_NUMBER)),
+ Long.parseLong(
+ b2.getMetadata().getAnnotations().get(OPENSHIFT_ANNOTATIONS_BUILD_NUMBER)));
} catch (Throwable t) {
logger.log(Level.FINE, "onInitialBuilds", t);
}
@@ -240,8 +112,7 @@ public int compare(Build b1, Build b2) {
// We need to sort the builds into their build configs so we can
// handle build run policies correctly.
Map buildConfigMap = new HashMap<>();
- Map> buildConfigBuildMap = new HashMap<>(
- items.size());
+ Map> buildConfigBuildMap = new HashMap<>(items.size());
for (Build b : items) {
if (!OpenShiftUtils.isPipelineStrategyBuild(b))
continue;
@@ -253,9 +124,8 @@ public int compare(Build b1, Build b2) {
String bcMapKey = namespace + "/" + buildConfigName;
BuildConfig bc = buildConfigMap.get(bcMapKey);
if (bc == null) {
- bc = getAuthenticatedOpenShiftClient().buildConfigs()
- .inNamespace(namespace).withName(buildConfigName)
- .get();
+ bc = getAuthenticatedOpenShiftClient().buildConfigs().inNamespace(namespace)
+ .withName(buildConfigName).get();
if (bc == null) {
// if the bc is not there via a REST get, then it is not
// going to be, and we are not handling manual creation
@@ -273,8 +143,7 @@ public int compare(Build b1, Build b2) {
}
// Now handle the builds.
- for (Map.Entry> buildConfigBuilds : buildConfigBuildMap
- .entrySet()) {
+ for (Map.Entry> buildConfigBuilds : buildConfigBuildMap.entrySet()) {
BuildConfig bc = buildConfigBuilds.getKey();
if (bc.getMetadata() == null) {
// Should never happen but let's be safe...
@@ -284,21 +153,16 @@ public int compare(Build b1, Build b2) {
if (job == null) {
List builds = buildConfigBuilds.getValue();
for (Build b : builds) {
- logger.info("skipping listed new build "
- + b.getMetadata().getName()
- + " no job at this time");
+ logger.info("skipping listed new build " + b.getMetadata().getName() + " no job at this time");
addBuildToNoBCList(b);
}
continue;
}
- BuildConfigProjectProperty bcp = job
- .getProperty(BuildConfigProjectProperty.class);
+ BuildConfigProjectProperty bcp = job.getProperty(BuildConfigProjectProperty.class);
if (bcp == null) {
List builds = buildConfigBuilds.getValue();
for (Build b : builds) {
- logger.info("skipping listed new build "
- + b.getMetadata().getName()
- + " no prop at this time");
+ logger.info("skipping listed new build " + b.getMetadata().getName() + " no prop at this time");
addBuildToNoBCList(b);
}
continue;
@@ -309,7 +173,7 @@ public int compare(Build b1, Build b2) {
}
}
- private static void modifyEventToJenkinsJobRun(Build build) {
+ static void modifyEventToJenkinsJobRun(Build build) {
BuildStatus status = build.getStatus();
if (status != null && isCancellable(status) && isCancelled(status)) {
WorkflowJob job = getJobFromBuild(build);
@@ -324,8 +188,7 @@ private static void modifyEventToJenkinsJobRun(Build build) {
}
}
- public static boolean addEventToJenkinsJobRun(Build build)
- throws IOException {
+ public static boolean addEventToJenkinsJobRun(Build build) throws IOException {
// should have been caught upstack, but just in case since public method
if (!OpenShiftUtils.isPipelineStrategyBuild(build))
return false;
@@ -344,36 +207,34 @@ public static boolean addEventToJenkinsJobRun(Build build)
if (job != null) {
return triggerJob(job, build);
}
- logger.info("skipping watch event for build "
- + build.getMetadata().getName() + " no job at this time");
+ logger.info("skipping watch event for build " + build.getMetadata().getName() + " no job at this time");
addBuildToNoBCList(build);
return false;
}
- private static void addBuildToNoBCList(Build build) {
+ static void addBuildToNoBCList(Build build) {
// should have been caught upstack, but just in case since public method
if (!OpenShiftUtils.isPipelineStrategyBuild(build))
return;
try {
- buildsWithNoBCList.put(build.getMetadata().getNamespace()+build.getMetadata().getName(), build);
- } catch (ConcurrentModificationException | IllegalArgumentException |
- UnsupportedOperationException | NullPointerException e) {
- logger.log(Level.WARNING,"Failed to add item " +
- build.getMetadata().getName(), e);
+ buildsWithNoBCList.put(build.getMetadata().getNamespace() + build.getMetadata().getName(), build);
+ } catch (ConcurrentModificationException | IllegalArgumentException | UnsupportedOperationException
+ | NullPointerException e) {
+ logger.log(Level.WARNING, "Failed to add item " + build.getMetadata().getName(), e);
}
}
- private static void removeBuildFromNoBCList(Build build) {
- buildsWithNoBCList.remove(build.getMetadata().getNamespace()+build.getMetadata().getName());
+ static void removeBuildFromNoBCList(Build build) {
+ buildsWithNoBCList.remove(build.getMetadata().getNamespace() + build.getMetadata().getName());
}
// trigger any builds whose watch events arrived before the
// corresponding build config watch events
public static void flushBuildsWithNoBCList() {
-
- ConcurrentHashMap clone = null;
- synchronized(buildsWithNoBCList) {
- clone = new ConcurrentHashMap(buildsWithNoBCList);
+
+ ConcurrentHashMap clone = null;
+ synchronized (buildsWithNoBCList) {
+ clone = new ConcurrentHashMap(buildsWithNoBCList);
}
boolean anyRemoveFailures = false;
for (Build build : clone.values()) {
@@ -386,7 +247,7 @@ public static void flushBuildsWithNoBCList() {
logger.log(Level.WARNING, "flushBuildsWithNoBCList", e);
}
try {
- synchronized(buildsWithNoBCList) {
+ synchronized (buildsWithNoBCList) {
removeBuildFromNoBCList(build);
}
} catch (Throwable t) {
@@ -401,12 +262,12 @@ public static void flushBuildsWithNoBCList() {
logger.log(Level.WARNING, "flushBuildsWithNoBCList", t);
}
}
-
- synchronized(buildsWithNoBCList) {
+
+ synchronized (buildsWithNoBCList) {
if (anyRemoveFailures && buildsWithNoBCList.size() > 0) {
buildsWithNoBCList.clear();
- }
-
+ }
+
}
}
}
@@ -414,17 +275,15 @@ public static void flushBuildsWithNoBCList() {
// innerDeleteEventToJenkinsJobRun is the actual delete logic at the heart
// of deleteEventToJenkinsJobRun
// that is either in a sync block or not based on the presence of a BC uid
- private static void innerDeleteEventToJenkinsJobRun(
- final Build build) throws Exception {
+ private static void innerDeleteEventToJenkinsJobRun(final Build build) throws Exception {
final WorkflowJob job = getJobFromBuild(build);
if (job != null) {
- ACL.impersonate(ACL.SYSTEM,
- new NotReallyRoleSensitiveCallable() {
- @Override
- public Void call() throws Exception {
- cancelBuild(job, build, true);
- return null;
- }
+ ACL.impersonate(ACL.SYSTEM, new NotReallyRoleSensitiveCallable() {
+ @Override
+ public Void call() throws Exception {
+ cancelBuild(job, build, true);
+ return null;
+ }
});
} else {
// in case build was created and deleted quickly, prior to seeing BC
@@ -443,14 +302,11 @@ public Void call() throws Exception {
// delete events and build delete events that arrive concurrently and in a
// nondeterministic
// order
- private static void deleteEventToJenkinsJobRun(
- final Build build) throws Exception {
- List ownerRefs = build.getMetadata()
- .getOwnerReferences();
+ static void deleteEventToJenkinsJobRun(final Build build) throws Exception {
+ List ownerRefs = build.getMetadata().getOwnerReferences();
String bcUid = null;
for (OwnerReference ref : ownerRefs) {
- if ("BuildConfig".equals(ref.getKind()) && ref.getUid() != null
- && ref.getUid().length() > 0) {
+ if ("BuildConfig".equals(ref.getKind()) && ref.getUid() != null && ref.getUid().length() > 0) {
// employ intern to facilitate sync'ing on the same actual
// object
bcUid = ref.getUid().intern();
@@ -458,7 +314,7 @@ private static void deleteEventToJenkinsJobRun(
// if entire job already deleted via bc delete, just return
if (getJobFromBuildConfigNameNamespace(getAnnotation(build, BUILDCONFIG_NAME),
build.getMetadata().getNamespace()) == null) {
- return;
+ return;
}
innerDeleteEventToJenkinsJobRun(build);
return;
@@ -470,41 +326,40 @@ private static void deleteEventToJenkinsJobRun(
innerDeleteEventToJenkinsJobRun(build);
}
- /**
- * Reconciles Jenkins job runs and OpenShift builds
- *
- * Deletes all job runs that do not have an associated build in OpenShift
- */
- private static void reconcileRunsAndBuilds() {
- logger.info("Reconciling job runs and builds");
-
- List jobs = Jenkins.getActiveInstance().getAllItems(WorkflowJob.class);
-
- for (WorkflowJob job : jobs) {
- BuildConfigProjectProperty property = job.getProperty(BuildConfigProjectProperty.class);
- if (property == null || StringUtils.isBlank(property.getNamespace()) || StringUtils.isBlank(property.getName())) {
- continue;
- }
-
- logger.info("Checking job " + job.toString() + " runs for BuildConfig " + property.getNamespace() + "/" + property.getName());
-
- BuildList buildList = getAuthenticatedOpenShiftClient().builds()
- .inNamespace(property.getNamespace()).withLabel("buildconfig=" + property.getName()).list();
+ /**
+ * Reconciles Jenkins job runs and OpenShift builds
+ *
+ * Deletes all job runs that do not have an associated build in OpenShift
+ */
+ static void reconcileRunsAndBuilds() {
+ logger.fine("Reconciling job runs and builds");
+ List jobs = Jenkins.getActiveInstance().getAllItems(WorkflowJob.class);
+ for (WorkflowJob job : jobs) {
+ BuildConfigProjectProperty property = job.getProperty(BuildConfigProjectProperty.class);
+ if (property != null) {
+ String ns = property.getNamespace();
+ String name = property.getName();
+ if (StringUtils.isNotBlank(ns) && StringUtils.isNotBlank(name)) {
+ logger.fine("Checking job " + job + " runs for BuildConfig " + ns + "/" + name);
+ OpenShiftClient client = getAuthenticatedOpenShiftClient();
+ BuildList builds = client.builds().inNamespace(ns).withLabel("buildconfig=" + name).list();
+ for (WorkflowRun run : job.getBuilds()) {
+ boolean found = false;
+ BuildCause cause = run.getCause(BuildCause.class);
+ for (Build build : builds.getItems()) {
+ if (cause != null && cause.getUid().equals(build.getMetadata().getUid())) {
+ found = true;
+ break;
+ }
+ }
+ if (!found) {
+ deleteRun(run);
+ }
+ }
+ }
+ }
- for (WorkflowRun run : job.getBuilds()) {
- boolean found = false;
- BuildCause cause = run.getCause(BuildCause.class);
- for (Build build : buildList.getItems()) {
- if (cause != null && cause.getUid().equals(build.getMetadata().getUid())) {
- found = true;
- break;
- }
- }
- if (!found) {
- deleteRun(run);
}
- }
}
- }
}
diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/BuildSyncRunListener.java b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildSyncRunListener.java
index c5246b4f6..c4d875f9e 100644
--- a/src/main/java/io/fabric8/jenkins/openshiftsync/BuildSyncRunListener.java
+++ b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildSyncRunListener.java
@@ -15,6 +15,50 @@
*/
package io.fabric8.jenkins.openshiftsync;
+import static hudson.model.Result.ABORTED;
+import static hudson.model.Result.FAILURE;
+import static hudson.model.Result.SUCCESS;
+import static hudson.model.Result.UNSTABLE;
+import static io.fabric8.jenkins.openshiftsync.BuildPhases.CANCELLED;
+import static io.fabric8.jenkins.openshiftsync.BuildPhases.COMPLETE;
+import static io.fabric8.jenkins.openshiftsync.BuildPhases.FAILED;
+import static io.fabric8.jenkins.openshiftsync.BuildPhases.NEW;
+import static io.fabric8.jenkins.openshiftsync.BuildPhases.PENDING;
+import static io.fabric8.jenkins.openshiftsync.BuildPhases.RUNNING;
+import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_ANNOTATIONS_JENKINS_BLUEOCEAN_LOG_URL;
+import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_ANNOTATIONS_JENKINS_BUILD_URI;
+import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_ANNOTATIONS_JENKINS_CONSOLE_LOG_URL;
+import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_ANNOTATIONS_JENKINS_LOG_URL;
+import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_ANNOTATIONS_JENKINS_NAMESPACE;
+import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_ANNOTATIONS_JENKINS_PENDING_INPUT_ACTION_JSON;
+import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_ANNOTATIONS_JENKINS_STATUS_JSON;
+import static io.fabric8.jenkins.openshiftsync.JenkinsUtils.maybeScheduleNext;
+import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.formatTimestamp;
+import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getAuthenticatedOpenShiftClient;
+import static java.net.HttpURLConnection.HTTP_NOT_FOUND;
+
+import java.io.IOException;
+import java.lang.reflect.Constructor;
+import java.lang.reflect.Method;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentLinkedQueue;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import javax.annotation.Nonnull;
+
+import org.apache.commons.httpclient.HttpStatus;
+import org.jenkinsci.plugins.workflow.job.WorkflowRun;
+import org.jenkinsci.plugins.workflow.support.steps.input.InputAction;
+import org.jenkinsci.plugins.workflow.support.steps.input.InputStepExecution;
+import org.kohsuke.stapler.DataBoundConstructor;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
import com.cloudbees.workflow.rest.external.AtomFlowNodeExt;
import com.cloudbees.workflow.rest.external.FlowNodeExt;
import com.cloudbees.workflow.rest.external.PendingInputActionsExt;
@@ -33,8 +77,7 @@
import hudson.triggers.SafeTimerTask;
import io.fabric8.kubernetes.client.KubernetesClientException;
import io.fabric8.openshift.api.model.Build;
-import io.fabric8.openshift.api.model.BuildFluent;
-import io.fabric8.openshift.api.model.DoneableBuild;
+import io.fabric8.openshift.api.model.BuildBuilder;
import io.jenkins.blueocean.rest.factory.BlueRunFactory;
import io.jenkins.blueocean.rest.model.BluePipelineNode;
import io.jenkins.blueocean.rest.model.BlueRun;
@@ -42,52 +85,18 @@
import jenkins.model.Jenkins;
import jenkins.util.Timer;
-import org.apache.commons.httpclient.HttpStatus;
-import org.jenkinsci.plugins.workflow.job.WorkflowRun;
-import org.jenkinsci.plugins.workflow.support.steps.input.InputAction;
-import org.jenkinsci.plugins.workflow.support.steps.input.InputStepExecution;
-import org.kohsuke.stapler.DataBoundConstructor;
-
-import javax.annotation.Nonnull;
-
-import java.io.IOException;
-import java.lang.reflect.Constructor;
-import java.lang.reflect.Method;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.ConcurrentLinkedQueue;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.logging.Level;
-import java.util.logging.Logger;
-
-import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_ANNOTATIONS_JENKINS_BUILD_URI;
-import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_ANNOTATIONS_JENKINS_LOG_URL;
-import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_ANNOTATIONS_JENKINS_NAMESPACE;
-import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_ANNOTATIONS_JENKINS_PENDING_INPUT_ACTION_JSON;
-import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_ANNOTATIONS_JENKINS_STATUS_JSON;
-import static io.fabric8.jenkins.openshiftsync.JenkinsUtils.maybeScheduleNext;
-import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.formatTimestamp;
-import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getAuthenticatedOpenShiftClient;
-import static java.net.HttpURLConnection.HTTP_NOT_FOUND;
-import static java.util.logging.Level.FINE;
-import static java.util.logging.Level.SEVERE;
-import static java.util.logging.Level.WARNING;
-
/**
* Listens to Jenkins Job build {@link Run} start and stop then ensure there's a
* suitable {@link Build} object in OpenShift thats updated correctly with the
* current status, logsURL and metrics
*/
@Extension
+@SuppressWarnings({ "rawtypes", "unchecked", "deprecation" })
public class BuildSyncRunListener extends RunListener {
- private static final Logger logger = Logger
- .getLogger(BuildSyncRunListener.class.getName());
+ private static final String KUBERNETES_NAMESPACE = "KUBERNETES_NAMESPACE";
+ private static final Logger logger = LoggerFactory.getLogger(BuildSyncRunListener.class.getName());
- private long pollPeriodMs = 1000 * 5; // 5 seconds
+ private long pollPeriodMs = 1000 * 5; // 5 seconds
private long delayPollPeriodMs = 1000; // 1 seconds
private static final long maxDelay = 30000;
@@ -104,14 +113,13 @@ public BuildSyncRunListener(long pollPeriodMs) {
}
/**
- * Joins all the given strings, ignoring nulls so that they form a URL with
- * / between the paths without a // if the previous path ends with / and the
- * next path starts with / unless a path item is blank
+ * Joins all the given strings, ignoring nulls so that they form a URL with /
+ * between the paths without a // if the previous path ends with / and the next
+ * path starts with / unless a path item is blank
*
- * @param strings
- * the sequence of strings to join
- * @return the strings concatenated together with / while avoiding a double
- * // between non blank strings.
+ * @param strings the sequence of strings to join
+ * @return the strings concatenated together with / while avoiding a double //
+ * between non blank strings.
*/
public static String joinPaths(String... strings) {
StringBuilder sb = new StringBuilder();
@@ -124,29 +132,30 @@ public static String joinPaths(String... strings) {
String joined = sb.toString();
// And normalize it...
- return joined.replaceAll("/+", "/").replaceAll("/\\?", "?")
- .replaceAll("/#", "#").replaceAll(":/", "://");
+ return joined.replaceAll("/+", "/").replaceAll("/\\?", "?").replaceAll("/#", "#").replaceAll(":/", "://");
}
@Override
public void onStarted(Run run, TaskListener listener) {
+ logger.info("Run started: " + run.getFullDisplayName());
if (shouldPollRun(run)) {
+ logger.info("Processing run: " + run.getDisplayName());
try {
BuildCause cause = (BuildCause) run.getCause(BuildCause.class);
+ logger.info("Build cause for the run is: " + cause);
if (cause != null) {
// TODO This should be a link to the OpenShift console.
run.setDescription(cause.getShortDescription());
}
} catch (IOException e) {
- logger.log(WARNING, "Cannot set build description: " + e);
+ logger.warn("Cannot set build description: " + e);
}
if (runsToPoll.add(run)) {
logger.info("starting polling build " + run.getUrl());
}
checkTimerStarted();
} else {
- logger.fine("not polling polling build " + run.getUrl()
- + " as its not a WorkflowJob");
+ logger.info("Not polling polling build " + run.getUrl() + " as its not a WorkflowJob");
}
super.onStarted(run, listener);
}
@@ -159,8 +168,7 @@ protected void doRun() throws Exception {
pollLoop();
}
};
- Timer.get().scheduleAtFixedRate(task, delayPollPeriodMs, pollPeriodMs,
- TimeUnit.MILLISECONDS);
+ Timer.get().scheduleAtFixedRate(task, delayPollPeriodMs, pollPeriodMs, TimeUnit.MILLISECONDS);
}
}
@@ -191,7 +199,8 @@ public void onFinalized(Run run) {
if (shouldPollRun(run)) {
runsToPoll.remove(run);
pollRun(run);
- logger.info("onFinalized " + run.getUrl());
+ String jenkinsURL = Jenkins.get().getRootUrl();
+ logger.info("Run COMPLETED: Build details can be accessed at: " + jenkinsURL + run.getUrl());
}
super.onFinalized(run);
}
@@ -219,7 +228,7 @@ protected void pollRun(Run run) {
// bumped
// by another dependency vs. our bumping it explicitly, I want to
// find out quickly that we need to switch methods again
- logger.log(Level.WARNING, "pollRun", t);
+ logger.warn("pollRun", t);
}
try {
@@ -227,25 +236,22 @@ protected void pollRun(Run run) {
} catch (KubernetesClientException e) {
if (e.getCode() == HttpStatus.SC_UNPROCESSABLE_ENTITY) {
runsToPoll.remove(run);
- logger.log(WARNING, "Cannot update status: {0}", e.getMessage());
+ logger.warn("Cannot update status: {0}", e.getMessage());
return;
}
throw e;
}
}
- private boolean shouldUpdateOpenShiftBuild(BuildCause cause,
- int latestStageNum, int latestNumFlowNodes, StatusExt status) {
+ private boolean shouldUpdateOpenShiftBuild(BuildCause cause, int latestStageNum, int latestNumFlowNodes,
+ StatusExt status) {
long currTime = TimeUnit.NANOSECONDS.toMillis(System.nanoTime());
- logger.fine(String.format(
+ logger.debug(String.format(
"shouldUpdateOpenShiftBuild curr time %s last update %s curr stage num %s last stage num %s"
+ "curr flow num %s last flow num %s status %s",
- String.valueOf(currTime),
- String.valueOf(cause.getLastUpdateToOpenshift()),
- String.valueOf(latestStageNum),
- String.valueOf(cause.getNumStages()),
- String.valueOf(latestNumFlowNodes),
- String.valueOf(cause.getNumFlowNodes()), status.toString()));
+ String.valueOf(currTime), String.valueOf(cause.getLastUpdateToOpenshift()),
+ String.valueOf(latestStageNum), String.valueOf(cause.getNumStages()),
+ String.valueOf(latestNumFlowNodes), String.valueOf(cause.getNumFlowNodes()), status.toString()));
// if we have not updated in maxDelay time, update
if (currTime > (cause.getLastUpdateToOpenshift() + maxDelay)) {
@@ -263,8 +269,7 @@ private boolean shouldUpdateOpenShiftBuild(BuildCause cause,
}
// if the run is in some sort of terminal state, update
- if (status != StatusExt.IN_PROGRESS &&
- status != StatusExt.PAUSED_PENDING_INPUT) {
+ if (status != StatusExt.IN_PROGRESS && status != StatusExt.PAUSED_PENDING_INPUT) {
return true;
}
@@ -282,10 +287,10 @@ private void upsertBuild(Run run, RunExt wfRunExt, BlueRun blueRun) {
}
String namespace = OpenShiftUtils.getNamespacefromPodInputs();
+ String ns = cause.getNamespace();
if (namespace == null)
- namespace = cause.getNamespace();
- String rootUrl = OpenShiftUtils.getJenkinsURL(
- getAuthenticatedOpenShiftClient(), namespace);
+ namespace = ns;
+ String rootUrl = OpenShiftUtils.getJenkinsURL(getAuthenticatedOpenShiftClient(), namespace);
String buildUrl = joinPaths(rootUrl, run.getUrl());
String logsUrl = joinPaths(buildUrl, "/consoleText");
String logsConsoleUrl = joinPaths(buildUrl, "/console");
@@ -309,26 +314,20 @@ private void upsertBuild(Run run, RunExt wfRunExt, BlueRun blueRun) {
.loadClass("org.jenkinsci.plugins.blueoceandisplayurl.BlueOceanDisplayURLImpl");
Constructor ctor = weburlbldr.getConstructor();
Object displayURL = ctor.newInstance();
- Method getRunURLMethod = weburlbldr.getMethod(
- "getRunURL", hudson.model.Run.class);
- Object blueOceanURI = getRunURLMethod.invoke(
- displayURL, run);
+ Method getRunURLMethod = weburlbldr.getMethod("getRunURL", hudson.model.Run.class);
+ Object blueOceanURI = getRunURLMethod.invoke(displayURL, run);
logsBlueOceanUrl = blueOceanURI.toString();
- logsBlueOceanUrl = logsBlueOceanUrl.replaceAll(
- "http://unconfigured-jenkins-location/", "");
- if (logsBlueOceanUrl.startsWith("http://")
- || logsBlueOceanUrl.startsWith("https://"))
+ logsBlueOceanUrl = logsBlueOceanUrl.replaceAll("http://unconfigured-jenkins-location/", "");
+ if (logsBlueOceanUrl.startsWith("http://") || logsBlueOceanUrl.startsWith("https://"))
// still normalize string
logsBlueOceanUrl = joinPaths("", logsBlueOceanUrl);
else
- logsBlueOceanUrl = joinPaths(rootUrl,
- logsBlueOceanUrl);
+ logsBlueOceanUrl = joinPaths(rootUrl, logsBlueOceanUrl);
}
}
}
} catch (Throwable t) {
- if (logger.isLoggable(Level.FINE))
- logger.log(Level.FINE, "upsertBuild", t);
+ logger.error("upsertBuild", t);
}
Map blueRunResults = new HashMap();
@@ -343,8 +342,7 @@ private void upsertBuild(Run run, RunExt wfRunExt, BlueRun blueRun) {
}
boolean pendingInput = false;
if (!wfRunExt.get_links().self.href.matches("^https?://.*$")) {
- wfRunExt.get_links().self.setHref(joinPaths(rootUrl,
- wfRunExt.get_links().self.href));
+ wfRunExt.get_links().self.setHref(joinPaths(rootUrl, wfRunExt.get_links().self.href));
}
int newNumStages = wfRunExt.getStages().size();
int newNumFlowNodes = 0;
@@ -356,9 +354,7 @@ private void upsertBuild(Run run, RunExt wfRunExt, BlueRun blueRun) {
// we leverage the blue ocean state machine to determine this
BlueRunResult result = blueRunResults.get(stage.getName());
if (result != null && result == BlueRunResult.NOT_BUILT) {
- logger.info("skipping stage "
- + stage.getName()
- + " for the status JSON for pipeline run "
+ logger.info("skipping stage " + stage.getName() + " for the status JSON for pipeline run "
+ run.getDisplayName()
+ " because it was not executed (most likely because of a failure in another stage)");
continue;
@@ -369,22 +365,17 @@ private void upsertBuild(Run run, RunExt wfRunExt, BlueRun blueRun) {
if (!links.self.href.matches("^https?://.*$")) {
links.self.setHref(joinPaths(rootUrl, links.self.href));
}
- if (links.getLog() != null
- && !links.getLog().href.matches("^https?://.*$")) {
+ if (links.getLog() != null && !links.getLog().href.matches("^https?://.*$")) {
links.getLog().setHref(joinPaths(rootUrl, links.getLog().href));
}
- newNumFlowNodes = newNumFlowNodes
- + stage.getStageFlowNodes().size();
+ newNumFlowNodes = newNumFlowNodes + stage.getStageFlowNodes().size();
for (AtomFlowNodeExt node : stage.getStageFlowNodes()) {
FlowNodeExt.FlowNodeLinks nodeLinks = node.get_links();
if (!nodeLinks.self.href.matches("^https?://.*$")) {
- nodeLinks.self.setHref(joinPaths(rootUrl,
- nodeLinks.self.href));
+ nodeLinks.self.setHref(joinPaths(rootUrl, nodeLinks.self.href));
}
- if (nodeLinks.getLog() != null
- && !nodeLinks.getLog().href.matches("^https?://.*$")) {
- nodeLinks.getLog().setHref(
- joinPaths(rootUrl, nodeLinks.getLog().href));
+ if (nodeLinks.getLog() != null && !nodeLinks.getLog().href.matches("^https?://.*$")) {
+ nodeLinks.getLog().setHref(joinPaths(rootUrl, nodeLinks.getLog().href));
}
}
@@ -396,8 +387,8 @@ private void upsertBuild(Run run, RunExt wfRunExt, BlueRun blueRun) {
// override stages in case declarative has fooled base pipeline support
wfRunExt.setStages(validStageList);
- boolean needToUpdate = this.shouldUpdateOpenShiftBuild(cause,
- newNumStages, newNumFlowNodes, wfRunExt.getStatus());
+ boolean needToUpdate = this.shouldUpdateOpenShiftBuild(cause, newNumStages, newNumFlowNodes,
+ wfRunExt.getStatus());
if (!needToUpdate) {
return;
}
@@ -406,13 +397,13 @@ private void upsertBuild(Run run, RunExt wfRunExt, BlueRun blueRun) {
try {
json = new ObjectMapper().writeValueAsString(wfRunExt);
} catch (JsonProcessingException e) {
- logger.log(SEVERE, "Failed to serialize workflow run. " + e, e);
+ logger.error("Failed to serialize workflow run. " + e, e);
return;
}
- String pendingActionsJson = null;
+ String pendingActions = null;
if (pendingInput && run instanceof WorkflowRun) {
- pendingActionsJson = getPendingActionsJson((WorkflowRun) run);
+ pendingActions = getPendingActionsJson((WorkflowRun) run);
}
String phase = runToBuildPhase(run);
@@ -429,42 +420,31 @@ private void upsertBuild(Run run, RunExt wfRunExt, BlueRun blueRun) {
}
}
- logger.log(FINE, "Patching build {0}/{1}: setting phase to {2}",
- new Object[] { cause.getNamespace(), cause.getName(), phase });
+ String name = cause.getName();
+ logger.debug("Patching build {}/{}: setting phase to {}", ns, name, phase);
try {
- BuildFluent.MetadataNested builder = getAuthenticatedOpenShiftClient()
- .builds()
- .inNamespace(cause.getNamespace())
- .withName(cause.getName())
- .edit()
- .editMetadata()
- .addToAnnotations(
- OPENSHIFT_ANNOTATIONS_JENKINS_STATUS_JSON, json)
- .addToAnnotations(OPENSHIFT_ANNOTATIONS_JENKINS_BUILD_URI,
- buildUrl)
- .addToAnnotations(OPENSHIFT_ANNOTATIONS_JENKINS_LOG_URL,
- logsUrl)
- .addToAnnotations(
- Constants.OPENSHIFT_ANNOTATIONS_JENKINS_CONSOLE_LOG_URL,
- logsConsoleUrl)
- .addToAnnotations(
- Constants.OPENSHIFT_ANNOTATIONS_JENKINS_BLUEOCEAN_LOG_URL,
- logsBlueOceanUrl);
-
- String jenkinsNamespace = System.getenv("KUBERNETES_NAMESPACE");
+
+ Map annotations = new HashMap();
+ annotations.put(OPENSHIFT_ANNOTATIONS_JENKINS_STATUS_JSON, json);
+ annotations.put(OPENSHIFT_ANNOTATIONS_JENKINS_BUILD_URI, buildUrl);
+ annotations.put(OPENSHIFT_ANNOTATIONS_JENKINS_LOG_URL, logsUrl);
+ annotations.put(OPENSHIFT_ANNOTATIONS_JENKINS_CONSOLE_LOG_URL, logsConsoleUrl);
+ annotations.put(OPENSHIFT_ANNOTATIONS_JENKINS_BLUEOCEAN_LOG_URL, logsBlueOceanUrl);
+ String jenkinsNamespace = System.getenv(KUBERNETES_NAMESPACE);
if (jenkinsNamespace != null && !jenkinsNamespace.isEmpty()) {
- builder.addToAnnotations(
- OPENSHIFT_ANNOTATIONS_JENKINS_NAMESPACE,
- jenkinsNamespace);
+ annotations.put(OPENSHIFT_ANNOTATIONS_JENKINS_NAMESPACE, jenkinsNamespace);
}
- if (pendingActionsJson != null && !pendingActionsJson.isEmpty()) {
- builder.addToAnnotations(
- OPENSHIFT_ANNOTATIONS_JENKINS_PENDING_INPUT_ACTION_JSON,
- pendingActionsJson);
+ if (pendingActions != null && !pendingActions.isEmpty()) {
+ annotations.put(OPENSHIFT_ANNOTATIONS_JENKINS_PENDING_INPUT_ACTION_JSON, pendingActions);
}
- builder.endMetadata().editStatus().withPhase(phase)
- .withStartTimestamp(startTime)
- .withCompletionTimestamp(completionTime).endStatus().done();
+ final String finalStartTime = startTime;
+ final String finalCompletionTime = completionTime;
+ logger.info("Setting build status values to: {}:[ {} ]: {}->{}", name, phase, startTime, completionTime);
+ logger.debug("Setting build annotations values to: [ {} ]", annotations);
+ getAuthenticatedOpenShiftClient().builds().inNamespace(ns).withName(name)
+ .edit(b -> new BuildBuilder(b).editMetadata().withAnnotations(annotations).endMetadata()
+ .editStatus().withPhase(phase).withStartTimestamp(finalStartTime)
+ .withCompletionTimestamp(finalCompletionTime).endStatus().build());
} catch (KubernetesClientException e) {
if (HTTP_NOT_FOUND == e.getCode()) {
runsToPoll.remove(run);
@@ -475,8 +455,7 @@ private void upsertBuild(Run run, RunExt wfRunExt, BlueRun blueRun) {
cause.setNumFlowNodes(newNumFlowNodes);
cause.setNumStages(newNumStages);
- cause.setLastUpdateToOpenshift(TimeUnit.NANOSECONDS.toMillis(System
- .nanoTime()));
+ cause.setLastUpdateToOpenshift(TimeUnit.NANOSECONDS.toMillis(System.nanoTime()));
}
// annotate the Build with pending input JSON so consoles can do the
@@ -484,25 +463,24 @@ private void upsertBuild(Run run, RunExt wfRunExt, BlueRun blueRun) {
private String getPendingActionsJson(WorkflowRun run) {
List pendingInputActions = new ArrayList();
InputAction inputAction = run.getAction(InputAction.class);
-List executions = null;
+ List executions = null;
if (inputAction != null) {
- try {
- executions = inputAction.getExecutions();
- } catch (Exception e) {
- logger.log(SEVERE, "Failed to get Excecutions:" + e, e);
- return null;
- }
+ try {
+ executions = inputAction.getExecutions();
+ } catch (Exception e) {
+ logger.error("Failed to get Excecutions:" + e, e);
+ return null;
+ }
if (executions != null && !executions.isEmpty()) {
for (InputStepExecution inputStepExecution : executions) {
- pendingInputActions.add(PendingInputActionsExt.create(
- inputStepExecution, run));
+ pendingInputActions.add(PendingInputActionsExt.create(inputStepExecution, run));
}
}
}
try {
return new ObjectMapper().writeValueAsString(pendingInputActions);
} catch (JsonProcessingException e) {
- logger.log(SEVERE, "Failed to serialize pending actions. " + e, e);
+ logger.error("Failed to serialize pending actions. " + e, e);
return null;
}
}
@@ -518,37 +496,35 @@ private long getDuration(Run run) {
private String runToBuildPhase(Run run) {
if (run != null && !run.hasntStartedYet()) {
if (run.isBuilding()) {
- return BuildPhases.RUNNING;
+ return RUNNING;
} else {
Result result = run.getResult();
if (result != null) {
- if (result.equals(Result.SUCCESS)) {
- return BuildPhases.COMPLETE;
- } else if (result.equals(Result.ABORTED)) {
- return BuildPhases.CANCELLED;
- } else if (result.equals(Result.FAILURE)) {
- return BuildPhases.FAILED;
- } else if (result.equals(Result.UNSTABLE)) {
- return BuildPhases.FAILED;
+ if (result.equals(SUCCESS)) {
+ return COMPLETE;
+ } else if (result.equals(ABORTED)) {
+ return CANCELLED;
+ } else if (result.equals(FAILURE)) {
+ return FAILED;
+ } else if (result.equals(UNSTABLE)) {
+ return FAILED;
} else {
- return BuildPhases.PENDING;
+ return PENDING;
}
}
}
}
- return BuildPhases.NEW;
+ return NEW;
}
/**
* Returns true if we should poll the status of this run
*
- * @param run
- * the Run to test against
+ * @param run the Run to test against
* @return true if the should poll the status of this build run
*/
protected boolean shouldPollRun(Run run) {
- return run instanceof WorkflowRun
- && run.getCause(BuildCause.class) != null
+ return run instanceof WorkflowRun && run.getCause(BuildCause.class) != null
&& GlobalPluginConfiguration.get().isEnabled();
}
}
diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/BuildToActionMapper.java b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildToActionMapper.java
index 9708aa300..168666b06 100644
--- a/src/main/java/io/fabric8/jenkins/openshiftsync/BuildToActionMapper.java
+++ b/src/main/java/io/fabric8/jenkins/openshiftsync/BuildToActionMapper.java
@@ -23,8 +23,8 @@
public class BuildToActionMapper {
- private static Map buildToParametersMap;
- private static Map buildToCauseMap;
+ private static Map buildToParametersMap = new ConcurrentHashMap();
+ private static Map buildToCauseMap = new ConcurrentHashMap();
private BuildToActionMapper() {
}
@@ -38,8 +38,7 @@ static synchronized void initialize() {
}
}
- static synchronized void addParameterAction(String buildId,
- ParametersAction params) {
+ static synchronized void addParameterAction(String buildId, ParametersAction params) {
buildToParametersMap.put(buildId, params);
}
diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/ConfigMapClusterInformer.java b/src/main/java/io/fabric8/jenkins/openshiftsync/ConfigMapClusterInformer.java
new file mode 100644
index 000000000..0ba8c6c3b
--- /dev/null
+++ b/src/main/java/io/fabric8/jenkins/openshiftsync/ConfigMapClusterInformer.java
@@ -0,0 +1,146 @@
+/**
+ * Copyright (C) 2017 Red Hat, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.fabric8.jenkins.openshiftsync;
+
+import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getInformerFactory;
+import static io.fabric8.jenkins.openshiftsync.PodTemplateUtils.CONFIGMAP;
+
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+
+import org.csanchez.jenkins.plugins.kubernetes.PodTemplate;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import io.fabric8.kubernetes.api.model.ConfigMap;
+import io.fabric8.kubernetes.api.model.ObjectMeta;
+import io.fabric8.kubernetes.client.informers.ResourceEventHandler;
+import io.fabric8.kubernetes.client.informers.SharedIndexInformer;
+import io.fabric8.kubernetes.client.informers.SharedInformerFactory;
+
+public class ConfigMapClusterInformer implements ResourceEventHandler, Lifecyclable {
+
+ private static final Logger LOGGER = LoggerFactory.getLogger(ConfigMapClusterInformer.class.getName());
+ private SharedIndexInformer informer;
+ private Set namespaces;
+
+ public ConfigMapClusterInformer(String[] namespaces) {
+ this.namespaces = new HashSet<>(Arrays.asList(namespaces));
+ }
+
+ public int getListIntervalInSeconds() {
+ return 1_000 * GlobalPluginConfiguration.get().getConfigMapListInterval();
+ }
+
+ public void start() {
+ LOGGER.info("Starting cluster wide configMap informer for {} !!", namespaces);
+ LOGGER.debug("listing ConfigMap resources");
+ SharedInformerFactory factory = getInformerFactory();
+ this.informer = factory.sharedIndexInformerFor(ConfigMap.class, getListIntervalInSeconds());
+ informer.addEventHandler(this);
+ factory.startAllRegisteredInformers();
+ LOGGER.info("ConfigMap informer started for namespaces: {}", namespaces);
+// ConfigMapList list = getOpenshiftClient().configMaps().inNamespace(namespace).list();
+// onInit(list.getItems());
+ }
+
+ public void stop() {
+ LOGGER.info("Stopping informer {} !!", namespaces);
+ if( this.informer != null ) {
+ this.informer.stop();
+ }
+ }
+
+ @Override
+ public void onAdd(ConfigMap obj) {
+ LOGGER.debug("ConfigMap informer received add event for: {}", obj);
+ if (obj != null) {
+ ObjectMeta metadata = obj.getMetadata();
+ String namespace = metadata.getNamespace();
+ if (namespaces.contains(namespace)) {
+ String name = metadata.getName();
+ LOGGER.info("ConfigMap informer received add event for: {}", name);
+ List podTemplates = PodTemplateUtils.podTemplatesFromConfigMap(obj);
+ String uid = metadata.getUid();
+ PodTemplateUtils.addAgents(podTemplates, CONFIGMAP, uid, name, namespace);
+ } else {
+ LOGGER.debug("Received event for a namespace we are not watching: {} ... ignoring", namespace);
+ }
+ }
+ }
+
+ @Override
+ public void onUpdate(ConfigMap oldObj, ConfigMap newObj) {
+ LOGGER.debug("ConfigMap informer received update event for: {} to: {}", oldObj, newObj);
+ if (oldObj != null) {
+ ObjectMeta oldMetadata = oldObj.getMetadata();
+ String namespace = oldMetadata.getNamespace();
+ if (namespaces.contains(namespace)) {
+ String oldRv = oldMetadata != null ? oldMetadata.getResourceVersion() : null;
+ ObjectMeta newMetadata = newObj.getMetadata();
+ String newResourceVersion = newMetadata != null ? newMetadata.getResourceVersion() : null;
+ LOGGER.info("Update event received resource versions: {} to: {}", oldRv, newResourceVersion);
+ List podTemplates = PodTemplateUtils.podTemplatesFromConfigMap(newObj);
+ ObjectMeta metadata = newMetadata;
+ String uid = metadata.getUid();
+ String name = metadata.getName();
+ LOGGER.info("ConfigMap informer received update event for: {}", name);
+ PodTemplateUtils.updateAgents(podTemplates, CONFIGMAP, uid, name, namespace);
+ } else {
+ LOGGER.debug("Received event for a namespace we are not watching: {} ... ignoring", namespace);
+ }
+ }
+ }
+
+ @Override
+ public void onDelete(ConfigMap obj, boolean deletedFinalStateUnknown) {
+ LOGGER.debug("ConfigMap informer received delete event for: {}", obj);
+ if (obj != null) {
+ ObjectMeta metadata = obj.getMetadata();
+ String namespace = metadata.getNamespace();
+ if (namespaces.contains(namespace)) {
+ List podTemplates = PodTemplateUtils.podTemplatesFromConfigMap(obj);
+ String uid = metadata.getUid();
+ String name = metadata.getName();
+ PodTemplateUtils.deleteAgents(podTemplates, CONFIGMAP, uid, name, namespace);
+ } else {
+ LOGGER.debug("Received event for a namespace we are not watching: {} ... ignoring", namespace);
+ }
+ }
+ }
+
+ private void onInit(List list) {
+ if (list != null) {
+ for (ConfigMap configMap : list) {
+ PodTemplateUtils.addPodTemplateFromConfigMap(configMap);
+ }
+ }
+ }
+
+ private void waitInformerSync(SharedIndexInformer informer) {
+ while (!informer.hasSynced()) {
+ LOGGER.info("Waiting informer to sync for " + namespaces);
+ try {
+ TimeUnit.SECONDS.sleep(5);
+ } catch (InterruptedException e) {
+ LOGGER.info("Interrupted waiting thread: " + e);
+ }
+ }
+ }
+}
diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/ConfigMapInformer.java b/src/main/java/io/fabric8/jenkins/openshiftsync/ConfigMapInformer.java
new file mode 100644
index 000000000..591b85367
--- /dev/null
+++ b/src/main/java/io/fabric8/jenkins/openshiftsync/ConfigMapInformer.java
@@ -0,0 +1,131 @@
+/**
+ * Copyright (C) 2017 Red Hat, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.fabric8.jenkins.openshiftsync;
+
+import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getInformerFactory;
+import static io.fabric8.jenkins.openshiftsync.PodTemplateUtils.CONFIGMAP;
+
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+
+import org.csanchez.jenkins.plugins.kubernetes.PodTemplate;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import io.fabric8.kubernetes.api.model.ConfigMap;
+import io.fabric8.kubernetes.api.model.ObjectMeta;
+import io.fabric8.kubernetes.client.informers.ResourceEventHandler;
+import io.fabric8.kubernetes.client.informers.SharedIndexInformer;
+import io.fabric8.kubernetes.client.informers.SharedInformerFactory;
+
+public class ConfigMapInformer implements ResourceEventHandler, Lifecyclable,Resyncable {
+
+ private static final Logger LOGGER = LoggerFactory.getLogger(ConfigMapInformer.class.getName());
+ private SharedIndexInformer informer;
+ private String namespace;
+
+ public ConfigMapInformer(String namespace) {
+ this.namespace = namespace;
+ }
+
+ @Override
+ public long getResyncPeriodMilliseconds() {
+ return 1_000 * GlobalPluginConfiguration.get().getConfigMapListInterval();
+ }
+
+ public void start() {
+ LOGGER.info("Starting configMap informer for {} !!", namespace);
+ LOGGER.debug("listing ConfigMap resources");
+ SharedInformerFactory factory = getInformerFactory().inNamespace(namespace);
+ this.informer = factory.sharedIndexInformerFor(ConfigMap.class, getResyncPeriodMilliseconds());
+ informer.addEventHandler(this);
+ factory.startAllRegisteredInformers();
+ LOGGER.info("ConfigMap informer started for namespace: {}", namespace);
+// ConfigMapList list = getOpenshiftClient().configMaps().inNamespace(namespace).list();
+// onInit(list.getItems());
+ }
+
+ public void stop() {
+ LOGGER.info("Stopping informer {} !!", namespace);
+ if( this.informer != null ) {
+ this.informer.stop();
+ }
+ }
+
+
+ @Override
+ public void onAdd(ConfigMap obj) {
+ LOGGER.debug("ConfigMap informer received add event for: {}", obj);
+ if (obj != null) {
+ ObjectMeta metadata = obj.getMetadata();
+ String name = metadata.getName();
+ LOGGER.info("ConfigMap informer received add event for: {}", name);
+ List podTemplates = PodTemplateUtils.podTemplatesFromConfigMap(obj);
+ String uid = metadata.getUid();
+ String namespace = metadata.getNamespace();
+ PodTemplateUtils.addAgents(podTemplates, CONFIGMAP, uid, name, namespace);
+ }
+ }
+
+ @Override
+ public void onUpdate(ConfigMap oldObj, ConfigMap newObj) {
+ LOGGER.debug("ConfigMap informer received update event for: {} to: {}", oldObj, newObj);
+ if (oldObj != null) {
+ String oldResourceVersion = oldObj.getMetadata() != null ? oldObj.getMetadata().getResourceVersion() : null;
+ String newResourceVersion = newObj.getMetadata() != null ? newObj.getMetadata().getResourceVersion() : null;
+ LOGGER.info("Update event received resource versions: {} to: {}", oldResourceVersion, newResourceVersion);
+ List podTemplates = PodTemplateUtils.podTemplatesFromConfigMap(newObj);
+ ObjectMeta metadata = newObj.getMetadata();
+ String uid = metadata.getUid();
+ String name = metadata.getName();
+ String namespace = metadata.getNamespace();
+ LOGGER.info("ConfigMap informer received update event for: {}", name);
+ PodTemplateUtils.updateAgents(podTemplates, CONFIGMAP, uid, name, namespace);
+ }
+ }
+
+ @Override
+ public void onDelete(ConfigMap obj, boolean deletedFinalStateUnknown) {
+ LOGGER.debug("ConfigMap informer received delete event for: {}", obj);
+ if (obj != null) {
+ List podTemplates = PodTemplateUtils.podTemplatesFromConfigMap(obj);
+ ObjectMeta metadata = obj.getMetadata();
+ String uid = metadata.getUid();
+ String name = metadata.getName();
+ String namespace = metadata.getNamespace();
+ PodTemplateUtils.deleteAgents(podTemplates, CONFIGMAP, uid, name, namespace);
+ }
+ }
+
+ private void onInit(List list) {
+ if (list != null) {
+ for (ConfigMap configMap : list) {
+ PodTemplateUtils.addPodTemplateFromConfigMap(configMap);
+ }
+ }
+ }
+
+ private void waitInformerSync(SharedIndexInformer informer) {
+ while (!informer.hasSynced()) {
+ LOGGER.info("Waiting informer to sync for " + namespace);
+ try {
+ TimeUnit.SECONDS.sleep(5);
+ } catch (InterruptedException e) {
+ LOGGER.info("Interrupted waiting thread: " + e);
+ }
+ }
+ }
+}
diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/ConfigMapWatcher.java b/src/main/java/io/fabric8/jenkins/openshiftsync/ConfigMapWatcher.java
deleted file mode 100644
index 10b99d86f..000000000
--- a/src/main/java/io/fabric8/jenkins/openshiftsync/ConfigMapWatcher.java
+++ /dev/null
@@ -1,159 +0,0 @@
-/**
- * Copyright (C) 2017 Red Hat, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package io.fabric8.jenkins.openshiftsync;
-
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
-import hudson.triggers.SafeTimerTask;
-import io.fabric8.kubernetes.api.model.ConfigMap;
-import io.fabric8.kubernetes.api.model.ConfigMapList;
-import io.fabric8.kubernetes.client.Watcher.Action;
-
-import org.csanchez.jenkins.plugins.kubernetes.PodTemplate;
-
-import java.util.List;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.logging.Logger;
-
-import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getAuthenticatedOpenShiftClient;
-import static java.util.logging.Level.SEVERE;
-import static java.util.logging.Level.WARNING;
-
-public class ConfigMapWatcher extends BaseWatcher {
- private final Logger LOGGER = Logger.getLogger(getClass().getName());
-
- @SuppressFBWarnings("EI_EXPOSE_REP2")
- public ConfigMapWatcher(String[] namespaces) {
- super(namespaces);
- }
-
- @Override
- public int getListIntervalInSeconds() {
- return GlobalPluginConfiguration.get().getConfigMapListInterval();
- }
-
- public Runnable getStartTimerTask() {
- return new SafeTimerTask() {
- @Override
- public void doRun() {
- if (!CredentialsUtils.hasCredentials()) {
- LOGGER.fine("No Openshift Token credential defined.");
- return;
- }
- for (String namespace : namespaces) {
- ConfigMapList configMaps = null;
- try {
- LOGGER.fine("listing ConfigMap resources");
- configMaps = getAuthenticatedOpenShiftClient()
- .configMaps().inNamespace(namespace).list();
- onInitialConfigMaps(configMaps);
- LOGGER.fine("handled ConfigMap resources");
- } catch (Exception e) {
- LOGGER.log(SEVERE, "Failed to load ConfigMaps: " + e, e);
- }
- try {
- String resourceVersion = "0";
- if (configMaps == null) {
- LOGGER.warning("Unable to get config map list; impacts resource version used for watch");
- } else {
- resourceVersion = configMaps.getMetadata().getResourceVersion();
- }
- if (watches.get(namespace) == null) {
- LOGGER.info("creating ConfigMap watch for namespace "
- + namespace
- + " and resource version "
- + resourceVersion);
- addWatch(namespace,
- getAuthenticatedOpenShiftClient()
- .configMaps()
- .inNamespace(namespace)
- .withResourceVersion(resourceVersion).watch(new WatcherCallback(ConfigMapWatcher.this,namespace)));
- }
- } catch (Exception e) {
- LOGGER.log(SEVERE, "Failed to load ConfigMaps: " + e, e);
- }
- }
- }
- };
- }
-
- public void start() {
- super.start();
- // lets process the initial state
- LOGGER.info("Now handling startup config maps!!");
- }
-
- public void eventReceived(Action action, ConfigMap configMap) {
- try {
- List slavesFromCM = PodTemplateUtils.podTemplatesFromConfigMap(this, configMap);
- boolean hasSlaves = slavesFromCM.size() > 0;
- String uid = configMap.getMetadata().getUid();
- String cmname = configMap.getMetadata().getName();
- String namespace = configMap.getMetadata().getNamespace();
- switch (action) {
- case ADDED:
- if (hasSlaves) {
- processSlavesForAddEvent(slavesFromCM, PodTemplateUtils.cmType, uid, cmname, namespace);
- }
- break;
- case MODIFIED:
- processSlavesForModifyEvent(slavesFromCM, PodTemplateUtils.cmType, uid, cmname, namespace);
- break;
- case DELETED:
- this.processSlavesForDeleteEvent(slavesFromCM, PodTemplateUtils.cmType, uid, cmname, namespace);
- break;
- case ERROR:
- LOGGER.warning("watch for configMap " + configMap.getMetadata().getName() + " received error event ");
- break;
- default:
- LOGGER.warning("watch for configMap " + configMap.getMetadata().getName() + " received unknown event " + action);
- break;
- }
- } catch (Exception e) {
- LOGGER.log(WARNING, "Caught: " + e, e);
- }
- }
- @Override
- public void eventReceived(io.fabric8.kubernetes.client.Watcher.Action action, T resource) {
- ConfigMap cfgmap = (ConfigMap)resource;
- eventReceived(action, cfgmap);
- }
-
- private void onInitialConfigMaps(ConfigMapList configMaps) {
- if (configMaps == null)
- return;
- if (PodTemplateUtils.trackedPodTemplates == null) {
- PodTemplateUtils.trackedPodTemplates = new ConcurrentHashMap<>(configMaps.getItems().size());
- }
- List items = configMaps.getItems();
- if (items != null) {
- for (ConfigMap configMap : items) {
- try {
- if (PodTemplateUtils.configMapContainsSlave(configMap) && !PodTemplateUtils.trackedPodTemplates.containsKey(configMap.getMetadata().getUid())) {
- List templates = PodTemplateUtils.podTemplatesFromConfigMap(this, configMap);
- PodTemplateUtils.trackedPodTemplates.put(configMap.getMetadata().getUid(), templates);
- for (PodTemplate podTemplate : templates) {
- PodTemplateUtils.addPodTemplate(podTemplate);
- }
- }
- } catch (Exception e) {
- LOGGER.log(SEVERE,
- "Failed to update ConfigMap PodTemplates", e);
- }
- }
- }
- }
-
-}
diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/Constants.java b/src/main/java/io/fabric8/jenkins/openshiftsync/Constants.java
index dd4874c4d..40ffe890b 100644
--- a/src/main/java/io/fabric8/jenkins/openshiftsync/Constants.java
+++ b/src/main/java/io/fabric8/jenkins/openshiftsync/Constants.java
@@ -15,7 +15,6 @@
*/
package io.fabric8.jenkins.openshiftsync;
-
/**
*/
public class Constants {
@@ -32,9 +31,12 @@ public class Constants {
public static final String OPENSHIFT_ANNOTATIONS_JENKINS_NAMESPACE = "openshift.io/jenkins-namespace";
public static final String OPENSHIFT_LABELS_BUILD_CONFIG_NAME = "openshift.io/build-config.name";
public static final String OPENSHIFT_LABELS_BUILD_CONFIG_GIT_REPOSITORY_NAME = "openshift.io/gitRepository";
- // see PR https://github.com/openshift/jenkins-sync-plugin/pull/189, there was a issue with having "/"
- // in a label we construct a watch over, where usual UTF-8 encoding of the label name (which becomes part of
- // a query param on the REST invocation) was causing okhttp3 to complain (there is even more history/discussion
+ // see PR https://github.com/openshift/jenkins-sync-plugin/pull/189, there was a
+ // issue with having "/"
+ // in a label we construct a watch over, where usual UTF-8 encoding of the label
+ // name (which becomes part of
+ // a query param on the REST invocation) was causing okhttp3 to complain (there
+ // is even more history/discussion
// in the PR as to issues with fixing).
// so we avoid use of "/" for this label
public static final String OPENSHIFT_LABELS_SECRET_CREDENTIAL_SYNC = "credential.sync.jenkins.openshift.io";
@@ -53,9 +55,11 @@ public class Constants {
public static final String OPENSHIFT_SECRETS_TYPE_OPAQUE = "Opaque";
public static final String OPENSHIFT_BUILD_STATUS_FIELD = "status";
public static final String OPENSHIFT_SECRETS_DATA_CLIENT_TOKEN = "openshift-client-token";
-
+
public static final String OPENSHIFT_PROJECT_ENV_VAR_NAME = "PROJECT_NAME";
public static final String OPENSHIFT_PROJECT_FILE = "/run/secrets/kubernetes.io/serviceaccount/namespace";
+ public static final String IMAGESTREAM_AGENT_LABEL_VALUE = "jenkins-slave";
+ public static final String IMAGESTREAM_AGENT_LABEL = "role";
}
diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/CredentialsUtils.java b/src/main/java/io/fabric8/jenkins/openshiftsync/CredentialsUtils.java
index 8223a9139..28ab9522a 100644
--- a/src/main/java/io/fabric8/jenkins/openshiftsync/CredentialsUtils.java
+++ b/src/main/java/io/fabric8/jenkins/openshiftsync/CredentialsUtils.java
@@ -1,10 +1,45 @@
package io.fabric8.jenkins.openshiftsync;
+import static com.cloudbees.plugins.credentials.CredentialsProvider.lookupStores;
+import static com.cloudbees.plugins.credentials.CredentialsScope.GLOBAL;
+import static hudson.Util.fixNull;
+import static hudson.util.Secret.fromString;
+import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_SECRETS_DATA_CERTIFICATE;
+import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_SECRETS_DATA_CLIENT_TOKEN;
+import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_SECRETS_DATA_FILENAME;
+import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_SECRETS_DATA_PASSPHRASE;
+import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_SECRETS_DATA_PASSWORD;
+import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_SECRETS_DATA_SECRET_TEXT;
+import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_SECRETS_DATA_SSHPRIVATEKEY;
+import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_SECRETS_DATA_USERNAME;
+import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_SECRETS_TYPE_BASICAUTH;
+import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_SECRETS_TYPE_OPAQUE;
+import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_SECRETS_TYPE_SSH;
+import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getAuthenticatedOpenShiftClient;
+import static java.nio.charset.StandardCharsets.UTF_8;
+import static java.util.logging.Level.SEVERE;
+import static java.util.logging.Level.WARNING;
+import static org.apache.commons.lang.StringUtils.isNotBlank;
+
+import java.io.IOException;
+import java.util.Base64;
+import java.util.Base64.Decoder;
+import java.util.Collections;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import org.acegisecurity.context.SecurityContext;
+import org.acegisecurity.context.SecurityContextHolder;
+import org.apache.commons.lang.StringUtils;
+import org.jenkinsci.plugins.plaincredentials.impl.FileCredentialsImpl;
+import org.jenkinsci.plugins.plaincredentials.impl.StringCredentialsImpl;
+
import com.cloudbees.jenkins.plugins.sshcredentials.impl.BasicSSHUserPrivateKey;
import com.cloudbees.plugins.credentials.Credentials;
import com.cloudbees.plugins.credentials.CredentialsMatchers;
import com.cloudbees.plugins.credentials.CredentialsProvider;
-import com.cloudbees.plugins.credentials.CredentialsScope;
import com.cloudbees.plugins.credentials.CredentialsStore;
import com.cloudbees.plugins.credentials.SecretBytes;
import com.cloudbees.plugins.credentials.domains.Domain;
@@ -13,9 +48,8 @@
import com.cloudbees.plugins.credentials.impl.UsernamePasswordCredentialsImpl;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
-import com.openshift.jenkins.plugins.OpenShiftTokenCredentials;
+
import hudson.model.Fingerprint;
-import hudson.remoting.Base64;
import hudson.security.ACL;
import io.fabric8.kubernetes.api.model.LocalObjectReference;
import io.fabric8.kubernetes.api.model.ObjectMeta;
@@ -23,120 +57,107 @@
import io.fabric8.openshift.api.model.BuildConfig;
import io.fabric8.openshift.api.model.BuildConfigSpec;
import io.fabric8.openshift.api.model.BuildSource;
+import io.fabric8.openshift.client.OpenShiftClient;
import jenkins.model.Jenkins;
-import org.acegisecurity.context.SecurityContext;
-import org.acegisecurity.context.SecurityContextHolder;
-import org.apache.commons.lang.StringUtils;
-import org.jenkinsci.plugins.plaincredentials.impl.FileCredentialsImpl;
-import org.jenkinsci.plugins.plaincredentials.impl.StringCredentialsImpl;
-
-import java.io.IOException;
-import java.nio.charset.StandardCharsets;
-import java.util.Collections;
-import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.logging.Level;
-import java.util.logging.Logger;
-
-import static com.cloudbees.plugins.credentials.CredentialsScope.GLOBAL;
-import static hudson.Util.fixNull;
-import static io.fabric8.jenkins.openshiftsync.Constants.*;
-import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getAuthenticatedOpenShiftClient;
-import static java.nio.charset.StandardCharsets.UTF_8;
-import static java.util.logging.Level.WARNING;
-import static org.apache.commons.lang.StringUtils.isNotBlank;
public class CredentialsUtils {
- private final static Logger logger = Logger.getLogger(CredentialsUtils.class.getName());
- public static final String KUBERNETES_SERVICE_ACCOUNT = "Kubernetes Service Account";
- public static ConcurrentHashMap uidToSecretNameMap;
-
-
- public static Secret getSourceCredentials(BuildConfig buildConfig) {
- BuildConfigSpec spec = buildConfig.getSpec();
- if (spec != null) {
- BuildSource source = spec.getSource();
- if (source != null) {
- LocalObjectReference sourceSecret = source.getSourceSecret();
- if (sourceSecret != null) {
- String sourceSecretName = sourceSecret.getName();
- if (sourceSecretName != null && !sourceSecretName.isEmpty()) {
- ObjectMeta buildConfigMetadata = buildConfig.getMetadata();
- String namespace = buildConfigMetadata.getNamespace();
- String buildConfigName = buildConfigMetadata.getName();
- logger.info("Retrieving SourceSecret for BuildConfig " + buildConfigName + " in Namespace " + namespace);
- Secret secret = getAuthenticatedOpenShiftClient().secrets().inNamespace(namespace).withName(sourceSecretName).get();
- if (secret == null) {
- logger.warning("Secret Name provided in BuildConfig " + buildConfigName + " as " + sourceSecretName + " does not exist. " +
- "Please review the BuildConfig and make the necessary changes.");
- } else{
- return secret;
+ private static final String SECRET_TEXT_SECRET_TYPE = "secretText";
+ private static final String FILE_SECRET_TYPE = "filename";
+ private static final String TOKEN_SECRET_TYPE = "token";
+ private static final Decoder DECODER = Base64.getDecoder();
+ private final static Logger logger = Logger.getLogger(CredentialsUtils.class.getName());
+    private final static Map<String, String> SOURCE_SECRET_TO_CREDS_MAP = new ConcurrentHashMap<>();
+ public static final String KUBERNETES_SERVICE_ACCOUNT = "Kubernetes Service Account";
+    public final static ConcurrentHashMap<String, String> UID_TO_SECRET_MAP = new ConcurrentHashMap<>();
+
+ public static Secret getSourceSecretForBuildConfig(BuildConfig buildConfig) {
+ BuildConfigSpec spec = buildConfig.getSpec();
+ if (spec != null) {
+ BuildSource source = spec.getSource();
+ if (source != null) {
+ LocalObjectReference sourceSecret = source.getSourceSecret();
+ if (sourceSecret != null) {
+ String sourceSecretName = sourceSecret.getName();
+ if (sourceSecretName != null && !sourceSecretName.isEmpty()) {
+ ObjectMeta buildConfigMetadata = buildConfig.getMetadata();
+ String namespace = buildConfigMetadata.getNamespace();
+ String name = buildConfigMetadata.getName();
+ logger.info("Retrieving SourceSecret for BuildConfig " + name + " in Namespace " + namespace);
+ OpenShiftClient client = getAuthenticatedOpenShiftClient();
+ Secret secret = client.secrets().inNamespace(namespace).withName(sourceSecretName).get();
+ if (secret != null) {
+ return secret;
+ } else {
+ String message = "Secret Name provided in BuildConfig " + name + " as " + sourceSecretName;
+ message += " does not exist. Please review the BuildConfig and make the necessary changes.";
+ logger.warning(message);
+ }
+ }
+ }
}
- }
}
- }
- }
return null;
}
public static String updateSourceCredentials(BuildConfig buildConfig) throws IOException {
- String credId = null;
- Secret sourceSecret = getSourceCredentials(buildConfig);
- if (sourceSecret != null) {
+ String credentialsName = null;
+ Secret sourceSecret = getSourceSecretForBuildConfig(buildConfig);
+ if (sourceSecret != null) {
ObjectMeta sourceSecretMetadata = sourceSecret.getMetadata();
- if (sourceSecretMetadata != null){
- String namespace = sourceSecretMetadata.getNamespace();
- String secretName = sourceSecretMetadata.getName();
- ObjectMeta buildConfigMetadata = buildConfig.getMetadata();
- String buildConfigName = buildConfigMetadata.getName();
- credId = upsertCredential(sourceSecret, namespace, secretName);
- if (credId != null) {
- logger.info("Linking BuildConfig sourceSecret "+secretName+" to Jenkins Credential "+credId);
- BuildConfigSecretToCredentialsMap.linkBCSecretToCredential(NamespaceName.create(buildConfig).toString(), credId);
- return credId;
- }else {
- // call delete and remove any credential that fits the
- // project/bcname pattern
- logger.info("Unlinking BuildConfig sourceSecret matching BuildConfig "+buildConfigName);
- credId = BuildConfigSecretToCredentialsMap.unlinkBCSecretToCrendential(NamespaceName.create(buildConfig).toString());
- if (credId != null){
- logger.info("Deleting sourceSecret "+secretName+" in namespace "+namespace);
- deleteCredential(credId, NamespaceName.create(buildConfig), buildConfigMetadata.getResourceVersion());
+ if (sourceSecretMetadata != null) {
+ String namespace = sourceSecretMetadata.getNamespace();
+ String secretName = sourceSecretMetadata.getName();
+ ObjectMeta buildConfigMetadata = buildConfig.getMetadata();
+ String buildConfigName = buildConfigMetadata.getName();
+ credentialsName = insertOrUpdateCredentialsFromSecret(sourceSecret);
+ String buildConfigAsString = NamespaceName.create(buildConfig).toString();
+ if (credentialsName != null) {
+ logger.info("Linking sourceSecret " + secretName + " to Jenkins Credentials " + credentialsName);
+ linkSourceSecretToCredentials(buildConfigAsString, credentialsName);
+ return credentialsName;
+ } else {
+ // call delete and remove any credential that fits the project/bcname pattern
+ logger.info("Unlinking BuildConfig sourceSecret matching BuildConfig " + buildConfigName);
+ credentialsName = unlinkBCSecretToCrendential(buildConfigAsString);
+ if (credentialsName != null) {
+ logger.info("Deleting sourceSecret " + secretName + " in namespace " + namespace);
+ String resourceVersion = buildConfigMetadata.getResourceVersion();
+ deleteCredential(credentialsName, NamespaceName.create(buildConfig), resourceVersion);
+ }
}
- }
}
- }
- return credId;
+ }
+ return credentialsName;
}
public static void deleteSourceCredentials(BuildConfig buildConfig) throws IOException {
- Secret sourceSecret = getSourceCredentials(buildConfig);
+ Secret sourceSecret = getSourceSecretForBuildConfig(buildConfig);
if (sourceSecret != null) {
ObjectMeta metadata = sourceSecret.getMetadata();
if (metadata != null) {
- Map labels = metadata.getLabels();
- if (labels != null) {
- String labelValue =labels.get(Constants.OPENSHIFT_LABELS_SECRET_CREDENTIAL_SYNC);
- boolean watching = labelValue != null && labelValue.equalsIgnoreCase(Constants.VALUE_SECRET_SYNC);
- // for a bc delete, if we are watching this secret, do not delete
- // credential until secret is actually deleted
- if (watching)
- return;
- deleteCredential(sourceSecret);
- }
+                Map<String, String> labels = metadata.getLabels();
+ if (labels != null) {
+ String labelValue = labels.get(Constants.OPENSHIFT_LABELS_SECRET_CREDENTIAL_SYNC);
+ boolean watching = labelValue != null && labelValue.equalsIgnoreCase(Constants.VALUE_SECRET_SYNC);
+ // for a bc delete, if we are watching this secret, do not delete
+ // credential until secret is actually deleted
+ if (watching)
+ return;
+ deleteCredential(sourceSecret);
+ }
}
}
}
-
+
private static String getSecretCustomName(Secret secret) {
ObjectMeta metadata = secret.getMetadata();
if (metadata != null) {
- Map annotations = metadata.getAnnotations();
+            Map<String, String> annotations = metadata.getAnnotations();
if (annotations != null) {
String secretName = annotations.get(Annotations.SECRET_NAME);
- if (secretName != null){
- return secretName;
+ if (secretName != null) {
+ return secretName;
}
}
}
@@ -145,6 +166,7 @@ private static String getSecretCustomName(Secret secret) {
/**
* Inserts or creates a Jenkins Credential for the given Secret
+ *
* @param secret the secret to insert
* @return the insert secret name
* @throws IOException when the update of the secret fails
@@ -153,74 +175,78 @@ public static String upsertCredential(Secret secret) throws IOException {
if (secret != null) {
ObjectMeta metadata = secret.getMetadata();
if (metadata != null) {
- return upsertCredential(secret, metadata.getNamespace(), metadata.getName());
+ return insertOrUpdateCredentialsFromSecret(secret);
}
}
return null;
}
- private static String upsertCredential(Secret secret, String namespace, String secretName) throws IOException {
- if (uidToSecretNameMap == null){
- uidToSecretNameMap = new ConcurrentHashMap();
- }
- String customSecretName = getSecretCustomName(secret);
+ private static String insertOrUpdateCredentialsFromSecret(Secret secret) throws IOException {
if (secret != null) {
- Credentials creds = secretToCredentials(secret);
- if (creds != null) {
- // checking with updated secret name if custom name is not null
- String id = generateCredentialsName(namespace, secretName, customSecretName);
- Credentials existingCreds = lookupCredentials(id);
- final SecurityContext previousContext = ACL.impersonate(ACL.SYSTEM);
- try {
- CredentialsStore s = CredentialsProvider.lookupStores(Jenkins.getActiveInstance()).iterator().next();
- String originalId = generateCredentialsName(namespace, secretName, null);
- Credentials existingOriginalCreds = lookupCredentials(originalId);
- NamespaceName secretNamespaceName = null;
-
- ObjectMeta metadata = secret.getMetadata();
- String secretUid = metadata.getUid();
- if (!originalId.equals(id)) {
- boolean hasAddedCredential = s.addCredentials(Domain.global(), creds);
- if (!hasAddedCredential) {
- logger.warning("Setting secret failed for secret with new Id " + id + " from Secret " + secretNamespaceName + " with revision: " + metadata.getResourceVersion());
- logger.warning("Check if Id "+id+" is not already used.");
- } else {
- String oldId = uidToSecretNameMap.get(secretUid);
- if (oldId != null) {
- Credentials oldCredentials = lookupCredentials(oldId);
- s.removeCredentials(Domain.global(), oldCredentials);
- } else if (existingOriginalCreds != null) {
- s.removeCredentials(Domain.global(), existingOriginalCreds);
- }
- uidToSecretNameMap.put(secretUid, id);
- secretNamespaceName = NamespaceName.create(secret);
- logger.info("Updated credential " + oldId + " with new Id " + id + " from Secret " + secretNamespaceName + " with revision: " + metadata.getResourceVersion());
+ String customSecretName = getSecretCustomName(secret);
+ ObjectMeta metadata = secret.getMetadata();
+ String namespace = metadata.getNamespace();
+ String secretName = metadata.getName();
+ Credentials creds = secretToCredentials(secret);
+ if (creds != null) {
+ // checking with updated secret name if custom name is not null
+ String id = generateCredentialsName(namespace, secretName, customSecretName);
+ Credentials existingCreds = lookupCredentials(id);
+ final SecurityContext previousContext = ACL.impersonate(ACL.SYSTEM);
+ try {
+ CredentialsStore creentialsStore = lookupStores(Jenkins.getActiveInstance()).iterator().next();
+ String originalId = generateCredentialsName(namespace, secretName, null);
+ Credentials existingOriginalCreds = lookupCredentials(originalId);
+ NamespaceName secretNamespaceName = null;
+
+ String secretUid = metadata.getUid();
+ if (!originalId.equals(id)) {
+ boolean hasAddedCredential = creentialsStore.addCredentials(Domain.global(), creds);
+ if (!hasAddedCredential) {
+ logger.warning("Setting secret failed for secret with new Id " + id + " from Secret "
+ + secretNamespaceName + " with revision: " + metadata.getResourceVersion());
+ logger.warning("Check if Id " + id + " is not already used.");
+ } else {
+ String oldId = UID_TO_SECRET_MAP.get(secretUid);
+ if (oldId != null) {
+ Credentials oldCredentials = lookupCredentials(oldId);
+ creentialsStore.removeCredentials(Domain.global(), oldCredentials);
+ } else if (existingOriginalCreds != null) {
+ creentialsStore.removeCredentials(Domain.global(), existingOriginalCreds);
+ }
+ UID_TO_SECRET_MAP.put(secretUid, id);
+ secretNamespaceName = NamespaceName.create(secret);
+ logger.info("Updated credential " + oldId + " with new Id " + id + " from Secret "
+ + secretNamespaceName + " with revision: " + metadata.getResourceVersion());
+ }
+ } else {
+ if (existingCreds != null) {
+ creentialsStore.updateCredentials(Domain.global(), existingCreds, creds);
+ UID_TO_SECRET_MAP.put(secretUid, id);
+ secretNamespaceName = NamespaceName.create(secret);
+ logger.info("Updated credential " + id + " from Secret " + secretNamespaceName
+ + " with revision: " + metadata.getResourceVersion());
+ } else {
+ boolean hasAddedCredential = creentialsStore.addCredentials(Domain.global(), creds);
+ if (!hasAddedCredential) {
+ logger.warning("Update failed for secret with new Id " + id + " from Secret "
+ + secretNamespaceName + " with revision: " + metadata.getResourceVersion());
+ } else {
+ UID_TO_SECRET_MAP.put(secretUid, id);
+ secretNamespaceName = NamespaceName.create(secret);
+ logger.info("Created credential " + id + " from Secret " + secretNamespaceName
+ + " with revision: " + metadata.getResourceVersion());
+ }
+ }
+ }
+ creentialsStore.save();
+ } finally {
+ SecurityContextHolder.setContext(previousContext);
}
- } else {
- if (existingCreds != null) {
- s.updateCredentials(Domain.global(), existingCreds, creds);
- uidToSecretNameMap.put(secretUid, id);
- secretNamespaceName = NamespaceName.create(secret);
- logger.info("Updated credential " + id + " from Secret " + secretNamespaceName + " with revision: " + metadata.getResourceVersion());
- } else {
- boolean hasAddedCredential = s.addCredentials(Domain.global(), creds);
- if (!hasAddedCredential) {
- logger.warning("Update failed for secret with new Id " + id + " from Secret " + secretNamespaceName + " with revision: " + metadata.getResourceVersion());
- } else {
- uidToSecretNameMap.put(secretUid, id);
- secretNamespaceName = NamespaceName.create(secret);
- logger.info("Created credential " + id + " from Secret " + secretNamespaceName + " with revision: " + metadata.getResourceVersion());
- }
+ if (id != null && !id.isEmpty()) {
+ return id;
}
- }
- s.save();
- } finally {
- SecurityContextHolder.setContext(previousContext);
- }
- if (id != null && !id.isEmpty()){
- return id;
}
- }
}
return null;
}
@@ -232,8 +258,10 @@ private static void deleteCredential(String id, NamespaceName name, String resou
try {
Fingerprint fp = CredentialsProvider.getFingerprintOf(existingCred);
if (fp != null && fp.getJobs().size() > 0) {
- // per messages in credentials console, it is not a given but it is possible for job refs to a
- // credential to be tracked ; if so, we will not prevent deletion, but at least note things for
+ // per messages in credentials console, it is not a given but it is possible for
+ // job refs to a
+ // credential to be tracked ; if so, we will not prevent deletion, but at least
+ // note things for
// potential diagnostics
StringBuffer sb = new StringBuffer();
for (String job : fp.getJobs())
@@ -241,23 +269,31 @@ private static void deleteCredential(String id, NamespaceName name, String resou
logger.info("About to delete credential " + id + "which is referenced by jobs: " + sb.toString());
}
CredentialsStore s = CredentialsProvider.lookupStores(Jenkins.getActiveInstance()).iterator().next();
- if (!existingCred.getDescriptor().getDisplayName().contains(KUBERNETES_SERVICE_ACCOUNT)) {
- s.removeCredentials(Domain.global(), existingCred);
- logger.info("Deleted credential " + id + " from Secret " + name + " with revision: " + resourceRevision);
- s.save();
- } else {
- logger.warning("Stopped attempt to delete " + KUBERNETES_SERVICE_ACCOUNT + " credentials with Id " + id );
- }
+ if (!existingCred.getDescriptor().getDisplayName().contains(KUBERNETES_SERVICE_ACCOUNT)) {
+ s.removeCredentials(Domain.global(), existingCred);
+ logger.info("Deleted credential " + id + " from Secret " + name + " with revision: "
+ + resourceRevision);
+ s.save();
+ } else {
+ logger.warning(
+ "Stopped attempt to delete " + KUBERNETES_SERVICE_ACCOUNT + " credentials with Id " + id);
+ }
} finally {
SecurityContextHolder.setContext(previousContext);
}
}
}
- public static void deleteCredential(Secret secret) throws IOException {
+ public static void deleteCredential(Secret secret) {
if (secret != null) {
- String id = generateCredentialsName(secret.getMetadata().getNamespace(), secret.getMetadata().getName(), getSecretCustomName(secret));
- deleteCredential(id, NamespaceName.create(secret), secret.getMetadata().getResourceVersion());
+ String id = generateCredentialsName(secret.getMetadata().getNamespace(), secret.getMetadata().getName(),
+ getSecretCustomName(secret));
+ try {
+ deleteCredential(id, NamespaceName.create(secret), secret.getMetadata().getResourceVersion());
+ } catch (IOException e) {
+ logger.log(SEVERE, "Credentials has not been deleted: " + e, e);
+ throw new RuntimeException(e);
+ }
}
}
@@ -295,25 +331,27 @@ private static String generateCredentialsName(String namespace, String name, Str
return (customName == null) ? namespace + "-" + name : customName;
}
- private static Credentials arbitraryKeyValueTextCredential(Map data, String generatedCredentialsName) {
+    private static Credentials arbitraryKeyValueTextCredential(Map<String, String> data,
+ String generatedCredentialsName) {
String text = "";
if (data != null && data.size() > 0) {
// convert to JSON for parsing ease in pipelines
try {
text = new ObjectMapper().writeValueAsString(data);
} catch (JsonProcessingException e) {
- logger.log(Level.WARNING, "Arbitrary opaque secret " + generatedCredentialsName + " had issue converting json", e);
+ logger.log(Level.WARNING,
+ "Arbitrary opaque secret " + generatedCredentialsName + " had issue converting json", e);
}
}
if (StringUtils.isBlank(text)) {
- logger.log(
- Level.WARNING,
+ logger.log(Level.WARNING,
"Opaque secret {0} did not provide any data that could be processed into a Jenkins credential",
new Object[] { generatedCredentialsName });
return null;
}
- return newSecretTextCredential(generatedCredentialsName, new String(Base64.encode(text.getBytes())));
+ return newSecretTextCredential(generatedCredentialsName,
+ new String(Base64.getEncoder().encode(text.getBytes())));
}
private static Credentials secretToCredentials(Secret secret) {
@@ -321,16 +359,17 @@ private static Credentials secretToCredentials(Secret secret) {
String name = secret.getMetadata().getName();
Map data = secret.getData();
if (data == null) {
- logger.log(WARNING, "An OpenShift secret was marked for import, but it has no secret data. No credential will be created.");
+ logger.log(WARNING, "Secret " + name + " does not contain any data. No credential will be created.");
return null;
}
- final String generatedCredentialsName = generateCredentialsName(namespace, name, getSecretCustomName(secret));
+ String generatedCredentialsName = generateCredentialsName(namespace, name, getSecretCustomName(secret));
String passwordData = data.get(OPENSHIFT_SECRETS_DATA_PASSWORD);
String sshKeyData = data.get(OPENSHIFT_SECRETS_DATA_SSHPRIVATEKEY);
String usernameData = data.get(OPENSHIFT_SECRETS_DATA_USERNAME);
- // We support "passphrase" and "password" for the ssh passphrase; passphrase has precedence over password
- String passphraseData = data.get(OPENSHIFT_SECRETS_DATA_PASSPHRASE);
+ // We support "passphrase" and "password" for the ssh passphrase; passphrase has
+ // precedence over password
+ String passphraseData = data.get(OPENSHIFT_SECRETS_DATA_PASSPHRASE);
String sshPassphrase = isNotBlank(passphraseData) ? passphraseData : passwordData;
switch (secret.getType()) {
@@ -355,14 +394,14 @@ private static Credentials secretToCredentials(Secret secret) {
}
String openshiftTokenData = data.get(OPENSHIFT_SECRETS_DATA_CLIENT_TOKEN);
if (isNotBlank(openshiftTokenData)) {
- return newOpenshiftTokenCredentials(generatedCredentialsName, openshiftTokenData);
+ return newOpenshiftTokenCredentials(generatedCredentialsName, openshiftTokenData);
}
return arbitraryKeyValueTextCredential(data, generatedCredentialsName);
case OPENSHIFT_SECRETS_TYPE_BASICAUTH:
return newUsernamePasswordCredentials(generatedCredentialsName, usernameData, passwordData);
case OPENSHIFT_SECRETS_TYPE_SSH:
- return newSSHUserCredential(generatedCredentialsName, usernameData, sshKeyData, sshPassphrase);
+ return newSSHUserCredential(generatedCredentialsName, usernameData, sshKeyData, sshPassphrase);
default:
// the type field is marked optional in k8s.io/api/core/v1/types.go,
// default to OPENSHIFT_SECRETS_DATA_SECRET_TEXT in this case
@@ -371,68 +410,64 @@ private static Credentials secretToCredentials(Secret secret) {
}
private static Credentials newOpenshiftTokenCredentials(String secretName, String secretText) {
- if (secretName == null || secretName.length() == 0 || secretText == null || secretText.length() == 0) {
- logger.log(Level.WARNING,
- "Invalid secret data, secretName: " + secretName + " secretText is null: " + (secretText == null)
- + " secretText is empty: " + (secretText != null ? secretText.length() == 0 : false));
- return null;
-
- }
-
- return new OpenShiftTokenCredentials(CredentialsScope.GLOBAL, secretName, secretName,
- hudson.util.Secret.fromString(new String(Base64.decode(secretText), StandardCharsets.UTF_8)));
+ if (secretName == null || secretName.length() == 0 || secretText == null || secretText.length() == 0) {
+ logInvalidSecretData(secretName, secretText, TOKEN_SECRET_TYPE);
+ return null;
+ }
+ return new OpenShiftTokenCredentials(GLOBAL, secretName, secretName,
+ fromString(new String(DECODER.decode(secretText), UTF_8)));
}
private static Credentials newSecretFileCredential(String secretName, String fileData) {
if (secretName == null || secretName.length() == 0 || fileData == null || fileData.length() == 0) {
- logger.log(Level.WARNING,
- "Invalid secret data, secretName: " + secretName + " filename is null: " + (fileData == null)
- + " filename is empty: " + (fileData != null ? fileData.length() == 0 : false));
+ logInvalidSecretData(secretName, fileData, FILE_SECRET_TYPE);
return null;
-
}
- return new FileCredentialsImpl(CredentialsScope.GLOBAL, secretName, secretName, secretName,
- SecretBytes.fromString(fileData));
+ return new FileCredentialsImpl(GLOBAL, secretName, secretName, secretName, SecretBytes.fromString(fileData));
}
private static Credentials newSecretTextCredential(String secretName, String secretText) {
if (secretName == null || secretName.length() == 0 || secretText == null || secretText.length() == 0) {
- logger.log(Level.WARNING,
- "Invalid secret data, secretName: " + secretName + " secretText is null: " + (secretText == null)
- + " secretText is empty: " + (secretText != null ? secretText.length() == 0 : false));
+ logInvalidSecretData(secretName, secretText, SECRET_TEXT_SECRET_TYPE);
return null;
-
}
- return new StringCredentialsImpl(CredentialsScope.GLOBAL, secretName, secretName,
- hudson.util.Secret.fromString(new String(Base64.decode(secretText), StandardCharsets.UTF_8)));
+ String data = new String(DECODER.decode(secretText), UTF_8);
+ return new StringCredentialsImpl(GLOBAL, secretName, secretName, fromString(data));
}
private static Credentials newCertificateCredential(String secretName, String passwordData,
String certificateData) {
if (secretName == null || secretName.length() == 0 || certificateData == null
|| certificateData.length() == 0) {
- logger.log(Level.WARNING,
- "Invalid secret data, secretName: " + secretName + " certificate is null: "
- + (certificateData == null) + " certificate is empty: "
- + (certificateData != null ? certificateData.length() == 0 : false));
+ logInvalidSecretData(secretName, certificateData, "certificate");
return null;
}
- String certificatePassword = passwordData != null ? new String(Base64.decode(passwordData)) : null;
- return new CertificateCredentialsImpl(CredentialsScope.GLOBAL, secretName, secretName, certificatePassword,
+ String certificatePassword = passwordData != null ? new String(DECODER.decode(passwordData)) : null;
+ return new CertificateCredentialsImpl(GLOBAL, secretName, secretName, certificatePassword,
new CertificateCredentialsImpl.UploadedKeyStoreSource(SecretBytes.fromString(certificateData)));
}
- private static Credentials newSSHUserCredential(String secretName, String username, String sshKeyData, String passwordData) {
+ private static void logInvalidSecretData(String secretName, String secretText, String secretType) {
+ logger.log(Level.WARNING,
+ "Invalid secret data, secretName: " + secretName + " " + secretType + " is null: "
+ + (secretText == null) + " " + secretType + " is empty: "
+ + (secretText != null ? secretText.length() == 0 : false));
+ }
+
+ private static Credentials newSSHUserCredential(String secretName, String username, String sshKeyData,
+ String passwordData) {
boolean secretNameIsBlank = StringUtils.isBlank(secretName);
boolean sshKeyDataIsBlank = StringUtils.isBlank(sshKeyData);
- if ( secretNameIsBlank || sshKeyDataIsBlank) {
- logger.log(WARNING, "Invalid secret data, secretName: " + secretName + " sshKeyData is blank null: " + sshKeyDataIsBlank);
+ if (secretNameIsBlank || sshKeyDataIsBlank) {
+ logger.log(WARNING, "Invalid secret data, secretName: " + secretName + " sshKeyData is blank null: "
+ + sshKeyDataIsBlank);
return null;
}
- String sshKeyPassword = (passwordData != null) ? new String(Base64.decode(passwordData),UTF_8) : null;
- String sshKey = new String(Base64.decode(sshKeyData), UTF_8);
- String sshUser = fixNull(username).isEmpty() ? "" : new String(Base64.decode(username), UTF_8);
- BasicSSHUserPrivateKey.DirectEntryPrivateKeySource key = new BasicSSHUserPrivateKey.DirectEntryPrivateKeySource(sshKey);
+ String sshKeyPassword = (passwordData != null) ? new String(DECODER.decode(passwordData), UTF_8) : null;
+ String sshKey = new String(DECODER.decode(sshKeyData), UTF_8);
+ String sshUser = fixNull(username).isEmpty() ? "" : new String(DECODER.decode(username), UTF_8);
+ BasicSSHUserPrivateKey.DirectEntryPrivateKeySource key = new BasicSSHUserPrivateKey.DirectEntryPrivateKeySource(
+ sshKey);
return new BasicSSHUserPrivateKey(GLOBAL, secretName, sshUser, key, sshKeyPassword, secretName);
}
@@ -449,9 +484,8 @@ private static Credentials newUsernamePasswordCredentials(String secretName, Str
return null;
}
- return new UsernamePasswordCredentialsImpl(CredentialsScope.GLOBAL, secretName, secretName,
- new String(Base64.decode(usernameData), UTF_8),
- new String(Base64.decode(passwordData), UTF_8));
+ return new UsernamePasswordCredentialsImpl(GLOBAL, secretName, secretName,
+ new String(DECODER.decode(usernameData), UTF_8), new String(DECODER.decode(passwordData), UTF_8));
}
/**
@@ -463,4 +497,12 @@ public static boolean hasCredentials() {
return !StringUtils.isEmpty(getAuthenticatedOpenShiftClient().getConfiguration().getOauthToken());
}
+ static void linkSourceSecretToCredentials(String bc, String credential) {
+ SOURCE_SECRET_TO_CREDS_MAP.put(bc, credential);
+ }
+
+ static String unlinkBCSecretToCrendential(String bc) {
+ return SOURCE_SECRET_TO_CREDS_MAP.remove(bc);
+ }
+
}
diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/GenericEventHandler.java b/src/main/java/io/fabric8/jenkins/openshiftsync/GenericEventHandler.java
new file mode 100644
index 000000000..1a39e8903
--- /dev/null
+++ b/src/main/java/io/fabric8/jenkins/openshiftsync/GenericEventHandler.java
@@ -0,0 +1,30 @@
+package io.fabric8.jenkins.openshiftsync;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import io.fabric8.kubernetes.api.model.HasMetadata;
+import io.fabric8.kubernetes.client.informers.ResourceEventHandler;
+
+public class GenericEventHandler<T extends HasMetadata> implements ResourceEventHandler<T> {
+ private Logger logger = LoggerFactory.getLogger(this.getClass().getName());
+
+ public void onAdd(T obj) {
+ String className = obj.getClass().getSimpleName();
+ final String name = obj.getMetadata().getName();
+ logger.info("{}/{} added", className, name);
+ }
+
+ public void onUpdate(T oldObj, T newObj) {
+ String className = oldObj.getClass().getSimpleName();
+ final String name = oldObj.getMetadata().getName();
+ logger.info("{}/{} updated", className, name);
+ }
+
+ public void onDelete(T obj, boolean deletedFinalStateUnknown) {
+ String className = obj.getClass().getSimpleName();
+ final String name = obj.getMetadata().getName();
+ logger.info("{}/{} deleted", className, name);
+ }
+
+}
\ No newline at end of file
diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/GlobalPluginConfiguration.java b/src/main/java/io/fabric8/jenkins/openshiftsync/GlobalPluginConfiguration.java
index 4b747dbc4..0c11062cb 100644
--- a/src/main/java/io/fabric8/jenkins/openshiftsync/GlobalPluginConfiguration.java
+++ b/src/main/java/io/fabric8/jenkins/openshiftsync/GlobalPluginConfiguration.java
@@ -16,24 +16,33 @@
package io.fabric8.jenkins.openshiftsync;
import static hudson.security.ACL.SYSTEM;
+import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getAuthenticatedOpenShiftClient;
import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getNamespaceOrUseDefault;
import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getOpenShiftClient;
+import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.shutdownOpenShiftClient;
import static java.util.concurrent.TimeUnit.SECONDS;
-import static java.util.logging.Level.SEVERE;
import static jenkins.model.Jenkins.ADMINISTER;
+import java.io.IOException;
+import java.util.concurrent.ScheduledFuture;
import java.util.logging.Logger;
+import javax.servlet.ServletException;
+
import org.apache.commons.lang.StringUtils;
+import org.kohsuke.stapler.AncestorInPath;
import org.kohsuke.stapler.DataBoundConstructor;
+import org.kohsuke.stapler.QueryParameter;
import org.kohsuke.stapler.StaplerRequest;
+import org.kohsuke.stapler.verb.POST;
import com.cloudbees.plugins.credentials.common.StandardListBoxModel;
import hudson.Extension;
import hudson.Util;
+import hudson.model.Job;
+import hudson.util.FormValidation;
import hudson.util.ListBoxModel;
-import io.fabric8.kubernetes.client.KubernetesClientException;
import jenkins.model.GlobalConfiguration;
import jenkins.model.Jenkins;
import jenkins.util.Timer;
@@ -46,8 +55,16 @@ public class GlobalPluginConfiguration extends GlobalConfiguration {
private boolean enabled = true;
private boolean foldersEnabled = true;
+ private boolean useClusterMode = false;
+ private boolean syncConfigMaps = true;
+ private boolean syncSecrets = true;
+ private boolean syncImageStreams = true;
+ private boolean syncBuildConfigsAndBuilds = true;
+
private String server;
private String credentialsId = "";
+ private int maxConnections = 100;
+
private String[] namespaces;
private String jobNamePattern;
private String skipOrganizationPrefix;
@@ -58,17 +75,15 @@ public class GlobalPluginConfiguration extends GlobalConfiguration {
private int configMapListInterval = 300;
private int imageStreamListInterval = 300;
- private transient BuildWatcher buildWatcher;
- private transient BuildConfigWatcher buildConfigWatcher;
- private transient SecretWatcher secretWatcher;
- private transient ConfigMapWatcher configMapWatcher;
- private transient ImageStreamWatcher imageStreamWatcher;
+ private static GlobalPluginConfigurationTimerTask TASK;
+    private static ScheduledFuture<?> FUTURE;
@DataBoundConstructor
public GlobalPluginConfiguration(boolean enable, String server, String namespace, boolean foldersEnabled,
String credentialsId, String jobNamePattern, String skipOrganizationPrefix, String skipBranchSuffix,
int buildListInterval, int buildConfigListInterval, int configMapListInterval, int secretListInterval,
- int imageStreamListInterval) {
+ int imageStreamListInterval, boolean useClusterMode, boolean syncConfigMaps, boolean syncSecrets,
+ boolean syncImageStreams, boolean syncBuildsConfigAndBuilds, int maxConnections) {
this.enabled = enable;
this.server = server;
this.namespaces = StringUtils.isBlank(namespace) ? null : namespace.split(" ");
@@ -82,6 +97,12 @@ public GlobalPluginConfiguration(boolean enable, String server, String namespace
this.configMapListInterval = configMapListInterval;
this.secretListInterval = secretListInterval;
this.imageStreamListInterval = imageStreamListInterval;
+ this.useClusterMode = useClusterMode;
+ this.syncConfigMaps = syncConfigMaps;
+ this.syncSecrets = syncSecrets;
+ this.syncImageStreams = syncImageStreams;
+ this.syncBuildConfigsAndBuilds = syncBuildsConfigAndBuilds;
+ this.maxConnections = maxConnections;
configChange();
}
@@ -95,6 +116,100 @@ public static GlobalPluginConfiguration get() {
return GlobalConfiguration.all().get(GlobalPluginConfiguration.class);
}
+ private synchronized void configChange() {
+ logger.info("OpenShift Sync Plugin processing a newly supplied configuration");
+ stop();
+// NOTE: no explicit shutdownOpenShiftClient() here — client shutdown is already performed inside stop()
+ start();
+ }
+
+ private void start() {
+ if (this.enabled) {
+ OpenShiftUtils.initializeOpenShiftClient(this.server, this.maxConnections);
+ this.namespaces = getNamespaceOrUseDefault(this.namespaces, getOpenShiftClient());
+ if (TASK != null) {
+ logger.warning("Previously existing configuration task");
+ }
+ TASK = new GlobalPluginConfigurationTimerTask(this.namespaces);
+ FUTURE = Timer.get().schedule(TASK, 1, SECONDS); // lets give jenkins a while to get started ;)
+ } else {
+ logger.info("OpenShift Sync Plugin has been disabled");
+ }
+ }
+
+ private void stop() {
+ if (FUTURE != null) {
+ boolean interrupted = FUTURE.cancel(true);
+ if (interrupted) {
+ logger.info("OpenShift Sync Plugin task has been interrupted");
+ }
+ }
+ if (TASK != null) {
+ TASK.stop();
+ TASK.cancel();
+ TASK = null;
+ }
+ OpenShiftUtils.shutdownOpenShiftClient();
+ }
+
+ /**
+ * Validates the OpenShift Sync configuration form by checking cluster-wide watch permissions (cluster mode) and the configured connection limits.
+ */
+ @POST
+ public FormValidation doValidate(@QueryParameter("useClusterMode") final boolean useClusterMode,
+ @QueryParameter("syncConfigMaps") final boolean syncConfigMaps,
+ @QueryParameter("syncSecrets") final boolean syncSecrets,
+ @QueryParameter("syncImageStreams") final boolean syncImageStreams,
+ @QueryParameter("syncBuildConfigsAndBuilds") final boolean syncBuildConfigsAndBuilds,
+ @QueryParameter("maxConnections") final int maxConnections,
+ @QueryParameter("namespace") final String namespace, @SuppressWarnings("rawtypes") @AncestorInPath Job job)
+ throws IOException, ServletException {
+ if (useClusterMode) {
+ try {
+ int secrets = getAuthenticatedOpenShiftClient().secrets().inAnyNamespace().list().getItems().size();
+ logger.info("Cluster secrets: " + secrets);
+ } catch (Exception e) {
+ StringBuilder message = new StringBuilder();
+ message.append("The ServiceAccount used by Jenkins does not have cluster wide watch permissions.\n");
+ message.append("To use cluster mode, you need to run the following commands an restart Jenkins: \n\n");
+ message.append("oc create clusterrole jenkins-watcher --verb=get,list,watch \\\n");
+ message.append(" --resource=configmaps,builds,buildconfigs,imagestreams,secrets\n\n");
+ message.append("oc adm policy add-cluster-role-to-user jenkins-watcher -z jenkins\n");
+ logger.severe("Error while trying to query secrets lists: " + e);
+ return FormValidation.error(message.toString());
+ }
+ } else {
+ StringBuilder message = new StringBuilder();
+ if (maxConnections > 200) {
+ message.append("Cluster mode is recommended if max connections is greater than 200.");
+ }
+ int requiredConnectionsCount = 0;
+ if (syncBuildConfigsAndBuilds) {
+ requiredConnectionsCount += 2;
+ }
+ if (syncImageStreams) {
+ requiredConnectionsCount++;
+ }
+ if (syncSecrets) {
+ requiredConnectionsCount++;
+ }
+ if (syncConfigMaps) {
+ requiredConnectionsCount++;
+ }
+ String[] namespaces = StringUtils.isBlank(namespace) ? new String[] {} : namespace.split(" ");
+ int namespacesCount = namespaces.length;
+ requiredConnectionsCount = namespacesCount * requiredConnectionsCount;
+ if (maxConnections < requiredConnectionsCount) {
+ message.append(String.format("Watching %s namespaces with your configuration requires %s connections.",
+ namespacesCount, requiredConnectionsCount));
+ }
+ if (message.length() > 0) {
+ return FormValidation.warning(message.toString());
+ }
+ }
+ return FormValidation.ok("Success");
+ }
+
@Override
public String getDisplayName() {
return "OpenShift Jenkins Sync";
@@ -239,62 +354,52 @@ void setNamespaces(String[] namespaces) {
this.namespaces = namespaces;
}
- void setBuildWatcher(BuildWatcher buildWatcher) {
- this.buildWatcher = buildWatcher;
+ public boolean isUseClusterMode() {
+ return useClusterMode;
}
- void setBuildConfigWatcher(BuildConfigWatcher buildConfigWatcher) {
- this.buildConfigWatcher = buildConfigWatcher;
+ public void setUseClusterMode(boolean useClusterMode) {
+ this.useClusterMode = useClusterMode;
}
- void setSecretWatcher(SecretWatcher secretWatcher) {
- this.secretWatcher = secretWatcher;
+ public boolean isSyncConfigMaps() {
+ return syncConfigMaps;
}
- void setConfigMapWatcher(ConfigMapWatcher configMapWatcher) {
- this.configMapWatcher = configMapWatcher;
+ public void setSyncConfigMaps(boolean syncConfigMaps) {
+ this.syncConfigMaps = syncConfigMaps;
}
- void setImageStreamWatcher(ImageStreamWatcher imageStreamWatcher) {
- this.imageStreamWatcher = imageStreamWatcher;
+ public boolean isSyncSecrets() {
+ return syncSecrets;
}
- private synchronized void configChange() {
- logger.info("OpenShift Sync Plugin processing a newly supplied configuration");
- if (this.buildConfigWatcher != null) {
- this.buildConfigWatcher.stop();
- }
- if (this.buildWatcher != null) {
- this.buildWatcher.stop();
- }
- if (this.configMapWatcher != null) {
- this.configMapWatcher.stop();
- }
- if (this.imageStreamWatcher != null) {
- this.imageStreamWatcher.stop();
- }
- if (this.secretWatcher != null) {
- this.secretWatcher.stop();
- }
- this.buildWatcher = null;
- this.buildConfigWatcher = null;
- this.configMapWatcher = null;
- this.imageStreamWatcher = null;
- this.secretWatcher = null;
- OpenShiftUtils.shutdownOpenShiftClient();
+ public void setSyncSecrets(boolean syncSecrets) {
+ this.syncSecrets = syncSecrets;
+ }
- if (!this.enabled) {
- logger.info("OpenShift Sync Plugin has been disabled");
- return;
- }
- try {
- OpenShiftUtils.initializeOpenShiftClient(this.server);
- this.namespaces = getNamespaceOrUseDefault(this.namespaces, getOpenShiftClient());
- Runnable task = new GlobalPluginConfigurationTimerTask(this);
- Timer.get().schedule(task, 1, SECONDS); // lets give jenkins a while to get started ;)
- } catch (KubernetesClientException e) {
- Throwable exceptionOrCause = (e.getCause() != null) ? e.getCause() : e;
- logger.log(SEVERE, "Failed to configure OpenShift Jenkins Sync Plugin: " + exceptionOrCause);
- }
+ public boolean isSyncImageStreams() {
+ return syncImageStreams;
}
+
+ public void setSyncImageStreams(boolean syncImageStreams) {
+ this.syncImageStreams = syncImageStreams;
+ }
+
+ public boolean isSyncBuildConfigsAndBuilds() {
+ return syncBuildConfigsAndBuilds;
+ }
+
+ public void setSyncBuildConfigsAndBuilds(boolean syncBuildConfigsAndBuilds) {
+ this.syncBuildConfigsAndBuilds = syncBuildConfigsAndBuilds;
+ }
+
+ public int getMaxConnections() {
+ return maxConnections;
+ }
+
+ public void setMaxConnections(int maxConnections) {
+ this.maxConnections = maxConnections;
+ }
+
}
diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/GlobalPluginConfigurationTimerTask.java b/src/main/java/io/fabric8/jenkins/openshiftsync/GlobalPluginConfigurationTimerTask.java
index 94020c586..a359802cc 100644
--- a/src/main/java/io/fabric8/jenkins/openshiftsync/GlobalPluginConfigurationTimerTask.java
+++ b/src/main/java/io/fabric8/jenkins/openshiftsync/GlobalPluginConfigurationTimerTask.java
@@ -1,7 +1,10 @@
package io.fabric8.jenkins.openshiftsync;
import static hudson.init.InitMilestone.COMPLETED;
+import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getInformerFactory;
+import java.util.ArrayList;
+import java.util.List;
import java.util.logging.Logger;
import hudson.init.InitMilestone;
@@ -11,17 +14,37 @@
public class GlobalPluginConfigurationTimerTask extends SafeTimerTask {
private static final Logger logger = Logger.getLogger(GlobalPluginConfigurationTimerTask.class.getName());
+ private String[] namespaces;
+ private final static List informers = new ArrayList<>();
- private GlobalPluginConfiguration globalPluginConfiguration;
-
- public GlobalPluginConfigurationTimerTask(GlobalPluginConfiguration globalPluginConfiguration) {
- this.globalPluginConfiguration = globalPluginConfiguration;
+ public GlobalPluginConfigurationTimerTask(String[] namespaces) {
+ this.namespaces = namespaces;
}
@Override
protected void doRun() throws Exception {
logger.info("Confirming Jenkins is started");
+ waitForJenkinsStartup();
+ stop();
+ start();
+ }
+
+ private void start() {
+ if (GlobalPluginConfiguration.get().isUseClusterMode()) {
+ startClusterInformers();
+ logger.info("All the cluster informers have been registered!! ... starting all registered informers");
+ } else {
+ startNamespaceInformers();
+ logger.info("All the namespaced informers have been registered!! ... starting all registered informers");
+ }
+ getInformerFactory().startAllRegisteredInformers();
+ logger.info("All registered informers have been started");
+
+ }
+
+ private void waitForJenkinsStartup() {
while (true) {
+ @SuppressWarnings("deprecation")
final Jenkins instance = Jenkins.getActiveInstance();
// We can look at Jenkins Init Level to see if we are ready to start. If we do
// not wait, we risk the chance of a deadlock.
@@ -30,36 +53,84 @@ protected void doRun() throws Exception {
if (initLevel == COMPLETED) {
break;
}
- logger.fine("Jenkins not ready...");
+ logger.info("Jenkins not ready...");
try {
Thread.sleep(500);
} catch (InterruptedException e) {
- // ignore
+ logger.info("Interrupted while sleeping");
}
}
- intializeAndStartWatchers();
}
- private void intializeAndStartWatchers() {
- String[] namespaces = globalPluginConfiguration.getNamespaces();
- BuildConfigWatcher buildConfigWatcher = new BuildConfigWatcher(namespaces);
- globalPluginConfiguration.setBuildConfigWatcher(buildConfigWatcher);
- buildConfigWatcher.start();
+ private void startNamespaceInformers() {
+ for (String namespace : namespaces) {
+ GlobalPluginConfiguration configuration = GlobalPluginConfiguration.get();
+ if (configuration.isSyncBuildConfigsAndBuilds()) {
+ BuildConfigInformer buildConfigInformer = new BuildConfigInformer(namespace);
+ informers.add(buildConfigInformer);
+ buildConfigInformer.start();
- BuildWatcher buildWatcher = new BuildWatcher(namespaces);
- globalPluginConfiguration.setBuildWatcher(buildWatcher);
- buildWatcher.start();
+ BuildInformer buildInformer = new BuildInformer(namespace);
+ buildInformer.start();
+ informers.add(buildInformer);
+ }
+ if (configuration.isSyncConfigMaps()) {
+ ConfigMapInformer configMapInformer = new ConfigMapInformer(namespace);
+ configMapInformer.start();
+ informers.add(configMapInformer);
+ }
+ if (configuration.isSyncImageStreams()) {
+ ImageStreamInformer imageStreamInformer = new ImageStreamInformer(namespace);
+ imageStreamInformer.start();
+ informers.add(imageStreamInformer);
+ }
+ if (configuration.isSyncSecrets()) {
+ SecretInformer secretInformer = new SecretInformer(namespace);
+ secretInformer.start();
+ informers.add(secretInformer);
+ }
+ }
+ }
- ConfigMapWatcher configMapWatcher = new ConfigMapWatcher(namespaces);
- globalPluginConfiguration.setConfigMapWatcher(configMapWatcher);
- configMapWatcher.start();
+ private void startClusterInformers() {
+ logger.info("Initializing cluster informers ...");
+ GlobalPluginConfiguration configuration = GlobalPluginConfiguration.get();
+ if (configuration.isSyncBuildConfigsAndBuilds()) {
+ BuildConfigClusterInformer buildConfigInformer = new BuildConfigClusterInformer(namespaces);
+ informers.add(buildConfigInformer);
+ buildConfigInformer.start();
- ImageStreamWatcher imageStreamWatcher = new ImageStreamWatcher(namespaces);
- globalPluginConfiguration.setImageStreamWatcher(imageStreamWatcher);
- imageStreamWatcher.start();
+ BuildClusterInformer buildInformer = new BuildClusterInformer(namespaces);
+ informers.add(buildInformer);
+ buildInformer.start();
+ }
+ if (configuration.isSyncConfigMaps()) {
+ ConfigMapClusterInformer configMapInformer = new ConfigMapClusterInformer(namespaces);
+ informers.add(configMapInformer);
+ configMapInformer.start();
+ }
+ if (configuration.isSyncImageStreams()) {
+ ImageStreamClusterInformer imageStreamInformer = new ImageStreamClusterInformer(namespaces);
+ informers.add(imageStreamInformer);
+ imageStreamInformer.start();
+ }
+ if (configuration.isSyncSecrets()) {
+ SecretClusterInformer secretInformer = new SecretClusterInformer(namespaces);
+ informers.add(secretInformer);
+ secretInformer.start();
+ }
+ }
- SecretWatcher secretWatcher = new SecretWatcher(namespaces);
- globalPluginConfiguration.setSecretWatcher(secretWatcher);
- secretWatcher.start();
+ public void stop() {
+ logger.info("Stopping all informers ...");
+ synchronized (this) {
+ for (Lifecyclable informer : informers) {
+ logger.info("Stopping informer: {}" + informer);
+ informer.stop();
+ logger.info("Stopped informer: {}" + informer);
+ }
+ informers.clear();
+ logger.info("Stopped all informers");
+ }
}
}
diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/ImageStreamClusterInformer.java b/src/main/java/io/fabric8/jenkins/openshiftsync/ImageStreamClusterInformer.java
new file mode 100644
index 000000000..ad78dc352
--- /dev/null
+++ b/src/main/java/io/fabric8/jenkins/openshiftsync/ImageStreamClusterInformer.java
@@ -0,0 +1,148 @@
+/**
+ * Copyright (C) 2017 Red Hat, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.fabric8.jenkins.openshiftsync;
+
+import static io.fabric8.jenkins.openshiftsync.Constants.IMAGESTREAM_AGENT_LABEL;
+import static io.fabric8.jenkins.openshiftsync.Constants.IMAGESTREAM_AGENT_LABEL_VALUE;
+import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getInformerFactory;
+import static io.fabric8.jenkins.openshiftsync.PodTemplateUtils.IMAGESTREAM_TYPE;
+import static io.fabric8.jenkins.openshiftsync.PodTemplateUtils.addAgents;
+import static io.fabric8.jenkins.openshiftsync.PodTemplateUtils.addPodTemplate;
+import static io.fabric8.jenkins.openshiftsync.PodTemplateUtils.deleteAgents;
+import static io.fabric8.jenkins.openshiftsync.PodTemplateUtils.getPodTemplatesListFromImageStreams;
+import static io.fabric8.jenkins.openshiftsync.PodTemplateUtils.hasPodTemplate;
+import static io.fabric8.jenkins.openshiftsync.PodTemplateUtils.updateAgents;
+import static java.util.Collections.singletonMap;
+
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import org.csanchez.jenkins.plugins.kubernetes.PodTemplate;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import io.fabric8.kubernetes.api.model.ObjectMeta;
+import io.fabric8.kubernetes.client.dsl.base.OperationContext;
+import io.fabric8.kubernetes.client.informers.ResourceEventHandler;
+import io.fabric8.kubernetes.client.informers.SharedIndexInformer;
+import io.fabric8.kubernetes.client.informers.SharedInformerFactory;
+import io.fabric8.openshift.api.model.ImageStream;
+
+public class ImageStreamClusterInformer implements ResourceEventHandler, Lifecyclable {
+
+ private static final Logger LOGGER = LoggerFactory.getLogger(SecretInformer.class.getName());
+ private SharedIndexInformer informer;
+ private Set namespaces;
+
+ public ImageStreamClusterInformer(String[] namespaces) {
+ this.namespaces = new HashSet<>(Arrays.asList(namespaces));
+ }
+
+ public int getResyncPeriodMilliseconds() {
+ return 1_000 * GlobalPluginConfiguration.get().getImageStreamListInterval();
+ }
+
+ public void start() {
+ LOGGER.info("Starting ImageStream informer for {} !!" + namespaces);
+ LOGGER.debug("Listing ImageStream resources");
+ SharedInformerFactory factory = getInformerFactory();
+ Map labels = singletonMap(IMAGESTREAM_AGENT_LABEL, IMAGESTREAM_AGENT_LABEL_VALUE);
+ OperationContext withLabels = new OperationContext().withLabels(labels);
+ this.informer = factory.sharedIndexInformerFor(ImageStream.class, withLabels, getResyncPeriodMilliseconds());
+ informer.addEventHandler(this);
+ factory.startAllRegisteredInformers();
+ LOGGER.info("ImageStream informer started for namespace: {}" + namespaces);
+// ImageStreamList list = getOpenshiftClient().imageStreams().inNamespace(namespace).withLabels(labels).list();
+// onInit(list.getItems());
+ }
+
+ public void stop() {
+ LOGGER.info("Stopping secret informer {} !!" + namespaces);
+ this.informer.stop();
+ }
+
+ @Override
+ public void onAdd(ImageStream obj) {
+ LOGGER.debug("ImageStream informer received add event for: {}" + obj);
+ if (obj != null) {
+ ObjectMeta metadata = obj.getMetadata();
+ String namespace = metadata.getNamespace();
+ if (namespaces.contains(namespace)) {
+ String name = metadata.getName();
+ String uid = metadata.getUid();
+ LOGGER.info("ImageStream informer received add event for: {}" + name);
+ List slaves = PodTemplateUtils.getPodTemplatesListFromImageStreams(obj);
+ addAgents(slaves, IMAGESTREAM_TYPE, uid, name, namespace);
+ } else {
+ LOGGER.debug("Received event for a namespace we are not watching: {} ... ignoring", namespace);
+ }
+ }
+ }
+
+ @Override
+ public void onUpdate(ImageStream oldObj, ImageStream newObj) {
+ LOGGER.info("ImageStream informer received update event for: {} to: {}" + oldObj + newObj);
+ if (newObj != null) {
+ List slaves = PodTemplateUtils.getPodTemplatesListFromImageStreams(newObj);
+ ObjectMeta metadata = newObj.getMetadata();
+ String namespace = metadata.getNamespace();
+ if (namespaces.contains(namespace)) {
+ String uid = metadata.getUid();
+ String name = metadata.getName();
+ updateAgents(slaves, IMAGESTREAM_TYPE, uid, name, namespace);
+ } else {
+ LOGGER.debug("Received event for a namespace we are not watching: {} ... ignoring", namespace);
+ }
+ }
+ }
+
+ @Override
+ public void onDelete(ImageStream obj, boolean deletedFinalStateUnknown) {
+ LOGGER.info("ImageStream informer received delete event for: {}" + obj);
+ if (obj != null) {
+ ObjectMeta metadata = obj.getMetadata();
+ String namespace = metadata.getNamespace();
+ if (namespaces.contains(namespace)) {
+ List slaves = PodTemplateUtils.getPodTemplatesListFromImageStreams(obj);
+ String uid = metadata.getUid();
+ String name = metadata.getName();
+ deleteAgents(slaves, IMAGESTREAM_TYPE, uid, name, namespace);
+ } else {
+ LOGGER.debug("Received event for a namespace we are not watching: {} ... ignoring", namespace);
+ }
+ }
+ }
+
+ private void onInit(List list) {
+ for (ImageStream imageStream : list) {
+ try {
+ List agents = getPodTemplatesListFromImageStreams(imageStream);
+ for (PodTemplate podTemplate : agents) {
+ // watch event might beat the timer - put call is technically fine, but not
+ // addPodTemplate given k8s plugin issues
+ if (!hasPodTemplate(podTemplate)) {
+ addPodTemplate(podTemplate);
+ }
+ }
+ } catch (Exception e) {
+ LOGGER.error("Failed to update job", e);
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/ImageStreamInformer.java b/src/main/java/io/fabric8/jenkins/openshiftsync/ImageStreamInformer.java
new file mode 100644
index 000000000..2b5b56c7b
--- /dev/null
+++ b/src/main/java/io/fabric8/jenkins/openshiftsync/ImageStreamInformer.java
@@ -0,0 +1,136 @@
+/**
+ * Copyright (C) 2017 Red Hat, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.fabric8.jenkins.openshiftsync;
+
+import static io.fabric8.jenkins.openshiftsync.Constants.IMAGESTREAM_AGENT_LABEL;
+import static io.fabric8.jenkins.openshiftsync.Constants.IMAGESTREAM_AGENT_LABEL_VALUE;
+import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getInformerFactory;
+import static io.fabric8.jenkins.openshiftsync.PodTemplateUtils.IMAGESTREAM_TYPE;
+import static io.fabric8.jenkins.openshiftsync.PodTemplateUtils.addAgents;
+import static io.fabric8.jenkins.openshiftsync.PodTemplateUtils.addPodTemplate;
+import static io.fabric8.jenkins.openshiftsync.PodTemplateUtils.deleteAgents;
+import static io.fabric8.jenkins.openshiftsync.PodTemplateUtils.getPodTemplatesListFromImageStreams;
+import static io.fabric8.jenkins.openshiftsync.PodTemplateUtils.hasPodTemplate;
+import static io.fabric8.jenkins.openshiftsync.PodTemplateUtils.updateAgents;
+import static java.util.Collections.singletonMap;
+
+import java.util.List;
+import java.util.Map;
+
+import org.csanchez.jenkins.plugins.kubernetes.PodTemplate;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import io.fabric8.kubernetes.api.model.ObjectMeta;
+import io.fabric8.kubernetes.client.dsl.base.OperationContext;
+import io.fabric8.kubernetes.client.informers.ResourceEventHandler;
+import io.fabric8.kubernetes.client.informers.SharedIndexInformer;
+import io.fabric8.kubernetes.client.informers.SharedInformerFactory;
+import io.fabric8.openshift.api.model.ImageStream;
+
+public class ImageStreamInformer implements ResourceEventHandler, Lifecyclable {
+
+ private static final Logger LOGGER = LoggerFactory.getLogger(SecretInformer.class.getName());
+ private SharedIndexInformer informer;
+ private String namespace;
+
+ public ImageStreamInformer(String namespace) {
+ this.namespace = namespace;
+ }
+
+ public int getResyncPeriodMilliseconds() {
+ return 1_000 * GlobalPluginConfiguration.get().getImageStreamListInterval();
+ }
+
+ public void start() {
+ LOGGER.info("Starting ImageStream informer for {} !!" + namespace);
+ LOGGER.debug("Listing ImageStream resources");
+ SharedInformerFactory factory = getInformerFactory().inNamespace(namespace);
+ Map labels = singletonMap(IMAGESTREAM_AGENT_LABEL, IMAGESTREAM_AGENT_LABEL_VALUE);
+ OperationContext withLabels = new OperationContext().withLabels(labels);
+ this.informer = factory.sharedIndexInformerFor(ImageStream.class, withLabels, getResyncPeriodMilliseconds());
+ informer.addEventHandler(this);
+ factory.startAllRegisteredInformers();
+ LOGGER.info("ImageStream informer started for namespace: {}" + namespace);
+// ImageStreamList list = getOpenshiftClient().imageStreams().inNamespace(namespace).withLabels(labels).list();
+// onInit(list.getItems());
+ }
+
+ public void stop() {
+ LOGGER.info("Stopping informer {} !!" + namespace);
+ if( this.informer != null ) {
+ this.informer.stop();
+ }
+ }
+
+
+ @Override
+ public void onAdd(ImageStream obj) {
+ LOGGER.debug("ImageStream informer received add event for: {}" + obj);
+ if (obj != null) {
+ ObjectMeta metadata = obj.getMetadata();
+ String name = metadata.getName();
+ String uid = metadata.getUid();
+ LOGGER.info("ImageStream informer received add event for: {}" + name);
+ List slaves = PodTemplateUtils.getPodTemplatesListFromImageStreams(obj);
+ addAgents(slaves, IMAGESTREAM_TYPE, uid, name, namespace);
+ }
+ }
+
+ @Override
+ public void onUpdate(ImageStream oldObj, ImageStream newObj) {
+ LOGGER.info("ImageStream informer received update event for: {} to: {}" + oldObj + newObj);
+ if (newObj != null) {
+ List slaves = PodTemplateUtils.getPodTemplatesListFromImageStreams(newObj);
+ ObjectMeta metadata = newObj.getMetadata();
+ String uid = metadata.getUid();
+ String name = metadata.getName();
+ String namespace = metadata.getNamespace();
+ updateAgents(slaves, IMAGESTREAM_TYPE, uid, name, namespace);
+ }
+ }
+
+ @Override
+ public void onDelete(ImageStream obj, boolean deletedFinalStateUnknown) {
+ LOGGER.info("ImageStream informer received delete event for: {}" + obj);
+ if (obj != null) {
+ List slaves = PodTemplateUtils.getPodTemplatesListFromImageStreams(obj);
+ ObjectMeta metadata = obj.getMetadata();
+ String uid = metadata.getUid();
+ String name = metadata.getName();
+ String namespace = metadata.getNamespace();
+ deleteAgents(slaves, IMAGESTREAM_TYPE, uid, name, namespace);
+ }
+
+ }
+
+ private void onInit(List list) {
+ for (ImageStream imageStream : list) {
+ try {
+ List agents = getPodTemplatesListFromImageStreams(imageStream);
+ for (PodTemplate podTemplate : agents) {
+ // watch event might beat the timer - put call is technically fine, but not
+ // addPodTemplate given k8s plugin issues
+ if (!hasPodTemplate(podTemplate)) {
+ addPodTemplate(podTemplate);
+ }
+ }
+ } catch (Exception e) {
+ LOGGER.error("Failed to update job", e);
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/ImageStreamWatcher.java b/src/main/java/io/fabric8/jenkins/openshiftsync/ImageStreamWatcher.java
deleted file mode 100644
index b8ea01954..000000000
--- a/src/main/java/io/fabric8/jenkins/openshiftsync/ImageStreamWatcher.java
+++ /dev/null
@@ -1,148 +0,0 @@
-/**
- * Copyright (C) 2017 Red Hat, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package io.fabric8.jenkins.openshiftsync;
-
-import static java.util.logging.Level.SEVERE;
-import static java.util.logging.Level.WARNING;
-
-import java.util.List;
-import java.util.logging.Logger;
-
-import org.csanchez.jenkins.plugins.kubernetes.PodTemplate;
-
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
-import hudson.triggers.SafeTimerTask;
-import io.fabric8.kubernetes.api.model.ObjectMeta;
-import io.fabric8.kubernetes.client.Watcher.Action;
-import io.fabric8.openshift.api.model.ImageStream;
-import io.fabric8.openshift.api.model.ImageStreamList;
-
-public class ImageStreamWatcher extends BaseWatcher {
- private final Logger logger = Logger.getLogger(getClass().getName());
-
- @SuppressFBWarnings("EI_EXPOSE_REP2")
- public ImageStreamWatcher(String[] namespaces) {
- super(namespaces);
- }
-
- @Override
- public int getListIntervalInSeconds() {
- return GlobalPluginConfiguration.get().getImageStreamListInterval();
- }
-
- public Runnable getStartTimerTask() {
- return new SafeTimerTask() {
- @Override
- public void doRun() {
- if (!CredentialsUtils.hasCredentials()) {
- logger.fine("No Openshift Token credential defined.");
- return;
- }
- for (String ns : namespaces) {
- ImageStreamList imageStreams = null;
- try {
- logger.fine("listing ImageStream resources");
- imageStreams = OpenShiftUtils.getOpenshiftClient().imageStreams().inNamespace(ns).list();
- onImageStreamInitialization(imageStreams);
- logger.fine("handled ImageStream resources");
- } catch (Exception e) {
- logger.log(SEVERE, "Failed to load ImageStreams: " + e, e);
- }
- try {
- String resourceVersion = "0";
- if (imageStreams == null) {
- logger.warning("Unable to get image stream list; impacts resource version used for watch");
- } else {
- resourceVersion = imageStreams.getMetadata().getResourceVersion();
- }
- if (watches.get(ns) == null) {
- logger.info("creating ImageStream watch for namespace " + ns + " and resource version " + resourceVersion);
- ImageStreamWatcher w = ImageStreamWatcher.this;
- WatcherCallback watcher = new WatcherCallback(w, ns);
- addWatch(ns, OpenShiftUtils.getOpenshiftClient().imageStreams().inNamespace(ns).withResourceVersion(resourceVersion).watch(watcher));
- }
- } catch (Exception e) {
- logger.log(SEVERE, "Failed to load ImageStreams: " + e, e);
- }
- }
- }
- };
- }
-
- public void start() {
- // lets process the initial state
- logger.info("Now handling startup image streams!!");
- super.start();
- }
-
- public void eventReceived(Action action, ImageStream imageStream) {
- try {
- List slaves = PodTemplateUtils.getPodTemplatesListFromImageStreams(imageStream);
- ObjectMeta metadata = imageStream.getMetadata();
- String uid = metadata.getUid();
- String name = metadata.getName();
- String namespace = metadata.getNamespace();
- switch (action) {
- case ADDED:
- processSlavesForAddEvent(slaves, PodTemplateUtils.IMAGESTREAM_TYPE, uid, name, namespace);
- break;
- case MODIFIED:
- processSlavesForModifyEvent(slaves, PodTemplateUtils.IMAGESTREAM_TYPE, uid, name, namespace);
- break;
- case DELETED:
- processSlavesForDeleteEvent(slaves, PodTemplateUtils.IMAGESTREAM_TYPE, uid, name, namespace);
- break;
- case ERROR:
- logger.warning("watch for imageStream " + name + " received error event ");
- break;
- default:
- logger.warning("watch for imageStream " + name + " received unknown event " + action);
- break;
- }
- } catch (Exception e) {
- logger.log(WARNING, "Caught: " + e, e);
- }
- }
-
- @Override
- public void eventReceived(Action action, T resource) {
- ImageStream imageStream = (ImageStream) resource;
- eventReceived(action, imageStream);
- }
-
- private void onImageStreamInitialization(ImageStreamList imageStreams) {
- if (imageStreams != null) {
- List items = imageStreams.getItems();
- if (items != null) {
- for (ImageStream imageStream : items) {
- try {
- List agents = PodTemplateUtils.getPodTemplatesListFromImageStreams(imageStream);
- for (PodTemplate entry : agents) {
- // watch event might beat the timer - put call is technically fine, but not
- // addPodTemplate given k8s plugin issues
- if (!PodTemplateUtils.hasPodTemplate(entry)) {
- PodTemplateUtils.addPodTemplate(entry);
- }
- }
- } catch (Exception e) {
- logger.log(SEVERE, "Failed to update job", e);
- }
- }
- }
- }
- }
-
-}
\ No newline at end of file
diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/JenkinsUtils.java b/src/main/java/io/fabric8/jenkins/openshiftsync/JenkinsUtils.java
index 7ee221d67..d69ce79ae 100644
--- a/src/main/java/io/fabric8/jenkins/openshiftsync/JenkinsUtils.java
+++ b/src/main/java/io/fabric8/jenkins/openshiftsync/JenkinsUtils.java
@@ -21,7 +21,6 @@
import static io.fabric8.jenkins.openshiftsync.BuildPhases.PENDING;
import static io.fabric8.jenkins.openshiftsync.BuildRunPolicy.SERIAL;
import static io.fabric8.jenkins.openshiftsync.BuildRunPolicy.SERIAL_LATEST_ONLY;
-import static io.fabric8.jenkins.openshiftsync.BuildWatcher.addEventToJenkinsJobRun;
import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_ANNOTATIONS_BUILD_NUMBER;
import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_BUILD_STATUS_FIELD;
import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_LABELS_BUILD_CONFIG_NAME;
@@ -783,7 +782,7 @@ public int compare(Build b1, Build b2) {
}
boolean buildAdded = false;
try {
- buildAdded = addEventToJenkinsJobRun(b);
+ buildAdded = BuildManager.addEventToJenkinsJobRun(b);
} catch (IOException e) {
ObjectMeta meta = b.getMetadata();
LOGGER.log(WARNING, "Failed to add new build " + meta.getNamespace() + "/" + meta.getName(), e);
diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/JobProcessor.java b/src/main/java/io/fabric8/jenkins/openshiftsync/JobProcessor.java
index 90b1f9d5a..f1268cd39 100644
--- a/src/main/java/io/fabric8/jenkins/openshiftsync/JobProcessor.java
+++ b/src/main/java/io/fabric8/jenkins/openshiftsync/JobProcessor.java
@@ -1,6 +1,5 @@
package io.fabric8.jenkins.openshiftsync;
-
import static io.fabric8.jenkins.openshiftsync.Annotations.AUTOSTART;
import static io.fabric8.jenkins.openshiftsync.Annotations.DISABLE_SYNC_CREATE;
import static io.fabric8.jenkins.openshiftsync.BuildConfigToJobMap.getJobFromBuildConfig;
@@ -40,155 +39,154 @@
public class JobProcessor extends NotReallyRoleSensitiveCallable {
- private final BuildConfigWatcher jobProcessor;
- private final BuildConfig buildConfig;
+ private final BuildConfig buildConfig;
private final static Logger logger = Logger.getLogger(BuildConfigToJobMap.class.getName());
- public JobProcessor(BuildConfigWatcher buildConfigWatcher, BuildConfig buildConfig) {
- jobProcessor = buildConfigWatcher;
- this.buildConfig = buildConfig;
- }
-
- @Override
- public Void call() throws Exception {
- Jenkins activeInstance = Jenkins.getActiveInstance();
- ItemGroup parent = activeInstance;
-
- String jobName = jenkinsJobName(buildConfig);
- String jobFullName = jenkinsJobFullName(buildConfig);
- WorkflowJob job = getJobFromBuildConfig(buildConfig);
-
- if (job == null) {
- job = (WorkflowJob) activeInstance.getItemByFullName(jobFullName);
- }
- boolean newJob = job == null;
-
- if (newJob) {
- String disableOn = getAnnotation(buildConfig, DISABLE_SYNC_CREATE);
- if (disableOn != null && disableOn.length() > 0) {
- logger.fine("Not creating missing jenkins job " + jobFullName + " due to annotation: "
- + DISABLE_SYNC_CREATE);
- return null;
- }
- parent = getFullNameParent(activeInstance, jobFullName, getNamespace(buildConfig));
- job = new WorkflowJob(parent, jobName);
- }
- BulkChange bulkJob = new BulkChange(job);
-
- job.setDisplayName(jenkinsJobDisplayName(buildConfig));
-
- FlowDefinition flowFromBuildConfig = mapBuildConfigToFlow(buildConfig);
- if (flowFromBuildConfig == null) {
- return null;
- }
- Map paramMap = createOrUpdateJob(activeInstance, parent, jobName, job, newJob,
- flowFromBuildConfig);
- bulkJob.commit();
- populateNamespaceFolder(activeInstance, parent, jobName, job, paramMap);
- return null;
- }
-
- private void populateNamespaceFolder(Jenkins activeInstance, ItemGroup parent, String jobName, WorkflowJob job,
- Map paramMap) throws IOException, AbortException {
- String fullName = job.getFullName();
- WorkflowJob workflowJob = activeInstance.getItemByFullName(fullName, WorkflowJob.class);
- if (workflowJob == null && parent instanceof Folder) {
- // we should never need this but just in
- // case there's an
- // odd timing issue or something...
- Folder folder = (Folder) parent;
- folder.add(job, jobName);
- workflowJob = activeInstance.getItemByFullName(fullName, WorkflowJob.class);
-
- }
- if (workflowJob == null) {
- logger.warning("Could not find created job " + fullName + " for BuildConfig: " + getNamespace(buildConfig)
- + "/" + getName(buildConfig));
- } else {
- JenkinsUtils.verifyEnvVars(paramMap, workflowJob, buildConfig);
- putJobWithBuildConfig(workflowJob, buildConfig);
- }
- }
-
- private Map createOrUpdateJob(Jenkins activeInstance, ItemGroup parent, String jobName,
- WorkflowJob job, boolean newJob, FlowDefinition flowFromBuildConfig) throws IOException {
- job.setDefinition(flowFromBuildConfig);
-
- String existingBuildRunPolicy = null;
-
- BuildConfigProjectProperty buildConfigProjectProperty = job.getProperty(BuildConfigProjectProperty.class);
- existingBuildRunPolicy = populateBCProjectProperty(job, existingBuildRunPolicy, buildConfigProjectProperty);
-
- // (re)populate job param list with any envs
- // from the build config
- Map paramMap = JenkinsUtils.addJobParamForBuildEnvs(job,
- buildConfig.getSpec().getStrategy().getJenkinsPipelineStrategy(), true);
-
- job.setConcurrentBuild(!(buildConfig.getSpec().getRunPolicy().equals(SERIAL)
- || buildConfig.getSpec().getRunPolicy().equals(SERIAL_LATEST_ONLY)));
-
- InputStream jobStream = new StringInputStream(new XStream2().toXML(job));
-
- if (newJob) {
- try {
- if (parent instanceof Folder) {
- Folder folder = (Folder) parent;
- folder.createProjectFromXML(jobName, jobStream).save();
- } else {
- activeInstance.createProjectFromXML(jobName, jobStream).save();
- }
-
- logger.info("Created job " + jobName + " from BuildConfig " + NamespaceName.create(buildConfig)
- + " with revision: " + buildConfig.getMetadata().getResourceVersion());
-
- String autostart = getAnnotation(buildConfig, AUTOSTART);
- if (Boolean.parseBoolean(autostart)) {
- logger.info("Automatically starting job " + jobName + " from BuildConfig "
- + NamespaceName.create(buildConfig) + " with revision: " + buildConfig.getMetadata().getResourceVersion());
- job.scheduleBuild2(0);
- }
- } catch (IllegalArgumentException e) {
- // see
- // https://github.com/openshift/jenkins-sync-plugin/issues/117,
- // jenkins might reload existing jobs on
- // startup between the
- // newJob check above and when we make
- // the createProjectFromXML call; if so,
- // retry as an update
- updateJob(job, jobStream, existingBuildRunPolicy, buildConfigProjectProperty);
- logger.info("Updated job " + jobName + " from BuildConfig " + NamespaceName.create(buildConfig)
- + " with revision: " + buildConfig.getMetadata().getResourceVersion());
- }
- } else {
- updateJob(job, jobStream, existingBuildRunPolicy, buildConfigProjectProperty);
- logger.info("Updated job " + jobName + " from BuildConfig " + NamespaceName.create(buildConfig)
- + " with revision: " + buildConfig.getMetadata().getResourceVersion());
- }
- return paramMap;
- }
-
- private String populateBCProjectProperty(WorkflowJob job, String existingBuildRunPolicy,
- BuildConfigProjectProperty buildConfigProjectProperty) throws IOException {
- if (buildConfigProjectProperty != null) {
- existingBuildRunPolicy = buildConfigProjectProperty.getBuildRunPolicy();
- long updatedBCResourceVersion = parseResourceVersion(buildConfig);
- long oldBCResourceVersion = parseResourceVersion(buildConfigProjectProperty.getResourceVersion());
- BuildConfigProjectProperty newProperty = new BuildConfigProjectProperty(buildConfig);
- if (updatedBCResourceVersion <= oldBCResourceVersion
- && newProperty.getUid().equals(buildConfigProjectProperty.getUid())
- && newProperty.getNamespace().equals(buildConfigProjectProperty.getNamespace())
- && newProperty.getName().equals(buildConfigProjectProperty.getName())
- && newProperty.getBuildRunPolicy().equals(buildConfigProjectProperty.getBuildRunPolicy())) {
- return null;
- }
- buildConfigProjectProperty.setUid(newProperty.getUid());
- buildConfigProjectProperty.setNamespace(newProperty.getNamespace());
- buildConfigProjectProperty.setName(newProperty.getName());
- buildConfigProjectProperty.setResourceVersion(newProperty.getResourceVersion());
- buildConfigProjectProperty.setBuildRunPolicy(newProperty.getBuildRunPolicy());
- } else {
- job.addProperty(new BuildConfigProjectProperty(buildConfig));
- }
- return existingBuildRunPolicy;
- }
+ public JobProcessor(BuildConfig buildConfig) {
+ this.buildConfig = buildConfig;
+ }
+
+ @Override
+ public Void call() throws Exception {
+ Jenkins activeInstance = Jenkins.getActiveInstance();
+ ItemGroup parent = activeInstance;
+
+ String jobName = jenkinsJobName(buildConfig);
+ String jobFullName = jenkinsJobFullName(buildConfig);
+ WorkflowJob job = getJobFromBuildConfig(buildConfig);
+
+ if (job == null) {
+ job = (WorkflowJob) activeInstance.getItemByFullName(jobFullName);
+ }
+ boolean newJob = job == null;
+
+ if (newJob) {
+ String disableOn = getAnnotation(buildConfig, DISABLE_SYNC_CREATE);
+ if (disableOn != null && disableOn.length() > 0) {
+ logger.fine("Not creating missing jenkins job " + jobFullName + " due to annotation: "
+ + DISABLE_SYNC_CREATE);
+ return null;
+ }
+ parent = getFullNameParent(activeInstance, jobFullName, getNamespace(buildConfig));
+ job = new WorkflowJob(parent, jobName);
+ }
+ BulkChange bulkJob = new BulkChange(job);
+
+ job.setDisplayName(jenkinsJobDisplayName(buildConfig));
+
+ FlowDefinition flowFromBuildConfig = mapBuildConfigToFlow(buildConfig);
+ if (flowFromBuildConfig == null) {
+ return null;
+ }
+ Map paramMap = createOrUpdateJob(activeInstance, parent, jobName, job, newJob,
+ flowFromBuildConfig);
+ bulkJob.commit();
+ populateNamespaceFolder(activeInstance, parent, jobName, job, paramMap);
+ return null;
+ }
+
+ private void populateNamespaceFolder(Jenkins activeInstance, ItemGroup parent, String jobName, WorkflowJob job,
+ Map paramMap) throws IOException, AbortException {
+ String fullName = job.getFullName();
+ WorkflowJob workflowJob = activeInstance.getItemByFullName(fullName, WorkflowJob.class);
+ if (workflowJob == null && parent instanceof Folder) {
+ // we should never need this but just in
+ // case there's an
+ // odd timing issue or something...
+ Folder folder = (Folder) parent;
+ folder.add(job, jobName);
+ workflowJob = activeInstance.getItemByFullName(fullName, WorkflowJob.class);
+
+ }
+ if (workflowJob == null) {
+ logger.warning("Could not find created job " + fullName + " for BuildConfig: " + getNamespace(buildConfig)
+ + "/" + getName(buildConfig));
+ } else {
+ JenkinsUtils.verifyEnvVars(paramMap, workflowJob, buildConfig);
+ putJobWithBuildConfig(workflowJob, buildConfig);
+ }
+ }
+
+ private Map createOrUpdateJob(Jenkins activeInstance, ItemGroup parent, String jobName,
+ WorkflowJob job, boolean newJob, FlowDefinition flowFromBuildConfig) throws IOException {
+ job.setDefinition(flowFromBuildConfig);
+
+ String existingBuildRunPolicy = null;
+
+ BuildConfigProjectProperty buildConfigProjectProperty = job.getProperty(BuildConfigProjectProperty.class);
+ existingBuildRunPolicy = populateBCProjectProperty(job, existingBuildRunPolicy, buildConfigProjectProperty);
+
+ // (re)populate job param list with any envs
+ // from the build config
+ Map paramMap = JenkinsUtils.addJobParamForBuildEnvs(job,
+ buildConfig.getSpec().getStrategy().getJenkinsPipelineStrategy(), true);
+
+ job.setConcurrentBuild(!(buildConfig.getSpec().getRunPolicy().equals(SERIAL)
+ || buildConfig.getSpec().getRunPolicy().equals(SERIAL_LATEST_ONLY)));
+
+ InputStream jobStream = new StringInputStream(new XStream2().toXML(job));
+
+ if (newJob) {
+ try {
+ if (parent instanceof Folder) {
+ Folder folder = (Folder) parent;
+ folder.createProjectFromXML(jobName, jobStream).save();
+ } else {
+ activeInstance.createProjectFromXML(jobName, jobStream).save();
+ }
+
+ logger.info("Created job " + jobName + " from BuildConfig " + NamespaceName.create(buildConfig)
+ + " with revision: " + buildConfig.getMetadata().getResourceVersion());
+
+ String autostart = getAnnotation(buildConfig, AUTOSTART);
+ if (Boolean.parseBoolean(autostart)) {
+ logger.info("Automatically starting job " + jobName + " from BuildConfig "
+ + NamespaceName.create(buildConfig) + " with revision: "
+ + buildConfig.getMetadata().getResourceVersion());
+ job.scheduleBuild2(0);
+ }
+ } catch (IllegalArgumentException e) {
+ // see
+ // https://github.com/openshift/jenkins-sync-plugin/issues/117,
+ // jenkins might reload existing jobs on
+ // startup between the
+ // newJob check above and when we make
+ // the createProjectFromXML call; if so,
+ // retry as an update
+ updateJob(job, jobStream, existingBuildRunPolicy, buildConfigProjectProperty);
+ logger.info("Updated job " + jobName + " from BuildConfig " + NamespaceName.create(buildConfig)
+ + " with revision: " + buildConfig.getMetadata().getResourceVersion());
+ }
+ } else {
+ updateJob(job, jobStream, existingBuildRunPolicy, buildConfigProjectProperty);
+ logger.info("Updated job " + jobName + " from BuildConfig " + NamespaceName.create(buildConfig)
+ + " with revision: " + buildConfig.getMetadata().getResourceVersion());
+ }
+ return paramMap;
+ }
+
+ private String populateBCProjectProperty(WorkflowJob job, String existingBuildRunPolicy,
+ BuildConfigProjectProperty buildConfigProjectProperty) throws IOException {
+ if (buildConfigProjectProperty != null) {
+ existingBuildRunPolicy = buildConfigProjectProperty.getBuildRunPolicy();
+ long updatedBCResourceVersion = parseResourceVersion(buildConfig);
+ long oldBCResourceVersion = parseResourceVersion(buildConfigProjectProperty.getResourceVersion());
+ BuildConfigProjectProperty newProperty = new BuildConfigProjectProperty(buildConfig);
+ if (updatedBCResourceVersion <= oldBCResourceVersion
+ && newProperty.getUid().equals(buildConfigProjectProperty.getUid())
+ && newProperty.getNamespace().equals(buildConfigProjectProperty.getNamespace())
+ && newProperty.getName().equals(buildConfigProjectProperty.getName())
+ && newProperty.getBuildRunPolicy().equals(buildConfigProjectProperty.getBuildRunPolicy())) {
+ return null;
+ }
+ buildConfigProjectProperty.setUid(newProperty.getUid());
+ buildConfigProjectProperty.setNamespace(newProperty.getNamespace());
+ buildConfigProjectProperty.setName(newProperty.getName());
+ buildConfigProjectProperty.setResourceVersion(newProperty.getResourceVersion());
+ buildConfigProjectProperty.setBuildRunPolicy(newProperty.getBuildRunPolicy());
+ } else {
+ job.addProperty(new BuildConfigProjectProperty(buildConfig));
+ }
+ return existingBuildRunPolicy;
+ }
}
\ No newline at end of file
diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/Lifecyclable.java b/src/main/java/io/fabric8/jenkins/openshiftsync/Lifecyclable.java
new file mode 100644
index 000000000..ca9bd080a
--- /dev/null
+++ b/src/main/java/io/fabric8/jenkins/openshiftsync/Lifecyclable.java
@@ -0,0 +1,7 @@
+package io.fabric8.jenkins.openshiftsync;
+
+public interface Lifecyclable {
+ public void stop();
+ public void start();
+
+}
diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/OpenShiftTokenCredentials.java b/src/main/java/io/fabric8/jenkins/openshiftsync/OpenShiftTokenCredentials.java
new file mode 100644
index 000000000..3d3f2ff4d
--- /dev/null
+++ b/src/main/java/io/fabric8/jenkins/openshiftsync/OpenShiftTokenCredentials.java
@@ -0,0 +1,38 @@
+package io.fabric8.jenkins.openshiftsync;
+
+import com.cloudbees.plugins.credentials.CredentialsScope;
+import com.cloudbees.plugins.credentials.impl.BaseStandardCredentials;
+import hudson.Extension;
+import hudson.util.Secret;
+import org.kohsuke.stapler.DataBoundConstructor;
+
+// TODO: merge with https://github.com/jenkinsci/kubernetes-plugin/blob/master/src/main/java/org/csanchez/jenkins/plugins/kubernetes/OpenShiftTokenCredentialImpl.java ?
+public class OpenShiftTokenCredentials extends BaseStandardCredentials {
+
+ private final Secret secret;
+
+ @DataBoundConstructor
+ public OpenShiftTokenCredentials(CredentialsScope scope, String id,
+ String description, Secret secret) {
+ super(scope, id, description);
+ this.secret = secret;
+ }
+
+ public String getToken() {
+ return secret.getPlainText();
+ }
+
+ public Secret getSecret() {
+ return secret;
+ }
+
+ @Extension
+ public static class DescriptorImpl extends
+ BaseStandardCredentialsDescriptor {
+ @Override
+ public String getDisplayName() {
+ return "OpenShift Token for OpenShift Client Plugin";
+ }
+ }
+
+}
diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/OpenShiftUtils.java b/src/main/java/io/fabric8/jenkins/openshiftsync/OpenShiftUtils.java
index 191667fba..6170b7de8 100644
--- a/src/main/java/io/fabric8/jenkins/openshiftsync/OpenShiftUtils.java
+++ b/src/main/java/io/fabric8/jenkins/openshiftsync/OpenShiftUtils.java
@@ -15,15 +15,44 @@
*/
package io.fabric8.jenkins.openshiftsync;
+import static io.fabric8.jenkins.openshiftsync.BuildPhases.NEW;
+import static io.fabric8.jenkins.openshiftsync.BuildPhases.PENDING;
+import static io.fabric8.jenkins.openshiftsync.BuildPhases.RUNNING;
+import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_DEFAULT_NAMESPACE;
+import static java.util.logging.Level.FINE;
+import static java.util.logging.Level.INFO;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.FileReader;
+import java.io.IOException;
+import java.io.InputStream;
+import java.nio.charset.StandardCharsets;
+import java.security.MessageDigest;
+import java.security.NoSuchAlgorithmException;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang.builder.ReflectionToStringBuilder;
+import org.apache.tools.ant.filters.StringInputStream;
+import org.joda.time.DateTime;
+import org.joda.time.format.DateTimeFormatter;
+import org.joda.time.format.ISODateTimeFormat;
+
import com.cloudbees.hudson.plugins.folder.Folder;
import com.fasterxml.jackson.annotation.JsonIgnore;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.dataformat.yaml.YAMLFactory;
-import hudson.model.ItemGroup;
import hudson.BulkChange;
import hudson.model.Item;
+import hudson.model.ItemGroup;
import hudson.util.XStream2;
import io.fabric8.kubernetes.api.model.HasMetadata;
import io.fabric8.kubernetes.api.model.ObjectMeta;
@@ -33,7 +62,9 @@
import io.fabric8.kubernetes.api.model.ServiceSpec;
import io.fabric8.kubernetes.client.Config;
import io.fabric8.kubernetes.client.Version;
+import io.fabric8.kubernetes.client.informers.SharedInformerFactory;
import io.fabric8.openshift.api.model.Build;
+import io.fabric8.openshift.api.model.BuildBuilder;
import io.fabric8.openshift.api.model.BuildConfig;
import io.fabric8.openshift.api.model.BuildConfigSpec;
import io.fabric8.openshift.api.model.BuildSource;
@@ -46,47 +77,20 @@
import io.fabric8.openshift.client.OpenShiftClient;
import io.fabric8.openshift.client.OpenShiftConfigBuilder;
import jenkins.model.Jenkins;
-
-import org.apache.commons.lang.StringUtils;
-import org.apache.tools.ant.filters.StringInputStream;
-import org.joda.time.DateTime;
-import org.joda.time.format.DateTimeFormatter;
-import org.joda.time.format.ISODateTimeFormat;
-
-import java.io.BufferedReader;
-import java.io.File;
-import java.io.FileNotFoundException;
-import java.io.FileReader;
-import java.io.IOException;
-import java.io.InputStream;
-import java.nio.charset.StandardCharsets;
-import java.security.MessageDigest;
-import java.security.NoSuchAlgorithmException;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.logging.Level;
-import java.util.logging.Logger;
-
-import static io.fabric8.jenkins.openshiftsync.BuildPhases.NEW;
-import static io.fabric8.jenkins.openshiftsync.BuildPhases.PENDING;
-import static io.fabric8.jenkins.openshiftsync.BuildPhases.RUNNING;
-import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_DEFAULT_NAMESPACE;
-import static java.util.logging.Level.FINE;
+import okhttp3.Dispatcher;
/**
*/
public class OpenShiftUtils {
- private final static Logger logger = Logger.getLogger(OpenShiftUtils.class
- .getName());
+ private final static Logger logger = Logger.getLogger(OpenShiftUtils.class.getName());
private static OpenShiftClient openShiftClient;
private static String jenkinsPodNamespace = null;
-
+ private static final Jenkins JENKINS_INSTANCE = Jenkins.getInstanceOrNull();
+
static {
- jenkinsPodNamespace = System
- .getProperty(Constants.OPENSHIFT_PROJECT_ENV_VAR_NAME);
+ jenkinsPodNamespace = System.getProperty(Constants.OPENSHIFT_PROJECT_ENV_VAR_NAME);
if (jenkinsPodNamespace != null && jenkinsPodNamespace.trim().length() > 0) {
jenkinsPodNamespace = jenkinsPodNamespace.trim();
} else {
@@ -119,30 +123,37 @@ public class OpenShiftUtils {
}
}
- private static final DateTimeFormatter dateFormatter = ISODateTimeFormat
- .dateTimeNoMillis();
+ private static final DateTimeFormatter dateFormatter = ISODateTimeFormat.dateTimeNoMillis();
/**
* Initializes an {@link OpenShiftClient}
*
- * @param serverUrl
- * the optional URL of where the OpenShift cluster API server is
- * running
+ * @param serverUrl the optional URL of where the OpenShift cluster API server
+ * is running
+	 * @param maxConnections the maximum number of concurrent HTTP requests (total and per host) allowed by the client
*/
- public synchronized static void initializeOpenShiftClient(String serverUrl) {
+ public synchronized static void initializeOpenShiftClient(String serverUrl, int maxConnections) {
+ if (openShiftClient != null) {
+ logger.log(INFO, "Closing already initialized openshift client");
+ openShiftClient.close();
+ }
OpenShiftConfigBuilder configBuilder = new OpenShiftConfigBuilder();
if (serverUrl != null && !serverUrl.isEmpty()) {
configBuilder.withMasterUrl(serverUrl);
}
Config config = configBuilder.build();
- config.setUserAgent("openshift-sync-plugin-"
- + Jenkins.getInstance().getPluginManager()
- .getPlugin("openshift-sync").getVersion() + "/fabric8-"
- + Version.clientVersion());
+ logger.log(INFO, "Current OpenShift Client Configuration: " + ReflectionToStringBuilder.toString(config));
+
+ String version = JENKINS_INSTANCE.getPluginManager().getPlugin("openshift-sync").getVersion();
+ config.setUserAgent("openshift-sync-plugin-" + version + "/fabric8-" + Version.clientVersion());
openShiftClient = new DefaultOpenShiftClient(config);
- DefaultOpenShiftClient defClient = (DefaultOpenShiftClient)openShiftClient;
- defClient.getHttpClient().dispatcher().setMaxRequestsPerHost(100);
- defClient.getHttpClient().dispatcher().setMaxRequests(100);
+ logger.log(INFO, "New OpenShift client initialized: " + openShiftClient);
+
+ DefaultOpenShiftClient defClient = (DefaultOpenShiftClient) openShiftClient;
+ Dispatcher dispatcher = defClient.getHttpClient().dispatcher();
+// int maxConnections = 100;//GlobalPluginConfiguration.get().getMaxConnections();
+ dispatcher.setMaxRequestsPerHost(maxConnections);
+ dispatcher.setMaxRequests(maxConnections);
}
public synchronized static OpenShiftClient getOpenShiftClient() {
@@ -152,30 +163,60 @@ public synchronized static OpenShiftClient getOpenShiftClient() {
// Get the current OpenShiftClient and configure to use the current Oauth
// token.
public synchronized static OpenShiftClient getAuthenticatedOpenShiftClient() {
+ if (openShiftClient == null) {
+ GlobalPluginConfiguration config = GlobalPluginConfiguration.get();
+ initializeOpenShiftClient(config.getServer(), config.getMaxConnections());
+ }
if (openShiftClient != null) {
String token = CredentialsUtils.getCurrentToken();
if (token.length() > 0) {
openShiftClient.getConfiguration().setOauthToken(token);
}
}
-
return openShiftClient;
}
+ public static SharedInformerFactory getInformerFactory() {
+ return getAuthenticatedOpenShiftClient().informers();
+/* if (factory == null) {
+ synchronized (lock) {
+ factory = getAuthenticatedOpenShiftClient().informers();
+ }
+ }
+ return factory;*/
+ }
+
public synchronized static void shutdownOpenShiftClient() {
+ logger.info("Stopping openshift client: " + openShiftClient);
if (openShiftClient != null) {
+
+ // All this stuff is done by openShiftClient.close();
+
+// DefaultOpenShiftClient client = (DefaultOpenShiftClient) openShiftClient;
+// Dispatcher dispatcher = client.getHttpClient().dispatcher();
+// ExecutorService executorService = dispatcher.executorService();
+// try {
+// dispatcher.cancelAll();
+// client.getHttpClient().connectionPool().evictAll();
+//                //TODO Akram: shutting down the executorService prevents other informers from re-attaching to it.
+// executorService.shutdown();
+// TimeUnit.SECONDS.sleep(1);
+// } catch (Exception e) {
+// logger.warning("Error while stopping executor thread");
+// executorService.shutdownNow();
+// }
openShiftClient.close();
openShiftClient = null;
+// factory = null;
}
}
/**
* Checks if a {@link BuildConfig} relates to a Jenkins build
*
- * @param bc
- * the BuildConfig
- * @return true if this is an OpenShift BuildConfig which should be mirrored
- * to a Jenkins Job
+ * @param bc the BuildConfig
+ * @return true if this is an OpenShift BuildConfig which should be mirrored to
+ * a Jenkins Job
*/
public static boolean isPipelineStrategyBuildConfig(BuildConfig bc) {
if (BuildConfigToJobMapper.JENKINS_PIPELINE_BUILD_STRATEGY
@@ -206,8 +247,7 @@ public static boolean isPipelineStrategyBuild(Build b) {
logger.warning("bad input, null strategy: " + b);
return false;
}
- if (BuildConfigToJobMapper.JENKINS_PIPELINE_BUILD_STRATEGY
- .equalsIgnoreCase(b.getSpec().getStrategy().getType())
+ if (BuildConfigToJobMapper.JENKINS_PIPELINE_BUILD_STRATEGY.equalsIgnoreCase(b.getSpec().getStrategy().getType())
&& b.getSpec().getStrategy().getJenkinsPipelineStrategy() != null) {
return true;
}
@@ -217,8 +257,7 @@ public static boolean isPipelineStrategyBuild(Build b) {
/**
* Finds the Jenkins job name for the given {@link BuildConfig}.
*
- * @param bc
- * the BuildConfig
+ * @param bc the BuildConfig
* @return the jenkins job name for the given BuildConfig
*/
public static String jenkinsJobName(BuildConfig bc) {
@@ -231,10 +270,9 @@ public static String jenkinsJobName(BuildConfig bc) {
/**
* Creates the Jenkins Job name for the given buildConfigName
*
- * @param namespace
- * the namespace of the build
- * @param buildConfigName
- * the name of the {@link BuildConfig} in in the namespace
+ * @param namespace the namespace of the build
+	 * @param buildConfigName the name of the {@link BuildConfig} in the
+ * namespace
* @return the jenkins job name for the given namespace and name
*/
public static String jenkinsJobName(String namespace, String buildConfigName) {
@@ -245,8 +283,7 @@ public static String jenkinsJobName(String namespace, String buildConfigName) {
* Finds the full jenkins job path including folders for the given
* {@link BuildConfig}.
*
- * @param bc
- * the BuildConfig
+ * @param bc the BuildConfig
* @return the jenkins job name for the given BuildConfig
*/
public static String jenkinsJobFullName(BuildConfig bc) {
@@ -255,22 +292,22 @@ public static String jenkinsJobFullName(BuildConfig bc) {
return jobName;
}
if (GlobalPluginConfiguration.get().getFoldersEnabled()) {
- return getNamespace(bc) + "/" + jenkinsJobName(getNamespace(bc), getName(bc));
+ return getNamespace(bc) + "/" + jenkinsJobName(getNamespace(bc), getName(bc));
} else {
- return getName(bc);
+ return getName(bc);
}
}
/**
* Returns the parent for the given item full name or default to the active
* jenkins if it does not exist
+ *
* @param activeJenkins the active Jenkins instance
- * @param fullName the full name of the instance
- * @param namespace the namespace where the instance runs
+ * @param fullName the full name of the instance
+ * @param namespace the namespace where the instance runs
* @return and ItemGroup representing the full parent
*/
- public static ItemGroup getFullNameParent(Jenkins activeJenkins,
- String fullName, String namespace) {
+ public static ItemGroup getFullNameParent(Jenkins activeJenkins, String fullName, String namespace) {
int idx = fullName.lastIndexOf('/');
if (idx > 0) {
String parentFullName = fullName.substring(0, idx);
@@ -282,25 +319,21 @@ public static ItemGroup getFullNameParent(Jenkins activeJenkins,
// lets lazily create a new folder for this namespace parent
Folder folder = new Folder(activeJenkins, namespace);
try {
- folder.setDescription("Folder for the OpenShift project: "
- + namespace);
+ folder.setDescription("Folder for the OpenShift project: " + namespace);
} catch (IOException e) {
// ignore
}
BulkChange bk = new BulkChange(folder);
- InputStream jobStream = new StringInputStream(
- new XStream2().toXML(folder));
+ InputStream jobStream = new StringInputStream(new XStream2().toXML(folder));
try {
- activeJenkins.createProjectFromXML(namespace, jobStream)
- .save();
+ activeJenkins.createProjectFromXML(namespace, jobStream).save();
} catch (IOException e) {
logger.warning("Failed to create the Folder: " + namespace);
}
try {
bk.commit();
} catch (IOException e) {
- logger.warning("Failed to commit toe BulkChange for the Folder: "
- + namespace);
+                    logger.warning("Failed to commit the BulkChange for the Folder: " + namespace);
}
// lets look it up again to be sure
parent = activeJenkins.getItemByFullName(namespace);
@@ -315,8 +348,7 @@ public static ItemGroup getFullNameParent(Jenkins activeJenkins,
/**
* Finds the Jenkins job display name for the given {@link BuildConfig}.
*
- * @param bc
- * the BuildConfig
+ * @param bc the BuildConfig
* @return the jenkins job display name for the given BuildConfig
*/
public static String jenkinsJobDisplayName(BuildConfig bc) {
@@ -328,14 +360,12 @@ public static String jenkinsJobDisplayName(BuildConfig bc) {
/**
* Creates the Jenkins Job display name for the given buildConfigName
*
- * @param namespace
- * the namespace of the build
- * @param buildConfigName
- * the name of the {@link BuildConfig} in in the namespace
+ * @param namespace the namespace of the build
+	 * @param buildConfigName the name of the {@link BuildConfig} in the
+ * namespace
* @return the jenkins job display name for the given namespace and name
*/
- public static String jenkinsJobDisplayName(String namespace,
- String buildConfigName) {
+ public static String jenkinsJobDisplayName(String namespace, String buildConfigName) {
return namespace + "/" + buildConfigName;
}
@@ -343,27 +373,21 @@ public static String jenkinsJobDisplayName(String namespace,
* Gets the current namespace running Jenkins inside or returns a reasonable
* default
*
- * @param configuredNamespaces
- * the optional configured namespace(s)
- * @param client
- * the OpenShift client
+ * @param configuredNamespaces the optional configured namespace(s)
+ * @param client the OpenShift client
* @return the default namespace using either the configuration value, the
* default namespace on the client or "default"
*/
- public static String[] getNamespaceOrUseDefault(
- String[] configuredNamespaces, OpenShiftClient client) {
+ public static String[] getNamespaceOrUseDefault(String[] configuredNamespaces, OpenShiftClient client) {
String[] namespaces = configuredNamespaces;
if (namespaces != null) {
for (int i = 0; i < namespaces.length; i++) {
- if (namespaces[i].startsWith("${")
- && namespaces[i].endsWith("}")) {
- String envVar = namespaces[i].substring(2,
- namespaces[i].length() - 1);
+ if (namespaces[i].startsWith("${") && namespaces[i].endsWith("}")) {
+ String envVar = namespaces[i].substring(2, namespaces[i].length() - 1);
namespaces[i] = System.getenv(envVar);
if (StringUtils.isBlank(namespaces[i])) {
- logger.warning("No value defined for namespace environment variable `"
- + envVar + "`");
+ logger.warning("No value defined for namespace environment variable `" + envVar + "`");
}
}
}
@@ -380,30 +404,22 @@ public static String[] getNamespaceOrUseDefault(
/**
* Returns the public URL of the given service
*
- * @param openShiftClient
- * the OpenShiftClient to use
- * @param defaultProtocolText
- * the protocol text part of a URL such as http://
- * @param namespace
- * the Kubernetes namespace
- * @param serviceName
- * the service name
+ * @param openShiftClient the OpenShiftClient to use
+ * @param defaultProtocolText the protocol text part of a URL such as
+ * http://
+ * @param namespace the Kubernetes namespace
+ * @param serviceName the service name
* @return the external URL of the service
*/
- public static String getExternalServiceUrl(OpenShiftClient openShiftClient,
- String defaultProtocolText, String namespace, String serviceName) {
+ public static String getExternalServiceUrl(OpenShiftClient openShiftClient, String defaultProtocolText,
+ String namespace, String serviceName) {
if (namespace != null && serviceName != null) {
try {
- RouteList routes = openShiftClient.routes()
- .inNamespace(namespace).list();
+ RouteList routes = openShiftClient.routes().inNamespace(namespace).list();
for (Route route : routes.getItems()) {
RouteSpec spec = route.getSpec();
- if (spec != null
- && spec.getTo() != null
- && "Service".equalsIgnoreCase(spec.getTo()
- .getKind())
- && serviceName.equalsIgnoreCase(spec.getTo()
- .getName())) {
+ if (spec != null && spec.getTo() != null && "Service".equalsIgnoreCase(spec.getTo().getKind())
+ && serviceName.equalsIgnoreCase(spec.getTo().getName())) {
String host = spec.getHost();
if (host != null && host.length() > 0) {
if (spec.getTls() != null) {
@@ -414,13 +430,12 @@ public static String getExternalServiceUrl(OpenShiftClient openShiftClient,
}
}
} catch (Exception e) {
- logger.log(Level.WARNING, "Could not find Route for service "
- + namespace + "/" + serviceName + ". " + e, e);
+ logger.log(Level.WARNING,
+ "Could not find Route for service " + namespace + "/" + serviceName + ". " + e, e);
}
// lets try the portalIP instead
try {
- Service service = openShiftClient.services()
- .inNamespace(namespace).withName(serviceName).get();
+ Service service = openShiftClient.services().inNamespace(namespace).withName(serviceName).get();
if (service != null) {
ServiceSpec spec = service.getSpec();
if (spec != null) {
@@ -431,8 +446,8 @@ public static String getExternalServiceUrl(OpenShiftClient openShiftClient,
}
}
} catch (Exception e) {
- logger.log(Level.WARNING, "Could not find Route for service "
- + namespace + "/" + serviceName + ". " + e, e);
+ logger.log(Level.WARNING,
+ "Could not find Route for service " + namespace + "/" + serviceName + ". " + e, e);
}
}
@@ -443,14 +458,11 @@ public static String getExternalServiceUrl(OpenShiftClient openShiftClient,
/**
* Calculates the external URL to access Jenkins
*
- * @param namespace
- * the namespace Jenkins is runing inside
- * @param openShiftClient
- * the OpenShift client
+ * @param namespace the namespace Jenkins is running inside
+ * @param openShiftClient the OpenShift client
* @return the external URL to access Jenkins
*/
- public static String getJenkinsURL(OpenShiftClient openShiftClient,
- String namespace) {
+ public static String getJenkinsURL(OpenShiftClient openShiftClient, String namespace) {
// if the user has explicitly configured the jenkins root URL, use it
String rootUrl = Jenkins.getInstance().getRootUrl();
if (StringUtils.isNotEmpty(rootUrl)) {
@@ -461,8 +473,7 @@ public static String getJenkinsURL(OpenShiftClient openShiftClient,
// the service/route
// TODO we will eventually make the service name configurable, with the
// default of "jenkins"
- return getExternalServiceUrl(openShiftClient, "http://", namespace,
- "jenkins");
+ return getExternalServiceUrl(openShiftClient, "http://", namespace, "jenkins");
}
public static String getNamespacefromPodInputs() {
@@ -472,15 +483,11 @@ public static String getNamespacefromPodInputs() {
/**
* Lazily creates the GitSource if need be then updates the git URL
*
- * @param buildConfig
- * the BuildConfig to update
- * @param gitUrl
- * the URL to the git repo
- * @param ref
- * the git ref (commit/branch/etc) for the build
+ * @param buildConfig the BuildConfig to update
+ * @param gitUrl the URL to the git repo
+ * @param ref the git ref (commit/branch/etc) for the build
*/
- public static void updateGitSourceUrl(BuildConfig buildConfig,
- String gitUrl, String ref) {
+ public static void updateGitSourceUrl(BuildConfig buildConfig, String gitUrl, String ref) {
BuildConfigSpec spec = buildConfig.getSpec();
if (spec == null) {
spec = new BuildConfigSpec();
@@ -502,26 +509,22 @@ public static void updateGitSourceUrl(BuildConfig buildConfig,
}
public static void updateOpenShiftBuildPhase(Build build, String phase) {
- logger.log(FINE, "setting build to {0} in namespace {1}/{2}",
- new Object[] { phase, build.getMetadata().getNamespace(),
- build.getMetadata().getName() });
- getAuthenticatedOpenShiftClient().builds()
- .inNamespace(build.getMetadata().getNamespace())
- .withName(build.getMetadata().getName()).edit().editStatus()
- .withPhase(phase).endStatus().done();
+ String ns = build.getMetadata().getNamespace();
+ String name = build.getMetadata().getName();
+ logger.log(FINE, "setting build to {0} in namespace {1}/{2}", new Object[] { phase, ns, name });
+
+ BuildBuilder builder = new BuildBuilder(build).editStatus().withPhase(phase).endStatus();
+ getAuthenticatedOpenShiftClient().builds().inNamespace(ns).withName(name).edit(b -> builder.build());
}
/**
* Maps a Jenkins Job name to an ObjectShift BuildConfig name
*
* @return the namespaced name for the BuildConfig
- * @param jobName
- * the job to associate to a BuildConfig name
- * @param namespace
- * the default namespace that Jenkins is running inside
+ * @param jobName the job to associate to a BuildConfig name
+ * @param namespace the default namespace that Jenkins is running inside
*/
- public static NamespaceName buildConfigNameFromJenkinsJobName(
- String jobName, String namespace) {
+ public static NamespaceName buildConfigNameFromJenkinsJobName(String jobName, String namespace) {
// TODO lets detect the namespace separator in the jobName for cases
// where a jenkins is used for
// BuildConfigs in multiple namespaces?
@@ -548,35 +551,28 @@ public static long parseTimestamp(String timestamp) {
return dateFormatter.parseMillis(timestamp);
}
- public static boolean isResourceWithoutStateEqual(HasMetadata oldObj,
- HasMetadata newObj) {
+ public static boolean isResourceWithoutStateEqual(HasMetadata oldObj, HasMetadata newObj) {
try {
- byte[] oldDigest = MessageDigest.getInstance("MD5").digest(
- dumpWithoutRuntimeStateAsYaml(oldObj).getBytes(
- StandardCharsets.UTF_8));
- byte[] newDigest = MessageDigest.getInstance("MD5").digest(
- dumpWithoutRuntimeStateAsYaml(newObj).getBytes(
- StandardCharsets.UTF_8));
+ byte[] oldDigest = MessageDigest.getInstance("MD5")
+ .digest(dumpWithoutRuntimeStateAsYaml(oldObj).getBytes(StandardCharsets.UTF_8));
+ byte[] newDigest = MessageDigest.getInstance("MD5")
+ .digest(dumpWithoutRuntimeStateAsYaml(newObj).getBytes(StandardCharsets.UTF_8));
return Arrays.equals(oldDigest, newDigest);
} catch (NoSuchAlgorithmException | JsonProcessingException e) {
throw new RuntimeException(e);
}
}
- public static String dumpWithoutRuntimeStateAsYaml(HasMetadata obj)
- throws JsonProcessingException {
+ public static String dumpWithoutRuntimeStateAsYaml(HasMetadata obj) throws JsonProcessingException {
ObjectMapper statelessMapper = new ObjectMapper(new YAMLFactory());
- statelessMapper.addMixInAnnotations(ObjectMeta.class,
- ObjectMetaMixIn.class);
- statelessMapper.addMixInAnnotations(ReplicationController.class,
- StatelessReplicationControllerMixIn.class);
+ statelessMapper.addMixInAnnotations(ObjectMeta.class, ObjectMetaMixIn.class);
+ statelessMapper.addMixInAnnotations(ReplicationController.class, StatelessReplicationControllerMixIn.class);
return statelessMapper.writeValueAsString(obj);
}
public static boolean isCancellable(BuildStatus buildStatus) {
String phase = buildStatus.getPhase();
- return phase.equals(NEW) || phase.equals(PENDING)
- || phase.equals(RUNNING);
+ return phase.equals(NEW) || phase.equals(PENDING) || phase.equals(RUNNING);
}
public static boolean isNew(BuildStatus buildStatus) {
@@ -584,8 +580,7 @@ public static boolean isNew(BuildStatus buildStatus) {
}
public static boolean isCancelled(BuildStatus status) {
- return status != null && status.getCancelled() != null
- && Boolean.TRUE.equals(status.getCancelled());
+ return status != null && status.getCancelled() != null && Boolean.TRUE.equals(status.getCancelled());
}
/**
@@ -626,8 +621,7 @@ public static String getAnnotation(HasMetadata resource, String name) {
return null;
}
- public static void addAnnotation(HasMetadata resource, String name,
- String value) {
+ public static void addAnnotation(HasMetadata resource, String name, String value) {
ObjectMeta metadata = resource.getMetadata();
if (metadata == null) {
metadata = new ObjectMeta();
@@ -657,12 +651,11 @@ public static String getName(HasMetadata resource) {
return null;
}
- protected static OpenShiftClient getOpenshiftClient() {
- return getAuthenticatedOpenShiftClient();
- }
+ protected static OpenShiftClient getOpenshiftClient() {
+ return getAuthenticatedOpenShiftClient();
+ }
- abstract class StatelessReplicationControllerMixIn extends
- ReplicationController {
+ abstract class StatelessReplicationControllerMixIn extends ReplicationController {
@JsonIgnore
private ReplicationControllerStatus status;
diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/PipelineJobListener.java b/src/main/java/io/fabric8/jenkins/openshiftsync/PipelineJobListener.java
index b1ca9a776..1a3e9d8e4 100644
--- a/src/main/java/io/fabric8/jenkins/openshiftsync/PipelineJobListener.java
+++ b/src/main/java/io/fabric8/jenkins/openshiftsync/PipelineJobListener.java
@@ -167,7 +167,7 @@ private void upsertItemGroup(ItemGroup itemGroup) {
private void upsertWorkflowJob(WorkflowJob job) {
BuildConfigProjectProperty property = buildConfigProjectForJob(job);
- if (property != null && (!BuildConfigWatcher.isDeleteInProgress(property.getNamespace() + property.getName()))) {
+ if (property != null && (!BuildConfigManager.isDeleteInProgress(property.getNamespace() + property.getName()))) {
logger.info("Upsert WorkflowJob " + job.getName() + " to BuildConfig: " + property.getNamespace() + "/" + property.getName() + " in OpenShift");
upsertBuildConfigForJob(job, property);
}
diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/PodTemplateUtils.java b/src/main/java/io/fabric8/jenkins/openshiftsync/PodTemplateUtils.java
index 14a4dbe1d..bc084a24c 100644
--- a/src/main/java/io/fabric8/jenkins/openshiftsync/PodTemplateUtils.java
+++ b/src/main/java/io/fabric8/jenkins/openshiftsync/PodTemplateUtils.java
@@ -1,6 +1,25 @@
package io.fabric8.jenkins.openshiftsync;
+import static io.fabric8.jenkins.openshiftsync.Constants.IMAGESTREAM_AGENT_LABEL;
+import static io.fabric8.jenkins.openshiftsync.Constants.IMAGESTREAM_AGENT_LABEL_VALUE;
+import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getAuthenticatedOpenShiftClient;
+import static java.util.logging.Level.FINE;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+import org.csanchez.jenkins.plugins.kubernetes.KubernetesCloud;
+import org.csanchez.jenkins.plugins.kubernetes.PodTemplate;
+import org.csanchez.jenkins.plugins.kubernetes.PodVolumes;
+
import com.thoughtworks.xstream.XStreamException;
+
import hudson.util.XStream2;
import io.fabric8.kubernetes.api.model.ConfigMap;
import io.fabric8.kubernetes.api.model.ObjectMeta;
@@ -11,411 +30,477 @@
import io.fabric8.openshift.api.model.ImageStreamTag;
import io.fabric8.openshift.api.model.TagReference;
import jenkins.model.Jenkins;
-import org.csanchez.jenkins.plugins.kubernetes.KubernetesCloud;
-import org.csanchez.jenkins.plugins.kubernetes.PodTemplate;
-import org.csanchez.jenkins.plugins.kubernetes.PodVolumes;
-import org.csanchez.jenkins.plugins.kubernetes.model.KeyValueEnvVar;
-import org.csanchez.jenkins.plugins.kubernetes.model.TemplateEnvVar;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.logging.Level;
-import java.util.logging.Logger;
-
-import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getAuthenticatedOpenShiftClient;
-import static java.util.logging.Level.FINE;
public class PodTemplateUtils {
- protected static final String cmType = "ConfigMap";
- protected static final String isType = "ImageStream";
- static final String IMAGESTREAM_TYPE = isType;
- private static final String PT_NAME_CLAIMED = "The event for %s | %s | %s that attempts to add the pod template %s was ignored because a %s previously created a pod template with the same name";
- private static final String PT_NOT_OWNED = "The event for %s | %s | %s that no longer includes the pod template %s was ignored because the type %s was associated with that pod template";
- private static final Logger LOGGER = Logger.getLogger(PodTemplateUtils.class.getName());
- private static final String PARAM_FROM_ENV_DESCRIPTION = "From OpenShift Build Environment Variable";
- static final String SLAVE_LABEL = "slave-label";
- private static final String SPECIAL_IST_PREFIX = "imagestreamtag:";
- private static final int SPECIAL_IST_PREFIX_IDX = SPECIAL_IST_PREFIX.length();
- protected static ConcurrentHashMap> trackedPodTemplates = new ConcurrentHashMap>();
- protected static ConcurrentHashMap podTemplateToApiType = new ConcurrentHashMap();
-
- protected static boolean hasOneAndOnlyOneWithSomethingAfter(String str, String substr) {
- return str.contains(substr)
- && str.indexOf(substr) == str.lastIndexOf(substr)
- && str.indexOf(substr) < str.length();
- }
-
- public static PodTemplate podTemplateInit(String name, String image, String label) {
- LOGGER.info("Initializing PodTemplate: "+name);
- PodTemplate podTemplate = new PodTemplate(image, new ArrayList());
- // with the above ctor guarnateed to have 1 container
- // also still force our image as the special case "jnlp" container for
- // the KubernetesSlave;
- // attempts to use the "jenkinsci/jnlp-slave:alpine" image for a
- // separate jnlp container
- // have proved unsuccessful (could not access gihub.com for example)
- podTemplate.getContainers().get(0).setName("jnlp");
- // podTemplate.setInstanceCap(Integer.MAX_VALUE);
- podTemplate.setName(name);
- podTemplate.setLabel(label);
- podTemplate.setAlwaysPullImage(true);
- podTemplate.setCommand("");
- podTemplate.setArgs("${computer.jnlpmac} ${computer.name}");
- podTemplate.setRemoteFs("/tmp");
- String podName = System.getenv().get("HOSTNAME");
- if (podName != null) {
- Pod pod = getAuthenticatedOpenShiftClient().pods().withName(podName).get();
- if (pod != null) {
- podTemplate.setServiceAccount(pod.getSpec().getServiceAccountName());
- }
+ private static final String MAVEN_POD_TEMPLATE_NAME = "maven";
+ private static final String NODEJS_POD_TEMPLATE_NAME = "nodejs";
+ protected static final String CONFIGMAP = "ConfigMap";
+ protected static final String isType = "ImageStream";
+ static final String IMAGESTREAM_TYPE = isType;
+ private static final String PT_NAME_CLAIMED = "The event for %s | %s | %s that attempts to add the pod template %s was ignored because a %s previously created a pod template with the same name";
+ private static final String PT_NOT_OWNED = "The event for %s | %s | %s that no longer includes the pod template %s was ignored because the type %s was associated with that pod template";
+ private static final Logger LOGGER = Logger.getLogger(PodTemplateUtils.class.getName());
+ private static final String PARAM_FROM_ENV_DESCRIPTION = "From OpenShift Build Environment Variable";
+ static final String SLAVE_LABEL = "slave-label";
+ private static final String SPECIAL_IST_PREFIX = "imagestreamtag:";
+ private static final int SPECIAL_IST_PREFIX_IDX = SPECIAL_IST_PREFIX.length();
+ protected final static ConcurrentHashMap> trackedPodTemplates = new ConcurrentHashMap>();
+ protected static ConcurrentHashMap podTemplateToApiType = new ConcurrentHashMap();
+
+ protected static boolean hasOneAndOnlyOneWithSomethingAfter(String str, String substr) {
+ return str.contains(substr) && str.indexOf(substr) == str.lastIndexOf(substr)
+ && str.indexOf(substr) < str.length();
}
- return podTemplate;
- }
-
-
- public static void removePodTemplate(PodTemplate podTemplate) {
- KubernetesCloud kubeCloud = JenkinsUtils.getKubernetesCloud();
- if (kubeCloud != null) {
- LOGGER.info("Removing PodTemplate: " + podTemplate.getName());
- // NOTE - PodTemplate does not currently override hashCode, equals,
- // so
- // the KubernetsCloud.removeTemplate currently is broken;
- // kubeCloud.removeTemplate(podTemplate);
- List list = kubeCloud.getTemplates();
- Iterator iter = list.iterator();
- while (iter.hasNext()) {
- PodTemplate pt = iter.next();
- if (pt.getName().equals(podTemplate.getName())) {
- iter.remove();
+
+ public static PodTemplate podTemplateInit(String name, String image, String label) {
+ LOGGER.info("Initializing PodTemplate: " + name);
+ PodTemplate podTemplate = new PodTemplate(image, new ArrayList());
+ // with the above ctor guaranteed to have 1 container
+ // also still force our image as the special case "jnlp" container for
+ // the KubernetesSlave;
+ // attempts to use the "jenkinsci/jnlp-slave:alpine" image for a
+ // separate jnlp container
+ // have proved unsuccessful (could not access github.com for example)
+ podTemplate.getContainers().get(0).setName("jnlp");
+ // podTemplate.setInstanceCap(Integer.MAX_VALUE);
+ podTemplate.setName(name);
+ podTemplate.setLabel(label);
+ podTemplate.setAlwaysPullImage(true);
+ podTemplate.setCommand("");
+ podTemplate.setArgs("${computer.jnlpmac} ${computer.name}");
+ podTemplate.setRemoteFs("/tmp");
+ String podName = System.getenv().get("HOSTNAME");
+ if (podName != null) {
+ Pod pod = getAuthenticatedOpenShiftClient().pods().withName(podName).get();
+ if (pod != null) {
+ podTemplate.setServiceAccount(pod.getSpec().getServiceAccountName());
+ }
}
- }
- // now set new list back into cloud
- kubeCloud.setTemplates(list);
- try {
- // pedantic mvn:findbugs
- Jenkins jenkins = Jenkins.getInstance();
- if (jenkins != null)
- jenkins.save();
- } catch (IOException e) {
- LOGGER.log(Level.SEVERE, "removePodTemplate", e);
- }
-
- if (LOGGER.isLoggable(Level.FINE)) {
- LOGGER.fine("PodTemplates now:");
- for (PodTemplate pt : kubeCloud.getTemplates()) {
- LOGGER.fine(pt.getName());
+ return podTemplate;
+ }
+
+ public static void removePodTemplate(PodTemplate podTemplate) {
+ KubernetesCloud kubeCloud = JenkinsUtils.getKubernetesCloud();
+ if (kubeCloud != null) {
+ String name = podTemplate.getName();
+ String namespace = podTemplate.getNamespace();
+ LOGGER.info("Removing PodTemplate: " + name + " in namespace: " + namespace);
+ // NOTE - PodTemplate does not currently override hashCode, equals,
+ // so the KubernetesCloud.removeTemplate currently is broken;
+ // kubeCloud.removeTemplate(podTemplate);
+ List list = kubeCloud.getTemplates();
+ Iterator iter = list.iterator();
+ while (iter.hasNext()) {
+ PodTemplate pt = iter.next();
+ if (pt.getName().equals(name)) {
+ iter.remove();
+ }
+ }
+ // now set new list back into cloud
+ kubeCloud.setTemplates(list);
+ try {
+ // pedantic mvn:findbugs
+ Jenkins jenkins = Jenkins.getInstance();
+ if (jenkins != null)
+ jenkins.save();
+ } catch (IOException e) {
+ LOGGER.log(Level.SEVERE, "removePodTemplate", e);
+ }
+
+ if (LOGGER.isLoggable(Level.FINE)) {
+ LOGGER.fine("PodTemplates now:");
+ for (PodTemplate pt : kubeCloud.getTemplates()) {
+ LOGGER.fine(pt.getName());
+ }
+ }
}
- }
}
- }
-
- public static synchronized List getPodTemplates() {
- KubernetesCloud kubeCloud = JenkinsUtils.getKubernetesCloud();
- if (kubeCloud != null) {
- // create copy of list for more flexiblity in loops
- ArrayList list = new ArrayList();
- list.addAll(kubeCloud.getTemplates());
- return list;
- } else {
- return null;
+
+ public static synchronized List getPodTemplates() {
+ KubernetesCloud kubeCloud = JenkinsUtils.getKubernetesCloud();
+ List list = new ArrayList();
+ if (kubeCloud != null) {
+ // create copy of list for more flexibility in loops
+ list.addAll(kubeCloud.getTemplates());
+ }
+ return list;
}
- }
-
- public static synchronized boolean hasPodTemplate(PodTemplate incomingPod) {
- String name = incomingPod.getName();
- if (name == null)
- return false;
- String image = incomingPod.getImage();
- if (image == null)
- return false;
- KubernetesCloud kubeCloud = JenkinsUtils.getKubernetesCloud();
- if (kubeCloud != null) {
- List list = kubeCloud.getTemplates();
- for (PodTemplate pod : list) {
- if (name.equals(pod.getName()) && image.equals(pod.getImage()))
- return true;
- }
+
+ @SuppressWarnings("deprecation")
+ public static synchronized boolean hasPodTemplate(PodTemplate podTemplate) {
+ String name = podTemplate.getName();
+ String image = podTemplate.getImage();
+ if (name != null && image != null) {
+ KubernetesCloud kubeCloud = JenkinsUtils.getKubernetesCloud();
+ if (kubeCloud != null) {
+ List list = kubeCloud.getTemplates();
+ for (PodTemplate pod : list) {
+ if (name.equals(pod.getName()) && image.equals(pod.getImage()))
+ return true;
+ }
+ }
+ }
+ return false;
}
- return false;
- }
-
- public static synchronized void addPodTemplate(PodTemplate podTemplate) {
- // clear out existing template with same name; k8s plugin maintains
- // list, not map
- removePodTemplate(podTemplate);
- KubernetesCloud kubeCloud = JenkinsUtils.getKubernetesCloud();
- if (kubeCloud != null) {
- LOGGER.info("Adding PodTemplate: " + podTemplate.getName());
- kubeCloud.addTemplate(podTemplate);
- try {
- // pedantic mvn:findbugs
- Jenkins jenkins = Jenkins.getInstance();
- if (jenkins != null)
- jenkins.save();
- } catch (IOException e) {
- LOGGER.log(Level.SEVERE, "addPodTemplate", e);
- }
+
+ public static synchronized void addPodTemplate(PodTemplate podTemplate) {
+ // clear out existing template with same name; k8s plugin maintains
+ // list, not map
+ removePodTemplate(podTemplate);
+ KubernetesCloud kubeCloud = JenkinsUtils.getKubernetesCloud();
+ if (kubeCloud != null) {
+ LOGGER.info("Adding PodTemplate: " + podTemplate.getName());
+ kubeCloud.addTemplate(podTemplate);
+ try {
+ // pedantic mvn:findbugs
+ Jenkins jenkins = Jenkins.getInstance();
+ if (jenkins != null)
+ jenkins.save();
+ } catch (IOException e) {
+ LOGGER.log(Level.SEVERE, "addPodTemplate", e);
+ }
+ }
}
- }
-
- protected static void purgeTemplates(BaseWatcher baseWatcher, String type, String uid, String apiObjName, String namespace) {
- LOGGER.info("Purging PodTemplates for from Configmap with Uid "+uid);
- for (PodTemplate podTemplate : trackedPodTemplates.get(uid)) {
- // we should not have included any pod templates we did not
- // mark the type for, but we'll check just in case
- removePodTemplate(LOGGER, PT_NOT_OWNED, type, apiObjName, namespace, podTemplate);
+
+ protected static void purgeTemplates(String type, String uid, String apiObjName, String namespace) {
+ LOGGER.info("Purging PodTemplates for from Configmap with Uid " + uid);
+ for (PodTemplate podTemplate : trackedPodTemplates.get(uid)) {
+ // we should not have included any pod templates we did not
+ // mark the type for, but we'll check just in case
+ removePodTemplate(type, apiObjName, namespace, podTemplate);
+ }
+ trackedPodTemplates.remove(uid);
}
- trackedPodTemplates.remove(uid);
- }
-
- protected static void updateTrackedPodTemplatesMap(String uid, List finalSlaveList) {
- if (finalSlaveList != null && finalSlaveList.size() > 0)
- trackedPodTemplates.put(uid, finalSlaveList);
- }
-
- // Adds PodTemplate to the List correspoding to the ConfigMap of given uid
- protected static void trackPodTemplates(String uid, List podTemplatesToTrack) {
- trackedPodTemplates.put(uid, podTemplatesToTrack);
- }
-
- // Adds PodTemplate to the List correspoding to the ConfigMap of given uid and Deletes from Jenkins
- protected static List onlyTrackPodTemplate(BaseWatcher baseWatcher, String type, String apiObjName, String namespace, List podTemplates, PodTemplate podTemplate) {
- String name = podTemplate.getName();
- // we allow configmap overrides of maven and nodejs, but not imagestream ones
- // as they are less specific/defined wrt podTemplate fields
-
- if (isReservedPodTemplateName(name) && isType.equals(type))
- return null;
- // for imagestreams, if the core image has not changed, we avoid
- // the remove/add pod template churn and multiple imagestream events
- // come in for activity that does not affect the pod template
- if (type.equals(isType) && hasPodTemplate(podTemplate))
- return null;
- // once a CM or IS claims a name, it gets to keep it until it is remove or un-labeled
- String ret = podTemplateToApiType.putIfAbsent(name, type);
- // if not set, or previously set by an obj of the same type
- if (ret == null || ret.equals(type)) {
- removePodTemplate(podTemplate);
- podTemplates.add(podTemplate);
- } else {
- LOGGER.info(String.format(PT_NAME_CLAIMED, type, apiObjName, namespace, name, ret));
+
+ protected static void updateTrackedPodTemplatesMap(String uid, List finalSlaveList) {
+ if (finalSlaveList != null && finalSlaveList.size() > 0)
+ trackedPodTemplates.put(uid, finalSlaveList);
}
- return podTemplates;
- }
-
- // Adds PodTemplate from Jenkins
- protected static void addPodTemplate(BaseWatcher baseWatcher, String type, String apiObjName, String namespace, List podTemplates, PodTemplate podTemplate) {
- String name = podTemplate.getName();
- // we allow configmap overrides of maven and nodejs, but not imagestream ones
- // as they are less specific/defined wrt podTemplate fields
- if (apiObjName != null && namespace != null && podTemplates != null){
- if (isReservedPodTemplateName(name) && isType.equals(type)) {
- return;
- }
- String ret = podTemplateToApiType.putIfAbsent(name, type);
- if (ret == null || ret.equals(type)) {
- addPodTemplate(podTemplate);
- podTemplates.add(podTemplate);
- } else {
- LOGGER.info(String.format(PT_NAME_CLAIMED, type, apiObjName, namespace, name, ret));
- }
- } else {
- podTemplateToApiType.put(name, type);
- addPodTemplate(podTemplate);
+
+ // Adds PodTemplate to the List corresponding to the ConfigMap of
+ // given uid
+ protected static void trackPodTemplates(String uid, List podTemplatesToTrack) {
+ trackedPodTemplates.put(uid, podTemplatesToTrack);
}
- }
-
- // Delete a PodTemplate from Jenkins
- protected static void removePodTemplate(Logger LOGGER, String PT_NOT_OWNED, String type, String apiObjName, String namespace, PodTemplate podTemplate) {
- String name = podTemplate.getName();
- String t = podTemplateToApiType.get(name);
- if (t != null && t.equals(type)) {
- podTemplateToApiType.remove(name);
- removePodTemplate(podTemplate);
- } else {
- LOGGER.info(String.format(PT_NOT_OWNED, type, apiObjName, namespace, name, t));
+
+ // Adds PodTemplate to the List corresponding to the ConfigMap of
+ // given uid and Deletes from Jenkins
+ protected static List onlyTrackPodTemplate(String type, String apiObjName, String namespace,
+ List podTemplates, PodTemplate podTemplate) {
+ String name = podTemplate.getName();
+ // we allow configmap overrides of maven and nodejs, but not imagestream ones
+ // as they are less specific/defined wrt podTemplate fields
+
+ if (isReservedPodTemplateName(name) && isType.equals(type))
+ return null;
+ // for imagestreams, if the core image has not changed, we avoid
+ // the remove/add pod template churn and multiple imagestream events
+ // come in for activity that does not affect the pod template
+ if (type.equals(isType) && hasPodTemplate(podTemplate))
+ return null;
+ // once a CM or IS claims a name, it gets to keep it until it is removed or
+ // un-labeled
+ String ret = podTemplateToApiType.putIfAbsent(name, type);
+ // if not set, or previously set by an obj of the same type
+ if (ret == null || ret.equals(type)) {
+ removePodTemplate(podTemplate);
+ podTemplates.add(podTemplate);
+ } else {
+ LOGGER.info(String.format(PT_NAME_CLAIMED, type, apiObjName, namespace, name, ret));
+ }
+ return podTemplates;
}
- }
-
- protected static boolean isReservedPodTemplateName(String name) {
- if (name.equals("maven") || name.equals("nodejs"))
- return true;
- return false;
- }
-
- protected static List getPodTemplatesListFromImageStreams(ImageStream imageStream) {
- List results = new ArrayList();
- // for IS, since we can check labels, check there
- ObjectMeta metadata = imageStream.getMetadata();
- String isName = metadata.getName();
- if (hasSlaveLabelOrAnnotation(metadata.getLabels())) {
- ImageStreamStatus status = imageStream.getStatus();
- String repository = status.getDockerImageRepository();
- Map annotations = metadata.getAnnotations();
- PodTemplate podTemplate = podTemplateFromData(isName, repository, annotations);
- results.add(podTemplate);
+
+ // Adds PodTemplate from Jenkins
+ protected static void addPodTemplate(String type, String apiObjName, String namespace,
+ List podTemplates, PodTemplate podTemplate) {
+ String name = podTemplate.getName();
+ // we allow configmap overrides of maven and nodejs, but not imagestream ones
+ // as they are less specific/defined wrt podTemplate fields
+ if (apiObjName != null && namespace != null && podTemplates != null) {
+ if (isReservedPodTemplateName(name) && isType.equals(type)) {
+ LOGGER.info("PodTemplate " + name + " cannot be added because it has a reserved name...ignoring");
+ return;
+ }
+ String podTemplateAsXmlString = podTemplateToApiType.putIfAbsent(name, type);
+ if (podTemplateAsXmlString == null || podTemplateAsXmlString.equals(type)) {
+ addPodTemplate(podTemplate);
+ podTemplates.add(podTemplate);
+ } else {
+ LOGGER.info(String.format(PT_NAME_CLAIMED, type, apiObjName, namespace, name, podTemplateAsXmlString));
+ }
+ } else {
+ podTemplateToApiType.put(name, type);
+ addPodTemplate(podTemplate);
+ }
}
- results.addAll(extractPodTemplatesFromImageStreamTags(imageStream));
- return results;
- }
-
- protected static List extractPodTemplatesFromImageStreamTags(ImageStream imageStream) {
- // for slave-label, still check annotations
- // since we cannot create watches on ImageStream tags, we have to
- // traverse the tags and look for the slave label
- List results = new ArrayList();
- List tags = imageStream.getSpec().getTags();
- for (TagReference tagRef : tags) {
- addPodTemplateFromImageStreamTag(results, imageStream, tagRef);
+
+ // Delete a PodTemplate from Jenkins
+ protected static void removePodTemplate(String type, String apiObjName, String namespace, PodTemplate podTemplate) {
+ String name = podTemplate.getName();
+ String t = podTemplateToApiType.get(name);
+ if (t != null && t.equals(type)) {
+ podTemplateToApiType.remove(name);
+ removePodTemplate(podTemplate);
+ } else {
+ LOGGER.info(String.format(PT_NOT_OWNED, type, apiObjName, namespace, name, t));
+ }
}
- return results;
- }
-
- protected static void addPodTemplateFromImageStreamTag(List results, ImageStream imageStream, TagReference tagRef) {
- ObjectMeta metadata = imageStream.getMetadata();
- String ns = metadata.getNamespace();
- String isName = metadata.getName();
- ImageStreamTag tag = null;
- try {
- String tagName = isName + ":" + tagRef.getName();
- tag = OpenShiftUtils.getOpenshiftClient().imageStreamTags().inNamespace(ns).withName(tagName).get();
- } catch (Throwable t) {
- LOGGER.log(FINE, "addPodTemplateFromImageStreamTag", t);
+
+ protected static boolean isReservedPodTemplateName(String name) {
+ return (name.equals(MAVEN_POD_TEMPLATE_NAME) || name.equals(NODEJS_POD_TEMPLATE_NAME));
}
- // for ImageStreamTag (IST), we can't set labels directly, but can inherit, so
- // we check annotations (if ImageStreamTag directly updated) and then labels (if
- // inherited from imagestream)
- if (tag != null) {
- ObjectMeta tagMetadata = tag.getMetadata();
- Map tagAnnotations = tagMetadata.getAnnotations();
- String tagName = tagMetadata.getName();
- String tagImageReference = tag.getImage().getDockerImageReference();
- if (hasSlaveLabelOrAnnotation(tagAnnotations)) {
- results.add(podTemplateFromData(tagName, tagImageReference, tagAnnotations));
- } else {
- Map tagLabels = tagMetadata.getLabels();
- if (hasSlaveLabelOrAnnotation(tagLabels)) {
- results.add(podTemplateFromData(tagName, tagImageReference, tagLabels));
+
+ protected static List getPodTemplatesListFromImageStreams(ImageStream imageStream) {
+ List results = new ArrayList();
+ if (imageStream != null) {
+ // for IS, since we can check labels, check there
+ ObjectMeta metadata = imageStream.getMetadata();
+ String isName = metadata.getName();
+ if (hasSlaveLabelOrAnnotation(metadata.getLabels())) {
+ ImageStreamStatus status = imageStream.getStatus();
+ String repository = status.getDockerImageRepository();
+ Map annotations = metadata.getAnnotations();
+ PodTemplate podTemplate = podTemplateFromData(isName, repository, annotations);
+ results.add(podTemplate);
+ }
+ results.addAll(extractPodTemplatesFromImageStreamTags(imageStream));
}
- }
+ return results;
}
- }
-
- protected static PodTemplate podTemplateFromData(String name, String image, Map map) {
- // node, pod names cannot have colons
- String templateName = name.replaceAll(":", ".");
- String label = (map != null && map.containsKey(SLAVE_LABEL)) ? map.get(SLAVE_LABEL) : name;
- return podTemplateInit(templateName, image, label);
- }
-
- // podTemplatesFromConfigMap takes every key from a ConfigMap and tries to
- // create a PodTemplate from the contained
- // XML.
- public static List podTemplatesFromConfigMap(ConfigMapWatcher configMapWatcher, ConfigMap configMap) {
- List results = new ArrayList<>();
- Map data = configMap.getData();
-
- if (!configMapContainsSlave(configMap)) {
- return results;
+
+ protected static List extractPodTemplatesFromImageStreamTags(ImageStream imageStream) {
+ // for slave-label, still check annotations
+ // since we cannot create watches on ImageStream tags, we have to
+ // traverse the tags and look for the slave label
+ List results = new ArrayList();
+ List tags = imageStream.getSpec().getTags();
+ for (TagReference tagRef : tags) {
+ addPodTemplateFromImageStreamTag(results, imageStream, tagRef);
+ }
+ return results;
}
- XStream2 xStream2 = new XStream2();
-
- for (Map.Entry entry : data.entrySet()) {
- Object podTemplate;
- try {
- podTemplate = xStream2.fromXML(entry.getValue());
-
- String warningPrefix = "Content of key '" + entry.getKey()
- + "' in ConfigMap '"
- + configMap.getMetadata().getName();
- if (podTemplate instanceof PodTemplate) {
- PodTemplate pt = (PodTemplate) podTemplate;
-
- String image = pt.getImage();
- try {
- // if requested via special prefix, convert this images
- // entry field, if not already fully qualified, as if
- // it were an IST
- // IST of form [optional_namespace]/imagestreamname:tag
- // checks based on ParseImageStreamTagName in
- // https://github.com/openshift/origin/blob/master/pkg/image/apis/image/helper.go
- if (image.startsWith(SPECIAL_IST_PREFIX)) {
- image = image.substring(SPECIAL_IST_PREFIX_IDX);
- if (image.contains("@")) {
- LOGGER.warning(warningPrefix
- + " the presence of @ implies an image stream image, not an image stream tag, "
- + " so no ImageStreamTag to Docker image reference translation was performed.");
- } else {
- boolean hasNamespace = hasOneAndOnlyOneWithSomethingAfter(image, "/");
- boolean hasTag = hasOneAndOnlyOneWithSomethingAfter(image, ":");
- String namespace = getAuthenticatedOpenShiftClient().getNamespace();
- String isName = image;
- String newImage = null;
- if (hasNamespace) {
- String[] parts = image.split("/");
- namespace = parts[0];
- isName = parts[1];
+ protected static void addPodTemplateFromImageStreamTag(List results, ImageStream imageStream,
+ TagReference tagRef) {
+ ObjectMeta metadata = imageStream.getMetadata();
+ String ns = metadata.getNamespace();
+ String isName = metadata.getName();
+ ImageStreamTag tag = null;
+ try {
+ String tagName = isName + ":" + tagRef.getName();
+ tag = OpenShiftUtils.getOpenshiftClient().imageStreamTags().inNamespace(ns).withName(tagName).get();
+ } catch (Throwable t) {
+ LOGGER.log(FINE, "addPodTemplateFromImageStreamTag", t);
+ }
+ // for ImageStreamTag (IST), we can't set labels directly, but can inherit, so
+ // we check annotations (if ImageStreamTag directly updated) and then labels (if
+ // inherited from imagestream)
+ if (tag != null) {
+ ObjectMeta tagMetadata = tag.getMetadata();
+ Map tagAnnotations = tagMetadata.getAnnotations();
+ String tagName = tagMetadata.getName();
+ String tagImageReference = tag.getImage().getDockerImageReference();
+ if (hasSlaveLabelOrAnnotation(tagAnnotations)) {
+ results.add(podTemplateFromData(tagName, tagImageReference, tagAnnotations));
+ } else {
+ Map tagLabels = tagMetadata.getLabels();
+ if (hasSlaveLabelOrAnnotation(tagLabels)) {
+ results.add(podTemplateFromData(tagName, tagImageReference, tagLabels));
}
- if (hasTag) {
- ImageStreamTag ist = getAuthenticatedOpenShiftClient()
- .imageStreamTags()
- .inNamespace(namespace)
- .withName(isName).get();
- Image imageFromIst = ist.getImage();
- String dockerImageReference = imageFromIst.getDockerImageReference();
-
- if (ist != null && imageFromIst != null && dockerImageReference != null && dockerImageReference.length() > 0) {
- newImage = dockerImageReference;
- LOGGER.fine(String.format("Converting image ref %s as an imagestreamtag %s to fully qualified image %s", image, isName, newImage));
- } else {
- LOGGER.warning(warningPrefix
- + " used the 'imagestreamtag:' prefix in the image field, but the subsequent value, while a valid ImageStreamTag reference,"
- + " produced no valid ImageStreaTag upon lookup,"
- + " so no ImageStreamTag to Docker image reference translation was performed.");
- }
+ }
+ }
+ }
+
+ protected static PodTemplate podTemplateFromData(String name, String image, Map map) {
+ // node, pod names cannot have colons
+ String templateName = name.replaceAll(":", ".");
+ String label = (map != null && map.containsKey(SLAVE_LABEL)) ? map.get(SLAVE_LABEL) : name;
+ return podTemplateInit(templateName, image, label);
+ }
+
+ // podTemplatesFromConfigMap takes every key from a ConfigMap and tries to
+ // create a PodTemplate from the contained
+ // XML.
+ public static List podTemplatesFromConfigMap(ConfigMap configMap) {
+ List results = new ArrayList<>();
+ Map data = configMap.getData();
+
+ if (!configMapContainsSlave(configMap)) {
+ return results;
+ }
+
+ XStream2 xStream2 = new XStream2();
+
+ for (Map.Entry entry : data.entrySet()) {
+ Object podTemplate;
+ try {
+ podTemplate = xStream2.fromXML(entry.getValue());
+
+ String warningPrefix = "Content of key '" + entry.getKey() + "' in ConfigMap '"
+ + configMap.getMetadata().getName();
+ if (podTemplate instanceof PodTemplate) {
+ PodTemplate pt = (PodTemplate) podTemplate;
+
+ String image = pt.getImage();
+ try {
+ // if requested via special prefix, convert this images
+ // entry field, if not already fully qualified, as if
+ // it were an IST
+ // IST of form [optional_namespace]/imagestreamname:tag
+ // checks based on ParseImageStreamTagName in
+ // https://github.com/openshift/origin/blob/master/pkg/image/apis/image/helper.go
+ if (image.startsWith(SPECIAL_IST_PREFIX)) {
+ image = image.substring(SPECIAL_IST_PREFIX_IDX);
+ if (image.contains("@")) {
+ LOGGER.warning(warningPrefix
+ + " the presence of @ implies an image stream image, not an image stream tag, "
+ + " so no ImageStreamTag to Docker image reference translation was performed.");
+ } else {
+ boolean hasNamespace = hasOneAndOnlyOneWithSomethingAfter(image, "/");
+ boolean hasTag = hasOneAndOnlyOneWithSomethingAfter(image, ":");
+ String namespace = getAuthenticatedOpenShiftClient().getNamespace();
+ String isName = image;
+ String newImage = null;
+ if (hasNamespace) {
+ String[] parts = image.split("/");
+ namespace = parts[0];
+ isName = parts[1];
+ }
+ if (hasTag) {
+ ImageStreamTag ist = getAuthenticatedOpenShiftClient().imageStreamTags()
+ .inNamespace(namespace).withName(isName).get();
+ Image imageFromIst = ist.getImage();
+ String dockerImageReference = imageFromIst.getDockerImageReference();
+
+ if (ist != null && imageFromIst != null && dockerImageReference != null
+ && dockerImageReference.length() > 0) {
+ newImage = dockerImageReference;
+ LOGGER.fine(String.format(
+ "Converting image ref %s as an imagestreamtag %s to fully qualified image %s",
+ image, isName, newImage));
+ } else {
+ LOGGER.warning(warningPrefix
+ + " used the 'imagestreamtag:' prefix in the image field, but the subsequent value, while a valid ImageStreamTag reference,"
+                                            + " produced no valid ImageStreamTag upon lookup,"
+ + " so no ImageStreamTag to Docker image reference translation was performed.");
+ }
+ } else {
+ LOGGER.warning(warningPrefix
+ + " used the 'imagestreamtag:' prefix in the image field, but the subsequent value had no tag indicator,"
+ + " so no ImageStreamTag to Docker image reference translation was performed.");
+ }
+ if (newImage != null) {
+ LOGGER.fine("translated IST ref " + image + " to docker image ref " + newImage);
+ pt.getContainers().get(0).setImage(newImage);
+ }
+ }
+ }
+ } catch (Throwable t) {
+ if (LOGGER.isLoggable(FINE))
+ LOGGER.log(FINE, "podTemplateFromConfigMap", t);
+ }
+ results.add((PodTemplate) podTemplate);
} else {
- LOGGER.warning(warningPrefix
- + " used the 'imagestreamtag:' prefix in the image field, but the subsequent value had no tag indicator,"
- + " so no ImageStreamTag to Docker image reference translation was performed.");
+ LOGGER.warning(warningPrefix + "' is not a PodTemplate");
}
- if (newImage != null) {
- LOGGER.fine("translated IST ref " + image + " to docker image ref " + newImage);
- pt.getContainers().get(0).setImage(newImage);
+ } catch (XStreamException xse) {
+ LOGGER.warning(new IOException("Unable to read key '" + entry.getKey() + "' from ConfigMap '"
+ + configMap.getMetadata().getName() + "'", xse).getMessage());
+ } catch (Error e) {
+ LOGGER.warning(new IOException("Unable to read key '" + entry.getKey() + "' from ConfigMap '"
+ + configMap.getMetadata().getName() + "'", e).getMessage());
+ }
+ }
+
+ return results;
+ }
+
+ protected static boolean configMapContainsSlave(ConfigMap configMap) {
+ return hasSlaveLabelOrAnnotation(configMap.getMetadata().getLabels());
+ }
+
+ protected static boolean hasSlaveLabelOrAnnotation(Map map) {
+ return map != null && map.containsKey(IMAGESTREAM_AGENT_LABEL)
+ && map.get(IMAGESTREAM_AGENT_LABEL).equals(IMAGESTREAM_AGENT_LABEL_VALUE);
+ }
+
+ protected static void addAgents(List slaves, String type, String uid, String apiObjName,
+ String namespace) {
+ LOGGER.info("Adding PodTemplate(s) for " + namespace);
+ List finalSlaveList = new ArrayList();
+ for (PodTemplate podTemplate : slaves) {
+ addPodTemplate(type, apiObjName, namespace, finalSlaveList, podTemplate);
+ }
+ updateTrackedPodTemplatesMap(uid, finalSlaveList);
+ }
+
+ protected static void updateAgents(List slaves, String type, String uid, String apiObjName,
+ String namespace) {
+ LOGGER.info("Modifying PodTemplates");
+ boolean alreadyTracked = trackedPodTemplates.containsKey(uid);
+ boolean hasSlaves = slaves.size() > 0; // Configmap has podTemplates
+ if (alreadyTracked) {
+ if (hasSlaves) {
+                // Since the user could have changed the immutable image
+ // that a PodTemplate uses, we just
+ // recreate the PodTemplate altogether. This makes it so
+ // that any changes from within
+ // Jenkins is undone.
+
+ // Check if there are new PodTemplates added or removed to the configmap,
+ // if they are, add them to or remove them from trackedPodTemplates
+ List podTemplatesToTrack = new ArrayList();
+ purgeTemplates(type, uid, apiObjName, namespace);
+ for (PodTemplate pt : slaves) {
+ podTemplatesToTrack = PodTemplateUtils.onlyTrackPodTemplate(type, apiObjName, namespace,
+ podTemplatesToTrack, pt);
}
- }
+ updateTrackedPodTemplatesMap(uid, podTemplatesToTrack);
+ for (PodTemplate podTemplate : podTemplatesToTrack) {
+ // still do put here in case this is a new item from the last
+ // update on this ConfigMap/ImageStream
+ addPodTemplate(type, null, null, null, podTemplate);
+ }
+ } else {
+ // The user modified the configMap to no longer be a
+ // jenkins-slave.
+ purgeTemplates(type, uid, apiObjName, namespace);
}
- } catch (Throwable t) {
- if (LOGGER.isLoggable(FINE))
- LOGGER.log(FINE, "podTemplateFromConfigMap", t);
- }
- results.add((PodTemplate) podTemplate);
} else {
- LOGGER.warning(warningPrefix + "' is not a PodTemplate");
+ if (hasSlaves) {
+ List finalSlaveList = new ArrayList();
+ for (PodTemplate podTemplate : slaves) {
+ // The user modified the api obj to be a jenkins-slave
+ addPodTemplate(type, apiObjName, namespace, finalSlaveList, podTemplate);
+ }
+ updateTrackedPodTemplatesMap(uid, finalSlaveList);
+ }
}
- } catch (XStreamException xse) {
- LOGGER.warning(new IOException("Unable to read key '" + entry.getKey() + "' from ConfigMap '" + configMap.getMetadata().getName() + "'", xse).getMessage());
- } catch (Error e) {
- LOGGER.warning(new IOException("Unable to read key '" + entry.getKey() + "' from ConfigMap '" + configMap.getMetadata().getName() + "'", e).getMessage());
- }
}
- return results;
- }
+ protected static void deleteAgents(List slaves, String type, String uid, String apiObjName,
+ String namespace) {
+ if (trackedPodTemplates.containsKey(uid)) {
+ purgeTemplates(type, uid, apiObjName, namespace);
+ }
+ }
- protected static boolean configMapContainsSlave(ConfigMap configMap) {
- return hasSlaveLabelOrAnnotation(configMap.getMetadata().getLabels());
- }
+ protected static void addPodTemplateFromConfigMap(ConfigMap configMap) {
+ try {
+ String uid = configMap.getMetadata().getUid();
+ if (configMapContainsSlave(configMap) && !trackedPodTemplates.containsKey(uid)) {
+ List templates = podTemplatesFromConfigMap(configMap);
+ trackedPodTemplates.put(uid, templates);
+ for (PodTemplate podTemplate : templates) {
+ LOGGER.info("Adding PodTemplate {}" + podTemplate);
+ addPodTemplate(podTemplate);
+ }
+ }
+ } catch (Exception e) {
+ LOGGER.severe("Failed to update ConfigMap PodTemplates" + e);
+ }
+ }
- protected static boolean hasSlaveLabelOrAnnotation(Map map) {
- if (map != null)
- return map.containsKey("role")
- && map.get("role").equals("jenkins-slave");
- return false;
- }
}
diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/Resyncable.java b/src/main/java/io/fabric8/jenkins/openshiftsync/Resyncable.java
new file mode 100644
index 000000000..a90017681
--- /dev/null
+++ b/src/main/java/io/fabric8/jenkins/openshiftsync/Resyncable.java
@@ -0,0 +1,5 @@
+package io.fabric8.jenkins.openshiftsync;
+
+public interface Resyncable {
+ public long getResyncPeriodMilliseconds();
+}
diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/SecretClusterInformer.java b/src/main/java/io/fabric8/jenkins/openshiftsync/SecretClusterInformer.java
new file mode 100644
index 000000000..776f5c577
--- /dev/null
+++ b/src/main/java/io/fabric8/jenkins/openshiftsync/SecretClusterInformer.java
@@ -0,0 +1,140 @@
+/**
+ * Copyright (C) 2017 Red Hat, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.fabric8.jenkins.openshiftsync;
+
+import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_LABELS_SECRET_CREDENTIAL_SYNC;
+import static io.fabric8.jenkins.openshiftsync.Constants.VALUE_SECRET_SYNC;
+import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getInformerFactory;
+import static java.util.Collections.singletonMap;
+
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import io.fabric8.kubernetes.api.model.ObjectMeta;
+import io.fabric8.kubernetes.api.model.Secret;
+import io.fabric8.kubernetes.client.dsl.base.OperationContext;
+import io.fabric8.kubernetes.client.informers.ResourceEventHandler;
+import io.fabric8.kubernetes.client.informers.SharedIndexInformer;
+import io.fabric8.kubernetes.client.informers.SharedInformerFactory;
+
+public class SecretClusterInformer implements ResourceEventHandler, Lifecyclable {
+
+ private static final Logger LOGGER = LoggerFactory.getLogger(SecretClusterInformer.class.getName());
+
+ private final static ConcurrentHashMap trackedSecrets = new ConcurrentHashMap();
+
+ private SharedIndexInformer informer;
+ private Set namespaces;
+
+ public SecretClusterInformer(String[] namespaces) {
+ this.namespaces = new HashSet<>(Arrays.asList(namespaces));
+ }
+
+ public int getResyncPeriodMilliseconds() {
+ return 1_000 * GlobalPluginConfiguration.get().getSecretListInterval();
+ }
+
+ public void start() {
+ LOGGER.info("Starting cluster wide secret informer {} !!" + namespaces);
+ LOGGER.debug("listing Secret resources");
+ SharedInformerFactory factory = getInformerFactory();
+ Map labels = singletonMap(OPENSHIFT_LABELS_SECRET_CREDENTIAL_SYNC, VALUE_SECRET_SYNC);
+ OperationContext withLabels = new OperationContext().withLabels(labels);
+ this.informer = factory.sharedIndexInformerFor(Secret.class, withLabels, getResyncPeriodMilliseconds());
+ informer.addEventHandler(this);
+ factory.startAllRegisteredInformers();
+ LOGGER.info("Secret informer started for namespace: {}" + namespaces);
+// SecretList list = getOpenshiftClient().secrets().inNamespace(namespace).withLabels(labels).list();
+// onInit(list.getItems());
+ }
+
+ public void stop() {
+ LOGGER.info("Stopping informer {} !!" + namespaces);
+ if( this.informer != null ) {
+ this.informer.stop();
+ }
+ }
+
+
+ @Override
+ public void onAdd(Secret obj) {
+ LOGGER.debug("Secret informer received add event for: {}" + obj);
+ if (obj != null) {
+ ObjectMeta metadata = obj.getMetadata();
+ String namespace = metadata.getNamespace();
+ if (namespaces.contains(namespace)) {
+ String name = metadata.getName();
+ LOGGER.info("Secret informer received add event for: {}" + name);
+ SecretManager.insertOrUpdateCredentialFromSecret(obj);
+ } else {
+ LOGGER.debug("Received event for a namespace we are not watching: {} ... ignoring", namespace);
+ }
+ }
+ }
+
+ @Override
+ public void onUpdate(Secret oldObj, Secret newObj) {
+ LOGGER.debug("Secret informer received update event for: {} to: {}" + oldObj + newObj);
+ if (oldObj != null) {
+ ObjectMeta metadata = oldObj.getMetadata();
+ String namespace = metadata.getNamespace();
+ if (namespaces.contains(namespace)) {
+ String name = metadata.getName();
+ LOGGER.info("Secret informer received update event for: {}", name);
+ SecretManager.updateCredential(newObj);
+ } else {
+ LOGGER.debug("Received event for a namespace we are not watching: {} ... ignoring", namespace);
+ }
+ }
+ }
+
+ @Override
+ public void onDelete(Secret obj, boolean deletedFinalStateUnknown) {
+ LOGGER.debug("Secret informer received delete event for: {}", obj);
+ if (obj != null) {
+ ObjectMeta metadata = obj.getMetadata();
+ String namespace = metadata.getNamespace();
+ if (namespaces.contains(namespace)) {
+ String name = obj.getMetadata().getName();
+ LOGGER.info("Secret informer received delete event for: {}", name);
+ CredentialsUtils.deleteCredential(obj);
+ } else {
+ LOGGER.debug("Received event for a namespace we are not watching: {} ... ignoring", namespace);
+ }
+ }
+ }
+
+ private void onInit(List list) {
+ for (Secret secret : list) {
+ try {
+ if (SecretManager.validSecret(secret) && SecretManager.shouldProcessSecret(secret)) {
+ SecretManager.insertOrUpdateCredentialFromSecret(secret);
+ trackedSecrets.put(secret.getMetadata().getUid(), secret.getMetadata().getResourceVersion());
+ }
+ } catch (Exception e) {
+                LOGGER.error("Failed to update secret", e);
+ }
+ }
+ }
+
+}
diff --git a/src/main/java/io/fabric8/jenkins/openshiftsync/SecretInformer.java b/src/main/java/io/fabric8/jenkins/openshiftsync/SecretInformer.java
new file mode 100644
index 000000000..2318fb00d
--- /dev/null
+++ b/src/main/java/io/fabric8/jenkins/openshiftsync/SecretInformer.java
@@ -0,0 +1,121 @@
+/**
+ * Copyright (C) 2017 Red Hat, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package io.fabric8.jenkins.openshiftsync;
+
+import static io.fabric8.jenkins.openshiftsync.Constants.OPENSHIFT_LABELS_SECRET_CREDENTIAL_SYNC;
+import static io.fabric8.jenkins.openshiftsync.Constants.VALUE_SECRET_SYNC;
+import static io.fabric8.jenkins.openshiftsync.OpenShiftUtils.getInformerFactory;
+import static java.util.Collections.singletonMap;
+
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import io.fabric8.kubernetes.api.model.ObjectMeta;
+import io.fabric8.kubernetes.api.model.Secret;
+import io.fabric8.kubernetes.client.dsl.base.OperationContext;
+import io.fabric8.kubernetes.client.informers.ResourceEventHandler;
+import io.fabric8.kubernetes.client.informers.SharedIndexInformer;
+import io.fabric8.kubernetes.client.informers.SharedInformerFactory;
+
+public class SecretInformer implements ResourceEventHandler, Lifecyclable, Resyncable {
+
+ private static final Logger LOGGER = LoggerFactory.getLogger(SecretInformer.class.getName());
+
+ private final static ConcurrentHashMap