Merge pull request #2181 from zhbinary/separate-devops-installation
Make it possible to install DevOps core, CI, and CD separately
ks-ci-bot committed Mar 8, 2023
2 parents 736149d + c1a7e07 commit 265d588
Showing 8 changed files with 543 additions and 245 deletions.
5 changes: 5 additions & 0 deletions deploy/cluster-configuration.yaml
@@ -100,6 +100,11 @@ spec:
# resources: {}
devops: # (CPU: 0.47 Core, Memory: 8.6 G) Provide an out-of-the-box CI/CD system based on Jenkins, and automated workflow tools including Source-to-Image & Binary-to-Image.
enabled: false # Enable or disable the KubeSphere DevOps System.
ci:
enabled: false # Enable or disable the CI component.
cd:
enabled: false # Enable or disable the CD component.
type: argocd # CD engine; currently only argocd is supported.
# resources: {}
jenkinsMemoryLim: 4Gi # Jenkins memory limit.
jenkinsMemoryReq: 2Gi # Jenkins memory request.
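
With these new fields, CI and CD can be toggled independently of each other. As a rough sketch (an illustration only, not part of the diff; it assumes the ClusterConfiguration object is ks-installer in kubesphere-system, the same object the tasks below read and patch), enabling the CD component might look like:

  kubectl -n kubesphere-system patch cc ks-installer --type merge \
    -p '{"spec":{"devops":{"enabled":true,"cd":{"enabled":true,"type":"argocd"}}}}'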
4 changes: 2 additions & 2 deletions playbooks/devops.yaml
@@ -5,6 +5,6 @@
roles:
- kubesphere-defaults
- role: ks-devops
when:
- "status.devops is not defined or status.devops.status is not defined or status.devops.status != 'enabled'"
# when:
# - "status.devops is not defined or status.devops.status is not defined or status.devops.status != 'enabled'"
# - { role: ks-core, when: sonar_health is defined and sonar_health == true }
Binary file modified roles/ks-devops/files/ks-devops/charts/ks-devops-0.1.19.tgz
Binary file not shown.
31 changes: 31 additions & 0 deletions roles/ks-devops/tasks/cd.yaml
@@ -0,0 +1,31 @@
- name: ks-devops | Get Argo CD installation
shell: >
{{ bin_dir }}/kubectl get cc -n kubesphere-system ks-installer -o=jsonpath='{.spec.devops.argocd.installation}'
register: argocd_installation
ignore_errors: true

- name: ks-devops | Upgrading or installing Argo CD
args:
executable: /bin/bash
shell: |
{{ bin_dir }}/helm upgrade --install devops {{ kubesphere_dir }}/ks-devops/charts/argo-cd-4.4.0.tgz \
-n argocd --create-namespace --reuse-values \
-f {{ kubesphere_dir }}/ks-devops/argo-cd-values.yaml
register: argocd_upgrade_result
until: argocd_upgrade_result is succeeded
when: argocd_installation.stdout != "custom"
retries: 3
delay: 10

- name: ks-devops | Importing ks-devops cd status
shell: >
{{ bin_dir }}/kubectl patch cc ks-installer
--type merge
-p '{"status": {"devops": {"cd": "enabled", "enabledTime": "{{ lookup('pipe','date +%Y-%m-%dT%H:%M:%S%Z') }}"}}}'
-n kubesphere-system
register: cc_result
failed_when: "cc_result.stderr and 'Warning' not in cc_result.stderr"
until: cc_result is succeeded
retries: 5
delay: 3
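
These tasks install Argo CD (Helm release name devops) into the argocd namespace unless spec.devops.argocd.installation is set to "custom", then record the result on the installer object. A quick verification sketch (an illustration, not part of the diff; it assumes helm and kubectl are on the PATH and only re-reads what the tasks above create):

  helm status devops -n argocd
  kubectl -n kubesphere-system get cc ks-installer -o jsonpath='{.status.devops.cd}'   # expected: enabled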
229 changes: 229 additions & 0 deletions roles/ks-devops/tasks/ci.yaml
@@ -0,0 +1,229 @@
---

- name: Jenkins | Check jenkins status
shell: >
{{ bin_dir }}/kubectl get deploy -n kubesphere-devops-system ks-jenkins
register: jenkins_status
ignore_errors: True

# Get old config
- import_tasks: get_old_config.yaml
when:
- jenkins_status.rc == 0

# Uninstall update center
- name: ks-devops | Checking Jenkins update center status
shell: >
{{ bin_dir }}/kubectl get deploy -n kubesphere-devops-system uc-jenkins-update-center
register: jenkins_uc_status
failed_when: false

- import_tasks: uninstall_update_center.yaml
when:
- jenkins_uc_status.rc == 0

# SonarQube application
- block:
- name: ks-devops | Getting sonarqube host
shell: >
{{ bin_dir }}/kubectl get cm -n kubesphere-system kubesphere-config -o jsonpath='{.data.kubesphere\.yaml}' | grep "sonarQube:" -A 2 | grep "host" | awk '{print $2}'
register: sonarqube_host

- name: ks-devops | Getting sonarqube token
shell: >
{{ bin_dir }}/kubectl get cm -n kubesphere-system kubesphere-config -o jsonpath='{.data.kubesphere\.yaml}' | grep "sonarQube:" -A 2 | grep "token" | awk '{print $2}'
register: sonarqube_token
when:
- devops.sonarqube is not defined

- set_fact:
sonarQubeHost: "{{ sonarqube_host.stdout }}"
sonarQubeToken: "{{ sonarqube_token.stdout }}"
when:
- sonarqube_host is defined and sonarqube_host.stdout is defined and sonarqube_host.stdout != ""
- sonarqube_token is defined and sonarqube_token.stdout is defined and sonarqube_token.stdout != ""

- set_fact:
sonarQubeHost: "{{ devops.sonarqube.externalSonarUrl }}"
sonarQubeToken: "{{ devops.sonarqube.externalSonarToken }}"
when:
- devops.sonarqube is defined
- devops.sonarqube.externalSonarUrl is defined
- devops.sonarqube.externalSonarToken is defined

# DevOps application
- name: ks-devops | Getting KubeSphere JWT secret
register: ks_jwt_secret
shell: >
{{ bin_dir }}/kubectl get secret -n kubesphere-system kubesphere-secret -ojsonpath="{.data.secret}" | base64 -d
- name: ks-devops | Debug KubeSphere JWT secret
debug:
var: ks_jwt_secret

- name: ks-devops | Getting devops installation charts
copy:
src: "{{ item }}"
dest: "{{ kubesphere_dir }}/"
loop:
- ks-devops

- name: ks-devops | Checking existing PVC named ks-jenkins
shell: >
{{ bin_dir }}/kubectl -n kubesphere-devops-system get pvc ks-jenkins -o jsonpath='{.metadata.name}' --ignore-not-found
register: ks_jenkins_pvc

- name: ks-devops | Debug existing PVC named ks-jenkins
debug:
var: ks_jenkins_pvc

- name: ks-devops | Getting name of existing PVC named ks-jenkins
set_fact:
KS_JENKINS_PVC: "{{ ks_jenkins_pvc.stdout }}"

- name: ks-devops | Getting Kubernetes Node info
shell: |
kubectl get node -ojson | jq '.items[0].status.nodeInfo.containerRuntimeVersion'
register: container_runtime

- name: ks-devops | Setting container runtime of agent builder
set_fact:
builder_container_runtime: "{{ container_runtime.stdout is search('docker://') | ternary('docker', 'podman') }}"

- name: Get Argo CD namespace
shell: |
ns=$({{ bin_dir }}/kubectl get cc -n kubesphere-system ks-installer -o=jsonpath='{.spec.devops.argocd.namespace}')
echo -n ${ns:-argocd}
register: argocd_namespace
ignore_errors: true

- name: ks-devops | Creating manifests
template:
src: "{{ item.file }}.j2"
dest: "{{ kubesphere_dir }}/{{ item.path }}/{{ item.file }}"
with_items:
- { path: ks-devops, file: ks-devops-values.yaml }
- { path: ks-devops, file: argo-cd-values.yaml }

- name: ks-devops | Checking if ks-devops has installed
shell: |
# TODO Rename helm chart name
helm status devops -n kubesphere-devops-system
register: ks_devops_installed
failed_when: false

- name: ks-devops | Cleaning up resources before upgrading or installing ks-devops
import_tasks: cleanup.yaml
# When ks-devops is not installed, clean up some resources
when: ks_devops_installed is not defined or ks_devops_installed.rc != 0

- name: ks-devops | Checking ks-devops Helm Release
shell: >
{{ bin_dir }}/kubectl -n kubesphere-devops-system get secrets --field-selector=type=helm.sh/release.v1 | grep s2i-webhook-server-cert |wc -l
register: helm_release
ignore_errors: True

- name: ks-devops | Convert ks-devops resources to Helm-managed
args:
executable: /bin/bash
shell: >
RESOURCE=$({{ bin_dir }}/kubectl -n {{ item.ns }} get {{ item.kind }} {{ item.resource }} 2> /dev/null | head -n 1);
if [[ "$RESOURCE" != '' ]]; then
{{ bin_dir }}/kubectl -n {{ item.ns }} annotate --overwrite {{ item.kind }} {{ item.resource }} meta.helm.sh/release-name={{ item.release }};
{{ bin_dir }}/kubectl -n {{ item.ns }} annotate --overwrite {{ item.kind }} {{ item.resource }} meta.helm.sh/release-namespace=kubesphere-devops-system;
{{ bin_dir }}/kubectl -n {{ item.ns }} label --overwrite {{ item.kind }} {{ item.resource }} app.kubernetes.io/managed-by=Helm;
fi
loop:
- { ns: "kubesphere-devops-system", kind: "secrets", resource: "s2i-webhook-server-cert", release: "devops" }
- { ns: "kubesphere-devops-system", kind: "configmap", resource: "jenkins-casc-config", release: "devops" }
- { ns: "kubesphere-devops-system", kind: "configmap", resource: "ks-devops-agent", release: "devops" }
- { ns: "kubesphere-devops-system", kind: "clusterrole", resource: "manager-role", release: "devops" }
- { ns: "kubesphere-devops-system", kind: "clusterrole", resource: "proxy-role", release: "devops" }
- { ns: "kubesphere-devops-system", kind: "ClusterRoleBinding", resource: "manager-rolebinding", release: "devops" }
- { ns: "kubesphere-devops-system", kind: "ClusterRoleBinding", resource: "proxy-rolebinding", release: "devops" }
- { ns: "kubesphere-devops-system", kind: "service", resource: "s2ioperator-metrics-service", release: "devops" }
- { ns: "kubesphere-devops-system", kind: "service", resource: "s2i-trigger-service", release: "devops" }
- { ns: "kubesphere-devops-system", kind: "service", resource: "webhook-server-service", release: "devops" }
- { ns: "kubesphere-devops-system", kind: "service", resource: "s2ioperator-trigger-service", release: "devops" }
- { ns: "kubesphere-devops-system", kind: "StatefulSet", resource: "s2ioperator", release: "devops" }
- { ns: "kubesphere-devops-system", kind: "MutatingWebhookConfiguration", resource: "mutating-webhook-configuration", release: "devops" }
- { ns: "kubesphere-devops-system", kind: "ValidatingWebhookConfiguration", resource: "validating-webhook-configuration", release: "devops" }
- { ns: "kubesphere-devops-system", kind: "S2iBuilderTemplate", resource: "binary", release: "devops" }
- { ns: "kubesphere-devops-system", kind: "S2iBuilderTemplate", resource: "java", release: "devops" }
- { ns: "kubesphere-devops-system", kind: "S2iBuilderTemplate", resource: "nodejs", release: "devops" }
- { ns: "kubesphere-devops-system", kind: "S2iBuilderTemplate", resource: "tomcat", release: "devops" }
- { ns: "kubesphere-devops-system", kind: "S2iBuilderTemplate", resource: "python", release: "devops" }
when:
- helm_release.stderr == ""
- helm_release.stdout == "0"
register: source_state
failed_when: "source_state.stderr and 'NotFound' not in source_state.stderr"

- name: ks-devops | Stop the existing Jenkins
shell: |
{{ bin_dir }}/kubectl -n kubesphere-devops-system delete service ks-jenkins-agent --ignore-not-found
{{ bin_dir }}/kubectl -n kubesphere-devops-system delete service ks-jenkins --ignore-not-found
{{ bin_dir }}/kubectl -n kubesphere-devops-system scale deployment ks-jenkins --replicas=0
ignore_errors: True
when:
- helm_release.stderr == ""
- helm_release.stdout == "0"

- name: ks-devops | Upgrading or installing ks-devops
args:
executable: /bin/bash
shell: |
# Delete Job migrate because 'helm upgrade' will try to update immutable fields of Job, which is not allowed.
{{ bin_dir }}/kubectl delete job -n kubesphere-devops-system migrate --ignore-not-found
ks_devops_chart_version=0.1.19
charts_folder={{ kubesphere_dir }}/ks-devops/charts
ks_devops_chart=$charts_folder/ks-devops-$ks_devops_chart_version.tgz
# Create or update CRDs manually
tar xzvf $ks_devops_chart -C $charts_folder
{{ bin_dir }}/kubectl apply -f $charts_folder/ks-devops/crds
{{ bin_dir }}/kubectl apply -f $charts_folder/ks-devops/charts/s2i/crds
# Import the templates separately due to potential webhook issues
rm -rf s2i-templates && mkdir -p s2i-templates
helm template $charts_folder/ks-devops/charts/s2i/ \
-f {{ kubesphere_dir }}/ks-devops/ks-devops-values.yaml \
-s templates/binary.yaml \
-s templates/java.yaml \
-s templates/nodejs.yaml \
-s templates/python.yaml \
-s templates/tomcat.yaml > s2i-templates/templates.yaml
rm -rf $charts_folder/ks-devops/charts/s2i/templates/binary.yaml
rm -rf $charts_folder/ks-devops/charts/s2i/templates/java.yaml
rm -rf $charts_folder/ks-devops/charts/s2i/templates/nodejs.yaml
rm -rf $charts_folder/ks-devops/charts/s2i/templates/python.yaml
rm -rf $charts_folder/ks-devops/charts/s2i/templates/tomcat.yaml
{{ bin_dir }}/helm upgrade --install devops $ks_devops_chart \
--set jenkins.enabled=true -n kubesphere-devops-system \
-f {{ kubesphere_dir }}/ks-devops/ks-devops-values.yaml --wait
{{ bin_dir }}/kubectl apply -f s2i-templates/templates.yaml
register: devops_upgrade_result
until: devops_upgrade_result is succeeded
retries: 3
delay: 10

- name: ks-devops | Labelling devops namespaces as system workspace
shell: |
{{ bin_dir }}/kubectl label --overwrite namespaces {{ item }} kubesphere.io/workspace=system-workspace
loop:
- kubesphere-devops-worker

- name: ks-devops | Importing ks-devops ci status
shell: >
{{ bin_dir }}/kubectl patch cc ks-installer
--type merge
-p '{"status": {"devops": {"ci": "enabled", "enabledTime": "{{ lookup('pipe','date +%Y-%m-%dT%H:%M:%S%Z') }}"}}}'
-n kubesphere-system
register: cc_result
failed_when: "cc_result.stderr and 'Warning' not in cc_result.stderr"
until: cc_result is succeeded
retries: 5
delay: 3
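
Like the CD tasks, the CI flow finishes by patching the installer status. A rough post-install check (a sketch only; it inspects resources the tasks above install or patch, and assumes helm and kubectl are available on the control node):

  helm status devops -n kubesphere-devops-system                                      # the ks-devops release
  kubectl -n kubesphere-devops-system get deploy ks-jenkins                            # Jenkins deployment (installed with jenkins.enabled=true above)
  kubectl -n kubesphere-system get cc ks-installer -o jsonpath='{.status.devops.ci}'   # expected: enabled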