From b2c14a742fcba679a6fb1bbbf8f8ad5f1c062448 Mon Sep 17 00:00:00 2001 From: Omer Yahud Date: Tue, 9 Jun 2020 10:25:19 +0300 Subject: [PATCH] An upgrade of the SSP operator from version 1.0.22 to a higher version would cause duplicate CRDs due to a change in the CRD API group: $ oc get crds | grep "aggre\|labeller\|validator\|common" kubevirtcommontemplatesbundles.kubevirt.io 2020-06-03T14:07:02Z kubevirtcommontemplatesbundles.ssp.kubevirt.io 2020-06-04T10:26:17Z kubevirtmetricsaggregations.kubevirt.io 2020-06-03T14:07:02Z kubevirtmetricsaggregations.ssp.kubevirt.io 2020-06-04T10:26:17Z kubevirtnodelabellerbundles.kubevirt.io 2020-06-03T14:07:02Z kubevirtnodelabellerbundles.ssp.kubevirt.io 2020-06-04T10:26:17Z kubevirttemplatevalidators.kubevirt.io 2020-06-03T14:07:02Z kubevirttemplatevalidators.ssp.kubevirt.io 2020-06-04T10:26:17Z This PR introduces the following changes: The operator now manages the CRs in the ssp.kubevirt.io group only. When the operator is deployed and reconciliation starts, if any resources exist from a previous installation, the operator takes ownership of them. For example, KubevirtTemplateValidator.ssp.kubevirt.io takes ownership of all resources created by KubevirtTemplateValidator.kubevirt.io, if it existed. The following status fields are added to the CR to indicate a successful upgrade: status: operatorVersion: "v1.0.36" targetVersion: "v1.0.36" observedVersion: "" operatorVersion - States the version of the deployed operator targetVersion - States the desired version of the deployed resources observedVersion - States the version of the currently deployed resources An upgrade is complete when the Available condition is True and targetVersion == observedVersion (a short verification sketch follows the diff below). Signed-off-by: Omer Yahud --- _defaults.yml | 1 + build/csv-generator.sh | 10 +- deploy/operator.yaml | 2 + deploy/role.yaml | 4 + hack/make-manifests.sh | 6 +- roles/ClaimOwnership/tasks/main.yml | 16 +++ roles/KubevirtCircuitBreaker/tasks/main.yml | 2 +- .../filter_plugins/k8s_owned_by.py | 40 +++++++ .../tasks/main.yml | 99 ++++++++++++++--- .../KubevirtMetricsAggregation/tasks/main.yml | 66 ++++++++++++ roles/KubevirtNodeLabeller/tasks/main.yml | 100 ++++++++++++----- .../KubevirtTemplateValidator/tasks/main.yml | 102 ++++++++++++------ 12 files changed, 367 insertions(+), 81 deletions(-) create mode 100644 roles/ClaimOwnership/tasks/main.yml create mode 100755 roles/KubevirtCommonTemplatesBundle/filter_plugins/k8s_owned_by.py diff --git a/_defaults.yml b/_defaults.yml index 6c15bf0f..fcbe32d5 100644 --- a/_defaults.yml +++ b/_defaults.yml @@ -27,3 +27,4 @@ virt_launcher_tag: "{{ lookup('env','VIRT_LAUNCHER_TAG')| default('v0.21.0', tru validator_tag: "{{ lookup('env','VALIDATOR_TAG')| default('v0.6.6', true) }}" image_name_prefix: "{{ lookup('env','IMAGE_NAME_PREFIX')| default('', true) }}" templates_version: v0.11.2 +operator_version: "{{ lookup('env', 'OPERATOR_VERSION') }}" diff --git a/build/csv-generator.sh b/build/csv-generator.sh index 6ba0b5d2..ac6804b2 100755 --- a/build/csv-generator.sh +++ b/build/csv-generator.sh @@ -21,12 +21,13 @@ replace_env_var() { } help_text() { - echo "USAGE: csv-generator --csv-version= --namespace= --operator-image= [optional args]" + echo "USAGE: csv-generator --csv-version= --namespace= --operator-image= --operator-version= [optional args]" echo "" echo "ARGS:" echo " --csv-version: (REQUIRED) The version of the CSV file" echo " --namespace: (REQUIRED) The namespace set on the CSV file" echo " --operator-image: (REQUIRED) The operator container image 
to use in the CSV file" + echo " --operator-version: (REQUIRED) The version of the operator to use in the CSV file" echo " --watch-namespace: (OPTIONAL)" echo " --kvm-info-tag: (OPTIONAL)" echo " --validator-tag: (OPTIONAL)" @@ -41,6 +42,7 @@ help_text() { CSV_VERSION="" NAMESPACE="" OPERATOR_IMAGE="" +OPERATOR_VERSION="" # OPTIONAL ARGS WATCH_NAMESPACE="" @@ -67,6 +69,9 @@ while (( "$#" )); do --operator-image) OPERATOR_IMAGE=$VAL ;; + --operator-version) + OPERATOR_VERSION=$VAL + ;; --watch-namespace) WATCH_NAMESPACE=$VAL ;; @@ -101,7 +106,7 @@ while (( "$#" )); do esac done -if [ -z "$CSV_VERSION" ] || [ -z "$NAMESPACE" ] || [ -z "$OPERATOR_IMAGE" ]; then +if [ -z "$CSV_VERSION" ] || [ -z "$NAMESPACE" ] || [ -z "$OPERATOR_IMAGE" ] || [ -z "$OPERATOR_VERSION" ]; then echo "Error: Missing required arguments" help_text exit 1 @@ -114,6 +119,7 @@ cp ${MANIFESTS_GENERATED_CSV} ${TMP_FILE} sed -i "s/PLACEHOLDER_CSV_VERSION/${CSV_VERSION}/g" ${TMP_FILE} sed -i "s/namespace: placeholder/namespace: ${NAMESPACE}/g" ${TMP_FILE} sed -i "s|REPLACE_IMAGE|${OPERATOR_IMAGE}|g" ${TMP_FILE} +sed -i "s|REPLACE_VERSION|${OPERATOR_VERSION}|g" ${TMP_FILE} replace_env_var "WATCH_NAMESPACE" $WATCH_NAMESPACE replace_env_var "KVM_INFO_TAG" $KVM_INFO_TAG diff --git a/deploy/operator.yaml b/deploy/operator.yaml index b9281997..76c01075 100644 --- a/deploy/operator.yaml +++ b/deploy/operator.yaml @@ -50,3 +50,5 @@ spec: value: "" - name: OPERATOR_NAME value: "kubevirt-ssp-operator" + - name: OPERATOR_VERSION + value: REPLACE_VERSION diff --git a/deploy/role.yaml b/deploy/role.yaml index dbe3926b..a77ef9ce 100644 --- a/deploy/role.yaml +++ b/deploy/role.yaml @@ -16,6 +16,7 @@ rules: - patch - update - watch + - delete - apiGroups: - monitoring.coreos.com resources: @@ -26,6 +27,7 @@ rules: - list - patch - watch + - delete - apiGroups: - monitoring.coreos.com resources: @@ -67,6 +69,7 @@ rules: - list - patch - watch + - delete - apiGroups: - "" resources: @@ -81,6 +84,7 @@ rules: - patch - list - watch + - delete - apiGroups: - "" resources: diff --git a/hack/make-manifests.sh b/hack/make-manifests.sh index 30d4d122..77b653b4 100755 --- a/hack/make-manifests.sh +++ b/hack/make-manifests.sh @@ -12,7 +12,7 @@ CHANNEL="beta" CLUSTER_VERSIONED_DIR="cluster/${VERSION}" MANIFESTS_DIR="manifests/kubevirt-ssp-operator" MANIFESTS_VERSIONED_DIR="${MANIFESTS_DIR}/${TAG}" -IMAGE_PATH="quay.io/fromani/kubevirt-ssp-operator-container:latest" +IMAGE_PATH="quay.io/fromani/kubevirt-ssp-operator-container:${TAG}" HAVE_COURIER=0 if which operator-courier &> /dev/null; then @@ -39,11 +39,11 @@ done ( for MF in deploy/service_account.yaml deploy/role.yaml deploy/role_binding.yaml deploy/operator.yaml; do echo "---" - sed "s|REPLACE_IMAGE|${IMAGE_PATH}|" < ${MF} + sed "s|REPLACE_IMAGE|${IMAGE_PATH}|g ; s|REPLACE_VERSION|${TAG}|g" < ${MF} done ) > ${CLUSTER_VERSIONED_DIR}/kubevirt-ssp-operator.yaml -${BASEPATH}/../build/csv-generator.sh --csv-version=${VERSION} --namespace=placeholder --operator-image=REPLACE_IMAGE > ${MANIFESTS_VERSIONED_DIR}/kubevirt-ssp-operator.${TAG}.clusterserviceversion.yaml +${BASEPATH}/../build/csv-generator.sh --csv-version=${VERSION} --namespace=placeholder --operator-image=REPLACE_IMAGE --operator-version=REPLACE_VERSION > ${MANIFESTS_VERSIONED_DIR}/kubevirt-ssp-operator.${TAG}.clusterserviceversion.yaml # caution: operator-courier (as in 5a4852c) wants *one* entity per yaml file (e.g. 
it does NOT use safe_load_all) for CRD in $( ls deploy/crds/kubevirt_*crd.yaml ); do diff --git a/roles/ClaimOwnership/tasks/main.yml b/roles/ClaimOwnership/tasks/main.yml new file mode 100644 index 00000000..bcac2eed --- /dev/null +++ b/roles/ClaimOwnership/tasks/main.yml @@ -0,0 +1,16 @@ +--- +# tasks file for ClaimOwnership +- name: Claim Ownership + k8s: + state: present + resource_definition: + apiVersion: "{{ object.apiVersion }}" + kind: "{{ object.kind }}" + metadata: + name: "{{ object.metadata.name }}" + namespace: "{{ object.metadata.namespace }}" + ownerReferences: + - apiVersion: "{{ owner.apiVersion }}" + kind: "{{ owner.kind }}" + name: "{{ owner.metadata.name }}" + uid: "{{ owner.metadata.uid }}" \ No newline at end of file diff --git a/roles/KubevirtCircuitBreaker/tasks/main.yml b/roles/KubevirtCircuitBreaker/tasks/main.yml index d9b52974..6d133896 100644 --- a/roles/KubevirtCircuitBreaker/tasks/main.yml +++ b/roles/KubevirtCircuitBreaker/tasks/main.yml @@ -2,7 +2,7 @@ # tasks file for KubevirtCircuitBreaker - name: Extract the CR info set_fact: - cr_info: "{{ lookup('k8s', api_version='v1', kind=cr_kind, namespace=meta.namespace, resource_name=meta.name) | from_yaml }}" + cr_info: "{{ lookup('k8s', api_version='ssp.kubevirt.io/v1', kind=cr_kind, namespace=meta.namespace, resource_name=meta.name) | from_yaml }}" - name: Extract the disable info set_fact: is_paused: "{{ cr_info['metadata'].get('annotations', {})['kubevirt.io/operator.paused'] | default('false', true) | from_json }}" diff --git a/roles/KubevirtCommonTemplatesBundle/filter_plugins/k8s_owned_by.py b/roles/KubevirtCommonTemplatesBundle/filter_plugins/k8s_owned_by.py new file mode 100755 index 00000000..ebcc3bf5 --- /dev/null +++ b/roles/KubevirtCommonTemplatesBundle/filter_plugins/k8s_owned_by.py @@ -0,0 +1,40 @@ +from ansible.errors import AnsibleError + +class FilterModule(object): + def filters(self): + return { + 'k8s_owned_by': k8s_owned_by + } + +def k8s_owned_by(objects, owner): + owned = [] + + for obj in objects: + if object_owned_by(obj, owner): + owned.append(obj) + + return owned + +def object_owned_by(object, owner): + if owner is None: + raise AnsibleError('owner is empty') + if 'metadata' not in owner: + raise AnsibleError('owner is missing "metadata" field') + if 'uid' not in owner['metadata']: + raise AnsibleError('owner is missing "metadata.uid" field') + + if object is None: + raise AnsibleError('object is empty') + if 'metadata' not in object: + raise AnsibleError('object is missing "metadata" field') + if 'ownerReferences' not in object['metadata']: + return False + + ownerUID = owner['metadata']['uid'] + + if object['metadata']['ownerReferences'] is not None: + for ref in object['metadata']['ownerReferences']: + if ref['uid'] == ownerUID: + return True + + return False \ No newline at end of file diff --git a/roles/KubevirtCommonTemplatesBundle/tasks/main.yml b/roles/KubevirtCommonTemplatesBundle/tasks/main.yml index 732f758c..e8a0a861 100644 --- a/roles/KubevirtCommonTemplatesBundle/tasks/main.yml +++ b/roles/KubevirtCommonTemplatesBundle/tasks/main.yml @@ -1,46 +1,113 @@ --- +- name: Set operatorVersion and targetVersion + operator_sdk.util.k8s_status: + api_version: "{{ cr_info.apiVersion }}" + kind: "{{ cr_info.kind }}" + name: "{{ cr_info.metadata.name }}" + namespace: "{{ cr_info.metadata.namespace }}" + status: + operatorVersion: "{{ operator_version }}" + targetVersion: "{{ operator_version }}" + # tasks file for KubevirtCommonTemplatesBundle - name: Install VM templates k8s: 
state: present - namespace: "{{ meta.namespace }}" + namespace: "{{ cr_info.metadata.namespace }}" definition: "{{ item | from_yaml }}" + apply: yes with_items: "{{ lookup('file', 'common-templates-'+ version +'.yaml').split('\n---\n') | select('search', '(^|\n)[^#]') | list }}" register: ct_status +# Get all templates +- name: Fetching all templates + set_fact: + templates: "{{ lookup('k8s', api_version=ct_status.results[0].result.apiVersion, kind='template') }}" + +- name: Fetch old cr + block: + - set_fact: + old_cr: "{{ lookup('k8s', api_version='kubevirt.io/v1', kind='KubevirtCommonTemplatesBundle') }}" + - set_fact: + old_cr_exists: true + rescue: + - set_fact: + old_cr_exists: false + +- name: Filter for templates owned by the old cr + set_fact: + old_cr_templates: "{{ templates | k8s_owned_by(old_cr) }}" + when: "{{ old_cr_exists==true }}" + +# Inject ownerReferences +- name: Inject owner references for KubevirtCommonTemplatesBundle + include_role: + name: ClaimOwnership + vars: + object: "{{ item }}" + owner: "{{ cr_info }}" + when: "{{ old_cr_exists==true }}" + with_items: "{{ old_cr_templates }}" # Templates + - name: "Count all new templates in file" set_fact: new_templates: "{{ lookup('file', 'common-templates-'+ version +'.yaml').split('\n---\n') | select('search', '(^|\n)[^#]') | list | length }}" - - name: "Set label" set_fact: label: "template.kubevirt.io/version={{ version }}" - + - name: "Get all templates" set_fact: - deployed_templates_after: "{{ lookup('k8s', api_version=ct_status.results[0].result.apiVersion, kind='template', label_selector=label)|length }}" + deployed_templates_after: "{{ lookup('k8s', api_version=ct_status.results[0].result.apiVersion, kind='template', label_selector=label) | length }}" + +- name: "Set Available status" + set_fact: + available: "{{ true if new_templates <= deployed_templates_after else false }}" - name: Set progressing condition operator_sdk.util.k8s_status: - api_version: ssp.kubevirt.io/v1 - kind: KubevirtCommonTemplatesBundle - name: "{{ meta.name }}" - namespace: "{{ meta.namespace }}" + api_version: "{{ cr_info.apiVersion }}" + kind: "{{ cr_info.kind }}" + name: "{{ cr_info.metadata.name }}" + namespace: "{{ cr_info.metadata.namespace }}" conditions: - type: Progressing - status: "{{ 'True' if new_templates > deployed_templates_after else 'False' }}" + status: "False" reason: "progressing" - message: "Templates progressing." + message: "Templates progressing (deployed templates: {{ deployed_templates_after }}, desired deployed templates: {{ new_templates }})." - name: Set available condition operator_sdk.util.k8s_status: - api_version: ssp.kubevirt.io/v1 - kind: KubevirtCommonTemplatesBundle - name: "{{ meta.name }}" - namespace: "{{ meta.namespace }}" + api_version: "{{ cr_info.apiVersion }}" + kind: "{{ cr_info.kind }}" + name: "{{ cr_info.metadata.name }}" + namespace: "{{ cr_info.metadata.namespace }}" conditions: - type: Available - status: "{{ 'True' if new_templates <= deployed_templates_after else 'False' }}" + status: "True" reason: "available" - message: "Common templates available." + message: "Common templates available (deployed templates: {{ deployed_templates_after }}, desired deployed templates: {{ new_templates }})."
+ +- name: Set degraded condition + operator_sdk.util.k8s_status: + api_version: "{{ cr_info.apiVersion }}" + kind: "{{ cr_info.kind }}" + name: "{{ cr_info.metadata.name }}" + namespace: "{{ cr_info.metadata.namespace }}" + conditions: + - type: Degraded + status: "False" + reason: "degraded" + message: "Templates degraded (deployed templates: {{ deployed_templates_after }}, desired deployed templates: {{ new_templates }})." + +# Update observedVersion when the CR is Available to indicate a successful upgrade +- name: Set observedVersion + operator_sdk.util.k8s_status: + api_version: "{{ cr_info.apiVersion }}" + kind: "{{ cr_info.kind }}" + name: "{{ cr_info.metadata.name }}" + namespace: "{{ cr_info.metadata.namespace }}" + status: + observedVersion: "{{ operator_version }}" + when: "{{ available==true }}" diff --git a/roles/KubevirtMetricsAggregation/tasks/main.yml b/roles/KubevirtMetricsAggregation/tasks/main.yml index 30fb06ea..4fdb7e0c 100644 --- a/roles/KubevirtMetricsAggregation/tasks/main.yml +++ b/roles/KubevirtMetricsAggregation/tasks/main.yml @@ -1,9 +1,75 @@ --- +- name: Set operatorVersion and targetVersion + operator_sdk.util.k8s_status: + api_version: "{{ cr_info.apiVersion }}" + kind: "{{ cr_info.kind }}" + name: "{{ cr_info.metadata.name }}" + namespace: "{{ cr_info.metadata.namespace }}" + status: + operatorVersion: "{{ operator_version }}" + targetVersion: "{{ operator_version }}" + # tasks file for KubevirtMetricsAggregation - name: Install VMI count aggregation rule k8s: state: present namespace: "{{ meta.namespace }}" definition: "{{ item | from_yaml }}" + apply: yes with_items: "{{ lookup('template', 'aggregation-rule-vmi-count.yaml.j2').split('\n---\n') | select('search', '(^|\n)[^#]') |list }}" + register: promrules + +- name: Inject owner references for KubevirtMetricsAggregation + include_role: + name: ClaimOwnership + vars: + object: "{{ item.result }}" + owner: "{{ cr_info }}" + with_items: "{{ promrules.results }}" + +- name: Set available condition + operator_sdk.util.k8s_status: + api_version: "{{ cr_info.apiVersion }}" + kind: "{{ cr_info.kind }}" + name: "{{ cr_info.metadata.name }}" + namespace: "{{ cr_info.metadata.namespace }}" + conditions: + - type: Available + status: "True" + reason: "available" + message: "KubevirtMetricsAggregation is available."
+ +# There is no logic for the Progressing/Degraded conditions for KubevirtMetricsAggregation +# so using these constant conditions +- name: Set progressing condition + operator_sdk.util.k8s_status: + api_version: "{{ cr_info.apiVersion }}" + kind: "{{ cr_info.kind }}" + name: "{{ cr_info.metadata.name }}" + namespace: "{{ cr_info.metadata.namespace }}" + conditions: + - type: Progressing + status: "False" + reason: "progressing" + message: "KubevirtMetricsAggregation progressing" + +- name: Set degraded condition + operator_sdk.util.k8s_status: + api_version: "{{ cr_info.apiVersion }}" + kind: "{{ cr_info.kind }}" + name: "{{ cr_info.metadata.name }}" + namespace: "{{ cr_info.metadata.namespace }}" + conditions: + - type: Degraded + status: "False" + reason: "degraded" + message: "KubevirtMetricsAggregation degraded" +- name: Set observedVersion + operator_sdk.util.k8s_status: + api_version: "{{ cr_info.apiVersion }}" + kind: "{{ cr_info.kind }}" + name: "{{ cr_info.metadata.name }}" + namespace: "{{ cr_info.metadata.namespace }}" + status: + observedVersion: "{{ operator_version }}" diff --git a/roles/KubevirtNodeLabeller/tasks/main.yml b/roles/KubevirtNodeLabeller/tasks/main.yml index 32898dc2..f0a6d810 100644 --- a/roles/KubevirtNodeLabeller/tasks/main.yml +++ b/roles/KubevirtNodeLabeller/tasks/main.yml @@ -1,65 +1,107 @@ --- +- name: Set operatorVersion and targetVersion + operator_sdk.util.k8s_status: + api_version: "{{ cr_info.apiVersion }}" + kind: "{{ cr_info.kind }}" + name: "{{ cr_info.metadata.name }}" + namespace: "{{ cr_info.metadata.namespace }}" + status: + operatorVersion: "{{ operator_version }}" + targetVersion: "{{ operator_version }}" + - name: Create the node labeller roles k8s: state: present definition: "{{ item | from_yaml }}" + apply: yes with_items: "{{ lookup('template', 'kubevirt-node-labeller-roles.yaml.j2').split('\n---\n') | select('search', '(^|\n)[^#]') | list }}" + register: roles - name: Create the node labeller daemon set k8s: state: present definition: "{{ lookup('template', 'kubevirt-node-labeller-ds.yaml.j2') | from_yaml }}" + apply: yes register: nl +# Actively inject owner references in order to adopt existing resources during an upgrade +- name: Inject owner references for KubevirtNodeLabellerBundle + include_role: + name: ClaimOwnership + vars: + object: "{{ item }}" + owner: "{{ cr_info }}" + with_list: + - "{{ roles.results[0].result }}" # ServiceAccount + - "{{ roles.results[4].result }}" # ConfigMap + - "{{ nl.result }}" # DaemonSet + - name: "Refresh node-labeller var" k8s: state: present definition: "{{ lookup('k8s', kind=nl.result.kind, namespace=nl.result.metadata.namespace, resource_name=nl.result.metadata.name) | from_yaml }}" register: nl_status +- name: "Set Progressing status" + set_fact: + progressing: "{{ true if nl_status.result.status.desiredNumberScheduled != nl_status.result.status.numberReady else false }}" + +- name: "Set Available status" + set_fact: + available: "{{ true if nl_status.result.status.desiredNumberScheduled == nl_status.result.status.numberReady else false }}" + +- name: "Set Degraded status" + set_fact: + degraded: "{{ true if nl_status.result.status.desiredNumberScheduled != nl_status.result.status.numberReady else false }}" + - name: Set progressing condition operator_sdk.util.k8s_status: - api_version: ssp.kubevirt.io/v1 - kind: KubevirtNodeLabellerBundle - name: "{{ meta.name }}" - namespace: "{{ meta.namespace }}" + api_version: "{{ cr_info.apiVersion }}" + kind: "{{ cr_info.kind }}" + name: "{{ 
cr_info.metadata.name }}" + namespace: "{{ cr_info.metadata.namespace }}" conditions: - type: Progressing - status: "{{ 'True' if nl_status.result.status.currentNumberScheduled != nl_status.result.status.numberReady else 'False' }}" + status: "{{ 'True' if progressing else 'False' }}" reason: "progressing" - message: "Node-labeller is progressing." - -- name: "Wait for the node-labeller to start" - k8s_info: - api_version: v1 - kind: "{{ nl.result.kind }}" - name: "{{ nl.result.metadata.name }}" - namespace: "{{ nl.result.metadata.namespace }}" - register: nl_status - delay: 10 - retries: 300 - until: nl_status.resources[0].status.currentNumberScheduled == nl_status.resources[0].status.numberReady | default(false) + message: "Node-labeller is progressing (ready pods: {{ nl_status.result.status.numberReady }}, desired pods: {{ nl_status.result.status.desiredNumberScheduled }})." - name: Set available condition operator_sdk.util.k8s_status: - api_version: ssp.kubevirt.io/v1 - kind: KubevirtNodeLabellerBundle - name: "{{ meta.name }}" - namespace: "{{ meta.namespace }}" + api_version: "{{ cr_info.apiVersion }}" + kind: "{{ cr_info.kind }}" + name: "{{ cr_info.metadata.name }}" + namespace: "{{ cr_info.metadata.namespace }}" conditions: - type: Available - status: "{{ 'True' if nl_status.resources[0].status.currentNumberScheduled > 0 and nl_status.resources[0].status.currentNumberScheduled == nl_status.resources[0].status.numberReady else 'False' }}" + status: "{{ 'True' if available else 'False' }}" reason: "available" - message: "Node-labeller is available." + message: "Node-labeller is available (ready pods: {{ nl_status.result.status.numberReady }}, desired pods: {{ nl_status.result.status.desiredNumberScheduled }})." - name: Set degraded condition operator_sdk.util.k8s_status: - api_version: ssp.kubevirt.io/v1 - kind: KubevirtNodeLabellerBundle - name: "{{ meta.name }}" - namespace: "{{ meta.namespace }}" + api_version: "{{ cr_info.apiVersion }}" + kind: "{{ cr_info.kind }}" + name: "{{ cr_info.metadata.name }}" + namespace: "{{ cr_info.metadata.namespace }}" conditions: - type: Degraded - status: "{{ 'True' if nl_status.resources[0].status.currentNumberScheduled == 0 or nl_status.resources[0].status.currentNumberScheduled != nl_status.resources[0].status.numberReady else 'False' }}" + status: "{{ 'True' if degraded else 'False' }}" reason: "degraded" - message: "Node-labeller is degraded." + message: "Node-labeller is degraded (ready pods: {{ nl_status.result.status.numberReady }}, desired pods: {{ nl_status.result.status.desiredNumberScheduled }})." 
+ +# Update observedVersion when the CR is Available to indicate a successful upgrade +- name: Set observedVersion + operator_sdk.util.k8s_status: + api_version: "{{ cr_info.apiVersion }}" + kind: "{{ cr_info.kind }}" + name: "{{ cr_info.metadata.name }}" + namespace: "{{ cr_info.metadata.namespace }}" + status: + observedVersion: "{{ operator_version }}" + when: "{{ available==true }}" + +- name: Requeue if KubevirtNodeLabellerBundle not available + fail: + msg: "Requeuing until KubevirtNodeLabellerBundle is available" + when: nl_status.result.status.desiredNumberScheduled != nl_status.result.status.numberReady diff --git a/roles/KubevirtTemplateValidator/tasks/main.yml b/roles/KubevirtTemplateValidator/tasks/main.yml index bfeec793..54a4b65f 100644 --- a/roles/KubevirtTemplateValidator/tasks/main.yml +++ b/roles/KubevirtTemplateValidator/tasks/main.yml @@ -1,70 +1,112 @@ --- # tasks file for KubevirtTemplateValidator +- name: Set operatorVersion and targetVersion + operator_sdk.util.k8s_status: + api_version: "{{ cr_info.apiVersion }}" + kind: "{{ cr_info.kind }}" + name: "{{ cr_info.metadata.name }}" + namespace: "{{ cr_info.metadata.namespace }}" + status: + operatorVersion: "{{ operator_version }}" + targetVersion: "{{ operator_version }}" + - name: Set template:view role k8s: state: present definition: "{{ lookup('template', 'template-view-role.yaml.j2') | from_yaml }}" + apply: yes - name: Create the service k8s: state: present definition: "{{ item | from_yaml }}" + apply: yes with_items: "{{ lookup('template', 'service.yaml.j2').split('\n---\n') | select('search', '(^|\n)[^#]') | list }}" register: tv + +# Actively inject owner references in order to adopt existing resources during an upgrade +- name: Inject owner references for KubevirtTemplateValidator + include_role: + name: ClaimOwnership + vars: + object: "{{ item }}" + owner: "{{ cr_info }}" + with_list: + - "{{ tv.results[0].result }}" # ServiceAccount + - "{{ tv.results[2].result }}" # Service + - "{{ tv.results[3].result }}" # Deployment + - name: Register the webhook k8s: state: present definition: "{{ lookup('template', 'webhook.yaml.j2') | from_yaml }}" + apply: yes - name: Refresh template-validator var set_fact: tv_status: "{{ lookup('k8s', kind=tv.results[3].result.kind, namespace=tv.results[3].result.metadata.namespace, resource_name=tv.results[3].result.metadata.name) | from_yaml }}" +- name: "Set Progressing status" + set_fact: + progressing: "{{ true if tv_status.status.readyReplicas|default(0) != tv_status.status.replicas|default(2) else false }}" + +- name: "Set Available status" + set_fact: + available: "{{ true if tv_status.status.readyReplicas|default(0) == tv_status.status.replicas|default(2) else false }}" + +- name: "Set Degraded status" + set_fact: + degraded: "{{ true if tv_status.status.readyReplicas|default(0) != tv_status.status.replicas|default(2) else false }}" + # defaults in this ansible code are here because at the start of deployment, there is a chance # there will be no attributes like availableReplicas and readyReplicas - name: Set progressing condition operator_sdk.util.k8s_status: - api_version: ssp.kubevirt.io/v1 - kind: KubevirtTemplateValidator - name: "{{ meta.name }}" - namespace: "{{ meta.namespace }}" + api_version: "{{ cr_info.apiVersion }}" + kind: "{{ cr_info.kind }}" + name: "{{ cr_info.metadata.name }}" + namespace: "{{ cr_info.metadata.namespace }}" conditions: - type: Progressing - status: "{{ 'True' if tv_status.status.availableReplicas|default(0) != 
tv_status.status.readyReplicas|default(2) else 'False' }}" + status: "{{ 'True' if progressing else 'False' }}" reason: "progressing" - message: "Template-validator is progressing." - -- name: "Wait for the template-validator to start" - k8s_info: - api_version: v1 - kind: "{{ tv_status.kind }}" - name: "{{ tv_status.metadata.name }}" - namespace: "{{ tv_status.metadata.namespace }}" - register: tv_status - delay: 10 - retries: 300 - until: tv_status.resources[0].status.availableReplicas|default(0) == tv_status.resources[0].status.readyReplicas|default(2) | default(false) - + message: "Template-validator is progressing (readyReplicas: {{ tv_status.status.readyReplicas|default(0) }}, desired replicas: {{ tv_status.status.replicas|default(2) }})." - name: Set available condition operator_sdk.util.k8s_status: - api_version: ssp.kubevirt.io/v1 - kind: KubevirtTemplateValidator - name: "{{ meta.name }}" - namespace: "{{ meta.namespace }}" + api_version: "{{ cr_info.apiVersion }}" + kind: "{{ cr_info.kind }}" + name: "{{ cr_info.metadata.name }}" + namespace: "{{ cr_info.metadata.namespace }}" conditions: - type: Available - status: "{{ 'True' if tv_status.resources[0].status.availableReplicas|default(0) == tv_status.resources[0].status.readyReplicas|default(2) else 'False' }}" + status: "{{ 'True' if available else 'False' }}" reason: "available" - message: "Template-validator is available." + message: "Template-validator is available (readyReplicas: {{ tv_status.status.readyReplicas|default(0) }}, desired replicas: {{ tv_status.status.replicas|default(2) }})." - name: Set degraded condition operator_sdk.util.k8s_status: - api_version: ssp.kubevirt.io/v1 - kind: KubevirtTemplateValidator - name: "{{ meta.name }}" - namespace: "{{ meta.namespace }}" + api_version: "{{ cr_info.apiVersion }}" + kind: "{{ cr_info.kind }}" + name: "{{ cr_info.metadata.name }}" + namespace: "{{ cr_info.metadata.namespace }}" conditions: - type: Degraded - status: "{{ 'True' if tv_status.resources[0].status.availableReplicas|default(0) != tv_status.resources[0].status.readyReplicas|default(2) else 'False' }}" + status: "{{ 'True' if degraded else 'False' }}" reason: "degraded" - message: "Template-validator is degraded." + message: "Template-validator is degraded (readyReplicas: {{ tv_status.status.readyReplicas|default(0) }}, desired replicas: {{ tv_status.status.replicas|default(2) }})." + +# Update observedVersion when the CR is Available to indicate a successful upgrade +- name: Set observedVersion + operator_sdk.util.k8s_status: + api_version: "{{ cr_info.apiVersion }}" + kind: "{{ cr_info.kind }}" + name: "{{ cr_info.metadata.name }}" + namespace: "{{ cr_info.metadata.namespace }}" + status: + observedVersion: "{{ operator_version }}" + when: "{{ available==true }}" + +- name: Requeue if KubevirtTemplateValidator not available + fail: + msg: "Requeuing until KubevirtTemplateValidator is available" + when: tv_status.status.readyReplicas|default(0) != tv_status.status.replicas|default(2)
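A minimal sketch for verifying the upgrade behaviour described in the commit message. The CR name "common-templates", the CR namespace "kubevirt", and the "openshift" templates namespace are placeholders, not names taken from this patch; substitute whatever your installation uses:

# 1) compare targetVersion and observedVersion on the ssp.kubevirt.io CR
$ oc get kubevirtcommontemplatesbundles.ssp.kubevirt.io common-templates -n kubevirt -o jsonpath='{.status.targetVersion} {.status.observedVersion}{"\n"}'
# 2) check the Available condition on the same CR
$ oc get kubevirtcommontemplatesbundles.ssp.kubevirt.io common-templates -n kubevirt -o jsonpath='{.status.conditions[?(@.type=="Available")].status}{"\n"}'
# 3) list each template with the name of its owner, which should be the ssp.kubevirt.io CR after adoption
$ oc get templates -n openshift -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.metadata.ownerReferences[*].name}{"\n"}{end}'

The upgrade is finished once the first command prints two equal versions and the second prints True.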