update: do not gather facts on each play
There is no benefit in gathering facts again on each play in
rolling_update.yml.

Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
(cherry picked from commit 2c77d00)
guits committed Jun 30, 2021
1 parent 2957b69 commit 22fd084
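
The message above implies facts are already gathered earlier in the playbook run; once collected, they stay cached in memory for the remaining plays, so a later play can set gather_facts: false and still read them. A minimal sketch of that pattern follows, with illustrative play names, hosts, and debug tasks (not the playbook's actual plays):

# Sketch only: gather facts once, then skip the implicit setup in later plays.
- name: gather facts once for all hosts
  hosts: all
  become: true
  gather_facts: true                  # single fact-gathering pass
  tasks:
    - name: confirm facts were collected
      debug:
        msg: "facts gathered for {{ inventory_hostname }}"

- name: later play reuses the cached facts
  hosts: all
  serial: 1
  become: true
  gather_facts: false                 # no extra setup run for this play
  tasks:
    - name: read a fact gathered by the first play
      debug:
        msg: "{{ ansible_facts['distribution'] | default('unknown') }}"

Because most of the plays below run with serial: 1, skipping the implicit setup also avoids repeating fact gathering for every host batch in every play.
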
Showing 1 changed file with 17 additions and 0 deletions.
infrastructure-playbooks/rolling_update.yml
@@ -117,6 +117,7 @@
hosts: "{{ mon_group_name|default('mons') }}"
serial: 1
become: True
+ gather_facts: false
tasks:
- name: upgrade ceph mon cluster
block:
@@ -279,6 +280,7 @@
- name: reset mon_host
hosts: "{{ mon_group_name|default('mons') }}"
become: True
+ gather_facts: false
tasks:
- import_role:
name: ceph-defaults
@@ -296,6 +298,7 @@
hosts: "{{ mon_group_name|default('mons') }}"
serial: 1
become: True
+ gather_facts: false
tasks:
- name: upgrade mgrs when no mgr group explicitly defined in inventory
when: groups.get(mgr_group_name, []) | length == 0
@@ -330,6 +333,7 @@
hosts: "{{ mgr_group_name|default('mgrs') }}"
serial: 1
become: True
+ gather_facts: false
tasks:
# The following task has a failed_when: false
# to handle the scenario where no mgr existed before the upgrade
@@ -363,6 +367,7 @@
- name: set osd flags
hosts: "{{ mon_group_name | default('mons') }}[0]"
become: True
+ gather_facts: false
tasks:
- import_role:
name: ceph-defaults
@@ -390,6 +395,7 @@
hosts: "{{ osd_group_name|default('osds') }}"
serial: 1
become: True
+ gather_facts: false
tasks:
- import_role:
name: ceph-defaults
@@ -469,6 +475,7 @@
- name: complete osd upgrade
hosts: "{{ mon_group_name|default('mons') }}[0]"
become: True
+ gather_facts: false
tasks:
- import_role:
name: ceph-defaults
@@ -491,6 +498,7 @@
- name: upgrade ceph mdss cluster, deactivate all rank > 0
hosts: "{{ mon_group_name | default('mons') }}[0]"
become: true
+ gather_facts: false
tasks:
- name: deactivate all mds rank > 0
when: groups.get(mds_group_name, []) | length > 0
@@ -591,6 +599,7 @@
upgrade_ceph_packages: True
hosts: active_mdss
become: true
+ gather_facts: false
tasks:
- import_role:
name: ceph-defaults
@@ -636,6 +645,7 @@
upgrade_ceph_packages: True
hosts: standby_mdss
become: True
+ gather_facts: false

tasks:
- import_role:
@@ -684,6 +694,7 @@
hosts: "{{ rgw_group_name|default('rgws') }}"
serial: 1
become: True
+ gather_facts: false
tasks:

- import_role:
@@ -727,6 +738,7 @@
hosts: "{{ rbdmirror_group_name|default('rbdmirrors') }}"
serial: 1
become: True
+ gather_facts: false
tasks:
- name: stop ceph rbd mirror
systemd:
@@ -759,6 +771,7 @@
hosts: "{{ nfs_group_name|default('nfss') }}"
serial: 1
become: True
+ gather_facts: false
tasks:
# failed_when: false is here so that if we upgrade
# from a version of ceph that does not have nfs-ganesha
@@ -808,6 +821,7 @@
- "{{ iscsi_gw_group_name|default('iscsigws') }}"
serial: 1
become: True
+ gather_facts: false
tasks:
# failed_when: false is here so that if we upgrade
# from a version of ceph that does not have iscsi gws
@@ -848,6 +862,7 @@
hosts: "{{ client_group_name|default('clients') }}"
serial: "{{ client_update_batch | default(20) }}"
become: True
+ gather_facts: false
tasks:
- import_role:
name: ceph-defaults
@@ -913,6 +928,7 @@
- "{{ client_group_name | default('clients') }}"
- "{{ iscsi_gw_group_name | default('iscsigws') }}"
become: True
+ gather_facts: false
tasks:
- import_role:
name: ceph-defaults
@@ -1051,6 +1067,7 @@
- name: show ceph status
hosts: "{{ mon_group_name|default('mons') }}"
become: True
+ gather_facts: false
tasks:
- import_role:
name: ceph-defaults
