Skip to content

Commit

Permalink
rolling_update: stop/start instead of restart
Browse files Browse the repository at this point in the history
During the daemon upgrade we're
  - stopping the service when it's not containerized
  - running the daemon role
  - start the service when it's not containerized
  - restart the service when it's containerized

This implementation has multiple issues.

1/ We don't use the same service workflow when using containers
or baremetal.

2/ The explicit daemon start isn't required since we're already
doing this in the daemon role.

3/ Any non-backward-compatible changes in the systemd unit template (for
containerized deployment) won't work due to the restart usage.

This patch refactors the rolling_update playbook by using the same service
stop task for both containerized and baremetal deployment at the start
of the upgrade play.
It removes the explicit service start task because it's already included
in the dedicated role.
The service restart tasks for containerized deployment are also
removed.

Finally, this adds the missing service stop task for ceph crash upgrade
workflow.

Closes: https://bugzilla.redhat.com/show_bug.cgi?id=1859173

Signed-off-by: Dimitri Savineau <dsavinea@redhat.com>
(cherry picked from commit 155e2a2)
  • Loading branch information
dsavineau committed Jul 27, 2020
1 parent 56cf716 commit 8ea3fa1
Showing 1 changed file with 19 additions and 120 deletions.
139 changes: 19 additions & 120 deletions infrastructure-playbooks/rolling_update.yml
Original file line number Diff line number Diff line change
Expand Up @@ -191,7 +191,6 @@
enabled: no
masked: yes
ignore_errors: True
when: not containerized_deployment | bool

# NOTE: we mask the service so the RPM can't restart it
# after the package gets upgraded
Expand All @@ -202,7 +201,6 @@
enabled: no
masked: yes
ignore_errors: True
when: not containerized_deployment | bool

# only mask the service for mgr because it must be upgraded
# after ALL monitors, even when collocated
Expand All @@ -226,28 +224,12 @@
- import_role:
name: ceph-mon

- name: start ceph mon
systemd:
name: ceph-mon@{{ monitor_name }}
state: started
enabled: yes
when: not containerized_deployment | bool

- name: start ceph mgr
systemd:
name: ceph-mgr@{{ ansible_hostname }}
state: started
enabled: yes
ignore_errors: True # if no mgr collocated with mons
when: not containerized_deployment | bool

- name: restart containerized ceph mon
systemd:
name: ceph-mon@{{ monitor_name }}
state: restarted
enabled: yes
daemon_reload: yes
when: containerized_deployment | bool

- name: non container | waiting for the monitor to join the quorum...
command: ceph --cluster "{{ cluster }}" -m "{{ hostvars[groups[mon_group_name][0]]['_current_monitor_address'] }}" -s --format json
Expand Down Expand Up @@ -392,18 +374,10 @@
shell: "if [ -d /var/lib/ceph/osd ] ; then ls /var/lib/ceph/osd | sed 's/.*-//' ; fi"
register: osd_ids
changed_when: false
when: not containerized_deployment | bool

- name: get osd unit names - container
shell: systemctl list-units | grep -E "loaded * active" | grep -oE "ceph-osd@([0-9]+).service"
register: osd_names
changed_when: false
when: containerized_deployment | bool

- name: set num_osds for container
- name: set num_osds
set_fact:
num_osds: "{{ osd_names.stdout_lines|default([])|length }}"
when: containerized_deployment | bool
num_osds: "{{ osd_ids.stdout_lines|default([])|length }}"

- name: set_fact container_exec_cmd_osd
set_fact:
Expand All @@ -417,12 +391,6 @@
enabled: no
masked: yes
with_items: "{{ osd_ids.stdout_lines }}"
when: not containerized_deployment | bool

- name: set num_osds for non container
set_fact:
num_osds: "{{ osd_ids.stdout_lines|default([])|length }}"
when: not containerized_deployment | bool

- import_role:
name: ceph-handler
Expand All @@ -437,25 +405,6 @@
- import_role:
name: ceph-osd

- name: start ceph osd
systemd:
name: ceph-osd@{{ item }}
state: started
enabled: yes
masked: no
with_items: "{{ osd_ids.stdout_lines }}"
when: not containerized_deployment | bool

- name: restart containerized ceph osd
systemd:
name: "{{ item }}"
state: restarted
enabled: yes
masked: no
daemon_reload: yes
with_items: "{{ osd_names.stdout_lines }}"
when: containerized_deployment | bool

- name: scan ceph-disk osds with ceph-volume if deploying nautilus
command: "ceph-volume --cluster={{ cluster }} simple scan --force"
environment:
Expand Down Expand Up @@ -614,7 +563,6 @@
name: ceph-mds@{{ ansible_hostname }}
enabled: no
masked: yes
when: not containerized_deployment | bool

- import_role:
name: ceph-handler
Expand All @@ -629,14 +577,6 @@
- import_role:
name: ceph-mds

- name: restart ceph mds
systemd:
name: ceph-mds@{{ ansible_hostname }}
state: restarted
enabled: yes
masked: no
daemon_reload: yes


- name: upgrade standbys ceph mdss cluster
vars:
Expand All @@ -656,7 +596,6 @@
name: ceph-mds@{{ ansible_hostname }}
enabled: no
masked: yes
when: not containerized_deployment | bool

- import_role:
name: ceph-handler
Expand All @@ -671,14 +610,6 @@
- import_role:
name: ceph-mds

- name: restart ceph mds
systemd:
name: ceph-mds@{{ ansible_hostname }}
state: restarted
enabled: yes
masked: no
daemon_reload: yes

- name: set max_mds
command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} fs set {{ cephfs }} max_mds {{ mds_max_mds }}"
changed_when: false
Expand Down Expand Up @@ -714,7 +645,6 @@
enabled: no
masked: yes
with_items: "{{ rgw_instances }}"
when: not containerized_deployment | bool

- import_role:
name: ceph-handler
Expand All @@ -729,16 +659,6 @@
- import_role:
name: ceph-rgw

- name: restart containerized ceph rgw
systemd:
name: ceph-radosgw@rgw.{{ ansible_hostname }}.{{ item.instance_name }}
state: restarted
enabled: yes
masked: no
daemon_reload: yes
with_items: "{{ rgw_instances }}"
when: containerized_deployment | bool


- name: upgrade ceph rbd mirror node
vars:
Expand Down Expand Up @@ -771,23 +691,6 @@
- import_role:
name: ceph-rbd-mirror

- name: start ceph rbd mirror
systemd:
name: "ceph-rbd-mirror@rbd-mirror.{{ ansible_hostname }}"
state: started
enabled: yes
masked: no
when: not containerized_deployment | bool

- name: restart containerized ceph rbd mirror
systemd:
name: ceph-rbd-mirror@rbd-mirror.{{ ansible_hostname }}
state: restarted
enabled: yes
masked: no
daemon_reload: yes
when: containerized_deployment | bool


- name: upgrade ceph nfs node
vars:
Expand All @@ -808,6 +711,17 @@
failed_when: false
when: not containerized_deployment | bool

- name: systemd stop nfs container
systemd:
name: ceph-nfs@{{ ceph_nfs_service_suffix | default(ansible_hostname) }}
state: stopped
enabled: no
masked: yes
failed_when: false
when:
- ceph_nfs_enable_service | bool
- containerized_deployment | bool

- import_role:
name: ceph-defaults
- import_role:
Expand All @@ -825,27 +739,6 @@
- import_role:
name: ceph-nfs

- name: start nfs gateway
systemd:
name: nfs-ganesha
state: started
enabled: yes
masked: no
when:
- not containerized_deployment | bool
- ceph_nfs_enable_service | bool

- name: systemd restart nfs container
systemd:
name: ceph-nfs@{{ ceph_nfs_service_suffix | default(ansible_hostname) }}
state: restarted
enabled: yes
masked: no
daemon_reload: yes
when:
- ceph_nfs_enable_service | bool
- containerized_deployment | bool


- name: upgrade ceph iscsi gateway node
vars:
Expand Down Expand Up @@ -923,6 +816,12 @@
gather_facts: false
become: true
tasks:
- name: stop the ceph-crash service
systemd:
name: "{{ 'ceph-crash@' + ansible_hostname if containerized_deployment | bool else 'ceph-crash.service' }}"
state: stopped
enabled: no
masked: yes
- import_role:
name: ceph-defaults
- import_role:
Expand Down

0 comments on commit 8ea3fa1

Please sign in to comment.