lint: commands should not change things
Fix ansible lint 301 error:

[301] Commands should not change things if nothing needs doing

Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
guits committed Nov 23, 2020
1 parent 1879c26 commit 5450de5
Showing 11 changed files with 70 additions and 4 deletions.
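Most hunks below apply one of two fixes for rule 301: read-only command/shell tasks gain changed_when: false so they never report a change, and tasks that only unmount a path are switched from a raw umount command to the idempotent mount module. (One task in purge-cluster.yml instead keeps reporting a change and silences the rule with an inline # noqa 301 comment; see the note after that hunk.) A minimal sketch of both patterns follows — the first task is taken from the diff, while the osd_mountpoint variable in the second is illustrative:

    - name: check for anything running ceph
      command: "ps -u ceph -U ceph"
      register: check_for_running_ceph
      changed_when: false        # read-only probe, so never report "changed"
      failed_when: check_for_running_ceph.rc == 0

    - name: umount osd data partition
      mount:                     # idempotent module instead of "command: umount ..."
        path: "{{ osd_mountpoint }}"
        state: unmounted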
10 changes: 8 additions & 2 deletions infrastructure-playbooks/lv-teardown.yml
@@ -37,14 +37,18 @@
set -o pipefail;
grep /var/lib/ceph/osd /proc/mounts | awk '{print $2}'
register: old_osd_filesystems
changed_when: false

- name: tear down any existing osd filesystems
command: "umount -v {{ item }}"
- name: tear down any existing osd filesystem
mount:
path: "{{ item }}"
state: unmounted
with_items: "{{ old_osd_filesystems.stdout_lines }}"

- name: kill all lvm commands that may have been hung
command: "killall -q lvcreate pvcreate vgcreate lvconvert || echo -n"
failed_when: false
changed_when: false

## Logical Vols
- name: tear down existing lv for bucket index
@@ -96,7 +100,9 @@
## Physical Vols
- name: tear down pv for nvme device
command: "pvremove --force --yes {{ nvme_device }}"
changed_when: false

- name: tear down pv for each hdd device
command: "pvremove --force --yes {{ item }}"
changed_when: false
with_items: "{{ hdd_devices }}"
23 changes: 21 additions & 2 deletions infrastructure-playbooks/purge-cluster.yml
@@ -87,6 +87,7 @@

- name: ensure cephfs mountpoint(s) are unmounted
command: umount -a -t ceph
changed_when: false

- name: find mapped rbd ids
find:
@@ -96,6 +97,7 @@

- name: use sysfs to unmap rbd devices
shell: "echo {{ item.path | basename }} > /sys/bus/rbd/remove_single_major"
changed_when: false
with_items: "{{ rbd_mapped_ids.files }}"

- name: unload ceph kernel modules
@@ -415,6 +417,7 @@
blkid -t TYPE=crypto_LUKS -s PARTLABEL -s PARTUUID | grep "ceph.*." | grep -o PARTUUID.* | cut -d '"' -f 2
register: encrypted_ceph_partuuid
failed_when: false
changed_when: false

- name: get osd data and lockbox mount points
shell: |
@@ -425,9 +428,12 @@

- name: drop all cache
shell: "sync && sleep 1 && echo 3 > /proc/sys/vm/drop_caches"
changed_when: false

- name: umount osd data partition
shell: umount {{ item }}
mount:
path: "{{ item }}"
state: unmounted
with_items: "{{ mounted_osd.stdout_lines }}"

- name: remove osd mountpoint tree
@@ -470,12 +476,14 @@

- name: get physical sector size
command: blockdev --getpbsz /dev/disk/by-partuuid/{{ item }}
changed_when: false
with_items: "{{ encrypted_ceph_partuuid.stdout_lines }}"
when: encrypted_ceph_partuuid.stdout_lines | length > 0
register: phys_sector_size

- name: wipe dmcrypt device
command: dd if=/dev/zero of=/dev/disk/by-partuuid/{{ item.0 }} bs={{ item.1.stdout }} count={{ item.2.stdout }} oflag=direct
changed_when: false
with_together:
- "{{ encrypted_ceph_partuuid.stdout_lines }}"
- "{{ payload_offset.results }}"
@@ -484,17 +492,20 @@
- name: get ceph data partitions
shell: |
blkid -o device -t PARTLABEL="ceph data"
changed_when: false
failed_when: false
register: ceph_data_partition_to_erase_path

- name: get ceph lockbox partitions
shell: |
blkid -o device -t PARTLABEL="ceph lockbox"
changed_when: false
failed_when: false
register: ceph_lockbox_partition_to_erase_path

- name: see if ceph-volume is installed
command: command -v ceph-volume
changed_when: false
failed_when: false
register: ceph_volume_present

@@ -530,24 +541,28 @@
- name: get ceph block partitions
shell: |
blkid -o device -t PARTLABEL="ceph block"
changed_when: false
failed_when: false
register: ceph_block_partition_to_erase_path

- name: get ceph journal partitions
shell: |
blkid -o device -t PARTLABEL="ceph journal"
changed_when: false
failed_when: false
register: ceph_journal_partition_to_erase_path

- name: get ceph db partitions
shell: |
blkid -o device -t PARTLABEL="ceph block.db"
changed_when: false
failed_when: false
register: ceph_db_partition_to_erase_path

- name: get ceph wal partitions
shell: |
blkid -o device -t PARTLABEL="ceph block.wal"
changed_when: false
failed_when: false
register: ceph_wal_partition_to_erase_path

@@ -563,6 +578,7 @@
- name: resolve parent device
command: lsblk --nodeps -no pkname "{{ item }}"
register: tmp_resolved_parent_device
changed_when: false
with_items: "{{ combined_devices_list }}"

- name: set_fact resolved_parent_device
@@ -574,6 +590,7 @@
set -o pipefail;
wipefs --all "{{ item }}"
dd if=/dev/zero of="{{ item }}" bs=1 count=4096
changed_when: false
with_items: "{{ combined_devices_list }}"

- name: zap ceph journal/block db/block wal partitions
@@ -592,6 +609,7 @@
partprobe /dev/"{{ item }}"
udevadm settle --timeout=600
with_items: "{{ resolved_parent_device }}"
changed_when: false

- name: purge ceph mon cluster

@@ -809,7 +827,7 @@
- name: request data removal
local_action:
module: command
echo requesting data removal
echo requesting data removal # noqa 301
become: false
notify: remove data

@@ -839,6 +857,7 @@
- name: check for anything running ceph
command: "ps -u ceph -U ceph"
register: check_for_running_ceph
changed_when: false
failed_when: check_for_running_ceph.rc == 0

- name: find ceph systemd unit files to remove
5 changes: 5 additions & 0 deletions infrastructure-playbooks/purge-container-cluster.yml
@@ -71,6 +71,7 @@

- name: ensure cephfs mountpoint are unmounted
command: umount -a -t ceph
changed_when: false

- name: find mapped rbd ids
find:
@@ -80,6 +81,7 @@

- name: use sysfs to unmap rbd devices
shell: "echo {{ item.path | basename }} > /sys/bus/rbd/remove_single_major"
changed_when: false
with_items: "{{ rbd_mapped_ids.files }}"

- name: unload ceph kernel modules
@@ -268,6 +270,7 @@
shell: |
systemctl list-units --all | grep -oE "ceph-osd@([0-9]+).service"
register: osd_units
changed_when: false
ignore_errors: true

- name: disable ceph osd service
@@ -576,6 +579,7 @@

- name: remove ceph container image
command: "{{ container_binary }} rmi {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
changed_when: false
tags:
- remove_img

@@ -668,6 +672,7 @@

- name: remove ceph data
shell: rm -rf /var/lib/ceph/*
changed_when: false

# (todo): remove this when we are able to manage docker
# service on atomic host.
5 changes: 5 additions & 0 deletions infrastructure-playbooks/rolling_update.yml
@@ -345,6 +345,7 @@

- name: set osd flags
command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} osd set {{ item }}"
changed_when: false
with_items:
- noout
- nodeep-scrub
@@ -415,6 +416,7 @@

- name: get num_pgs - non container
command: "{{ container_exec_cmd_update_osd|default('') }} ceph --cluster {{ cluster }} pg stat --format json"
changed_when: false
register: ceph_pgs
delegate_to: "{{ groups[mon_group_name][0] }}"

@@ -448,6 +450,7 @@

- name: unset osd flags
command: "{{ container_exec_cmd_update_osd | default('') }} ceph osd unset {{ item }} --cluster {{ cluster }}"
changed_when: false
with_items:
- noout
- nodeep-scrub
@@ -979,10 +982,12 @@

- name: show ceph status
command: "{{ container_exec_cmd_status|default('') }} ceph --cluster {{ cluster }} -s"
changed_when: false
run_once: True
delegate_to: "{{ groups[mon_group_name][0] }}"

- name: show all daemons version
command: "{{ container_exec_cmd_status|default('') }} ceph --cluster {{ cluster }} versions"
run_once: True
delegate_to: "{{ groups[mon_group_name][0] }}"
changed_when: false
4 changes: 4 additions & 0 deletions infrastructure-playbooks/shrink-mds.yml
@@ -66,6 +66,7 @@

- name: exit playbook, if can not connect to the cluster
command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} health"
changed_when: false
register: ceph_health
until: ceph_health is succeeded
retries: 5
@@ -80,11 +81,13 @@
# removes the MDS from the FS map.
- name: exit mds when containerized deployment
command: "{{ container_exec_cmd | default('') }} ceph tell mds.{{ mds_to_kill_hostname }} exit"
changed_when: false
when: containerized_deployment | bool

- name: get ceph status
command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s -f json"
register: ceph_status
changed_when: false

- name: set_fact current_max_mds
set_fact:
@@ -162,3 +165,4 @@
post_tasks:
- name: show ceph health
command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s"
changed_when: false
3 changes: 3 additions & 0 deletions infrastructure-playbooks/shrink-mgr.yml
@@ -44,6 +44,7 @@
- name: exit playbook, if can not connect to the cluster
command: "{{ container_exec_cmd | default('') }} timeout 5 ceph --cluster {{ cluster }} health"
register: ceph_health
changed_when: false
until: ceph_health is succeeded
retries: 5
delay: 2
@@ -115,6 +116,7 @@
- name: fail if the mgr is reported in ceph mgr dump
command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} mgr dump -f json"
register: mgr_dump
changed_when: false
failed_when: mgr_to_kill_hostname in (([mgr_dump.stdout | from_json] | map(attribute='active_name') | list) + (mgr_dump.stdout | from_json)['standbys'] | map(attribute='name') | list)
until: mgr_to_kill_hostname not in (([mgr_dump.stdout | from_json] | map(attribute='active_name') | list) + (mgr_dump.stdout | from_json)['standbys'] | map(attribute='name') | list)
retries: 12
@@ -129,3 +131,4 @@
post_tasks:
- name: show ceph health
command: "{{ container_exec_cmd | default('') }} ceph --cluster {{ cluster }} -s"
changed_when: false
5 changes: 5 additions & 0 deletions infrastructure-playbooks/shrink-mon.yml
@@ -82,6 +82,7 @@
- name: exit playbook, if can not connect to the cluster
command: "{{ container_exec_cmd }} timeout 5 ceph --cluster {{ cluster }} health"
register: ceph_health
changed_when: false
until: ceph_health.stdout.find("HEALTH") > -1
delegate_to: "{{ mon_host }}"
retries: 5
@@ -107,13 +108,15 @@

- name: remove monitor from the quorum
command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} mon remove {{ mon_to_kill_hostname }}"
changed_when: false
failed_when: false
delegate_to: "{{ mon_host }}"

post_tasks:
- name: verify the monitor is out of the cluster
command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} quorum_status -f json"
delegate_to: "{{ mon_host }}"
changed_when: false
failed_when: false
register: result
until: mon_to_kill_hostname not in (result.stdout | from_json)['quorum_names']
@@ -136,7 +139,9 @@
- name: show ceph health
command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} -s"
delegate_to: "{{ mon_host }}"
changed_when: false

- name: show ceph mon status
command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} mon stat"
delegate_to: "{{ mon_host }}"
changed_when: false
8 changes: 8 additions & 0 deletions infrastructure-playbooks/shrink-osd.yml
@@ -75,12 +75,14 @@
- name: exit playbook, if can not connect to the cluster
command: "{{ container_exec_cmd }} timeout 5 ceph --cluster {{ cluster }} health"
register: ceph_health
changed_when: false
until: ceph_health.stdout.find("HEALTH") > -1
retries: 5
delay: 2

- name: find the host(s) where the osd(s) is/are running on
command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} osd find {{ item }}"
changed_when: false
with_items: "{{ osd_to_kill.split(',') }}"
register: find_osd_hosts

@@ -99,6 +101,7 @@

- name: get ceph-volume lvm list data
command: "{{ container_run_cmd }} lvm list --format json"
changed_when: false
register: _lvm_list_data
delegate_to: "{{ item.0 }}"
loop: "{{ _osd_hosts }}"
@@ -135,6 +138,7 @@

- name: mark osd(s) out of the cluster
command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} osd out {{ osd_to_kill.replace(',', ' ') }}"
changed_when: false
run_once: true

- name: stop osd(s) service
@@ -220,11 +224,13 @@

- name: ensure osds are marked down
command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} osd down {{ osd_to_kill.replace(',', ' ') }}"
changed_when: false
run_once: true
delegate_to: "{{ groups[mon_group_name][0] }}"

- name: purge osd(s) from the cluster
command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} osd purge {{ item }} --yes-i-really-mean-it"
changed_when: false
run_once: true
with_items: "{{ osd_to_kill.split(',') }}"

@@ -237,6 +243,8 @@

- name: show ceph health
command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} -s"
changed_when: false

- name: show ceph osd tree
command: "{{ container_exec_cmd }} ceph --cluster {{ cluster }} osd tree"
changed_when: false
