Refact playbook #1727

Merged · 9 commits · Aug 2, 2017
19 changes: 7 additions & 12 deletions generate_group_vars_sample.sh
@@ -1,14 +1,13 @@
#!/usr/bin/env bash

set -euo pipefail


#############
# VARIABLES #
#############

basedir=$(dirname "$0")
no_header="ceph-docker-common" # pipe separated list of roles with no header, MUST end with '$', e.g: 'foo$|bar$'
merge_in_all="ceph-common$|ceph-docker-common$" # pipe separated list of roles you want to merge in all.yml.sample, MUST end with '$', e.g: 'foo$|bar$'
do_not_generate="ceph-common$|ceph-docker-common$" # pipe separated list of roles we don't want to generate sample file, MUST end with '$', e.g: 'foo$|bar$'


#############
@@ -52,7 +51,7 @@ generate_group_vars_file () {
for role in "$basedir"/roles/ceph-*; do
rolename=$(basename "$role")

if echo "$rolename" | grep -qE "$merge_in_all"; then
if [[ $rolename == "ceph-defaults" ]]; then
output="all.yml.sample"
elif [[ $rolename == "ceph-agent" ]]; then
output="agent.yml.sample"
@@ -62,17 +61,13 @@ for role in "$basedir"/roles/ceph-*; do
output="${rolename:5}s.yml.sample"
fi


# Do not re-regenerate the header for certain roles
# since we merge them in all.yml.sample
if ! echo "$rolename" | grep -qE "$no_header"; then
populate_header
fi

defaults="$role"/defaults/main.yml
if [[ ! -f $defaults ]]; then
continue
fi

generate_group_vars_file
if ! echo "$rolename" | grep -qE "$do_not_generate"; then
populate_header
generate_group_vars_file
fi
done
56 changes: 23 additions & 33 deletions group_vars/all.yml.sample
@@ -30,6 +30,12 @@ dummy:
# INSTALL #
###########

# Set uid/gid to default '64045' for bootstrap directories.
# '64045' is used for debian based distros. It must be set to 167 in case of rhel based distros.
# These values have to be set according to the base OS used by the container image, NOT the host.
#bootstrap_dirs_owner: "64045"
#bootstrap_dirs_group: "64045"
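
The bootstrap uid/gid comment added above names the two expected values. A minimal sketch of the override for a container image built on an RHEL-based distro, assuming everything else stays at its commented-out default:

```yaml
# group_vars/all.yml (sketch) -- the container image is RHEL-based, so use 167
# instead of the Debian default of 64045, as the comment in all.yml.sample says.
bootstrap_dirs_owner: "167"
bootstrap_dirs_group: "167"
```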

#mon_group_name: mons
#osd_group_name: osds
#rgw_group_name: rgws
@@ -197,9 +203,12 @@ dummy:
# generated, you may find it useful to disable fsid generation to
# avoid cluttering up your ansible repo. If you set `generate_fsid` to
# false, you *must* generate `fsid` in another way.
# ACTIVATE THE FSID VARIABLE FOR NON-VAGRANT DEPLOYMENT
#fsid: "{{ cluster_uuid.stdout }}"
#generate_fsid: true
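
The comment above notes that disabling `generate_fsid` means the fsid has to be supplied some other way. A hedged sketch of what that override could look like; the UUID below is only a placeholder (for example, output from `uuidgen` on the admin node):

```yaml
# group_vars/all.yml (sketch) -- supply a pre-generated cluster fsid instead of
# letting the playbook derive it; the value here is a made-up placeholder.
generate_fsid: false
fsid: 4a158d27-f750-41d5-9e7f-26ce4c9d2d45
```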

#ceph_conf_key_directory: /etc/ceph

#cephx: true
#max_open_files: 131072

@@ -324,6 +333,14 @@ dummy:
#handler_health_osd_check_delay: 30
#handler_health_osd_check: true

# Configure the type of NFS gateway access. At least one must be enabled for an
# NFS role to be useful
#
# Set this to true to enable File access via NFS. Requires an MDS role.
#nfs_file_gw: true
# Set this to true to enable Object access via NFS. Requires an RGW role.
#nfs_obj_gw: false
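
The NFS gateway flags moved into this hunk require at least one access type to be enabled for the NFS role to do anything. A sketch of an object-only gateway, which avoids the MDS requirement attached to file access:

```yaml
# group_vars/all.yml (sketch) -- NFS gateway exporting RGW objects only;
# nfs_obj_gw needs an RGW role, while the MDS requirement for nfs_file_gw
# does not apply because file access stays disabled.
nfs_file_gw: false
nfs_obj_gw: true
```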

###################
# CONFIG OVERRIDE #
###################
@@ -358,7 +375,7 @@ dummy:
##########
# DOCKER #
##########

#docker_exec_cmd:
#docker: false
#ceph_docker_image: "ceph/daemon"
#ceph_docker_image_tag: latest
@@ -368,42 +385,10 @@ dummy:
#containerized_deployment_with_kv: false
#containerized_deployment: false
#mon_containerized_default_ceph_conf_with_kv: false

# Configure the type of NFS gateway access. At least one must be enabled for an
# NFS role to be useful
#
# Set this to true to enable File access via NFS. Requires an MDS role.
#nfs_file_gw: true
# Set this to true to enable Object access via NFS. Requires an RGW role.
#nfs_obj_gw: false

# this is only here for usage with the rolling_update.yml playbook
# do not ever change this here
#rolling_update: false

#fsid: "{{ cluster_uuid.stdout }}"
#generate_fsid: true
#ceph_docker_registry: docker.io
#ceph_docker_enable_centos_extra_repo: false

#ceph_docker_on_openstack: false

#mon_use_fqdn: false # if set to true, the MON name used will be the fqdn

# Set uid/gid to default '64045' for bootstrap directories.
# '64045' is used for debian based distros. It must be set to 167 in case of rhel based distros.
# These values have to be set according to the base OS used by the container image, NOT the host.
#bootstrap_dirs_owner: "64045"
#bootstrap_dirs_group: "64045"

#ceph_conf_key_directory: /etc/ceph

###########
# Network #
###########
#monitor_interface: 'interface'
#monitor_address: '0.0.0.0'
#monitor_address_block: []

############
# KV store #
@@ -413,3 +398,8 @@
#kv_port: 2379
#containerized_deployment_with_kv: false


# this is only here for usage with the rolling_update.yml playbook
# do not ever change this here
#rolling_update: false

4 changes: 0 additions & 4 deletions group_vars/mons.yml.sample
@@ -13,12 +13,9 @@ dummy:
# GENERAL #
###########

#fetch_directory: fetch/

#mon_group_name: mons

# ACTIVATE BOTH FSID AND MONITOR_SECRET VARIABLES FOR NON-VAGRANT DEPLOYMENT
#fsid: "{{ cluster_uuid.stdout }}"
#monitor_secret: "{{ monitor_keyring.stdout }}"
#admin_secret: 'admin_secret'

@@ -116,7 +113,6 @@ dummy:
##########
# DOCKER #
##########
#docker_exec_cmd:
#ceph_mon_docker_subnet: "{{ public_network }}"# subnet of the monitor_interface

# ceph_mon_docker_extra_env:
5 changes: 0 additions & 5 deletions group_vars/osds.yml.sample
@@ -15,8 +15,6 @@ dummy:
# GENERAL #
###########

#fetch_directory: fetch/

# Even though OSD nodes should not have the admin key
# at their disposal, some people might want to have it
# distributed on OSD nodes. Setting 'copy_admin_key' to 'true'
@@ -58,9 +56,6 @@ dummy:
# CEPH OPTIONS
##############

# ACTIVATE THE FSID VARIABLE FOR NON-VAGRANT DEPLOYMENT
#fsid: "{{ cluster_uuid.stdout }}"

# Devices to be used as OSDs
# You can pre-provision disks that are not present yet.
# Ansible will just skip them. Newly added disk will be
4 changes: 3 additions & 1 deletion infrastructure-playbooks/osd-configure.yml
@@ -10,9 +10,11 @@
- hosts: mons
become: True
roles:
- ceph-defaults
- ceph-fetch-keys

- hosts: osds
become: True
roles:
- ceph-osd
- ceph-defaults
- ceph-osd
16 changes: 14 additions & 2 deletions infrastructure-playbooks/purge-docker-cluster.yml
@@ -97,13 +97,21 @@

tasks:

- name: disable ceph rgw service
# For backward compatibility
- name: disable ceph rgw service (old unit name, for backward compatibility)
service:
name: "ceph-rgw@{{ ansible_hostname }}"
state: stopped
enabled: no
ignore_errors: true

- name: disable ceph rgw service (new unit name)
service:
name: "ceph-radosgw@{{ ansible_hostname }}"
state: stopped
enabled: no
ignore_errors: true

- name: remove ceph rgw container
docker:
image: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}"
@@ -113,8 +121,12 @@

- name: remove ceph rgw service
file:
path: /etc/systemd/system/ceph-rgw@.service
path: "{{ item }}"
state: absent
with_items:
# For backward compatibility
- /etc/systemd/system/ceph-rgw@.service
- /etc/systemd/system/ceph-radosgw@.service

- name: remove ceph rgw image
docker_image:
4 changes: 3 additions & 1 deletion infrastructure-playbooks/rgw-standalone.yml
@@ -6,9 +6,11 @@
- hosts: mons
become: True
roles:
- ceph-defaults
- ceph-fetch-keys

- hosts: rgws
become: True
roles:
- ceph-rgw
- ceph-defaults
- ceph-rgw
32 changes: 31 additions & 1 deletion infrastructure-playbooks/rolling_update.yml
@@ -41,6 +41,7 @@
- "{{ mds_group_name|default('mdss') }}"
- "{{ rgw_group_name|default('rgws') }}"
- "{{ mgr_group_name|default('mgrs') }}"
- "{{ client_group_name|default('clients') }}"

become: True
tasks:
@@ -91,6 +92,9 @@
- not containerized_deployment

roles:
- ceph-defaults
- { role: ceph-common, when: not containerized_deployment }
- { role: ceph-docker-common, when: containerized_deployment }
- ceph-mon

post_tasks:
@@ -224,6 +228,9 @@
- not containerized_deployment

roles:
- ceph-defaults
- { role: ceph-common, when: not containerized_deployment }
- { role: ceph-docker-common, when: containerized_deployment }
- ceph-osd

post_tasks:
@@ -341,6 +348,9 @@
- not containerized_deployment

roles:
- ceph-defaults
- { role: ceph-common, when: not containerized_deployment }
- { role: ceph-docker-common, when: containerized_deployment }
- ceph-mds

post_tasks:
@@ -411,6 +421,9 @@
- not containerized_deployment

roles:
- ceph-defaults
- { role: ceph-common, when: not containerized_deployment }
- { role: ceph-docker-common, when: containerized_deployment }
- ceph-rgw

post_tasks:
@@ -437,9 +450,26 @@

- name: restart containerized ceph rgws with systemd
service:
name: ceph-rgw@{{ ansible_hostname }}
name: ceph-radosgw@{{ ansible_hostname }}
state: restarted
enabled: yes
when:
- ansible_service_mgr == 'systemd'
- containerized_deployment


- name: upgrade ceph client node

vars:
upgrade_ceph_packages: True

hosts:
- "{{ client_group_name|default('clients') }}"

serial: 1
become: True

roles:
- ceph-defaults
- ceph-common
- ceph-client
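
The new client play targets whatever `client_group_name` resolves to ('clients' by default). A hypothetical inventory giving that play something to upgrade might look like the following, assuming an Ansible version with the YAML inventory plugin; all hostnames are placeholders:

```yaml
# inventory.yml (sketch) -- only the 'clients' group matters for the new play;
# the mons group is shown merely to indicate where existing hosts would live.
all:
  children:
    mons:
      hosts:
        mon0.example.com:
    clients:
      hosts:
        client0.example.com:
        client1.example.com:
```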
2 changes: 1 addition & 1 deletion infrastructure-playbooks/shrink-mon.yml
@@ -27,7 +27,7 @@
private: no

tasks:
- include_vars: roles/ceph-common/defaults/main.yml
- include_vars: roles/ceph-defaults/defaults/main.yml
- include_vars: group_vars/all.yml

- name: exit playbook, if only one monitor is present in cluster
2 changes: 1 addition & 1 deletion infrastructure-playbooks/shrink-osd.yml
@@ -27,7 +27,7 @@
private: no

tasks:
- include_vars: roles/ceph-common/defaults/main.yml
- include_vars: roles/ceph-defaults/defaults/main.yml
- include_vars: group_vars/all.yml

- name: exit playbook, if user did not mean to shrink cluster
5 changes: 3 additions & 2 deletions infrastructure-playbooks/take-over-existing-cluster.yml
@@ -14,16 +14,17 @@
- hosts: mons
become: True
vars_files:
- roles/ceph-common/defaults/main.yml
- roles/ceph-defaults/defaults/main.yml
- group_vars/all.yml
roles:
- ceph-defaults
- ceph-fetch-keys

- hosts: all
become: true

tasks:
- include_vars: roles/ceph-common/defaults/main.yml
- include_vars: roles/ceph-defaults/defaults/main.yml
- include_vars: group_vars/all.yml

- name: get the name of the existing ceph cluster
3 changes: 1 addition & 2 deletions roles/ceph-client/meta/main.yml
@@ -10,5 +10,4 @@ galaxy_info:
- trusty
categories:
- system
dependencies:
- { role: ceph.ceph-common }
dependencies: []
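
With the galaxy dependency on ceph.ceph-common dropped from ceph-client, the role no longer pulls in the common role behind the scenes, so a playbook has to list the chain explicitly, mirroring the client play added to rolling_update.yml. A minimal sketch; the play file itself is hypothetical:

```yaml
# site-clients.yml (sketch) -- explicit role ordering now that ceph-client has
# no meta dependency on ceph.ceph-common.
- hosts: clients
  become: true
  roles:
    - ceph-defaults
    - ceph-common
    - ceph-client
```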