diff --git a/generate_group_vars_sample.sh b/generate_group_vars_sample.sh index 1e7bd07a42..bb26a229f7 100755 --- a/generate_group_vars_sample.sh +++ b/generate_group_vars_sample.sh @@ -1,14 +1,13 @@ #!/usr/bin/env bash - set -euo pipefail + ############# # VARIABLES # ############# basedir=$(dirname "$0") -no_header="ceph-docker-common" # pipe separated list of roles with no header, MUST end with '$', e.g: 'foo$|bar$' -merge_in_all="ceph-common$|ceph-docker-common$" # pipe separated list of roles you want to merge in all.yml.sample, MUST end with '$', e.g: 'foo$|bar$' +do_not_generate="ceph-common$|ceph-docker-common$" # pipe separated list of roles we don't want to generate sample file, MUST end with '$', e.g: 'foo$|bar$' ############# @@ -52,7 +51,7 @@ generate_group_vars_file () { for role in "$basedir"/roles/ceph-*; do rolename=$(basename "$role") - if echo "$rolename" | grep -qE "$merge_in_all"; then + if [[ $rolename == "ceph-defaults" ]]; then output="all.yml.sample" elif [[ $rolename == "ceph-agent" ]]; then output="agent.yml.sample" @@ -62,17 +61,13 @@ for role in "$basedir"/roles/ceph-*; do output="${rolename:5}s.yml.sample" fi - - # Do not re-regenerate the header for certain roles - # since we merge them in all.yml.sample - if ! echo "$rolename" | grep -qE "$no_header"; then - populate_header - fi - defaults="$role"/defaults/main.yml if [[ ! -f $defaults ]]; then continue fi - generate_group_vars_file + if ! echo "$rolename" | grep -qE "$do_not_generate"; then + populate_header + generate_group_vars_file + fi done diff --git a/group_vars/all.yml.sample b/group_vars/all.yml.sample index 5466c4ce67..de54eca35b 100644 --- a/group_vars/all.yml.sample +++ b/group_vars/all.yml.sample @@ -30,6 +30,12 @@ dummy: # INSTALL # ########### +# Set uid/gid to default '64045' for bootstrap directories. +# '64045' is used for debian based distros. It must be set to 167 in case of rhel based distros. +# These values have to be set according to the base OS used by the container image, NOT the host. +#bootstrap_dirs_owner: "64045" +#bootstrap_dirs_group: "64045" + #mon_group_name: mons #osd_group_name: osds #rgw_group_name: rgws @@ -197,9 +203,12 @@ dummy: # generated, you may find it useful to disable fsid generation to # avoid cluttering up your ansible repo. If you set `generate_fsid` to # false, you *must* generate `fsid` in another way. +# ACTIVATE THE FSID VARIABLE FOR NON-VAGRANT DEPLOYMENT #fsid: "{{ cluster_uuid.stdout }}" #generate_fsid: true +#ceph_conf_key_directory: /etc/ceph + #cephx: true #max_open_files: 131072 @@ -324,6 +333,14 @@ dummy: #handler_health_osd_check_delay: 30 #handler_health_osd_check: true +# Confiure the type of NFS gatway access. At least one must be enabled for an +# NFS role to be useful +# +# Set this to true to enable File access via NFS. Requires an MDS role. +#nfs_file_gw: true +# Set this to true to enable Object access via NFS. Requires an RGW role. +#nfs_obj_gw: false + ################### # CONFIG OVERRIDE # ################### @@ -358,7 +375,7 @@ dummy: ########## # DOCKER # ########## - +#docker_exec_cmd: #docker: false #ceph_docker_image: "ceph/daemon" #ceph_docker_image_tag: latest @@ -368,42 +385,10 @@ dummy: #containerized_deployment_with_kv: false #containerized_deployment: false #mon_containerized_default_ceph_conf_with_kv: false - -# Confiure the type of NFS gatway access. At least one must be enabled for an -# NFS role to be useful -# -# Set this to true to enable File access via NFS. Requires an MDS role. 
-#nfs_file_gw: true -# Set this to true to enable Object access via NFS. Requires an RGW role. -#nfs_obj_gw: false - -# this is only here for usage with the rolling_update.yml playbook -# do not ever change this here -#rolling_update: false - -#fsid: "{{ cluster_uuid.stdout }}" -#generate_fsid: true #ceph_docker_registry: docker.io #ceph_docker_enable_centos_extra_repo: false - #ceph_docker_on_openstack: false -#mon_use_fqdn: false # if set to true, the MON name used will be the fqdn - -# Set uid/gid to default '64045' for bootstrap directories. -# '64045' is used for debian based distros. It must be set to 167 in case of rhel based distros. -# These values have to be set according to the base OS used by the container image, NOT the host. -#bootstrap_dirs_owner: "64045" -#bootstrap_dirs_group: "64045" - -#ceph_conf_key_directory: /etc/ceph - -########### -# Network # -########### -#monitor_interface: 'interface' -#monitor_address: '0.0.0.0' -#monitor_address_block: [] ############ # KV store # @@ -413,3 +398,8 @@ dummy: #kv_port: 2379 #containerized_deployment_with_kv: false + +# this is only here for usage with the rolling_update.yml playbook +# do not ever change this here +#rolling_update: false + diff --git a/group_vars/mons.yml.sample b/group_vars/mons.yml.sample index 1069c5f94f..ab3b058c67 100644 --- a/group_vars/mons.yml.sample +++ b/group_vars/mons.yml.sample @@ -13,12 +13,9 @@ dummy: # GENERAL # ########### -#fetch_directory: fetch/ - #mon_group_name: mons # ACTIVATE BOTH FSID AND MONITOR_SECRET VARIABLES FOR NON-VAGRANT DEPLOYMENT -#fsid: "{{ cluster_uuid.stdout }}" #monitor_secret: "{{ monitor_keyring.stdout }}" #admin_secret: 'admin_secret' @@ -116,7 +113,6 @@ dummy: ########## # DOCKER # ########## -#docker_exec_cmd: #ceph_mon_docker_subnet: "{{ public_network }}"# subnet of the monitor_interface # ceph_mon_docker_extra_env: diff --git a/group_vars/osds.yml.sample b/group_vars/osds.yml.sample index 5dcacfca96..0a270399c7 100644 --- a/group_vars/osds.yml.sample +++ b/group_vars/osds.yml.sample @@ -15,8 +15,6 @@ dummy: # GENERAL # ########### -#fetch_directory: fetch/ - # Even though OSD nodes should not have the admin key # at their disposal, some people might want to have it # distributed on OSD nodes. Setting 'copy_admin_key' to 'true' @@ -58,9 +56,6 @@ dummy: # CEPH OPTIONS ############## -# ACTIVATE THE FSID VARIABLE FOR NON-VAGRANT DEPLOYMENT -#fsid: "{{ cluster_uuid.stdout }}" - # Devices to be used as OSDs # You can pre-provision disks that are not present yet. # Ansible will just skip them. 
Newly added disk will be diff --git a/infrastructure-playbooks/osd-configure.yml b/infrastructure-playbooks/osd-configure.yml index a193dca102..b35e121428 100644 --- a/infrastructure-playbooks/osd-configure.yml +++ b/infrastructure-playbooks/osd-configure.yml @@ -10,9 +10,11 @@ - hosts: mons become: True roles: + - ceph-defaults - ceph-fetch-keys - hosts: osds become: True roles: - - ceph-osd + - ceph-defaults + - ceph-osd diff --git a/infrastructure-playbooks/purge-docker-cluster.yml b/infrastructure-playbooks/purge-docker-cluster.yml index 9ef7ccc821..03c623f614 100644 --- a/infrastructure-playbooks/purge-docker-cluster.yml +++ b/infrastructure-playbooks/purge-docker-cluster.yml @@ -97,13 +97,21 @@ tasks: - - name: disable ceph rgw service +# For backward compatibility + - name: disable ceph rgw service (old unit name, for backward compatibility) service: name: "ceph-rgw@{{ ansible_hostname }}" state: stopped enabled: no ignore_errors: true + - name: disable ceph rgw service (new unit name) + service: + name: "ceph-radosgw@{{ ansible_hostname }}" + state: stopped + enabled: no + ignore_errors: true + - name: remove ceph rgw container docker: image: "{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}" @@ -113,8 +121,12 @@ - name: remove ceph rgw service file: - path: /etc/systemd/system/ceph-rgw@.service + path: "{{ item }}" state: absent + with_items: +# For backward compatibility + - /etc/systemd/system/ceph-rgw@.service + - /etc/systemd/system/ceph-radosgw@.service - name: remove ceph rgw image docker_image: diff --git a/infrastructure-playbooks/rgw-standalone.yml b/infrastructure-playbooks/rgw-standalone.yml index d4cba6d189..409d3282ac 100644 --- a/infrastructure-playbooks/rgw-standalone.yml +++ b/infrastructure-playbooks/rgw-standalone.yml @@ -6,9 +6,11 @@ - hosts: mons become: True roles: + - ceph-defaults - ceph-fetch-keys - hosts: rgws become: True roles: - - ceph-rgw + - ceph-defaults + - ceph-rgw diff --git a/infrastructure-playbooks/rolling_update.yml b/infrastructure-playbooks/rolling_update.yml index 79a32317fa..9ddd4a77c5 100644 --- a/infrastructure-playbooks/rolling_update.yml +++ b/infrastructure-playbooks/rolling_update.yml @@ -41,6 +41,7 @@ - "{{ mds_group_name|default('mdss') }}" - "{{ rgw_group_name|default('rgws') }}" - "{{ mgr_group_name|default('mgrs') }}" + - "{{ client_group_name|default('clients') }}" become: True tasks: @@ -91,6 +92,9 @@ - not containerized_deployment roles: + - ceph-defaults + - { role: ceph-common, when: not containerized_deployment } + - { role: ceph-docker-common, when: containerized_deployment } - ceph-mon post_tasks: @@ -224,6 +228,9 @@ - not containerized_deployment roles: + - ceph-defaults + - { role: ceph-common, when: not containerized_deployment } + - { role: ceph-docker-common, when: containerized_deployment } - ceph-osd post_tasks: @@ -341,6 +348,9 @@ - not containerized_deployment roles: + - ceph-defaults + - { role: ceph-common, when: not containerized_deployment } + - { role: ceph-docker-common, when: containerized_deployment } - ceph-mds post_tasks: @@ -411,6 +421,9 @@ - not containerized_deployment roles: + - ceph-defaults + - { role: ceph-common, when: not containerized_deployment } + - { role: ceph-docker-common, when: containerized_deployment } - ceph-rgw post_tasks: @@ -437,9 +450,26 @@ - name: restart containerized ceph rgws with systemd service: - name: ceph-rgw@{{ ansible_hostname }} + name: ceph-radosgw@{{ ansible_hostname }} state: restarted enabled: yes when: - ansible_service_mgr == 
'systemd' - containerized_deployment + + +- name: upgrade ceph client node + + vars: + upgrade_ceph_packages: True + + hosts: + - "{{ client_group_name|default('clients') }}" + + serial: 1 + become: True + + roles: + - ceph-defaults + - ceph-common + - ceph-client diff --git a/infrastructure-playbooks/shrink-mon.yml b/infrastructure-playbooks/shrink-mon.yml index e99a11a1e0..0471f245c3 100644 --- a/infrastructure-playbooks/shrink-mon.yml +++ b/infrastructure-playbooks/shrink-mon.yml @@ -27,7 +27,7 @@ private: no tasks: - - include_vars: roles/ceph-common/defaults/main.yml + - include_vars: roles/ceph-defaults/defaults/main.yml - include_vars: group_vars/all.yml - name: exit playbook, if only one monitor is present in cluster diff --git a/infrastructure-playbooks/shrink-osd.yml b/infrastructure-playbooks/shrink-osd.yml index 3fc54ce78a..7ffe1baf11 100644 --- a/infrastructure-playbooks/shrink-osd.yml +++ b/infrastructure-playbooks/shrink-osd.yml @@ -27,7 +27,7 @@ private: no tasks: - - include_vars: roles/ceph-common/defaults/main.yml + - include_vars: roles/ceph-defaults/defaults/main.yml - include_vars: group_vars/all.yml - name: exit playbook, if user did not mean to shrink cluster diff --git a/infrastructure-playbooks/take-over-existing-cluster.yml b/infrastructure-playbooks/take-over-existing-cluster.yml index fe4f3a36f4..b4db027038 100644 --- a/infrastructure-playbooks/take-over-existing-cluster.yml +++ b/infrastructure-playbooks/take-over-existing-cluster.yml @@ -14,16 +14,17 @@ - hosts: mons become: True vars_files: - - roles/ceph-common/defaults/main.yml + - roles/ceph-defaults/defaults/main.yml - group_vars/all.yml roles: + - ceph-defaults - ceph-fetch-keys - hosts: all become: true tasks: - - include_vars: roles/ceph-common/defaults/main.yml + - include_vars: roles/ceph-defaults/defaults/main.yml - include_vars: group_vars/all.yml - name: get the name of the existing ceph cluster diff --git a/roles/ceph-client/meta/main.yml b/roles/ceph-client/meta/main.yml index f0a366a395..98d8c91d79 100644 --- a/roles/ceph-client/meta/main.yml +++ b/roles/ceph-client/meta/main.yml @@ -10,5 +10,4 @@ galaxy_info: - trusty categories: - system -dependencies: - - { role: ceph.ceph-common } +dependencies: [] diff --git a/roles/ceph-common/defaults/main.yml b/roles/ceph-common/defaults/main.yml index b3e0352f09..ed97d539c0 100644 --- a/roles/ceph-common/defaults/main.yml +++ b/roles/ceph-common/defaults/main.yml @@ -1,374 +1 @@ --- -# You can override vars by using host or group vars - -########### -# GENERAL # -########### - -fetch_directory: fetch/ - -# The 'cluster' variable determines the name of the cluster. -# Changing the default value to something else means that you will -# need to change all the command line calls as well, for example if -# your cluster name is 'foo': -# "ceph health" will become "ceph --cluster foo health" -# -# An easier way to handle this is to use the environment variable CEPH_ARGS -# So run: "export CEPH_ARGS="--cluster foo" -# With that you will be able to run "ceph health" normally -cluster: ceph - -########### -# INSTALL # -########### - -mon_group_name: mons -osd_group_name: osds -rgw_group_name: rgws -mds_group_name: mdss -nfs_group_name: nfss -restapi_group_name: restapis -rbdmirror_group_name: rbdmirrors -client_group_name: clients -iscsi_group_name: iscsigws -mgr_group_name: mgrs - -# If check_firewall is true, then ansible will try to determine if the -# Ceph ports are blocked by a firewall. 
If the machine running ansible -# cannot reach the Ceph ports for some other reason, you may need or -# want to set this to False to skip those checks. -check_firewall: False - -# This variable determines if ceph packages can be updated. If False, the -# package resources will use "state=present". If True, they will use -# "state=latest". -upgrade_ceph_packages: False - -# /!\ EITHER ACTIVE ceph_stable OR ceph_stable_uca OR ceph_dev OR ceph_custom /!\ - -debian_package_dependencies: - - python-pycurl - - hdparm - -centos_package_dependencies: - - python-pycurl - - hdparm - - epel-release - - python-setuptools - - libselinux-python - -redhat_package_dependencies: - - python-pycurl - - hdparm - - python-setuptools - -# Enable the ntp service by default to avoid clock skew on -# ceph nodes -ntp_service_enabled: true - -# Whether or not to install the ceph-test package. -ceph_test: False - -## Configure package origin -# -ceph_origin: 'upstream' # or 'distro' or 'local' -# 'distro' means that no separate repo file will be added -# you will get whatever version of Ceph is included in your Linux distro. -# 'local' means that the ceph binaries will be copied over from the local machine - -# LOCAL CEPH INSTALLATION (ceph_origin==local) -# -# Path to DESTDIR of the ceph install -#ceph_installation_dir: "/path/to/ceph_installation/" -# Whether or not to use installer script rundep_installer.sh -# This script takes in rundep and installs the packages line by line onto the machine -# If this is set to false then it is assumed that the machine ceph is being copied onto will already have -# all runtime dependencies installed -#use_installer: false -# Root directory for ceph-ansible -#ansible_dir: "/path/to/ceph-ansible" - -ceph_use_distro_backports: false # DEBIAN ONLY - -# STABLE -######## - -# COMMUNITY VERSION -ceph_stable: false # use ceph stable branch -ceph_mirror: http://download.ceph.com -ceph_stable_key: https://download.ceph.com/keys/release.asc -ceph_stable_release: dummy -ceph_stable_repo: "{{ ceph_mirror }}/debian-{{ ceph_stable_release }}" - -###################################### -# Releases name to number dictionary # -###################################### -ceph_release_num: - dumpling: 0.67 - emperor: 0.72 - firefly: 0.80 - giant: 0.87 - hammer: 0.94 - infernalis: 9 - jewel: 10 - kraken: 11 - luminous: 12 - -# Use the option below to specify your applicable package tree, eg. when using non-LTS Ubuntu versions -# # for a list of available Debian distributions, visit http://download.ceph.com/debian-{{ ceph_stable_release }}/dists/ -# for more info read: https://github.com/ceph/ceph-ansible/issues/305 -#ceph_stable_distro_source: - -# This option is needed for _both_ stable and dev version, so please always fill the right version -# # for supported distros, see http://download.ceph.com/rpm-{{ ceph_stable_release }}/ -ceph_stable_redhat_distro: el7 - -# ENTERPRISE VERSION RED HAT STORAGE (from 1.3) -# This version is only supported on RHEL >= 7.1 -# As of RHEL 7.1, libceph.ko and rbd.ko are now included in Red Hat's kernel -# packages natively. The RHEL 7.1 kernel packages are more stable and secure than -# using these 3rd-party kmods with RHEL 7.0. Please update your systems to RHEL -# 7.1 or later if you want to use the kernel RBD client. -# -# The CephFS kernel client is undergoing rapid development upstream, and we do -# not recommend running the CephFS kernel module on RHEL 7's 3.10 kernel at this -# time. 
Please use ELRepo's latest upstream 4.x kernels if you want to run CephFS -# on RHEL 7. -# -# -# Backward compatibility of variable names -# Commit 492518a2 changed variable names of rhcs installations -# to not break backward compatiblity we re-declare these variables -# with the content of the new variable -ceph_rhcs: "{{ ceph_stable_rh_storage | default(false) }}" -# This will affect how/what repositories are enabled depending on the desired -# version. The previous version was 1.3. The current version is 2. -ceph_rhcs_version: "{{ ceph_stable_rh_storage_version | default(2) }}" -ceph_rhcs_cdn_install: "{{ ceph_stable_rh_storage_cdn_install | default(false) }}" # assumes all the nodes can connect to cdn.redhat.com -ceph_rhcs_iso_install: "{{ ceph_stable_rh_storage_iso_install | default(false) }}" # usually used when nodes don't have access to cdn.redhat.com -ceph_rhcs_iso_path: "{{ ceph_stable_rh_storage_iso_path | default('') }}" -ceph_rhcs_mount_path: "{{ ceph_stable_rh_storage_mount_path | default('/tmp/rh-storage-mount') }}" -ceph_rhcs_repository_path: "{{ ceph_stable_rh_storage_repository_path | default('/tmp/rh-storage-repo') }}" # where to copy iso's content - - -# UBUNTU CLOUD ARCHIVE -# This allows the install of Ceph from the Ubuntu Cloud Archive. The Ubuntu Cloud Archive -# usually has newer Ceph releases than the normal distro repository. -# -ceph_stable_uca: false -#ceph_stable_repo_uca: "http://ubuntu-cloud.archive.canonical.com/ubuntu" -#ceph_stable_openstack_release_uca: liberty -#ceph_stable_release_uca: "{{ansible_lsb.codename}}-updates/{{ceph_stable_openstack_release_uca}}" - -# DEV -# ### - -ceph_dev: false # use ceph development branch -ceph_dev_branch: master # development branch you would like to use e.g: master, wip-hack -ceph_dev_sha1: latest # distinct sha1 to use, defaults to 'latest' (as in latest built) - -# CUSTOM -# ### - -# Use a custom repository to install ceph. For RPM, ceph_custom_repo should be -# a URL to the .repo file to be installed on the targets. For deb, -# ceph_custom_repo should be the URL to the repo base. -ceph_custom: false # use custom ceph repository -ceph_custom_repo: https://server.domain.com/ceph-custom-repo - - -###################### -# CEPH CONFIGURATION # -###################### - -## Ceph options -# -# Each cluster requires a unique, consistent filesystem ID. By -# default, the playbook generates one for you and stores it in a file -# in `fetch_directory`. If you want to customize how the fsid is -# generated, you may find it useful to disable fsid generation to -# avoid cluttering up your ansible repo. If you set `generate_fsid` to -# false, you *must* generate `fsid` in another way. -fsid: "{{ cluster_uuid.stdout }}" -generate_fsid: true - -cephx: true -max_open_files: 131072 - -## Client options -# -rbd_cache: "true" -rbd_cache_writethrough_until_flush: "true" -rbd_concurrent_management_ops: 20 - -rbd_client_directories: true # this will create rbd_client_log_path and rbd_client_admin_socket_path directories with proper permissions - -# Permissions for the rbd_client_log_path and -# rbd_client_admin_socket_path. Depending on your use case for Ceph -# you may want to change these values. The default, which is used if -# any of the variables are unset or set to a false value (like `null` -# or `false`) is to automatically determine what is appropriate for -# the Ceph version with non-OpenStack workloads -- ceph:ceph and 0770 -# for infernalis releases, and root:root and 1777 for pre-infernalis -# releases. 
-# -# For other use cases, including running Ceph with OpenStack, you'll -# want to set these differently: -# -# For OpenStack on RHEL, you'll want: -# rbd_client_directory_owner: "qemu" -# rbd_client_directory_group: "libvirtd" (or "libvirt", depending on your version of libvirt) -# rbd_client_directory_mode: "0755" -# -# For OpenStack on Ubuntu or Debian, set: -# rbd_client_directory_owner: "libvirt-qemu" -# rbd_client_directory_group: "kvm" -# rbd_client_directory_mode: "0755" -# -# If you set rbd_client_directory_mode, you must use a string (e.g., -# 'rbd_client_directory_mode: "0755"', *not* -# 'rbd_client_directory_mode: 0755', or Ansible will complain: mode -# must be in octal or symbolic form -rbd_client_directory_owner: null -rbd_client_directory_group: null -rbd_client_directory_mode: null - -rbd_client_log_path: /var/log/ceph -rbd_client_log_file: "{{ rbd_client_log_path }}/qemu-guest-$pid.log" # must be writable by QEMU and allowed by SELinux or AppArmor -rbd_client_admin_socket_path: /var/run/ceph # must be writable by QEMU and allowed by SELinux or AppArmor - -## Monitor options -# -# You must define either monitor_interface, monitor_address or monitor_address_block. -# These variables must be defined at least in all.yml and overrided if needed (inventory host file or group_vars/*.yml). -# Eg. If you want to specify for each monitor which address the monitor will bind to you can set it in your **inventory host file** by using 'monitor_address' variable. -# Preference will go to monitor_address if both monitor_address and monitor_interface are defined. -# To use an IPv6 address, use the monitor_address setting instead (and set ip_version to ipv6) -monitor_interface: interface -monitor_address: 0.0.0.0 -monitor_address_block: [] -# set to either ipv4 or ipv6, whichever your network is using -ip_version: ipv4 -mon_use_fqdn: false # if set to true, the MON name used will be the fqdn in the ceph.conf - -## OSD options -# -journal_size: 5120 # OSD journal size in MB -public_network: 0.0.0.0/0 -cluster_network: "{{ public_network }}" -osd_mkfs_type: xfs -osd_mkfs_options_xfs: -f -i size=2048 -osd_mount_options_xfs: noatime,largeio,inode64,swalloc -osd_objectstore: filestore - -# xattrs. by default, 'filestore xattr use omap' is set to 'true' if -# 'osd_mkfs_type' is set to 'ext4'; otherwise it isn't set. This can -# be set to 'true' or 'false' to explicitly override those -# defaults. Leave it 'null' to use the default for your chosen mkfs -# type. -filestore_xattr_use_omap: null - -## MDS options -# -mds_use_fqdn: false # if set to true, the MDS name used will be the fqdn in the ceph.conf -mds_allow_multimds: false -mds_max_mds: 3 - -## Rados Gateway options -# -#radosgw_dns_name: your.subdomain.tld # subdomains used by radosgw. 
See http://ceph.com/docs/master/radosgw/config/#enabling-subdomain-s3-calls -radosgw_resolve_cname: false # enable for radosgw to resolve DNS CNAME based bucket names -radosgw_civetweb_port: 8080 -radosgw_civetweb_bind_ip: "{{ ansible_default_ipv4.address }}" # when using ipv6 enclose with brackets: "[{{ ansible_default_ipv6.address }}]" -radosgw_civetweb_num_threads: 100 -# For additional civetweb configuration options available such as SSL, logging, -# keepalive, and timeout settings, please see the civetweb docs at -# https://github.com/civetweb/civetweb/blob/master/docs/UserManual.md -radosgw_civetweb_options: "port={{ radosgw_civetweb_bind_ip }}:{{ radosgw_civetweb_port }} num_threads={{ radosgw_civetweb_num_threads }}" -radosgw_keystone: false # activate OpenStack Keystone options full detail here: http://ceph.com/docs/master/radosgw/keystone/ -# Rados Gateway options -email_address: foo@bar.com - -## REST API options -# -restapi_interface: "{{ monitor_interface }}" -restapi_address: "{{ monitor_address }}" -restapi_port: 5000 - -## Testing mode -# enable this mode _only_ when you have a single node -# if you don't want it keep the option commented -#common_single_host_mode: true - -## Handlers - restarting daemons after a config change -# if for whatever reasons the content of your ceph configuration changes -# ceph daemons will be restarted as well. At the moment, we can not detect -# which config option changed so all the daemons will be restarted. Although -# this restart will be serialized for each node, in between a health check -# will be performed so we make sure we don't move to the next node until -# ceph is not healthy -# Obviously between the checks (for monitors to be in quorum and for osd's pgs -# to be clean) we have to wait. These retries and delays can be configurable -# for both monitors and osds. -handler_health_mon_check_retries: 5 -handler_health_mon_check_delay: 10 -handler_health_osd_check_retries: 40 -handler_health_osd_check_delay: 30 -handler_health_osd_check: true - -################### -# CONFIG OVERRIDE # -################### - -# Ceph configuration file override. -# This allows you to specify more configuration options -# using an INI style format. -# The following sections are supported: [global], [mon], [osd], [mds], [rgw] -# -# Example: -# ceph_conf_overrides: -# global: -# foo: 1234 -# bar: 5678 -# -ceph_conf_overrides: {} - - -############# -# OS TUNING # -############# - -disable_transparent_hugepage: true -os_tuning_params: - - { name: kernel.pid_max, value: 4194303 } - - { name: fs.file-max, value: 26234859 } - - { name: vm.zone_reclaim_mode, value: 0 } - - { name: vm.swappiness, value: 10 } - - { name: vm.min_free_kbytes, value: "{{ vm_min_free_kbytes }}" } - - -########## -# DOCKER # -########## - -docker: false -ceph_docker_image: "ceph/daemon" -ceph_docker_image_tag: latest - -# Do not comment the following variables containerized_deployment_* here. These variables are being used -# by ceph.conf.j2 template. so it should always be defined -containerized_deployment_with_kv: false -containerized_deployment: false -mon_containerized_default_ceph_conf_with_kv: false - -# Confiure the type of NFS gatway access. At least one must be enabled for an -# NFS role to be useful -# -# Set this to true to enable File access via NFS. Requires an MDS role. -nfs_file_gw: true -# Set this to true to enable Object access via NFS. Requires an RGW role. 
-nfs_obj_gw: false - -# this is only here for usage with the rolling_update.yml playbook -# do not ever change this here -rolling_update: false diff --git a/roles/ceph-common/tasks/checks/check_socket.yml b/roles/ceph-common/tasks/checks/check_socket.yml deleted file mode 100644 index 79b512c8ea..0000000000 --- a/roles/ceph-common/tasks/checks/check_socket.yml +++ /dev/null @@ -1,15 +0,0 @@ ---- -# These checks are used to avoid running handlers at initial deployment. -- name: check for a ceph socket - shell: "stat /var/run/ceph/*.asok > /dev/null 2>&1" - changed_when: false - failed_when: false - always_run: true - register: socket - -- name: check for a rados gateway socket - shell: "stat {{ rbd_client_admin_socket_path }}*.asok > /dev/null 2>&1" - changed_when: false - failed_when: false - always_run: true - register: socketrgw diff --git a/roles/ceph-common/tasks/generate_cluster_fsid.yml b/roles/ceph-common/tasks/generate_cluster_fsid.yml deleted file mode 100644 index 5f49612cac..0000000000 --- a/roles/ceph-common/tasks/generate_cluster_fsid.yml +++ /dev/null @@ -1,29 +0,0 @@ ---- -- name: generate cluster fsid - local_action: shell python -c 'import uuid; print(str(uuid.uuid4()))' | tee {{ fetch_directory }}/ceph_cluster_uuid.conf - creates="{{ fetch_directory }}/ceph_cluster_uuid.conf" - register: cluster_uuid - become: false - when: - - generate_fsid - - ceph_current_fsid.rc != 0 - -- name: reuse cluster fsid when cluster is already running - local_action: shell echo {{ fsid }} | tee {{ fetch_directory }}/ceph_cluster_uuid.conf - creates="{{ fetch_directory }}/ceph_cluster_uuid.conf" - become: false - when: ceph_current_fsid.rc == 0 - -- name: read cluster fsid if it already exists - local_action: command cat {{ fetch_directory }}/ceph_cluster_uuid.conf - removes="{{ fetch_directory }}/ceph_cluster_uuid.conf" - changed_when: false - register: cluster_uuid - become: false - always_run: true - when: generate_fsid - -- name: set fsid fact when generate_fsid = true - set_fact: - fsid: "{{ cluster_uuid.stdout }}" - when: generate_fsid diff --git a/roles/ceph-common/tasks/main.yml b/roles/ceph-common/tasks/main.yml index 71b482cbae..9c65a27bfc 100644 --- a/roles/ceph-common/tasks/main.yml +++ b/roles/ceph-common/tasks/main.yml @@ -2,11 +2,6 @@ - include: ./checks/check_system.yml - include: ./checks/check_mandatory_vars.yml -# Set ceph_release -- include: ./release.yml - tags: - - always - - include: ./checks/check_firewall.yml when: check_firewall # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent) @@ -93,10 +88,24 @@ # Hard code this so we will skip the entire file instead of individual tasks (Default isn't Consistent) static: False -- include: facts.yml -- include: ./checks/check_socket.yml +- name: get ceph version + command: ceph --version + changed_when: false + always_run: yes + register: ceph_version + +- set_fact: + ceph_version: "{{ ceph_version.stdout.split(' ')[2] }}" + +- include: facts_mon_fsid.yml + run_once: true + when: + - cephx + - not monitor_keyring_conf.stat.exists + - ceph_current_fsid.rc == 0 + - mon_group_name in group_names + - include: create_ceph_initial_dirs.yml -- include: generate_cluster_fsid.yml - include: generate_ceph_conf.yml - include: create_rbd_client_dir.yml - include: configure_cluster_name.yml diff --git a/roles/ceph-common/tasks/release.yml b/roles/ceph-common/tasks/release.yml deleted file mode 100644 index f3e33a32a3..0000000000 --- a/roles/ceph-common/tasks/release.yml +++ /dev/null @@ -1,4 
+0,0 @@ ---- -# Set ceph_release to ceph_stable by default -- set_fact: - ceph_release: "{{ ceph_stable_release }}" diff --git a/roles/ceph-common/templates/restart_osd_daemon.sh.j2 b/roles/ceph-common/templates/restart_osd_daemon.sh.j2 deleted file mode 100644 index ae31f405e3..0000000000 --- a/roles/ceph-common/templates/restart_osd_daemon.sh.j2 +++ /dev/null @@ -1,38 +0,0 @@ -#!/bin/bash - -RETRIES="{{ handler_health_osd_check_retries }}" -DELAY="{{ handler_health_osd_check_delay }}" -CEPH_CLI="--name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/{{ cluster }}.keyring --cluster {{ cluster }}" - -check_pgs() { - while [ $RETRIES -ne 0 ]; do - test "[""$(ceph $CEPH_CLI -s -f json | python -c 'import sys, json; print(json.load(sys.stdin)["pgmap"]["num_pgs"])')""]" = "$(ceph $CEPH_CLI -s -f json | python -c 'import sys, json; print [ i["count"] for i in json.load(sys.stdin)["pgmap"]["pgs_by_state"] if i["state_name"] == "active+clean"]')" - RET=$? - test $RET -eq 0 && return 0 - sleep $DELAY - let RETRIES=RETRIES-1 - done - # PGs not clean, exiting with return code 1 - echo "Error while running 'ceph $CEPH_CLI -s', PGs were not reported as active+clean" - echo "It is possible that the cluster has less OSDs than the replica configuration" - echo "Will refuse to continue" - ceph $CEPH_CLI -s - exit 1 -} - -for id in $(ls /var/lib/ceph/osd/ | sed 's/.*-//'); do - # First, restart daemon(s) - systemctl restart ceph-osd@${id} - # We need to wait because it may take some time for the socket to actually exists - COUNT=10 - # Wait and ensure the socket exists after restarting the daemon - SOCKET=/var/run/ceph/{{ cluster }}-osd.${id}.asok - while [ $COUNT -ne 0 ]; do - test -S $SOCKET && check_pgs && continue 2 - sleep 1 - let COUNT=COUNT-1 - done - # If we reach this point, it means the socket is not present. - echo "Socket file ${SOCKET} could not be found, which means the osd daemon is not running." - exit 1 -done diff --git a/roles/ceph-defaults/defaults/main.yml b/roles/ceph-defaults/defaults/main.yml new file mode 100644 index 0000000000..3a41ed5fba --- /dev/null +++ b/roles/ceph-defaults/defaults/main.yml @@ -0,0 +1,396 @@ +--- +# You can override vars by using host or group vars + +########### +# GENERAL # +########### + +fetch_directory: fetch/ + +# The 'cluster' variable determines the name of the cluster. +# Changing the default value to something else means that you will +# need to change all the command line calls as well, for example if +# your cluster name is 'foo': +# "ceph health" will become "ceph --cluster foo health" +# +# An easier way to handle this is to use the environment variable CEPH_ARGS +# So run: "export CEPH_ARGS="--cluster foo" +# With that you will be able to run "ceph health" normally +cluster: ceph + +########### +# INSTALL # +########### + +# Set uid/gid to default '64045' for bootstrap directories. +# '64045' is used for debian based distros. It must be set to 167 in case of rhel based distros. +# These values have to be set according to the base OS used by the container image, NOT the host. +bootstrap_dirs_owner: "64045" +bootstrap_dirs_group: "64045" + +mon_group_name: mons +osd_group_name: osds +rgw_group_name: rgws +mds_group_name: mdss +nfs_group_name: nfss +restapi_group_name: restapis +rbdmirror_group_name: rbdmirrors +client_group_name: clients +iscsi_group_name: iscsigws +mgr_group_name: mgrs + +# If check_firewall is true, then ansible will try to determine if the +# Ceph ports are blocked by a firewall. 
If the machine running ansible +# cannot reach the Ceph ports for some other reason, you may need or +# want to set this to False to skip those checks. +check_firewall: False + +# This variable determines if ceph packages can be updated. If False, the +# package resources will use "state=present". If True, they will use +# "state=latest". +upgrade_ceph_packages: False + +# /!\ EITHER ACTIVE ceph_stable OR ceph_stable_uca OR ceph_dev OR ceph_custom /!\ + +debian_package_dependencies: + - python-pycurl + - hdparm + +centos_package_dependencies: + - python-pycurl + - hdparm + - epel-release + - python-setuptools + - libselinux-python + +redhat_package_dependencies: + - python-pycurl + - hdparm + - python-setuptools + +# Enable the ntp service by default to avoid clock skew on +# ceph nodes +ntp_service_enabled: true + +# Whether or not to install the ceph-test package. +ceph_test: False + +## Configure package origin +# +ceph_origin: 'upstream' # or 'distro' or 'local' +# 'distro' means that no separate repo file will be added +# you will get whatever version of Ceph is included in your Linux distro. +# 'local' means that the ceph binaries will be copied over from the local machine + +# LOCAL CEPH INSTALLATION (ceph_origin==local) +# +# Path to DESTDIR of the ceph install +#ceph_installation_dir: "/path/to/ceph_installation/" +# Whether or not to use installer script rundep_installer.sh +# This script takes in rundep and installs the packages line by line onto the machine +# If this is set to false then it is assumed that the machine ceph is being copied onto will already have +# all runtime dependencies installed +#use_installer: false +# Root directory for ceph-ansible +#ansible_dir: "/path/to/ceph-ansible" + +ceph_use_distro_backports: false # DEBIAN ONLY + +# STABLE +######## + +# COMMUNITY VERSION +ceph_stable: false # use ceph stable branch +ceph_mirror: http://download.ceph.com +ceph_stable_key: https://download.ceph.com/keys/release.asc +ceph_stable_release: dummy +ceph_stable_repo: "{{ ceph_mirror }}/debian-{{ ceph_stable_release }}" + +###################################### +# Releases name to number dictionary # +###################################### +ceph_release_num: + dumpling: 0.67 + emperor: 0.72 + firefly: 0.80 + giant: 0.87 + hammer: 0.94 + infernalis: 9 + jewel: 10 + kraken: 11 + luminous: 12 + +# Use the option below to specify your applicable package tree, eg. when using non-LTS Ubuntu versions +# # for a list of available Debian distributions, visit http://download.ceph.com/debian-{{ ceph_stable_release }}/dists/ +# for more info read: https://github.com/ceph/ceph-ansible/issues/305 +#ceph_stable_distro_source: + +# This option is needed for _both_ stable and dev version, so please always fill the right version +# # for supported distros, see http://download.ceph.com/rpm-{{ ceph_stable_release }}/ +ceph_stable_redhat_distro: el7 + +# ENTERPRISE VERSION RED HAT STORAGE (from 1.3) +# This version is only supported on RHEL >= 7.1 +# As of RHEL 7.1, libceph.ko and rbd.ko are now included in Red Hat's kernel +# packages natively. The RHEL 7.1 kernel packages are more stable and secure than +# using these 3rd-party kmods with RHEL 7.0. Please update your systems to RHEL +# 7.1 or later if you want to use the kernel RBD client. +# +# The CephFS kernel client is undergoing rapid development upstream, and we do +# not recommend running the CephFS kernel module on RHEL 7's 3.10 kernel at this +# time. 
Please use ELRepo's latest upstream 4.x kernels if you want to run CephFS +# on RHEL 7. +# +# +# Backward compatibility of variable names +# Commit 492518a2 changed variable names of rhcs installations +# to not break backward compatiblity we re-declare these variables +# with the content of the new variable +ceph_rhcs: "{{ ceph_stable_rh_storage | default(false) }}" +# This will affect how/what repositories are enabled depending on the desired +# version. The previous version was 1.3. The current version is 2. +ceph_rhcs_version: "{{ ceph_stable_rh_storage_version | default(2) }}" +ceph_rhcs_cdn_install: "{{ ceph_stable_rh_storage_cdn_install | default(false) }}" # assumes all the nodes can connect to cdn.redhat.com +ceph_rhcs_iso_install: "{{ ceph_stable_rh_storage_iso_install | default(false) }}" # usually used when nodes don't have access to cdn.redhat.com +ceph_rhcs_iso_path: "{{ ceph_stable_rh_storage_iso_path | default('') }}" +ceph_rhcs_mount_path: "{{ ceph_stable_rh_storage_mount_path | default('/tmp/rh-storage-mount') }}" +ceph_rhcs_repository_path: "{{ ceph_stable_rh_storage_repository_path | default('/tmp/rh-storage-repo') }}" # where to copy iso's content + + +# UBUNTU CLOUD ARCHIVE +# This allows the install of Ceph from the Ubuntu Cloud Archive. The Ubuntu Cloud Archive +# usually has newer Ceph releases than the normal distro repository. +# +ceph_stable_uca: false +#ceph_stable_repo_uca: "http://ubuntu-cloud.archive.canonical.com/ubuntu" +#ceph_stable_openstack_release_uca: liberty +#ceph_stable_release_uca: "{{ansible_lsb.codename}}-updates/{{ceph_stable_openstack_release_uca}}" + +# DEV +# ### + +ceph_dev: false # use ceph development branch +ceph_dev_branch: master # development branch you would like to use e.g: master, wip-hack +ceph_dev_sha1: latest # distinct sha1 to use, defaults to 'latest' (as in latest built) + +# CUSTOM +# ### + +# Use a custom repository to install ceph. For RPM, ceph_custom_repo should be +# a URL to the .repo file to be installed on the targets. For deb, +# ceph_custom_repo should be the URL to the repo base. +ceph_custom: false # use custom ceph repository +ceph_custom_repo: https://server.domain.com/ceph-custom-repo + + +###################### +# CEPH CONFIGURATION # +###################### + +## Ceph options +# +# Each cluster requires a unique, consistent filesystem ID. By +# default, the playbook generates one for you and stores it in a file +# in `fetch_directory`. If you want to customize how the fsid is +# generated, you may find it useful to disable fsid generation to +# avoid cluttering up your ansible repo. If you set `generate_fsid` to +# false, you *must* generate `fsid` in another way. +# ACTIVATE THE FSID VARIABLE FOR NON-VAGRANT DEPLOYMENT +fsid: "{{ cluster_uuid.stdout }}" +generate_fsid: true + +ceph_conf_key_directory: /etc/ceph + +cephx: true +max_open_files: 131072 + +## Client options +# +rbd_cache: "true" +rbd_cache_writethrough_until_flush: "true" +rbd_concurrent_management_ops: 20 + +rbd_client_directories: true # this will create rbd_client_log_path and rbd_client_admin_socket_path directories with proper permissions + +# Permissions for the rbd_client_log_path and +# rbd_client_admin_socket_path. Depending on your use case for Ceph +# you may want to change these values. 
The default, which is used if +# any of the variables are unset or set to a false value (like `null` +# or `false`) is to automatically determine what is appropriate for +# the Ceph version with non-OpenStack workloads -- ceph:ceph and 0770 +# for infernalis releases, and root:root and 1777 for pre-infernalis +# releases. +# +# For other use cases, including running Ceph with OpenStack, you'll +# want to set these differently: +# +# For OpenStack on RHEL, you'll want: +# rbd_client_directory_owner: "qemu" +# rbd_client_directory_group: "libvirtd" (or "libvirt", depending on your version of libvirt) +# rbd_client_directory_mode: "0755" +# +# For OpenStack on Ubuntu or Debian, set: +# rbd_client_directory_owner: "libvirt-qemu" +# rbd_client_directory_group: "kvm" +# rbd_client_directory_mode: "0755" +# +# If you set rbd_client_directory_mode, you must use a string (e.g., +# 'rbd_client_directory_mode: "0755"', *not* +# 'rbd_client_directory_mode: 0755', or Ansible will complain: mode +# must be in octal or symbolic form +rbd_client_directory_owner: null +rbd_client_directory_group: null +rbd_client_directory_mode: null + +rbd_client_log_path: /var/log/ceph +rbd_client_log_file: "{{ rbd_client_log_path }}/qemu-guest-$pid.log" # must be writable by QEMU and allowed by SELinux or AppArmor +rbd_client_admin_socket_path: /var/run/ceph # must be writable by QEMU and allowed by SELinux or AppArmor + +## Monitor options +# +# You must define either monitor_interface, monitor_address or monitor_address_block. +# These variables must be defined at least in all.yml and overrided if needed (inventory host file or group_vars/*.yml). +# Eg. If you want to specify for each monitor which address the monitor will bind to you can set it in your **inventory host file** by using 'monitor_address' variable. +# Preference will go to monitor_address if both monitor_address and monitor_interface are defined. +# To use an IPv6 address, use the monitor_address setting instead (and set ip_version to ipv6) +monitor_interface: interface +monitor_address: 0.0.0.0 +monitor_address_block: [] +# set to either ipv4 or ipv6, whichever your network is using +ip_version: ipv4 +mon_use_fqdn: false # if set to true, the MON name used will be the fqdn in the ceph.conf + +## OSD options +# +journal_size: 5120 # OSD journal size in MB +public_network: 0.0.0.0/0 +cluster_network: "{{ public_network }}" +osd_mkfs_type: xfs +osd_mkfs_options_xfs: -f -i size=2048 +osd_mount_options_xfs: noatime,largeio,inode64,swalloc +osd_objectstore: filestore + +# xattrs. by default, 'filestore xattr use omap' is set to 'true' if +# 'osd_mkfs_type' is set to 'ext4'; otherwise it isn't set. This can +# be set to 'true' or 'false' to explicitly override those +# defaults. Leave it 'null' to use the default for your chosen mkfs +# type. +filestore_xattr_use_omap: null + +## MDS options +# +mds_use_fqdn: false # if set to true, the MDS name used will be the fqdn in the ceph.conf +mds_allow_multimds: false +mds_max_mds: 3 + +## Rados Gateway options +# +#radosgw_dns_name: your.subdomain.tld # subdomains used by radosgw. 
See http://ceph.com/docs/master/radosgw/config/#enabling-subdomain-s3-calls +radosgw_resolve_cname: false # enable for radosgw to resolve DNS CNAME based bucket names +radosgw_civetweb_port: 8080 +radosgw_civetweb_bind_ip: "{{ ansible_default_ipv4.address }}" # when using ipv6 enclose with brackets: "[{{ ansible_default_ipv6.address }}]" +radosgw_civetweb_num_threads: 100 +# For additional civetweb configuration options available such as SSL, logging, +# keepalive, and timeout settings, please see the civetweb docs at +# https://github.com/civetweb/civetweb/blob/master/docs/UserManual.md +radosgw_civetweb_options: "port={{ radosgw_civetweb_bind_ip }}:{{ radosgw_civetweb_port }} num_threads={{ radosgw_civetweb_num_threads }}" +radosgw_keystone: false # activate OpenStack Keystone options full detail here: http://ceph.com/docs/master/radosgw/keystone/ +# Rados Gateway options +email_address: foo@bar.com + +## REST API options +# +restapi_interface: "{{ monitor_interface }}" +restapi_address: "{{ monitor_address }}" +restapi_port: 5000 + +## Testing mode +# enable this mode _only_ when you have a single node +# if you don't want it keep the option commented +#common_single_host_mode: true + +## Handlers - restarting daemons after a config change +# if for whatever reasons the content of your ceph configuration changes +# ceph daemons will be restarted as well. At the moment, we can not detect +# which config option changed so all the daemons will be restarted. Although +# this restart will be serialized for each node, in between a health check +# will be performed so we make sure we don't move to the next node until +# ceph is not healthy +# Obviously between the checks (for monitors to be in quorum and for osd's pgs +# to be clean) we have to wait. These retries and delays can be configurable +# for both monitors and osds. +handler_health_mon_check_retries: 5 +handler_health_mon_check_delay: 10 +handler_health_osd_check_retries: 40 +handler_health_osd_check_delay: 30 +handler_health_osd_check: true + +# Confiure the type of NFS gatway access. At least one must be enabled for an +# NFS role to be useful +# +# Set this to true to enable File access via NFS. Requires an MDS role. +nfs_file_gw: true +# Set this to true to enable Object access via NFS. Requires an RGW role. +nfs_obj_gw: false + +################### +# CONFIG OVERRIDE # +################### + +# Ceph configuration file override. +# This allows you to specify more configuration options +# using an INI style format. +# The following sections are supported: [global], [mon], [osd], [mds], [rgw] +# +# Example: +# ceph_conf_overrides: +# global: +# foo: 1234 +# bar: 5678 +# +ceph_conf_overrides: {} + + +############# +# OS TUNING # +############# + +disable_transparent_hugepage: true +os_tuning_params: + - { name: kernel.pid_max, value: 4194303 } + - { name: fs.file-max, value: 26234859 } + - { name: vm.zone_reclaim_mode, value: 0 } + - { name: vm.swappiness, value: 10 } + - { name: vm.min_free_kbytes, value: "{{ vm_min_free_kbytes }}" } + + +########## +# DOCKER # +########## +docker_exec_cmd: +docker: false +ceph_docker_image: "ceph/daemon" +ceph_docker_image_tag: latest + +# Do not comment the following variables containerized_deployment_* here. These variables are being used +# by ceph.conf.j2 template. 
so it should always be defined +containerized_deployment_with_kv: false +containerized_deployment: false +mon_containerized_default_ceph_conf_with_kv: false +ceph_docker_registry: docker.io +ceph_docker_enable_centos_extra_repo: false +ceph_docker_on_openstack: false + + +############ +# KV store # +############ +kv_type: etcd +kv_endpoint: 127.0.0.1 +kv_port: 2379 +containerized_deployment_with_kv: false + + +# this is only here for usage with the rolling_update.yml playbook +# do not ever change this here +rolling_update: false diff --git a/roles/ceph-common/handlers/main.yml b/roles/ceph-defaults/handlers/main.yml similarity index 56% rename from roles/ceph-common/handlers/main.yml rename to roles/ceph-defaults/handlers/main.yml index cf3ed1d7ce..e8659f9042 100644 --- a/roles/ceph-common/handlers/main.yml +++ b/roles/ceph-defaults/handlers/main.yml @@ -17,39 +17,51 @@ - name: restart ceph mon daemon(s) command: /tmp/restart_mon_daemon.sh listen: "restart ceph mons" - when: # We do not want to run these checks on initial deployment (`socket.rc == 0`) - socket.rc == 0 - - ceph_current_fsid.rc == 0 - mon_group_name in group_names # This does not just restart OSDs but everything else too. Unfortunately # at this time the ansible role does not have an OSD id list to use # for restarting them specifically. -- block: - - name: copy osd restart script - template: - src: restart_osd_daemon.sh.j2 - dest: /tmp/restart_osd_daemon.sh - owner: root - group: root - mode: 0750 - listen: "restart ceph osds" +- name: copy osd restart script + template: + src: restart_osd_daemon.sh.j2 + dest: /tmp/restart_osd_daemon.sh + owner: root + group: root + mode: 0750 + listen: "restart ceph osds" + when: + - inventory_hostname in play_hosts + - osd_group_name in group_names - - name: restart ceph osds daemon(s) - command: /tmp/restart_osd_daemon.sh - listen: "restart ceph osds" - when: handler_health_osd_check +- name: restart containerized ceph osds daemon(s) + command: /tmp/restart_osd_daemon.sh + listen: "restart ceph osds" + with_items: "{{ socket_osd_container.results }}" + when: + # We do not want to run these checks on initial deployment (`socket_osd_container.results[n].rc == 0`) + # except when a crush location is specified. ceph-disk will start the osds before the osd crush location is specified + - ((crush_location is defined and crush_location) or item.get('rc') == 0) + - handler_health_osd_check + # See https://github.com/ceph/ceph-ansible/issues/1457 for the condition below + - inventory_hostname in play_hosts + - osd_group_name in group_names +- name: restart non-containerized ceph osds daemon(s) + command: /tmp/restart_osd_daemon.sh + listen: "restart ceph osds" when: -# We do not want to run these checks on initial deployment (`socket.rc == 0`) -# except when a crush location is specified. ceph-disk will start the osds before the osd crush location is specified + # We do not want to run these checks on initial deployment (`socket_osd_container.results[n].rc == 0`) + # except when a crush location is specified. 
ceph-disk will start the osds before the osd crush location is specified - ((crush_location is defined and crush_location) or socket.rc == 0) - ceph_current_fsid.rc == 0 - - osd_group_name in group_names -# See https://github.com/ceph/ceph-ansible/issues/1457 for the condition below + - handler_health_osd_check + # See https://github.com/ceph/ceph-ansible/issues/1457 for the condition below - inventory_hostname in play_hosts + - osd_group_name in group_names - name: restart ceph mdss service: diff --git a/roles/ceph-defaults/meta/main.yml b/roles/ceph-defaults/meta/main.yml new file mode 100644 index 0000000000..b52a2e84c4 --- /dev/null +++ b/roles/ceph-defaults/meta/main.yml @@ -0,0 +1,13 @@ +--- +galaxy_info: + author: Sébastien Han + description: Handles ceph-ansible default vars for all roles + license: Apache + min_ansible_version: 1.7 + platforms: + - name: Ubuntu + versions: + - trusty + categories: + - system +dependencies: [] diff --git a/roles/ceph-defaults/tasks/check_socket.yml b/roles/ceph-defaults/tasks/check_socket.yml new file mode 100644 index 0000000000..11f04f6d3e --- /dev/null +++ b/roles/ceph-defaults/tasks/check_socket.yml @@ -0,0 +1,21 @@ +--- +# These checks are used to avoid running handlers at initial deployment. +- name: check for a ceph socket + shell: | + {{ docker_exec_cmd }} bash -c 'stat {{ rbd_client_admin_socket_path }}/*.asok > /dev/null 2>&1' + changed_when: false + failed_when: false + always_run: true + register: socket + +- name: check for a ceph socket in containerized deployment (osds) + shell: | + docker exec ceph-osd-"{{ ansible_hostname }}"-"{{ item | replace('/', '') }}" bash -c 'stat /var/run/ceph/*.asok > /dev/null 2>&1' + changed_when: false + failed_when: false + always_run: true + register: socket_osd_container + with_items: "{{ devices }}" + when: + - containerized_deployment + - inventory_hostname in groups.get(osd_group_name) diff --git a/roles/ceph-common/tasks/facts.yml b/roles/ceph-defaults/tasks/facts.yml similarity index 60% rename from roles/ceph-common/tasks/facts.yml rename to roles/ceph-defaults/tasks/facts.yml index 2056ce9dcd..d650259aef 100644 --- a/roles/ceph-common/tasks/facts.yml +++ b/roles/ceph-defaults/tasks/facts.yml @@ -1,9 +1,11 @@ --- -- name: get ceph version - command: ceph --version - changed_when: false - always_run: yes - register: ceph_version +- set_fact: + monitor_name: "{{ ansible_hostname }}" + when: not mon_use_fqdn + +- set_fact: + monitor_name: "{{ ansible_fqdn }}" + when: mon_use_fqdn # this task shouldn't run in a rolling_update situation # because it blindly picks a mon, which may be down because @@ -17,6 +19,13 @@ delegate_to: "{{ groups[mon_group_name][0] }}" when: not rolling_update +# We want this check to be run only on the first node +- name: check if {{ fetch_directory }} directory exists + local_action: stat path="{{ fetch_directory }}/monitor_keyring.conf" + become: false + register: monitor_keyring_conf + run_once: true + # set this as a default when performing a rolling_update # so the rest of the tasks here will succeed - set_fact: @@ -36,31 +45,43 @@ when: - ceph_current_fsid.rc == 0 +# Set ceph_release to ceph_stable by default - set_fact: - monitor_name: "{{ ansible_hostname }}" - when: not mon_use_fqdn + ceph_release: "{{ ceph_stable_release }}" -- set_fact: - monitor_name: "{{ ansible_fqdn }}" - when: mon_use_fqdn +- name: generate cluster fsid + local_action: shell python -c 'import uuid; print(str(uuid.uuid4()))' | tee {{ fetch_directory }}/ceph_cluster_uuid.conf + creates="{{ 
fetch_directory }}/ceph_cluster_uuid.conf"
+  register: cluster_uuid
+  become: false
+  when:
+    - generate_fsid
+    - ceph_current_fsid.rc != 0
 
-# We want this check to be run only on one mon
-- name: check if {{ fetch_directory }} directory exists
-  local_action: stat path="{{ fetch_directory }}/monitor_keyring.conf"
+- name: reuse cluster fsid when cluster is already running
+  local_action: shell echo {{ fsid }} | tee {{ fetch_directory }}/ceph_cluster_uuid.conf
+    creates="{{ fetch_directory }}/ceph_cluster_uuid.conf"
   become: false
-  register: monitor_keyring_conf
-  run_once: true
+  when: ceph_current_fsid.rc == 0
 
-- include: facts_mon_fsid.yml
-  run_once: true
-  when:
-    - cephx
-    - not monitor_keyring_conf.stat.exists
-    - ceph_current_fsid.rc == 0
-    - mon_group_name in group_names
+- name: read cluster fsid if it already exists
+  local_action: command cat {{ fetch_directory }}/ceph_cluster_uuid.conf
+    removes="{{ fetch_directory }}/ceph_cluster_uuid.conf"
+  changed_when: false
+  register: cluster_uuid
+  become: false
+  always_run: true
+  when: generate_fsid
 
-- set_fact:
-    ceph_version: "{{ ceph_version.stdout.split(' ')[2] }}"
+- name: set fsid fact when generate_fsid = true
+  set_fact:
+    fsid: "{{ cluster_uuid.stdout }}"
+  when: generate_fsid
+
+- name: set docker_exec_cmd fact
+  set_fact:
+    docker_exec_cmd: "docker exec ceph-mon-{{ ansible_hostname }}"
+  when: containerized_deployment
 
 - set_fact:
     mds_name: "{{ ansible_hostname }}"
diff --git a/roles/ceph-defaults/tasks/main.yml b/roles/ceph-defaults/tasks/main.yml
new file mode 100644
index 0000000000..25887efa78
--- /dev/null
+++ b/roles/ceph-defaults/tasks/main.yml
@@ -0,0 +1,3 @@
+---
+- include: facts.yml
+- include: check_socket.yml
diff --git a/roles/ceph-common/templates/restart_mon_daemon.sh.j2 b/roles/ceph-defaults/templates/restart_mon_daemon.sh.j2
similarity index 72%
rename from roles/ceph-common/templates/restart_mon_daemon.sh.j2
rename to roles/ceph-defaults/templates/restart_mon_daemon.sh.j2
index 4424ccacb0..745f6915fd 100644
--- a/roles/ceph-common/templates/restart_mon_daemon.sh.j2
+++ b/roles/ceph-defaults/templates/restart_mon_daemon.sh.j2
@@ -3,13 +3,12 @@
 RETRIES="{{ handler_health_mon_check_retries }}"
 DELAY="{{ handler_health_mon_check_delay }}"
 MONITOR_NAME="{{ monitor_name }}"
-CLUSTER="{{ cluster }}"
-SOCKET=/var/run/ceph/${CLUSTER}-mon.${MONITOR_NAME}.asok
+SOCKET=/var/run/ceph/{{ cluster }}-mon.${MONITOR_NAME}.asok
 
 
 check_quorum() {
 while [ $RETRIES -ne 0 ]; do
-  MEMBERS=$(ceph --cluster ${CLUSTER} -s --format json | sed -r 's/.*"quorum_names":(\[[^]]+\]).*/\1/')
+  MEMBERS=$({{ docker_exec_cmd }} ceph --cluster {{ cluster }} -s --format json | sed -r 's/.*"quorum_names":(\[[^]]+\]).*/\1/')
   test "${MEMBERS/$MONITOR_NAME}" != "$MEMBERS" && exit 0
   sleep $DELAY
   let RETRIES=RETRIES-1
@@ -17,7 +16,7 @@ done
 
 # If we reach this point, it means there is a problem with the quorum
 echo "Error with quorum."
 echo "cluster status:"
-ceph --cluster ${CLUSTER} -s
+{{ docker_exec_cmd }} ceph --cluster {{ cluster }} -s
 exit 1
 }
@@ -27,7 +26,7 @@ systemctl restart ceph-mon@${MONITOR_NAME}
 COUNT=10
 # Wait and ensure the socket exists after restarting the daemon
 while [ $COUNT -ne 0 ]; do
-  test -S $SOCKET && check_quorum
+  {{ docker_exec_cmd }} test -S $SOCKET && check_quorum
   sleep 1
   let COUNT=COUNT-1
 done
diff --git a/roles/ceph-defaults/templates/restart_osd_daemon.sh.j2 b/roles/ceph-defaults/templates/restart_osd_daemon.sh.j2
new file mode 100644
index 0000000000..de1fe101bb
--- /dev/null
+++ b/roles/ceph-defaults/templates/restart_osd_daemon.sh.j2
@@ -0,0 +1,78 @@
+#!/bin/bash
+
+RETRIES="{{ handler_health_osd_check_retries }}"
+DELAY="{{ handler_health_osd_check_delay }}"
+CEPH_CLI="--name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/{{ cluster }}.keyring --cluster {{ cluster }}"
+
+check_pgs() {
+  while [ $RETRIES -ne 0 ]; do
+    test "[""$($docker_exec ceph $CEPH_CLI -s -f json | python -c 'import sys, json; print(json.load(sys.stdin)["pgmap"]["num_pgs"])')""]" = "$($docker_exec ceph $CEPH_CLI -s -f json | python -c 'import sys, json; print [ i["count"] for i in json.load(sys.stdin)["pgmap"]["pgs_by_state"] if i["state_name"] == "active+clean"]')"
+    RET=$?
+    test $RET -eq 0 && return 0
+    sleep $DELAY
+    let RETRIES=RETRIES-1
+  done
+  # PGs not clean, exiting with return code 1
+  echo "Error while running 'ceph $CEPH_CLI -s', PGs were not reported as active+clean"
+  echo "It is possible that the cluster has fewer OSDs than the replica configuration"
+  echo "Will refuse to continue"
+  $docker_exec ceph "$CEPH_CLI" -s
+  exit 1
+}
+
+wait_for_socket_in_docker() {
+  if ! docker exec "$1" timeout 10 bash -c "while [ ! -e /var/run/ceph/*.asok ]; do sleep 1 ; done"; then
+    log "Timed out while trying to look for a Ceph OSD socket."
+    log "Abort mission!"
+    exit 1
+  fi
+}
+
+get_dev_name() {
+  echo $1 | sed -r 's/ceph-osd@([a-z]{1,4})\.service/\1/'
+}
+
+get_docker_id_from_dev_name() {
+  local id
+  local count
+  count=10
+  while [ $count -ne 0 ]; do
+    id=$(docker ps -q -f "name=$1")
+    test "$id" != "" && break
+    sleep 1
+    let count=count-1
+  done
+  echo "$id"
+}
+
+get_docker_osd_id() {
+  wait_for_socket_in_docker $1
+  docker exec "$1" ls /var/run/ceph | cut -d'.' -f2
+}
+
+# For containerized deployments, the unit file looks like: ceph-osd@sda.service
+# For non-containerized deployments, the unit file looks like: ceph-osd@0.service
+for unit in $(systemctl list-units | grep -oE "ceph-osd@([0-9]{1,2}|[a-z]+).service"); do
+  # First, restart daemon(s)
+  systemctl restart "${unit}"
+  # We need to wait because it may take some time for the socket to actually exist
+  COUNT=10
+  # Wait and ensure the socket exists after restarting the daemon
+  {% if containerized_deployment -%}
+  id=$(get_dev_name "$unit")
+  container_id=$(get_docker_id_from_dev_name "$id")
+  osd_id=$(get_docker_osd_id "$container_id")
+  docker_exec="docker exec $container_id"
+  {% else %}
+  osd_id=$(echo ${unit#ceph-osd@} | grep -oE '[0-9]{1,2}')
+  {% endif %}
+  SOCKET=/var/run/ceph/{{ cluster }}-osd.${osd_id}.asok
+  while [ $COUNT -ne 0 ]; do
+    $docker_exec test -S "$SOCKET" && check_pgs && continue 2
+    sleep 1
+    let COUNT=COUNT-1
+  done
+  # If we reach this point, it means the socket is not present.
+  echo "Socket file ${SOCKET} could not be found, which means the osd daemon is not running."
+  exit 1
+done
diff --git a/roles/ceph-docker-common/defaults/main.yml b/roles/ceph-docker-common/defaults/main.yml
index 73da30a325..ed97d539c0 100644
--- a/roles/ceph-docker-common/defaults/main.yml
+++ b/roles/ceph-docker-common/defaults/main.yml
@@ -1,32 +1 @@
 ---
-fsid: "{{ cluster_uuid.stdout }}"
-generate_fsid: true
-ceph_docker_registry: docker.io
-ceph_docker_enable_centos_extra_repo: false
-
-ceph_docker_on_openstack: false
-
-mon_use_fqdn: false # if set to true, the MON name used will be the fqdn
-
-# Set uid/gid to default '64045' for bootstrap directories.
-# '64045' is used for debian based distros. It must be set to 167 in case of rhel based distros.
-# These values have to be set according to the base OS used by the container image, NOT the host.
-bootstrap_dirs_owner: "64045"
-bootstrap_dirs_group: "64045"
-
-ceph_conf_key_directory: /etc/ceph
-
-###########
-# Network #
-###########
-monitor_interface: 'interface'
-monitor_address: '0.0.0.0'
-monitor_address_block: []
-
-############
-# KV store #
-############
-kv_type: etcd
-kv_endpoint: 127.0.0.1
-kv_port: 2379
-containerized_deployment_with_kv: false
diff --git a/roles/ceph-docker-common/tasks/create_configs.yml b/roles/ceph-docker-common/tasks/create_configs.yml
index d1e6a2926c..d85644222d 100644
--- a/roles/ceph-docker-common/tasks/create_configs.yml
+++ b/roles/ceph-docker-common/tasks/create_configs.yml
@@ -42,12 +42,11 @@
     mode: "0644"
     config_overrides: "{{ ceph_conf_overrides }}"
     config_type: ini
-  when:
-    - (not mon_containerized_default_ceph_conf_with_kv and
-      (inventory_hostname in groups.get(mon_group_name, []))) or
-      (not mon_containerized_default_ceph_conf_with_kv and
-      ((groups.get(nfs_group_name, []) | length > 0)
-      and (inventory_hostname == groups.get(nfs_group_name, [])[0])))
+  notify:
+    - restart ceph mons
+    - restart ceph osds
+    - restart ceph mdss
+    - restart ceph rgws
 
 - name: set fsid fact when generate_fsid = true
   set_fact:
diff --git a/roles/ceph-docker-common/tasks/fetch_configs.yml b/roles/ceph-docker-common/tasks/fetch_configs.yml
index b321b54953..a432a7ea21 100644
--- a/roles/ceph-docker-common/tasks/fetch_configs.yml
+++ b/roles/ceph-docker-common/tasks/fetch_configs.yml
@@ -2,7 +2,6 @@
 - name: set config and keys paths
   set_fact:
     ceph_config_keys:
-      - "{{ ceph_conf_key_directory }}/{{ cluster }}.conf"
       - "{{ ceph_conf_key_directory }}/{{ cluster }}.client.admin.keyring"
       - "{{ ceph_conf_key_directory }}/monmap-{{ cluster }}"
       - "{{ ceph_conf_key_directory }}/{{ cluster }}.mon.keyring"
diff --git a/roles/ceph-mds/meta/main.yml b/roles/ceph-mds/meta/main.yml
index 9b7943261b..7f5d25ce62 100644
--- a/roles/ceph-mds/meta/main.yml
+++ b/roles/ceph-mds/meta/main.yml
@@ -10,6 +10,4 @@ galaxy_info:
     - trusty
   categories:
     - system
-dependencies:
-  - { role: ceph.ceph-common, when: not containerized_deployment }
-  - { role: ceph.ceph-docker-common, when: containerized_deployment }
+dependencies: []
diff --git a/roles/ceph-mgr/meta/main.yml b/roles/ceph-mgr/meta/main.yml
index 6829b9ebc8..ffe7e82996 100644
--- a/roles/ceph-mgr/meta/main.yml
+++ b/roles/ceph-mgr/meta/main.yml
@@ -13,6 +13,4 @@ galaxy_info:
     - 7
   categories:
     - system
-dependencies:
-  - { role: ceph.ceph-common, when: not containerized_deployment }
-  - { role: ceph.ceph-docker-common, when: containerized_deployment }
+dependencies: []
diff --git a/roles/ceph-mon/defaults/main.yml b/roles/ceph-mon/defaults/main.yml
index 8bd45f287b..f81b254eb8 100644
--- a/roles/ceph-mon/defaults/main.yml
+++ b/roles/ceph-mon/defaults/main.yml
@@ -5,12 +5,9 @@
 # GENERAL #
 ###########
 
-fetch_directory: fetch/
-
 mon_group_name: mons
 
 # ACTIVATE BOTH FSID AND MONITOR_SECRET VARIABLES FOR NON-VAGRANT DEPLOYMENT
-fsid: "{{ cluster_uuid.stdout }}"
 monitor_secret: "{{ monitor_keyring.stdout }}"
 admin_secret: 'admin_secret'
 
@@ -108,7 +105,6 @@ openstack_keys:
 ##########
 # DOCKER #
 ##########
-docker_exec_cmd:
 ceph_mon_docker_subnet: "{{ public_network }}"# subnet of the monitor_interface
 
 # ceph_mon_docker_extra_env:
diff --git a/roles/ceph-mon/meta/main.yml b/roles/ceph-mon/meta/main.yml
index 0d99a1b3b9..797a14dafb 100644
--- a/roles/ceph-mon/meta/main.yml
+++ b/roles/ceph-mon/meta/main.yml
@@ -10,6 +10,4 @@ galaxy_info:
     - trusty
   categories:
     - system
-dependencies:
-  - { role: ceph.ceph-common, when: not containerized_deployment }
-  - { role: ceph.ceph-docker-common, when: containerized_deployment }
+dependencies: []
diff --git a/roles/ceph-mon/tasks/docker/main.yml b/roles/ceph-mon/tasks/docker/main.yml
index f7d073d9de..1b5291526a 100644
--- a/roles/ceph-mon/tasks/docker/main.yml
+++ b/roles/ceph-mon/tasks/docker/main.yml
@@ -1,8 +1,4 @@
 ---
-- name: set docker_exec_cmd fact
-  set_fact:
-    docker_exec_cmd: "docker exec ceph-mon-{{ ansible_hostname }}"
-
 - include: start_docker_monitor.yml
 
 - name: wait for monitor socket to exist
diff --git a/roles/ceph-nfs/meta/main.yml b/roles/ceph-nfs/meta/main.yml
index 9b30523883..5c3d8a6111 100644
--- a/roles/ceph-nfs/meta/main.yml
+++ b/roles/ceph-nfs/meta/main.yml
@@ -10,6 +10,4 @@ galaxy_info:
     - trusty
   categories:
     - system
-dependencies:
-  - { role: ceph.ceph-common, when: not containerized_deployment }
-  - { role: ceph.ceph-docker-common, when: containerized_deployment }
+dependencies: []
diff --git a/roles/ceph-osd/defaults/main.yml b/roles/ceph-osd/defaults/main.yml
index 5aef8bb509..6b7067c1a7 100644
--- a/roles/ceph-osd/defaults/main.yml
+++ b/roles/ceph-osd/defaults/main.yml
@@ -7,8 +7,6 @@
 # GENERAL #
 ###########
 
-fetch_directory: fetch/
-
 # Even though OSD nodes should not have the admin key
 # at their disposal, some people might want to have it
 # distributed on OSD nodes. Setting 'copy_admin_key' to 'true'
@@ -50,9 +48,6 @@ osd_crush_location: "\"root={{ ceph_crush_root }} rack={{ ceph_crush_rack }} host={{ ansible_hostname }}\""
 # CEPH OPTIONS
 ##############
 
-# ACTIVATE THE FSID VARIABLE FOR NON-VAGRANT DEPLOYMENT
-fsid: "{{ cluster_uuid.stdout }}"
-
 # Devices to be used as OSDs
 # You can pre-provision disks that are not present yet.
 # Ansible will just skip them. Newly added disk will be
diff --git a/roles/ceph-osd/meta/main.yml b/roles/ceph-osd/meta/main.yml
index ef9c436af7..afa08ec9eb 100644
--- a/roles/ceph-osd/meta/main.yml
+++ b/roles/ceph-osd/meta/main.yml
@@ -10,6 +10,4 @@ galaxy_info:
     - trusty
   categories:
     - system
-dependencies:
-  - { role: ceph.ceph-common, when: not containerized_deployment }
-  - { role: ceph.ceph-docker-common, when: containerized_deployment }
+dependencies: []
diff --git a/roles/ceph-rbd-mirror/meta/main.yml b/roles/ceph-rbd-mirror/meta/main.yml
index a73a1447c2..4a0799943c 100644
--- a/roles/ceph-rbd-mirror/meta/main.yml
+++ b/roles/ceph-rbd-mirror/meta/main.yml
@@ -13,6 +13,4 @@ galaxy_info:
     - 7
   categories:
     - system
-dependencies:
-  - { role: ceph.ceph-common, when: not containerized_deployment }
-  - { role: ceph.ceph-docker-common, when: containerized_deployment }
+dependencies: []
diff --git a/roles/ceph-restapi/meta/main.yml b/roles/ceph-restapi/meta/main.yml
index 809d979c2b..fb4a0bc788 100644
--- a/roles/ceph-restapi/meta/main.yml
+++ b/roles/ceph-restapi/meta/main.yml
@@ -10,6 +10,4 @@ galaxy_info:
     - trusty
   categories:
     - system
-dependencies:
-  - { role: ceph.ceph-common, when: not containerized_deployment }
-  - { role: ceph.ceph-docker-common, when: containerized_deployment }
+dependencies: []
diff --git a/roles/ceph-rgw/meta/main.yml b/roles/ceph-rgw/meta/main.yml
index cffb6d934d..745a72174f 100644
--- a/roles/ceph-rgw/meta/main.yml
+++ b/roles/ceph-rgw/meta/main.yml
@@ -10,6 +10,4 @@ galaxy_info:
     - trusty
   categories:
     - system
-dependencies:
-  - { role: ceph.ceph-common, when: not containerized_deployment }
-  - { role: ceph.ceph-docker-common, when: containerized_deployment }
+dependencies: []
diff --git a/roles/ceph-rgw/tasks/docker/start_docker_rgw.yml b/roles/ceph-rgw/tasks/docker/start_docker_rgw.yml
index e3bef36035..3bce3d4fea 100644
--- a/roles/ceph-rgw/tasks/docker/start_docker_rgw.yml
+++ b/roles/ceph-rgw/tasks/docker/start_docker_rgw.yml
@@ -2,14 +2,21 @@
 - name: generate systemd unit file
   become: true
   template:
-    src: "{{ role_path }}/templates/ceph-rgw.service.j2"
-    dest: /etc/systemd/system/ceph-rgw@.service
+    src: "{{ role_path }}/templates/ceph-radosgw.service.j2"
+    dest: /etc/systemd/system/ceph-radosgw@.service
     owner: "root"
     group: "root"
    mode: "0644"
 
+# For backward compatibility
+- name: disable old systemd unit ('ceph-rgw@') if present
+  service:
+    name: ceph-rgw@{{ ansible_hostname }}
+    enabled: no
+  ignore_errors: true
+
 - name: enable systemd unit file for rgw instance
-  shell: systemctl enable ceph-rgw@{{ ansible_hostname }}.service
+  shell: systemctl enable ceph-radosgw@{{ ansible_hostname }}.service
   failed_when: false
   changed_when: false
 
@@ -20,7 +27,7 @@
 - name: systemd start rgw container
   service:
-    name: ceph-rgw@{{ ansible_hostname }}
+    name: ceph-radosgw@{{ ansible_hostname }}
     state: started
     enabled: yes
   changed_when: false
diff --git a/roles/ceph-rgw/templates/ceph-rgw.service.j2 b/roles/ceph-rgw/templates/ceph-radosgw.service.j2
similarity index 100%
rename from roles/ceph-rgw/templates/ceph-rgw.service.j2
rename to roles/ceph-rgw/templates/ceph-radosgw.service.j2
diff --git a/site-docker.yml.sample b/site-docker.yml.sample
index 8bafee7cda..5740c399f8 100644
--- a/site-docker.yml.sample
+++ b/site-docker.yml.sample
@@ -19,45 +19,63 @@
   become: True
   gather_facts: false
   roles:
-    - ceph-mon
+    - ceph-defaults
+    - ceph-docker-common
+    - ceph-mon
   serial: 1 # MUST be '1' WHEN DEPLOYING MONITORS ON DOCKER CONTAINERS
 
 - hosts: osds
   become: True
   roles:
-    - ceph-osd
+    - ceph-defaults
+    - ceph-docker-common
+    - ceph-osd
 
 - hosts: mdss
   become: True
   roles:
-    - ceph-mds
+    - ceph-defaults
+    - ceph-docker-common
+    - ceph-mds
 
 - hosts: rgws
   become: True
   roles:
-    - ceph-rgw
+    - ceph-defaults
+    - ceph-docker-common
+    - ceph-rgw
 
 - hosts: nfss
   become: True
   roles:
-    - ceph-nfs
+    - ceph-defaults
+    - ceph-docker-common
+    - ceph-nfs
 
 - hosts: rbd_mirrors
   become: True
   roles:
-    - ceph-rbd-mirror
+    - ceph-defaults
+    - ceph-docker-common
+    - ceph-rbd-mirror
 
 - hosts: restapis
   become: True
   roles:
-    - ceph-restapi
+    - ceph-defaults
+    - ceph-docker-common
+    - ceph-restapi
 
 - hosts: mgrs
   become: True
   roles:
+    - { role: ceph-defaults, when: "ceph_release_num.{{ ceph_stable_release }} > ceph_release_num.jewel" }
+    - { role: ceph-docker-common, when: "ceph_release_num.{{ ceph_stable_release }} > ceph_release_num.jewel" }
     - { role: ceph-mgr, when: "ceph_release_num.{{ ceph_stable_release }} > ceph_release_num.jewel" }
 
 - hosts: clients
   become: True
   roles:
+    - ceph-defaults
+    - ceph-common
     - ceph-client
diff --git a/site.yml.sample b/site.yml.sample
index 16fd1b8214..34b9234270 100644
--- a/site.yml.sample
+++ b/site.yml.sample
@@ -42,58 +42,78 @@
   gather_facts: false
   become: True
   roles:
-    - ceph-mon
+    - ceph-defaults
+    - ceph-common
+    - ceph-mon
 
 - hosts: agents
   gather_facts: false
   become: True
   roles:
-    - ceph-agent
+    - ceph-defaults
+    - ceph-common
+    - ceph-agent
 
 - hosts: osds
   gather_facts: false
   become: True
   roles:
-    - ceph-osd
+    - ceph-defaults
+    - ceph-common
+    - ceph-osd
 
 - hosts: mdss
   gather_facts: false
   become: True
   roles:
-    - ceph-mds
+    - ceph-defaults
+    - ceph-common
+    - ceph-mds
 
 - hosts: rgws
   gather_facts: false
   become: True
   roles:
-    - ceph-rgw
+    - ceph-defaults
+    - ceph-common
+    - ceph-rgw
 
 - hosts: nfss
   gather_facts: false
   become: True
   roles:
-    - ceph-nfs
+    - ceph-defaults
+    - ceph-common
+    - ceph-nfs
 
 - hosts: restapis
   gather_facts: false
   become: True
   roles:
-    - ceph-restapi
+    - ceph-defaults
+    - ceph-common
+    - ceph-restapi
 
 - hosts: rbdmirrors
   gather_facts: false
   become: True
   roles:
-    - ceph-rbd-mirror
+    - ceph-defaults
+    - ceph-common
+    - ceph-rbd-mirror
 
 - hosts: clients
   gather_facts: false
   become: True
   roles:
-    - ceph-client
+    - ceph-defaults
+    - ceph-common
+    - ceph-client
 
 - hosts: mgrs
   gather_facts: false
   become: True
   roles:
+    - { role: ceph-defaults, when: "ceph_release_num.{{ ceph_stable_release }} > ceph_release_num.jewel" }
+    - { role: ceph-common, when: "ceph_release_num.{{ ceph_stable_release }} > ceph_release_num.jewel" }
    - { role: ceph-mgr, when: "ceph_release_num.{{ ceph_stable_release }} > ceph_release_num.jewel" }
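
Note on the check_pgs() test in restart_osd_daemon.sh.j2: both command substitutions parse the same `ceph -s -f json` output; the left-hand side wraps the total PG count in literal brackets, while the right-hand side prints the Python list of per-state counts filtered to "active+clean", so the string comparison only succeeds once every PG is active+clean. A rough sketch of the two sides, using illustrative values for a hypothetical 64-PG cluster (the $CEPH_CLI options and the $docker_exec prefix are elided here):

    # Left-hand side: the script builds the string "[" + num_pgs + "]"
    $ ceph -s -f json | python -c 'import sys, json; print(json.load(sys.stdin)["pgmap"]["num_pgs"])'
    64          # wrapped by the test into "[64]"

    # Right-hand side: list of counts for PGs reported as active+clean
    $ ceph -s -f json | python -c 'import sys, json; print [ i["count"] for i in json.load(sys.stdin)["pgmap"]["pgs_by_state"] if i["state_name"] == "active+clean"]'
    [64]        # equals "[64]" only when all 64 PGs are active+clean; otherwise e.g. "[60]" or "[]"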