From c9bca7ddfac453ea53c05e714b0fcc489584d963 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?S=C3=A9bastien=20Han?=
Date: Mon, 9 Jul 2018 16:58:35 +0200
Subject: [PATCH] ceph-osd: ceph-volume container support
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Allow the lvm osd scenario to run in containerized deployments: the
ceph_volume module gains 'prepare' and 'activate' actions plus an
optional 'containerized' command prefix, OSDs are prepared through a
long-lived ceph-volume side container, and activation is handled by
the OSD container itself via OSD_CEPH_VOLUME_ACTIVATE.

Signed-off-by: Sébastien Han
---
 library/ceph_volume.py                      | 78 +++++++++++++++------
 roles/ceph-osd/tasks/scenarios/lvm.yml      | 32 ++++++++-
 roles/ceph-osd/templates/ceph-osd-run.sh.j2 |  5 ++
 3 files changed, 89 insertions(+), 26 deletions(-)

diff --git a/library/ceph_volume.py b/library/ceph_volume.py
index e95c9794989..8747268f32a 100644
--- a/library/ceph_volume.py
+++ b/library/ceph_volume.py
@@ -1,6 +1,5 @@
 #!/usr/bin/python
 import datetime
-import json

 ANSIBLE_METADATA = {
     'metadata_version': '1.0',
@@ -36,7 +35,7 @@
         description:
             - The action to take. Either creating OSDs or zapping devices.
         required: true
-        choices: ['create', 'zap']
+        choices: ['create', 'zap', 'prepare', 'activate']
         default: create
     data:
         description:
@@ -63,7 +62,7 @@
         required: false
     db_vg:
         description:
-            - If db is a lv, this must be the name of the volume group it belongs to.
+            - If db is a lv, this must be the name of the volume group it belongs to.  # noqa E501
             - Only applicable if objectstore is 'bluestore'.
         required: false
     wal:
@@ -73,7 +72,7 @@
         required: false
     wal_vg:
         description:
-            - If wal is a lv, this must be the name of the volume group it belongs to.
+            - If wal is a lv, this must be the name of the volume group it belongs to.  # noqa E501
            - Only applicable if objectstore is 'bluestore'.
         required: false
     crush_device_class:
@@ -97,23 +96,27 @@
     data: data-lv
     data_vg: data-vg
     journal: /dev/sdc1
+    action: create

 - name: set up a bluestore osd with a raw device for data
   ceph_volume:
     objectstore: bluestore
     data: /dev/sdc
+    action: create

-- name: set up a bluestore osd with an lv for data and partitions for block.wal and block.db
+
+- name: set up a bluestore osd with an lv for data and partitions for block.wal and block.db  # noqa E501
   ceph_volume:
     objectstore: bluestore
     data: data-lv
     data_vg: data-vg
     db: /dev/sdc1
     wal: /dev/sdc2
+    action: create

 '''

-from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.basic import AnsibleModule  # noqa E402


 def get_data(data, data_vg):
@@ -140,7 +143,28 @@ def get_wal(wal, wal_vg):
     return wal


-def create_osd(module):
+def ceph_volume_cmd(subcommand, containerized, cluster=None):
+    cmd = ['ceph-volume']
+    if cluster:
+        cmd.extend(["--cluster", cluster])
+    cmd.append('lvm')
+    cmd.append(subcommand)
+
+    if containerized:
+        cmd = containerized.split() + cmd
+
+    return cmd
+
+
+def activate_osd(module, containerized=None):
+    subcommand = "activate"
+    cmd = ceph_volume_cmd(subcommand, containerized)
+    cmd.append("--all")
+
+    return module.run_command(cmd, encoding=None)
+
+
+def prepare_osd(module):
     cluster = module.params['cluster']
     objectstore = module.params['objectstore']
     data = module.params['data']
@@ -153,16 +177,12 @@
     wal_vg = module.params.get('wal_vg', None)
     crush_device_class = module.params.get('crush_device_class', None)
     dmcrypt = module.params['dmcrypt']
+    containerized = module.params.get('containerized')
+    subcommand = "prepare"

-    cmd = [
-        'ceph-volume',
-        '--cluster',
-        cluster,
-        'lvm',
-        'create',
-        '--%s' % objectstore,
-        '--data',
-    ]
+    cmd = ceph_volume_cmd(subcommand, containerized, cluster)
+    cmd.append("--%s" % objectstore)
+    cmd.append("--data")

     data = get_data(data, data_vg)
     cmd.append(data)
@@ -201,11 +221,14 @@

     # check to see if osd already exists
     # FIXME: this does not work when data is a raw device
-    # support for 'lvm list' and raw devices was added with https://github.com/ceph/ceph/pull/20620 but
+    # support for 'lvm list' and raw devices
+    # was added with https://github.com/ceph/ceph/pull/20620 but
     # has not made it to a luminous release as of 12.2.4
-    rc, out, err = module.run_command(["ceph-volume", "lvm", "list", data], encoding=None)
+    rc, out, err = module.run_command(
+        ["ceph-volume", "lvm", "list", data], encoding=None)
     if rc == 0:
-        result["stdout"] = "skipped, since {0} is already used for an osd".format(data)
+        result["stdout"] = "skipped, since {0} is already used for an osd".format(  # noqa E501
+            data)
         result['rc'] = 0
         module.exit_json(**result)

@@ -312,8 +335,11 @@ def zap_devices(module):
 def run_module():
     module_args = dict(
         cluster=dict(type='str', required=False, default='ceph'),
-        objectstore=dict(type='str', required=False, choices=['bluestore', 'filestore'], default='bluestore'),
-        action=dict(type='str', required=False, choices=['create', 'zap'], default='create'),
+        objectstore=dict(type='str', required=False, choices=[
+            'bluestore', 'filestore'], default='bluestore'),
+        action=dict(type='str', required=False, choices=[
+            'create', 'zap', 'prepare', 'activate'], default='create'),
+        containerized=dict(type='str', required=False, default=None),
         data=dict(type='str', required=True),
         data_vg=dict(type='str', required=False),
         journal=dict(type='str', required=False),
@@ -334,11 +360,17 @@
     action = module.params['action']

     if action == "create":
-        create_osd(module)
+        prepare_osd(module)
+        activate_osd(module)
+    elif action == "prepare":
+        prepare_osd(module)
+    elif action == "activate":
+        activate_osd(module)
     elif action == "zap":
         zap_devices(module)

-    module.fail_json(msg='State must either be "present" or "absent".', changed=False, rc=1)
+    module.fail_json(
+        msg='Action must be one of "create", "prepare", "activate" or "zap".', changed=False, rc=1)


 def main():
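To make the new plumbing concrete, here is a minimal, self-contained sketch of
what ceph_volume_cmd() returns once a containerized prefix is involved. The
function body mirrors the hunk above; the "docker exec ceph-volume-prepare"
prefix matches the fact set later in roles/ceph-osd/tasks/scenarios/lvm.yml,
and the printed list is purely illustrative.

    def ceph_volume_cmd(subcommand, containerized, cluster=None):
        # assemble the bare ceph-volume invocation first
        cmd = ['ceph-volume']
        if cluster:
            cmd.extend(["--cluster", cluster])
        cmd.append('lvm')
        cmd.append(subcommand)
        # a containerized prefix is a whitespace-separated command string
        # that gets prepended, e.g. "docker exec <container-name>"
        if containerized:
            cmd = containerized.split() + cmd
        return cmd

    print(ceph_volume_cmd("prepare", "docker exec ceph-volume-prepare", "ceph"))
    # ['docker', 'exec', 'ceph-volume-prepare',
    #  'ceph-volume', '--cluster', 'ceph', 'lvm', 'prepare']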
diff --git a/roles/ceph-osd/tasks/scenarios/lvm.yml b/roles/ceph-osd/tasks/scenarios/lvm.yml
index ed53967fd56..0612b41c993 100644
--- a/roles/ceph-osd/tasks/scenarios/lvm.yml
+++ b/roles/ceph-osd/tasks/scenarios/lvm.yml
@@ -1,4 +1,28 @@
 ---
+- name: set_fact docker_exec_prepare_cmd
+  set_fact:
+    docker_exec_prepare_cmd: "docker exec ceph-volume-prepare"
+  when:
+    - containerized_deployment
+
+- name: run a ceph-volume prepare container (sleep 3000)
+  command: >
+    docker run
+    --rm
+    --privileged=true
+    --net=host
+    -v /dev:/dev
+    -d
+    -v {{ ceph_conf_key_directory }}:{{ ceph_conf_key_directory }}:z
+    -v /var/lib/ceph/:/var/lib/ceph/:z
+    -v /run/lvm/lvmetad.socket:/run/lvm/lvmetad.socket
+    --name ceph-volume-prepare
+    --entrypoint=sleep
+    {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
+    3000
+  changed_when: false
+  when:
+    - containerized_deployment

 - name: "use ceph-volume to create {{ osd_objectstore }} osds"
   ceph_volume:
@@ -14,6 +38,8 @@
     wal_vg: "{{ item.wal_vg|default(omit) }}"
     crush_device_class: "{{ item.crush_device_class|default(omit) }}"
     dmcrypt: "{{ dmcrypt|default(omit) }}"
+    containerized: "{{ docker_exec_prepare_cmd | default(False) }}"
+    action: "{{ 'prepare' if containerized_deployment else 'create' }}"
   environment:
     CEPH_VOLUME_DEBUG: 1
   with_items: "{{ lvm_volumes }}"
\ No newline at end of file
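The prepare container above is started with --entrypoint=sleep and an argument
of 3000 only so that it stays alive long enough for the module to docker exec
into it once per OSD; activation is deliberately left to the per-OSD container
started by ceph-osd-run.sh.j2. A hedged sketch of the two command shapes this
split produces, where "data-vg/data-lv" stands in for an entry of lvm_volumes:

    # Illustrative only: command shapes for the containerized two-phase flow.
    prefix = "docker exec ceph-volume-prepare"

    # phase 1: prepare runs inside the long-lived side container
    prepare = prefix.split() + [
        "ceph-volume", "--cluster", "ceph", "lvm", "prepare",
        "--bluestore", "--data", "data-vg/data-lv",
    ]

    # phase 2: activate --all runs later, from the OSD container entrypoint
    activate = ["ceph-volume", "lvm", "activate", "--all"]

    print(" ".join(prepare))
    print(" ".join(activate))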
diff --git a/roles/ceph-osd/templates/ceph-osd-run.sh.j2 b/roles/ceph-osd/templates/ceph-osd-run.sh.j2
index 58151e65f96..01f0b7cedc2 100644
--- a/roles/ceph-osd/templates/ceph-osd-run.sh.j2
+++ b/roles/ceph-osd/templates/ceph-osd-run.sh.j2
@@ -108,7 +108,12 @@ expose_partitions "$1"
     {% if (ceph_tcmalloc_max_total_thread_cache | int) > 0 and osd_objectstore == 'filestore' -%}
     -e TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES={{ ceph_tcmalloc_max_total_thread_cache }} \
     {% endif -%}
+    {% if osd_scenario == 'lvm' -%}
+    -v /run/lvm/lvmetad.socket:/run/lvm/lvmetad.socket \
+    -e CEPH_DAEMON=OSD_CEPH_VOLUME_ACTIVATE \
+    {% else -%}
     -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE \
+    {% endif -%}
     {{ ceph_osd_docker_extra_env }} \
     --name=ceph-osd-{{ ansible_hostname }}-${1} \
     {{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
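A small sketch (assuming the jinja2 package is available) of how the new
conditional picks the activation flavor; only the CEPH_DAEMON part of the
template is reproduced, and "collocated" is just an arbitrary non-lvm
scenario value:

    from jinja2 import Template

    # the conditional from ceph-osd-run.sh.j2, rendered in isolation
    fragment = Template(
        "{% if osd_scenario == 'lvm' -%}"
        "-e CEPH_DAEMON=OSD_CEPH_VOLUME_ACTIVATE"
        "{% else -%}"
        "-e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE"
        "{% endif -%}"
    )
    print(fragment.render(osd_scenario="lvm"))
    # -e CEPH_DAEMON=OSD_CEPH_VOLUME_ACTIVATE
    print(fragment.render(osd_scenario="collocated"))
    # -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE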