ceph-osd: ceph-volume container support
Signed-off-by: Sébastien Han <seb@redhat.com>
leseb committed Jul 9, 2018
1 parent 07852ed commit c9bca7d
Showing 3 changed files with 88 additions and 26 deletions.
77 changes: 54 additions & 23 deletions library/ceph_volume.py
@@ -1,6 +1,5 @@
#!/usr/bin/python
import datetime
import json

ANSIBLE_METADATA = {
'metadata_version': '1.0',
@@ -36,7 +35,7 @@
description:
- The action to take. Either creating OSDs or zapping devices.
required: true
choices: ['create', 'zap']
choices: ['create', 'zap', 'prepare', 'activate']
default: create
data:
description:
@@ -63,7 +62,7 @@
required: false
db_vg:
description:
- If db is a lv, this must be the name of the volume group it belongs to.
- If db is a lv, this must be the name of the volume group it belongs to. # noqa E501
- Only applicable if objectstore is 'bluestore'.
required: false
wal:
@@ -73,7 +72,7 @@
required: false
wal_vg:
description:
- If wal is a lv, this must be the name of the volume group it belongs to.
- If wal is a lv, this must be the name of the volume group it belongs to. # noqa E501
- Only applicable if objectstore is 'bluestore'.
required: false
crush_device_class:
@@ -97,23 +96,27 @@
data: data-lv
data_vg: data-vg
journal: /dev/sdc1
action: create
- name: set up a bluestore osd with a raw device for data
ceph_volume:
objectstore: bluestore
data: /dev/sdc
action: create
- name: set up a bluestore osd with an lv for data and partitions for block.wal and block.db
- name: set up a bluestore osd with an lv for data and partitions for block.wal and block.db # noqa E501
ceph_volume:
objectstore: bluestore
data: data-lv
data_vg: data-vg
db: /dev/sdc1
wal: /dev/sdc2
action: create
'''


from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import AnsibleModule # noqa 4502


def get_data(data, data_vg):
@@ -140,7 +143,28 @@ def get_wal(wal, wal_vg):
return wal


def create_osd(module):
def ceph_volume_cmd(subcommand, containerized, cluster=None):
cmd = ['ceph-volume']
if cluster:
cmd.extend(["--cluster", cluster])
cmd.append('lvm')
cmd.append(subcommand)

if containerized:
cmd = containerized.split() + cmd

return cmd


def activate_osd(module, containerized=None):
subcommand = "activate"
cmd = ceph_volume_cmd(subcommand)
cmd.append("--all")

return True


def prepare_osd(module):
cluster = module.params['cluster']
objectstore = module.params['objectstore']
data = module.params['data']
@@ -153,16 +177,12 @@ def create_osd(module):
wal_vg = module.params.get('wal_vg', None)
crush_device_class = module.params.get('crush_device_class', None)
dmcrypt = module.params['dmcrypt']
containerized = module.params.get('containerized')
subcommand = "create"

cmd = [
'ceph-volume',
'--cluster',
cluster,
'lvm',
'create',
'--%s' % objectstore,
'--data',
]
cmd = ceph_volume_cmd(subcommand, containerized, cluster)
cmd.extend(["--%s", objectstore])
cmd.append("--data")

data = get_data(data, data_vg)
cmd.append(data)
@@ -201,11 +221,14 @@ def create_osd(module):

# check to see if osd already exists
# FIXME: this does not work when data is a raw device
# support for 'lvm list' and raw devices was added with https://github.com/ceph/ceph/pull/20620 but
# support for 'lvm list' and raw devices
# was added with https://github.com/ceph/ceph/pull/20620 but
# has not made it to a luminous release as of 12.2.4
rc, out, err = module.run_command(["ceph-volume", "lvm", "list", data], encoding=None)
rc, out, err = module.run_command(
["ceph-volume", "lvm", "list", data], encoding=None)
if rc == 0:
result["stdout"] = "skipped, since {0} is already used for an osd".format(data)
result["stdout"] = "skipped, since {0} is already used for an osd".format( # noqa E501
data)
result['rc'] = 0
module.exit_json(**result)

@@ -312,8 +335,10 @@ def zap_devices(module):
def run_module():
module_args = dict(
cluster=dict(type='str', required=False, default='ceph'),
objectstore=dict(type='str', required=False, choices=['bluestore', 'filestore'], default='bluestore'),
action=dict(type='str', required=False, choices=['create', 'zap'], default='create'),
objectstore=dict(type='str', required=False, choices=[
'bluestore', 'filestore'], default='bluestore'),
action=dict(type='str', required=False, choices=[
'create', 'zap'], default='create'),
data=dict(type='str', required=True),
data_vg=dict(type='str', required=False),
journal=dict(type='str', required=False),
@@ -334,11 +359,17 @@ def run_module():
action = module.params['action']

if action == "create":
create_osd(module)
prepare_osd(module)
activate_osd(module)
elif action == "prepare":
activate_osd(module)
elif action == "activate":
activate_osd(module)
elif action == "zap":
zap_devices(module)

module.fail_json(msg='State must either be "present" or "absent".', changed=False, rc=1)
module.fail_json(
msg='State must either be "present" or "absent".', changed=False, rc=1)


def main():
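
For reference, here is a minimal, self-contained sketch (not part of the commit) that mirrors the new ceph_volume_cmd() helper from library/ceph_volume.py and shows the command lists it builds. The containerized prefix used below is the "docker exec ceph-volume-prepare" string that the updated lvm.yml task passes in; on bare metal the module keeps calling ceph-volume directly.

```python
def ceph_volume_cmd(subcommand, containerized, cluster=None):
    # Same logic as the helper added in library/ceph_volume.py.
    cmd = ['ceph-volume']
    if cluster:
        cmd.extend(["--cluster", cluster])
    cmd.append('lvm')
    cmd.append(subcommand)

    if containerized:
        # Prepend the container runtime prefix, e.g. "docker exec <name>".
        cmd = containerized.split() + cmd

    return cmd


# Bare-metal deployment: ceph-volume runs directly on the host.
print(ceph_volume_cmd("create", None, "ceph"))
# ['ceph-volume', '--cluster', 'ceph', 'lvm', 'create']

# Containerized deployment: the command is wrapped in the long-running
# "ceph-volume-prepare" container started by roles/ceph-osd/tasks/scenarios/lvm.yml.
print(ceph_volume_cmd("prepare", "docker exec ceph-volume-prepare", "ceph"))
# ['docker', 'exec', 'ceph-volume-prepare',
#  'ceph-volume', '--cluster', 'ceph', 'lvm', 'prepare']
```
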
32 changes: 29 additions & 3 deletions roles/ceph-osd/tasks/scenarios/lvm.yml
@@ -1,4 +1,28 @@
---
- name: set_fact docker_exec_prepare_cmd
set_fact:
docker_exec_prepare_cmd: "docker exec ceph-volume-prepare"
when:
- containerized_deployment

- name: run a ceph-volume prepare container (sleep 3000)
command: >
docker run \
--rm \
--privileged=true \
--net=host \
-v /dev:/dev \
-d \
-v {{ ceph_conf_key_directory }}:{{ ceph_conf_key_directory }}:z \
-v /var/lib/ceph/:/var/lib/ceph/:z \
-v /run/lvm/lvmetad.socket:/run/lvm/lvmetad.socket \
--name ceph-volume-prepare \
--entrypoint=sleep \
{{ ceph_docker_registry}}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }} \
3000
changed_when: false
when:
- containerized_deployment

- name: "use ceph-volume to create {{ osd_objectstore }} osds"
ceph_volume:
@@ -14,6 +38,8 @@
wal_vg: "{{ item.wal_vg|default(omit) }}"
crush_device_class: "{{ item.crush_device_class|default(omit) }}"
dmcrypt: "{{ dmcrypt|default(omit) }}"
environment:
CEPH_VOLUME_DEBUG: 1
with_items: "{{ lvm_volumes }}"
containerized: "{{ docker_exec_prepare_cmd | default(False) }}"
action: "{{ 'prepare' if containerized_deployment else 'create' }}"
environment:
CEPH_VOLUME_DEBUG: 1
with_items: "{{ lvm_volumes }}"
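
A small illustrative sketch (not part of the commit) of how the task above chooses its module parameters: with containerized_deployment the play only prepares the OSD through the sleeping ceph-volume-prepare container, and activation is deferred to the OSD container started from ceph-osd-run.sh.j2 (see the template change below). The dict values mirror the "containerized" and "action" arguments in the task.

```python
def lvm_scenario_params(containerized_deployment):
    # Mirrors the 'action' and 'containerized' arguments passed to the
    # ceph_volume module by the lvm.yml task above.
    if containerized_deployment:
        return {
            'action': 'prepare',
            'containerized': 'docker exec ceph-volume-prepare',
        }
    return {
        'action': 'create',
        'containerized': False,
    }


print(lvm_scenario_params(True))
# {'action': 'prepare', 'containerized': 'docker exec ceph-volume-prepare'}
print(lvm_scenario_params(False))
# {'action': 'create', 'containerized': False}
```
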
5 changes: 5 additions & 0 deletions roles/ceph-osd/templates/ceph-osd-run.sh.j2
@@ -108,7 +108,12 @@ expose_partitions "$1"
{% if (ceph_tcmalloc_max_total_thread_cache | int) > 0 and osd_objectstore == 'filestore' -%}
-e TCMALLOC_MAX_TOTAL_THREAD_CACHE_BYTES={{ ceph_tcmalloc_max_total_thread_cache }} \
{% endif -%}
{% if osd_scenario == 'lvm' -%}
-v /run/lvm/lvmetad.socket:/run/lvm/lvmetad.socket \
-e CEPH_DAEMON=OSD_CEPH_VOLUME_ACTIVATE \
{% else -%}
-e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE \
{% endif -%}
{{ ceph_osd_docker_extra_env }} \
--name=ceph-osd-{{ ansible_hostname }}-${1} \
{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
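
Finally, an illustrative sketch (not part of the commit) of the extra docker run arguments the template block above renders: the lvm scenario mounts the lvmetad socket and switches the container entrypoint to the ceph-volume activation path, while other scenarios keep the ceph-disk activation path. The scenario name 'collocated' is only an example of a non-lvm value.

```python
def osd_container_extra_args(osd_scenario):
    # Mirrors the new Jinja conditional in ceph-osd-run.sh.j2.
    if osd_scenario == 'lvm':
        return ['-v', '/run/lvm/lvmetad.socket:/run/lvm/lvmetad.socket',
                '-e', 'CEPH_DAEMON=OSD_CEPH_VOLUME_ACTIVATE']
    return ['-e', 'CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE']


print(' '.join(osd_container_extra_args('lvm')))
# -v /run/lvm/lvmetad.socket:/run/lvm/lvmetad.socket -e CEPH_DAEMON=OSD_CEPH_VOLUME_ACTIVATE
print(' '.join(osd_container_extra_args('collocated')))
# -e CEPH_DAEMON=OSD_CEPH_DISK_ACTIVATE
```
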
